input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
0:
title = title + self.telescope_entry.get()+", "
if len(self.accessory_entry.get()) > 0:
title = title + self.accessory_entry.get()+", "
if len(self.ccd_entry.get()) > 0:
title = title + self.ccd_entry.get()
ax.set_title(title)
ax.title.set_fontsize('x-large')
ax.errorbar(light_curve["JulianDate"]-2400000.5, light_curve["VarAbsMag"], yerr=light_curve["VarAbsErr"], fmt="o", color="black")
ax.set_xlabel('MJD', labelpad=10)
ax.set_ylabel(filter, labelpad=10)
plt.show()
def __init__(self):
self.window = tk.Tk()
self.screen_width = self.window.winfo_screenwidth()
self.screen_height = self.window.winfo_screenheight()
self.program_name = "MetroPSF 0.16"
# Matplotlib settings
matplotlib.rc('xtick', labelsize=7)
matplotlib.rc('ytick', labelsize=7)
# Maximize that works everywhere
m = self.window.maxsize()
self.window.geometry('{}x{}+0+0'.format(*m))
self.window.title(self.program_name)
self.menubar = tk.Menu(self.window)
self.filemenu = tk.Menu(self.menubar, tearoff=0)
self.filemenu.add_command(label="Open...", command=self.open_FITS_file)
self.filemenu.add_command(label="Save", command=self.save_FITS_file)
self.filemenu.add_command(label="Save As...", command=self.save_FITS_file_as)
self.filemenu.add_separator()
self.filemenu.add_command(label="Load Settings...", command=self.load_settings)
self.filemenu.add_command(label="Save Settings As...", command=self.save_settings_as)
self.menubar.add_cascade(label="File", menu=self.filemenu)
self.viewmenu = tk.Menu(self.menubar, tearoff=0)
self.viewmenu.add_command(label="Update", command=self.update_display)
self.viewmenu.add_separator()
self.viewmenu.add_command(label="Zoom In", command=self.zoom_in)
self.viewmenu.add_command(label="Zoom Out", command=self.zoom_out)
self.viewmenu.add_command(label="100% Zoom", command=self.zoom_100)
self.viewmenu.add_separator()
self.viewmenu.add_command(label="Next VSX Source", command=self.next_vsx_source)
self.menubar.add_cascade(label="View", menu=self.viewmenu)
self.photometrymenu = tk.Menu(self.menubar, tearoff=0)
self.photometrymenu.add_command(label="Iteratively Subtracted PSF Photometry", command=self.perform_photometry)
self.photometrymenu.add_command(label="Aperture Photometry", command=self.aperture_photometry)
self.photometrymenu.add_separator()
self.photometrymenu.add_command(label="Plot", command=self.plot_photometry_menu_action)
self.photometrymenu.add_command(label="Hide", command=self.hide_photometry)
self.photometrymenu.add_separator()
self.photometrymenu.add_command(label="Solve Image", command=self.solve_image)
self.photometrymenu.add_command(label="Get Comparison Stars", command=self.get_comparison_stars)
self.photometrymenu.add_command(label="Find Regression Model", command=self.find_linear_regression_model)
self.photometrymenu.add_separator()
self.photometrymenu.add_command(label="Remove Fit Outlier", command=self.remove_fit_outlier)
self.photometrymenu.add_command(label="Remove Fit Outliers Until Ensemble Limit", command=self.remove_fit_outliers_until_ensemble_limit)
self.photometrymenu.add_command(label="Remove Fit Outliers Beyond Separation Limit", command=self.remove_distant_fit_outliers)
self.photometrymenu.add_command(label="Reset Fit Outliers", command=self.reset_fit_outliers)
self.photometrymenu.add_separator()
self.photometrymenu.add_command(label="Delete Photometry File", command=self.delete_photometry_file)
self.photometrymenu.add_command(label="Display Background", command=self.display_background)
#self.photometrymenu.add_command(label="Plot Sigma Heatmap", command=self.plot_sigma_heatmap)
self.menubar.add_cascade(label="Photometry", menu=self.photometrymenu)
self.reportmenu = tk.Menu(self.menubar, tearoff=0)
self.reportmenu.add_command(label="BAA: Generate Report", command=self.generate_baa_report)
self.reportmenu.add_command(label="BAA: Light Curve from Reports..", command=self.display_curve_from_baa_reports)
self.reportmenu.add_separator()
self.reportmenu.add_command(label="AAVSO: Generate Report", command=self.generate_aavso_report)
self.reportmenu.add_separator()
self.reportmenu.add_command(label="BAA/AAVSO Reports on All VSX Sources", command=self.report_on_all_vsx_sources)
self.reportmenu.add_command(label="BAA/AAVSO Batch Reports on All VSX Sources..", command=self.batch_report_on_all_vsx_sources)
self.menubar.add_cascade(label="Report", menu=self.reportmenu)
self.window.config(menu=self.menubar)
self.left_half = tk.Frame(self.window) # Left half of the window
self.left_half.grid(row=0, column=0, sticky=tk.NSEW)
self.center = tk.Frame(self.window) # Center of the window
self.center.grid(row=0, column=1, sticky=tk.NSEW)
self.right_half = tk.Frame(self.window) # Right half of the window
self.right_half.grid(row=0, column=2, sticky=tk.NSEW)
tk.Grid.columnconfigure(self.window, 1, weight=1) # Expand center horizontally
tk.Grid.rowconfigure(self.window, 0, weight=1) # Expand everything vertically
self.filename_label = tk.Label(self.center, text="FITS:" + image_file)
self.filename_label.grid(row=0, column=0) # Place label
self.canvas = tk.Canvas(self.center, bg='black') # Main canvas
self.canvas.grid(row=1, column=0, sticky=tk.N+tk.S+tk.E+tk.W) # Place main canvas, sticky to occupy entire
# cell dimensions
tk.Grid.columnconfigure(self.center, 0, weight=1) # Expand main canvas column to fit whole window
tk.Grid.rowconfigure(self.center, 1, weight=1) # Expand main canvas row to fit whole window
self.canvas_scrollbar_V = tk.Scrollbar(self.center, orient=tk.VERTICAL) # Main canvas scrollbars
self.canvas_scrollbar_V.grid(row=1, column=1)
self.canvas_scrollbar_V.grid(sticky=tk.N+tk.S+tk.E+tk.W, column=1, row=1)
self.canvas_scrollbar_H = tk.Scrollbar(self.center, orient=tk.HORIZONTAL)
self.canvas_scrollbar_H.grid(row=2, column=0)
self.canvas_scrollbar_H.grid(sticky=tk.N + tk.S + tk.E + tk.W, column=0, row=2)
self.canvas_scrollbar_H.config(command=self.canvas.xview)
self.canvas_scrollbar_V.config(command=self.canvas.yview)
self.canvas.config(xscrollcommand=self.canvas_scrollbar_H.set)
self.canvas.config(yscrollcommand=self.canvas_scrollbar_V.set)
self.right_frame = tk.Frame(self.right_half) # We will lay out interface things into the new right_frame grid
self.right_frame.grid(row=1, column=2, sticky=tk.N) # Place right_frame into the top of the main canvas row, right next to it
#self.psf_canvas = tk.Canvas(self.right_frame, bg='grey', width=300, height=300) # Small PSF canvas
self.fig_psf = Figure()
self.psf_plot = self.fig_psf.add_subplot(111, projection='3d')
self.psf_plot_canvas = FigureCanvasTkAgg(self.fig_psf, self.right_frame) # PSF 3D plot canvas - Matplotlib wrapper for Tk
self.psf_plot_canvas.draw()
self.psf_canvas = self.psf_plot_canvas.get_tk_widget()
self.psf_canvas.config(width=int(self.screen_width/8.5), height=int(self.screen_width/8.5))
self.psf_canvas.grid(row=0, column=0) # Allocate small PSF canvas to a new grid inside the right_frame
self.fig = Figure()
self.linreg_plot = self.fig.add_subplot(111)
self.plot_canvas = FigureCanvasTkAgg(self.fig, self.right_frame) # Linear regression canvas - Matplotlib wrapper for Tk
self.plot_canvas.draw()
self.linreg_canvas = self.plot_canvas.get_tk_widget()
self.linreg_canvas.config(width=int(self.screen_width/8.5), height=int(self.screen_width/12))
self.linreg_canvas.grid(row=1, column=0) # Allocate small PSF canvas to a new grid inside the right_frame
self.left_frame = tk.Frame(self.left_half) # We will lay out interface things into the new right_frame grid
self.left_frame.grid(row=0, column=0, sticky=tk.N) # Place right_frame into the top of the main canvas row, right next to it
self.settings_frame = tk.Frame(self.left_frame) # Frame to hold settings grid
self.settings_frame.grid(row=2, column=0, sticky=tk.NSEW) # Settings_frame under the canvas in the right_frame
tk.Grid.columnconfigure(self.settings_frame, 0, weight=1) # Expand settings_frame column that holds labels
settings_entry_width = 16
extended_settings_entry_width = 30
row = 0
self.photometry_aperture_label = tk.Label(self.settings_frame, text="Fitting Width/Height, px:")
self.photometry_aperture_label.grid(row=row, column=0, sticky=tk.W)
self.photometry_aperture_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.photometry_aperture_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.photometry_aperture_entry, self.fit_shape)
row = row + 1
self.min_ensemble_magnitude_label = tk.Label(self.settings_frame, text="Minimum Ensemble Magnitude:")
self.min_ensemble_magnitude_label.grid(row=row, column=0, sticky=tk.W)
self.min_ensemble_magnitude_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.min_ensemble_magnitude_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.min_ensemble_magnitude_entry, "7")
row = row + 1
self.max_ensemble_magnitude_label = tk.Label(self.settings_frame, text="Maximum Ensemble Magnitude:")
self.max_ensemble_magnitude_label.grid(row=row, column=0, sticky=tk.W)
self.max_ensemble_magnitude_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.max_ensemble_magnitude_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.max_ensemble_magnitude_entry, "20")
row = row + 1
self.fwhm_label = tk.Label(self.settings_frame, text="FWHM, px:")
self.fwhm_label.grid(row=row, column=0, sticky=tk.W)
self.fwhm_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.fwhm_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.fwhm_entry, "4")
row = row + 1
self.star_detection_threshold_label = tk.Label(self.settings_frame, text="Star Detection Threshold, σ:")
self.star_detection_threshold_label.grid(row=row, column=0, sticky=tk.W)
self.star_detection_threshold_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.star_detection_threshold_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.star_detection_threshold_entry, "10")
row = row + 1
self.photometry_iterations_label = tk.Label(self.settings_frame, text="Photometry Iterations:")
self.photometry_iterations_label.grid(row=row, column=0, sticky=tk.W)
self.photometry_iterations_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.photometry_iterations_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.photometry_iterations_entry, "1")
row = row + 1
self.sharplo_label = tk.Label(self.settings_frame, text="Lower Bound for Sharpness:")
self.sharplo_label.grid(row=row, column=0, sticky=tk.W)
self.sharplo_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.sharplo_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.sharplo_entry, "0.5")
row = row + 1
self.bkg_filter_size_label = tk.Label(self.settings_frame, text="Background Median Filter, px:")
self.bkg_filter_size_label.grid(row=row, column=0, sticky=tk.W)
self.bkg_filter_size_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.bkg_filter_size_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.bkg_filter_size_entry, "1")
row = row + 1
self.filter_label = tk.Label(self.settings_frame, text="CCD Filter:")
self.filter_label.grid(row=row, column=0, sticky=tk.W)
self.filter_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.filter_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.filter_entry, "")
row = row + 1
self.exposure_label = tk.Label(self.settings_frame, text="Exposure Time:")
self.exposure_label.grid(row=row, column=0, sticky=tk.W)
self.exposure_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.exposure_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.exposure_entry, "0")
row = row + 1
self.matching_radius_label = tk.Label(self.settings_frame, text="Matching Radius, arcsec:")
self.matching_radius_label.grid(row=row, column=0, sticky=tk.W)
self.matching_radius_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.matching_radius_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.matching_radius_entry, "2")
row = row + 1
self.ensemble_limit_label = tk.Label(self.settings_frame, text="Limit Ensemble to:")
self.ensemble_limit_label.grid(row=row, column=0, sticky=tk.W)
self.ensemble_limit_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.ensemble_limit_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.ensemble_limit_entry, "1000")
row = row + 1
self.decimal_places_label = tk.Label(self.settings_frame, text="Decimal Places to Report:")
self.decimal_places_label.grid(row=row, column=0, sticky=tk.W)
self.decimal_places_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.decimal_places_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.decimal_places_entry, "2")
row = row + 1
self.max_outliers_separation_label = tk.Label(self.settings_frame, text="Ensemble Outliers Separation Limit, arcsec:")
self.max_outliers_separation_label.grid(row=row, column=0, sticky=tk.W)
self.max_outliers_separation_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.max_outliers_separation_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.max_outliers_separation_entry, "300")
row = row + 1
self.crop_fits_label = tk.Label(self.settings_frame, text="FITS Crop, %:")
self.crop_fits_label.grid(row=row, column=0, sticky=tk.W)
self.crop_fits_entry = tk.Entry(self.settings_frame, width=settings_entry_width)
self.crop_fits_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.crop_fits_entry, "100")
row = row + 1
self.astrometrynet_label = tk.Label(self.settings_frame, text="Astrometry.net Server:")
self.astrometrynet_label.grid(row=row, column=0, stick=tk.W)
self.astrometrynet_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width)
self.astrometrynet_entry.grid(row=row, column=1, sticky=tk.E)
self.set_entry_text(self.astrometrynet_entry, "nova.astrometry.net")
row = row + 1
self.astrometrynet_key_label = tk.Label(self.settings_frame, text="Astrometry.net API Key:")
self.astrometrynet_key_label.grid(row=row, column=0, stick=tk.W)
self.astrometrynet_key_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width)
self.astrometrynet_key_entry.grid(row=row, column=1, sticky=tk.E)
self.astrometrynet_key_entry.config(show="*")
self.set_entry_text(self.astrometrynet_key_entry, "pwjgdcpwaugkhkln")
row = row + 1
self.obscode_label = tk.Label(self.settings_frame, text="BAA Observer Code:")
self.obscode_label.grid(row=row, column=0, stick=tk.W)
self.obscode_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.obscode_entry.grid(row=row, column=1, sticky=tk.E)
row = row + 1
self.aavso_obscode_label = tk.Label(self.settings_frame, text="AAVSO Observer Code:")
self.aavso_obscode_label.grid(row=row, column=0, stick=tk.W)
self.aavso_obscode_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.aavso_obscode_entry.grid(row=row, column=1, sticky=tk.E)
row = row + 1
self.latitude_label = tk.Label(self.settings_frame, text="Observatory Latitude:")
self.latitude_label.grid(row=row, column=0, stick=tk.W)
self.latitude_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.latitude_entry.grid(row=row, column=1, sticky=tk.E)
row = row + 1
self.longitude_label = tk.Label(self.settings_frame, text="Observatory Longitude:")
self.longitude_label.grid(row=row, column=0, stick=tk.W)
self.longitude_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.longitude_entry.grid(row=row, column=1, sticky=tk.E)
row = row + 1
self.height_label = tk.Label(self.settings_frame, text="Observatory Height, m ASL:")
self.height_label.grid(row=row, column=0, stick=tk.W)
self.height_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.height_entry.grid(row=row, column=1, sticky=tk.E)
row = row + 1
self.telescope_label = tk.Label(self.settings_frame, text="Telescope:")
self.telescope_label.grid(row=row, column=0, stick=tk.W)
self.telescope_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.telescope_entry.grid(row=row, column=1, sticky=tk.EW)
row = row + 1
self.accessory_label = tk.Label(self.settings_frame, text="Accessory:")
self.accessory_label.grid(row=row, column=0, stick=tk.W)
self.accessory_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.accessory_entry.grid(row=row, column=1, sticky=tk.EW)
row = row + 1
self.ccd_label = tk.Label(self.settings_frame, text="Camera:")
self.ccd_label.grid(row=row, column=0, stick=tk.W)
self.ccd_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.ccd_entry.grid(row=row, column=1, sticky=tk.EW)
row = row + 1
self.ccd_gain_label = tk.Label(self.settings_frame, text="Gain, e-/ADU:")
self.ccd_gain_label.grid(row=row, column=0, stick=tk.W)
self.ccd_gain_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width)
self.ccd_gain_entry.grid(row=row, column=1, sticky=tk.EW)
row = row + 1
self.exposure_start_label = tk.Label(self.settings_frame, text="Exposure Start, JD:")
self.exposure_start_label.grid(row=row, column=0, stick=tk.W)
self.exposure_start_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.exposure_start_entry.grid(row=row, column=1, sticky=tk.EW)
row = row + 1
self.object_name_label = tk.Label(self.settings_frame, text="Object Name:")
self.object_name_label.grid(row=row, column=0, stick=tk.W)
self.object_name_entry = tk.Entry(self.settings_frame, width=extended_settings_entry_width, background='pink')
self.object_name_entry.grid(row=row, column=1, sticky=tk.EW)
row = row + 1
# Here we have full-width settings dropdowns in the right frame
self.weighting_label = tk.Label(self.right_frame, text="Ensemble Fit Weighting:")
self.weighting_label.grid(row=3, column=0, sticky=tk.W)
self.weighting_stringvar = tk.StringVar()
self.weighting_stringvar.set("None")
self.weighting_dropdown = tk.OptionMenu(self.right_frame, self.weighting_stringvar, "None", "Raw Flux", "Instrumental Magnitude", "PSF Sigma")
self.weighting_dropdown.grid(row=4, column=0, sticky=tk.EW)
self.catalog_label = tk.Label(self.right_frame, text="Comparison Catalog:")
self.catalog_label.grid(row=5, column=0, sticky=tk.W)
self.catalog_stringvar = tk.StringVar()
self.catalog_stringvar.set("APASS DR9")
self.catalog_dropdown = tk.OptionMenu(self.right_frame, self.catalog_stringvar, "APASS DR9", "URAT1", "USNO-B1.0", "Gaia DR2", "VizieR Catalog")
self.catalog_dropdown.grid(row=6, column=0, sticky=tk.EW)
self.vizier_catalog_label = tk.Label(self.right_frame, text="VizieR Catalog Number:")
self.vizier_catalog_label.grid(row=7, column=0, sticky=tk.W)
self.vizier_catalog_entry = tk.Entry(self.right_frame)
self.vizier_catalog_entry.grid(row=8, column=0, sticky=tk.EW)
self.fitter_label = tk.Label(self.right_frame, text="PSF Fitter:")
self.fitter_label.grid(row=9, column=0, sticky=tk.W)
self.fitter_stringvar = tk.StringVar()
self.fitter_stringvar.set("Levenberg-Marquardt")
self.fitter_dropdown = tk.OptionMenu(self.right_frame, self.fitter_stringvar, "Levenberg-Marquardt", "Linear Least Square", "Sequential | |
'../sql/sql.gyp:test_support_sql',
'../sync/sync.gyp:sync',
'../sync/sync.gyp:test_support_sync_api',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/cld_2/cld_2.gyp:cld2_static',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/leveldatabase/leveldatabase.gyp:leveldatabase',
'../third_party/libaddressinput/libaddressinput.gyp:libaddressinput_util',
'../third_party/libjingle/libjingle.gyp:libjingle',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber',
'../third_party/protobuf/protobuf.gyp:protobuf_lite',
'../third_party/re2/re2.gyp:re2',
'../ui/base/ui_base.gyp:ui_base',
'../ui/gfx/gfx.gyp:gfx',
'../ui/gfx/gfx.gyp:gfx_test_support',
'../ui/resources/ui_resources.gyp:ui_resources',
'../ui/resources/ui_resources.gyp:ui_test_pak',
'../ui/strings/ui_strings.gyp:ui_strings',
'../url/url.gyp:url_lib',
'components.gyp:auto_login_parser',
'components.gyp:autofill_core_browser',
'components.gyp:autofill_core_common',
'components.gyp:autofill_core_test_support',
'components.gyp:bookmarks_browser',
'components.gyp:bookmarks_managed',
'components.gyp:bookmarks_test_support',
'components.gyp:browser_sync_browser',
'components.gyp:browser_sync_browser_test_support',
'components.gyp:browsing_data_ui',
'components.gyp:bubble',
'components.gyp:captive_portal_test_support',
'components.gyp:cast_certificate',
'components.gyp:certificate_reporting',
'components.gyp:cloud_devices_common',
'components.gyp:component_updater',
'components.gyp:component_updater_test_support',
'components.gyp:content_settings_core_browser',
'components.gyp:content_settings_core_common',
'components.gyp:content_settings_core_test_support',
'components.gyp:crash_core_common',
'components.gyp:crx_file',
'components.gyp:data_reduction_proxy_core_browser',
'components.gyp:data_reduction_proxy_core_common',
'components.gyp:data_reduction_proxy_test_support',
'components.gyp:data_usage_core',
'components.gyp:data_use_measurement_core',
'components.gyp:device_event_log_component',
'components.gyp:dom_distiller_core',
'components.gyp:dom_distiller_protos',
'components.gyp:dom_distiller_test_support',
'components.gyp:favicon_base',
'components.gyp:favicon_core',
'components.gyp:flags_ui',
'components.gyp:gcm_driver',
'components.gyp:gcm_driver_test_support',
'components.gyp:google_core_browser',
'components.gyp:history_core_browser',
'components.gyp:history_core_common',
'components.gyp:history_core_test_support',
'components.gyp:image_fetcher',
'components.gyp:instance_id_test_support',
'components.gyp:invalidation_impl',
'components.gyp:invalidation_test_support',
'components.gyp:json_schema',
'components.gyp:keyed_service_core',
'components.gyp:language_usage_metrics',
'components.gyp:leveldb_proto',
'components.gyp:leveldb_proto_test_support',
'components.gyp:login',
'components.gyp:memory_pressure',
'components.gyp:metrics',
'components.gyp:metrics_net',
'components.gyp:metrics_profiler',
'components.gyp:metrics_test_support',
'components.gyp:metrics_ui',
'components.gyp:net_log',
'components.gyp:network_time',
'components.gyp:ntp_snippets',
'components.gyp:offline_pages',
'components.gyp:offline_pages_background_offliner',
'components.gyp:offline_pages_test_support',
'components.gyp:omnibox_browser',
'components.gyp:omnibox_test_support',
'components.gyp:open_from_clipboard',
'components.gyp:open_from_clipboard_test_support',
'components.gyp:os_crypt',
'components.gyp:password_manager_core_browser',
'components.gyp:password_manager_core_browser_test_support',
'components.gyp:password_manager_sync_browser',
'components.gyp:precache_core',
'components.gyp:pref_registry_test_support',
'components.gyp:proxy_config',
'components.gyp:query_parser',
'components.gyp:rappor',
'components.gyp:rappor_test_support',
'components.gyp:search',
'components.gyp:search_engines',
'components.gyp:search_engines_test_support',
'components.gyp:search_provider_logos',
'components.gyp:security_state',
'components.gyp:sessions_test_support',
'components.gyp:signin_core_browser',
'components.gyp:signin_core_browser_test_support',
'components.gyp:ssl_config',
'components.gyp:ssl_errors',
'components.gyp:suggestions',
'components.gyp:supervised_user_error_page',
'components.gyp:sync_bookmarks',
'components.gyp:sync_driver',
'components.gyp:sync_driver_test_support',
'components.gyp:sync_sessions',
'components.gyp:sync_sessions_test_support',
'components.gyp:syncable_prefs_test_support',
'components.gyp:toolbar_test_support',
'components.gyp:translate_core_browser',
'components.gyp:translate_core_common',
'components.gyp:translate_core_language_detection',
'components.gyp:undo_component',
'components.gyp:update_client',
'components.gyp:update_client_test_support',
'components.gyp:upload_list',
'components.gyp:url_matcher',
'components.gyp:user_prefs_tracked',
'components.gyp:user_prefs_tracked_test_support',
'components.gyp:variations',
'components.gyp:variations_net',
'components.gyp:variations_service',
'components.gyp:version_info',
'components.gyp:webdata_services_test_support',
'components.gyp:web_resource',
'components.gyp:web_resource_test_support',
'components_resources.gyp:components_resources',
'components_strings.gyp:components_strings',
'components_tests_pak',
'link_header_util/link_header_util.gyp:link_header_util',
'mime_util/mime_util.gyp:mime_util',
'prefs/prefs.gyp:prefs',
'prefs/prefs.gyp:prefs_test_support',
'url_formatter/url_formatter.gyp:url_formatter',
],
'conditions': [
['OS!="mac" and OS!="ios"', {
'sources!': [
'crash/core/common/objc_zombie_unittest.mm',
],
}],
['enable_rlz_support==1', {
'sources': [
'<@(rlz_unittest_sources)',
],
'dependencies': [
'../net/net.gyp:net_test_support',
'../rlz/rlz.gyp:test_support_rlz',
'components.gyp:rlz',
],
'conditions': [
['OS == "ios"', {
'dependencies': [
'../ui/base/ui_base.gyp:ui_base',
],
}],
],
}],
['toolkit_views == 1', {
'sources': [
'bookmarks/browser/bookmark_node_data_unittest.cc',
'constrained_window/constrained_window_views_unittest.cc',
],
'dependencies': [
'<(DEPTH)/ui/views/views.gyp:views',
'<(DEPTH)/ui/views/views.gyp:views_test_support',
'components.gyp:constrained_window',
]
}],
['OS=="win"', {
'dependencies': [
'components.gyp:browser_watcher',
'components.gyp:browser_watcher_client',
]
}],
['OS != "ios"', {
'sources': [
'<@(certificate_transparency_unittest_sources)',
'<@(child_trace_message_filter_unittest_sources)',
'<@(devtools_http_handler_unittest_sources)',
'<@(display_compositor_unittest_sources)',
'<@(domain_reliability_unittest_sources)',
'<@(error_page_unittest_sources)',
'<@(guest_view_unittest_sources)',
'<@(navigation_interception_unittest_sources)',
'<@(network_hints_unittest_sources)',
'<@(packed_ct_ev_whitelist_unittest_sources)',
'<@(page_load_metrics_unittest_sources)',
'<@(power_unittest_sources)',
'<@(safe_browsing_db_unittest_sources)',
'<@(safe_json_unittest_sources)',
'<@(scheduler_unittest_sources)',
'<@(storage_monitor_unittest_sources)',
'<@(tracing_unittest_sources)',
'<@(ui_unittest_sources)',
'<@(visitedlink_unittest_sources)',
'<@(wallpaper_unittest_sources)',
'<@(web_cache_unittest_sources)',
'<@(webcrypto_unittest_sources)',
'<@(web_modal_unittest_sources)',
],
'dependencies': [
'../content/content_shell_and_tests.gyp:test_support_content',
'../skia/skia.gyp:skia',
'components.gyp:autofill_content_browser',
'components.gyp:autofill_content_renderer',
'components.gyp:autofill_content_test_support',
'components.gyp:certificate_transparency',
'components.gyp:crash_test_support',
'components.gyp:data_reduction_proxy_content_browser',
'components.gyp:data_use_measurement_content',
'components.gyp:devtools_http_handler',
'components.gyp:display_compositor',
'components.gyp:dom_distiller_content_browser',
'components.gyp:dom_distiller_content_renderer',
'components.gyp:domain_reliability',
'components.gyp:error_page_renderer',
'components.gyp:favicon_content',
'components.gyp:guest_view_browser',
'components.gyp:guest_view_common',
'components.gyp:guest_view_test_support',
'components.gyp:history_content_browser',
'components.gyp:keyed_service_content',
'components.gyp:navigation_interception',
'components.gyp:network_hints_renderer',
'components.gyp:metrics_gpu',
'components.gyp:packed_ct_ev_whitelist',
'components.gyp:page_load_metrics_browser',
'components.gyp:page_load_metrics_renderer',
'components.gyp:password_manager_content_browser',
'components.gyp:power',
'components.gyp:precache_content',
'components.gyp:safe_browsing_db',
'components.gyp:safe_json',
'components.gyp:safe_json_test_support',
'components.gyp:sessions_content',
'components.gyp:storage_monitor',
'components.gyp:storage_monitor_test_support',
'components.gyp:test_database_manager',
'components.gyp:ui_zoom',
'components.gyp:url_matcher',
'components.gyp:visitedlink_browser',
'components.gyp:visitedlink_renderer',
'components.gyp:wallpaper',
'components.gyp:web_cache_browser',
'components.gyp:web_modal',
'components.gyp:web_modal_test_support',
'scheduler/scheduler.gyp:scheduler',
'test_runner/test_runner.gyp:test_runner',
'tracing.gyp:tracing',
'webcrypto/webcrypto.gyp:webcrypto',
'../third_party/boringssl/boringssl.gyp:boringssl',
'../third_party/re2/re2.gyp:re2',
],
'conditions': [
['OS=="android"', {
'sources' : [
'<@(web_restrictions_unittest_sources)',
],
'dependencies': [
'components.gyp:web_restrictions_browser',
'components.gyp:web_restrictions_test_support',
'../build/android/ndk.gyp:cpu_features',
],
}],
['OS=="android" and configuration_policy == 1', {
'dependencies': [
'components.gyp:policy_java',
],
}],
['safe_browsing == 2 and OS != "ios"', {
'dependencies': [
'components.gyp:safe_browsing_db_mobile',
],
'sources': [
'<@(safe_browsing_db_mobile_unittest_sources)',
],
}],
['OS != "mac" and use_aura == 0', {
'sources!': [
'display_compositor/buffer_queue_unittest.cc',
],
}],
],
}, { # 'OS == "ios"'
'sources': [
'webp_transcode/webp_decoder_unittest.mm',
],
'sources/': [
# Exclude all tests that depends on //content (based on layered-
# component directory structure).
['exclude', '^[^/]*/content/'],
],
'mac_bundle_resources': [
'<(PRODUCT_DIR)/ui_test.pak',
],
'dependencies': [
'../ios/web/ios_web.gyp:ios_web_test_support',
'../third_party/ocmock/ocmock.gyp:ocmock',
'components.gyp:autofill_ios_browser',
'components.gyp:sessions_ios',
'components.gyp:signin_ios_browser',
'components.gyp:signin_ios_browser_test_support',
'components.gyp:translate_ios_browser',
'components.gyp:webp_transcode',
],
'actions': [
{
'action_name': 'copy_test_data',
'variables': {
'test_data_files': [
'../net/data/ssl/certificates',
'test/data',
],
'test_data_prefix': 'components',
},
'includes': [ '../build/copy_test_data_ios.gypi' ],
},
],
'conditions': [
['configuration_policy==1', {
'sources/': [
['include', '^policy/'],
],
}],
],
}],
['disable_nacl==0', {
'sources': [
'<@(nacl_unittest_sources)',
],
'dependencies': [
'nacl.gyp:nacl_browser',
'nacl.gyp:nacl_common',
],
}],
['OS == "mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AddressBook.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/ImageCaptureCore.framework',
],
},
'sources!': [
'password_manager/core/browser/password_store_default_unittest.cc',
],
}],
['OS == "android"', {
'sources': [
'data_usage/android/traffic_stats_amortizer_unittest.cc',
'invalidation/impl/invalidation_logger_unittest.cc',
'invalidation/impl/invalidation_service_android_unittest.cc',
],
'sources!': [
'gcm_driver/gcm_account_mapper_unittest.cc',
'gcm_driver/gcm_channel_status_request_unittest.cc',
'gcm_driver/gcm_client_impl_unittest.cc',
'gcm_driver/gcm_driver_desktop_unittest.cc',
'gcm_driver/gcm_stats_recorder_impl_unittest.cc',
'sessions/core/session_backend_unittest.cc',
'storage_monitor/media_storage_util_unittest.cc',
'storage_monitor/storage_info_unittest.cc',
'storage_monitor/storage_monitor_unittest.cc',
'web_modal/web_contents_modal_dialog_manager_unittest.cc',
],
'dependencies': [
'components.gyp:data_usage_android',
'components.gyp:safe_json_java',
'components.gyp:variations_java',
'../content/content.gyp:content_java',
'../testing/android/native_test.gyp:native_test_native_code',
],
'dependencies!': [
'components.gyp:storage_monitor',
'components.gyp:storage_monitor_test_support',
'components.gyp:web_modal',
'components.gyp:web_modal_test_support',
],
}, {
'sources': [
'<@(invalidation_unittest_sources)',
],
}],
['OS != "ios" and OS != "android"', {
'sources': [
'<@(audio_modem_unittest_sources)',
'<@(copresence_unittest_sources)',
'<@(feedback_unittest_sources)',
'<@(proximity_auth_unittest_sources)',
'<@(webusb_detector_unittest_sources)',
],
'sources!': [
'variations/variations_request_scheduler_mobile_unittest.cc',
'web_resource/promo_resource_service_mobile_ntp_unittest.cc',
],
'dependencies': [
'../device/bluetooth/bluetooth.gyp:device_bluetooth_mocks',
'../device/core/core.gyp:device_core',
'../device/core/core.gyp:device_core_mocks',
'../device/hid/hid.gyp:device_hid_mocks',
'../device/usb/usb.gyp:device_usb',
'../device/usb/usb.gyp:device_usb_mocks',
'../google_apis/google_apis.gyp:google_apis_test_support',
'../third_party/protobuf/protobuf.gyp:protobuf_lite',
'components.gyp:audio_modem',
'components.gyp:audio_modem_test_support',
'components.gyp:copresence',
'components.gyp:copresence_test_support',
'components.gyp:cryptauth',
'components.gyp:cryptauth_proto',
'components.gyp:cryptauth_test_support',
'components.gyp:feedback_component',
'components.gyp:pref_registry_test_support',
'components.gyp:proximity_auth',
'components.gyp:proximity_auth_test_support',
'components.gyp:webusb',
],
}],
['chromeos==1', {
'sources': [
'arc/arc_bridge_service_unittest.cc',
'arc/ime/arc_ime_service_unittest.cc',
'arc/intent_helper/activity_icon_loader_unittest.cc',
'arc/intent_helper/font_size_util_unittest.cc',
'pairing/message_buffer_unittest.cc',
'timers/alarm_timer_unittest.cc',
'wifi_sync/wifi_config_delegate_chromeos_unittest.cc',
'wifi_sync/wifi_credential_syncable_service_unittest.cc',
'wifi_sync/wifi_credential_unittest.cc',
'wifi_sync/wifi_security_class_chromeos_unittest.cc',
'wifi_sync/wifi_security_class_unittest.cc',
'<@(metrics_leak_detector_unittest_sources)',
'<@(ownership_unittest_sources)',
'<@(user_manager_unittest_sources)',
],
'sources!': [
'signin/core/browser/signin_status_metrics_provider_unittest.cc',
'storage_monitor/storage_monitor_linux_unittest.cc',
],
'dependencies': [
'../chromeos/chromeos.gyp:chromeos_test_support',
'components.gyp:arc',
'components.gyp:arc_test_support',
'components.gyp:metrics_leak_detector',
'components.gyp:ownership',
'components.gyp:pairing',
'components.gyp:user_manager_test_support',
'components.gyp:wifi_sync',
],
}],
['OS=="linux"', {
'sources': [
'metrics/serialization/serialization_utils_unittest.cc',
],
'dependencies': [
'components.gyp:metrics_serialization',
'../dbus/dbus.gyp:dbus',
'../device/media_transfer_protocol/media_transfer_protocol.gyp:device_media_transfer_protocol',
],
}],
['OS=="linux" and use_udev==0', {
'dependencies!': [
'components.gyp:storage_monitor',
'components.gyp:storage_monitor_test_support',
],
'sources/': [
['exclude', '^storage_monitor/'],
],
}],
['configuration_policy==1', {
'dependencies': [
'components.gyp:policy_component',
'components.gyp:policy_component_test_support',
'components.gyp:policy_test_support',
],
'sources': [
'<@(policy_unittest_sources)',
'sync_driver/sync_policy_handler_unittest.cc',
],
'conditions': [
['OS=="android"', {
'sources/': [
['exclude', '^policy/core/common/async_policy_provider_unittest\\.cc'],
['exclude', '^tracing/trace_config_file_unittest\\.cc'],
],
}],
['OS=="android" or OS=="ios"', {
# Note: 'sources!' is processed before any 'sources/', so the
# ['include', '^policy/'] on iOS above will include all of the
# policy source files again. Using 'source/' here too will get
# these files excluded as expected.
'sources/': [
['exclude', '^policy/core/common/cloud/component_cloud_policy_service_unittest\\.cc'],
['exclude', '^policy/core/common/cloud/component_cloud_policy_store_unittest\\.cc'],
['exclude', '^policy/core/common/cloud/component_cloud_policy_updater_unittest\\.cc'],
['exclude', '^policy/core/common/cloud/external_policy_data_fetcher_unittest\\.cc'],
['exclude', '^policy/core/common/cloud/external_policy_data_updater_unittest\\.cc'],
['exclude', '^policy/core/common/cloud/resource_cache_unittest\\.cc'],
['exclude', '^policy/core/common/config_dir_policy_loader_unittest\\.cc'],
],
}],
['chromeos==1', {
'sources': [
'policy/core/common/proxy_policy_provider_unittest.cc',
],
'sources!': [
'policy/core/common/cloud/user_cloud_policy_manager_unittest.cc',
'policy/core/common/cloud/user_cloud_policy_store_unittest.cc',
],
}],
['OS=="ios" or OS=="mac"', {
'sources': [
'policy/core/common/mac_util_unittest.cc',
],
}],
],
}, { # configuration_policy!=1
'sources!': [
'search_engines/default_search_policy_handler_unittest.cc',
'sync_driver/sync_policy_handler_unittest.cc',
],
}],
['enable_plugins == 1', {
'sources': [
'content_settings/core/browser/content_settings_provider_unittest.cc',
],
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
'conditions': [
['OS == "android"', {
'variables': {
'components_browsertests_pak_input_resources': [
'<(PRODUCT_DIR)/components_tests_resources.pak',
'<(PRODUCT_DIR)/content_shell/assets/content_shell.pak',
],
'conditions': [
['icu_use_data_file_flag==1', {
'components_browsertests_pak_input_resources': [
'<(PRODUCT_DIR)/icudtl.dat',
],
}],
],
},
'includes': ['../build/android/v8_external_startup_data_arch_suffix.gypi'],
'targets': [
{
'target_name': 'components_browsertests_paks_copy',
'type': 'none',
'dependencies': [
'components_browsertests',
],
'variables': {
'dest_path': '<(PRODUCT_DIR)/components_browsertests_apk_shell/assets',
'src_files': [
'<@(components_browsertests_pak_input_resources)',
],
'clear': 1,
'conditions': [
['v8_use_external_startup_data==1', {
'renaming_sources': [
'<(PRODUCT_DIR)/natives_blob.bin',
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'renaming_destinations': [
'natives_blob_<(arch_suffix).bin',
'snapshot_blob_<(arch_suffix).bin',
],
}],
],
},
'includes': ['../build/android/copy_ex.gypi'],
},
{
'target_name': 'components_browsertests_manifest',
'type': 'none',
'variables': {
'jinja_inputs': ['test/android/browsertests_apk/AndroidManifest.xml.jinja2'],
'jinja_output': '<(SHARED_INTERMEDIATE_DIR)/components_browsertests_manifest/AndroidManifest.xml',
},
'includes': [ '../build/android/jinja_template.gypi' ],
},
{
# GN: //components:components_browsertests_apk
'target_name': 'components_browsertests_apk',
'type': 'none',
'dependencies': [
'../content/content.gyp:content_java',
'../content/content_shell_and_tests.gyp:content_java_test_support',
'../content/content_shell_and_tests.gyp:content_shell_browsertests_java',
'../content/content_shell_and_tests.gyp:content_shell_java',
'components_browsertests_paks_copy',
'components_browsertests',
],
'variables': {
'test_suite_name': 'components_browsertests',
'isolate_file': 'components_browsertests.isolate',
'java_in_dir': 'test/android/browsertests_apk',
'android_manifest_path': '<(SHARED_INTERMEDIATE_DIR)/components_browsertests_manifest/AndroidManifest.xml',
'resource_dir': 'test/android/browsertests_apk/res',
'asset_location': '<(PRODUCT_DIR)/components_browsertests_apk_shell/assets',
'conditions': [
['icu_use_data_file_flag==1', {
'additional_input_paths': [
'<(asset_location)/icudtl.dat',
],
}],
['v8_use_external_startup_data==1', {
'additional_input_paths': [
'<(asset_location)/natives_blob_<(arch_suffix).bin',
'<(asset_location)/snapshot_blob_<(arch_suffix).bin',
],
}],
],
},
'includes': [ '../build/apk_browsertest.gypi' ],
},
{
'target_name': 'components_unittests_apk',
'isolate_file': 'components_unittests.isolate',
'type': 'none',
'dependencies': [
'components_unittests',
'components.gyp:instance_id_driver_java',
'components.gyp:instance_id_driver_test_support_java',
'components.gyp:invalidation_java',
'components.gyp:signin_core_browser_java',
'components.gyp:web_restrictions_test_support_java',
],
'variables': {
'test_suite_name': 'components_unittests',
},
'includes': [ '../build/apk_test.gypi' ],
},
{
'target_name': 'components_junit_tests',
'type': 'none',
'dependencies': [
'components.gyp:invalidation_java',
'components.gyp:policy_java',
'components.gyp:policy_java_test_support',
'components.gyp:web_restrictions_java',
'../base/base.gyp:base_java',
'../base/base.gyp:base_java_test_support',
'../testing/android/junit/junit_test.gyp:junit_test_support',
],
'variables': {
'main_class': 'org.chromium.testing.local.JunitTestMain',
'src_paths': [
'invalidation/impl/android/junit/',
'policy/android/junit/',
'web_restrictions/browser/junit/'
],
'wrapper_script_name': 'helper/<(_target_name)',
},
'includes': [ '../build/host_jar.gypi' ],
},
],
'conditions': [
['test_isolation_mode != "noop"',
{
'targets': [
{
'target_name': 'components_browsertests_apk_run',
'type': 'none',
'dependencies': [
'components_browsertests_apk',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'components_browsertests_apk.isolate',
],
},
{
'target_name': 'components_unittests_apk_run',
'type': 'none',
'dependencies': [
'components_unittests_apk',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'components_unittests_apk.isolate',
],
},
],
},
],
],
}],
['OS != "ios"', {
'targets': [
{
# GN: //components:components_perftests
'target_name': 'components_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_perf',
'../content/content_shell_and_tests.gyp:test_support_content',
'../testing/gtest.gyp:gtest',
'../testing/perf/perf_test.gyp:perf_test',
'components.gyp:visitedlink_browser',
'scheduler/scheduler.gyp:scheduler',
],
'include_dirs': [
'..',
],
'sources': [
'scheduler/base/task_queue_manager_delegate_for_test.cc',
'scheduler/base/task_queue_manager_delegate_for_test.h',
'scheduler/base/task_queue_manager_perftest.cc',
'visitedlink/test/visitedlink_perftest.cc',
],
'conditions': [
['OS == "android"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'components_browsertests',
'type': '<(gtest_target_type)',
'defines!': ['CONTENT_IMPLEMENTATION'],
'dependencies': [
'../content/content.gyp:content_common',
'../content/content.gyp:content_gpu',
'../content/content.gyp:content_renderer',
'../content/content_shell_and_tests.gyp:content_browser_test_base',
'../content/content_shell_and_tests.gyp:content_browser_test_support',
'../content/content_shell_and_tests.gyp:content_shell_lib',
'../content/content_shell_and_tests.gyp:content_shell_pak',
'../content/content_shell_and_tests.gyp:test_support_content',
'../skia/skia.gyp:skia',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'components.gyp:autofill_content_browser',
'components.gyp:autofill_content_renderer',
'components.gyp:content_settings_core_browser',
'components.gyp:content_settings_core_common',
'components.gyp:dom_distiller_content_browser',
'components.gyp:dom_distiller_content_renderer',
'components.gyp:dom_distiller_core',
'components.gyp:password_manager_content_renderer',
'components.gyp:pref_registry_test_support',
'components_resources.gyp:components_resources',
'components_strings.gyp:components_strings',
'components_tests_pak',
'tracing.gyp:tracing',
],
'include_dirs': [
'..',
],
'defines': [
'HAS_OUT_OF_PROC_TEST_RUNNER',
],
'sources': [
# Note: test list duplicated in GN build.
'autofill/content/browser/risk/fingerprint_browsertest.cc',
'autofill/content/renderer/password_form_conversion_utils_browsertest.cc',
'dom_distiller/content/browser/distillable_page_utils_browsertest.cc',
'dom_distiller/content/browser/distiller_page_web_contents_browsertest.cc',
'dom_distiller/content/browser/test/dom_distiller_js_browsertest.cc',
'password_manager/content/renderer/credential_manager_client_browsertest.cc',
'tracing/child_trace_message_filter_browsertest.cc',
],
'conditions': [
['OS == "android"', {
'sources' : [
'test/android/browsertests_apk/components_browser_tests_jni_onload.cc',
],
'sources!': [
'autofill/content/browser/risk/fingerprint_browsertest.cc',
],
'dependencies': [
'../testing/android/native_test.gyp:native_test_support',
],
}],
['OS == "linux"', {
'sources': [
# content_extractor_browsertest is a standalone content extraction tool built as
# a MANUAL component_browsertest.
'dom_distiller/standalone/content_extractor_browsertest.cc',
],
}],
['OS=="win"', {
'resource_include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/content/app/resources',
],
'sources': [
'../content/shell/app/resource.h',
'../content/shell/app/shell.rc',
],
'dependencies': [
'<(DEPTH)/content/app/resources/content_resources.gyp:content_resources',
'<(DEPTH)/content/app/strings/content_strings.gyp:content_strings',
'<(DEPTH)/net/net.gyp:net_resources',
'<(DEPTH)/third_party/WebKit/public/blink_resources.gyp:blink_resources',
'<(DEPTH)/third_party/iaccessible2/iaccessible2.gyp:iaccessible2',
'<(DEPTH)/third_party/isimpledom/isimpledom.gyp:isimpledom',
],
'configurations': {
'Debug_Base': {
'msvs_settings': {
'VCLinkerTool': {
'LinkIncremental': '<(msvs_large_module_debug_link_mode)',
},
},
},
},
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
}],
['OS=="mac"', {
'dependencies': [
'../content/content_shell_and_tests.gyp:content_shell', # Needed for Content Shell.app's Helper.
],
}],
['enable_basic_printing==1 or enable_print_preview==1', {
'dependencies': [
'components.gyp:printing_test_support',
],
'sources' : [
'printing/test/print_web_view_helper_browsertest.cc',
],
}]
],
},
],
'conditions': [
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'components_browsertests_run',
'type': 'none',
'dependencies': [ 'components_browsertests' ],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'components_browsertests.isolate',
],
'conditions': [
['use_x11==1', {
'dependencies': [
'../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
],
}],
],
},
],
}],
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
| |
E501
collection_formats = {}
path_params = {}
query_params = []
if 'execution_id' in params:
query_params.append(('executionId', params['execution_id'])) # noqa: E501
if 'requisite_stage_ref_ids' in params:
query_params.append(('requisiteStageRefIds', params['requisite_stage_ref_ids'])) # noqa: E501
if 'spel_version' in params:
query_params.append(('spelVersion', params['spel_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'expressions' in params:
body_params = params['expressions']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/pipelines/{id}/evaluateVariables', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_pipeline_using_get(self, id, **kwargs): # noqa: E501
"""Retrieve a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pipeline_using_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_pipeline_using_get_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_pipeline_using_get_with_http_info(id, **kwargs) # noqa: E501
return data
def get_pipeline_using_get_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieve a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pipeline_using_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pipeline_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_pipeline_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/pipelines/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def invoke_pipeline_config_using_post1(self, application, pipeline_name_or_id, **kwargs): # noqa: E501
"""Trigger a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.invoke_pipeline_config_using_post1(application, pipeline_name_or_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: application (required)
:param str pipeline_name_or_id: pipelineNameOrId (required)
:param object trigger: trigger
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.invoke_pipeline_config_using_post1_with_http_info(application, pipeline_name_or_id, **kwargs) # noqa: E501
else:
(data) = self.invoke_pipeline_config_using_post1_with_http_info(application, pipeline_name_or_id, **kwargs) # noqa: E501
return data
def invoke_pipeline_config_using_post1_with_http_info(self, application, pipeline_name_or_id, **kwargs): # noqa: E501
"""Trigger a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.invoke_pipeline_config_using_post1_with_http_info(application, pipeline_name_or_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: application (required)
:param str pipeline_name_or_id: pipelineNameOrId (required)
:param object trigger: trigger
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'pipeline_name_or_id', 'trigger'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invoke_pipeline_config_using_post1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `invoke_pipeline_config_using_post1`") # noqa: E501
# verify the required parameter 'pipeline_name_or_id' is set
if ('pipeline_name_or_id' not in params or
params['pipeline_name_or_id'] is None):
raise ValueError("Missing the required parameter `pipeline_name_or_id` when calling `invoke_pipeline_config_using_post1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
if 'pipeline_name_or_id' in params:
path_params['pipelineNameOrId'] = params['pipeline_name_or_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'trigger' in params:
body_params = params['trigger']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/pipelines/{application}/{pipelineNameOrId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def invoke_pipeline_config_via_echo_using_post(self, application, pipeline_name_or_id, **kwargs): # noqa: E501
"""Trigger a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.invoke_pipeline_config_via_echo_using_post(application, pipeline_name_or_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: application (required)
:param str pipeline_name_or_id: pipelineNameOrId (required)
:param object trigger: trigger
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.invoke_pipeline_config_via_echo_using_post_with_http_info(application, pipeline_name_or_id, **kwargs) # noqa: E501
else:
(data) = self.invoke_pipeline_config_via_echo_using_post_with_http_info(application, pipeline_name_or_id, **kwargs) # noqa: E501
return data
def invoke_pipeline_config_via_echo_using_post_with_http_info(self, application, pipeline_name_or_id, **kwargs): # noqa: E501
"""Trigger a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.invoke_pipeline_config_via_echo_using_post_with_http_info(application, pipeline_name_or_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: application (required)
:param str pipeline_name_or_id: pipelineNameOrId (required)
:param object trigger: trigger
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'pipeline_name_or_id', 'trigger'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invoke_pipeline_config_via_echo_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `invoke_pipeline_config_via_echo_using_post`") # noqa: E501
# verify the required parameter 'pipeline_name_or_id' is set
if ('pipeline_name_or_id' not in params or
params['pipeline_name_or_id'] is None):
raise ValueError("Missing the required parameter `pipeline_name_or_id` when calling `invoke_pipeline_config_via_echo_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
if 'pipeline_name_or_id' in params:
path_params['pipelineNameOrId'] = params['pipeline_name_or_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'trigger' in params:
body_params = params['trigger']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/pipelines/v2/{application}/{pipelineNameOrId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def pause_pipeline_using_put(self, id, **kwargs): # noqa: E501
"""Pause a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.pause_pipeline_using_put(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.pause_pipeline_using_put_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.pause_pipeline_using_put_with_http_info(id, **kwargs) # noqa: E501
return data
def pause_pipeline_using_put_with_http_info(self, id, **kwargs): # noqa: E501
"""Pause a pipeline execution # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.pause_pipeline_using_put_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str | |
storm objects.
"""
if min_size_pixels == 0:
return local_max_dict
num_grid_cells_by_polygon = numpy.array(
[len(r) for r in local_max_dict[temporal_tracking.GRID_POINT_ROWS_KEY]],
dtype=int
)
indices_to_keep = numpy.where(
num_grid_cells_by_polygon >= min_size_pixels
)[0]
for this_key in local_max_dict:
if isinstance(local_max_dict[this_key], list):
local_max_dict[this_key] = [
local_max_dict[this_key][k] for k in indices_to_keep
]
elif isinstance(local_max_dict[this_key], numpy.ndarray):
local_max_dict[this_key] = local_max_dict[this_key][indices_to_keep]
return local_max_dict
def _write_new_tracks(storm_object_table, top_output_dir_name,
                      valid_times_unix_sec):
    """Writes tracking files (one Pickle file per time step).

    These files are the main output of both `run_tracking` and
    `reanalyze_across_spc_dates`.

    :param storm_object_table: See doc for `storm_tracking_io.write_file`.
    :param top_output_dir_name: Name of top-level directory.  File locations
        therein will be determined by `storm_tracking_io.find_file`.
    :param valid_times_unix_sec: 1-D numpy array of valid times.  One file will
        be written for each.
    """

    # Hoisted out of the loop: the column of valid times is reused for every
    # per-time-step subset below.
    valid_time_column = storm_object_table[tracking_utils.VALID_TIME_COLUMN]

    for valid_time_unix_sec in valid_times_unix_sec:
        output_file_name = tracking_io.find_file(
            top_tracking_dir_name=top_output_dir_name,
            valid_time_unix_sec=valid_time_unix_sec,
            spc_date_string=time_conversion.time_to_spc_date_string(
                valid_time_unix_sec),
            tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,
            source_name=tracking_utils.SEGMOTION_NAME,
            raise_error_if_missing=False)

        print('Writing new data to: "{0:s}"...'.format(output_file_name))

        # Write only the storm objects valid at this time step.
        tracking_io.write_file(
            storm_object_table=storm_object_table.loc[
                valid_time_column == valid_time_unix_sec
            ],
            pickle_file_name=output_file_name
        )
def _velocities_latlng_to_xy(
        east_velocities_m_s01, north_velocities_m_s01, latitudes_deg,
        longitudes_deg):
    """Converts velocities from lat-long components to x-y components.

    P = number of velocities

    :param east_velocities_m_s01: length-P numpy array of eastward instantaneous
        velocities (metres per second).
    :param north_velocities_m_s01: length-P numpy array of northward
        instantaneous velocities (metres per second).
    :param latitudes_deg: length-P numpy array of current latitudes (deg N).
    :param longitudes_deg: length-P numpy array of current longitudes (deg E).
    :return: x_velocities_m_s01: length-P numpy of x-velocities (metres per
        second in positive x-direction).
    :return: y_velocities_m_s01: Same but for y-direction.
    """

    # Magnitude and geodetic bearing of each one-second displacement.
    scalar_displacements_metres = numpy.sqrt(
        east_velocities_m_s01 ** 2 + north_velocities_m_s01 ** 2)

    geodetic_bearings_deg = geodetic_utils.standard_to_geodetic_angles(
        RADIANS_TO_DEGREES *
        numpy.arctan2(north_velocities_m_s01, east_velocities_m_s01)
    )

    # Lat-long endpoints after one second of motion.
    end_latitudes_deg, end_longitudes_deg = (
        geodetic_utils.start_points_and_displacements_to_endpoints(
            start_latitudes_deg=latitudes_deg,
            start_longitudes_deg=longitudes_deg,
            scalar_displacements_metres=scalar_displacements_metres,
            geodetic_bearings_deg=geodetic_bearings_deg)
    )

    projection_object = projections.init_azimuthal_equidistant_projection(
        central_latitude_deg=CENTRAL_PROJ_LATITUDE_DEG,
        central_longitude_deg=CENTRAL_PROJ_LONGITUDE_DEG)

    start_x_coords_metres, start_y_coords_metres = (
        projections.project_latlng_to_xy(
            latitudes_deg=latitudes_deg, longitudes_deg=longitudes_deg,
            projection_object=projection_object, false_easting_metres=0.,
            false_northing_metres=0.)
    )

    end_x_coords_metres, end_y_coords_metres = (
        projections.project_latlng_to_xy(
            latitudes_deg=end_latitudes_deg, longitudes_deg=end_longitudes_deg,
            projection_object=projection_object, false_easting_metres=0.,
            false_northing_metres=0.)
    )

    # Displacement over one second (metres) equals velocity (m s^-1).
    return (end_x_coords_metres - start_x_coords_metres,
            end_y_coords_metres - start_y_coords_metres)
def _storm_objects_latlng_to_xy(storm_object_table):
    """Converts centroids and velocities from lat-long to x-y coordinates.

    :param storm_object_table: See doc for `storm_tracking_io.write_file`.
    :return: storm_object_table: Same as input but with the following columns.
    storm_object_table.centroid_x_metres: x-coordinate of storm-object centroid.
    storm_object_table.centroid_y_metres: y-coordinate of storm-object centroid.
    storm_object_table.x_velocity_m_s01: Velocity in +x-direction (metres per
        second).
    storm_object_table.y_velocity_m_s01: Velocity in +y-direction (metres per
        second).
    """

    # Centroid latitudes/longitudes are needed both for projecting centroids
    # and for converting velocities, so pull them out once.
    centroid_latitudes_deg = storm_object_table[
        tracking_utils.CENTROID_LATITUDE_COLUMN].values
    centroid_longitudes_deg = storm_object_table[
        tracking_utils.CENTROID_LONGITUDE_COLUMN].values

    projection_object = projections.init_azimuthal_equidistant_projection(
        central_latitude_deg=CENTRAL_PROJ_LATITUDE_DEG,
        central_longitude_deg=CENTRAL_PROJ_LONGITUDE_DEG)

    centroid_x_coords_metres, centroid_y_coords_metres = (
        projections.project_latlng_to_xy(
            latitudes_deg=centroid_latitudes_deg,
            longitudes_deg=centroid_longitudes_deg,
            projection_object=projection_object,
            false_easting_metres=0., false_northing_metres=0.)
    )

    x_velocities_m_s01, y_velocities_m_s01 = _velocities_latlng_to_xy(
        east_velocities_m_s01=storm_object_table[
            tracking_utils.EAST_VELOCITY_COLUMN].values,
        north_velocities_m_s01=storm_object_table[
            tracking_utils.NORTH_VELOCITY_COLUMN].values,
        latitudes_deg=centroid_latitudes_deg,
        longitudes_deg=centroid_longitudes_deg
    )

    # `assign` returns a copy of the table with the new columns attached.
    return storm_object_table.assign(**{
        temporal_tracking.CENTROID_X_COLUMN: centroid_x_coords_metres,
        temporal_tracking.CENTROID_Y_COLUMN: centroid_y_coords_metres,
        temporal_tracking.X_VELOCITY_COLUMN: x_velocities_m_s01,
        temporal_tracking.Y_VELOCITY_COLUMN: y_velocities_m_s01
    })
def _shuffle_tracking_data(
        storm_object_table_by_date, tracking_file_names_by_date,
        valid_times_by_date_unix_sec, current_date_index, top_output_dir_name):
    """Shuffles data into and out of memory.

    T = number of SPC dates

    :param storm_object_table_by_date: length-T list of pandas DataFrames.  If
        data for the [i]th date are currently out of memory,
        storm_object_table_by_date[i] = None.  If data for the [i]th date are
        currently in memory, storm_object_table_by_date[i] has columns listed
        in `storm_tracking_io.write_file`.
    :param tracking_file_names_by_date: See doc for
        `_find_input_tracking_files`.
    :param valid_times_by_date_unix_sec: Same.
    :param current_date_index: Index of date currently being processed.  Must
        be in range 0...(T - 1).  Passing T itself acts as a sentinel meaning
        "all dates processed", which triggers a final flush (see below).
    :param top_output_dir_name: Name of top-level output directory.  See doc
        for `_write_new_tracks`.
    :return: storm_object_table_by_date: Same as input, except that different
        items are in memory.
    """

    num_spc_dates = len(tracking_file_names_by_date)

    # Shuffle data out of memory.
    if current_date_index == num_spc_dates:
        # Sentinel case: all dates processed.  Write the last two dates that
        # may still be in memory, then return.
        for j in [num_spc_dates - 2, num_spc_dates - 1]:
            if j < 0:
                continue

            _write_new_tracks(
                storm_object_table=storm_object_table_by_date[j],
                top_output_dir_name=top_output_dir_name,
                valid_times_unix_sec=valid_times_by_date_unix_sec[j]
            )
            print('\n')

            # An empty DataFrame (rather than None) marks this date as
            # already written, so the `is not None` check further down will
            # never re-read it from disk.
            storm_object_table_by_date[j] = pandas.DataFrame()

        return storm_object_table_by_date

    if current_date_index >= 2:
        # Write out the date two positions behind the current one
        # (presumably it can no longer be touched by tracking -- TODO
        # confirm against the caller's window size) and release its memory.
        _write_new_tracks(
            storm_object_table=storm_object_table_by_date[
                current_date_index - 2],
            top_output_dir_name=top_output_dir_name,
            valid_times_unix_sec=valid_times_by_date_unix_sec[
                current_date_index - 2]
        )
        print('\n')
        storm_object_table_by_date[current_date_index - 2] = pandas.DataFrame()

    # Shuffle data into memory.  Indices are [i - 1, i, i + 1, i + 2] for
    # current date index i.
    these_indices = numpy.linspace(
        current_date_index - 1, current_date_index + 2, num=4, dtype=int)

    for j in these_indices:
        if j < 0 or j >= num_spc_dates:
            continue

        # Skip dates already in memory (including empty DataFrames, which
        # mark dates already written out).
        if storm_object_table_by_date[j] is not None:
            continue

        storm_object_table_by_date[j] = tracking_io.read_many_files(
            tracking_file_names_by_date[j]
        )
        print('\n')

        # Convert centroids and velocities to projected x-y coordinates.
        storm_object_table_by_date[j] = _storm_objects_latlng_to_xy(
            storm_object_table_by_date[j]
        )

    return storm_object_table_by_date
def _radar_times_to_tracking_periods(
        radar_times_unix_sec, max_time_interval_sec):
    """Converts radar times to effective start/end times for tracking.

    When there is a gap of > `max_time_interval_sec` between successive radar
    times t_0 and t_1, tracking effectively ends at t_0 and then restarts at
    t_1.

    T = number of effective tracking periods

    :param radar_times_unix_sec: 1-D numpy array of radar times.
    :param max_time_interval_sec: Max time interval between successive radar
        times.
    :return: tracking_start_times_unix_sec: length-T numpy array of start
        times.
    :return: tracking_end_times_unix_sec: length-T numpy array of end times.
    """

    # A "gap" at index i means tracking ends at time i and restarts at i + 1.
    gap_indices = numpy.where(
        numpy.diff(radar_times_unix_sec) > max_time_interval_sec
    )[0]

    start_indices = numpy.unique(numpy.concatenate((
        numpy.array([0], dtype=int),
        gap_indices + 1
    )))
    end_indices = numpy.unique(numpy.concatenate((
        gap_indices,
        numpy.array([len(radar_times_unix_sec) - 1], dtype=int)
    )))

    tracking_start_times_unix_sec = radar_times_unix_sec[start_indices]
    tracking_end_times_unix_sec = radar_times_unix_sec[end_indices]

    print('\n')

    for k, (this_start_time_unix_sec, this_end_time_unix_sec) in enumerate(
            zip(tracking_start_times_unix_sec, tracking_end_times_unix_sec)):
        print('{0:d}th tracking period = {1:s} to {2:s}'.format(
            k + 1,
            time_conversion.unix_sec_to_string(
                this_start_time_unix_sec, TIME_FORMAT),
            time_conversion.unix_sec_to_string(
                this_end_time_unix_sec, TIME_FORMAT)
        ))

    print('\n')
    return tracking_start_times_unix_sec, tracking_end_times_unix_sec
def _read_tracking_periods(tracking_file_names):
    """Reads tracking periods from files.

    T = number of tracking periods

    :param tracking_file_names: 1-D list of paths to input files (will be read
        by `storm_tracking_io.read_file`).
    :return: tracking_start_times_unix_sec: length-T numpy array of start
        times.
    :return: tracking_end_times_unix_sec: length-T numpy array of end times.
    """

    # Seed each list with an empty int array so concatenation works even when
    # `tracking_file_names` is empty.
    start_time_arrays = [numpy.array([], dtype=int)]
    end_time_arrays = [numpy.array([], dtype=int)]

    for this_file_name in tracking_file_names:
        print('Reading tracking periods from: "{0:s}"...'.format(
            this_file_name))

        this_storm_object_table = tracking_io.read_file(this_file_name)
        start_time_arrays.append(
            this_storm_object_table[
                tracking_utils.TRACKING_START_TIME_COLUMN].values
        )
        end_time_arrays.append(
            this_storm_object_table[
                tracking_utils.TRACKING_END_TIME_COLUMN].values
        )

    # `numpy.unique` both deduplicates and sorts the pooled times.
    return (
        numpy.unique(numpy.concatenate(start_time_arrays)),
        numpy.unique(numpy.concatenate(end_time_arrays))
    )
def _old_to_new_tracking_periods(
        tracking_start_times_unix_sec, tracking_end_times_unix_sec,
        max_time_interval_sec):
    """Converts old tracking periods to new tracking periods.

    N = number of original tracking periods
    n = number of final tracking periods

    :param tracking_start_times_unix_sec: length-N numpy array with start
        times of original periods.
    :param tracking_end_times_unix_sec: length-N numpy array with end times of
        original periods.
    :param max_time_interval_sec: Max time interval between successive local
        maxima (storm objects).  If successive local maxima are >
        `max_time_interval_sec` apart, they cannot be linked.  This is the max
        time interval for the final tracking periods, and it may be different
        than max interval for the original periods.
    :return: tracking_start_times_unix_sec: length-n numpy array with start
        times of final tracking periods.
    :return: tracking_end_times_unix_sec: length-n numpy array with end times
        of final tracking periods.
    """

    tracking_start_times_unix_sec, tracking_end_times_unix_sec = (
        temporal_tracking.check_tracking_periods(
            tracking_start_times_unix_sec=tracking_start_times_unix_sec,
            tracking_end_times_unix_sec=tracking_end_times_unix_sec)
    )

    # Gap between each period's start and the previous period's end.
    interperiod_diffs_sec = (
        tracking_start_times_unix_sec[1:] - tracking_end_times_unix_sec[:-1]
    )

    def _time_to_string(this_time_unix_sec):
        """Formats one Unix time for log messages."""
        return time_conversion.unix_sec_to_string(
            this_time_unix_sec, TIME_FORMAT)

    print('\n')

    for k in range(len(tracking_start_times_unix_sec)):
        this_message_string = (
            '{0:d}th original tracking period = {1:s} to {2:s}'
        ).format(
            k + 1,
            _time_to_string(tracking_start_times_unix_sec[k]),
            _time_to_string(tracking_end_times_unix_sec[k])
        )

        # The first period has no predecessor, hence no gap to report.
        if k > 0:
            this_message_string += (
                ' ... gap between this and previous period = {0:d} seconds'
            ).format(interperiod_diffs_sec[k - 1])

        print(this_message_string)

    # Merge consecutive periods whose gap is small enough to link across:
    # drop the start of the later period and the end of the earlier one.
    bad_indices = numpy.where(interperiod_diffs_sec <= max_time_interval_sec)[0]
    tracking_start_times_unix_sec = numpy.delete(
        tracking_start_times_unix_sec, bad_indices + 1
    )
    tracking_end_times_unix_sec = numpy.delete(
        tracking_end_times_unix_sec, bad_indices
    )

    print('\n')

    for k in range(len(tracking_start_times_unix_sec)):
        print('{0:d}th final tracking period = {1:s} to {2:s}'.format(
            k + 1,
            _time_to_string(tracking_start_times_unix_sec[k]),
            _time_to_string(tracking_end_times_unix_sec[k])
        ))

    print('\n')
    return tracking_start_times_unix_sec, tracking_end_times_unix_sec
def run_tracking(
top_radar_dir_name, top_output_dir_name, first_spc_date_string,
last_spc_date_string, first_time_unix_sec=None, last_time_unix_sec=None,
first_numeric_id=None,
echo_top_field_name=radar_utils.ECHO_TOP_40DBZ_NAME,
radar_source_name=radar_utils.MYRORSS_SOURCE_ID,
top_echo_classifn_dir_name=None,
min_echo_top_km=DEFAULT_MIN_ECHO_TOP_KM,
smoothing_radius_deg_lat=DEFAULT_SMOOTHING_RADIUS_DEG_LAT,
half_width_for_max_filter_deg_lat=
DEFAULT_HALF_WIDTH_FOR_MAX_FILTER_DEG_LAT,
min_intermax_distance_metres=DEFAULT_MIN_INTERMAX_DISTANCE_METRES,
min_polygon_size_pixels=DEFAULT_MIN_SIZE_PIXELS,
max_link_time_seconds=DEFAULT_MAX_LINK_TIME_SECONDS,
max_velocity_diff_m_s01=DEFAULT_MAX_VELOCITY_DIFF_M_S01,
max_link_distance_m_s01=DEFAULT_MAX_LINK_DISTANCE_M_S01,
min_track_duration_seconds=0, recompute_centroids=True):
"""Runs echo-top-tracking. This is effectively the main method.
:param top_radar_dir_name: See doc for `_find_input_radar_files`.
:param top_output_dir_name: See doc for `_write_new_tracks`.
:param first_spc_date_string: See doc for `_check_time_period`.
:param last_spc_date_string: Same.
:param first_time_unix_sec: Same.
:param last_time_unix_sec: Same.
:param first_numeric_id: First numeric storm ID. Both primary and secondary
IDs will start at this number. Default is 100 * (Unix time at beginning
of first SPC date).
:param echo_top_field_name: See doc for `_find_input_radar_files`.
:param radar_source_name: Same.
:param top_echo_classifn_dir_name: Name of top-level directory with
echo-classification files. Files therein will be found by
`echo_classification.find_classification_file` and read by
`echo_classification.read_classifications`. Tracking will be performed
only on convective pixels. If `top_echo_classifn_dir_name is None`,
tracking will be performed on all pixels.
:param min_echo_top_km: See doc for `_local_maxima_to_polygons`.
:param smoothing_radius_deg_lat: See doc for `_gaussian_smooth_radar_field`.
:param half_width_for_max_filter_deg_lat: See doc for `_find_local_maxima`.
:param min_intermax_distance_metres: See doc for
`_remove_redundant_local_maxima`.
:param min_polygon_size_pixels: See doc for `_remove_small_polygons`.
:param max_link_time_seconds: See doc for
`temporal_tracking.link_local_maxima_in_time`.
:param max_velocity_diff_m_s01: Same.
:param max_link_distance_m_s01: Same.
:param min_track_duration_seconds: See doc for
`temporal_tracking.remove_short_lived_storms`.
:param recompute_centroids: Boolean | |
tile (i.e. tile_index==0) should be centered
offset_from_center_for_tile_index_0 = -(tile_width//2)
self.__tiles_x = [ cols//2
+ offset_from_center_for_tile_index_0
+ (tile_width+tiles_gap_x) * tile_index
for tile_index in range(-(self.__game_core.WORD_LENGTH//2),
math.ceil(self.__game_core.WORD_LENGTH/2))]
break
#
# calculate min. required terminal size, notify player to resize if unmet
self.__min_required_total_height = ( self.__header_start_y
+ self.__header_height
+ tiles_small_total_height
+ kb_height)
self.__min_required_total_width = max(self.__game_title_width,
tiles_small_total_width,
kb_width)
if ( rows < self.__min_required_total_height
or cols < self.__min_required_total_width):
raise WindowResized(rows,
cols,
self.__min_required_total_height,
self.__min_required_total_width,
jump_to_panel or self.__class__.__name__)
#
# if min size met (above), tiles must be defined by now:
assert self.__active_tile_def is not None
#
# overall:
self.__leftmost_x = min(self.__tiles_x[0], self.__kb_start_x)
self.__widest = max(self.__game_title_width,
tiles_total_width,
kb_width)
#
# header:
self.__header_start_x = self.__leftmost_x
self.__header_width = self.__widest
# If all border chars, for all LetterStatus.*.value mode tiles, are
# spaces, use color pair 'letter_'+status instead of
# 'border_'+status.
# (*) NOTE: After support was added to this program for the more
# complex multi-line tiles, this logic was necessary in
# order to keep the code reasonably generic while
# retaining the ability to properly render the simpler
# single-line tiles (which originally were the only kind
# of tiles this program supported). The simpler tiles
# are still used, if the larger tiles don't fit.
# In the initial implementation, simpler tiles didn't
# formally have modes, they effectively only had a
# "blank" mode. So if the tile was "[_]", after an 'A'
# was typed it would look like " A ", and after a guess
# was submitted and the tiles redrawn, that entire " A "
# would be drawn with the letter's status color used as
# the background color. In other words it would form a
# rectangular block with nice straight edges.
# The implementation of complex tiles, however, calls for
# all border characters (i.e. everything other than the
# letter) to be drawn using the letter's status color as
# the *foreground* color, and for the background use the
# game board's background color. This meant that spaces
# could no longer be used around the letter (i.e. " A ")
# and it would have to be some glyph that would fill an
# entire cell. The only such glyph that was found was
# U+2588 (i.e. █), but the problem was it was rendered
# with 1 less pixel row (on the bottom) than the letter
# with its background color. This meant that the tile
# did not appear to have nice straight edges. Therefore
# this compromise was created. If all the
# LetterStatus.*.value modes' tiles' border characters
# are spaces (which they are for the simple (smaller)
# tile), then render those spaces using the letter
# status color as a background color, not as a
# foreground color.
self.__use_letter_status_color_for_entire_tile = {c
for s in LetterStatus
for line in self.__active_tile_def[s.value]
for c in line
if c!=self.__TILE_LETTER_PLACEHOLDER} == {' '}
# in case of faulty logic above, assert each individual requirement
assert self.__header_start_y >= 0
assert self.__header_start_x >= 0
assert self.__game_title_start_x >= 0
assert self.__tiles_x[0] >= 0
assert self.__kb_start_y >= self.__tiles_y[-1]+tile_height
assert self.__kb_start_x >= 0
#
assert self.__kb_start_y+kb_height <= rows
assert self.__kb_start_x+kb_width <= cols
assert self.__tiles_x[-1]+tile_width <= cols
assert self.__game_title_start_x+self.__game_title_width <= cols
assert self.__header_start_x+self.__header_width <= cols
    def __draw_header(self):
        """Draw the game title, the separator line below it, and the header
        buttons (help, play stats, settings), registering a click region for
        each button."""
        # game title and header line
        for y_offset,line in enumerate(self.__game_title_lines):
            self._win.addstr(self.__header_start_y + y_offset,
                             self.__game_title_start_x,
                             line,
                             curses.A_BOLD|self._colors.attr('text_default'))
        # horizontal separator spanning the full header width
        self._win.addstr(self.__header_start_y+len(self.__game_title_lines),
                         self.__header_start_x,
                         self.__header_width*glyphs.HORIZONTAL_LINE_SEGMENT,
                         self._colors.attr('separator_line'))
        # Buttons sit vertically centered relative to the title block.
        # NOTE(review): unlike the title/separator above, buttons_y is NOT
        # offset by self.__header_start_y — this is only correct if
        # __header_start_y is always 0; confirm.
        buttons_y = len(self.__game_title_lines)//2
        # help button
        # NOTE: This is a double-width character, but the glyph is mostly in
        #       the left half of the allotted space, so we still only use a
        #       width of 1 for the click region.
        help_x = self.__header_start_x
        self._win.addstr(buttons_y,
                         help_x,
                         glyphs.HELP_BUTTON,
                         self._colors.attr('header_button'))
        self._input.add_to_click_map(buttons_y,
                                     help_x,
                                     1,
                                     1,
                                     self.__help_panel)
        # stats button (only when play statistics are being kept)
        # NOTE: This is a double-width character, so we must subtract 3
        #       (instead of 2), and use a width of 2 for the click region.
        if self.__game_core.play_stats is not None:
            stats_x = self.__header_start_x+self.__header_width-3
            self._win.addstr(buttons_y,
                             stats_x,
                             glyphs.STATS_BUTTON,
                             self._colors.attr('header_button'))
            self._input.add_to_click_map(buttons_y,
                                         stats_x,
                                         1,
                                         2,
                                         self.__play_stats_panel)
        # settings button (rightmost column of the header)
        settings_x = self.__header_start_x+self.__header_width-1
        self._win.addstr(buttons_y,
                         settings_x,
                         glyphs.SETTINGS_BUTTON,
                         self._colors.attr('header_button'))
        self._input.add_to_click_map(buttons_y,
                                     settings_x,
                                     1,
                                     1,
                                     self.__settings_panel)
def __draw_tile(self, y, x, mode, letter=' '):
if mode == 'unsubmitted':
letter_attr = self._colors.attr(f'letter_{mode}')
border_attr = self._colors.attr(f'border_{mode}')
elif mode in {s.value for s in LetterStatus}:
letter_attr = self._colors.attr(f'letter_{mode}')
border_attr = (letter_attr
if self.__use_letter_status_color_for_entire_tile else
self._colors.attr(f'border_{mode}'))
else:
letter_attr = None
border_attr = self._colors.attr(f'border_{mode}')
for y_offset,line in enumerate(self.__active_tile_def[mode]):
border_segments = line.split(self.__TILE_LETTER_PLACEHOLDER, 1)
self._win.addstr(y+y_offset,
x,
border_segments[0],
border_attr)
if len(border_segments) == 2:
self._win.addstr(y+y_offset,
x+len(border_segments[0]),
letter,
curses.A_BOLD|letter_attr)
self._win.addstr(y+y_offset,
x+len(border_segments[0])+1,
border_segments[1],
border_attr)
def __draw_guess(self, index, guess, tile_flip_delay_msec=0): # will refresh between each letter if delay>0, else caller must refresh (note that things like w.getch() and w.getkey() seem to automatically do w.refresh() on window w (getkey does so *before* waiting for the key))
fancy_auto_mode = tile_flip_delay_msec>0
do_animation = fancy_auto_mode and 'blink' in self.__active_tile_def
delay_msec = tile_flip_delay_msec//(2 if do_animation else 1)
for i,(letter,letter_status) in enumerate(zip(guess['word'].upper(), guess['letter_statuses'])):
if fancy_auto_mode and i>0:
curses.napms(delay_msec)
if do_animation:
self.__draw_tile(self.__tiles_y[index],
self.__tiles_x[i],
'blink')
self._win.refresh()
curses.napms(delay_msec)
self.__draw_tile(self.__tiles_y[index],
self.__tiles_x[i],
letter_status.value,
letter)
if fancy_auto_mode:
self._win.refresh()
    def __draw_keyboard(self):
        """Draw the on-screen keyboard, coloring each key by its letter's
        guess status, and register a click region per key."""
        # Vertical padding (in rows) above/below the glyph line of each key.
        pad_height_top = (self.__KB_ROW_HEIGHT-1)//2
        pad_height_bottom = self.__KB_ROW_HEIGHT-1-pad_height_top
        for r,kb_row in enumerate(self.__KB_ROWS):
            row_start_y = ( self.__kb_start_y
                            + r*(self.__KB_ROW_HEIGHT+self.__KB_GAP_Y))
            x = self.__kb_start_x + kb_row['x_offset']
            for key_glyph,key_code,key_width in kb_row['keys']:
                # Clicking anywhere on the key's rectangle injects key_code.
                self._input.add_to_click_map(row_start_y,
                                             x,
                                             self.__KB_ROW_HEIGHT,
                                             key_width,
                                             key_code)
                # Horizontal padding to center the glyph within the key.
                pad_width_left = (key_width-len(key_glyph))//2
                pad_width_right = key_width-len(key_glyph)-pad_width_left
                # Color by letter status; None means not guessed yet.
                key_letter_status = self.__game_core.letter_status(key_code)
                if key_letter_status is None:
                    key_attr = self._colors.attr('unguessed')
                else:
                    key_attr = self._colors.attr(f'letter_{key_letter_status.value}')
                # Filler rows above the glyph line.
                for y_offset in range(pad_height_top):
                    self._win.addstr(row_start_y + y_offset,
                                     x,
                                     key_width*' ',
                                     key_attr)
                try:
                    # either of the following addstr calls may run into
                    # https://stackoverflow.com/questions/36387625, so we
                    # catch the exception and ignore it
                    self._win.addstr(row_start_y + pad_height_top,
                                     x,
                                     f'{pad_width_left*" "}{key_glyph}{pad_width_right*" "}',
                                     key_attr)
                    # Filler rows below the glyph line.
                    for y_offset in range(pad_height_bottom):
                        self._win.addstr(row_start_y + pad_height_top + 1 + y_offset,
                                         x,
                                         key_width*' ',
                                         key_attr)
                except _curses.error:
                    pass
                x += self.__KB_GAP_X+key_width
def __full_draw(self):
self._win.bkgd(self._colors.attr('background'))
self.__draw_header()
for g in range(self.__game_core.MAX_GUESSES):
if g < len(self.__game_core.guesses):
self.__draw_guess(g, self.__game_core.guesses[g])
else:
for i in range(self.__game_core.WORD_LENGTH):
if ( g == len(self.__game_core.guesses)
and i < len(self.__game_core.pending_guess_letters)):
self.__draw_tile(self.__tiles_y[g],
self.__tiles_x[i],
'unsubmitted',
self.__game_core.pending_guess_letters[i].upper())
else:
self.__draw_tile(self.__tiles_y[g],
self.__tiles_x[i],
'blank')
self.__draw_keyboard()
    def __help_panel(self):
        """Placeholder for the help panel (not implemented yet)."""
        pass
def __play_stats_panel(self):
shareable_status = ShareableStatus(self.__game_core,
self.__game_num_str,
self.__config)
play_stats_panel = PlayStatsPanel(self._stdscr,
self._colors,
self.__game_core,
shareable_status)
self._colors.dim()
self.__full_draw()
play_stats_panel.run(parent_min_required_total_height = self.__min_required_total_height,
parent_min_required_total_width = self.__min_required_total_width)
self._colors.undim()
self.__full_draw()
def __settings_panel(self):
settings_panel = SettingsPanel(self._stdscr,
self._colors,
self.__config,
self.__game_core,
self.__game_num_str)
settings_panel.run(height=self._stdscr.getmaxyx()[0],
width=self.__widest,
start_y=0,
start_x=self.__leftmost_x,
parent_min_required_total_height = self.__min_required_total_height,
parent_min_required_total_width = self.__min_required_total_width)
def _run(self, jump_to_panel):
self.__init_size_calculations(jump_to_panel)
# set background and perform initial drawing
self.__full_draw()
if jump_to_panel:
panels = {panel_class.__name__: panel_launcher
for (panel_class, panel_launcher)
in ((HelpPanel, self.__help_panel),
(PlayStatsPanel, self.__play_stats_panel),
(SettingsPanel, self.__settings_panel))}
if jump_to_panel in panels:
panels[jump_to_panel]()
if not self.__game_core.is_completed():
# event loops
# outermost loop (for loop) is "guess loop"
# nested loop (while loop) is "letter loop"
# innermost loop (while loop) is "input loop"
for g in range(len(self.__game_core.guesses),
self.__game_core.MAX_GUESSES):
# BODY OF GUESS LOOP: BEGIN
guess_result = None
i = len(self.__game_core.pending_guess_letters)
while i < self.__game_core.WORD_LENGTH+1: # +1 for an extra iteration to handle player hitting enter or backspace after typing entire word
# BODY OF LETTER LOOP: BEGIN
k = self._input.get(self.__min_required_total_height,
self.__min_required_total_width)
if k == '~':
return
if k == 'KEY_BACKSPACE' and i > 0:
i -= 1
self.__draw_tile(self.__tiles_y[g],
self.__tiles_x[i],
'blank')
self.__game_core.remove_last_letter_from_pending_guess()
elif k and len(k) == 1 and k.isalpha() and i < self.__game_core.WORD_LENGTH:
l = k.upper()
if self.__game_core.append_letter_to_pending_guess(l):
self.__draw_tile(self.__tiles_y[g],
self.__tiles_x[i],
'unsubmitted',
l)
i += 1
elif k == '\n':
(guess_result,
first_offending_letter,
first_offending_position) = self.__game_core.submit_pending_guess()
if guess_result in (GuessResult.WRONG,
GuessResult.WRONG_AND_GAME_OVER,
GuessResult.RIGHT):
self.__draw_guess(g, self.__game_core.guesses[-1], self.__TILE_FLIP_DELAY_MSEC)
self.__draw_keyboard()
curses.flushinp() # drop any keystrokes made by player during __draw_guess()'s "animation"
i += 1
elif guess_result in (GuessResult.INVALID_TOO_SHORT,
GuessResult.INVALID,
GuessResult.INVALID_HARD_MODE_MISSING_PREV_GUESS_MISPLACED_LETTER,
GuessResult.INVALID_HARD_MODE_MISSING_PREV_GUESS_CORRECT_LETTER):
if guess_result == GuessResult.INVALID_TOO_SHORT:
toast_text = 'Not enough letters'
elif guess_result == GuessResult.INVALID:
toast_text = 'Not in word list'
elif guess_result == GuessResult.INVALID_HARD_MODE_MISSING_PREV_GUESS_MISPLACED_LETTER:
toast_text = f'Guess must contain {first_offending_letter.upper()}'
elif guess_result == GuessResult.INVALID_HARD_MODE_MISSING_PREV_GUESS_CORRECT_LETTER:
toast_text = (f'{self.__ORDINALS[first_offending_position]}'
f' letter must be {first_offending_letter.upper()}')
else:
toast_text = None
if toast_text:
toast = Toast(self._stdscr, self._colors)
toast.run(toast_text)
# BODY OF LETTER LOOP: END
if guess_result == GuessResult.RIGHT:
break
# BODY OF GUESS LOOP: END
if guess_result == GuessResult.WRONG_AND_GAME_OVER:
answer | |
data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Attachments
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_bank_transfer_attachment_by_file_name`"
)
# verify the required parameter 'bank_transfer_id' is set
if bank_transfer_id is None:
raise ValueError(
"Missing the required parameter `bank_transfer_id` "
"when calling `update_bank_transfer_attachment_by_file_name`"
)
# verify the required parameter 'file_name' is set
if file_name is None:
raise ValueError(
"Missing the required parameter `file_name` "
"when calling `update_bank_transfer_attachment_by_file_name`"
)
# verify the required parameter 'body' is set
if body is None:
raise ValueError(
"Missing the required parameter `body` "
"when calling `update_bank_transfer_attachment_by_file_name`"
)
collection_formats = {}
path_params = {
"BankTransferID": bank_transfer_id,
"FileName": file_name,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = body
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/octet-stream"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/BankTransfers/{BankTransferID}/Attachments/{FileName}"
)
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Attachments",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "update_bank_transfer_attachment_by_file_name"
)
def update_contact(
self,
xero_tenant_id,
contact_id,
contacts,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates a specific contact in a Xero organisation # noqa: E501
OAuth2 scope: accounting.contacts
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str contact_id: Unique identifier for a Contact (required)
:param Contacts contacts: an array of Contacts containing single Contact object with properties to update (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Contacts
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_contact`"
)
# verify the required parameter 'contact_id' is set
if contact_id is None:
raise ValueError(
"Missing the required parameter `contact_id` "
"when calling `update_contact`"
)
# verify the required parameter 'contacts' is set
if contacts is None:
raise ValueError(
"Missing the required parameter `contacts` "
"when calling `update_contact`"
)
collection_formats = {}
path_params = {
"ContactID": contact_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = contacts
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Contacts/{ContactID}")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Contacts",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_contact")
def update_contact_attachment_by_file_name(
self,
xero_tenant_id,
contact_id,
file_name,
body,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""update_contact_attachment_by_file_name # noqa: E501
OAuth2 scope: accounting.attachments
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str contact_id: Unique identifier for a Contact (required)
:param str file_name: Name of the attachment (required)
:param str body: Byte array of file in body of request (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Attachments
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_contact_attachment_by_file_name`"
)
# verify the required parameter 'contact_id' is set
if contact_id is None:
raise ValueError(
"Missing the required parameter `contact_id` "
"when calling `update_contact_attachment_by_file_name`"
)
# verify the required parameter 'file_name' is set
if file_name is None:
raise ValueError(
"Missing the required parameter `file_name` "
"when calling `update_contact_attachment_by_file_name`"
)
# verify the required parameter 'body' is set
if body is None:
raise ValueError(
"Missing the required parameter `body` "
"when calling `update_contact_attachment_by_file_name`"
)
collection_formats = {}
path_params = {
"ContactID": contact_id,
"FileName": file_name,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = body
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/octet-stream"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Contacts/{ContactID}/Attachments/{FileName}")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Attachments",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "update_contact_attachment_by_file_name"
)
def update_contact_group(
self,
xero_tenant_id,
contact_group_id,
contact_groups,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates a specific contact group # noqa: E501
OAuth2 scope: accounting.contacts
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str contact_group_id: Unique identifier for a Contact Group (required)
:param ContactGroups contact_groups: an array of Contact groups with Name of specific group to update (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: ContactGroups
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_contact_group`"
)
# verify the required parameter 'contact_group_id' is set
if contact_group_id is None:
raise ValueError(
"Missing the required parameter `contact_group_id` "
"when calling `update_contact_group`"
)
# verify the required parameter 'contact_groups' is set
if contact_groups is None:
raise ValueError(
"Missing the required parameter `contact_groups` "
"when calling `update_contact_group`"
)
collection_formats = {}
path_params = {
"ContactGroupID": contact_group_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = contact_groups
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/ContactGroups/{ContactGroupID}")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ContactGroups",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_contact_group")
def update_credit_note(
self,
xero_tenant_id,
credit_note_id,
credit_notes,
unitdp=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates a specific credit note # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str credit_note_id: Unique identifier for a Credit Note (required)
:param CreditNotes credit_notes: an array of Credit Notes containing credit note details to update (required)
:param int unitdp: e.g. unitdp=4 – (Unit Decimal Places) You can opt in to use four decimal places for unit amounts
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: CreditNotes
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_credit_note`"
)
# verify the required parameter 'credit_note_id' is set
if credit_note_id is None:
raise ValueError(
"Missing the required parameter `credit_note_id` "
"when calling `update_credit_note`"
)
# verify the required parameter 'credit_notes' is set
if credit_notes is None:
raise ValueError(
"Missing the required parameter `credit_notes` "
"when calling `update_credit_note`"
)
collection_formats = {}
path_params = {
"CreditNoteID": credit_note_id,
}
query_params = []
if unitdp is not empty:
query_params.append(("unitdp", unitdp))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = credit_notes
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/CreditNotes/{CreditNoteID}")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CreditNotes",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_credit_note")
def update_credit_note_attachment_by_file_name(
self,
xero_tenant_id,
credit_note_id,
file_name,
body,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates attachments on a specific credit note by file name # noqa: E501
OAuth2 scope: accounting.attachments
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str credit_note_id: Unique identifier for a Credit Note (required)
:param str file_name: Name of the attachment (required)
:param str body: Byte array of file in body of request (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool | |
transform. If passed in this function will return
both the data and the corresponding labels for the rows that have
been selected. Default is None.
sample_weight : list or numpy.ndarray or None, shape=(n,), optional
The sample weights to transform. If passed in this function will
return the selected labels (y) and the selected samples, even
if no labels were passed in. Default is None.
sample_cost : list or numpy.ndarray or None, shape=(n,), optional
The cost of each item. If set, indicates that optimization should
be performed with respect to a knapsack constraint.
Returns
-------
X_subset : numpy.ndarray, shape=(n_samples, d)
A subset of the data such that n_samples < n and n_samples is the
integer provided at initialization.
y_subset : numpy.ndarray, shape=(n_samples,), optional
The labels that match with the indices of the samples if y is
passed in. Only returned if passed in.
sample_weight_subset : numpy.ndarray, shape=(n_samples,), optional
The weight of each example.
"""
return self.fit(X, y=y, sample_weight=sample_weight,
sample_cost=sample_cost).transform(X, y=y,
sample_weight=sample_weight)
def _initialize(self, X, idxs=None):
n, d = X.shape
self._X = X if self._X is None else self._X
self.sparse = isinstance(X, csr_matrix)
self.ranking = []
self.gains = []
self.subset = numpy.zeros((0, self._X.shape[1]), dtype='float64')
self.current_values = numpy.zeros(d, dtype='float64')
self.current_concave_values = numpy.zeros(d, dtype='float64')
self.mask = numpy.zeros(n, dtype='int8')
if self.initial_subset is not None:
if self.initial_subset.ndim == 1:
if self.initial_subset.dtype == bool:
self.initial_subset = numpy.where(self.initial_subset == 1)[0]
if len(self.initial_subset) + self.n_samples > X.shape[0]:
raise ValueError("When using a mask for the initial subset" \
" must selected fewer than the size of the subset minus" \
" the initial subset size, i.e., n_samples < X.shape[0] -"\
" initial_subset.shape[0].")
if self.initial_subset.max() > X.shape[0]:
raise ValueError("When passing in an integer mask for the initial subset"\
" the maximum value cannot exceed the size of the data set.")
elif self.initial_subset.min() < 0:
raise ValueError("When passing in an integer mask for the initial subset"\
" the minimum value cannot be negative.")
self.mask[self.initial_subset] = 1
self.idxs = numpy.where(self.mask == 0)[0]
    def _calculate_gains(self, X, idxs=None):
        """Return the marginal gain of each candidate example.

        Abstract hook: every concrete selection subclass must override this
        with its own submodular gain computation.
        """
        raise NotImplementedError
    def _calculate_sieve_gains(self, X, thresholds, idxs):
        """Allocate (or grow) per-threshold state for sieve-streaming selection.

        One row of state is kept per threshold: the current values, the
        selected indices, their gains, a selection counter, the running total
        gain, and the selected feature rows. Arrays of indices/gains use -1
        as the "empty slot" sentinel.

        NOTE(review): `thresholds` is presumably the grid of candidate
        thresholds used by the sieve streaming optimizer — confirm against
        the optimizer that calls this.
        """
        # n is currently unused; d is the width of the gain state, which is
        # the reservoir capacity when a reservoir is in play.
        n = X.shape[0]
        d = X.shape[1] if self.reservoir is None else self.max_reservoir_size
        l = len(thresholds)
        if self.sieve_current_values_ is None:
            # First call: allocate one row of state per threshold.
            self.sieve_current_values_ = numpy.zeros((l, d),
                dtype='float64')
            self.sieve_selections_ = numpy.zeros((l, self.n_samples),
                dtype='int64') - 1
            self.sieve_gains_ = numpy.zeros((l, self.n_samples),
                dtype='float64') - 1
            self.sieve_n_selected_ = numpy.zeros(l,
                dtype='int64')
            self.sieve_total_gains_ = numpy.zeros(l,
                dtype='float64')
            self.sieve_subsets_ = numpy.zeros((l, self.n_samples,
                self._X.shape[1]), dtype='float64')
        else:
            # Later calls: if new thresholds appeared, append j fresh rows of
            # state while preserving everything accumulated so far.
            j = l - self.sieve_current_values_.shape[0]
            if j > 0:
                self.sieve_current_values_ = numpy.vstack([
                    self.sieve_current_values_, numpy.zeros((j, d),
                        dtype='float64')])
                self.sieve_selections_ = numpy.vstack([
                    self.sieve_selections_, numpy.zeros((j, self.n_samples),
                        dtype='int64') - 1])
                self.sieve_gains_ = numpy.vstack([self.sieve_gains_,
                    numpy.zeros((j, self.n_samples), dtype='float64')])
                self.sieve_n_selected_ = numpy.concatenate([
                    self.sieve_n_selected_, numpy.zeros(j, dtype='int64')])
                self.sieve_total_gains_ = numpy.concatenate([
                    self.sieve_total_gains_, numpy.zeros(j, dtype='float64')])
                self.sieve_subsets_ = numpy.concatenate([self.sieve_subsets_,
                    numpy.zeros((j, self.n_samples, self._X.shape[1]),
                        dtype='float64')])
def _select_next(self, X, gain, idx):
self.ranking.append(idx)
self.gains.append(gain)
self.mask[idx] = True
self.idxs = numpy.where(self.mask == 0)[0]
if self.sparse:
X = self._X[idx:idx+1].toarray()
else:
X = self._X[idx:idx+1]
if self.metric != 'precomputed':
self.subset = numpy.concatenate([self.subset, X])
class BaseGraphSelection(BaseSelection):
"""The base graph selection object.
This object defines the structures that all submodular selection algorithms
should follow if they operate on a graph, such as pairwise similarity
measurements. All algorithms will have the same public methods and the same
attributes.
NOTE: All ~pairwise~ values in your data must be positive for these
selection methods to work.
This implementation allows users to pass in either their own symmetric
square matrix of similarity values, or a data matrix as normal and a function
that calculates these pairwise values.
Parameters
----------
n_samples : int
The number of samples to return.
metric : str
The method for converting a data matrix into a square symmetric matrix
of pairwise similarities. If a string, can be any of the metrics
implemented in sklearn (see https://scikit-learn.org/stable/modules/
generated/sklearn.metrics.pairwise_distances.html), including
"precomputed" if one has already generated a similarity matrix. Note
that sklearn calculates distance matrices whereas apricot operates on
similarity matrices, and so a distances.max() - distances transformation
is performed on the resulting distances. For backcompatibility,
'corr' will be read as 'correlation'.
initial_subset : list, numpy.ndarray or None
If provided, this should be a list of indices into the data matrix
to use as the initial subset, or a group of examples that may not be
        in the provided data should be used as the initial subset. If indices,
the provided array should be one-dimensional. If a group of examples,
the data should be 2 dimensional.
optimizer : string or optimizers.BaseOptimizer, optional
The optimization approach to use for the selection. Default is
'two-stage', which makes selections using the naive greedy algorithm
initially and then switches to the lazy greedy algorithm. Must be
one of
'naive' : the naive greedy algorithm
'lazy' : the lazy (or accelerated) greedy algorithm
'approximate-lazy' : the approximate lazy greedy algorithm
'two-stage' : starts with naive and switches to lazy
'stochastic' : the stochastic greedy algorithm
'greedi' : the GreeDi distributed algorithm
'bidirectional' : the bidirectional greedy algorithm
        Default is 'two-stage'.
optimizer_kwds : dict or None
A dictionary of arguments to pass into the optimizer object. The keys
of this dictionary should be the names of the parameters in the optimizer
and the values in the dictionary should be the values that these
parameters take. Default is None.
n_neighbors : int or None
When constructing a similarity matrix, the number of nearest neighbors
whose similarity values will be kept. The result is a sparse similarity
matrix which can significantly speed up computation at the cost of
accuracy. Default is None.
reservoir : numpy.ndarray or None
The reservoir to use when calculating gains in the sieve greedy
streaming optimization algorithm in the `partial_fit` method.
Currently only used for graph-based functions. If a numpy array
is passed in, it will be used as the reservoir. If None is passed in,
will use reservoir sampling to collect a reservoir. Default is None.
max_reservoir_size : int
The maximum size that the reservoir can take. If a reservoir is passed
in, this value is set to the size of that array. Default is 1000.
n_jobs : int
The number of threads to use when performing computation in parallel.
Currently, this parameter is exposed but does not actually do anything.
This will be fixed soon.
random_state : int or RandomState or None, optional
The random seed to use for the random selection process. Only used
for stochastic greedy.
verbose : bool
Whether to print output during the selection process.
Attributes
----------
n_samples : int
The number of samples to select.
metric : callable
A function that takes in a data matrix and converts it to a square
symmetric matrix.
ranking : numpy.array int
The selected samples in the order of their gain.
gains : numpy.array float
The gain of each sample in the returned set when it was added to the
growing subset. The first number corresponds to the gain of the first
added sample, the second corresponds to the gain of the second added
sample, and so forth.
"""
def __init__(self, n_samples, metric='euclidean',
initial_subset=None, optimizer='two-stage', optimizer_kwds={},
n_neighbors=None, reservoir=None, max_reservoir_size=1000,
n_jobs=1, random_state=None, verbose=False):
super(BaseGraphSelection, self).__init__(n_samples=n_samples,
initial_subset=initial_subset, optimizer=optimizer,
optimizer_kwds=optimizer_kwds, reservoir=reservoir,
max_reservoir_size=max_reservoir_size, n_jobs=n_jobs,
random_state=random_state, verbose=verbose)
self.metric = metric.replace("corr", "correlation")
self.n_neighbors = n_neighbors
def fit(self, X, y=None, sample_weight=None, sample_cost=None):
"""Run submodular optimization to select a subset of examples.
This method is a wrapper for the full submodular optimization process.
It takes in some data set (and optionally labels that are ignored
during this process) and selects `n_samples` from it in the greedy
manner specified by the optimizer.
This method will return the selector object itself, not the transformed
data set. The `transform` method will then transform a data set to the
selected points, or alternatively one can use the ranking stored in
the `self.ranking` attribute. The `fit_transform` method will perform
both optimization and selection and return the selected items.
Parameters
----------
X : list or numpy.ndarray, shape=(n, d)
The data set to transform. Must be numeric.
y : list or numpy.ndarray or None, shape=(n,), optional
The labels to transform. If passed in this function will return
both the data and th corresponding labels for the rows that have
been selected.
sample_weight : list or numpy.ndarray or None, shape=(n,), optional
The weight of each example. Currently ignored in apricot but
included to maintain compatibility with sklearn pipelines.
sample_cost : list or numpy.ndarray or None, shape=(n,), optional
The cost of each item. If set, indicates that optimization should
be performed with respect to a knapsack constraint.
Returns
-------
self : BaseGraphSelection
The fit step returns this selector object.
"""
if isinstance(X, csr_matrix) and self.metric not in ("precomputed", "ignore"):
raise ValueError("Must passed in a precomputed sparse " \
"similarity matrix or a dense feature matrix.")
if self.metric == 'precomputed' and X.shape[0] != X.shape[1]:
raise ValueError("Precomputed similarity matrices " \
"must be square and symmetric.")
X_pairwise = _calculate_pairwise_distances(X, metric=self.metric,
n_neighbors=self.n_neighbors)
self._X = X
return super(BaseGraphSelection, self).fit(X_pairwise, y=y,
sample_weight=sample_weight, sample_cost=sample_cost)
    def partial_fit(self, X, y=None, sample_weight=None, sample_cost=None):
        """Process one streaming batch of examples.

        Maintains a reservoir sample of previously seen examples, computes
        the pairwise similarities between this batch and the reservoir, and
        delegates the actual streaming (sieve) selection to the base class.
        Parameters mirror ``fit``; ``X`` here is a raw feature batch, not a
        precomputed similarity matrix.
        """
        # Lazily allocate the reservoir on the first batch.
        if self.reservoir is None:
            self.reservoir = numpy.empty((self.max_reservoir_size, X.shape[1]))
        if self.update_reservoir_:
            for i in range(X.shape[0]):
                if self.reservoir_size < self.max_reservoir_size:
                    # Reservoir not yet full: always keep the example.
                    self.reservoir[self.reservoir_size] = X[i]
                    self.reservoir_size += 1
                else:
                    # Reservoir sampling: replace a random slot so each seen
                    # example is kept with roughly equal probability.
                    # NOTE(review): classic Algorithm R draws from
                    # range(n_seen_ + i + 1) for the (n_seen_+i+1)-th item;
                    # drawing from n_seen_ + i looks off by one — confirm.
                    r = self.random_state.choice(self.n_seen_ + i)
                    if r < self.max_reservoir_size:
                        self.reservoir[r] = X[i]
                        #self.current_values_[:, r] = 0.
        # Similarities between the incoming batch and the reservoir items.
        X_pairwise = _calculate_pairwise_distances(X,
            Y=self.reservoir[:self.reservoir_size], metric=self.metric)
        self._X = X
        super(BaseGraphSelection, self).partial_fit(X_pairwise, y=y,
            sample_weight=sample_weight, sample_cost=sample_cost)
        # Resize per-candidate values to match the (possibly grown) reservoir.
        self.current_values = numpy.zeros(self.reservoir_size,
            dtype='float64')
        self.n_seen_ += X.shape[0]
    def _initialize(self, X_pairwise, idxs=None):
        """Delegate state initialization to the base selector, operating on
        the pairwise similarity matrix rather than raw features."""
        super(BaseGraphSelection, self)._initialize(X_pairwise, idxs=idxs)
def | |
"help":
await ctx.send("Gives an overview of your character's current special actions. \n \tUsage: `!actionoverview [character name]`")
return
#get all special actions associated with this character
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
currentActions = self.cursor.fetchall()
#make a new embed an add a field for each special action
actionEmbed = discord.Embed(title=chrnm + "'s special actions")
for x in currentActions:
actionEmbed.add_field(
name=x[1], value=x[4] + "\n" + u'\u200b', inline=False)
#send the embed if there are any special actions
if(len(currentActions) <= 0):
await ctx.send("You have not yet specified any special actions for this character.")
else:
await ctx.send(embed=actionEmbed)
#add special action description
@commands.command(aliases=["actionsdescription"])
async def actiondescription(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Prompts you to add a description to one of your character's special actions. \n \tUsage: `!actiondescription [character name] [special action name] OR [special action index]`")
return
#if value is a digit, retrieve special action based on index
if(value.isdigit()):
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member AND indexID = :index""", {
'chrnm': chrnm, 'member': ctx.author.id, 'index': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a special action with that index.")
#else, retrieve special action based on name
else:
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member AND action_name = :specialAction""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialAction': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a special action with that name.")
#check to see if the next message is by the same author and in the same channel
def check(m):
return m.channel.id == ctx.channel.id and m.author.id == ctx.author.id
#wait for author to send special action description
await ctx.send("You can now type and send the description of the special action: '{}'.".format(temp[0][1]))
description = await self.bot.wait_for('message', check=check)
#update table with new value
self.cursor.execute("""UPDATE special_actions SET action_description = :description WHERE name = :chrnm AND member = :member AND action_name = :specialAction""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialAction': temp[0][1], 'description': description.content})
self.chardb.commit()
await ctx.send("The description of '{}' is now: \n{}".format(temp[0][1], "'" + description.content + "'"))
#update character special actions
    @commands.command(aliases=["addaction"])
    async def addactions(self, ctx, chrnm="", value=""):
        """Add one or more "; "-separated special actions to a character.

        Each action may carry an inline description wrapped in curly braces,
        e.g. ``slash {a quick sword strike}; parry``. All-or-nothing: any
        invalid entry rolls back every insert from this invocation.
        """
        self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
                            'chrnm': chrnm, 'member': ctx.author.id})
        oldValueCheck = self.cursor.fetchall()
        # Input validation, mirroring the other action commands.
        if chrnm == "":
            await ctx.send("You forgot to enter a name")
            return
        elif chrnm[0] == "'":
            await ctx.send("Please use double quotes for characters that have a space in their name.")
            return
        elif len(oldValueCheck) == 0 and chrnm != "help":
            await ctx.send("You do not have a character named '{}'.".format(chrnm))
            return
        elif value == "":
            await ctx.send("You forgot to enter a value.")
            return
        #help case
        if chrnm == "help" or value == "help":
            await ctx.send("Adds special actions to your characters current special actions. \n \tUsage: `!addactions [character name] [action name1; action name2; action name3]`")
            return
        # Strip a single trailing ';' so the split below yields no empty entry.
        if value.endswith(';'):
            value = value[:-1]
        #make an array of all the different special actions
        actionArray = value.split("; ")
        # Continue numbering after the character's highest existing indexID.
        self.cursor.execute("""SELECT MAX(indexID) FROM special_actions WHERE name = :chrnm AND member = :member""", {
                            'chrnm': chrnm, 'member': ctx.author.id})
        indexID = self.cursor.fetchall()
        if indexID[0][0] == None:
            index = 1
        else:
            index = indexID[0][0] + 1
        #for every item in the actionarray, extract description and add to database
        for x in actionArray:
            #extract description by finding and then deleting substring in between {} (not completely finished yet, but functional enough)
            description = None
            openloc = x.find('{')
            closeloc = x.find('}')
            if closeloc != -1 and openloc != -1 and openloc < closeloc:
                description = x[openloc + 1: closeloc]
                # Remove the braced description (with any flanking spaces)
                # from the action name itself.
                x = x.replace(" {" + description + "} ", '')
                x = x.replace("{" + description + "} ", '')
                x = x.replace(" {" + description + "}", '')
                x = x.replace("{" + description + "}", '')
            #if one of the { is missing, notify the user and abort
            elif openloc != -1:
                await ctx.send("You forgot to add closing curly brackets.")
                self.chardb.rollback()
                return
            elif closeloc != -1:
                await ctx.send("You forgot to add opening curly brackets.")
                self.chardb.rollback()
                return
            #if there is not a special action name, notify the user and abort
            if x == "" or x == " " or x == "  ":
                await ctx.send("You forgot to enter a special action name")
                self.chardb.rollback()
                return
            #if there is no description, do not insert an empty string
            if description == "" or description == " " or description == "  ":
                description = None
            # Reject duplicates: action names must be unique per character.
            self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member AND action_name = :specialAction""", {
                                'chrnm': chrnm, 'member': ctx.author.id, 'specialAction': x})
            actions = self.cursor.fetchall()
            if len(actions) > 0:
                await ctx.send("You already have a special action with that name.")
                self.chardb.rollback()
                return
            # Insert and advance the index for the next action.
            # NOTE(review): column order inferred from this INSERT — it appears
            # to be (name, action_name, member, indexID, action_description).
            self.cursor.execute("""INSERT INTO special_actions VALUES(:chrnm, :specialActions, :member, :index, :description)""", {
                                'chrnm': chrnm, 'member': ctx.author.id, 'specialActions': x, 'index': index, 'description': description})
            index += 1
        self.chardb.commit()
        #construct string with all current special actions and send it
        self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member""", {
                            'chrnm': chrnm, 'member': ctx.author.id})
        newActions = self.cursor.fetchall()
        s = ""
        for x in newActions:
            s += x[1] + ", "
        s = s[:-2]
        if s == "":
            s = None
        await ctx.send("These are {}'s special actions: {}.".format(chrnm, s))
#update character special actions
@commands.command(aliases=["replaceaction"])
async def replaceactions(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Replaces all your character's current special actions with new special actions \n \tUsage: `!replaceactions [character name] [action name1; action name2; action name3]`")
return
#delete all old special actions
self.cursor.execute("""DELETE FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
#add the special actions given in parameter
self.addactions(ctx, chrnm, value)
#update character special actions
@commands.command(aliases=["removeaction"])
async def removeactions(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Removes special actions from your characters current special actions. \n \t\tUsage: `!removeactions [character name] [action name1; action name2; action name3] OR [index1; index2; index3`")
return
#remove unnecesary ;
if value.endswith(';'):
value = value[:-1]
#araay of special actions in input
actionArray = value.split("; | |
""" 我上一次分享在群里发了ana.py和train_50.csv
ana.py这个程序是用来生成最初的50个特征的train_50.csv训练集文件,
写得非常乱,我自己都看不下去了,这哪个zz写的?
train_50.csv用lightgbm做22分类的log_loss是2.801
我想把这个log_loss降低,所以要做一些新的特征,再合并到train_50.csv里面来
做新特征的程序文件是step2.py"""
import pandas as pd
import numpy as np
import itertools
import pprint
start_close = pd.read_csv('deviceid_package_start_close.tsv', sep='\t', header=None)
start_close.columns = ['device', 'app', 'start', 'close']
#
# tmp1 = pd.DataFrame(start_close['device'])
# tmp1['gap'] = (start_close['close'] - start_close['start'])/1000
# tmp1.to_csv('开关表中所有开关时间的秒数差.csv')
# print(gapmean) # 7689233.43994
#
# deviceid_brand = pd.read_csv('deviceid_brand.tsv', sep='\t', header=None)
# deviceid_packages = pd.read_csv('deviceid_packages.tsv', sep='\t', header=None)
# deviceid_test = pd.read_csv('deviceid_test.tsv', sep='\t', header=None)
# deviceid_train = pd.read_csv('deviceid_train.tsv', sep='\t', header=None)
# package_label = pd.read_csv('package_label.tsv', sep='\t', header=None)
#
# print(7689233/60)
# print(7689233/3600)
#
#
# 统计每台设备上的app数目
# packages = pd.read_csv('deviceid_packages.tsv', sep='\t', header=None)
# packages.columns = ['device', 'apps']
# apps = packages['apps']
# apps = apps.apply(lambda x: x.split(','))
# numapps = apps.apply(lambda x: len(x))
# numapps = pd.DataFrame({'numapps': numapps})
# packages = pd.concat([packages, numapps], axis=1)
# packages = pd.DataFrame(packages)
# packages.to_csv('numapps.csv')
# with pd.option_context('display.float_format', lambda x: '%.3f' % x):
# print(start_close.describe())
# 2 3
# count 36720940.000 36720940.000
# mean 1489081459549.199 1489089148782.905
# std 22214672578.950 22054559373.389
# min 50988.000 59970.000
# 25% 1488864885959.000 1488865682275.500
# 50% 1489490768269.000 1489491372663.000
# 75% 1490185881235.750 1490186457468.750
# max 2136676365305.000 2136677153179.000
#
# def gethour(start):
# GMT_hour = int(start/3600000) % 24
# if GMT_hour <= 15:
# return GMT_hour + 8
# else:
# return GMT_hour - 16
# starthour = pd.DataFrame(start_close['device'])
# starthour['chour'] = start_close['close'].apply(lambda x: gethour(x))
# starthour.to_csv('all_close_hour.csv')
#
# def getunixsecond(start):
# return int(start/1000)
# import time
# print(time.time())
# import datetime
# print(datetime.datetime.now())
# print(gethour(int(time.time())*1000))
# print(deviceid_brand.describe())
# 0 1 2
# count 72554 72550 72517
# unique 72554 1134 2943
# top 80a3fdcaed624d6e30bdd3381de25c9b Xiaomi HM NOTE 1LTE
# freq 1 14074 2055
# with pd.option_context('display.float_format', lambda x: '%.3f' % x):
# print(deviceid_train.describe())
# 1 2
# count 50000.000 50000.000
# mean 1.354 5.622
# std 0.478 2.481
# min 1.000 0.000
# 25 % 1.000 4.000
# 50 % 1.000 6.000
# 75 % 2.000 7.000
# max 2.000 10.000
# print(deviceid_packages.describe())
#
# print(package_label.describe())
# 0 1 2
# count 10368 10368 10368
# unique 10368 45 288
# top 294b017acedad2b1ee90131cd04ac276 系统工具 其它
# freq 1 1005 640
#
# start_close.drop(['app', 'close'], axis=1, inplace=True)
# tmp2 = start_close.groupby(['device']).count()
# tmp2 = pd.DataFrame(tmp2)
# tmp2.to_csv('开关表中每一台设备上所有app总的打开次数.csv')
# #
# #
# start_close['gap'] = (start_close['close'] - start_close['start'])/1000
# start_close.drop(['start', 'close'], axis=1, inplace=True)
# people = dict(list(start_close.groupby(['device'])))
# device_list = []
# app_list = []
# gap_list = []
# for k1 in people.keys():
# people[k1].drop(['device'], axis=1, inplace=True)
# j = dict(list(people[k1].groupby(['app'])))
# for k2 in j.keys():
# gap_list.append(np.sum(j[k2]['gap']))
# app_list.append(k2)
# device_list.append(k1)
# timeall = pd.DataFrame({'device': device_list, 'app': app_list, 'totaltime': gap_list})
# timeall.to_csv('each_app_used_time_on_device_001.csv')
# #
# # 获得每台设备上安装的app的大小类别
#
# 直接把这些大小类别做一个Labelencoder
# deviceid_packages.columns = ['device', 'applist']
# package_label.columns = ['app', 'label_big', 'label_small']
# label_big_unique = list(package_label['label_big'].unique())
# label_small_unique = list(package_label['label_small'].unique())
# from sklearn.preprocessing import LabelEncoder
# le1 = LabelEncoder()
# le2 = LabelEncoder()
# label_big_unique_numeric = le1.fit_transform(label_big_unique)
# label_small_unique_numeric = le2.fit_transform(label_small_unique)
#
# num_dict_label_big = {}
# num_dict_label_small = {}
# for i in range(len(label_big_unique)):
# num_dict_label_big[label_big_unique[i]] = label_big_unique_numeric[i]
# for i in range(len(label_small_unique)):
# num_dict_label_small[label_small_unique[i]] = label_small_unique_numeric[i]
# package_label['label_big_num'] = package_label['label_big'].apply(lambda x: num_dict_label_big[x])
# package_label['label_small_num'] = package_label['label_small'].apply(lambda x: num_dict_label_small[x])
# #
# # with pd.option_context('display.max_rows', None, 'display.max_columns', 100):
# # print(package_label.head(60))
# #
# app_label_big_dict = {}
# app_label_small_dict = {}
# for row in package_label.itertuples(index=True, name='Pandas'):
# key = getattr(row, 'app')
# value_big = getattr(row, 'label_big_num')
# value_small = getattr(row, 'label_small_num')
# app_label_big_dict[key] = value_big
# app_label_small_dict[key] = value_small
# def f_big(applist):
# ans = []
# for i in applist.split(','):
# if i in app_label_big_dict.keys():
# ans.append(app_label_big_dict[i])
# else:
# ans.append(-999)
# return ans
# def f_small(applist):
# ans = []
# for i in applist.split(','):
# if i in app_label_small_dict.keys():
# ans.append(app_label_small_dict[i])
# else:
# ans.append(-999)
# return ans
# def g_big(app):
# ans = 0
# if app in app_label_big_dict.keys():
# ans = app_label_big_dict[app]
# else:
# ans = -999
# return ans
# def g_small(app):
# ans = 0
# if app in app_label_small_dict.keys():
# ans = app_label_small_dict[app]
# else:
# ans = -999
# return ans
# #
# deviceid_packages['label_big_num'] = deviceid_packages['applist'].apply(lambda x: f_big(x))
# deviceid_packages['label_small_num'] = deviceid_packages['applist'].apply(lambda x: f_small(x))
# deviceid_packages.drop(['applist'], axis=1, inplace=True)
# deviceid_packages.to_csv('label of apps on each device.csv')
# 现在这个就是数值化之后的类别了
# 分析一下品牌和型号
# deviceid_brand.columns = ['device', 'brand_big', 'brand_small']
# deviceid_brand['brand_big_up'] = deviceid_brand['brand_big'].apply(lambda x: str(x).upper())
# # 描述频率
# key_list = list(deviceid_brand['brand_big_up'])
# from collections import Counter
# print(Counter(key_list))
# 'XIAOMI': 14074, 'SAMSUNG': 12736, 'HUAWEI': 9967, 'OPPO': 6401, 'VIVO': 4919, 'HONOR': 3972,
# 'COOLPAD': 3764, 'LENOVO': 2877, 'ZTE': 1692, # 做这3个的系列
# 'MEIZU': 897, 'GIONEE': 834, 'SONY': 803,
#
# tmp = dict(list(deviceid_brand.groupby(['brand_big_up'])))
# mydict = {'COOLPAD', 'LENOVO', 'ZTE'}
# mydf = pd.DataFrame()
# mydf = pd.concat([tmp['COOLPAD'], tmp['LENOVO'], tmp['ZTE']], axis=0)
# print(mydf.head())
# 暂时放一下
#
# # 每个人对应每台设备(deviceid)
# # 1.每个人使用时间最长的APP以及这个app的类别
# total = pd.read_csv('each app used time on each device.csv')
# print(total.index)
# print(total.columns)
# RangeIndex(start=0, stop=765698, step=1)
# Index(['Unnamed: 0', 'device', 'app', 'totaltime'], dtype='object')
# total.drop(['Unnamed: 0'], axis=1, inplace=True)
# 用python引擎读取带有中文名或中文路径的文件,不推荐这个做法,因为速度慢而且占内存,最好直接用英文命名文件
# print(total.head())
# group_device = dict(list(total.groupby(['device'])))
# dev = list(group_device.keys())
# f_label_big = []
# f_label_sma = []
# for k in dev:
# v = group_device[k]
# v = v.sort_values(['totaltime'], ascending=False).reset_index(drop=True)
# favorapp = v.loc[0, ]['app']
# # todo
# if g_big(favorapp) == 32:
# if len(v.index) < 2:
# f_label_big.append(32)
# f_label_sma.append(g_small(favorapp))
# else:
# secapp = v.loc[1, ]['app']
# f_label_big.append(g_big(secapp))
# f_label_sma.append(g_small(secapp))
# else:
# f_label_big.append(g_big(favorapp))
# f_label_sma.append(g_small(favorapp))
# ans = pd.DataFrame({'device': dev, 'f_label_b': f_label_big, 'f_label_s': f_label_sma})
# ans.to_csv('label_of_favorite_app.csv')
# app_totaltime_dict = {}
# for row in total.itertuples(index=True, name='Pandas'):
# key = getattr(row, 'totaltime')
# value = getattr(row, 'app')
# app_totaltime_dict[key] = value
#
# fu = total.groupby(['device']).max()
# fu = pd.DataFrame(fu)
# fu.drop(['app'], axis=1, inplace=True)
# # with pd.option_context('display.max_rows', None, 'display.max_columns', 100):
# # print(fu.head(20))
# fu['favorapp'] = fu['totaltime'].apply(lambda x: app_totaltime_dict[x])
# fu['f_lable_big'] = fu['favorapp'].apply(lambda x: g_big(x))
# fu['f_label_small'] = fu['favorapp'].apply(lambda x: g_small(x))
# fu.drop(['totaltime'], axis=1, inplace=True)
# with pd.option_context('display.max_rows', None, 'display.max_columns', 100):
# print(fu.head(20))
# fu.to_csv('label_of_favorite_app.csv')
# 2.每个人按照一天中时间段分开,每个时间段玩了多少次,以及这个频次时间分布的均值偏移(相对于总体人群)、标准差、偏度、峰度
from collections import Counter
# starthour = pd.read_csv('all_start_hour.csv')
# starthour = pd.read_csv('all_close_hour.csv')
# 字典按键排序, 获得一天的时间频次分布
# def sortedDictValues1(adict):
# keys = list(adict.keys())
# keys.sort()
# dis = {key: adict[key] if key in keys else 0 for key in range(24)}
# return dis
# def getmu(dis):
# a = list(dis.values())
# N = np.sum(a)
# ans = np.sum(i*a[i] for i in range(24))
# return ans/N
# def getsigma(dis):
# a = list(dis.values())
# onestd = np.std(a)
# return onestd
# def getskew(dis):
# a = list(dis.values())
# a = pd.Series(a)
# return a.skew()
# def getkurt(dis):
# a = list(dis.values())
# a = pd.Series(a)
# return a.kurt()
# def getBriefDis(dis):
# dis_brief = {'nosleep': 0, 'breakfast': 0, 'AM': 0, 'lunch': 0, 'PM': 0, 'dinner': 0, 'night': 0}
# for i in range(0, 5):
# dis_brief['nosleep'] += dis[i]
# for i in range(5, 8):
# dis_brief['breakfast'] += dis[i]
# for i in range(8, 12):
# dis_brief['AM'] += dis[i]
# for i in range(12, 14):
# dis_brief['lunch'] += dis[i]
# for i in range(14, 18):
# dis_brief['PM'] += dis[i]
# for i in range(18, 19):
# dis_brief['dinner'] += dis[i]
# for i in range(19, 24):
# dis_brief['night'] += dis[i]
# return dis_brief
# tmp = dict(list(starthour.groupby(['device'])))
# for k in tmp.keys():
# tmp[k] = sortedDictValues1(Counter(tmp[k]['shour']))
# print(tmp)
# basemu = getmu(sortedDictValues1(Counter(starthour['chour'])))
# device = list(tmp.keys())
# dis = []
# length = len(device)
# for i in range(length):
# dis.append(tmp[device[i]])
# dis_brief = []
# mean = []
# std = []
# skew = []
# kurt = []
# for i in range(length):
# dis_brief.append(getBriefDis(dis[i]))
# mean.append(getmu(dis[i]) - basemu)
# std.append(getsigma(dis[i]))
# skew.append(getskew(dis[i]))
# kurt.append(getkurt(dis[i]))
# ans = pd.DataFrame({'device': device})
# ans = pd.DataFrame({'device': device, 'mu_offset_cl': mean, 'sigma_cl': std, 'skew_cl': skew, 'kurt_cl': kurt})
#
# for i in range(24):
# cur = []
# for j in range(length):
# cur.append(dis[j][i])
# cur = pd.Series(cur)
# ans['st_freq ' + i] = cur
# ans['st_open' + str(i)] = cur
# ans.to_csv('tmp_time_24_distri.csv')
# # print(getmu(sortedDictValues1(Counter(starthour['shour']))))
# # print(getsigma(sortedDictValues1(Counter(starthour['shour'])))/length)
# # ans.to_csv('freq_hour_distribution_of_allapps_open_on_each_device.csv')
# # ans.to_csv('freq_close_hour_distribution_on_each_device.csv')
#
# #
"""Check whether each device has any of these major app categories installed."""
# 0 实用工具 17
# 1 视频 38
# 2 母婴亲子 25
# 3 金融 43
# 4 其它 11
# 5 地图导航 16
# 6 教育 22
# 7 摄影摄像 21
# 8 移动购物 33
# 9 资讯 40
# 10 应用管理 19
# 11 移动阅读 34
# 12 汽车 26
# 13 休闲 7
# 14 ACT(动作类游戏) 0
# 15 音频娱乐 44
# 16 浏览器 27
# 17 体育竞技 9
# 18 社交 32
# 19 射击 18
# 20 交通出行 5
# 21 系统工具 36
# 22 竞速 35
# 23 商务办公 15
# 24 生活 31
# 25 SIM(模拟游戏) 2
# 26 旅游 23
# 27 智能硬件 24
# 28 健康 10
# 29 医疗 13
# 30 动漫娱乐 12
# 31 游戏平台 30
# 32 SLG(策略游戏) 3
# 33 视频直播 39
# 34 体育 8
# 35 企业级应用 6
# 36 游戏工具 29
# 37 输入法 41
# 38 游戏媒体 28
# 39 RPG(角色扮演游戏) 1
# 40 美食 37
# 41 TAB(桌面游戏) 4
# 42 房产 20
# 43 通讯 42
# 44 卡牌 14
# nicelabel = {4, 33, 18, 29, 26, 35, 2,
# 0, 5, 3, 1, 25,
# 8, 37, 30, 28, 9, 6, 12, 13}
# step2label = {14, 42, 20, 41, 39, 10, 23, 24, 31, 15, 36, 32, 27, 44, 7, 34, 19, 40, 21, 22, 16, 11, 43, 38, 17}
# biglabel = list(deviceid_packages['label_big_num'])
# deviceid = deviceid_packages['device']
# res = pd.DataFrame(deviceid)
# # for i in nicelabel:
# for i in step2label:
# th = []
# for j in range(len(deviceid)):
# if i in biglabel[j]:
# th.append(2)
# else:
# if -999 in biglabel[j]:
# th.append(0)
# else:
# th.append(1)
# th = pd.Series(th)
# res['has_' + str(i)] = th
#
# data = pd.read_csv('train_001.csv')
#
# data.index = data['device']
# data.drop(['device'], axis=1, inplace=True)
# res.index = res['device']
# res.drop(['device'], axis=1, inplace=True)
# data = data.join([res])
# data.to_csv('train_002.csv')
# # 得到每个设备的使用总时长
# apptime = pd.read_csv('each app used time on each device.csv')
# apptime.drop(['Unnamed: 0'], axis=1, inplace=True)
# apptime.index = apptime['device']
# apptime.drop(['device'], axis=1, inplace=True)
# apptime.drop(['app'], axis=1, inplace=True)
# ans = apptime.groupby(['device']).sum()
# ans.to_csv('total_time_of_each_device_used.csv')
# 合并47个特征
# tmp = pd.read_csv('df_total.csv', encoding='gbk')
# tmp.columns = ['device', 'brand']
# tmp.index = tmp['device']
# | |
Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'86138676':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'86138679':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'86138678':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'86138898':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861398464':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'861454943':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861398465':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861398466':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861379579':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u963f\u575d\u85cf\u65cf\u7f8c\u65cf\u81ea\u6cbb\u5dde')},
'861379578':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7518\u5b5c\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861379573':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861379572':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861379571':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861379570':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861379577':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861379576':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861379575':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u9042\u5b81\u5e02')},
'861379574':{'en': 'Lu<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861387198':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861387199':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861398462':{'en': 'Liupanshui, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'861387190':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861387191':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861387192':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861387193':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861387194':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861387195':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861387196':{'en': 'Hu<NAME>', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861387197':{'en': 'Hu<NAME>', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861386249':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861386248':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861386243':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861386242':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386241':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386240':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861386247':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861386246':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861386245':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861386244':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861452959':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861452958':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861383994':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861383995':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861383996':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861383997':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861383990':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861383991':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861383992':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861383993':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861383998':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861383999':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861452953':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861452952':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861453393':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861390969':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861390968':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861390965':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861390964':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861390967':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861390966':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861390961':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861390960':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'861390963':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861390962':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861454689':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861454688':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861454681':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861454680':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861454683':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861454682':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861454685':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861454684':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861454687':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861454686':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86145716':{'en': 'Shanghai', 'zh': u('\u4e0a\u6d77\u5e02')},
'861380744':{'en': 'Zhangjiajie, Hunan', 'zh': u('\u6e56\u5357\u7701\u5f20\u5bb6\u754c\u5e02')},
'86145714':{'en': 'Shanghai', 'zh': u('\u4e0a\u6d77\u5e02')},
'86145715':{'en': 'Shanghai', 'zh': u('\u4e0a\u6d77\u5e02')},
'86145713':{'en': 'Shanghai', 'zh': u('\u4e0a\u6d77\u5e02')},
'861380745':{'en': 'Huaihua, Hunan', 'zh': u('\u6e56\u5357\u7701\u6000\u5316\u5e02')},
'861380746':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861380747':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861380036':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')},
'861380037':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861453411':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861453410':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861453413':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')},
'861379295':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861453415':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u4e39\u4e1c\u5e02')},
'861380742':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861453417':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861453416':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861453419':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861453418':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861380035':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')},
'861453701':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861453700':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861396523':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861396522':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861396521':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861396520':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861396527':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861396526':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861396525':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861396524':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861396529':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861396528':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861390781':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861390780':{'en': 'Fangchenggang, Guangxi', 'zh': u('\u5e7f\u897f\u9632\u57ce\u6e2f\u5e02')},
'861390783':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')},
'861390782':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'86138468':{'en': 'Hegang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e64\u5c97\u5e02')},
'861390784':{'en': 'Wuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u68a7\u5dde\u5e02')},
'861390787':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861390786':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'86138464':{'en': 'Qitaihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4e03\u53f0\u6cb3\u5e02')},
'861390788':{'en': 'Hechi, Guangxi', 'zh': u('\u5e7f\u897f\u6cb3\u6c60\u5e02')},
'86138466':{'en': 'Yichun, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f0a\u6625\u5e02')},
'86138467':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'86138460':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'86138461':{'en': 'Jiamusi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f73\u6728\u65af\u5e02')},
'86138462':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'86138463':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861398572':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861398573':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861398570':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861398571':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861398576':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861398577':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861398574':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861398575':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861398578':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861398579':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861450308':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'861450309':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'861384658':{'en': 'Da Hinggan Ling, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5174\u5b89\u5cad\u5730\u533a')},
'861384659':{'en': 'Da Hing<NAME>, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5174\u5b89\u5cad\u5730\u533a')},
'861452736':{'en': 'Changde, Hunan', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861384652':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861384653':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861384650':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861384651':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861384656':{'en': 'Da <NAME>, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5174\u5b89\u5cad\u5730\u533a')},
'861384657':{'en': 'Da Hinggan Ling, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5174\u5b89\u5cad\u5730\u533a')},
'861384654':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861384655':{'en': 'Da Hinggan Ling, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5174\u5b89\u5cad\u5730\u533a')},
'861379384':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861379385':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379386':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379387':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379380':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861379381':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861379382':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861379383':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861379388':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379389':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861452857':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861453704':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861457208':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861450929':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'8613896':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')},
'861452456':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861452457':{'en': 'Da <NAME>, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5174\u5b89\u5cad\u5730\u533a')},
'861452454':{'en': 'Jiamusi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f73\u6728\u65af\u5e02')},
'861452455':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'861452452':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'861452453':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'861452450':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861452451':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861452458':{'en': 'Yichun, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f0a\u6625\u5e02')},
'861452459':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'86138288':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86138289':{'en': 'Shanwei, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5c3e\u5e02')},
'86138280':{'en': 'Jiangmen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'86138281':{'en': 'Jieyang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')},
'86138282':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'86138283':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'86138284':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86138285':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'86138286':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'86138287':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861457200':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861387299':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861387298':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861452739':{'en': 'Sh<NAME>', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861450921':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861387293':{'en': '<NAME>i', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861387292':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861387291':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861387290':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861387297':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861387296':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861387295':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861387294':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861391430':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861380958':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')},
'861391432':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861391433':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861391434':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861391435':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861391436':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861391437':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861391438':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861380950':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861380953':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861380952':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861380955':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861380954':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861380957':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861380956':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861399957':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861399956':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861399955':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861399954':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861399953':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861399952':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861399951':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861399950':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861450927':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861399959':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861399958':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861389721':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861389720':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861389723':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861389722':{'en': 'Haidong, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u4e1c\u5730\u533a')},
'861389725':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861389724':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861389727':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389726':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861389729':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389728':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861454519':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861454518':{'en': 'Zhangjiajie, Hunan', 'zh': u('\u6e56\u5357\u7701\u5f20\u5bb6\u754c\u5e02')},
'861392208':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861392209':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861392206':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861392207':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861392204':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861392205':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861392202':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861392203':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861392200':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861392201':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861458429':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861458428':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861458359':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861458358':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861458353':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861458352':{'en': 'Dongying, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861458351':{'en': 'Dongying, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861458350':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861458357':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861458356':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861458355':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861458354':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861454385':{'en': 'Qiandongnan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861454386':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861454387':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'861454472':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861454473':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861454470':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861454471':{'en': 'YanAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')},
'861454476':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861454382':{'en': 'Zunyi, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861454474':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861454475':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'861454478':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861454479':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861454388':{'en': 'Liupanshui, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'861383398':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861454389':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861383399':{'en': 'Cangzhou, Hebei', | |
#!/usr/bin/env python
"""
sire: create a new python3.7 project using all the extra stuff i like.
"""
import argparse
import getpass
import os
import re
import shutil
import stat
import subprocess
import sys
from typing import Any, Mapping
import requests
from .string_matches import BADLINES
# remote URL templates keyed by git host; formatted later with the detected
# username and the new project's name
GIT_URLS = dict(
    github="https://github.com/{git_username}/{name}",
    bitbucket="https://bitbucket.org/{git_username}/{name}",
    gitlab="https://gitlab.com/{git_username}/{name}",
)
# here we store the out paths that will be generated. not included are git and
# mkdocs-related files, as they are added in later if the user requests them
PATHS = {
    ".bumpversion.cfg",
    ".coveragerc",
    ".flake8",
    ".travis.yml",
    "CHANGELOG.md",
    "LICENSE",
    "mypy.ini",
    "publish.sh",
    "README.md",
    "requirements.txt",
    "setup.py",
    "tests/tests.py",
    "{name}/__init__.py",
}
# translate between user input for exclude and the pip name, so we can remove
# unused things from requirements
MODULE_TRANSLATION = dict(codecoverage="codecov", bumpversion="bump2version")
# we use these to format help for the user: each PATHS entry reduced to a bare
# lowercase stem (e.g. "tests/tests.py" -> "tests", ".coveragerc" -> "coveragerc")
SHORT_PATHS = [
    os.path.basename(os.path.splitext(i)[0]).strip(".").lower() for i in PATHS
]
# the below just helps with interpreting exclude patterns, so that 'codecov'
# will remove .coveragerc, and so on; keys are user aliases, values are the
# canonical names used internally
EXCLUDE_TRANSLATIONS = dict(
    codecov="coveragerc",
    coverage="coveragerc",
    bump2version="bumpversion",
    rtd="readthedocs",
    readthedocs="readthedocs",
    venv="virtualenv",
    docs="readthedocs",
    test="tests",
)
class SafeDict(dict):
    """
    dict subclass for str.format_map over templates that contain literal
    {placeholders}: unknown keys are re-emitted verbatim instead of
    raising KeyError.
    """

    def __missing__(self, key):
        # Rebuild the original "{key}" token so formatting leaves it intact.
        return f"{{{key}}}"
def _kwargs_to_clean_args(kwargs):
    """
    Normalise parsed command-line kwargs into the tuple
    (project_name, git, interactive, exclude).

    The comma-separated --exclude string is mapped through
    EXCLUDE_TRANSLATIONS so user aliases (e.g. 'codecov') become canonical
    names. Any of git/virtualenv/mkdocs whose flag was not given is also
    added to the exclude set.
    """
    raw = kwargs["exclude"] or ""
    # drop empty fragments so a missing --exclude (or "a,,b") does not leave
    # a spurious "" entry in the set
    exclude = {
        EXCLUDE_TRANSLATIONS.get(item.strip(), item.strip())
        for item in raw.split(",")
        if item.strip()
    }
    for special in {"git", "virtualenv", "mkdocs"}:
        if not kwargs[special]:
            # bug fix: the old message claimed the feature was "in the
            # exclude list", but we only reach here because its flag was
            # not passed — it is being added to the excludes right now.
            print(f"* Skipping {special} because it was not requested.")
            exclude.add(special)
    return kwargs["project_name"], kwargs["git"], kwargs["interactive"], exclude
def _parse_cmdline_args():
    """
    Command line argument parsing. Doing it here means less duplication than
    would be the case in bin/
    Returns tuple of project_name (str), git (str), interactive (bool), exclude (set)
    """
    parser = argparse.ArgumentParser(description="sire a new Python 3.7 project.")
    paths = "/".join(sorted(SHORT_PATHS))
    parser.add_argument(
        "-e",
        "--exclude",
        nargs="?",
        type=str,
        required=False,
        help=f"Comma separated files/modules to exclude. Any of: {paths}",
    )
    # the three boolean flags share every option except name and help text,
    # so register them data-driven (order preserved for --help output)
    boolean_flags = (
        ("-m", "--mkdocs", "Generate files for mkdocs/readthedocs"),
        ("-i", "--interactive", "Interactive prompt with a few extra fields to autofill"),
        ("-v", "--virtualenv", "Generate a virtualenv for this project"),
    )
    for short_opt, long_opt, help_text in boolean_flags:
        parser.add_argument(
            short_opt,
            long_opt,
            default=False,
            action="store_true",
            required=False,
            help=help_text,
        )
    parser.add_argument(
        "-g",
        "--git",
        nargs="?",
        type=str,
        required=False,
        choices=["github", "gitlab", "bitbucket"],
        help="Generate .git, .gitignore and hook(s). 'github/bitbucket/gitlab...'",
    )
    parser.add_argument("project_name", help="Name of the new Python project")
    return _kwargs_to_clean_args(vars(parser.parse_args()))
def _locate_templates():
    """
    Return the path of the 'templates' directory.

    Its location varies with how the package was installed, so probe a few
    candidate parents in order and take the first that contains it.
    todo: remove some of these if they are not possible. right now i don't know.
    """
    here = os.path.dirname(__file__)
    dirs = [
        os.path.dirname(here),
        os.path.dirname(os.path.dirname(here)),
        sys.prefix,
        os.path.join(sys.prefix, "sire"),
    ]
    for candidate in dirs:
        templates = os.path.join(candidate, "templates")
        if os.path.isdir(templates):
            return templates
    raise ValueError(f"No templates found in: {dirs}")
def _obtain_git_username(git, name):
    """
    Best-effort discovery of the user's username on the given git host.

    First greets the host over ssh and scrapes the username from its reply;
    failing that, checks whether the local OS username exists on the remote.
    Returns the username string, or False when nothing could be determined.
    """
    if git not in {"github", "gitlab", "bitbucket"}:
        raise NotImplementedError(f"Git host {git} not implemented yet. Make an issue?")
    command = f'ssh -o "StrictHostKeyChecking=no" -T git@{git}.com'
    namefind = r"(?:Hi |logged in as |Welcome to GitLab, @)([a-zA-Z\d]{2,40})"
    # first try: see if we can get a good response from our host
    run_kwargs: Mapping[str, Any] = dict(
        shell=True, stderr=subprocess.PIPE, universal_newlines=True
    )
    completed = subprocess.run(command, **run_kwargs)
    match = re.search(namefind, completed.stderr)
    if match:
        return match.group(1)
    # otherwise, see if our computer's username exists on the remote...imperfect
    guess = getpass.getuser()
    if requests.get(GIT_URLS[git].format(git_username=guess, name=name)).ok:
        return guess
    return False
# directory containing our templates (resolved once, at import time)
TEMPLATES = _locate_templates()
def _remove_excluded_lines(formatted, exclude):
    """
    Drop lines that only make sense for excluded features.

    This is a hard thing to automate, so BADLINES maps each excludable
    feature to the line prefixes that should disappear along with it.
    """
    prefixes = [p for item in exclude for p in BADLINES.get(item, [])]
    kept = [
        line
        for line in formatted.splitlines()
        if not any(line.startswith(prefix) for prefix in prefixes)
    ]
    return "\n".join(kept)
def _write(proj, outpath, formatters, exclude):
    """
    Render one template file into the new project.

    Reads the template matching outpath's basename, substitutes {name} and
    the extra formatters (unknown placeholders survive via SafeDict), prunes
    excluded requirements/lines, then writes the result under proj.
    """
    template = os.path.join(TEMPLATES, os.path.basename(outpath))
    with open(template, "r") as handle:
        formatted = handle.read().format_map(SafeDict(name=proj, **formatters))
    # hack in some handling of requirements file
    if "requirements" in template:
        deps = {MODULE_TRANSLATION.get(item, item) for item in exclude}
        formatted = "\n".join(
            line for line in formatted.splitlines() if line not in deps
        )
    # remove bad lines?
    formatted = _remove_excluded_lines(formatted, exclude)
    destination = os.path.join(proj, outpath.format(name=proj))
    with open(destination, "w") as handle:
        handle.write(formatted.strip() + "\n")
def _show_todos(name, paths, exclude, formatters, git):
    """
    Print a closing summary of follow-up tasks, and (when git is enabled)
    wire up the 'origin' remote so the user can push straight away.
    """
    todos = [f"Actually write some tests: {name}/tests.py"]
    if ".coveragerc" in paths:
        todos.append("Set up codecov and a git hook for it.")
    if "mkdocs" not in exclude:
        rtd = "https://readthedocs.org/dashboard/import"
        todos.append(f"Set up a readthedocs and a git hook for it: {rtd}")
    if "git" not in exclude:
        git_username = formatters.get("git_username", "<username>")
        git_url = GIT_URLS[git].format(git_username=git_username, name=name)
        # Run the remote commands from inside the freshly created project.
        os.chdir(name)
        subprocess.call(f"git remote add origin {git_url}".split())
        print(f"Added remote 'origin': {git_url}")
        subprocess.call(f"git remote set-url origin {git_url}".split())
        print(f"Set remote: {git_url}")
        os.chdir("..")
    form = "\n* ".join(todos)
    # right now, there is always at least one todo note (do tests!)
    cd = f"`cd {name}`"
    print(f"\nAll done! {cd} to check out your new project.\n\nTo do:\n\n* {form}")
def _filter_excluded(exclude):
    """
    Return the subset of PATHS not matched by the pre-processed exclude set.

    Each path can be excluded by its full string, its bare filename (leading
    dots stripped), or that filename without its extension.
    """
    if not exclude:
        return PATHS
    filtered = set()
    for path in PATHS:
        base = os.path.basename(path).lstrip(".")
        stem = os.path.splitext(base)[0]
        if any(candidate in exclude for candidate in (path, base, stem)):
            print(f"* Skipping {path} because it is in the exclude list.")
        else:
            filtered.add(path)
    return filtered
def _build_virtualenv(name):
    """
    Create venv-<name> inside the project, install its requirements, and
    print a copyable activation command at the end.
    """
    print("Making virtualenv and installing dependencies")
    subprocess.call(f"python3.7 -m venv {name}/venv-{name}".split())
    pip = os.path.abspath(f"{name}/venv-{name}/bin/pip")
    for command in (f"{pip} install wheel", f"{pip} install -r {name}/requirements.txt"):
        subprocess.call(command.split())
    activate = os.path.join(os.path.dirname(pip), "activate")
    print(f"\n* virtualenv created: activate with `source {activate}`")
def _input_wrap(prompt, default=None):
"""
Run input() with formatted prompt, and return
The while loop can be used to ensure correct output
"""
while True: # while input not understood
result = input(prompt.format(default=default)).lower().strip()
if result in {"y", "yes"}:
return True
if result in {"n", "no"}:
return False
if not result:
return default
if result in {"quit", "q", "exit"}:
raise RuntimeError("User quit.")
if not isinstance(default, bool):
return result
print("Error: answer not understood. You can 'quit' or hit ctrl+c to exit.")
def _interactive(git, name):
    """
    Interactive assistant. Answers gathered here supersede command line
    arguments, so combining -i with other flags is pointless.
    Returns (git, formatters_dict) with 'git' popped out of the answers.
    """
    intro = (
        "\n========================================================================\n"
        "This is the interactive helper for *sire*. Details entered here will \n"
        "determine which files are included, and format them with the correct \n"
        "information. Leaving a field blank is OK, but can result in incompletely \n"
        "formatted files. Hit enter to begin, or type 'quit' to quit.\n"
        "========================================================================\n\n"
    )
    _input_wrap(intro)
    # attempt to get some variables from shell. not sure how this looks when absent
    usr = _obtain_git_username(git, name)

    def _git_config(key):
        # Pull a default value out of the local git configuration.
        return subprocess.check_output(["git", "config", key]).decode("utf-8").strip()

    email = _git_config("user.email")
    real_name = _git_config("user.name")
    short = "/".join(sorted(SHORT_PATHS))
    exes = f"Comma separated list of files to exclude\n(e.g. {short}): "
    # tuples are field name, prompt text, default
    prompts = [
        ("real_name", "Real name (for license, setup.py) ({default}): ", real_name),
        ("username", "Username ({default}): ", usr),
        ("email", "Email ({default}): ", email),
        ("git_username", "GitHub/GitLab/Bitbucket username ({default}): ", usr),
        ("description", "Short project description: ", None),
        # ("license", "Licence to use ({default}): ", "MIT"),
        ("mkdocs", "Use mkdocs/readthedocs for documentation (y/N): ", False),
        ("virtualenv", "Generate a virtualenv for this project (y/N): ", False),
        ("git", "Git host to use (github,gitlab,bitbucket/None): ", None),
        ("exclude", exes, set()),
    ]
    answers = {field: _input_wrap(text, default) for field, text, default in prompts}
    return answers.pop("git"), answers
def sire(name, git=None, interactive=False, exclude=None):
"""
Generate a new Python 3.7 project, optionally with .git, virtualenv and
mkthedocs basics present too.
"""
git, formatters = | |
<filename>bibliotheque.py
#!/usr/bin/python3
""" Exemples de structures issues de Wikipedia
Titre : Le jeu de la Vie
Auteur : <NAME>
Création : 29/04/2020
Version : 1.1 (16/05/2020)
Version 1.1 :
- PRESENTATION : Amélioration conformité PEP8
"""
bibliotheque = \
{
"Vaisseau : Planeur" : \
[
[0, 1, 0],
[0, 0, 1],
[1, 1, 1]
],
"Vaisseau : Poids plume (LWSS) " : \
[
[1, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 1, 1, 1]
],
"Vaisseau : Poids lourd (HWSS) " : \
[
[0, 0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[0, 1, 1, 1, 1, 1, 1]
],
"Vaisseau : Suiveur de <NAME>" : \
[
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"Vaisseau : <NAME>" : \
[
[0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]
],
"Vaisseau : Flottille" : \
[
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"Canon : Canon à planeurs de <NAME>" : \
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"Puffeur : <NAME> 1" : \
[
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0]
],
"Puffeur : 2" : \
[
[0, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, | |
# None of this is django-specific. Don't import from django.
import os
import sys
import contextlib
import subprocess
import getpass
import logging
from os.path import commonprefix
# try both the python 2 and 3 import to avoid six dependency or django import
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from . import environment
from .utils import get_next_available_port
# Name of the SDK's local dev server script; used to locate the SDK on PATH.
_SCRIPT_NAME = 'dev_appserver.py'
# Process-wide API server singleton, populated by _local().
_API_SERVER = None
# We use this list to prevent user using certain dev_appserver options that
# might collide with some Django settings.
WHITELISTED_DEV_APPSERVER_OPTIONS = [
    'A',
    'admin_host',
    'admin_port',
    'auth_domain',
    'storage_path',
    'log_level',
    'max_module_instances',
    'use_mtime_file_watcher',
    'appidentity_email_address',
    'appidentity_private_key_path',
    'blobstore_path',
    'datastore_path',
    'clear_datastore',
    'datastore_consistency_policy',
    'require_indexes',
    'auto_id_policy',
    'logs_path',
    'show_mail_body',
    'enable_sendmail',
    'prospective_search_path',
    'clear_prospective_search',
    'search_indexes_path',
    'clear_search_indexes',
    'enable_task_running',
    'allow_skipped_files',
    'api_port',
    'dev_appserver_log_level',
    'skip_sdk_update_check',
    'default_gcs_bucket_name',
]
# Starting ports for the local services; actual ports are probed upward
# from these via get_next_available_port().
DEFAULT_API_PORT = 8010
DEFAULT_ADMIN_PORT = 8011
DEFAULT_BLOBSTORE_SERVICE_PORT = 8012
# This is a temporary workaround for the issue with 1.9.49 version where
# version is set to [0, 0, 0] instead of [1, 9, 49]. This could be removed
# after this: https://code.google.com/p/googleappengine/issues/detail?id=13439
# issue is resolved. If that is done, we should remove all references to
# TEMP_1_9_49_VERSION_NO here and in djangae/management/command/runserver.
TEMP_1_9_49_VERSION_NO = [0, 0, 0]
class Filter(object):
    """Logging filter that drops two very noisy local-datastore records."""

    # (funcName, module) pairs whose records are suppressed.
    _NOISY = (
        ('__StarSchemaQueryPlan', 'datastore_sqlite_stub'),
        ('Run', 'datastore'),
    )

    def filter(self, record):
        # Returning 0 suppresses the record, 1 lets it through.
        if (record.funcName, record.module) in self._NOISY:
            return 0
        return 1
def _disable_sqlite_stub_logging():
    """
    The local SDK logs every datastore query to the root logger at debug
    level; attach a Filter that silences those records. If you do want the
    query log, simply remove this filter.
    """
    root_logger = logging.getLogger()
    root_logger.addFilter(Filter())
def _find_sdk_from_python_path():
    """Locate the App Engine SDK via the importable google.appengine package."""
    import google.appengine
    # Resolve from the 'google' package that actually contains 'appengine' —
    # several 'google' namespace directories may exist on sys.path.
    package_dir = os.path.dirname(google.appengine.__path__[0])
    return os.path.abspath(os.path.dirname(package_dir))
def _find_sdk_from_path():
    """
    Locate the SDK directory by finding dev_appserver.py on PATH
    (SDK installers put it there).
    """
    locator = 'where' if sys.platform == "win32" else 'which'
    script_path = subprocess.check_output([locator, _SCRIPT_NAME]).strip()
    sdk_dir = os.path.dirname(os.path.realpath(script_path))
    if not os.path.exists(os.path.join(sdk_dir, 'bootstrapping')):
        # Regular App Engine SDK: the script lives in the SDK root itself.
        return sdk_dir
    # Cloud SDK layout: the GAE SDK lives under ../platform/google_appengine.
    sdk_dir = os.path.abspath(os.path.join(sdk_dir, '..', 'platform', 'google_appengine'))
    if not os.path.exists(sdk_dir):
        raise RuntimeError(
            'The Cloud SDK is on the path, but the app engine SDK dir could not be found'
        )
    return sdk_dir
def _create_dispatcher(configuration, options):
    """
    Build (once per process) and return the devappserver2 Dispatcher.

    The Dispatcher constructor takes a long positional argument list whose
    shape changed across SDK releases, so arguments the detected SDK version
    does not accept are marked with the UnsupportedOption sentinel and
    stripped out before the call — the ORDER of dispatcher_args is critical.
    """
    from google.appengine.tools.devappserver2 import dispatcher
    from google.appengine.tools.devappserver2.devappserver2 import DevelopmentServer
    from djangae.compat import _LOG_LEVEL_TO_RUNTIME_CONSTANT
    from google.appengine.tools.sdk_update_checker import GetVersionObject, \
        _VersionList
    # Reuse the cached dispatcher: one instance for the process lifetime.
    if hasattr(_create_dispatcher, "singleton"):
        return _create_dispatcher.singleton

    class UnsupportedOption(object):
        # Sentinel marking constructor args this SDK version doesn't accept.
        pass

    current_version = _VersionList(GetVersionObject()['release'])
    # TEMP_1_9_49_VERSION_NO: SDK 1.9.49 mis-reports its version as [0, 0, 0].
    supports_go_config = current_version >= _VersionList('1.9.50')
    supports_custom_config = current_version >= _VersionList('1.9.22') or current_version == TEMP_1_9_49_VERSION_NO
    supports_external_port = current_version >= _VersionList('1.9.19') or current_version == TEMP_1_9_49_VERSION_NO
    supports_watcher_ignore_re = current_version >= _VersionList('1.9.54')
    # Positional args in the exact order Dispatcher expects; unsupported
    # entries are filtered below, collapsing to the older signatures.
    dispatcher_args = [
        configuration,
        options.host,
        options.port,
        options.auth_domain,
        _LOG_LEVEL_TO_RUNTIME_CONSTANT[options.log_level],
        DevelopmentServer._create_php_config(options),
        DevelopmentServer._create_python_config(options),
        DevelopmentServer._create_java_config(options),
        DevelopmentServer._create_go_config(options) if supports_go_config else UnsupportedOption,
        None if supports_custom_config else UnsupportedOption,
        DevelopmentServer._create_cloud_sql_config(options),
        DevelopmentServer._create_vm_config(options),
        DevelopmentServer._create_module_to_setting(options.max_module_instances,
                                                    configuration, '--max_module_instances'),
        options.use_mtime_file_watcher,
        None if supports_watcher_ignore_re else UnsupportedOption,
        options.automatic_restart,
        options.allow_skipped_files,
        DevelopmentServer._create_module_to_setting(options.threadsafe_override,
                                                    configuration, '--threadsafe_override'),
        options.external_port if supports_external_port else UnsupportedOption
    ]
    dispatcher_args = [x for x in dispatcher_args if not x is UnsupportedOption]
    _create_dispatcher.singleton = dispatcher.Dispatcher(*dispatcher_args)
    return _create_dispatcher.singleton
@contextlib.contextmanager
def _local(devappserver2=None, configuration=None, options=None, wsgi_request_info=None, **kwargs):
    """
    Context manager providing dev_appserver-style local service stubs.

    Fakes the request environment in os.environ, builds the dispatcher and
    API server (cached in the module global _API_SERVER), starts the local
    blobstore service, yields, then tears everything down and restores the
    original environment. Statement order here is load-bearing: the env
    vars must be set before the SDK modules are used.
    """
    # If we use `_LocalRequestInfo`, deferred tasks don't seem to work,
    # but with the default `WSGIRequestInfo`, building the request url for
    # blobstore uploads fails. So we inherit from `WSGIRequestInfo` and copy
    # the `get_request_url` from `_LocalRequestInfo`
    class CustomWSGIRequestInfo(wsgi_request_info.WSGIRequestInfo):
        def get_request_url(self, request_id):
            """Returns the URL the request e.g. 'http://localhost:8080/foo?bar=baz'.
            Args:
              request_id: The string id of the request making the API call.
            Returns:
              The URL of the request as a string.
            """
            try:
                host = os.environ['HTTP_HOST']
            except KeyError:
                host = os.environ['SERVER_NAME']
                port = os.environ['SERVER_PORT']
                if port != '80':
                    host += ':' + port
            url = 'http://' + host
            url += quote(os.environ.get('PATH_INFO', '/'))
            if os.environ.get('QUERY_STRING'):
                url += '?' + os.environ['QUERY_STRING']
            return url

    global _API_SERVER
    _disable_sqlite_stub_logging()
    # Snapshot os.environ so the finally block can restore it untouched.
    original_environ = os.environ.copy()
    # Silence warnings about this being unset, localhost:8080 is the dev_appserver default.
    # Note that we're setting things for the *Blobstore* handler in os.environ here, which seems
    # kind of crazy, and probably is, but it seems to be necessary to make stuff work.
    url = "localhost"
    port = get_next_available_port(url, DEFAULT_BLOBSTORE_SERVICE_PORT)
    os.environ.setdefault("HTTP_HOST", "{}:{}".format(url, port))
    os.environ['SERVER_NAME'] = url
    os.environ['SERVER_PORT'] = str(port)
    os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (os.environ['SERVER_NAME'], os.environ['SERVER_PORT'])
    # devappserver2._setup_environ(configuration.app_id)
    os.environ['APPLICATION_ID'] = configuration.app_id
    from google.appengine.tools.devappserver2 import api_server
    from google.appengine.tools.sdk_update_checker import GetVersionObject, _VersionList
    if hasattr(api_server, "get_storage_path"):
        storage_path = api_server.get_storage_path(options.storage_path, configuration.app_id)
    else:
        # SDK < 1.9.51
        storage_path = devappserver2._get_storage_path(options.storage_path, configuration.app_id)
    dispatcher = _create_dispatcher(configuration, options)
    request_data = CustomWSGIRequestInfo(dispatcher)
    # Remember the wsgi request info object so it can be reused to avoid duplication.
    dispatcher._request_data = request_data
    # We set the API and Admin ports so that they are beyond any modules (if you
    # have 10 modules then these values will shift, but it's better that they are predictable
    # in the common case)
    options.api_port = get_next_available_port(url, max(DEFAULT_API_PORT, port + 1))
    options.admin_port = get_next_available_port(url, max(DEFAULT_ADMIN_PORT, options.api_port + 1))
    if hasattr(api_server, "create_api_server"):
        current_version = _VersionList(GetVersionObject()['release'])
        app_rather_than_config = current_version >= _VersionList('1.9.54')
        # Google changed the argument structure in version 1.9.54 so we have to
        # conditionally supply the args here
        if app_rather_than_config:
            _API_SERVER = api_server.create_api_server(
                request_data,
                storage_path,
                options,
                configuration.app_id,
                environment.get_application_root()
            )
        else:
            _API_SERVER = api_server.create_api_server(
                request_data, storage_path, options, configuration
            )
        # We have to patch api_server.create_api_server to return _API_SERVER
        # every time it's called, without this we end up with all kinds of
        # problems. Basically we need one api server for the lifetime of the
        # sandbox (including in `runserver`)
        def create_api_server_patch(*args, **kwargs):
            return _API_SERVER
        api_server.create_api_server = create_api_server_patch
    else:
        _API_SERVER = devappserver2.DevelopmentServer._create_api_server(
            request_data, storage_path, options, configuration
        )
    from .blobstore_service import start_blobstore_service, stop_blobstore_service
    start_blobstore_service()
    try:
        yield
    finally:
        # NOTE(review): bare except — falls back to stub_util.cleanup_stubs on
        # SDKs where the top-level helper is missing; confirm AttributeError
        # is the only expected failure here.
        try:
            api_server.cleanup_stubs()
        except:
            api_server.stub_util.cleanup_stubs()
        os.environ = original_environ
        stop_blobstore_service()
@contextlib.contextmanager
def _remote(configuration=None, remote_api_stub=None, apiproxy_stub_map=None, **kwargs):
    """
    Context manager that points the API proxy at a deployed app via remote_api.

    Prefers OAuth2 (token cached in ~/.djangae_oauth2_tokens), falling back
    to username/password when the oauth2 stack can't be imported. Also
    recolours sys.ps1 so an interactive shell clearly shows it is talking to
    a remote app. Restores the original API proxy and prompt on exit.
    """
    def auth_func():
        # Fallback credentials prompt (Python 2 raw_input) for the
        # username/password path below.
        return raw_input('Google Account Login: '), getpass.getpass('Password: ')

    original_apiproxy = apiproxy_stub_map.apiproxy
    # Strip the "dev~" prefix the SDK prepends to local application ids.
    if configuration.app_id.startswith('dev~'):
        app_id = configuration.app_id[4:]
    else:
        app_id = configuration.app_id
    os.environ['HTTP_HOST'] = '{0}.appspot.com'.format(app_id)
    os.environ['DEFAULT_VERSION_HOSTNAME'] = os.environ['HTTP_HOST']
    try:
        from google.appengine.tools import appengine_rpc_httplib2
        # from google.appengine.tools.appcfg import APPCFG_CLIENT_ID, APPCFG_CLIENT_NOTSOSECRET
        # Having to import this here because appcfg.py tries to import devshell from oauth2, and I have a later oauth2
        # library installed, that doesn't have devshell in it, thus raising an ImportError
        APPCFG_CLIENT_ID = '550516889912.apps.googleusercontent.com'
        APPCFG_CLIENT_NOTSOSECRET = '<KEY>'
        params = appengine_rpc_httplib2.HttpRpcServerOAuth2.OAuth2Parameters(
            access_token=None,
            client_id=APPCFG_CLIENT_ID,
            client_secret=APPCFG_CLIENT_NOTSOSECRET,
            scope=remote_api_stub._OAUTH_SCOPES,
            refresh_token=None,
            credential_file=os.path.expanduser("~/.djangae_oauth2_tokens"),
            token_uri=None
        )

        def factory(*args, **kwargs):
            # Give the OAuth flow a few attempts before giving up.
            kwargs["auth_tries"] = 3
            return appengine_rpc_httplib2.HttpRpcServerOAuth2(*args, **kwargs)

        remote_api_stub.ConfigureRemoteApi(
            app_id=None,
            path='/_ah/remote_api',
            auth_func=params,
            servername='{0}.appspot.com'.format(app_id),
            secure=True,
            save_cookies=True,
            rpc_server_factory=factory
        )
    except ImportError:
        logging.exception("Unable to use oauth2 falling back to username/password")
        remote_api_stub.ConfigureRemoteApi(
            None,
            '/_ah/remote_api',
            auth_func,
            servername='{0}.appspot.com'.format(app_id),
            secure=True,
        )
    # Make the interactive prompt red and labelled "(remote)" as a warning.
    ps1 = getattr(sys, 'ps1', None)
    red = "\033[0;31m"
    native = "\033[m"
    sys.ps1 = red + '(remote) ' + app_id + native + ' >>> '
    try:
        yield
    finally:
        apiproxy_stub_map.apiproxy = original_apiproxy
        sys.ps1 = ps1
@contextlib.contextmanager
def _test(**kwargs):
    """
    This stub uses the testbed to initialize the bare minimum to use the
    Datastore connector. Tests themselves should setup/tear down their own
    stubs by using DjangaeDiscoverRunner or the nose plugin.
    The stubs here are just for bootstrapping the tests. Obviously any data inserted
    between here, and the tests themselves will be wiped out when the tests begin!
    """
    from google.appengine.ext import testbed
    from google.appengine.datastore import datastore_stub_util
    # Minimal stub set: app identity, memcache, and an SQLite-backed
    # datastore with probability=1 (always-applied) consistency.
    MINIMAL_STUBS = {
        "init_app_identity_stub": {},
        "init_memcache_stub": {},
        "init_datastore_v3_stub": {
            "use_sqlite": True,
            "auto_id_policy": testbed.AUTO_ID_POLICY_SCATTERED,
            "consistency_policy": datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
        }
    }
    from .blobstore_service import start_blobstore_service, stop_blobstore_service
    # Dummy values for testing, based on the defaults for dev_appserver
    # (differentiating between the default runserver port of 8000 can also be useful
    # for picking up hard-coding issues etc.)
    os.environ["HTTP_HOST"] = "localhost:8080"
    os.environ['SERVER_NAME'] = "localhost"
    os.environ['SERVER_PORT'] = "8080"
    os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
        os.environ['SERVER_NAME'], os.environ['SERVER_PORT']
    )
    # Rebinds the local name `testbed` from module to Testbed instance.
    testbed = testbed.Testbed()
    testbed.activate()
    for init_name, stub_kwargs in MINIMAL_STUBS.items():
        getattr(testbed, init_name)(**stub_kwargs)
    start_blobstore_service()
    try:
        yield
    finally:
        stop_blobstore_service()
        if testbed:
            testbed.deactivate()
# Names of the available sandboxes (see activate()).
LOCAL = 'local'
REMOTE = 'remote'
TEST = 'test'
# Maps each sandbox name to the context manager that sets it up.
SANDBOXES = {
    LOCAL: _local,
    REMOTE: _remote,
    TEST: _test,
}
# Cached dev_appserver options/configuration, populated during activation.
_OPTIONS = None
_CONFIG = None
@contextlib.contextmanager
def activate(sandbox_name, add_sdk_to_path=False, new_env_vars=None, **overrides):
"""Context manager for command-line scripts started outside of dev_appserver.
:param sandbox_name: str, one of 'local', 'remote' or 'test'
:param add_sdk_to_path: bool, optionally adds the App Engine SDK to sys.path
:param options_override: an options structure to pass down to dev_appserver setup
Available sandboxes:
local: Adds libraries specified in app.yaml to the path and initializes local service stubs as though
dev_appserver were running.
remote: Adds libraries specified in app.yaml to the path and initializes remote | |
<filename>inventarios/business.py
# coding: utf-8
from .models import MovimientoCabecera
from .models import MovimientoDetalle
from .models import Stock
# from trabajos.models import OrdenTrabajo
from trabajos.models import Material
class EntradaAlmacenBusiness(object):
    """Business rules for warehouse entry movements (tipo "ENT")."""

    # Header fields common to every entry class.
    _CAMPOS_COMUNES = ('descripcion', 'fecha', 'almacen_destino')
    # Extra fields captured only for purchases ("COM").
    _CAMPOS_COMPRA = ('proveedor', 'persona_recibe')

    def __init__(self):
        self.tipo = "ENT"

    def crear_CabeceraEntrada(self, formulario, usuario, clase):
        """Create and persist the entry header for the given class.

        clase: "COM" (purchase), "AJU" (adjustment) or "SAL" (opening balance).
        Returns the saved MovimientoCabecera, or None when the form is
        invalid or the class is unknown (mirrors the original behaviour).
        """
        if clase == "COM":
            campos = self._CAMPOS_COMUNES + self._CAMPOS_COMPRA
        elif clase == "AJU" or clase == "SAL":
            campos = self._CAMPOS_COMUNES
        else:
            return None
        if not formulario.is_valid():
            return None
        datos = formulario.cleaned_data
        # The original branches duplicated this construction verbatim;
        # build the header from a single field list instead.
        cabecera = MovimientoCabecera()
        for campo in campos:
            setattr(cabecera, campo, datos.get(campo))
        cabecera.created_by = usuario
        cabecera.tipo = self.tipo
        cabecera.clasificacion = clase
        cabecera.save()
        return cabecera

    def actualizar_CabeceraEntrada(self, cabecera, formulario, usuario):
        """Update an open header from a model form.

        Closed ("CER") movements are immutable and are left untouched.
        """
        if cabecera.estado == "CER":
            return
        if formulario.is_valid():
            cabecera = formulario.save(commit=False)
            cabecera.updated_by = usuario
            cabecera.save()

    def guardar_LineasEntradaEnStock(self, cabecera):
        """Add every detail line to Stock and close the movement.

        Returns True on success, or an error message string when the
        movement has no lines / is already closed (original contract).
        """
        if cabecera.estado == "CER":
            return "El movimiento ya está cerrado"
        lineas_detalle = cabecera.movimientodetalle_set.all()
        if len(lineas_detalle) == 0:
            return "El detalle no tiene lineas"
        for linea in lineas_detalle:
            fila_stock = Stock.objects.filter(
                almacen=cabecera.almacen_destino,
                articulo=linea.articulo
            )
            if fila_stock:
                # Bind the cached row once; re-indexing the queryset for the
                # read, the assignment and the save is fragile.
                fila = fila_stock[0]
                fila.cantidad = fila.cantidad + linea.cantidad
                fila.save()
            else:
                # No stock row yet for this article in the destination
                # warehouse: create it with the incoming quantity.
                Stock.objects.create(
                    almacen=cabecera.almacen_destino,
                    articulo=linea.articulo,
                    cantidad=linea.cantidad
                )
        cabecera.estado = "CER"
        cabecera.save()
        return True
class SalidaAlmacenBusiness(object):
    """Business rules for warehouse exit movements (tipo "SAL")."""

    def __init__(self):
        self.tipo = "SAL"

    def crear_CabeceraSalida(self, formulario, usuario, clase):
        """Create and persist the exit header for the given class.

        clase: "DES" (dispatch to personnel) or "AJU" (adjustment).
        Returns the saved MovimientoCabecera, or None when the form is
        invalid or the class is unknown (mirrors the original behaviour).
        """
        if clase == "DES":
            campos = ('descripcion', 'fecha', 'almacen_origen',
                      'persona_entrega', 'persona_recibe')
        elif clase == "AJU":
            campos = ('descripcion', 'fecha', 'almacen_origen')
        else:
            return None
        if not formulario.is_valid():
            return None
        datos = formulario.cleaned_data
        # The original branches duplicated this construction verbatim;
        # build the header from a single field list instead.
        cabecera = MovimientoCabecera()
        for campo in campos:
            setattr(cabecera, campo, datos.get(campo))
        cabecera.created_by = usuario
        cabecera.tipo = self.tipo
        cabecera.clasificacion = clase
        cabecera.save()
        return cabecera

    def actualizar_CabeceraSalida(self, cabecera, formulario, usuario):
        """Update an open header from a model form.

        Closed ("CER") movements are immutable and are left untouched.
        """
        if cabecera.estado == "CER":
            return
        if formulario.is_valid():
            cabecera = formulario.save(commit=False)
            cabecera.updated_by = usuario
            cabecera.save()

    def guardar_LineasSalidaEnStock(self, cabecera):
        """Subtract every detail line from Stock and close the movement.

        Returns True on success, otherwise a list of error strings.

        BUG FIX: the original referenced ``errores`` before assignment in the
        already-closed branch (NameError); it is now initialised up front.
        """
        errores = []
        if cabecera.estado == "CER":
            errores.append("El movimiento ya está cerrado")
            return errores
        lineas_detalle = cabecera.movimientodetalle_set.all()
        if len(lineas_detalle) == 0:
            errores.append("El detalle no tiene lineas")
            return errores
        for linea in lineas_detalle:
            fila_stock = Stock.objects.filter(
                almacen=cabecera.almacen_origen,
                articulo=linea.articulo
            )
            if not fila_stock:
                errores.append("%s: %s." % (
                    "No existe articulo registrado en almacen ",
                    linea.articulo))
                continue
            # Bind the cached row once instead of re-indexing the queryset.
            fila = fila_stock[0]
            if fila.cantidad < linea.cantidad:
                errores.append("%s %s. %s %s" % (
                    "No hay articulos suficientes en almacen: ",
                    fila.articulo,
                    ",Su cantidad actual es:",
                    fila.cantidad))
            else:
                fila.cantidad = fila.cantidad - linea.cantidad
                fila.save()
        if len(errores) > 0:
            return errores
        cabecera.estado = "CER"
        cabecera.save()
        return True
class TraspasoBusiness(object):
    def __init__(self):
        # A transfer is recorded from the origin side, so the movement itself
        # is typed as an exit ("SAL"); the matching entry header is created
        # separately by crear_CabeceraEntradaTraspaso.
        self.tipo = "SAL"
def crear_CabeceraTraspaso(self, formulario, usuario, clase):
if formulario.is_valid():
datos_formulario = formulario.cleaned_data
cabecera = MovimientoCabecera()
cabecera.descripcion = datos_formulario.get('descripcion')
cabecera.fecha = datos_formulario.get('fecha')
cabecera.almacen_origen = datos_formulario.get(
'almacen_origen'
)
cabecera.almacen_destino = datos_formulario.get(
'almacen_destino'
)
cabecera.persona_entrega = datos_formulario.get('persona_entrega')
cabecera.persona_recibe = datos_formulario.get('persona_recibe')
cabecera.created_by = usuario
cabecera.tipo = self.tipo
cabecera.clasificacion = clase
cabecera.save()
return cabecera
def actualizar_CabeceraTraspaso(self, cabecera, formulario, usuario):
if cabecera.estado != "CER":
if formulario.is_valid():
cabecera = formulario.save(commit=False)
cabecera.updated_by = usuario
cabecera.save()
def crear_CabeceraEntradaTraspaso(self, cabecera):
entrada = MovimientoCabecera()
entrada.descripcion = cabecera.descripcion
entrada.fecha = cabecera.fecha
entrada.almacen_origen = cabecera.almacen_origen
entrada.almacen_destino = cabecera.almacen_destino
entrada.persona_entrega = cabecera.persona_entrega
entrada.persona_recibe = cabecera.persona_recibe
entrada.created_by = cabecera.created_by
entrada.tipo = "ENT"
entrada.clasificacion = "TRA"
entrada.estado = "TRAN"
entrada.save()
print entrada
return entrada
def crear_LineasDetalleEntradaTraspaso(self, salida, entrada):
lineas = salida.movimientodetalle_set.all()
for linea in lineas:
mov_entrada = MovimientoDetalle()
mov_entrada.cabecera = entrada
mov_entrada.articulo = linea.articulo
mov_entrada.cantidad = linea.cantidad
mov_entrada.created_date = linea.created_date
mov_entrada.updated_date = linea.updated_date
mov_entrada.created_by = linea.created_by
mov_entrada.updated_by = linea.updated_by
mov_entrada.save()
return True
def guardar_LineasSalidaTraspasoEnStock(self, cabecera):
# Se valida el estado de la cabecera:
if cabecera.estado != "CER":
lineas_detalle = cabecera.movimientodetalle_set.all()
errores = []
# Se valida que se tenga al menos una linea
if len(lineas_detalle) > 0:
# Recorriendo lineas
for linea in lineas_detalle:
# Registro Existe
fila_stock = Stock.objects.filter(
almacen=cabecera.almacen_origen,
articulo=linea.articulo
)
if fila_stock:
if fila_stock[0].cantidad < linea.cantidad:
error = "%s %s %s %s" % (
"No hay articulos suficientes en almacen del articulo:",
fila_stock[0].articulo,
" Su cantidad actual es:",
fila_stock[0].cantidad
)
errores.append(error)
# return "No hay articulos suficientes en el stock."
else:
fila_stock[0].cantidad = fila_stock[
0].cantidad - linea.cantidad
fila_stock[0].save()
# Registro No Existe
else:
error = "%s: %s." % (
"No existe articulo registrado en stock ",
linea.articulo)
errores.append(error)
# return "No existe articulo registrado en el Stock"
if len(errores) > 0:
return errores
else:
cabecera.estado = "TRAN"
cabecera.save()
return True
else:
return "El detalle no tiene lineas"
else:
return "El movimiento ya está cerrado"
def guardar_LineasEntradaTraspasoEnStock(self, cabecera):
# Se valida el estado de la cabecera:
if cabecera.estado != "CER":
lineas_detalle = cabecera.movimientodetalle_set.all()
# Se valida que se tenga al menos una linea
if len(lineas_detalle) > 0:
# Recorriendo lineas
for linea in lineas_detalle:
# Registro Existe
fila_stock = Stock.objects.filter(
almacen=cabecera.almacen_destino,
articulo=linea.articulo
)
if fila_stock:
fila_stock[0].cantidad = fila_stock[
0].cantidad + linea.cantidad
fila_stock[0].save()
# Registro No Existe
else:
# Se crea el registro
Stock.objects.create(
almacen=cabecera.almacen_destino,
articulo=linea.articulo,
cantidad=linea.cantidad
)
print "Se afecto el stock almacen destino"
return "Exito"
else:
print "el detalle no tiene lineas"
return "El detalle no tiene lineas"
else:
print "el movimiento ya esta cerrado"
return "El movimiento ya está cerrado"
def cerrar_Traspaso(self, entrada, usuario):
if entrada.estado != "CER" and entrada.estado != "CAP":
print "es diferente"
# se busca el registro de salida
salida = MovimientoCabecera.objects.filter(
tipo="SAL",
descripcion=entrada.descripcion,
fecha=entrada.fecha,
created_by=entrada.created_by)
if salida:
print salida
salida[0].estado = "CER"
salida[0].updated_by = usuario
salida[0].save()
entrada.estado = "CER"
entrada.updated_by = usuario
entrada.save()
return True
else:
print "no hay salida"
return False
class SalidaOrdenTrabajoBusiness(object):
def crear_Cabecera(self, formulario, usuario):
    """Create and save a work-order ("OT") outbound movement header.

    From a valid form, builds the MovimientoCabecera and pre-populates
    its detail lines from the work order's materials, excluding tools
    ("HER").  Returns the saved header; implicitly returns None when the
    form does not validate.
    """
    if formulario.is_valid():
        datos_formulario = formulario.cleaned_data
        cabecera = MovimientoCabecera()
        cabecera.descripcion = datos_formulario.get('descripcion')
        cabecera.fecha = datos_formulario.get('fecha')
        cabecera.almacen_origen = datos_formulario.get(
            'almacen_origen'
        )
        cabecera.persona_entrega = datos_formulario.get(
            'persona_entrega'
        )
        cabecera.persona_recibe = datos_formulario.get(
            'persona_recibe'
        )
        cabecera.orden_trabajo = datos_formulario.get(
            'orden_trabajo'
        )
        cabecera.created_by = usuario
        cabecera.tipo = "SAL"
        cabecera.clasificacion = "OT"
        cabecera.save()
        # Work-order materials, excluding tools ("HER").
        materiales_ot = Material.objects.filter(
            orden__pk=cabecera.orden_trabajo.pk
        ).exclude(articulo__tipo="HER")
        # One detail line per material, quantity from the estimate.
        for r in materiales_ot:
            linea = MovimientoDetalle()
            linea.cabecera = cabecera
            linea.cantidad = r.cantidad_estimada
            linea.articulo = r.articulo
            linea.created_by = usuario
            linea.save()
        return cabecera
def actualizar_Cabecera(self, cabecera, formulario, usuario):
    """Update a work-order movement header while it is still in capture.

    If the linked work order changed, the existing detail lines are
    replaced with the materials of the new order (tools excluded).
    """
    # Only headers still in "CAP" (capture) state may be edited.
    if cabecera.estado == "CAP":
        if formulario.is_valid():
            # NOTE(review): despite its name, orden_new holds the pk of
            # the order *before* the form is applied.
            orden_new = cabecera.orden_trabajo.pk
            cabecera = formulario.save(commit=False)
            cabecera.updated_by = usuario
            # The work order changed on the header:
            if cabecera.orden_trabajo.pk != orden_new:
                # Drop the previous detail lines.
                lineas = MovimientoDetalle.objects.filter(cabecera=cabecera)
                for r in lineas:
                    r.delete()
                # Materials of the new order, excluding tools ("HER").
                materiales_ot = Material.objects.filter(
                    orden__pk=cabecera.orden_trabajo.pk
                ).exclude(articulo__tipo="HER")
                # Create the replacement lines.
                for r in materiales_ot:
                    linea = MovimientoDetalle()
                    linea.cabecera = cabecera
                    linea.cantidad = r.cantidad_estimada
                    linea.articulo = r.articulo
                    linea.created_by = usuario
                    linea.save()
            cabecera.save()
def guardar_Lineas(self, cabecera):
# Genero lista de errores
errores = []
# Si el estado de la orden es "CAPTURA"
if cabecera.estado == "CAP":
# Se obtiene las lineas del movimiento
lineas_detalle = cabecera.movimientodetalle_set.all()
# Se valida que se tenga al menos una linea
if len(lineas_detalle) > 0:
# Se recorre las lineas a guardar
for linea in lineas_detalle:
# Se consulta el stock por linea
fila_stock = Stock.objects.filter(
almacen=cabecera.almacen_origen,
articulo=linea.articulo
)
# Si se tiene el registro de stock:
if fila_stock:
# | |
# schema support is there
success_response = responses['200']
# Check that the API renderers aren't included, but custom renderers are
assert set(success_response['content']) == {'application/json', 'text/plain'}
def test_openapi_yaml_rendering_without_aliases(self):
    """Repeated objects must be inlined in the YAML output, never emitted
    as YAML anchors/aliases; either key ordering is accepted."""
    renderer = OpenAPIRenderer()
    reused_object = {'test': 'test'}
    data = {
        'o1': reused_object,
        'o2': reused_object,
    }
    assert (
        renderer.render(data) == b'o1:\n test: test\no2:\n test: test\n' or
        renderer.render(data) == b'o2:\n test: test\no1:\n test: test\n' # py <= 3.5
    )
def test_serializer_filefield(self):
    """A FileField must map to a schema property with format 'binary'."""
    path = '/{id}/'
    method = 'POST'

    class ItemSerializer(serializers.Serializer):
        attachment = serializers.FileField()

    class View(generics.CreateAPIView):
        serializer_class = ItemSerializer

    view = create_view(
        View,
        method,
        create_request(path),
    )
    inspector = AutoSchema()
    inspector.view = view

    components = inspector.get_components(path, method)
    component = components['Item']
    properties = component['properties']
    assert properties['attachment']['format'] == 'binary'
def test_retrieve_response_body_generation(self):
    """
    Test that a list of properties is returned for retrieve item views.

    Pagination properties should not be added as the view represents a
    single item.
    """
    path = '/{id}/'
    method = 'GET'

    class Pagination(pagination.BasePagination):
        def get_paginated_response_schema(self, schema):
            return {
                'type': 'object',
                'item': schema,
            }

    class ItemSerializer(serializers.Serializer):
        text = serializers.CharField()

    class View(generics.GenericAPIView):
        serializer_class = ItemSerializer
        pagination_class = Pagination

    view = create_view(
        View,
        method,
        create_request(path),
    )
    inspector = AutoSchema()
    inspector.view = view

    responses = inspector.get_responses(path, method)
    # Plain $ref response — NOT wrapped in the pagination envelope above.
    assert responses == {
        '200': {
            'description': '',
            'content': {
                'application/json': {
                    'schema': {
                        '$ref': '#/components/schemas/Item'
                    },
                },
            },
        },
    }
    components = inspector.get_components(path, method)
    assert components == {
        'Item': {
            'type': 'object',
            'properties': {
                'text': {
                    'type': 'string',
                },
            },
            'required': ['text'],
        }
    }
def test_operation_id_generation(self):
    """The default operation id of a list endpoint is list<Name>s."""
    route = '/'
    http_method = 'GET'
    schema_inspector = AutoSchema()
    schema_inspector.view = create_view(
        views.ExampleGenericAPIView,
        http_method,
        create_request(route),
    )
    assert schema_inspector.get_operation_id(route, http_method) == 'listExamples'
def test_operation_id_custom_operation_id_base(self):
    """An explicit operation_id_base overrides the serializer-derived name."""
    path = '/'
    method = 'GET'
    view = create_view(
        views.ExampleGenericAPIView,
        method,
        create_request(path),
    )
    inspector = AutoSchema(operation_id_base="Ulysse")
    inspector.view = view

    operationId = inspector.get_operation_id(path, method)
    assert operationId == 'listUlysses'
def test_operation_id_custom_name(self):
    """NOTE(review): this is currently an exact duplicate of
    test_operation_id_custom_operation_id_base — judging by its name it
    was presumably meant to exercise a different customisation hook;
    confirm intent before changing."""
    path = '/'
    method = 'GET'
    view = create_view(
        views.ExampleGenericAPIView,
        method,
        create_request(path),
    )
    inspector = AutoSchema(operation_id_base='Ulysse')
    inspector.view = view

    operationId = inspector.get_operation_id(path, method)
    assert operationId == 'listUlysses'
def test_operation_id_override_get(self):
    """Overriding get_operation_id() replaces the generated id wholesale."""
    class CustomSchema(AutoSchema):
        def get_operation_id(self, path, method):
            return 'myCustomOperationId'

    path = '/'
    method = 'GET'
    view = create_view(
        views.ExampleGenericAPIView,
        method,
        create_request(path),
    )
    inspector = CustomSchema()
    inspector.view = view

    operationId = inspector.get_operation_id(path, method)
    assert operationId == 'myCustomOperationId'
def test_operation_id_override_base(self):
    """Overriding get_operation_id_base() swaps only the name part; the
    action prefix ('list') is still applied."""
    class CustomSchema(AutoSchema):
        def get_operation_id_base(self, path, method, action):
            return 'Item'

    path = '/'
    method = 'GET'
    view = create_view(
        views.ExampleGenericAPIView,
        method,
        create_request(path),
    )
    inspector = CustomSchema()
    inspector.view = view

    operationId = inspector.get_operation_id(path, method)
    assert operationId == 'listItem'
def test_repeat_operation_ids(self):
    """Distinct actions on one viewset must yield distinct operation ids.

    Fix: removed a stray debug print() of the rendered schema.
    """
    router = routers.SimpleRouter()
    router.register('account', views.ExampleGenericViewSet, basename="account")
    urlpatterns = router.urls

    generator = SchemaGenerator(patterns=urlpatterns)
    request = create_request('/')
    schema = generator.get_schema(request=request)
    schema_str = str(schema)
    # Two operations, each with its own unique id.
    assert schema_str.count("operationId") == 2
    assert schema_str.count("newExample") == 1
    assert schema_str.count("oldExample") == 1
def test_duplicate_operation_id(self):
    """Two views generating the same operationId must raise one UserWarning.

    Fix: removed a stray debug print() of the warning message.
    """
    patterns = [
        path('duplicate1/', views.ExampleOperationIdDuplicate1.as_view()),
        path('duplicate2/', views.ExampleOperationIdDuplicate2.as_view()),
    ]
    generator = SchemaGenerator(patterns=patterns)
    request = create_request('/')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        generator.get_schema(request=request)
    assert len(w) == 1
    assert issubclass(w[-1].category, UserWarning)
    assert 'You have a duplicated operationId' in str(w[-1].message)
def test_operation_id_viewset(self):
    """Each viewset action maps to its conventional operationId.

    Fix: removed a stray debug print() of the schema.
    """
    router = routers.SimpleRouter()
    router.register('account', views.ExampleViewSet, basename="account")
    urlpatterns = router.urls

    generator = SchemaGenerator(patterns=urlpatterns)
    request = create_request('/')
    schema = generator.get_schema(request=request)
    assert schema['paths']['/account/']['get']['operationId'] == 'listExampleViewSets'
    assert schema['paths']['/account/']['post']['operationId'] == 'createExampleViewSet'
    assert schema['paths']['/account/{id}/']['get']['operationId'] == 'retrieveExampleViewSet'
    assert schema['paths']['/account/{id}/']['put']['operationId'] == 'updateExampleViewSet'
    assert schema['paths']['/account/{id}/']['patch']['operationId'] == 'partialUpdateExampleViewSet'
    assert schema['paths']['/account/{id}/']['delete']['operationId'] == 'destroyExampleViewSet'
def test_serializer_datefield(self):
    """Date and datetime fields are strings with the matching formats."""
    route, http_method = '/', 'GET'
    inspector = AutoSchema()
    inspector.view = create_view(
        views.ExampleGenericAPIView,
        http_method,
        create_request(route),
    )
    example_properties = inspector.get_components(route, http_method)['Example']['properties']
    assert example_properties['date']['type'] == example_properties['datetime']['type'] == 'string'
    assert example_properties['date']['format'] == 'date'
    assert example_properties['datetime']['format'] == 'date-time'
def test_serializer_hstorefield(self):
    """An HStoreField is represented as an object-typed property."""
    route, http_method = '/', 'GET'
    inspector = AutoSchema()
    inspector.view = create_view(
        views.ExampleGenericAPIView,
        http_method,
        create_request(route),
    )
    example_properties = inspector.get_components(route, http_method)['Example']['properties']
    assert example_properties['hstore']['type'] == 'object'
def test_serializer_callable_default(self):
    """A callable default cannot be serialized into the schema, so no
    'default' key may be emitted for the field."""
    path = '/'
    method = 'GET'
    view = create_view(
        views.ExampleGenericAPIView,
        method,
        create_request(path),
    )
    inspector = AutoSchema()
    inspector.view = view

    components = inspector.get_components(path, method)
    component = components['Example']
    properties = component['properties']
    assert 'default' not in properties['uuid_field']
def test_serializer_validators(self):
    """Field validators must be mirrored into the schema: numeric bounds,
    string lengths, list sizes, regex patterns, formats and defaults."""
    path = '/'
    method = 'GET'
    view = create_view(
        views.ExampleValidatedAPIView,
        method,
        create_request(path),
    )
    inspector = AutoSchema()
    inspector.view = view

    components = inspector.get_components(path, method)
    component = components['ExampleValidated']
    properties = component['properties']

    # Integer min/max validators.
    assert properties['integer']['type'] == 'integer'
    assert properties['integer']['maximum'] == 99
    assert properties['integer']['minimum'] == -11

    # String length validators.
    assert properties['string']['minLength'] == 2
    assert properties['string']['maxLength'] == 10

    # List size validators.
    assert properties['lst']['minItems'] == 2
    assert properties['lst']['maxItems'] == 10

    # Regex validator: pattern plus its message as description.
    assert properties['regex']['pattern'] == r'[ABC]12{3}'
    assert properties['regex']['description'] == 'must have an A, B, or C followed by 1222'

    # Decimal fields: numbers (or decimal-format strings) with bounds
    # and a multipleOf derived from decimal_places.
    assert properties['decimal1']['type'] == 'number'
    assert properties['decimal1']['multipleOf'] == .01
    assert properties['decimal1']['maximum'] == 10000
    assert properties['decimal1']['minimum'] == -10000

    assert properties['decimal2']['type'] == 'number'
    assert properties['decimal2']['multipleOf'] == .0001

    assert properties['decimal3'] == {
        'type': 'string', 'format': 'decimal', 'maximum': 1000000, 'minimum': -1000000, 'multipleOf': 0.01
    }
    assert properties['decimal4'] == {
        'type': 'string', 'format': 'decimal', 'maximum': 1000000, 'minimum': -1000000, 'multipleOf': 0.01
    }
    assert properties['decimal5'] == {
        'type': 'string', 'format': 'decimal', 'maximum': 10000, 'minimum': -10000, 'multipleOf': 0.01
    }

    assert properties['email']['type'] == 'string'
    assert properties['email']['format'] == 'email'
    assert properties['email']['default'] == '<EMAIL>'

    assert properties['url']['type'] == 'string'
    assert properties['url']['nullable'] is True
    assert properties['url']['default'] == 'http://www.example.com'
    # The URL validator's regex ends in \Z, which must be stripped for
    # JSON-Schema compatibility.
    assert '\\Z' not in properties['url']['pattern']

    assert properties['uuid']['type'] == 'string'
    assert properties['uuid']['format'] == 'uuid'

    assert properties['ip4']['type'] == 'string'
    assert properties['ip4']['format'] == 'ipv4'
    assert properties['ip6']['type'] == 'string'
    assert properties['ip6']['format'] == 'ipv6'
    # Generic IP field (either protocol): no single format applies.
    assert properties['ip']['type'] == 'string'
    assert 'format' not in properties['ip']
def test_overridden_tags(self):
    """Explicit tags passed to AutoSchema end up on the operation."""
    class ExampleStringTagsViewSet(views.ExampleGenericAPIView):
        schema = AutoSchema(tags=['example1', 'example2'])

    url_patterns = [
        path('test/', ExampleStringTagsViewSet.as_view()),
    ]
    generator = SchemaGenerator(patterns=url_patterns)
    schema = generator.get_schema(request=create_request('/'))
    assert schema['paths']['/test/']['get']['tags'] == ['example1', 'example2']
def test_overridden_get_tags_method(self):
    """Overriding get_tags() lets the tags vary per path."""
    class MySchema(AutoSchema):
        def get_tags(self, path, method):
            if path.endswith('/new/'):
                tags = ['tag1', 'tag2']
            elif path.endswith('/old/'):
                tags = ['tag2', 'tag3']
            else:
                tags = ['tag4', 'tag5']
            return tags

    class ExampleStringTagsViewSet(views.ExampleGenericViewSet):
        schema = MySchema()

    router = routers.SimpleRouter()
    router.register('example', ExampleStringTagsViewSet, basename="example")
    generator = SchemaGenerator(patterns=router.urls)
    schema = generator.get_schema(request=create_request('/'))
    assert schema['paths']['/example/new/']['get']['tags'] == ['tag1', 'tag2']
    assert schema['paths']['/example/old/']['get']['tags'] == ['tag2', 'tag3']
def test_auto_generated_apiview_tags(self):
    """Tags default to the first path segment, with '_' mapped to '-'.

    Fix: removed a redundant `pass` statement that followed the schema
    assignment in RestaurantAPIView's class body.
    """
    class RestaurantAPIView(views.ExampleGenericAPIView):
        schema = AutoSchema(operation_id_base="restaurant")

    class BranchAPIView(views.ExampleGenericAPIView):
        pass

    url_patterns = [
        path('any-dash_underscore/', RestaurantAPIView.as_view()),
        path('restaurants/branches/', BranchAPIView.as_view())
    ]
    generator = SchemaGenerator(patterns=url_patterns)
    schema = generator.get_schema(request=create_request('/'))
    assert schema['paths']['/any-dash_underscore/']['get']['tags'] == ['any-dash-underscore']
    assert schema['paths']['/restaurants/branches/']['get']['tags'] == ['restaurants']
@pytest.mark.skipif(uritemplate is None, reason='uritemplate not installed.')
@override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.openapi.AutoSchema'})
class TestGenerator(TestCase):
def test_override_settings(self):
    """DEFAULT_SCHEMA_CLASS from settings applies to plain APIViews."""
    assert isinstance(views.ExampleListView.schema, AutoSchema)
def test_paths_construction(self):
    """Construction of the `paths` key."""
    patterns = [
        path('example/', views.ExampleListView.as_view()),
    ]
    generator = SchemaGenerator(patterns=patterns)
    generator._initialise_endpoints()
    paths = generator.get_schema()["paths"]
    assert '/example/' in paths
    example_operations = paths['/example/']
    # Exactly two operations are expected for this view: GET and POST.
    assert len(example_operations) == 2
    assert 'get' in example_operations
    assert 'post' in example_operations
def test_prefixed_paths_construction(self):
    """Construction of the `paths` key maintains a common prefix."""
    patterns = [
        path('v1/example/', views.ExampleListView.as_view()),
        path('v1/example/{pk}/', views.ExampleDetailView.as_view()),
    ]
    generator = SchemaGenerator(patterns=patterns)
    generator._initialise_endpoints()
    paths = generator.get_schema()["paths"]
    # The shared 'v1/' prefix is kept; '{pk}' is normalised to '{id}'.
    assert '/v1/example/' in paths
    assert '/v1/example/{id}/' in paths
def test_mount_url_prefixed_to_paths(self):
    """An explicit generator mount url is prepended to every path."""
    patterns = [
        path('example/', views.ExampleListView.as_view()),
        path('example/{pk}/', views.ExampleDetailView.as_view()),
    ]
    generator = SchemaGenerator(patterns=patterns, url='/api')
    generator._initialise_endpoints()
    paths = generator.get_schema()["paths"]
    assert '/api/example/' in paths
    assert '/api/example/{id}/' in paths
def test_schema_construction(self):
    """The generated document carries the top-level OpenAPI keys."""
    generator = SchemaGenerator(patterns=[
        path('example/', views.ExampleListView.as_view()),
    ])
    document = generator.get_schema(request=create_request('/'))
    assert 'openapi' in document
    assert 'paths' in document
def test_schema_with_no_paths(self):
    """No url patterns means an empty (but present) `paths` mapping."""
    generator = SchemaGenerator(patterns=[])
    document = generator.get_schema(request=create_request('/'))
    assert document['paths'] == {}
def test_schema_information(self):
    """Title, version and description flow into the `info` object."""
    patterns = [
        path('example/', views.ExampleListView.as_view()),
    ]
    generator = SchemaGenerator(patterns=patterns, title='My title', version='1.2.3', description='My description')
    request = create_request('/')
    schema = generator.get_schema(request=request)
    assert schema['info']['title'] == 'My title'
    assert schema['info']['version'] == '1.2.3'
    assert schema['info']['description'] == 'My description'
def test_schema_information_empty(self):
    """Without explicit metadata, title and version default to ''."""
    patterns = [
        path('example/', views.ExampleListView.as_view()),
    ]
    generator = SchemaGenerator(patterns=patterns)
    request = create_request('/')
    schema = generator.get_schema(request=request)
    assert schema['info']['title'] == ''
    assert schema['info']['version'] == ''
def test_serializer_model(self):
    """A model-serializer-backed view contributes its component schema.

    Fixes: the docstring was a copy-paste of an unrelated test's, and a
    stray debug print() of the schema is removed.
    """
    patterns = [
        path('example/', views.ExampleGenericAPIViewModel.as_view()),
    ]
    generator = SchemaGenerator(patterns=patterns)
    request = create_request('/')
    schema = generator.get_schema(request=request)
    assert 'components' in schema
    assert 'schemas' in schema['components']
    assert 'ExampleModel' in schema['components']['schemas']
def test_authtoken_serializer(self):
    """obtain_auth_token exposes the AuthToken component: write-only
    credentials, read-only token.

    Fix: removed a stray debug print() of the schema.
    """
    patterns = [
        path('api-token-auth/', obtain_auth_token)
    ]
    generator = SchemaGenerator(patterns=patterns)
    request = create_request('/')
    schema = generator.get_schema(request=request)
    route = schema['paths']['/api-token-auth/']['post']
    body_schema = route['requestBody']['content']['application/json']['schema']

    assert body_schema == {
        '$ref': '#/components/schemas/AuthToken'
    }
    assert schema['components']['schemas']['AuthToken'] == {
        'type': 'object',
        'properties': {
            'username': {'type': 'string', 'writeOnly': True},
            'password': {'type': 'string', 'writeOnly': True},
            'token': {'type': 'string', 'readOnly': True},
        },
        'required': ['username', 'password']
    }
def test_component_name(self):
patterns = [
| |
# <gh_stars>0  (dataset artifact, kept as a comment — not valid Python)
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
class PchManufacturingRecord(Document):
    """Frappe DocType controller for Pch Manufacturing Record.

    No custom document behaviour; the server-side logic lives in the
    module-level whitelisted functions below.
    """
    pass
@frappe.whitelist()
def get_start_end_process_raw_materials(start_process, end_process, method):
    """Return raw-material rows for every process step between
    start_process and end_process (inclusive) of the given method.

    Fix: both branches of the original if/else called the same helper
    with the same effective arguments (start == end is just the
    degenerate range), so the duplicated branch was collapsed.
    """
    return get_pro_order_wise_manufacture_method_details_raw_items(
        start_process, end_process, method)
@frappe.whitelist()
def get_start_end_process_raw_materials_for_packing(start_process, end_process, method, item_made_list):
    """Return raw-material rows for the packing (start) process step.

    Bug fix: the helper accepts exactly three arguments
    (start_process, end_process, method); the original call passed
    item_made_list as a fourth positional argument and therefore always
    raised TypeError.  item_made_list stays in the signature for
    existing callers but is currently unused.
    TODO(review): confirm whether the rows should additionally be
    filtered by item_made_list (get_packing_raw_materials implements
    that filtering).
    """
    return get_pro_order_wise_manufacture_method_details_raw_items(
        start_process, start_process, method)
# product_order_wise_data_end
@frappe.whitelist()
def get_start_end_p_process_details(start_process, end_process, method):
    """Return process-step rows between start_process and end_process
    (inclusive) for the given manufacturing method.

    Fix: the original start == end special case called the same helper
    with identical effective arguments, so the duplicate branch was
    removed.
    """
    return get_pro_order_wise_process_details(start_process, end_process, method)
def get_pro_order_wise_process_details(start_process, end_process, method):
    """Fetch Pch Manufacturing Method Details rows whose process_order lies
    between the orders of the two named steps, ascending, for one method."""
    # Resolve each step name to its numeric process_order.
    start_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": start_process},
                                                   "process_order")
    end_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": end_process},
                                                 "process_order")
    mmd_process_details = frappe.db.sql("""select
        mmd.name,mmd.pch_process,mmd.pch_method,mmd.process_order,mmd.turnaround_time,mmd.touch_points
        from
        `tabPch Manufacturing Method Details` mmd
        where
        mmd.process_order>=%s and mmd.process_order<= %s and mmd.pch_method= %s order by mmd.process_order asc""",
        (start_process_pro_ord_no, end_process_pro_ord_no, method), as_dict=1)
    return mmd_process_details
def get_pro_order_wise_manufacture_method_details_raw_items(start_process, end_process, method):
    """Fetch the raw-material (RM Child) rows of every method detail whose
    process_order lies between the orders of the two named steps."""
    # Resolve each step name to its numeric process_order.
    start_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": start_process},
                                                   "process_order")
    end_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details", {"name": end_process},
                                                 "process_order")
    # NOTE(review): process_order and pch_method are unqualified in the
    # WHERE clause; they presumably resolve against mmd — confirm the
    # child table has no same-named columns (the query would be ambiguous).
    manufacture_method_details_raw_items = frappe.db.sql("""select
        mmd.name,mmd.pch_process,mmdi.item_code,mmdi.item_name,mmdi.qty_uom,mmdi.qty_per_unit_made,mmdi.consumption_type,mmdi.stock_uom,mmdi.conversion_factor,mmdi.operand,mmdi.qty_in_stock_uom
        from`tabPch Manufacturing Method Details` mmd,`tabPch Manufacturing Method Details RM Child` mmdi where
        mmd.name=mmdi.parent and process_order>=%s and process_order<= %s and pch_method= %s """,
        (start_process_pro_ord_no, end_process_pro_ord_no, method),
        as_dict=1)
    return manufacture_method_details_raw_items
#for packing
@frappe.whitelist()
def get_packing_raw_materials(multiple_method_items, start_process, end_process):
    """Return raw-material rows for the packing steps, restricted to the
    items actually being made.

    multiple_method_items is a JSON list of dicts carrying at least
    "item_made" (and "units_s_r", which is not used here).

    Security fix: the original built the SQL IN-list by string
    formatting with hand-quoted values (injectable from client input);
    item codes now go through parameterized %s placeholders.  Also
    guards the empty-list case (an empty IN () is invalid SQL) and
    drops the debug prints.
    """
    multiple_method_items = json.loads(multiple_method_items)
    start_process_pro_ord_no = frappe.db.get_value(
        "Pch Manufacturing Method Details", {"name": start_process}, "process_order")
    end_process_pro_ord_no = frappe.db.get_value(
        "Pch Manufacturing Method Details", {"name": end_process}, "process_order")

    # Preserve the original de-duplication of item codes via dict keys.
    item_made_json = {}
    for multiple_method_item in multiple_method_items:
        item_made_json[multiple_method_item.get("item_made")] = multiple_method_item.get("units_s_r")
    item_codes = list(item_made_json)
    if not item_codes:
        # Nothing to fetch; also avoids the invalid `in ()` clause.
        return []

    placeholders = ", ".join(["%s"] * len(item_codes))
    query = """select
        mmd.name, mmd.pch_process, mmd.item_code as item_made,
        mmdi.item_code, mmdi.item_name, mmdi.qty_uom, mmdi.qty_per_unit_made,
        mmdi.consumption_type, mmdi.stock_uom, mmdi.conversion_factor,
        mmdi.operand, mmdi.qty_in_stock_uom
        from `tabPch Manufacturing Method Details` mmd,
             `tabPch Manufacturing Method Details RM Child` mmdi
        where mmd.name = mmdi.parent
          and mmd.process_order >= %s and mmd.process_order <= %s
          and mmd.item_code in ({0})""".format(placeholders)
    values = [start_process_pro_ord_no, end_process_pro_ord_no] + item_codes
    packing_raw_materials = frappe.db.sql(query, tuple(values), as_dict=1)
    return packing_raw_materials
@frappe.whitelist()
def get_child_doc_data(doc_type, parent):
    """Return all child-table rows of `doc_type` belonging to `parent`.

    Security fix: `parent` was concatenated directly into the SQL string
    (injectable from client input); it is now passed as a bound query
    parameter.
    NOTE(review): `doc_type` still reaches the table name by string
    interpolation — it should be validated against known DocTypes
    (e.g. via frappe.get_meta) before trusting whitelisted callers.
    """
    table = "tab" + doc_type
    sql = "select * from `" + table + "` where parent=%s"
    doc_data = frappe.db.sql(sql, (parent,), as_dict=1)
    return doc_data
@frappe.whitelist()
def get_wh_ac_to_location(location_name, wh_type, process):
    """Return the warehouse of kind `wh_type` ('outbound_warehouse' or
    'inbound_warehouse') configured for `process` at the given location,
    or None when no configuration row exists."""
    rows = frappe.db.sql(
        """select outbound_warehouse,inbound_warehouse from `tabPch Locations Child` where parent = %s and process_name = %s """,
        (location_name, process), as_dict=1)
    if not rows:
        return None
    return rows[0][wh_type]
# Ak
@frappe.whitelist()
def validate_start_and_end_process(start_process, end_process):
    """Return 1 when start_process does not occur after end_process
    (compared by process_order), otherwise 0."""
    start_rows = frappe.db.sql(
        """select `process_order` as `start_process_order` from `tabPch Manufacturing Method Details` where name=%s""",
        (start_process), as_dict=1)
    end_rows = frappe.db.sql(
        """select `process_order` as `end_process_order` from `tabPch Manufacturing Method Details` where name=%s""",
        (end_process), as_dict=1)
    start_order = start_rows[0]["start_process_order"]
    end_order = end_rows[0]["end_process_order"]
    # End process must not come before the start process.
    return 0 if start_order > end_order else 1
# raw_material_transactions_start
# pch_locations_id,items
# issue from raw material wh of location
@frappe.whitelist()
def send_material_for_manufacturing(entity):
    """Issue raw materials from a location's warehouse into manufacturing.

    `entity` is a JSON string describing the movement: location,
    start_process, units_to_be_sr, req_items (raw materials to issue),
    method_items (items made), outbound/target warehouses.

    Creates up to three Stock Entries via create_stock_entry() /
    make_transfer(): a Material Issue, optionally a Material Receipt
    (only when start_process is the first process step), and a Material
    Transfer.  When a later entry fails, the earlier entries are
    compensated by setting their docstatus to 2 (presumably Frappe's
    cancelled state — confirm).  Returns a list of per-entry status
    dicts.
    """
    entity = json.loads(entity)
    # Location-level accounts and warehouses used by the stock entries.
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},
                                               "item_payload_account")
    units = entity.get("units_to_be_sr");
    location = entity.get("location");
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    raw_material_warehouse = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},
                                                 "raw_material_warehouse")
    start_process_pro_ord_no = frappe.db.get_value("Pch Manufacturing Method Details",
                                                   {"name": entity.get("start_process")}, "process_order")
    # --- Material Issue: take the requested raw materials out of the
    # location's raw-material warehouse.
    issue_items_list = []
    for i_row in entity.get("req_items"):
        item_dic = {
            "item_code": i_row.get("item_code"),
            # The dispatched quantity (in UOM) is issued, not the total
            # requested quantity ("total_qty" in an earlier revision).
            "qty": i_row.get("dispatched_quantity_in_uom"),
            "uom": i_row.get("qty_uom"),
            "conversion_factor": i_row.get("conversion_factor"),
            "t_wh": None,
            "s_wh": raw_material_warehouse,
            "item_payload_account": item_payload_account
        }
        issue_items_list.append(item_dic)
    se_issue_entity = {"action": "Material Issue", "items_list": issue_items_list, "company": company}
    se_issue = create_stock_entry(se_issue_entity)
    response = [];
    if se_issue[0]["Exception"] == "Not Occured":
        # Issue succeeded; record it and prepare the follow-up transfer.
        response.append({"Name": se_issue[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Issue"});
        # The transfer happens in every branch below.
        trans_entity = {
            "items": entity.get("method_items"),
            "s_wh": entity.get("outbound_warehouse"),
            "t_wh": entity.get("target_warehouse"),
            "units_to_be_sr": entity.get("units_to_be_sr"),
            "company": company
        }
        labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
        trans_entity["labour_account"] = labour_account
        trans_entity["isAdditionCost"] = 1
        # Carry the issued value forward as additional cost on the transfer.
        trans_entity["add_amount"] = frappe.db.get_value("Stock Entry", {"name": se_issue[0]["Name"]},
                                                         "total_outgoing_value")
        trans_entity["item_payload_account"] = item_payload_account
        start_process_pro_ord_no = int(start_process_pro_ord_no)
        if start_process_pro_ord_no == 1:
            # First process step: also receive the made items into the
            # outbound warehouse before transferring them on.
            receipt_items_list = []
            for i_row in entity.get("method_items"):
                val = i_row.get("qty_made")
                actual_qty = units * val;
                item_dic = {
                    "item_code": i_row.get("item_made"),
                    "qty": actual_qty,
                    "uom": i_row.get("qty_uom"),
                    "conversion_factor": i_row.get("conversion_factor"),
                    "t_wh": entity.get("outbound_warehouse"),
                    "s_wh": None,
                    "item_payload_account": item_payload_account
                }
                receipt_items_list.append(item_dic)
            se_rec_entity = {"action": "Material Receipt", "items_list": receipt_items_list, "company": company}
            se_receipt = create_stock_entry(se_rec_entity)
            if se_receipt[0]["Exception"] == "Not Occured":
                response.append(
                    {"Name": se_receipt[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Receipt"});
                se_transfer3 = make_transfer(trans_entity)
                if se_transfer3[0]["Exception"] == "Not Occured":
                    response.append({"Name": se_transfer3[0]["Name"], "Status": "Created",
                                     "Stock Entry Type": "Material Transfer"});
                else:
                    # Transfer failed: report it and compensate by
                    # cancelling (docstatus=2) the issue and receipt.
                    response.append({"Name": se_transfer3[0]["Name"], "Status": "Not Created",
                                     "Stock Entry Type": "Material Transfer"});
                    doc1 = frappe.get_doc("Stock Entry", se_issue[0]["Name"]);
                    doc1.docstatus = 2
                    doc1.save()
                    doc2 = frappe.get_doc("Stock Entry", se_receipt[0]["Name"]);
                    doc2.docstatus = 2
                    doc2.save()
            else:
                # Receipt failed: report it and cancel the issue.
                response.append(
                    {"Name": se_receipt[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Receipt"});
                doc1 = frappe.get_doc("Stock Entry", se_issue[0]["Name"]);
                doc1.docstatus = 2
                doc1.save()
        else:
            # Later process step: transfer only, no receipt.
            se_transfer2 = make_transfer(trans_entity)
            if se_transfer2[0]["Exception"] == "Not Occured":
                response.append(
                    {"Name": se_transfer2[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Transfer"});
            else:
                # Transfer failed: report it and cancel the issue.
                response.append({"Name": se_transfer2[0]["Name"], "Status": "Not Created",
                                 "Stock Entry Type": "Material Transfer"});
                doc1 = frappe.get_doc("Stock Entry", se_issue[0]["Name"]);
                doc1.docstatus = 2
                doc1.save()
    else:
        # The initial issue failed; nothing else is attempted.
        response.append({"Name": se_issue[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Issue"});
    return response
@frappe.whitelist()
def receive_material_for_manufacturing(entity):
    """Receive made items back into a location's receiving warehouse.

    `entity` is a JSON string with location, units_s_r, method_items,
    target_warehouse (subcontractor side) and receiving_warehouse.

    Builds a single Material Transfer stock entry from the subcontractor
    warehouse into the receiving warehouse, adding the subcontracting
    charge (rate * units) as an additional cost against the location's
    labour account.  Returns a one-element list with the entry's
    created / not-created status.
    """
    entity = json.loads(entity)
    labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},
                                               "item_payload_account")
    units = entity.get("units_s_r")
    location = entity.get("location");
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    response = [];
    # One transfer line per made item, scaled by the units received.
    transfer_items_list = []
    for i_row in entity.get("method_items"):
        val = i_row.get("qty_made")
        actual_qty = units * val;
        item_dic = {
            "item_code": i_row.get("item_made"),
            "qty": actual_qty,
            "uom": i_row.get("qty_uom"),
            "conversion_factor": i_row.get("conversion_factor"),
            "s_wh": entity.get("target_warehouse"),  # subcontractor wh
            "t_wh": entity.get("receiving_warehouse"),  # receiving_warehouse
            "item_payload_account": item_payload_account
        }
        transfer_items_list.append(item_dic)
    se_trans_entity = {"action": "Material Transfer", "items_list": transfer_items_list, "company": company}
    # Subcontracting charge carried as an additional cost on the entry.
    se_trans_entity["add_amount"] = entity.get("subcontracting_rate") * entity.get("units_s_r")
    se_trans_entity["labour_account"] = labour_account
    se_trans_entity["isAdditionCost"] = 1
    se_transfer = create_stock_entry(se_trans_entity)
    if (se_transfer[0]["Exception"] == "Not Occured"):
        response.append({"Name": se_transfer[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Transfer"});
    else:
        response.append(
            {"Name": se_transfer[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Transfer"});
    return response
# --- Packing flow ---
# Issue the raw materials first, reusing the same logic as
# send_material_for_manufacturing; roll everything back on failure.
@frappe.whitelist()
def send_material_for_packing(entity):
    """Issue raw materials and transfer made items for a packing job.

    For every "item made" row a Material Issue is created for its raw
    materials, then a Material Transfer moves the made item from the
    outbound warehouse to the packing (target) warehouse, carrying the
    issue's outgoing value as an additional cost.  If any step fails, all
    previously created Stock Entries are cancelled and the loop stops.

    Args:
        entity: JSON string with keys ``location``, ``multiple_method_items``,
            ``req_items``, ``outbound_warehouse`` and ``target_warehouse``.

    Returns:
        list: one status dict per Stock Entry attempted.
    """
    entity = json.loads(entity)
    response = []
    labour_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "labour_account")
    item_payload_account = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},"item_payload_account")
    company = frappe.db.get_value("Pch Locations", {"name": entity.get("location")}, "company");
    raw_material_warehouse = frappe.db.get_value("Pch Locations", {"name": entity.get("location")},"raw_material_warehouse")
    # One issue and one transfer per item made; every created entry name is
    # collected here so a later failure can cancel them all in one go.
    raw_trans_id_list =[]
    for im_row in entity.get("multiple_method_items"):
        issue_items_list = []
        # Collect the raw-material rows that belong to this item made.
        for i_row in entity.get("req_items"):
            if im_row.get("item_made") == i_row.get("item_made") :
                issue_item_dic = {
                    "item_code": i_row.get("item_code"),
                    "qty": i_row.get("dispatched_quantity_in_uom"),
                    "uom": i_row.get("qty_uom"),
                    "conversion_factor": i_row.get("conversion_factor"),
                    "t_wh": None,
                    "s_wh": raw_material_warehouse,
                    "item_payload_account": item_payload_account
                }
                issue_items_list.append(issue_item_dic)
        # Create the Material Issue for this item made's raw materials.
        se_issue_entity = {"action": "Material Issue", "items_list": issue_items_list, "company": company}
        se_issue = create_stock_entry(se_issue_entity)
        transfer_items_list = []
        if se_issue[0]["Exception"] == "Not Occured":
            raw_trans_id_list.append( se_issue[0]["Name"])
            response.append({"Name": se_issue[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Issue"});
            # Transfer the made item to the packing (target) warehouse.
            trans_item_dic = {
                "item_code": im_row.get("item_made"),
                "qty": im_row.get("units_s_r"),
                "uom": im_row.get("qty_uom"),
                "conversion_factor": im_row.get("conversion_factor"),
                "s_wh": entity.get("outbound_warehouse"),
                "t_wh": entity.get("target_warehouse"),
                "item_payload_account": item_payload_account
            }
            transfer_items_list.append(trans_item_dic)
            se_trans_entity = {"action": "Material Transfer", "items_list": transfer_items_list, "company": company}
            # The issue's outgoing value is carried as the additional cost.
            se_trans_entity["add_amount"] = frappe.db.get_value("Stock Entry", {"name": se_issue[0]["Name"]},"total_outgoing_value")
            se_trans_entity["labour_account"] = labour_account # only for send material for manufacturing
            se_trans_entity["isAdditionCost"] = 1
            se_transfer2 = create_stock_entry(se_trans_entity)
            if se_transfer2[0]["Exception"] == "Not Occured":
                raw_trans_id_list.append(se_transfer2[0]["Name"])
                response.append({"Name": se_transfer2[0]["Name"], "Status": "Created", "Stock Entry Type": "Material Transfer"});
            else:
                # Transfer failed: roll back everything created so far.
                cancel_raw_transactions(raw_trans_id_list)
                response.append({"Name": se_transfer2[0]["Name"], "Status": "Not Created","Stock Entry Type": "Material Transfer"});
                break
        else:
            # Issue failed: roll back everything created so far.
            cancel_raw_transactions(raw_trans_id_list)
            response.append({"Name": se_issue[0]["Name"], "Status": "Not Created", "Stock Entry Type": "Material Issue"});
            break
    return response
def cancel_raw_transactions(raw_trans_id_list) :
    """Cancel every Stock Entry named in *raw_trans_id_list*.

    Used to roll back partially created issue/transfer chains when a later
    step fails.  Follows the file's convention of cancelling by setting
    ``docstatus = 2`` and saving.
    """
    for entry_name in raw_trans_id_list:
        entry_doc = frappe.get_doc('Stock Entry', {'name': entry_name})
        if not entry_doc:
            frappe.throw("No such un-cancelled document")
        entry_doc.docstatus = 2
        entry_doc.save()
@frappe.whitelist()
def receive_material_from_packing(entity):
entity = json.loads(entity)
#print "receive_material_from_packing",entity
labour_account = frappe.db.get_value("Pch Locations", {"name": | |
resume(self, inter: disnake.ApplicationCommandInteraction):
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
embed = disnake.Embed(color=disnake.Colour.red())
if not player.paused:
embed.description = "A música não está pausada."
await send_message(inter, embed=embed)
return
await player.set_pause(False)
txt = ["retomou a música.", "Música retomada"]
await self.interaction_message(inter, txt)
@check_voice()
@has_source()
@is_dj()
@commands.dynamic_cooldown(user_cooldown(2, 10), commands.BucketType.member)
@commands.max_concurrency(1, commands.BucketType.member)
@commands.slash_command(description="Avançar/Retomar a música para um tempo específico.")
async def seek(
self,
inter: disnake.ApplicationCommandInteraction,
position: str = commands.Param(name="tempo", description="Tempo para avançar/voltar (ex: 1:45 / 40 / 0:30)", autocomplete=seek_suggestions)
):
embed = disnake.Embed(color=disnake.Colour.red())
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
if player.current.is_stream:
embed.description = "Você não pode usar este comando em uma livestream."
await send_message(inter, embed=embed)
return
position = position.split(" | ")[0]
seconds = string_to_seconds(position)
if seconds is None:
embed.description = "Você usou um tempo inválido! Use segundos (1 ou 2 digitos) ou no formato (minutos):(segundos)"
return await send_message(inter, embed=embed)
milliseconds = seconds * 1000
if milliseconds < 0:
milliseconds = 0
try:
await player.seek(milliseconds)
if player.paused:
await player.set_pause(False)
except Exception as e:
embed.description = f"Ocorreu um erro no comando\n```py\n{repr(e)}```."
await send_message(inter, embed=Embed)
return
txt = [
f"{'avançou' if milliseconds > player.position else 'voltou'} o tempo da música para: {time_format(milliseconds)}",
f"O tempo da música foi {'avançada' if milliseconds > player.position else 'retornada'} para: {time_format(milliseconds)}"
]
await self.interaction_message(inter, txt)
@check_voice()
@has_source()
@is_dj()
@commands.dynamic_cooldown(user_cooldown(3, 5), commands.BucketType.member)
@commands.slash_command(description="Selecionar modo de repetição entre: atual / fila ou desativar.")
async def loop_mode(
self,
inter: disnake.ApplicationCommandInteraction,
mode: Literal['current', 'queue', 'off'] = commands.Param(name="modo",
description="current = Música atual / queue = fila / off = desativar",
default=lambda inter: 'off' if inter.player.loop else 'current'
)
):
player = inter.player
if mode == 'off':
mode = False
if mode == player.loop:
await self.interaction_message(inter, "Não teve alteração no modo de repetição atual.")
return
if mode:
txt = [f"ativou a repetição da {'música' if mode == 'current' else 'fila'}.", f"Repetição da {'música' if mode == 'current' else 'fila'} ativada com sucesso."]
else:
txt = ['desativou a repetição.', "Repetição desativada."]
player.loop = mode
await self.interaction_message(inter, txt)
@check_voice()
@has_source()
@is_dj()
@commands.dynamic_cooldown(user_cooldown(3, 5), commands.BucketType.member)
@commands.slash_command(description="Definir quantidade de repetições da música atual.")
async def loop_amount(
self,
inter: disnake.ApplicationCommandInteraction,
value: int = commands.Param(name="valor", description="número de repetições.")
):
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
player.current.repeats = value
embed = disnake.Embed(color=self.bot.get_color(inter.me))
txt = f"{inter.author.mention} definiu a quantidade de repetições da música " \
f"[`{(fix_characters(player.current.title, 25))}`]({player.current.uri}) para **{value}**."
player.command_log = txt
embed.description=f"**Quantidade de repetições [{value}] definida para a música:** [`{player.current.title}`]({player.current.uri})"
embed.set_thumbnail(url=player.current.thumb)
await inter.send(embed=embed, ephemeral=True)
await player.update_message()
@check_voice()
@has_player()
@is_dj()
@commands.slash_command(description="Remover uma música específica da fila.")
async def remove(
self,
inter: disnake.ApplicationCommandInteraction,
query: str = commands.Param(name="nome", description="Nome da música completo.", autocomplete=queue_tracks)
):
embed = disnake.Embed(color=disnake.Colour.red())
index = get_track_index(inter, query)
if index is None:
embed.description = f"{inter.author.mention} **não há músicas na fila com o nome: {query}**"
await inter.send(embed=embed, ephemeral=True)
return
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
track = player.queue[index]
player.queue.remove(track)
embed = disnake.Embed(color=disnake.Colour.green())
txt = f"{inter.author.mention} removeu a música [`{(fix_characters(track.title, 25))}`]({track.uri}) da fila."
player.command_log = txt
embed.description=f"**Música removida:** [`{track.title}`]({track.uri})"
embed.set_thumbnail(url=track.thumb)
await inter.send(embed=embed, ephemeral=True)
await player.update_message()
@check_voice()
@has_player()
@is_dj()
@commands.dynamic_cooldown(user_cooldown(2, 10), commands.BucketType.guild)
@commands.slash_command(description="Readicionar as músicas tocadas na fila.")
async def readd(self, inter: disnake.ApplicationCommandInteraction):
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
embed = disnake.Embed(color=disnake.Colour.red())
if not player.played:
embed.description = f"{inter.author.mention} **não há músicas tocadas.**"
await inter.send(embed=embed, ephemeral=True)
return
embed.colour = disnake.Colour.green()
txt = f"{inter.author.mention} **readicionou [{(qsize:=len(player.played))}] música(s) tocada(s) na fila.**"
player.played.reverse()
player.queue.extend(player.played)
player.played.clear()
player.command_log = txt
embed.description = f"**você readicionou {qsize} música(s).**"
await inter.send(embed=embed, ephemeral=True)
await player.update_message()
if not player.current:
await player.process_next()
else:
await player.update_message()
    @check_voice()
    @has_source()
    @is_dj()
    @commands.dynamic_cooldown(user_cooldown(2, 8), commands.BucketType.guild)
    @commands.slash_command(description="Pular para a música especificada.")
    async def skipto(
            self,
            inter: disnake.ApplicationCommandInteraction, *,
            query: str = commands.Param(name="nome", description="Nome da música completo.", autocomplete=queue_tracks)
    ):
        """Skip ahead to the named track in the queue."""
        embed = disnake.Embed(color=disnake.Colour.red())

        index = get_track_index(inter, query)

        if index is None:
            embed.description = f"{inter.author.mention} **não há músicas na fila com o nome: {query}**"
            await inter.send(embed=embed, ephemeral=True)
            return

        player: Union[LavalinkPlayer, YTDLPlayer] = inter.player

        track = player.queue[index]

        # Put the interrupted track back in the queue before jumping.
        # NOTE(review): presumably last_track holds the track being skipped;
        # if it is None this appends None to the queue — confirm upstream.
        player.queue.append(player.last_track)
        player.last_track = None

        # Single-track loop would immediately replay the skipped track.
        if player.loop == "current":
            player.loop = False

        # Rotate the deque so the chosen track becomes the next one.
        if index > 0:
            player.queue.rotate(0 - (index))

        embed.colour = disnake.Colour.green()

        player.command_log = f"{inter.author.mention} pulou para a música atual"
        embed.description = f"**Você pulou para a música:** [`{track.title}`]({track.uri})"
        embed.set_thumbnail(track.thumb)
        await inter.send(embed=embed, ephemeral=True)

        # Stopping the current track makes the player advance to the queue head.
        await player.stop()
@check_voice()
@has_source()
@is_dj()
@commands.slash_command(description="Move uma música para a posição especificada da fila.")
async def move(
self,
inter: disnake.ApplicationCommandInteraction,
query: str = commands.Param(name="nome", description="Nome da música completo.", autocomplete=queue_tracks),
position: int = commands.Param(name="posição", description="Posição de destino na fila.", default=1)
):
embed = disnake.Embed(colour=disnake.Colour.red())
if position < 1:
embed.description = f"{inter.author.mention}, {position} não é uma posição válida."
await send_message(inter, embed=embed)
return
index = get_track_index(inter, query)
if index is None:
embed.description = f"{inter.author.mention} **não há músicas na fila com o nome: {query}**"
await inter.send(embed=embed, ephemeral=True)
return
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
track = player.queue[index]
player.queue.remove(track)
player.queue.insert(int(position) - 1, track)
txt = f"{inter.author.mention} moveu a música [`{fix_characters(track.title, limit=25)}`]({track.uri}) para a posição **[{position}]** da fila."
embed = disnake.Embed(color=disnake.Colour.green())
embed.description = f"**A música foi movida para a posição {position} da fila:** [`{fix_characters(track.title)}`]({track.uri})"
embed.set_thumbnail(url=track.thumb)
player.command_log = txt
await inter.send(embed=embed, ephemeral=True)
await player.update_message()
@check_voice()
@has_source()
@is_dj()
@commands.dynamic_cooldown(user_cooldown(2, 10), commands.BucketType.guild)
@commands.slash_command(description="Rotacionar a fila para a música especificada.")
async def rotate(
self,
inter: disnake.ApplicationCommandInteraction,
query: str = commands.Param(
name="nome", description="Nome da música completo.", autocomplete=queue_tracks)
):
embed = disnake.Embed(colour=disnake.Colour.red())
index = get_track_index(inter, query)
if index is None:
embed.description = f"{inter.author.mention} **não há músicas na fila com o nome: {query}**"
await inter.send(embed=embed, ephemeral=True)
return
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
track = player.queue[index]
if index <= 0:
embed.description = f"{inter.author.mention} **a música **[`{track.title}`]({track.uri}) já é a próxima da fila."
await inter.send(embed=embed, ephemeral=True)
return
player.queue.rotate(0 - (index))
embed.colour = disnake.Colour.green()
txt = f"{inter.author.mention} rotacionou a fila para a música [`{(fix_characters(track.title, limit=25))}`]({track.uri})."
embed.description = f"**Fila rotacionada para a música:** [`{track.title}`]({track.uri})."
embed.set_thumbnail(url=track.thumb)
player.command_log = txt
await inter.send(embed=embed, ephemeral=True)
await player.update_message()
    @check_voice()
    @has_source()
    @is_dj()
    @commands.cooldown(1, 5, commands.BucketType.guild)
    @commands.slash_command(description="Ativar/Desativar o efeito nightcore (Música acelerada com tom mais agudo).")
    async def nightcore(self, inter: disnake.ApplicationCommandInteraction):
        """Toggle the nightcore effect (faster playback, higher pitch)."""
        player: Union[LavalinkPlayer, YTDLPlayer] = inter.player

        player.nightcore = not player.nightcore

        if player.nightcore:
            # Enable: slightly faster and higher-pitched playback.
            await player.set_timescale(pitch=1.2, speed=1.1)
            txt = ["ativou", "ativado"]
        else:
            # Disable: drop the timescale filter if present, then re-apply
            # the remaining filters.
            try:
                del player.filters["timescale"]
            except:
                pass
            await player.update_filters()
            txt = ["desativou", "desativado"]

        txt = [f"{txt[0]} o efeito nightcore.", f"Efeito nightcore {txt[1]}."]

        # tempfix: in YTDL mode enabling an effect triggers the stop
        # function, so defer the component interaction to avoid a timeout.
        if isinstance(player, YTDLPlayer) and isinstance(inter, disnake.MessageInteraction):
            try:
                await inter.response.defer()
            except:
                pass

        await self.interaction_message(inter, txt)
@has_source()
@commands.slash_command(description="Reenvia a mensagem do player com a música atual.")
async def nowplaying(self, inter: disnake.ApplicationCommandInteraction):
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
if player.static:
await inter.send("este comando não pode ser usado no modo fixo do player.", ephemeral=True)
return
await player.destroy_message()
await player.invoke_np()
await inter.send("Player reenviado com sucesso!", ephemeral=True)
    @has_player()
    @is_dj()
    @commands.user_command(name="add dj")
    async def adddj_u(self, inter: disnake.UserCommandInteraction):
        """Context-menu entry point: delegate to the add_dj command."""
        await self.add_dj(inter, user=inter.target)
@has_player()
@is_dj()
@commands.slash_command(description="Adicionar um membro à lista de DJ's na sessão atual do player.")
async def add_dj(
self,
inter: disnake.ApplicationCommandInteraction, *,
user: disnake.Member = commands.Param(name="membro", description="Membro a ser adicionado.")
):
error_text = None
if user == inter.author:
error_text = "Você não pode adicionar a si mesmo na lista de DJ's."
elif user.guild_permissions.manage_channels:
error_text = f"você não pode adicionar o membro {user.mention} na lista de DJ's (ele(a) possui permissão de **gerenciar canais**)."
elif user in inter.player.dj:
error_text = f"O membro {user.mention} já está na lista de DJ's"
if error_text:
embed = disnake.Embed(color=disnake.Colour.red(), description=error_text)
await send_message(inter, embed=embed)
return
inter.player.dj.append(user)
text = [f"adicionou {user.mention} à lista de DJ's.", f"{user.mention} foi adicionado à lista de DJ's."]
if (inter.player.static and inter.channel == inter.player.text_channel) or isinstance(inter.application_command, commands.InvokableApplicationCommand):
await inter.send(f"{inter.target.mention} adicionado à lista de DJ's!")
await self.interaction_message(inter, txt=text, update=True)
@check_voice()
@has_player()
@is_dj()
@commands.slash_command(description="Parar o player e me desconectar do canal de voz.")
async def stop(self, inter: disnake.Interaction):
player: Union[LavalinkPlayer, YTDLPlayer] = inter.player
embed = disnake.Embed(color=disnake.Colour.red())
player.command_log = f"{inter.author.mention} **parou o player!**"
embed.description = f"**{inter.author.mention} parou o player!**"
await inter.send(embed=embed, ephemeral=player.static)
await player.destroy()
    @has_player()
    @commands.slash_command(name="queue")
    async def q(self, inter):
        # Parent slash-command group for queue subcommands (e.g. shuffle);
        # the body is intentionally empty.
        pass
@check_voice()
@is_dj()
@commands.dynamic_cooldown(user_cooldown(3, 5), commands.BucketType.member)
@q.sub_command(name="shuffle", description="Misturar as músicas da fila")
async def shuffle_(self, inter: disnake.ApplicationCommandInteraction):
player = inter.player
if len(player.queue) < 3:
embed = disnake.Embed(color=disnake.Colour.red())
embed.description = "A fila tem que ter no mínimo 3 músicas para ser misturada."
await send_message(inter, embed=embed)
return
shuffle(player.queue)
txt = [f"misturou | |
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, collections.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 1 # used to be 'a' but that's no longer an error
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
    def test_exec(self):
        """exec() with explicit globals and separate globals/locals dicts."""
        g = {}
        exec('z = 1', g)
        # exec() injects __builtins__ into the globals dict; strip it so
        # the dict can be compared for equality.
        if '__builtins__' in g:
            del g['__builtins__']
        self.assertEqual(g, {'z': 1})

        exec('z = 1+1', g)
        if '__builtins__' in g:
            del g['__builtins__']
        self.assertEqual(g, {'z': 2})
        g = {}
        l = {}

        with check_warnings():
            warnings.filterwarnings("ignore", "global statement",
                    module="<string>")
            # `global a` writes into g while the plain `b = 2` lands in l.
            exec('global a; a = 1; b = 2', g, l)
        if '__builtins__' in g:
            del g['__builtins__']
        if '__builtins__' in l:
            del l['__builtins__']
        self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
    def test_exec_globals(self):
        """exec() with restricted or read-only __builtins__ and globals."""
        code = compile("print('Hello World!')", "", "exec")
        # no builtin function
        self.assertRaisesRegex(NameError, "name 'print' is not defined",
                               exec, code, {'__builtins__': {}})
        # __builtins__ must be a mapping type
        self.assertRaises(TypeError,
                          exec, code, {'__builtins__': 123})

        # no __build_class__ function
        code = compile("class A: pass", "", "exec")
        self.assertRaisesRegex(NameError, "__build_class__ not found",
                               exec, code, {'__builtins__': {}})

        class frozendict_error(Exception):
            pass

        # A dict that rejects all writes, used to verify that exec()
        # surfaces store failures from builtins/globals mappings.
        class frozendict(dict):
            def __setitem__(self, key, value):
                raise frozendict_error("frozendict is readonly")

        # read-only builtins
        if isinstance(__builtins__, types.ModuleType):
            frozen_builtins = frozendict(__builtins__.__dict__)
        else:
            frozen_builtins = frozendict(__builtins__)
        code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec")
        self.assertRaises(frozendict_error,
                          exec, code, {'__builtins__': frozen_builtins})

        # read-only globals
        namespace = frozendict({})
        code = compile("x=1", "test", "exec")
        self.assertRaises(frozendict_error,
                          exec, code, namespace)
    def test_exec_redirected(self):
        """exec() must fail cleanly when sys.stdout cannot flush()."""
        savestdout = sys.stdout
        sys.stdout = None # Whatever that cannot flush()
        try:
            # Used to raise SystemError('error return without exception set')
            exec('a')
        except NameError:
            pass
        finally:
            # Always restore the real stdout for subsequent tests.
            sys.stdout = savestdout
    def test_filter(self):
        """filter() with predicates, None (truthiness) and bad inputs."""
        self.assertEqual(list(filter(lambda c: 'a' <= c <= 'z', 'Hello World')), list('elloorld'))
        # A predicate of None keeps only truthy items.
        self.assertEqual(list(filter(None, [1, 'hello', [], [3], '', None, 9, 0])), [1, 'hello', [3], 9])
        self.assertEqual(list(filter(lambda x: x > 0, [1, -3, 9, 0, 2])), [1, 9, 2])
        self.assertEqual(list(filter(None, Squares(10))), [1, 4, 9, 16, 25, 36, 49, 64, 81])
        self.assertEqual(list(filter(lambda x: x%2, Squares(10))), [1, 9, 25, 49, 81])
        def identity(item):
            return 1
        filter(identity, Squares(5))
        self.assertRaises(TypeError, filter)
        # Errors raised while iterating the source propagate out.
        class BadSeq(object):
            def __getitem__(self, index):
                if index<4:
                    return 42
                raise ValueError
        self.assertRaises(ValueError, list, filter(lambda x: x, BadSeq()))
        # Errors from calling the predicate propagate out too.
        def badfunc():
            pass
        self.assertRaises(TypeError, list, filter(badfunc, range(5)))

        # test bltinmodule.c::filtertuple()
        self.assertEqual(list(filter(None, (1, 2))), [1, 2])
        self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4])
        self.assertRaises(TypeError, list, filter(42, (1, 2)))
    def test_filter_pickle(self):
        """filter iterators survive pickling at every supported protocol."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            # Two identical iterators: pickle one, compare with the other.
            f1 = filter(filter_char, "abcdeabcde")
            f2 = filter(filter_char, "abcdeabcde")
            self.check_iter_pickle(f1, list(f2), proto)
    def test_getattr(self):
        """getattr() basic lookup, argument validation and missing attrs."""
        self.assertTrue(getattr(sys, 'stdout') is sys.stdout)
        # The attribute name must be a string, even when a default is given.
        self.assertRaises(TypeError, getattr, sys, 1)
        self.assertRaises(TypeError, getattr, sys, 1, "foo")
        self.assertRaises(TypeError, getattr)
        self.assertRaises(AttributeError, getattr, sys, chr(sys.maxunicode))
        # unicode surrogates are not encodable to the default encoding (utf8)
        self.assertRaises(AttributeError, getattr, 1, "\uDAD1\uD51E")
    def test_hasattr(self):
        """hasattr() truthiness, argument validation and error propagation."""
        self.assertTrue(hasattr(sys, 'stdout'))
        # The attribute name must be a string.
        self.assertRaises(TypeError, hasattr, sys, 1)
        self.assertRaises(TypeError, hasattr)
        self.assertEqual(False, hasattr(sys, chr(sys.maxunicode)))

        # Check that hasattr propagates all exceptions outside of
        # AttributeError.
        class A:
            def __getattr__(self, what):
                raise SystemExit
        self.assertRaises(SystemExit, hasattr, A(), "b")
        class B:
            def __getattr__(self, what):
                raise ValueError
        self.assertRaises(ValueError, hasattr, B(), "b")
    def test_hash(self):
        """hash() consistency across equal values and __hash__ overrides."""
        hash(None)
        self.assertEqual(hash(1), hash(1))
        # Numerically equal values hash equal even across types.
        self.assertEqual(hash(1), hash(1.0))
        hash('spam')
        self.assertEqual(hash('spam'), hash(b'spam'))
        hash((0,1,2,3))
        def f(): pass
        # Mutable containers are unhashable.
        self.assertRaises(TypeError, hash, [])
        self.assertRaises(TypeError, hash, {})
        # Bug 1536021: Allow hash to return long objects
        class X:
            def __hash__(self):
                return 2**100
        self.assertEqual(type(hash(X())), int)
        class Z(int):
            def __hash__(self):
                return self
        self.assertEqual(hash(Z(42)), hash(42))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(-16), '-0x10')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, alphabetized as if it were raw_input
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
for l in lists:
i = iter(l)
self.assertEqual(next(i), '1')
self.assertEqual(next(i), '2')
self.assertRaises(StopIteration, next, i)
    def test_isinstance(self):
        """isinstance() across a small class hierarchy plus bad arguments."""
        class C:
            pass
        class D(C):
            pass
        class E:
            pass
        c = C()
        d = D()
        e = E()
        self.assertTrue(isinstance(c, C))
        # Instances of a subclass count as instances of the base.
        self.assertTrue(isinstance(d, C))
        self.assertTrue(not isinstance(e, C))
        self.assertTrue(not isinstance(c, D))
        self.assertTrue(not isinstance('foo', E))
        # The second argument must be a type (or tuple of types).
        self.assertRaises(TypeError, isinstance, E, 'foo')
        self.assertRaises(TypeError, isinstance)
    def test_issubclass(self):
        """issubclass() reflexivity, direction and argument validation."""
        class C:
            pass
        class D(C):
            pass
        class E:
            pass
        c = C()
        d = D()
        e = E()
        self.assertTrue(issubclass(D, C))
        # Every class is a subclass of itself.
        self.assertTrue(issubclass(C, C))
        self.assertTrue(not issubclass(C, D))
        # Both arguments must be classes.
        self.assertRaises(TypeError, issubclass, 'foo', E)
        self.assertRaises(TypeError, issubclass, E, 'foo')
        self.assertRaises(TypeError, issubclass)
    def test_len(self):
        """len() on builtin containers and misbehaving __len__ results."""
        self.assertEqual(len('123'), 3)
        self.assertEqual(len(()), 0)
        self.assertEqual(len((1, 2, 3, 4)), 4)
        self.assertEqual(len([1, 2, 3, 4]), 4)
        self.assertEqual(len({}), 0)
        self.assertEqual(len({'a':1, 'b': 2}), 2)
        # Exceptions raised by __len__ propagate unchanged.
        class BadSeq:
            def __len__(self):
                raise ValueError
        self.assertRaises(ValueError, len, BadSeq())
        # __len__ must return a non-negative int that fits in Py_ssize_t.
        class InvalidLen:
            def __len__(self):
                return None
        self.assertRaises(TypeError, len, InvalidLen())
        class FloatLen:
            def __len__(self):
                return 4.5
        self.assertRaises(TypeError, len, FloatLen())
        class NegativeLen:
            def __len__(self):
                return -10
        self.assertRaises(ValueError, len, NegativeLen())
        class HugeLen:
            def __len__(self):
                return sys.maxsize + 1
        self.assertRaises(OverflowError, len, HugeLen())
        class HugeNegativeLen:
            def __len__(self):
                return -sys.maxsize-10
        self.assertRaises(ValueError, len, HugeNegativeLen())
        class NoLenMethod(object): pass
        self.assertRaises(TypeError, len, NoLenMethod())
    def test_map(self):
        """map() with one or more iterables and C or Python callables."""
        self.assertEqual(
            list(map(lambda x: x*x, range(1,4))),
            [1, 4, 9]
        )
        try:
            from math import sqrt
        except ImportError:
            # Fallback in case the math module is unavailable.
            def sqrt(x):
                return pow(x, 0.5)
        self.assertEqual(
            list(map(lambda x: list(map(sqrt, x)), [[16, 4], [81, 9]])),
            [[4.0, 2.0], [9.0, 3.0]]
        )
        self.assertEqual(
            list(map(lambda x, y: x+y, [1,3,2], [9,1,4])),
            [10, 4, 6]
        )

        # A varargs callable works with any number of input iterables.
        def plus(*v):
            accu = 0
            for i in v: accu = accu + i
            return accu
        self.assertEqual(
            list(map(plus, [1, 3, 7])),
            [1, 3, 7]
        )
        self.assertEqual(
            list(map(plus, [1, 3, 7], [4, 9, 2])),
            [1+4, 3+9, 7+2]
        )
        self.assertEqual(
            list(map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])),
            [1+4+1, 3+9+1, 7+2+0]
        )
        self.assertEqual(
            list(map(int, Squares(10))),
            [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
        )
        def Max(a, b):
            if a is None:
                return b
            if b is None:
                return a
            return max(a, b)
        # map() stops at the shortest of its input iterables.
        self.assertEqual(
            list(map(Max, Squares(3), Squares(2))),
            [0, 1]
        )
        self.assertRaises(TypeError, map)
        self.assertRaises(TypeError, map, lambda x: x, 42)
        # Errors while creating the iterator propagate at iteration time.
        class BadSeq:
            def __iter__(self):
                raise ValueError
                yield None
        self.assertRaises(ValueError, list, map(lambda x: x, BadSeq()))
        # Errors from the mapped callable propagate too.
        def badfunc(x):
            raise RuntimeError
        self.assertRaises(RuntimeError, list, map(badfunc, range(5)))
    def test_map_pickle(self):
        """map iterators survive pickling at every supported protocol."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            # Two identical iterators: pickle one, compare with the other.
            m1 = map(map_char, "Is this the real life?")
            m2 = map(map_char, "Is this the real life?")
            self.check_iter_pickle(m1, list(m2), proto)
    def test_max(self):
        """max() over strings, mixed numerics, key= and default= handling."""
        self.assertEqual(max('123123'), '3')
        self.assertEqual(max(1, 2, 3), 3)
        self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
        self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)

        # With ties, the first maximal argument is returned (int vs float).
        self.assertEqual(max(1, 2, 3.0), 3.0)
        self.assertEqual(max(1, 2.0, 3), 3)
        self.assertEqual(max(1.0, 2, 3), 3)

        self.assertRaises(TypeError, max)
        self.assertRaises(TypeError, max, 42)
        self.assertRaises(ValueError, max, ())
        # Errors raised while iterating the argument propagate out.
        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, max, BadSeq())

        # Each of these invocations is invalid and must raise TypeError.
        for stmt in (
            "max(key=int)",                 # no args
            "max(default=None)",
            "max(1, 2, default=None)",      # require container for default
            "max(default=None, key=int)",
            "max(1, key=int)",              # single arg not iterable
            "max(1, 2, keystone=int)",      # wrong keyword
            "max(1, 2, key=int, abc=int)",  # two many keywords
            "max(1, 2, key=1)",             # keyfunc is not callable
            ):
            try:
                exec(stmt, globals())
            except TypeError:
                pass
            else:
                self.fail(stmt)

        self.assertEqual(max((1,), key=neg), 1)     # one elem iterable
        self.assertEqual(max((1,2), key=neg), 1)    # two elem iterable
        self.assertEqual(max(1, 2, key=neg), 1)     # two elems

        self.assertEqual(max((), default=None), None)    # zero elem iterable
        self.assertEqual(max((1,), default=None), 1)     # one elem iterable
        self.assertEqual(max((1,2), default=None), 2)    # two elem iterable

        self.assertEqual(max((), default=1, key=neg), 1)
        self.assertEqual(max((1, 2), default=3, key=neg), 1)

        # key= must agree with an explicit sort over random data.
        data = [random.randrange(200) for i in range(100)]
        keys = dict((elem, random.randrange(50)) for elem in data)
        f = keys.__getitem__
        self.assertEqual(max(data, key=f),
                         sorted(reversed(data), key=f)[-1])
def test_min(self):
self.assertEqual(min('123123'), '1')
self.assertEqual(min(1, 2, 3), 1)
self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
self.assertEqual(min([1, 2, 3, 1, 2, | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
import numpy as np
import pyny3d.geoms as pyny
class ShadowsManager(object):
"""
Class in charge of the management for the shadows simulations.
It can be initialize as standalone object or associated to a
``pyny.Space`` through the ``.shadow`` method.
The only argument needed for the simulator to run is ``t`` or ``dt``
and the ``latitude``. If the ShadowsManager is initialized from
``pyny.Space.shadows`` it is possible to run the execution in *auto*
mode without inputing anything.
    Some explanations about how it works:
The shadows are computed discretely using a set of distributed
**sensible points** through the model. These points can be set with
the ``.get_height(attach=True)`` or the ``.mesh()`` methods.
At the same time, the sun positions are also discretized. The
simulator needs a finite number of positions, given by their azimuth
and zenit. Anyway, it is more convenient to give it a time vector
and the latitude and let the program calculate the sun positions for
you.
For convenience, the time is managed in "absolute minutes" within
the range of a year in the computations, that is, the first possible
interval [0] is the Jan 1 00:00 and the last [525599] is Dec 31
23:59. February 29 is not taken into account. It is possible to
automatically create an equally spaced t vector by giving a fixed
    interval, although the inputted vectors can be irregular.
In view of the fact that there are, potentially, more than 8000
sunnys half-hour intervals in an year, the program precomputes a
discretization for the Solar Horizont (azimuth, zenit pairs) and
classify the *t* and *data* vectors. The goal is to approximate
these 8000 interval simulations to a less than 340 with an maximum
error of 3 deg (0.05rads).
This discretization is manually\* adjustable to be able to fastly
compute large datasets at low resolution before the serious
computations start.
For now, the Solar Horizont discretization can only be automatically
computed by a mesh. In the future more complex and convenient
discretizations will be available. Anyway, it is possible to input
a custom discretization by manually introducing the atributtes
described in :func:`Voronoi_SH`.
Finally,
the atributes which can be safely manipulated to tune up the
simulator before the computations are all which start with *arg_*
(= default values):
* .arg_data
* .arg_t
* .arg_dt
* .arg_latitude = None
* .arg_run_true_time = False
* .arg_longitude = None (only for ``true_time``)
* .arg_UTC = None (only for ``true_time``)
* .arg_zenitmin = 0.1 (minimum zenit, avoid irrelevant errors
from trigonometric approximations)
* .arg_vor_size = 0.15 (mesh_size of the Voronoi diagram)
:param space: 3D model to run the simulation.
:type space: ``pyny.Space``
:param data: Data timeseries to project on the 3D model (radiation,
for example).
:type data: ndarray (shape=N), None
:param t: Time vector in absolute minutes or datetime objects
:type t: ndarray or list, None
:param dt: Interval time to generate t vector.
:type dt: int, None
:param latitude: Local latitude.
:type latitude: float (radians)
:returns: None
.. note:: \* In the future, the discretizations will be
automated based on error adjustment.
.. warning:: The shadows computation do not take care
of the holes\*, instead, they can be emulated by a collection of
polygons.
"""
def __init__(self, space, data=None, t=None, dt=None, latitude=None):
from pyny3d.shadows import Viz
self.viz = Viz(self)
self.space = space
# Arguments
self.arg_data = data
self.arg_t = t
self.arg_dt = dt
self.arg_latitude = latitude
self.arg_run_true_time = False
self.arg_longitude = None
self.arg_UTC = None
self.arg_zenitmin = 0.05
self.arg_vor_size = 0.15
# Processed information
## Precalculations
self.diff_t = None
self.integral = None
## Voronoi
self.t2vor_map = None
self.vor_freq = None
self.vor_surf = None
self.vor_centers = None
## get_sunpos
self.azimuth_zenit = None
self.true_time = None
## compute_shadows
self.light_vor = None
## project_data
self.proj_vor = None
self.proj_points = None
    def run(self):
        """
        Run the shadowing computation with the values stored in
        ``self.arg_``. Precomputed information is stored in:
            * **.diff_t** (*ndarray*): ``np.diff(t)``
            * **.integral** (*ndarray*): Trapezoidal data integration
              over time.
        The steps are:
            * :func:`get_sunpos`
            * :func:`Vonoroi_SH`
            * :func:`compute_shadows`
            * :func:`project_data`
        :returns: None
        """
        # Adapt series
        ## time: normalize arg_t to rounded absolute minutes, building it
        ## from arg_dt when no explicit time vector was given.
        if self.integral is None:
            if self.arg_t is not None:
                import datetime
                if type(self.arg_t[0]) == datetime.datetime:
                    self.arg_t = self.to_minutes(time_obj=self.arg_t)
                else:
                    self.arg_t = np.round(self.arg_t)
            elif self.arg_dt is not None:
                self.arg_dt = np.round(self.arg_dt)
                self.arg_t = self.to_minutes(dt=self.arg_dt)
            else:
                raise ValueError('At least one time parameter is needed.')
            self.diff_t = np.diff(self.arg_t)
            ## data: default to a unit series so the pipeline still runs when
            ## no data timeseries was supplied.
            if self.arg_data is None:
                self.arg_data = np.ones(self.arg_t.shape[0])
            # Trapezoidal integration per interval: rectangle under the lower
            # value plus the triangle up to the next value. A leading 0 keeps
            # .integral aligned with arg_t.
            dt = self.diff_t/60 # hs
            rect = self.arg_data[:-1]/1000*dt # kilounits
            triang_side = np.diff(self.arg_data)
            triang = 0.5*triang_side*dt
            self.integral = rect + triang
            self.integral = np.hstack((0, self.integral))
        # Computation: each stage is skipped when its output already exists,
        # so run() can be re-invoked cheaply after tweaking later-stage args.
        if self.azimuth_zenit is None:
            self.get_sunpos(self.arg_t, self.arg_run_true_time)
        if self.vor_centers is None:
            self.Vonoroi_SH(self.arg_vor_size)
        self.compute_shadows()
        self.project_data()
    def Vonoroi_SH(self, mesh_size=0.1):
        """
        Generates a equally spaced mesh on the Solar Horizont (SH).
        Computes the Voronoi diagram from a set of points given by pairs
        of (azimuth, zenit) values. This discretization completely
        covers all the Sun positions.
        The smaller mesh size, the better resolution obtained. It is
        important to note that this heavily affects the performance.
        The generated information is stored in:
            * **.t2vor_map** (*ndarray*): Mapping between time vector and
              the Voronoi diagram.
            * **.vor_freq** (*ndarray*): Number of times a Sun position
              is inside each polygon in the Voronoi diagram.
            * **.vor_surf** (*``pyny.Surface``*): Voronoi diagram.
            * **.vor_centers** (*ndarray`*): Mass center of the
              ``pyny.Polygons`` that form the Voronoi diagram.
        :param mesh_size: Mesh size for the square discretization of the
            Solar Horizont.
        :type mesh_size: float (in radians)
        :returns: None
        .. note:: In future versions this discretization will be
            improved substantially. For now, it is quite rigid and only
            admits square discretization.
        """
        from scipy.spatial import Voronoi
        from pyny3d.utils import sort_numpy
        # Temporarily disable polygon verification for speed; restored at the
        # end. NOTE(review): not exception-safe — an error below leaves
        # verify False; consider try/finally.
        state = pyny.Polygon.verify
        pyny.Polygon.verify = False
        # Sort and remove NaNs
        xy_sorted, order_back = sort_numpy(self.azimuth_zenit, col=1,
                                           order_back=True)
        # New grid: square mesh of seed points covering the whole azimuth
        # range and the zenit range padded by two cells on each side.
        x1 = np.arange(-np.pi, np.pi, mesh_size)
        y1 = np.arange(-mesh_size*2, np.pi/2+mesh_size*2, mesh_size)
        x1, y1 = np.meshgrid(x1, y1)
        centers = np.array([x1.ravel(), y1.ravel()]).T
        # Voronoi
        vor = Voronoi(centers)
        # Setting the SH polygons. NOTE(review): regions[1:] and the
        # len(v) > 3 filter presumably discard the unbounded/degenerate
        # regions — confirm against scipy.spatial.Voronoi semantics.
        pyny_polygons = [pyny.Polygon(vor.vertices[v], False)
                         for v in vor.regions[1:] if len(v) > 3]
        raw_surf = pyny.Surface(pyny_polygons)
        # Classify data into the polygons discretization
        map_ = raw_surf.classify(xy_sorted, edge=True, col=1,
                                 already_sorted=True)
        map_ = map_[order_back]
        # Selecting polygons with points inside, renumbering map_ to the
        # compacted polygon indices. NOTE(review): np.unique(map_)[1:] skips
        # the first unique value — presumably the "unclassified" marker from
        # classify(); verify.
        vor = []
        count = []
        for i, poly_i in enumerate(np.unique(map_)[1:]):
            vor.append(raw_surf[poly_i])
            bool_0 = map_==poly_i
            count.append(bool_0.sum())
            map_[bool_0] = i
        # Storing the information
        self.t2vor_map = map_
        self.vor_freq = np.array(count)
        self.vor_surf = pyny.Surface(vor)
        self.vor_centers = np.array([poly.get_centroid()[:2]
                                     for poly in self.vor_surf])
        pyny.Polygon.verify = state
def get_sunpos(self, t, true_time=False):
"""
Computes the Sun positions for the *t* time vector.
*t* have to be in absolute minutes (0 at 00:00 01 Jan). The and
in Sun positions calculated are in solar time, that is, maximun
solar zenit exactly at midday.
The generated information is stored in:
* **.azimuth_zenit** (*ndarray*)
* **.true_time** (*datetime*): local time
:param t: Absolute minutes vector.
:type t: ndarray (dtype=int)
:param true_time: If True, a datetime vector with the true local
time will be stored at ``.true_time``
:type true_time: bool
:returns: Equivalent times in absolute minutes in year.
:rtype: ndarray (dtype=int)
:returns: None
.. seealso:: :func:`to_minutes` to easily genetare valid input
t.
"""
import numpy as np
lat = self.arg_latitude
long = self.arg_longitude
alphamin = self.arg_zenitmin
# Solar calculations
day = np.modf(t/1440)[0]
fractional_year = 2*np.pi/(365*24*60)*(-24*60+t)
declination = 0.006918 - \
0.399912*np.cos(fractional_year) + \
0.070257*np.sin(fractional_year) - \
0.006758*np.cos(2*fractional_year) | |
we want to convert
path_to_pet = os.path.join(path_to_pet_3, image_ID[y]) #
sub_ID.append(i)
ses_ID.append(session_ID)
path_pet.append(path_to_pet)
data = pandas.DataFrame({'Subjects_ID': sub_ID,
'Session_ID': ses_ID,
'Path_to_pet': path_pet})
# data=final dataframe
return data
def find_path_to_T1_ADNI(file_mri, subjects_ID, path_to_dataset):
    """
    Collect the paths of the ADNI-compliant T1 images of the AIBL dataset.

    These images differ from the other T1s of the dataset in that their
    exam date is reported in the clinical CSV files, so each image can be
    matched to a session.

    :param file_mri: the two clinical-data dataframes describing the T1
        images (MRI 1.5 T and MRI 3 T)
    :param subjects_ID: subject ids found in the downloaded dataset
    :param path_to_dataset: path to the AIBL dataset
    :return: [subject ids, session ids, image paths], three aligned lists
    """
    import os
    sub_ID = []
    ses_ID = []
    path_T1 = []
    for subject in subjects_ID:
        for csv_file in file_mri:
            # Skip this CSV when it has no rows for the subject.
            if int(subject) not in list(csv_file.RID):
                continue
            subject_dir = os.path.join(path_to_dataset, str(subject))
            for subdirectory in listdir_nohidden(subject_dir):
                # Keep only subdirectories that can contain a T1 image.
                t1_dir = find_T1_folder(subdirectory, subject_dir)
                if t1_dir == 'NaN':
                    continue
                # Each entry is an exam-date string to match against the CSV.
                for exam_date in listdir_nohidden(t1_dir):
                    session_ID = match_data(exam_date, subject, csv_file)
                    if session_ID == '-4':
                        continue
                    exam_dir = os.path.join(t1_dir, str(exam_date))
                    for image in listdir_nohidden(exam_dir):
                        # Record the final path with its subject/session.
                        sub_ID.append(subject)
                        ses_ID.append(session_ID)
                        path_T1.append(os.path.join(exam_dir, image))
    return [sub_ID, ses_ID, path_T1]
def find_path_to_T1_SAG(path_to_dataset, subjects_ID, sub_ID, ses_ID, path_T1):
    """
    Collect the paths of the non-ADNI-compliant T1 images (folder name
    contains "SAG"), extending the lists produced by find_path_to_T1_ADNI.

    :param path_to_dataset: path to the AIBL dataset
    :param subjects_ID: subject ids found in the downloaded dataset
    :param sub_ID: list (from T1_ADNI) to append new subject ids to
    :param ses_ID: list (from T1_ADNI) to append new session ids to
    :param path_T1: list (from T1_ADNI) to append new paths to
    :return: [subject ids, session ids, image paths], the extended lists
    """
    import os
    # Only these folder names are converted, and only one per subject.
    sag_folders = ('MPRAGESAGISOp2ND', 'MPRAGE_SAG_ISO_p2_ND', 'MPRAGE_SAG_ISO_p2')
    for subject in subjects_ID:
        subject_dir = os.path.join(path_to_dataset, str(subject))
        candidates = [d for d in listdir_nohidden(subject_dir)
                      if d in sag_folders]
        if not candidates:
            continue
        sag_dir = os.path.join(subject_dir, candidates[0])
        exam_dates = listdir_nohidden(sag_dir)
        # Hardcoded session for two subjects whose exam date is known.
        session_ID = 'M54' if subject in (342, 557) else 'M00'
        # When the same subject/session already has an ADNI-compliant image,
        # keep that one: its exam date is more precise.
        already_converted = (subject in sub_ID
                             and session_ID == ses_ID[sub_ID.index(subject)])
        if already_converted:
            continue
        exam_dir = os.path.join(sag_dir, str(exam_dates[0]))
        images = listdir_nohidden(exam_dir)
        sub_ID.append(subject)
        ses_ID.append(session_ID)
        path_T1.append(os.path.join(exam_dir, images[0]))
    return [sub_ID, ses_ID, path_T1]
def find_path_to_T1(path_to_dataset, path_to_csv):
    """
    Build a DataFrame with, for each T1 image, its subject id, session id
    and path.

    :param path_to_dataset: path to the AIBL dataset
    :param path_to_csv: path to the downloaded clinical CSV files
    :return: pandas DataFrame with columns 'Subjects_ID', 'Session_ID'
        and 'Path_to_T1'
    """
    import os
    import pandas
    import glob
    # The T1w metadata is split over two CSV files (1.5 T and 3 T scanners);
    # both are used to resolve image paths.
    def load_meta(pattern):
        return pandas.read_csv(
            glob.glob(os.path.join(path_to_csv, pattern))[0])
    file_mri = [load_meta("aibl_mrimeta_*.csv"),
                load_meta("aibl_mri3meta_*.csv")]
    # Each top-level folder of the dataset is one subject id.
    subjects_ID = listdir_nohidden(path_to_dataset)
    # Every downloaded subject is converted, except this known sample.
    if '0151083' in subjects_ID:
        subjects_ID.remove('0151083')
    sub_ID, ses_ID, path_T1 = find_path_to_T1_ADNI(
        file_mri, subjects_ID, path_to_dataset)
    sub_ID, ses_ID, path_T1 = find_path_to_T1_SAG(
        path_to_dataset, subjects_ID, sub_ID, ses_ID, path_T1)
    return pandas.DataFrame({'Subjects_ID': sub_ID,
                             'Session_ID': ses_ID,
                             'Path_to_T1': path_T1})
# Covert the AIBL PET images into the BIDS specification.
# There are three pet modalities: av45, pib, flute. All of them are converted
# in BIDS
def paths_to_bids(path_to_dataset, path_to_csv, bids_dir, modality):
    """
    Convert all the images of one modality found in the AIBL dataset to the
    BIDS specification, in parallel.

    :param path_to_dataset: path to the AIBL dataset
    :param path_to_csv: path to the folder with the clinical-data CSV files
    :param bids_dir: path where the BIDS-converted dataset is written
    :param modality: string 't1', 'av45', 'flute' or 'pib'
    :raises RuntimeError: when ``modality`` is not one of the supported values
    :raises FileNotFoundError: when the PET metadata CSV is missing
    :return: list of all the images that are potentially converted in a
        BIDS format and saved in the bids_dir. This does not guarantee
        existence.
    """
    from os.path import join, exists
    from numpy import nan
    import pandas as pds
    from clinica.utils.stream import cprint
    from multiprocessing.dummy import Pool
    from multiprocessing import cpu_count, Value
    import glob
    if modality.lower() not in ['t1', 'av45', 'flute', 'pib']:
        # This should never be reached
        raise RuntimeError(modality.lower()
                           + ' is not supported for conversion')
    counter = None
    def init(args):
        """ store the counter for later use """
        global counter
        counter = args
    def create_file(image):
        """Convert one image (a row of the paths dataframe) to BIDS.

        Returns the path of the written .nii.gz, or nan when the row has
        no source path.
        """
        global counter
        subject = image.Subjects_ID
        session = image.Session_ID
        name_of_path = {'t1': 'Path_to_T1',
                        'av45': 'Path_to_pet',
                        'flute': 'Path_to_pet',
                        'pib': 'Path_to_pet'}
        # depending on the dataframe, there is different way of accessing
        # the image object
        image_path = image[name_of_path[modality]]
        # Shared counter only tracks progress; lock guards the increment.
        with counter.get_lock():
            counter.value += 1
        if image_path is nan:
            cprint('No path specified for ' + subject + ' in session '
                   + session)
            return nan
        cprint('[' + modality.upper() + '] Processing subject ' + str(subject)
               + ' - session ' + session + ', ' + str(counter.value) + ' / '
               + str(total))
        session = viscode_to_session(session)
        # creation of the path
        if modality == 't1':
            output_path = join(bids_dir, 'sub-AIBL' + subject,
                               'ses-' + session, 'anat')
            output_filename = 'sub-AIBL' + subject + '_ses-' + session + '_T1w'
        elif modality in ['flute', 'pib', 'av45']:
            output_path = join(bids_dir, 'sub-AIBL' + subject,
                               'ses-' + session, 'pet')
            output_filename = 'sub-AIBL' + subject + '_ses-' + session \
                              + '_task-rest_acq-' + modality + '_pet'
        # image is saved following BIDS specifications; skip conversion when
        # the target file already exists.
        if exists(join(output_path, output_filename + '.nii.gz')):
            cprint('Subject ' + str(subject) + ' - session '
                   + session + ' already processed.')
            output_image = join(output_path, output_filename + '.nii.gz')
        else:
            output_image = dicom_to_nii(subject,
                                        output_path,
                                        output_filename,
                                        image_path)
        return output_image
    # it reads the dataframe where subject_ID, session_ID and path are saved
    if modality == 't1':
        images = find_path_to_T1(path_to_dataset, path_to_csv)
    else:
        # Fix: glob.glob(...)[0] used to raise an opaque IndexError when the
        # CSV file was absent, which made the FileNotFoundError check below
        # unreachable. Test the match list before indexing it.
        csv_pattern = join(path_to_csv, 'aibl_' + modality + 'meta_*.csv')
        csv_matches = glob.glob(csv_pattern)
        if not csv_matches:
            raise FileNotFoundError(csv_pattern
                                    + ' file not found in clinical data folder')
        path_to_csv_pet_modality = csv_matches[0]
        # Latest version of Flutemetamol CSV file (aibl_flutemeta_01-Jun-2018.csv)
        # has an extra column for some rows. However, each CSV file (regarding PET tracers)
        # contains the same columns. The usecols fixes this issue.
        df_pet = pds.read_csv(path_to_csv_pet_modality, sep=',|;', usecols=list(range(0, 36)))
        images = find_path_to_pet_modality(path_to_dataset,
                                           df_pet)
    images.to_csv(join(bids_dir, modality + '_paths_aibl.tsv'),
                  index=False, sep='\t', encoding='utf-8')
    counter = Value('i', 0)
    total = images.shape[0]
    # Reshape inputs to give it as a list to the workers
    images_list = []
    for i in range(total):
        images_list.append(images.iloc[i])
    # initializer is used with the counter variable to keep track of how many
    # files have been processed
    poolrunner = Pool(cpu_count(), initializer=init, initargs=(counter,))
    output_file_treated = poolrunner.map(create_file, images_list)
    del counter
    return output_file_treated
# -- Methods for the clinical data --
def create_participants_df_AIBL(input_path, clinical_spec_path, clinical_data_dir, delete_non_bids_info=True):
"""
This methods create a participants file for the AIBL dataset where
information regarding the patients are reported
:param input_path: path to the input directory :param
clinical_spec_path: path to the clinical file :param clinical_data_dir:
directory to the clinical data files :param delete_non_bids_info: if
True delete all the rows of the subjects that are not available in the
BIDS | |
<filename>fst2/processers.py
import copy
import os
import csv
import json
import torch
import logging
from transformers.file_utils import is_tf_available, is_torch_available
from functools import wraps
from .utils import CACHE_PARAMS
logger = logging.getLogger(__name__)
class InputExample(object):
    """One raw example for single-sentence (or sentence-pair) classification.

    Attributes mirror the constructor arguments:
        guid: unique identifier for the example.
        text_a: untokenized text of the first sequence; the only required
            text for single-sequence tasks.
        text_b: optional untokenized text of the second sequence, used for
            sequence-pair tasks.
        label: optional gold label; set for train/dev examples, left None
            for test examples.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid, self.text_a = guid, text_a
        self.text_b, self.label = text_b, label

    def __repr__(self):
        # The JSON form doubles as the debug representation.
        return str(self.to_json_string())

    def to_dict(self):
        """Return a deep-copied dict of this example's fields."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Return the example as a pretty-printed, key-sorted JSON string."""
        payload = self.to_dict()
        return json.dumps(payload, indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
    """Tokenized, model-ready features for a single example.

    Attributes mirror the constructor arguments:
        input_ids: indices of input sequence tokens in the vocabulary.
        attention_mask: mask in ``[0, 1]`` marking real tokens (usually
            ``1``) versus padding tokens (usually ``0``).
        token_type_ids: segment indices distinguishing the first and second
            portions of the input.
        label: label corresponding to the input.
    """

    def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label

    def __repr__(self):
        # The JSON form doubles as the debug representation.
        return str(self.to_json_string())

    def to_dict(self):
        """Return a deep-copied dict of this feature set's fields."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Return the features as a pretty-printed, key-sorted JSON string."""
        payload = self.to_dict()
        return json.dumps(payload, indent=2, sort_keys=True) + "\n"
class DataProcessor(object):
    """Abstract base class for sequence-classification data converters.

    Subclasses implement the ``get_*`` hooks; this base only provides the
    tfds label re-mapping and a CSV reader shared by all processors.
    """

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an example from a dict of tensorflow tensors.

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError

    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the train set."""
        raise NotImplementedError

    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev set."""
        raise NotImplementedError

    def get_labels(self):
        """Return the list of labels for this data set."""
        raise NotImplementedError

    def tfds_map(self, example):
        """Convert a tensorflow_datasets example to this library's format.

        Some tensorflow_datasets datasets are not formatted the same way the
        GLUE datasets are: their integer label is mapped back to its string
        form when the task has more than one label.
        """
        if len(self.get_labels()) > 1:
            example.label = self.get_labels()[int(example.label)]
        return example

    @classmethod
    def _read_csv(cls, input_file, delimiter="\t", quotechar=None):
        """Read a delimiter-separated csv/tsv file into a list of rows."""
        with open(input_file, "r", encoding="utf-8-sig") as handle:
            reader = csv.reader(handle, delimiter=delimiter, quotechar=quotechar)
            return [row for row in reader]
class SingleSentenceClassificationProcessor(DataProcessor):
    """ Generic processor for a single sentence classification data set.

    Holds a list of ``InputExample`` and the label set, supports loading
    from CSV, and converts examples to ``InputFeatures`` (optionally as
    tf/pt datasets) via :meth:`get_features`.
    """
    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        # Fresh lists are created here to avoid mutable-default sharing.
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        # mode: "classification" (labels mapped to int ids) or "regression"
        # (labels cast to float); see get_features().
        self.mode = mode
        self.verbose = verbose
    def __len__(self):
        return len(self.examples)
    def __getitem__(self, idx):
        # Slicing returns a new processor sharing the same label list;
        # integer indexing returns the single InputExample.
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
        return self.examples[idx]
    @classmethod
    def create_from_csv(
        cls, file_name, delimiter, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
    ):
        """Build a processor and populate it from a csv/tsv file.

        ``**kwargs`` are forwarded to the constructor (mode, verbose, ...).
        """
        processor = cls(**kwargs)
        processor.add_examples_from_csv(
            file_name,
            delimiter,
            split_name=split_name,
            column_label=column_label,
            column_text=column_text,
            column_id=column_id,
            skip_first_row=skip_first_row,
            overwrite_labels=True,
            overwrite_examples=True,
        )
        return processor
    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        """Build a processor directly from texts or (text, label) pairs."""
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor
    def add_examples_from_csv(
        self,
        file_name,
        delimiter,
        split_name="",
        column_label=0,
        column_text=1,
        column_id=None,
        skip_first_row=False,
        overwrite_labels=False,
        overwrite_examples=False,
    ):
        """Read a csv/tsv file and append its rows as examples.

        When ``column_id`` is None, guids are generated as
        ``"<split_name>-<row index>"`` (or just the row index).
        Returns the full example list (see :meth:`add_examples`).
        """
        lines = self._read_csv(file_name, delimiter=delimiter)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for (i, line) in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                guid = "%s-%s" % (split_name, i) if split_name else "%s" % i
                ids.append(guid)
        return self.add_examples(
            texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
        )
    def add_examples(
        self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
    ):
        """Append examples (and their labels) to the processor.

        ``texts_or_text_and_labels`` items are either plain texts or
        (text, label) pairs; a pair is only unpacked when ``labels`` was not
        given for that item. The stored label set is extended (or replaced,
        with ``overwrite_labels``) by the labels seen here.
        Returns the full, updated example list.
        """
        assert labels is None or len(texts_or_text_and_labels) == len(labels)
        assert ids is None or len(texts_or_text_and_labels) == len(ids)
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)
        examples = []
        added_labels = set()
        for (text_or_text_and_label, label, guid) in zip(texts_or_text_and_labels, labels, ids):
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)
        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))
        return self.examples
    def get_features(
        self,
        tokenizer,
        max_length=None,
        pad_on_left=False,
        pad_token=0,
        mask_padding_with_zero=True,
        return_tensors="pt",
    ):
        """
        Convert the stored examples into a list of ``InputFeatures``.
        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples
            max_length: Maximum example length; defaults to the tokenizer's
                maximum length
            pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
            pad_token: Padding token id
            mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
                and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
                actual values)
            return_tensors: ``None`` for a plain list of ``InputFeatures``,
                ``"tf"`` for a ``tf.data.Dataset``, ``"pt"`` for a
                ``torch.utils.data.TensorDataset``
        Returns:
            The task-specific features in the requested container. Sequences
            are padded to the longest example in this batch, not to
            ``max_length``.
        """
        if max_length is None:
            # NOTE(review): ``tokenizer.max_len`` is the older transformers
            # attribute (renamed ``model_max_length`` later) — confirm against
            # the pinned transformers version.
            max_length = tokenizer.max_len
        label_map = {label: i for i, label in enumerate(self.labels)}
        all_input_ids = []
        for (ex_index, example) in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info("Tokenizing example %d", ex_index)
            input_ids = tokenizer.encode(
                example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)
        # Dynamic padding: pad to the longest sequence actually present.
        batch_length = max(len(input_ids) for input_ids in all_input_ids)
        features = []
        for (ex_index, (input_ids, example)) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len(self.examples)))
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            assert len(input_ids) == batch_length, "Error with input length {} vs {}".format(
                len(input_ids), batch_length
            )
            assert len(attention_mask) == batch_length, "Error with input length {} vs {}".format(
                len(attention_mask), batch_length
            )
            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)
            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info("guid: %s" % (example.guid))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
                logger.info("label: %s (id = %d)" % (example.label, label))
            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf
            def gen():
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset
            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
class SequnceTokenClassificationProcessor(DataProcessor):
def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
self.labels = [] if labels is None else labels
self.examples = [] if examples is None else examples
self.mode = mode
self.verbose = verbose
@classmethod
def create_from_txt(cls, file_name, delimiter, **kwargs):
processor = cls(**kwargs)
processor.read_examples_from_txt(file_name, delimiter)
return processor
def read_examples_from_txt(self, file_name, delimiter):
examples = []
guid_index = 1
with open(file_name, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == | |
random columns belonging to samples we misclassify
# # Pred=0, Actual=1
# badguy = {
# 'TransactionAmt': 175.132982,
# 'C1': 26.855532,
# 'C2': 31.275574,
# 'D1': 30.312290,
# 'D2': 91.129977,
# 'V100': 0.107696,
# 'V310': 36.764886,
# }
# for key,val in badguy.items():
# data['BGuy_'+key] = data[key] - val
# Interaction
updateme('Interactions')
interactions = [['addr1','card1'], ['card1','card5'],
['C5','C9'], ['C5','C13'],['C5','C14'],['C13','C14']]
for a,b in interactions:
data[a + '_x_' + b] = stringy(data[a]) + stringy(data[b])
data[a + '_*_' + b] = (data[a] * data[b])
del data['tdt'], data['tdate']
new_features = list(set(data.columns) - set(old_features))
data.reset_index(drop=True, inplace=True)
return data, new_features
# # Categorical FE
# In[336]:
def build_cat_features(trx, idn):
    """Merge transaction and identity frames and engineer categorical features.

    Returns (data, new_features): the merged frame with all engineered
    columns added, and the list of column names that were not present
    before engineering.

    Relies on module-level helpers/globals: `updateme` (progress logging),
    `id_30_dates` (OS-name -> release-date map), and — in the
    "Breaking Groups" section — the module-level `ids` frame
    (see review note below).
    """
    updateme('Mergind DFrame + Computing NANs')
    # Per-row NaN counts, computed before the merge so each source frame
    # is counted separately.
    trx['nulls_trx'] = trx.isna().sum(axis=1)
    idn['nulls_idn'] = idn.isna().sum(axis=1)
    data = trx.merge(idn, how='left', on='TransactionID')
    old_features = [c for c in data.columns if c not in ['nulls_trx', 'nulls_idn']]
    # Make sure everything is lowercase (object columns only).
    for c1, c2 in data.dtypes.reset_index().values:
        if not c2=='O': continue
        data[c1] = data[c1].astype(str).apply(str.lower)
    updateme('Building Groups')
    # Concatenate stringified columns (trailing space keeps fields separated).
    stringy = lambda x: x.astype(str) + ' '
    data['CardID'] = stringy(data.card1) + stringy(data.card2) + stringy(data.card3) + stringy(data.card4) + stringy(data.card5) + stringy(data.card6) + stringy(data.addr1) # + stringy(data.addr2) # Sergey says addr1 only: https://www.kaggle.com/c/ieee-fraud-detection/discussion/101785#latest-588573
    data['DeviceID'] = stringy(data.DeviceType) + stringy(data.DeviceInfo) + stringy(data.id_31) # TODO: Clean
    data['PAccountID'] = stringy(data.addr1) + stringy(data.addr2) + stringy(data.P_emaildomain)
    data['RAccountID'] = stringy(data.addr1) + stringy(data.addr2) + stringy(data.R_emaildomain)
    updateme('Breaking Groups')
    # Repair id_30 (OS) from id_31 (browser) hints when id_30 is uninformative.
    # NOTE(review): these masks read the module-level `ids` frame
    # (ids.id_30.isin(...)) while the two "Special" blocks below use
    # data.id_30 — presumably `data` was intended everywhere; boolean
    # alignment between `ids` and `data` indexes should be verified.
    data.loc[
        data.id_31.astype(str).str.contains('android') & ~data.id_30.astype(str).str.contains('android') & ids.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'android'
    data.loc[
        data.id_31.astype(str).str.contains('mobile safari') & ~data.id_30.astype(str).str.contains('ios') & ids.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'ios'
    data.loc[
        data.id_31.astype(str).str.contains('ios') & ~data.id_30.astype(str).str.contains('ios') & ids.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'mac'
    data.loc[
        data.id_31.astype(str).str.contains('safari') & ~data.id_31.astype(str).str.contains('mobile safari') & ~data.id_30.astype(str).str.contains('mac') & ids.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'mac'
    data.loc[
        data.id_31.astype(str).str.contains('edge') & ~data.id_30.astype(str).str.contains('windows') & ids.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'windows'
    data.loc[
        data.id_31.astype(str).str.startswith('ie') & ~data.id_30.astype(str).str.contains('windows') & ids.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'windows'
    data.loc[
        data.id_31.astype(str).str.contains('windows') & ~data.id_30.astype(str).str.contains('windows') & ids.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'windows'
    # Special: infer OS from DeviceInfo when the browser gave no hint.
    data.loc[
        data.DeviceInfo.str.contains('windows') & ~data.id_30.astype(str).str.contains('android') & data.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'windows'
    data.loc[
        data.DeviceInfo.str.contains('android') & ~data.id_30.astype(str).str.contains('android') & data.id_30.isin(['other','func','nan']),
        'id_30'
    ] = 'android'
    # Map DeviceInfo model strings to a manufacturer label via regexes.
    data['manufacturer'] = np.nan
    data.loc[data.DeviceInfo.str.contains(r'^samsung|^sm-|^gt-|^sgh-|^sch-'), 'manufacturer'] = 'samsung'
    data.loc[data.DeviceInfo.str.contains(r'^lenovo'), 'manufacturer'] = 'lenovo'
    data.loc[data.DeviceInfo.str.contains(r'^ta-|nokia'), 'manufacturer'] = 'nokia'
    data.loc[data.DeviceInfo.str.contains(r'^lg|^lm-|^vs\d'), 'manufacturer'] = 'lg'
    data.loc[data.DeviceInfo.str.contains(r'^mot|^xt\d{4}'), 'manufacturer'] = 'motorolla'
    data.loc[data.DeviceInfo.str.contains(r'^android|nexus|pixel|oneplus'), 'manufacturer'] = 'google'
    data.loc[data.DeviceInfo.str.contains(r'htc'), 'manufacturer'] = 'htc'
    data.loc[data.DeviceInfo.str.contains(r'windows|microsoft|trident|rv:11.0|mddrjs'), 'manufacturer'] = 'microsoft'
    data.loc[data.DeviceInfo.str.contains(r'linux'), 'manufacturer'] = 'linux'
    data.loc[data.DeviceInfo.str.contains(r'ios device|macos|iphone'), 'manufacturer'] = 'apple'
    data.loc[data.DeviceInfo.str.contains(r'^[a-z]{3}-l|huawei|hi6210sft|^chc'), 'manufacturer'] = 'huawei'
    data.loc[data.DeviceInfo.str.contains(r'hisense'), 'manufacturer'] = 'hisense'
    data.loc[data.DeviceInfo.str.contains(r'redmi|^mi |^mi$'), 'manufacturer'] = 'xiaomi'
    data.loc[data.DeviceInfo.str.contains(r'ilium'), 'manufacturer'] = 'lanix'
    data.loc[data.DeviceInfo.str.contains(r'asus'), 'manufacturer'] = 'asus'
    data.loc[data.DeviceInfo.str.contains(r'zte|blade|^z\d{3} |^z\d{3}$'), 'manufacturer'] = 'zte'
    data.loc[data.DeviceInfo.str.contains(r'^kf'), 'manufacturer'] = 'amazon'
    data.loc[data.DeviceInfo.str.contains(r'^m4|m4tel'), 'manufacturer'] = 'm4tel'
    data.loc[data.DeviceInfo.str.contains(r'^\d{4}[a-z]$|^\d{4,}[a-z] |alcatel|one '), 'manufacturer'] = 'alcatel'
    data.loc[data.DeviceInfo.str.contains(r'^[a-z]\d{4}a$|^[a-z]\d{4}a |polaroid'), 'manufacturer'] = 'polaroid'
    data.loc[data.DeviceInfo.str.contains(r'^[a-z]\d{4}$|^[a-z]\d{4} |^sgp'), 'manufacturer'] = 'sony'
    # NOTE(review): manufacturer was initialized to np.nan (a float), so the
    # string comparison manufacturer=='nan' below never matches the unmapped
    # rows — presumably data.manufacturer.isna() was intended; confirm.
    data.loc[(data.DeviceInfo!='nan') & (data.manufacturer=='nan'), 'manufacturer'] = 'other'
    data.loc[
        data.id_30.isin(['other','func','nan']) & (data.manufacturer=='microsoft'),
        'id_30'
    ] = 'windows'
    # Collapse id_30 to a coarse platform label.
    data['platform'] = data.id_30.apply(lambda x: 'android' if 'android' in x else 'windows' if 'windows' in x else 'mac' if 'mac' in x else 'ios' if 'ios' in x else 'linux' if 'linux' in x else 'other')
    data['platform_manufacturer'] = stringy(data.platform) + stringy(data.manufacturer)
    # Screen resolution "WxH" -> numeric width/height and aspect ratio.
    data['temp'] = data.id_33.astype(str).apply(lambda x: x.lower().split('x'))
    data['_rezx'] = data.temp.apply(lambda x: x[0] if len(x)==2 else np.nan).astype(np.float64)
    data['_rezy'] = data.temp.apply(lambda x: x[1] if len(x)==2 else np.nan).astype(np.float64)
    data['_aspect_ratio'] = data._rezx / data._rezy
    # 1 when the amount has a fractional part, else 0.
    data['TransactionAmtCents'] = np.ceil(data.TransactionAmt) - np.floor(data.TransactionAmt)
    del data['temp']
    updateme('Email Features')
    # Crude TLD -> country mapping for email domains.
    country_map = {
        'com':'us', 'net':'us', 'edu':'us', 'gmail':'us',
        'mx': 'mx', 'es':'es', 'de':'de', 'fr':'fr',
        'uk':'uk', 'jp':'jp'
    }
    domain = lambda x: x.split('.')[0]
    pemail_country = lambda x: x.split('.')[-1]
    data['pemail_domain'] = data.P_emaildomain.astype(str).apply(domain)
    data['pemail_ext'] = data.P_emaildomain.astype(str).apply(pemail_country).map(country_map)
    data['remail_domain'] = data.R_emaildomain.astype(str).apply(domain)
    data['remail_ext'] = data.R_emaildomain.astype(str).apply(pemail_country).map(country_map)
    data['p_and_r_email'] = data.P_emaildomain.astype(str) + ' ' + data.R_emaildomain.astype(str)
    updateme('Count + Label Encoding Everything')
    encodeit = [
        'CardID', 'DeviceID', 'PAccountID', 'RAccountID', 'ProductCD',
        'nulls_idn', 'nulls_trx',
        'P_emaildomain','R_emaildomain',
        'pemail_domain', 'pemail_ext', 'remail_domain', 'remail_ext', 'p_and_r_email',
        'platform','manufacturer','platform_manufacturer'
    ]
    # ce_* = count (frequency) encoding, le_* = label encoding; the raw
    # categorical columns are dropped afterwards.
    for col in encodeit:
        mapper = {key:val for val,key in enumerate(data[col].unique())}
        if np.nan in mapper: mapper[np.nan] = np.nan # Keep Nans Nan
        data['ce_' + col] = data[col].map(data[col].value_counts(dropna=False))
        data['le_' + col] = data[col].map(mapper)
    data.drop(encodeit, axis=1, inplace=True)
    updateme('Time Features')
    # Remove the linear drift of the D* timedelta columns vs TransactionDT
    # (TransactionDT is in seconds; slope converts to days).
    slope = 1 / (60*60*24) # sec/day
    for i in range(1,16):
        if i in [9]: continue
        feature = 'D' + str(i)
        data[feature+'_mfix'.format(i)] = np.round_(data[feature] - (data.TransactionDT - data.TransactionDT.min()) * slope)
    # Calendar features relative to an assumed dataset start date.
    START_DATE = '2017-12-01'
    startdate = datetime.datetime.strptime(START_DATE, '%Y-%m-%d')
    data['tdt'] = data['TransactionDT'].apply(lambda x: (startdate + datetime.timedelta(seconds = x)))
    data['tdow'] = data.tdt.dt.dayofweek
    data['thour'] = data.tdt.dt.hour
    data['tdate'] = data.tdt.dt.date
    data['flag_D9na'] = data.D9.isna()
    # Count-encode day-of-week and hour (label mapper built but only the
    # count encoding is stored here).
    for col in ['tdow', 'thour']:
        mapper = {key:val for val,key in enumerate(data[col].unique())}
        if np.nan in mapper: mapper[np.nan] = np.nan # Keep Nans Nan
        data['ce_' + col] = data[col].map(data[col].value_counts(dropna=False))
    # Days since the OS named in id_30 was released (id_30_dates is a
    # module-level map — TODO confirm its key format matches id_30 values).
    data['_OSRelease'] = (pd.to_datetime(data.tdt) - pd.to_datetime(data.id_30.map(id_30_dates))) / datetime.timedelta(days = 1)
    del data['tdt'], data['tdate'], data['D9']
    new_features = list(set(data.columns) - set(old_features))
    data.reset_index(drop=True, inplace=True)
    # NOTE: WE CAN TRY BUILDING ID_31 BROWSER RELEASE DATE
    return data, new_features
# # Start
# In[789]:
# Load the raw IEEE-CIS fraud-detection CSVs (train/test transaction and
# identity tables).
traintr = pd.read_csv('input/train_transaction.csv.zip')
trainid = pd.read_csv('input/train_identity.csv.zip')
testtr = pd.read_csv('input/test_transaction.csv.zip')
testid = pd.read_csv('input/test_identity.csv.zip')
# In[529]:
# In[536]:
# Dead exploratory code kept for reference: topic-model card1s grouped by
# addr1 via CountVectorizer + LDA.
# from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# card1_of_addr1 = {}
# for sample_card1, sample_addr1 in tqdm(traintr[['card1','addr1']].values):
# card1_of_addr1.setdefault(str(sample_card1), []).append(str(sample_addr1))
# addr1s = list(card1_of_addr1.keys())
# addr1s[:50]
# card1s_as_sentence = [' '.join(card1_of_addr1[addr1]) for addr1 in addr1s]
# card1s_as_matrix = CountVectorizer().fit_transform(card1s_as_sentence)
# topics_of_addr1s = LinearDiscriminantAnalysis(n_components=5).fit_transform(card1s_as_matrix)
# In[337]:
gc.collect()
# Stack train on top of test so features are engineered consistently;
# trx_size marks the train/test boundary row.
# NOTE(review): DataFrame.append is removed in pandas 2.x — pd.concat
# would be the modern equivalent; left as-is for the pinned environment.
trx_size = traintr.shape[0]
trans = traintr.append(testtr, sort=False)
ids = trainid.append(testid, sort=False)
trans.reset_index(drop=True, inplace=True)
ids.reset_index(drop=True, inplace=True)
data, new_features = build_cat_features(trans, ids)
gc.collect()
# In[338]:
data[new_features].dtypes.sort_values()
# In[339]:
# Noise columns act as a feature-importance baseline: real features should
# outrank them.
data['noise0'] = np.random.normal(size=data.shape[0])
data['noise1'] = np.random.uniform(size=data.shape[0])
data_trn = data.iloc[:trx_size]
data_sub = data.iloc[trx_size:]
# In[340]:
seed = 123
# Fast FI!
# Deliberately tiny trees: this parameter set is for quick feature-importance
# experiments, not for the final model.
params = {
    # Simple trees
    'max_depth': 16,
    'num_leaves': 4,
    # 'bagging_fraction': 0.80,
    'objective': 'binary',
    'save_binary': True,
    'seed': seed,
    'feature_fraction_seed': seed,
    'bagging_seed': seed,
    'drop_seed': seed,
    'data_random_seed': seed,
    'boosting_type': 'gbdt',
    'boost_from_average': True,
    'metric':'auc',
    'verbosity': -1,
    'verbose': -1,
    'is_unbalance': False,
    #'scale_pos_weight':2,
}
# In[341]:
def experimemt(data, features, params, runs=3, train_frac=0.75, seed=123, expnum=0, retoof=False, max_trees=25):
    """Quick LightGBM experiment to score a feature subset.

    Trains `runs` LightGBM models (seeds seed..seed+runs-1) on the first
    train_frac of the train rows and validates on the remaining tail
    (a time-ordered holdout — rows are not shuffled). Prints the mean/std
    validation ROC AUC.

    Args:
        data: frame holding `features` plus the `isFraud` target.
        features: column names to train on.
        params: base LightGBM params (copied; per-run seeds are injected).
        runs: number of re-seeded repetitions.
        train_frac: fraction of trx_size rows used for training.
        expnum: tag stored in the importance frame for bookkeeping.
        retoof: also return the out-of-fold prediction vector.
        max_trees: boosting-round cap per run.

    Returns:
        (fi, mean_auc) or (oof, fi, mean_auc) when retoof is True, where fi
        has per-run and averaged gain/split importances.
    """
    global trx_size
    # Run a quick experiment with `runs` runs — ideally with just a few
    # features, to evaluate their efficiency (feature-selection validator).
    recalls = []
    roc_aucs = []
    f1_scores = []
    accuracies = []
    precisions = []
    all_y_true = []
    all_y_pred = []
    tprs = []
    fi = pd.DataFrame()
    fi['feature'] = features
    fi['expnum'] = expnum
    # NOTE: oof covers all trx_size rows but only index_val is ever written,
    # and each run overwrites it — the returned oof holds the LAST run's
    # validation predictions only ("todo: logic for last fold").
    oof = np.zeros(trx_size) # todo: logic for last fold
    index_trn = np.arange(int(train_frac*trx_size))
    index_val = np.arange(int(train_frac*trx_size),trx_size)
    trn_data = lgb.Dataset(data[features].iloc[index_trn], label=data.isFraud.iloc[index_trn])
    val_data = lgb.Dataset(data[features].iloc[index_val], label=data.isFraud.iloc[index_val])
    params = params.copy()
    for run in range(runs):
        # Re-seed every RNG LightGBM exposes so runs differ only by seed.
        params['seed'] = seed + run
        params['feature_fraction_seed'] = seed + run
        params['bagging_seed'] = seed + run
        params['drop_seed'] = seed + run
        params['data_random_seed'] = seed + run
        clf = lgb.train(
            params,
            trn_data,
            max_trees,
            valid_sets = [trn_data, val_data],
            verbose_eval=600,
            early_stopping_rounds=5
        )
        # Scores on the holdout tail.
        y_true = data.iloc[index_val].isFraud.values
        y_pred = clf.predict(data[features].iloc[index_val])
        all_y_true.append(y_true)
        all_y_pred.append(y_pred)
        oof[index_val] = y_pred
        # Silence sklearn's undefined-metric warnings for degenerate rounds.
        warnings.filterwarnings("ignore")
        accuracies.append(accuracy_score(y_true, y_pred.round()))
        recalls.append(recall_score(y_true, y_pred.round()))
        precisions.append(precision_score(y_true, y_pred.round()))
        f1_scores.append(f1_score(y_true, y_pred.round()))
        roc_aucs.append(clf.best_score['valid_1']['auc'])
        warnings.filterwarnings("default")
        fi['gain_run_{}'.format(run + 1)] = clf.feature_importance(importance_type='gain')
        fi['split_run_{}'.format(run + 1)] = clf.feature_importance(importance_type='split')
        fi['iter_run_{}'.format(run + 1)] = clf.best_iteration
        fi['roc_run_{}'.format(run + 1)] = clf.best_score['valid_1']['auc']
    # Average the per-run columns into summary columns.
    fi['gain'] = fi[[f for f in fi if 'gain_run_' in f]].mean(axis=1)
    fi['split'] = fi[[f for f in fi if 'split_run_' in f]].mean(axis=1)
    fi['iter'] = fi[[f for f in fi if 'iter_run_' in f]].mean(axis=1)
    fi['roc'] = fi[[f for f in fi if 'roc_run_' in f]].mean(axis=1)
    print(
        '\tCV roc score : {0:.4f}, std: {1:.4f}.'.format(np.mean(roc_aucs), np.std(roc_aucs)),
        #'\n\tCV accuracy score : {0:.4f}, std: {1:.4f}.'.format(np.mean(accuracies), np.std(accuracies)),
        #'\n\tCV recall score : {0:.4f}, std: {1:.4f}.'.format(np.mean(recalls), np.std(recalls)),
        #'\n\tCV precision score : {0:.4f}, std: {1:.4f}.'.format(np.mean(precisions), np.std(precisions)),
        #'\n\tCV f1 score : {0:.4f}, std: {1:.4f}.'.format(np.mean(f1_scores), np.std(f1_scores))
    )
    if retoof:
        return oof, fi, np.mean(roc_aucs)
    return fi, np.mean(roc_aucs)
# In[429]:
from sklearn.ensemble import RandomForestClassifier
def test_cvs(data, feature):
    """Visualize and quantify covariate shift of one feature between the
    train rows (index < trx_size) and the appended test rows.

    Plots train/test histograms and value-vs-TransactionDT scatters, then
    trains a RandomForest to distinguish train from test using only
    `feature` and returns the validation ROC AUC (~0.5 means no
    detectable shift, ~1.0 means severe shift).

    Side effects: shows a matplotlib figure; temporarily adds and then
    removes a 'covariate_shift' column on `data`.
    """
    global trx_size
    # 1 marks rows belonging to the test portion appended after the train rows.
    data['covariate_shift'] = (np.arange(data.shape[0]) >= trx_size).astype(np.uint8)
    peek = data[~data[feature].isna()]
    f, ax = plt.subplots(2,2,figsize=(14,8))
    ax[0, 0].set_title('Train')
    ax[0, 1].set_title('Test')
    ax[0, 0].hist(peek[peek.index<trx_size][feature],100)
    ax[0, 1].hist(peek[peek.index>=trx_size][feature],100)
    # TODO: Plot against TIME:
    ax[1, 0].scatter(peek[peek.index<trx_size].TransactionDT, peek[peek.index<trx_size][feature], s=0.1, alpha=0.1)
    ax[1, 1].scatter(peek[peek.index>=trx_size].TransactionDT, peek[peek.index>=trx_size][feature], s=0.1, alpha=0.1)
    plt.show()
    # Test covariate shift.
    # NOTE: choice() samples WITH replacement, so index_trn contains
    # duplicates and covers only ~40% of rows; the remainder becomes the
    # validation set. Unseeded, so splits vary between calls.
    index_trn = np.random.choice(np.arange(peek.shape[0]), size=peek.shape[0]//2)
    index_val = list(set(np.arange(peek.shape[0])) - set(index_trn))
    clf = RandomForestClassifier(n_estimators=10, max_depth=15, random_state=1237, n_jobs=-1)
    clf.fit(peek[feature].iloc[index_trn].values.reshape(-1,1), peek.covariate_shift.iloc[index_trn])
    y_true = peek.iloc[index_val].covariate_shift.values.flatten()
    # FIX: score with class-1 probabilities rather than hard labels —
    # roc_auc_score on 0/1 predictions collapses the curve to a single
    # operating point and understates the shift.
    y_pred = clf.predict_proba(peek[feature].iloc[index_val].values.reshape(-1,1))[:, 1]
    del data['covariate_shift']
    return roc_auc_score(y_true, y_pred)
# # Run
# In[306]:
# First, find which V-variables have the BEST single-feature ROC, then
# select the top one per V-NaN-group as the representative continuous
# variable, plus the top one not correlated to it.
vrez = []
# Score each of V1..V339 on its own with a single fast run; expnum tags the
# resulting importance frame with the variable name.
for vvar in ['V'+str(i) for i in range(1,340)]:
    print('\n',vvar)
    vrez.append(experimemt(
        traintr[[vvar, 'isFraud']],
        [vvar],
        params,
        runs=1,
        train_frac=0.75,
        seed=123,
        expnum=vvar
    )[0])
# One row per V-variable, stacked.
vrez = pd.concat(vrez, axis=0)
# In[308]:
# Select the best V* variable from each VNanGroup:
z = traintr[['V'+str(i) for i in range(1,340)]].isnull().sum().sort_values().reset_index().sort_values([0,'index'])
z.rename(columns={0:'cntna','index':'expnum'}, | |
= comment
self.is_string = is_string
frame = tk.Frame(self.root)
var = tk.Label(frame, text=variable ,width=20,background='light gray')
var.config(relief=tk.GROOVE)
var.grid(column=0, row=0, padx=5, sticky='WE')
val = tk.Entry(frame,textvariable=self.v_value)
val.grid(column=1, row=0, sticky='E')
CreateToolTip(var,comment)
frame.pack()
return self
def row_to_param_str(self):
    """Serialize this row back into a parameters-file line.

    Example output:
        default_var("RunName","testrun")		// Name of simulation run
    String-valued parameters get their value re-quoted.
    """
    if self.is_string:
        template = "default_var(\"{}\",\"{}\")\t\t// {}"
    else:
        template = "default_var(\"{}\",{})\t\t// {}"
    return template.format(self.variable, self.v_value.get(), self.comment)
def pack(self,*args,**kwargs):
    # Forward geometry management to the superclass AND to the wrapped
    # frame so the whole row is laid out together.
    super(Row,self).pack(*args,**kwargs)
    self.root.pack(*args,**kwargs)
def grid(self,*args,**kwargs):
    # Same forwarding as pack(), for grid-based layouts.
    super(Row,self).grid(*args,**kwargs)
    self.root.grid(*args,**kwargs)
# Column names of the parameters DataFrame produced by load().
row_header = ['variable','value','comment','value_is_string']
# Row widgets currently displayed; rebuilt by refresh().
rows = []

def load(filename):
    """Parse a simulation parameters file into a DataFrame.

    Each recognized line has the form
        default_var("RunName","testrun")		// Name of simulation run
    Lines without a default_var(...) call are skipped. A quoted value is
    unquoted and flagged value_is_string=True.

    Args:
        filename: path of the parameters file to read.
    Returns:
        DataFrame with columns row_header
        (variable, value, comment, value_is_string).
    """
    params = []
    # FIX: use a context manager — the original readline loop never closed
    # the file — and iterate lines directly (the line counter was unused).
    with open(filename) as fp:
        for line in fp:
            m = re.search(r'default_var\((.+?)\)', line)
            if not m:
                continue
            line_variable = re.search(r'\"(.+?)\"', m.group(1)).group(1)
            # Everything after the first comma inside the call is the value.
            line_value = re.search(r',(.+?)$', m.group(1)).group(1)
            line_comment = re.search(r'\/\/ (.+?)$', line).group(1)
            line_value_string = False
            n = re.search(r'\"(.*?)\"', line_value)
            if n:
                line_value_string = True
                line_value = n.group(1)
            params.append([line_variable, line_value, line_comment, line_value_string])
    return pd.DataFrame(params, columns=row_header)
def save():
    """Write every parameter row back to params_file, then reload the file
    so the file-sourced public parameters are refreshed."""
    with open(params_file, "w") as out:
        for r in rows:
            out.write(r.row_to_param_str() + "\n")
    display_app_status('Parameters \"' + params_file + '\" saved')
    re_set_file_params(load(params_file))
    return
# Category frames laid out in a 2x3 grid inside table_frame; each parameter
# row is packed into the frame matching its category below.
general_frame = tk.LabelFrame(table_frame, text="General",fg="blue")
general_frame.grid(column=0,row=0,sticky='news',padx=10,pady=5)
dropdown_frame = tk.LabelFrame(table_frame, text="Data Sources",fg="blue")
dropdown_frame.grid(column=1,row=0,sticky='news',padx=10,pady=5)
space_frame = tk.LabelFrame(table_frame, text="Spacial Config",fg="blue")
space_frame.grid(column=0,row=1,sticky='news',padx=10,pady=5)
print_frame = tk.LabelFrame(table_frame, text="Print/Output",fg="blue")
print_frame.grid(column=1,row=2,sticky='news',padx=10,pady=5)
misc_frame = tk.LabelFrame(table_frame, text="Miscellaneous",fg="blue")
misc_frame.grid(column=0,row=2,sticky='news',padx=10,pady=5)
lfp_frame = tk.LabelFrame(table_frame, text="LFP Config",fg="blue")
lfp_frame.grid(column=1,row=1,sticky='news',padx=10,pady=5)
# Parameter-name -> category membership lists used by refresh() to decide
# which frame a row belongs in (anything unmatched lands in Miscellaneous).
param_file_vars = ['ConnData','SynData','NumData','PhasicData']
general_vars = ['RunName', 'Scale','SimDuration','StepBy','TemporalResolution','RandomVrest','RandomVinit']
space_vars = ['TransverseLength','LongitudinalLength','LayerHeights','SpatialResolution']
dropdown_vars = ['Connectivity','Stimulation']
print_vars = ['PrintVoltage','PrintTerminal','PrintConnDetails','PrintCellPositions','PrintConnSummary','CatFlag','EstWriteTime','NumTraces']
lfp_vars = ['lfp_dt','ElectrodePoint','ComputeNpoleLFP','ComputeDipoleLFP','LFPCellTypes','MaxEDist']
def refresh(df):
    """Rebuild the parameter-row widgets from a parameters DataFrame.

    Clears the current rows, then creates one Row widget per DataFrame row,
    placing it in the category frame matching its variable name (falling
    back to Miscellaneous) and registering its value in the public
    parameter store. Empty spacer Rows pad the top and bottom of each frame.
    """
    param_changed(val=False)
    rows.clear()
    padtopbot = 3
    # Top spacer row in every category frame.
    Row(general_frame).pack(pady=padtopbot-1)
    Row(dropdown_frame).pack(pady=padtopbot-1)
    Row(space_frame).pack(pady=padtopbot-1)
    Row(print_frame).pack(pady=padtopbot-1)
    Row(misc_frame).pack(pady=padtopbot-1)
    Row(lfp_frame).pack(pady=padtopbot-1)
    for i, row in df.iterrows():
        # temp = [variable, value, comment, value_is_string]
        temp = []
        temp.append(row.tolist())
        temp = temp[0]
        #config(self, variable, value, comment, is_string):
        frame = misc_frame
        if temp[0] in general_vars:
            frame=general_frame
        elif temp[0] in dropdown_vars or temp[0] in param_file_vars:
            frame=dropdown_frame
        elif temp[0] in space_vars:
            frame=space_frame
        elif temp[0] in print_vars:
            frame=print_frame
        elif temp[0] in lfp_vars:
            frame=lfp_frame
        #This is all pages to change
        row = Row(frame).config(temp[0],temp[1],temp[2],temp[3])
        row.pack(padx=10)
        rows.append(row)
        # Registers the tk variable itself, so edits propagate live.
        set_public_param(temp[0],row.v_value)
    # Bottom spacer row in every category frame.
    Row(general_frame).pack(pady=padtopbot)
    Row(dropdown_frame).pack(pady=padtopbot)
    Row(space_frame).pack(pady=padtopbot)
    Row(print_frame).pack(pady=padtopbot)
    Row(misc_frame).pack(pady=padtopbot)
    Row(lfp_frame).pack(pady=padtopbot)
    return
def re_set_file_params(df):
    """Push the data-file parameters (ConnData, SynData, NumData,
    PhasicData) from a parameters DataFrame into the public store.

    Unlike refresh(), this registers the plain string value, not a tk
    variable.
    """
    for _, df_row in df.iterrows():
        values = df_row.tolist()
        if values[0] in param_file_vars:
            set_public_param(values[0], values[1])
    return
def verify():
    # Placeholder: model-configuration verification is not implemented yet.
    display_app_status('Not implemented')
    return
def load_configs():
    # Placeholder: loading parameters into the views is not implemented yet.
    display_app_status('Not implemented')
    return
def import_model():
    # Placeholder: model import is not implemented yet.
    display_app_status('Not implemented')
    return
def export_model():
    # Placeholder: model export is not implemented yet.
    display_app_status('Not implemented')
    return
# Action buttons. Verify/Load/Import/Export are stubs and stay disabled;
# only Save is active.
verifyBuildButton = tk.Button(top_option_frame, text="Verify Model Configuration", command=verify)
verifyBuildButton.grid(column=1, row =0, padx=5, pady=5, sticky='W')
verifyBuildButton.config(state=tk.DISABLED)
loadConfigsButton = tk.Button(top_option_frame, text="Load Parameters into Views", command=load_configs)
loadConfigsButton.grid(column=2, row =0, padx=5, pady=5, sticky='W')
loadConfigsButton.config(state=tk.DISABLED)
saveButton = tk.Button(top_option_frame, text="Save Parameters File", command=save)
saveButton.grid(column=0, row =0, padx=5, pady=5, sticky='W')
importButton = tk.Button(import_export_frame, text="Import Model", command=import_model)
importButton.grid(column=0, row =0, padx=5, pady=5, sticky='WE')
importButton.config(state=tk.DISABLED)
exportButton = tk.Button(import_export_frame, text="Export Model", command=export_model)
exportButton.grid(column=0, row =1, padx=5, pady=5, sticky='WE')
exportButton.config(state=tk.DISABLED)
# Initial population of the page from the current parameters file.
df = load(params_file)
refresh(df)
def cells_page(root):
    """Build the "Cell Numbers" editor page.

    Lets the user pick a cell-numbers CSV (space-delimited, first line is a
    row count), edit it in a PandasTable, create/clone files, and bind the
    selected file to the public NumData parameter.

    Relies on module globals: cellnums_glob, cellclasses, dataset_folder,
    cellnums_file_prefix/postfix, PandasTable, DialogEntryBox,
    set_public_param/get_public_param, display_app_status.
    """
    column_names = ["Friendly Cell Name", "Cell File Name", "Number of Cells", "Layer Index","Artificial:1 Real:0"]
    top_option_frame = tk.LabelFrame(root, text="File Management")
    table_frame = tk.LabelFrame(root, text="Cell Numbers")
    bottom_option_frame = tk.Frame(root)
    top_option_frame.grid(column=0,row=0,sticky='news',padx=10,pady=5)
    table_frame.grid(column=0,row=1,sticky='news',padx=10,pady=5)
    bottom_option_frame.grid(column=0,row=2)
    pt = PandasTable(table_frame, show_add_row_button=True)
    cellclasses_a = []
    options = glob.glob(cellnums_glob)
    # FIX: was `len(options) is 0` — identity comparison against an int.
    if len(options) == 0:
        options.append('')
    def generate_files_available():
        # Extract class names from cells\class_<name>.hoc paths.
        # NOTE(review): the pattern assumes Windows backslash separators.
        cellclasses_a.clear()
        search = 'cells\\\\class_(.+?).hoc'
        for c in cellclasses:
            m = re.search(search, c)
            if m:
                cellclasses_a.append(m.group(1))
    def update_scrollbar(panda_table_root):
        # Grow the enclosing canvas scrollregion to fit the table.
        panda_table_root.update()
        root.master.configure(scrollregion=(0, 0, panda_table_root.winfo_width()*1.25, panda_table_root.winfo_height()*1.5 ))
    def load(*args):
        # Triggered by the filename trace; no-op while no file is selected.
        # FIX: was `filename.get() is ''` — identity comparison on a str.
        if not filename.get() or filename.get() == '':
            return
        #print ("loading: " + filename.get())
        cellnums_pd = pd.read_csv(filename.get() ,delimiter=' ',\
            skiprows=1,header=None,\
            names = column_names)
        cellnums_pd[column_names[2]] = cellnums_pd[column_names[2]].astype(int)
        cellnums_pd[column_names[3]] = cellnums_pd[column_names[3]].astype(int)
        cellnums_pd[column_names[4]] = cellnums_pd[column_names[4]].astype(int)
        # `d` is defined later in this function body but always before the
        # first non-empty filename.set() can fire this callback.
        pt.set_dataframe(cellnums_pd, options_dict=d, show_numbering=True, show_delete_row=True, first_column_is_header=False)
        pt.pack()
        update_scrollbar(pt.root)
        set_public_param("loaded_cellnums",filename.get())
        display_app_status('Cells file \"'+filename.get()+'\" loaded')
    def save(save_to=None):
        # Write the table back out: first line is the row count, then the
        # space-delimited rows (no header/index).
        pt_df = pt.get_dataframe()
        (nr,nc) = pt_df.shape
        tb = pt_df.to_csv(sep=' ',header=False,index=False)
        if not save_to:
            save_to = filename.get()
        with open(save_to,"w") as file:
            file.write(str(nr)+'\n')
            file.write(tb)
        display_app_status('Cells file \"'+filename.get()+'\" saved')
    def new():
        # Create a fresh, empty cells file and select it.
        if pt.has_changed():
            result = messagebox.askquestion("New", "Are you sure? Data has been changed.", icon='warning')
            if result != 'yes':
                return
        d = DialogEntryBox(root,text="New File Name:",lefttext=os.path.join(dataset_folder, cellnums_file_prefix),righttext=cellnums_file_postfix)
        root.wait_window(d.top)
        if d.confirm==False:
            return
        newfilename = os.path.join(dataset_folder, cellnums_file_prefix+ d.value.get() + cellnums_file_postfix)
        f = open(newfilename,"w+")
        # FIX: was `f.close` (missing call parentheses) — the handle was
        # never actually closed.
        f.close()
        #pt.new()
        generate_files_available()
        # Rebuild the OptionMenu entries to include the new file.
        #https://stackoverflow.com/questions/17580218/changing-the-options-of-a-optionmenu-when-clicking-a-button
        m = fileMenu.children['menu']
        m.delete(0,tk.END)
        newvalues = options
        newvalues.append(newfilename)
        for val in newvalues:
            m.add_command(label=val,command=lambda v=filename,l=val:v.set(l))
        filename.set(newfilename)
        pt.new()
        display_app_status('Cells file \"'+newfilename+'\" created')
    def new_clone():
        # "Save As": write the current table to a new file and select it.
        if pt.has_changed():
            result = messagebox.askquestion("New", "Are you sure? Data has been changed.", icon='warning')
            if result != 'yes':
                return
        d = DialogEntryBox(root,text="New File Name:",lefttext=os.path.join(dataset_folder, cellnums_file_prefix),righttext=cellnums_file_postfix)
        root.wait_window(d.top)
        if d.confirm==False:
            return
        newfilename = os.path.join(dataset_folder,cellnums_file_prefix+ d.value.get() + cellnums_file_postfix)
        f = open(newfilename,"w+")
        f.close()
        save(save_to=newfilename)
        m = fileMenu.children['menu']
        m.delete(0,tk.END)
        newvalues = options
        newvalues.append(newfilename)
        for val in newvalues:
            m.add_command(label=val,command=lambda v=filename,l=val:v.set(l))
        filename.set(newfilename)
        display_app_status('Cells file \"'+filename.get()+'\" created')
        return
    def set_numdata_param():
        # Store just the <name> part of cellnums_<name><postfix> as NumData.
        fn = filename.get()
        search = cellnums_file_prefix+'(.+?)'+cellnums_file_postfix
        m = re.search(search,fn)
        if m:
            fn = m.group(1)
        set_public_param("NumData", fn)
        display_app_status('NumData parameter set to \"'+ filename.get() +'\" in current parameters file')
        return
    def delete_current_file():
        # Placeholder; the Delete button is disabled below.
        return
    def load_numdata_param():
        # Resolve the NumData parameter back to a full path and select it.
        numdat = get_public_param("NumData")
        numdat = os.path.join(dataset_folder, cellnums_file_prefix + numdat + cellnums_file_postfix)
        #filename.set('')
        filename.set(numdat)
    generate_files_available()
    d = defaultdict(list)
    d[1].append(cellclasses_a)
    #Create the choice option panel
    filename = tk.StringVar(top_option_frame)
    filename.trace("w",load)
    #numdat = get_public_param("NumData")
    #numdat = os.path.join(dataset_folder, cellnums_file_prefix + numdat + cellnums_file_postfix)
    filename.set('')
    #filename.set(numdat)
    #filename.set(options[0])
    load()#initial load
    newButton = tk.Button(top_option_frame, text="New", command=new,width=30)
    newButton.grid(column=0, row =0, padx=5,columnspan=2, sticky='WE')
    useButton = tk.Button(top_option_frame, text="Set as NumData", command=set_numdata_param,width=15)
    useButton.grid(column=0, row =1, padx=5, sticky='W')
    loadButton = tk.Button(top_option_frame, text="Load NumData", command=load_numdata_param,width=15)
    loadButton.grid(column=1, row =1, padx=5, sticky='W')
    fileMenu = tk.OptionMenu(top_option_frame, filename, *options)
    fileMenu.grid(column=2, row =0, padx=5, sticky='WE',columnspan=2)
    saveButton = tk.Button(top_option_frame, text="Save", command=save)
    saveButton.grid(column=2, row =1, padx=5, pady=5, sticky='WE')
    newCloneButton = tk.Button(top_option_frame, text="Save As", command=new_clone)
    newCloneButton.grid(column=3, row =1, padx=5, sticky='WE')
    deleteButton = tk.Button(top_option_frame, text="Delete", command=delete_current_file)
    deleteButton.grid(column=4, row =0, padx=5, pady=5, sticky='W')
    deleteButton.config(state=tk.DISABLED)
def connections_page(root):
class connections_adapter(object):
    """Presents one value column of the long-format connections table as a
    pre x post matrix inside a PandasTable, and converts edits back to the
    long format.

    col: index of the value column in the long-format DataFrame
    (0 = pre type, 1 = post type, col = the displayed quantity).
    """
    def __init__(self, root, col, text=''):
        self.root = root
        self.col = col
        # Descriptive caption above the table.
        tk.Label(root, text=text ,fg='blue').pack(anchor='w')
        self.pt = PandasTable(self.root, show_add_row_button=False, allow_sorting=False)
        self.pt.pack()
    def read_internal(self, df, astype=None):
        """Pivot the long-format df into a (pre x post) matrix DataFrame.

        NOTE(review): the reshape assumes df rows are ordered as the full
        pre x post cross product; confirm against the file writer.
        """
        df1 = df[df.columns[[0,1,self.col]]]
        pre = df1[df1.columns[0]].unique()
        pre = pd.DataFrame(pre)
        post = df1[df1.columns[1]].unique()
        vals = df1[df1.columns[2]]
        vals = pd.DataFrame(vals.values.reshape(len(pre),len(post)),columns=post)
        if astype:
            vals = vals.astype(astype)
        df1 = pd.concat([pre,vals],axis=1)
        #df1[df1.columns[self.col]] = df1[df1.columns[self.col]]
        return pd.DataFrame(df1)
    def get_df(self):
        """Flatten the edited matrix back to long format:
        columns [pre, post, value], one row per cell."""
        pt_df = self.pt.get_dataframe()
        (nr,nc) = pt_df.shape
        cols = list(range(1,nc))
        df1 = pt_df[pt_df.columns[cols]]
        data_column = pd.DataFrame(df1.values.reshape(nr*(nc-1),1))#.astype(float)
        post_column = pd.DataFrame(pt_df[pt_df.columns[0]])
        # Repeat each row label once per value column to match the flatten.
        post_column = np.repeat(post_column[post_column.columns[0]],nc-1).reset_index(drop=True)
        pre_column = list(pt_df)
        del pre_column[0]
        pre_column = pd.DataFrame(pre_column*nr)
        df_ret = pd.concat([post_column, pre_column, data_column],axis=1)
        df_ret.columns = range(df_ret.shape[1])
        return df_ret
    def refresh(self, df, astype=None):
        # Re-pivot and redisplay the table.
        self.pt.set_dataframe(self.read_internal(df, astype), show_delete_row=False,\
            show_header=True, show_numbering=False, \
            first_column_is_id=True)
        self.pt.pack()
    def has_changed(self):
        # Delegate dirty-tracking to the underlying table.
        return self.pt.has_changed()
def raise_frame(frame):
    # Bring the given stacked page frame to the front.
    frame.tkraise()
# Layout: file controls on top, then the tabbed connection-data tables.
top_option_frame = tk.LabelFrame(root, text="File Management")
table_frame = tk.LabelFrame(root, text="Connection Data")
table_frame_internal = tk.Frame(table_frame)
table_frame_controls = tk.Frame(table_frame)
bottom_option_frame = tk.LabelFrame(root)
top_option_frame.grid(column=0,row=0,sticky='we',padx=10,pady=5)
table_frame.grid(column=0,row=1,sticky='we',padx=10,pady=5)
table_frame_controls.grid(column=0, row=0, sticky='we')
table_frame_internal.grid(column=0, row=1, sticky='news')
bottom_option_frame.grid(column=0,row=2,sticky='we')
# Three stacked pages, one per displayed quantity; raise_frame() switches.
page2 = tk.Frame(table_frame_internal)
page3 = tk.Frame(table_frame_internal)
page1 = tk.Frame(table_frame_internal)
######################################
cellclasses_a = []
options = glob.glob(connections_glob)
# NOTE(review): `is 0` is an identity test on an int; == 0 is the correct
# comparison (works by CPython small-int caching only).
if len(options) is 0:
    options.append('')
d = defaultdict(list)
d[1].append(cellclasses_a)
# One tab button + adapter per value column of the connections file
# (column 2 = weights, 3 = convergence, 4 = synapses per connection).
tk.Button(table_frame_controls, text='Synaptic Weights', command=lambda:raise_frame(page1)).grid(column=0,row=0,padx=4,pady=4)
text = 'Synaptic weight refers to the strength of a connection between two nodes, corresponding in biology to the influence the firing neuron on another neuron.'
synaptic_weight_page_obj = connections_adapter(page1,2,text=text)
tk.Button(table_frame_controls, text='Convergence', command=lambda:raise_frame(page2)).grid(column=1,row=0,padx=4,pady=4)
text = 'Convergence defines the *total* number of connections to be randomly distributed between the presynaptic type and the postsynaptic type neuron.'
convergence_page_obj = connections_adapter(page2,3,text)#convergence_page(page2)
tk.Button(table_frame_controls, text='Synapses', command=lambda:raise_frame(page3)).grid(column=2,row=0,padx=4,pady=4)
text = 'Synapses per connection to be made.'
synapses_page_obj = connections_adapter(page3,4,text)#synapses_page(page3)
######################################
def generate_files_available():
    # Extract class names from cells\class_<name>.hoc paths.
    # NOTE(review): duplicated from cells_page and assumes Windows
    # backslash separators in `cellclasses`.
    cellclasses_a.clear()
    search = 'cells\\\\class_(.+?).hoc'
    for c in cellclasses:
        m = re.search(search, c)
        if m:
            cellclasses_a.append(m.group(1))
def set_whole_df(df):
    # Push one long-format connections DataFrame into all three adapter
    # tables (counts as unsigned ints), then restack the pages with the
    # weights page on top.
    page1.grid_forget()
    page2.grid_forget()
    page3.grid_forget()
    convergence_page_obj.refresh(df,'uint')
    synapses_page_obj.refresh(df,'uint')
    synaptic_weight_page_obj.refresh(df)
    page2.grid(column=0,row=0,sticky='news')
    page3.grid(column=0,row=0,sticky='news')
    page1.grid(column=0,row=0,sticky='news')
    return
def update_scrollbar(panda_table_root):
    # Grow the enclosing canvas scrollregion to fit the table.
    panda_table_root.update()
    root.master.configure(scrollregion=(0, 0, panda_table_root.winfo_width()*1.25, panda_table_root.winfo_height()*1.5 ))
def load(*args,load_from=None):
    """Read a space-delimited connections file (first line skipped) and
    display it in the three adapter tables.

    Defaults to the currently selected filename; *args absorbs the
    tk variable-trace callback arguments.
    """
    if not load_from:
        # NOTE(review): `is ''` is an identity test on a str — == '' is the
        # correct comparison.
        if not filename.get() or filename.get() is '':
            return
        else:
            load_from = filename.get()
    # NOTE(review): these column names look copy-pasted from the cells
    # file loader; a connections file presumably has pre/post/weight/...
    # columns — verify against the file writer.
    df = pd.read_csv(load_from ,delimiter=' ',\
        skiprows=1,header=None,\
        names = ["Friendly Cell Name", "Cell File Name", "Num Cells", "Layer Index","Artificial:1 Real:0"])
    set_whole_df(df)
    update_scrollbar(synaptic_weight_page_obj.pt.root)
    display_app_status('Connections Data file \"'+filename.get()+'\" loaded')
    return
def get_whole_df():
wei_df = synaptic_weight_page_obj.get_df()
con_df = convergence_page_obj.get_df()
syn_df = synapses_page_obj.get_df()
head_df = pd.DataFrame(wei_df[wei_df.columns[0:2]])
wei_df = pd.DataFrame(wei_df[wei_df.columns[2]]).astype('float')
wei_df.columns = [2]
con_df | |
_string) # /STACK
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
'''
SwitchConverter.__init__(self, table, booltable)
# Singleton converter instances shared by the project generators below.
CLSWITCHES = ClSwitchConverter()
LINKSWITCHES = LinkSwitchConverter()
#-------------------------------------------------------------------------------
def winpath(path):
    '''Return the Windows (backslash-separated) form of a native path.

    The drive letter, if any, is upper-cased; the remaining components are
    re-joined with ntpath so forward slashes become backslashes.
    '''
    drive, remainder = ntpath.splitdrive(path)
    parts = []
    while remainder and remainder != ntpath.sep:
        remainder, tail = ntpath.split(remainder)
        parts.append(tail)
    parts.reverse()
    if remainder:
        # A leading separator survives the loop; keep the path rooted.
        parts.insert(0, remainder)
    return ntpath.join(drive.upper(), *parts)
def makeList(x):
    '''Coerce *x* to a list: falsy values become [], an exact list is
    returned unchanged, and anything else is wrapped in a one-element list.
    (An exact type check is used deliberately so list subclasses are
    wrapped, matching existing callers.)'''
    if not x:
        return []
    if type(x) is list:
        return x
    return [x]
#-------------------------------------------------------------------------------
class Configuration(object):
    '''One variant|platform build configuration.

    Holds the SCons targets and environment used when generating the
    project entries for this configuration; `name` is the MSBuild
    "Variant|Platform" identifier.
    '''
    def __init__(self, variant, platform, target, env):
        self.variant = variant
        self.platform = platform
        self.target = target
        self.env = env
        self.name = '%s|%s' % (variant, platform)
#-------------------------------------------------------------------------------
class Item(object):
    '''Represents a file item in the Solution Explorer.

    The builder name (if any) decides the MSBuild item tag; files without a
    builder are classified by extension.  `node` maps a Configuration to the
    output node built from this item in that configuration.
    '''
    def __init__(self, path, builder):
        self._path = path
        self._builder = builder
        self.node = dict()
        if builder == 'Object':
            self._tag = 'ClCompile'
            self._excluded = False
        elif builder == 'Protoc':
            self._tag = 'CustomBuild'
            self._excluded = False
        else:
            ext = os.path.splitext(self._path)[1]
            if ext in ['.c', '.cc', '.cpp']:
                # A source file no configuration compiles: show it in the
                # tree but exclude it from the build.
                self._tag = 'ClCompile'
                self._excluded = True
            elif ext in ['.h', '.hpp', '.hxx', '.inl', '.inc']:
                self._tag = 'ClInclude'
                self._excluded = False
            else:
                self._tag = 'None'
                self._excluded = False

    def __repr__(self):
        # BUG FIX: the old format string had two placeholders but three
        # arguments (raising TypeError whenever repr() was taken) and passed
        # bound methods instead of values.
        return '<VSProject.Item "%s" %s %s>' % (self._path, self._tag, self.node)

    def path(self):
        return self._path

    def tag(self):
        return self._tag

    def builder(self):
        return self._builder

    def is_compiled(self):
        # Only items produced by the 'Object' builder are compiled.
        return self._builder == 'Object'

    def is_excluded(self):
        return self._excluded
#-------------------------------------------------------------------------------
def _guid(seed, name=None):
    '''Return a deterministic GUID-formatted string derived from seed (+ name).

    The digest is the MD5 of seed followed by name, so identical inputs give
    identical GUIDs -- this keeps project/filter GUIDs stable between
    regenerations.  MD5 is used for stability, not security.
    '''
    m = hashlib.md5()
    # BUG FIX: hashlib requires bytes on Python 3; encode text inputs.
    # On Python 2, str is already bytes and passes through untouched.
    if not isinstance(seed, bytes):
        seed = seed.encode('utf-8')
    m.update(seed)
    if name:
        if not isinstance(name, bytes):
            name = name.encode('utf-8')
        m.update(name)
    d = m.hexdigest().upper()
    guid = "{%s-%s-%s-%s-%s}" % (d[:8], d[8:12], d[12:16], d[16:20], d[20:32])
    return guid
class _ProjectGenerator(object):
    '''Generates a project file for Visual Studio 2013.

    Walks the SCons dependency graph for every configured variant|platform
    pair, classifies each file into an Item, then emits MSBuild XML by
    substituting module-level V14DSP* templates with "% locals()" -- so many
    local variable names in the write* methods are load-bearing even when
    they look unused.
    '''
    def __init__(self, project_node, filters_node, env):
        # VSPROJECT_CONFIGS is mandatory: the Configuration objects this
        # project is generated for.
        try:
            self.configs = xsorted(env['VSPROJECT_CONFIGS'])
        except KeyError:
            raise ValueError ('Missing VSPROJECT_CONFIGS')
        self.root_dir = os.getcwd()
        self.root_dirs = [os.path.abspath(x) for x in makeList(env['VSPROJECT_ROOT_DIRS'])]
        self.project_dir = os.path.dirname(os.path.abspath(str(project_node)))
        self.project_node = project_node
        self.project_file = None   # opened in build()
        self.filters_node = filters_node
        self.filters_file = None   # opened in build()
        # Derive the GUID from the project file name so it stays stable
        # across regenerations (Visual Studio tracks projects by GUID).
        self.guid = _guid(os.path.basename(str(self.project_node)))
        self.buildItemList(env)

    def buildItemList(self, env):
        '''Build the Item set associated with the configurations.'''
        items = {}
        def _walk(target, items, prefix=''):
            # NOTE: 'config' is captured from the enclosing for-loop below
            # (late binding): each call sees the configuration currently
            # being walked.
            # Absolute paths live outside the project tree; skip them.
            if os.path.isabs(str(target)):
                return
            if target.has_builder():
                builder = target.get_builder().get_name(env)
                bsources = target.get_binfo().bsources
                if builder == 'Program':
                    # The program node itself is not a project item; only
                    # its sources are.
                    for child in bsources:
                        _walk(child, items, prefix+' ')
                else:
                    for child in bsources:
                        # Record which configuration produces which output
                        # node from this source file.
                        item = items.setdefault(str(child), Item(str(child), builder=builder))
                        item.node[config] = target
                        _walk(child, items, prefix+' ')
            # Headers and other scanned dependencies become builder-less
            # items (classified by extension in Item.__init__).
            for child in target.children(scan=1):
                if not os.path.isabs(str(child)):
                    item = items.setdefault(str(child), Item(str(child), builder=None))
                    _walk(child, items, prefix+' ')
        for config in self.configs:
            targets = config.target
            for target in targets:
                _walk(target, items)
        self.items = xsorted(items.values())

    def makeListTag(self, items, prefix, tag, attrs, inherit=True):
        '''Builds an XML tag string from a list of items. If items is
        empty, then the returned string is empty.

        With inherit=True the MSBuild metadata-inheritance token is
        appended (e.g. ";%(AdditionalIncludeDirectories)") so per-file
        settings extend rather than replace project-level ones.
        '''
        if not items:
            return ''
        s = '%(prefix)s<%(tag)s%(attrs)s>' % locals()
        s += ';'.join(items)
        if inherit:
            # '%%' escapes to a literal '%', producing ";%(<tag>)".
            s += ';%%(%(tag)s)' % locals()
        s += '</%(tag)s>\r\n' % locals()
        return s

    def relPaths(self, paths):
        # Windows-style paths relative to the project directory; absolute
        # input paths are dropped entirely.
        items = []
        for path in paths:
            if not os.path.isabs(path):
                items.append(winpath(os.path.relpath(path, self.project_dir)))
        return items

    def extraRelPaths(self, paths, base):
        # Relative forms of the entries in 'paths' that are not already in
        # 'base' (used for per-file include-directory overrides).
        extras = []
        for path in paths:
            if not path in base:
                extras.append(path)
        return self.relPaths(extras)

    def writeHeader(self):
        '''Write the project preamble plus per-configuration compiler and
        linker settings.

        Locals such as encoding, project_guid, name, variant, platform,
        use_debug_libs, out_dir and int_dir are consumed by the
        "% locals()" template substitutions below.
        '''
        global clSwitches  # NOTE(review): never referenced in this method -- looks vestigial; confirm.
        encoding = 'utf-8'
        project_guid = self.guid
        name = os.path.splitext(os.path.basename(str(self.project_node)))[0]
        f = self.project_file
        f.write(UnicodeByteMarker)
        f.write(V14DSPHeader % locals())
        f.write(V14DSPGlobals % locals())
        f.write(' <ItemGroup Label="ProjectConfigurations">\r\n')
        for config in self.configs:
            variant = config.variant
            platform = config.platform
            f.write(V14DSPProjectConfiguration % locals())
        f.write(' </ItemGroup>\r\n')
        f.write(' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r\n')
        for config in self.configs:
            variant = config.variant
            platform = config.platform
            use_debug_libs = variant == 'Debug'
            # Output/intermediate dirs derive from the location of this
            # configuration's first target, relative to the project dir.
            variant_dir = os.path.relpath(os.path.dirname(
                config.target[0].get_abspath()), self.project_dir)
            out_dir = winpath(variant_dir) + ntpath.sep
            int_dir = winpath(ntpath.join(variant_dir, 'src')) + ntpath.sep
            f.write(V14DSPPropertyGroup % locals())
        f.write(' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r\n')
        f.write(' <ImportGroup Label="ExtensionSettings" />\r\n')
        for config in self.configs:
            variant = config.variant
            platform = config.platform
            f.write(V14DSPImportGroup % locals())
        f.write(' <PropertyGroup Label="UserMacros" />\r\n')
        for config in self.configs:
            variant = config.variant
            platform = config.platform
            f.write(V14DSPItemDefinitionGroup % locals())
            # Cl options
            f.write(' <ClCompile>\r\n')
            f.write(
                ' <PreprocessorDefinitions>%s%%(PreprocessorDefinitions)</PreprocessorDefinitions>\r\n' % (
                    itemList(config.env['CPPDEFINES'], ';')))
            props = ''
            props += self.makeListTag(self.relPaths(xsorted(config.env['CPPPATH'])),
                ' ', 'AdditionalIncludeDirectories', '', True)
            f.write(props)
            # Translate SCons CCFLAGS into MSBuild <ClCompile> child tags.
            f.write(CLSWITCHES.getXml(xsorted(config.env['CCFLAGS']), ' '))
            f.write(' </ClCompile>\r\n')
            f.write(' <Link>\r\n')
            props = ''
            props += self.makeListTag(xsorted(config.env['LIBS']),
                ' ', 'AdditionalDependencies', '', True)
            try:
                props += self.makeListTag(self.relPaths(xsorted(config.env['LIBPATH'])),
                    ' ', 'AdditionalLibraryDirectories', '', True)
            except:
                # LIBPATH may be absent for some environments; best-effort.
                pass
            f.write(props)
            f.write(LINKSWITCHES.getXml(xsorted(config.env['LINKFLAGS']), ' '))
            f.write(' </Link>\r\n')
            f.write(' </ItemDefinitionGroup>\r\n')

    def writeProject(self):
        '''Write the full .vcxproj: header, per-item entries, and footer.'''
        self.writeHeader()
        f = self.project_file
        self.project_file.write(' <ItemGroup>\r\n')
        for item in self.items:
            path = winpath(os.path.relpath(item.path(), self.project_dir))
            tag = item.tag()
            props = ''
            if item.builder() == 'Object':
                props = ''
                # Exclude the source from configurations that don't build it.
                for config in self.configs:
                    name = config.name
                    variant = config.variant
                    platform = config.platform
                    if not config in item.node:
                        props += \
                            ''' <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">True</ExcludedFromBuild>\r\n''' % locals()
                # Per-file include directories beyond the project-level ones.
                for config, output in xsorted(item.node.items()):
                    name = config.name
                    env = output.get_build_env()
                    variant = config.variant
                    platform = config.platform
                    props += self.makeListTag(self.extraRelPaths(xsorted(env['CPPPATH']), config.env['CPPPATH']),
                        ' ', 'AdditionalIncludeDirectories',
                        ''' Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'"''' % locals(),
                        True)
            elif item.is_excluded():
                props = ' <ExcludedFromBuild>True</ExcludedFromBuild>\r\n'
            elif item.builder() == 'Protoc':
                # Emit a CustomBuild step invoking protoc per configuration.
                for config, output in xsorted(item.node.items()):
                    name = config.name
                    out_dir = os.path.relpath(os.path.dirname(str(output)), self.project_dir)
                    cpp_out = winpath(out_dir)
                    out_parts = out_dir.split(os.sep)
                    out_parts.append(os.path.splitext(os.path.basename(item.path()))[0])
                    base_out = ntpath.join(*out_parts)
                    props += V14CustomBuildProtoc % locals()
            f.write(' <%(tag)s Include="%(path)s">\r\n' % locals())
            f.write(props)
            f.write(' </%(tag)s>\r\n' % locals())
        f.write(' </ItemGroup>\r\n')
        f.write(
            ' <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r\n'
            ' <ImportGroup Label="ExtensionTargets">\r\n'
            ' </ImportGroup>\r\n'
            '</Project>\r\n')

    def writeFilters(self):
        '''Write the .vcxproj.filters file mirroring the on-disk layout.'''
        def getGroup(abspath):
            # Map a file's directory to a filter path: relative to the first
            # matching VSPROJECT_ROOT_DIRS entry, else just the leaf dir name.
            abspath = os.path.dirname(abspath)
            for d in self.root_dirs:
                common = os.path.commonprefix([abspath, d])
                if common == d:
                    return winpath(os.path.relpath(abspath, common))
            return winpath(os.path.split(abspath)[1])
        f = self.filters_file
        f.write(UnicodeByteMarker)
        f.write(V14DSPFiltersHeader)
        f.write(' <ItemGroup>\r\n')
        # Every ancestor of every group needs its own <Filter> entry.
        groups = set()
        for item in self.items:
            group = getGroup(os.path.abspath(item.path()))
            while group != '':
                groups.add(group)
                group = ntpath.split(group)[0]
        for group in xsorted(groups):
            # Filter GUIDs are seeded with the project GUID so they are
            # stable per-project.
            guid = _guid(self.guid, group)
            f.write(
                ' <Filter Include="%(group)s">\r\n'
                ' <UniqueIdentifier>%(guid)s</UniqueIdentifier>\r\n'
                ' </Filter>\r\n' % locals())
        f.write(' </ItemGroup>\r\n')
        f.write(' <ItemGroup>\r\n')
        for item in self.items:
            path = os.path.abspath(item.path())
            group = getGroup(path)
            path = winpath(os.path.relpath(path, self.project_dir))
            tag = item.tag()
            f.write (
                ' <%(tag)s Include="%(path)s">\r\n'
                ' <Filter>%(group)s</Filter>\r\n'
                ' </%(tag)s>\r\n' % locals())
        f.write(' </ItemGroup>\r\n')
        f.write('</Project>\r\n')

    def build(self):
        '''Open the output files, write project and filters, close them.

        NOTE: "except IOError, detail" is Python 2 syntax; this module is
        not Python 3 compatible as written.
        '''
        try:
            self.project_file = open(str(self.project_node), 'wb')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' +
                str(self.project_node) + '" for writing:' + str(detail))
        try:
            self.filters_file = open(str(self.filters_node), 'wb')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' +
                str(self.filters_node) + '" for writing:' + str(detail))
        self.writeProject()
        self.writeFilters()
        self.project_file.close()
        self.filters_file.close()
#-------------------------------------------------------------------------------
class _SolutionGenerator(object):
    '''Placeholder for Visual Studio solution (.sln) generation.

    Mirrors the _ProjectGenerator protocol (construct, then build()) so
    buildProject can treat both generators uniformly; actual .sln output
    is not implemented yet.
    '''
    def __init__(self, slnfile, projfile, env):
        pass
    def build(self):
        pass
#-------------------------------------------------------------------------------
# Generate the VS2013 project
def buildProject(target, source, env):
    '''SCons action: generate the .vcxproj and .filters files (target[0]
    and target[1]); when auto_build_solution is enabled (the default), a
    third target receives the solution.

    Raises ValueError if the target list length does not match the
    auto_build_solution setting.
    '''
    auto_solution = env.get('auto_build_solution', 1)
    if auto_solution and len(target) != 3:
        raise ValueError ("Unexpected len(target) != 3")
    if not auto_solution and len(target) != 2:
        raise ValueError ("Unexpected len(target) != 2")
    _ProjectGenerator (target[0], target[1], env).build()
    if auto_solution:
        _SolutionGenerator (target[2], target[0], env).build()
def projectEmitter(target, source, env):
if len(target) != 1:
raise ValueError ("Exactly one target must be specified")
# If source is unspecified this | |
calling this method.
"""
callback_value = run_callback(USER_CB, "user_current_voucher_level", ctx=self.ctx, user=self)
if callback_value != self.current_voucher_level:
self.current_voucher_level = callback_value
with db.conn(self.ctx) as ctx:
self.send_chips(ctx, self)
def modify_struct(self, struct, is_full_struct):
    """Augment the client-bound struct; the full (non-delta) struct also
    carries the user-settings URLs the client needs."""
    if not is_full_struct:
        return struct
    struct['urls'] = {
        'settings_notifications':urls.user_settings_notifications(),
        'update_viewed_alerts_at':urls.user_update_viewed_alerts_at()
    }
    return struct
def load_gamestate_row_cache(self):
    """
    Ask the user object to load and cache all of the gamestate data so that only a few
    queries are executed instead of a large number of queries for every collection lazy loader.
    For example, targets have 4 collection like fields, each of which executes a query, so as the number
    of targets in the gamestate grows, the number of queries multiply by 4 (at least).
    The results are cached in u.ctx.row_cache and used in the lazy loader functions.
    """
    # Be sure the ctx is wrapped so there is a row_cache.
    with db.conn(self.ctx) as ctx:
        # Targets are cached keyed by rover_id; the remaining target
        # collections (sounds, image rects, images, metadata) are keyed by
        # target_id, matching what their lazy loaders look up.
        ctx.row_cache.set_rows_from_query(ctx, lambda r: [get_uuid(r['rover_id'])],
            "gamestate/select_targets_by_user_id", user_id=self.user_id)
        ctx.row_cache.set_rows_from_query(ctx, lambda r: [get_uuid(r['target_id'])],
            "gamestate/select_target_sounds_by_user_id", user_id=self.user_id)
        ctx.row_cache.set_rows_from_query(ctx, lambda r: [get_uuid(r['target_id'])],
            "gamestate/select_target_image_rects_by_user_id", user_id=self.user_id)
        ctx.row_cache.set_rows_from_query(ctx, lambda r: [get_uuid(r['target_id'])],
            "gamestate/select_target_images_by_user_id", user_id=self.user_id)
        ctx.row_cache.set_rows_from_query(ctx, lambda r: [get_uuid(r['target_id'])],
            "gamestate/select_target_metadata_by_user_id", user_id=self.user_id)
def species_count(self, only_subspecies_id=None):
    '''
    Returns a Counter object of the number of times a given species_id was
    detected in all targets for this user.
    Only targets that have a picture contribute to the tally.
    :param only_subspecies_id: int, if included, limit counts to this subspecies type.
    '''
    count = Counter()
    # NOTE: Python 2-only dict iteration (itervalues); rovers/targets are
    # project collection types so their iteration API is preserved as-is.
    for rover in self.rovers.itervalues():
        for target in rover.targets.itervalues():
            if not target.picture:
                continue
            # Counter addition merges the per-species tallies across targets.
            count += target.species_count(only_subspecies_id=only_subspecies_id)
    return count
def subspecies_count_for_species(self, species_id):
    '''
    Returns a Counter object of the number of times a given subspecies_id was
    observed for the indicated species.
    Only targets that have a picture contribute to the tally.
    :param species_id: int, the id of the species that we're interested in.
    '''
    count = Counter()
    # NOTE: Python 2-only dict iteration (itervalues); rovers/targets are
    # project collection types so their iteration API is preserved as-is.
    for rover in self.rovers.itervalues():
        for target in rover.targets.itervalues():
            if not target.picture:
                continue
            count += target.subspecies_count_for_species(species_id=species_id)
    return count
def all_picture_targets(self, user_created_only=False):
    """ Returns a list of all targets with pictures for this user, processed or not, arrived at or not,
    sorted by arrival_time (oldest first).
    If user_created_only is True then only targets created by the user and not any automated process,
    like initial rover targets, will be returned. """
    pictures = []
    # NOTE: Python 2-only iteration (itervalues); rovers is a project
    # collection type, so the call is preserved as-is.
    for r in self.rovers.itervalues():
        pictures += r.targets.pictures()
    if user_created_only:
        pictures = [t for t in pictures if t.was_user_created()]
    return sorted(pictures, key=lambda t: t.arrival_time)
def all_arrived_picture_targets(self):
    """ Returns a list of all targets with pictures for this user which are processed and
    have been arrived at sorted newest first, by arrival_time. """
    pictures = []
    # NOTE: Python 2-only iteration (itervalues); processed_pictures()
    # already filters to processed/arrived targets.
    for r in self.rovers.itervalues():
        pictures += r.targets.processed_pictures()
    return sorted(pictures, key=lambda t: t.arrival_time, reverse=True)
def all_image_rects(self):
    """ Returns a list of all image_rects captured by this user, across
    every picture target (processed or not). """
    rects = []
    for target in self.all_picture_targets():
        rects.extend(target.image_rects.values())
    return rects
def all_image_rects_with_species(self):
    """ Returns a list of all image_rects that identified at least one species. """
    matching = []
    for rect in self.all_image_rects():
        if rect.has_species():
            matching.append(rect)
    return matching
def get_edmodo_teacher_credentials(self):
    """ In order for a user to be authorized to access classroom data, they must be
    a teacher that's authenticated with Edmodo. For other users, return None.
    Return the access_token, user_token, and a boolean that indicates whether
    to use the sandbox servers.
    """
    # Only Edmodo-authenticated accounts can have teacher credentials.
    if self.auth == 'EDMO':
        with db.conn(self.ctx) as ctx:
            try:
                r = db.row(ctx, 'get_edmodo_teacher_credentials', user_id=self.user_id)
                return {'access_token':r['access_token'], 'user_token':r['user_token'], 'sandbox':r['sandbox']==1}
            except db.TooFewRowsError:
                # Edmodo user without stored credentials: not a teacher.
                return None
    return None
## Crosslink Methods.
# Use the following set of routines to create links between various UI screens. Note that
# these routines don't actually create an anchor. Rather, they create spans with all the
# data necessary for creating an anchor in the ce4.ui.update_crosslinks routine on the client.
# This two-step process allows static template data to be sent from the server while the
# client can adapt the links based on the available gamestate data.
def crosslink_region(self, linked_text, *region_names):
    '''
    Pack the data necessary to create a link to a map region on the client.
    :param linked_text: str, The text that will be linked
    :param *region_names: a list of region names. On the client, the first valid region
        in the list will be linked to.
    '''
    for candidate in region_names:
        assert region_module.is_known_region_id(candidate)
    joined = ":".join(region_names)
    pieces = ["<span class='ce4_crosslink ce4_crosslink_region' data-region-type='",
              joined, "'>", linked_text, "</span>"]
    return "".join(pieces)
def crosslink_message(self, linked_text, msg_type):
    '''
    Pack the data necessary to create a link to a message on the client.
    :param linked_text: str, The text that will be linked
    :param msg_type: str, A valid message type.
    '''
    assert message.is_known_msg_type(msg_type)
    pieces = ["<span class='ce4_crosslink ce4_crosslink_message' data-msg-type='",
              msg_type, "'>", linked_text, "</span>"]
    return "".join(pieces)
def crosslink_mission(self, linked_text, mission_definition):
    '''
    Pack the data necessary to create a link to a mission on the client.
    :param linked_text: str, The text that will be linked
    :param mission_definition: str, A valid mission definition.
    '''
    assert mission.is_known_mission_definition(mission_definition)
    pieces = ["<span class='ce4_crosslink ce4_crosslink_mission' data-mission-definition='",
              mission_definition, "'>", linked_text, "</span>"]
    return "".join(pieces)
def crosslink_catalog(self, linked_text, species_key):
    '''
    Pack the data necessary to create a link to the species within the catalog on the client.
    :param linked_text: str, The text that will be linked
    :param species_key: str, A valid species key, e.g., SPC_PLANT015.
    '''
    assert species.is_known_species_key(species_key)
    pieces = ["<span class='ce4_crosslink ce4_crosslink_catalog' data-species-key='",
              species_key, "'>", linked_text, "</span>"]
    return "".join(pieces)
def crosslink_store(self, linked_text):
    '''
    Pack the data necessary to create a link to the store from text in a template.
    :param linked_text: str, The text that will be linked
    '''
    pieces = ["<span class='ce4_crosslink ce4_crosslink_store'>", linked_text, "</span>"]
    return "".join(pieces)
def crosslink_profile(self, linked_text):
    '''
    Pack the data necessary to create a link to the profile from text in a template.
    :param linked_text: str, The text that will be linked
    '''
    pieces = ["<span class='ce4_crosslink ce4_crosslink_profile'>", linked_text, "</span>"]
    return "".join(pieces)
def crosslink_map(self, linked_text):
    '''
    Pack the data necessary to create a link to the map from text in a template.
    :param linked_text: str, The text that will be linked
    '''
    pieces = ["<span class='ce4_crosslink ce4_crosslink_map'>", linked_text, "</span>"]
    return "".join(pieces)
## End Crosslink Methods.
## Public Profile Methods.
# Methods in this namespace are intended to be used in the public profile page or similar user facing page.
def profile_approx_time_since_activated(self):
    """ Returns a user friendly description of how long ago this user
    was activated (first able to make a move). """
    elapsed = self.time_since_activated
    return utils.format_time_approx(elapsed)
def profile_approx_time_since_last_accessed(self):
    """ Returns a user friendly description of how long ago this user
    was last 'active' in the game. """
    elapsed = self.time_since_last_accessed
    return utils.format_time_approx(elapsed)
def profile_total_distance_traveled_rounded(self):
    """ Total rover travel distance, rounded to one decimal for display. """
    total = self.total_distance_traveled()
    return round(total, 1)
## End Public Profile Methods.
## Lazy load attribute methods.
def _load_user_attributes(self):
    # Lazy loader: fetch and memoize the user's base row; subsequent calls
    # return the cached row without hitting the database.
    if self._user_attributes is None:
        with db.conn(self.ctx) as ctx:
            self._user_attributes = db.row(ctx, "get_user_row", user_id=self.user_id)
    return self._user_attributes
def _load_user_metadata(self):
    # Lazy loader: flatten the user's metadata rows into a {key: value} dict.
    with db.conn(self.ctx) as ctx:
        rows = db.rows(ctx, "select_user_metadata", user_id=self.user_id)
        return dict(((r['key'], r['value']) for r in rows))
def _load_password_hash(self):
    # Lazy loader: only password-authenticated ('PASS') accounts have a
    # stored hash; externally-authenticated accounts return None.
    if self.auth == "PASS":
        with db.conn(self.ctx) as ctx:
            r = db.row(ctx, 'get_user_password_by_user_id', user_id=self.user_id)
            return r['password']
    else:
        return None
def _load_activity_alert_frequency(self):
    # Lazy loader: how often this user wants activity-alert notifications,
    # read from the per-user notifications row.
    with db.conn(self.ctx) as ctx:
        r = db.row(ctx, 'notifications/get_users_notification_by_user_id', user_id=self.user_id)
        return r['activity_alert_frequency']
def _load_inviter_attributes(self):
    """ Lazy loader: URL data for the user who invited this user, or an
    empty dict when there is no inviter. """
    if self.inviter_id is None:
        return {}
    return {'url_public_profile': urls.user_public_profile(self.inviter_id)}
def _load_inviter_user(self):
    """ Lazy loader: the User object for this user's inviter, or None when
    there is no inviter. """
    if self.inviter_id is None:
        return None
    return user_from_context(self.ctx, self.inviter_id)
def _load_current_voucher_level(self):
    # Lazy loader: the voucher level is owned by the voucher/payment module,
    # so defer to its registered callback rather than querying directly.
    return run_callback(USER_CB, "user_current_voucher_level", ctx=self.ctx, user=self)
def _load_shop(self):
    # Lazy loader: wrap this user's shop row in a Shop domain object.
    with db.conn(self.ctx) as ctx:
        row = db.row(ctx, "shop/get_user_shop", user_id=self.user_id)
        return shop.Shop(**row)
## Lazy load collection methods.
def _load_rovers(self):
    # Lazy loader: raw rover rows for this user (wrapping into domain
    # objects happens in the caller).
    with db.conn(self.ctx) as ctx:
        rows = db.rows(ctx, 'select_rovers_by_user_id', user_id=self.user_id)
        return rows
def _load_messages(self):
    # Lazy loader: raw message rows for this user (wrapping into domain
    # objects happens in the caller).
    with db.conn(self.ctx) as ctx:
        rows = db.rows(ctx, 'select_messages_by_user_id', user_id=self.user_id)
        return rows
def _load_missions(self):
with db.conn(self.ctx) as ctx:
rows = db.rows(ctx, 'select_missions_by_user_id', user_id=self.user_id)
missions = [mission.Mission(user=self, **row) for row in rows]
missions_dict = dict([(m.get_id(), m) for m in missions])
# Wire up the mission hierarchy.
for m in missions_dict.itervalues():
if m.parent_id is not None:
mission_parent = missions_dict.get(m.parent_id)
if not mission_parent:
logger.error("Data fail, user %s has sub-mission %s without parent %s",
self.user_id, m.mission_id, m.parent_id)
if mission_parent == m:
logger.error("Data fail, mission %s is its own parent.", m.mission_id)
m.set_silent(mission_parent = mission_parent)
mission_parent.parts.append(m)
# Keep the children | |
# <gh_stars>0  (dataset artifact; commented out so the module parses)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from typing import Optional, Iterable, Tuple, Set, Type, List, Dict, Callable, Union
from collections import defaultdict, Counter
from functools import lru_cache
import warnings
import networkx as nx
import numpy as np
import math
import random
from .lexical_knowledge import HyponymyDataset
class BasicTaxonomy(object):
    """Hyponymy taxonomy backed by a NetworkX directed acyclic graph.

    Edges of the DAG are the *direct* (distance == 1.0) hypernym->hyponym
    pairs; the complete training pair set is recorded separately so that
    hypernyms()/hyponyms() also reflect non-direct training relations.

    NOTE(review): the @lru_cache-decorated instance methods key their caches
    on `self` and keep instances alive for the cache lifetime (ruff B019);
    the `graph` argument of those helpers serves only as part of the cache
    key.  Left unchanged to preserve behavior.
    """
    # When True, out-of-vocabulary pairs raise instead of scoring None.
    _DEBUG_MODE = False

    def __init__(self, hyponymy_dataset: HyponymyDataset):
        # build taxonomy as a DAG
        # NOTE: the dataset is iterated twice -- assumes it is re-iterable
        # (not a one-shot generator); TODO confirm for new dataset types.
        iter_hyponymy_pairs = ((record["hypernym"], record["hyponym"]) for record in hyponymy_dataset if record["distance"] == 1.0)
        self.build_directed_acyclic_graph(iter_hyponymy_pairs)
        iter_hyponymy_pairs = ((record["hypernym"], record["hyponym"]) for record in hyponymy_dataset)
        self.record_ancestors_and_descendants(iter_hyponymy_pairs)
        self._random_number_generator = self._random_number_generator_iterator(max_value=self.n_nodes_max)

    @property
    def dag(self):
        return self._dag

    @property
    def nodes(self):
        return self._nodes

    @property
    def n_nodes_max(self):
        return len(self.nodes)

    @property
    def trainset_ancestors(self):
        return self._trainset_ancestors

    @property
    def trainset_descendants(self):
        return self._trainset_descendants

    def build_directed_acyclic_graph(self, iter_hyponymy_pairs: Iterable[Tuple[str, str]]):
        """
        build taxonomy as a DAG based on the set of hyponymy relations
        @param iter_hyponymy_pairs: iterable of the tuple (hypernym, hyponym)
        """
        graph = nx.DiGraph()
        graph.add_edges_from(iter_hyponymy_pairs)
        self._dag = graph
        self._cache_root_nodes = {}
        self._nodes = tuple(graph.nodes)

    def record_ancestors_and_descendants(self, iter_hyponymy_pairs):
        """Record the full training pair set (direct and non-direct pairs)."""
        self._trainset_ancestors = defaultdict(set)
        self._trainset_descendants = defaultdict(set)
        for hypernym, hyponym in iter_hyponymy_pairs:
            self._trainset_ancestors[hyponym].add(hypernym)
            self._trainset_descendants[hypernym].add(hyponym)

    def _random_number_generator_iterator(self, max_value: int):
        # Endless stream of indices in [0, max_value); each epoch is a fresh
        # shuffled permutation, so every index appears once per epoch.
        seeds = np.arange(max_value)
        while True:
            np.random.shuffle(seeds)
            for idx in seeds:
                yield idx

    def _find_root_nodes(self, graph) -> Set[str]:
        """Return (and cache) the set of nodes with no incoming edges."""
        # Cache key mixes the id-based hash with the node count so a graph
        # grown in place is not served a stale root set.
        hash_value = graph.__hash__() + graph.number_of_nodes()
        if hash_value in self._cache_root_nodes:
            return self._cache_root_nodes[hash_value]
        root_nodes = set([k for k, v in graph.in_degree() if v == 0])
        self._cache_root_nodes[hash_value] = root_nodes
        return root_nodes

    def dag_ancestors(self, entity):
        return self._dag_ancestors(entity, self.dag)

    @lru_cache(maxsize=1000000)
    def _dag_ancestors(self, entity, graph):
        return nx.ancestors(graph, entity)

    def hypernyms(self, entity):
        """All DAG ancestors plus training-set hypernyms of *entity*."""
        return self.dag_ancestors(entity).union(self.trainset_ancestors.get(entity, set()))

    def hyponyms(self, entity):
        """All DAG descendants plus training-set hyponyms of *entity*."""
        return nx.descendants(self.dag, entity).union(self.trainset_descendants.get(entity, set()))

    def hypernyms_and_hyponyms_and_self(self, entity):
        return self._hypernyms_and_hyponyms_and_self(entity, self.dag)

    @lru_cache(maxsize=1000000)
    def _hypernyms_and_hyponyms_and_self(self, entity, graph):
        return self.hyponyms(entity) | self.hypernyms(entity) | {entity}

    def hyponyms_and_self(self, entity):
        return self._hyponyms_and_self(entity, self.dag)

    @lru_cache(maxsize=1000000)
    def _hyponyms_and_self(self, entity, graph):
        return self.hyponyms(entity) | {entity}

    def co_hyponyms(self, entity):
        """Entities that share a root with *entity* but are neither its
        hypernyms, its hyponyms, nor itself."""
        graph = self.dag
        if entity not in graph:
            # FIX: return an empty *set* (was an empty dict) so the return
            # type matches the non-empty case below.
            return set()
        direct_root_nodes = nx.ancestors(graph, entity) & self._find_root_nodes(graph)
        branches = map(lambda entity: nx.descendants(self.dag, entity), direct_root_nodes)
        branches = set().union(*branches)
        co_hyponyms = branches - self.hypernyms_and_hyponyms_and_self(entity)
        return co_hyponyms

    def depth(self, entity, offset=1, not_exists=None, **kwargs):
        """Depth of *entity*: the longest shortest-path from any root node,
        plus *offset*; *not_exists* for out-of-vocabulary entities."""
        return self._depth(entity, self.dag, offset, not_exists)

    @lru_cache(maxsize=1000000)
    def _depth(self, entity, graph, offset=1, not_exists=None):
        if entity not in graph:
            return not_exists
        direct_root_nodes = self.dag_ancestors(entity) & self._find_root_nodes(graph)
        if len(direct_root_nodes) == 0:
            # The entity is itself a root (or isolated).
            depth = 0
        else:
            f_path_length_to_entity = lambda source: nx.shortest_path_length(graph, source, entity)
            depth = max(map(f_path_length_to_entity, direct_root_nodes))
        return depth + offset

    def hyponymy_score_slow(self, hypernym, hyponym, dtype: Type = float):
        """Reference implementation of hyponymy_score via the NetworkX LCA
        (slower).  Raises ValueError for out-of-vocabulary nodes."""
        graph = self.dag
        if hypernym not in graph:
            raise ValueError(f"invalid node is specified: {hypernym}")
        if hyponym not in graph:
            raise ValueError(f"invalid node is specified: {hyponym}")
        lowest_common_ancestor = nx.lowest_common_ancestor(graph, hypernym, hyponym)
        # 1) hypernym is the ancestor of the hyponym (=hyponymy)
        if nx.has_path(graph, hypernym, hyponym):
            dist = nx.shortest_path_length(graph, hypernym, hyponym)
        # 2) hyponym is the ancestor of the hypernym (=reverse hyponymy)
        elif nx.has_path(graph, hyponym, hypernym):
            dist = - nx.shortest_path_length(graph, hyponym, hypernym)
        # 3) these two entities are the co-hyponym
        elif lowest_common_ancestor is not None:
            dist = - nx.shortest_path_length(graph, lowest_common_ancestor, hypernym)
        # 4) other
        else:
            dist = - self.depth(hypernym)
        return dtype(dist)

    def hyponymy_score(self, hypernym, hyponym, dtype: Type = float, **kwargs):
        """Signed hyponymy distance between the pair; positive when hypernym
        is truly above hyponym.  Returns None for out-of-vocabulary nodes
        unless _DEBUG_MODE, in which case ValueError is raised."""
        graph = self.dag
        if (hypernym not in graph) or (hyponym not in graph):
            if self._DEBUG_MODE:
                raise ValueError(f"invalid node is specified: {hypernym}, {hyponym}")
            else:
                return None
        ancestors_hypernym = self.dag_ancestors(hypernym)
        ancestors_hyponym = self.dag_ancestors(hyponym)
        ancestors_common = ancestors_hypernym.intersection(ancestors_hyponym)
        # 1) hypernym is the ancestor of the hyponym
        if hypernym in ancestors_hyponym:
            dist = nx.shortest_path_length(graph, hypernym, hyponym)
        # 2) hyponym is the ancestor of the hypernym (=reverse hyponymy)
        elif hyponym in ancestors_hypernym:
            dist = - nx.shortest_path_length(graph, hyponym, hypernym)
        # 3) not connected
        elif len(ancestors_common) == 0:
            dist = - self.depth(hypernym)
        # 4) these two entities are the co-hyponym (cases 3/4 are exhaustive,
        # so a plain else also removes the "dist may be unbound" hazard).
        else:
            depth_lca = max(map(self.depth, ancestors_common))
            dist = depth_lca - self.depth(hypernym)
        return dtype(dist)

    def lowest_common_ancestor_depth(self, hypernym, hyponym, offset: int = 1, dtype: Type = float, **kwargs):
        """Depth of the pair's lowest common ancestor (0 when unrelated).
        Raises ValueError for out-of-vocabulary nodes."""
        graph = self.dag
        if hypernym not in graph:
            raise ValueError(f"invalid node is specified: {hypernym}")
        if hyponym not in graph:
            raise ValueError(f"invalid node is specified: {hyponym}")
        ancestors_hypernym = self.dag_ancestors(hypernym)
        ancestors_hyponym = self.dag_ancestors(hyponym)
        ancestors_common = ancestors_hypernym.intersection(ancestors_hyponym)
        # 1) hypernym is the ancestor of the hyponym: LCA is hypernym
        if hypernym in ancestors_hyponym:
            depth_lca = self.depth(entity=hypernym, offset=offset)
        # 2) hyponym is the ancestor of the hypernym (=reverse hyponymy): LCA is hyponym
        elif hyponym in ancestors_hypernym:
            depth_lca = self.depth(entity=hyponym, offset=offset)
        # 3) not connected -> LCA is empty
        elif len(ancestors_common) == 0:
            depth_lca = 0
        # 4) co-hyponyms: LCA is the deepest common ancestor
        else:
            lst_depth = (self.depth(entity=common, offset=offset) for common in ancestors_common)
            depth_lca = max(lst_depth)
        return dtype(depth_lca)

    def sample_non_hyponymy(self, entity, candidates: Optional[Iterable[str]] = None,
                            size: int = 1, exclude_hypernyms: bool = True) -> List[str]:
        """Sample (with replacement) entities that are NOT hyponyms of
        *entity* (and optionally not hypernyms either).

        NOTE(review): despite the List[str] annotation, a tuple is returned
        on the success path; kept as-is for compatibility.
        """
        graph = self.dag
        if entity not in graph:
            return []
        if exclude_hypernyms:
            non_candidates = self.hypernyms_and_hyponyms_and_self(entity)
        else:
            non_candidates = self.hyponyms_and_self(entity)
        candidates = self.nodes if candidates is None else tuple(set(candidates).intersection(set(self.nodes)))
        if len(candidates) - len(non_candidates) <= 0:
            return []
        elif len(non_candidates)/len(candidates) >= 0.9:
            # Rejection sampling would mostly reject; shrink the pool instead.
            candidates = tuple(set(candidates) - non_candidates)
        elif len(candidates) < size:
            # Tile the pool so one pass over the index stream can yield
            # `size` samples.
            candidates = (candidates)*(math.ceil(size/len(candidates)))
        # sampling with replacement
        sampled = tuple()
        n_candidates = len(candidates)
        for rnd_idx in self._random_number_generator:
            sampled_new = candidates[rnd_idx % n_candidates]
            if sampled_new in non_candidates:
                continue
            sampled = sampled + (sampled_new,)
            if len(sampled) >= size:
                break
        return sampled

    def sample_random_hyponyms(self, entity: str,
                               candidates: Optional[Iterable[str]] = None,
                               size: int = 1, exclude_hypernyms: bool = True, **kwargs):
        """Negative sampling: (entity, non-hyponym, score) triples."""
        lst_non_hyponymy_entities = self.sample_non_hyponymy(entity=entity, candidates=candidates,
                                                            size=size, exclude_hypernyms=exclude_hypernyms)
        lst_ret = [(entity, hyponym, self.hyponymy_score(entity, hyponym)) for hyponym in lst_non_hyponymy_entities]
        return lst_ret

    def sample_random_hypernyms(self, entity: str,
                                candidates: Optional[Iterable[str]] = None,
                                size: int = 1, exclude_hypernyms: bool = True, **kwargs):
        """Negative sampling: (non-hypernym, entity, score) triples."""
        lst_non_hyponymy_entities = self.sample_non_hyponymy(entity=entity, candidates=candidates,
                                                            size=size, exclude_hypernyms=exclude_hypernyms)
        lst_ret = [(hypernym, entity, self.hyponymy_score(hypernym, entity)) for hypernym in lst_non_hyponymy_entities]
        return lst_ret

    def is_hyponymy_relation(self, hypernym, hyponym, include_reverse_hyponymy: bool = True, not_exists=None):
        """True when the pair is in a (possibly reverse) hyponymy relation;
        *not_exists* for out-of-vocabulary nodes."""
        graph = self.dag
        if (hypernym not in graph) or (hyponym not in graph):
            return not_exists
        if include_reverse_hyponymy:
            candidates = self.hyponyms(hypernym) | self.hypernyms(hypernym)
        else:
            candidates = self.hyponyms(hypernym)
        ret = hyponym in candidates
        return ret

    def sample_random_co_hyponyms(self, hypernym: str, hyponym: str, size: int = 1, break_probability: float = 0.8, **kwargs):
        """Sample up to *size* co-hyponymy triples by random downward walks
        starting from *hypernym* while avoiding *hyponym*'s subtree."""
        graph = self.dag
        lst_co_hyponymy = []
        if (hypernym not in graph) or (hyponym not in graph):
            return lst_co_hyponymy
        if not nx.has_path(graph, source=hypernym, target=hyponym):
            return lst_co_hyponymy
        for _ in range(size):
            co_hyponymy_triple = self._sample_random_co_hyponymy(hypernym, hyponym, break_probability)
            if co_hyponymy_triple is not None:
                lst_co_hyponymy.append(co_hyponymy_triple)
        return lst_co_hyponymy

    def _sample_random_co_hyponymy(self, hypernym: str, hyponym: str, break_probability: float) -> Tuple[str, str, float]:
        """Random walk downward from *hypernym*, preferring nodes off the
        path to *hyponym*; returns one (node, hyponym, score) triple or None
        when no off-path child is reachable."""
        graph = self.dag
        shortest_path = self.hypernyms(hyponym) - self.hypernyms(hypernym)
        non_candidates = self.hyponyms_and_self(hyponym)
        children = set(graph.successors(hypernym)) - non_candidates
        hyponymy_score = None
        while len(children) > 0:
            # FIX: random.sample() no longer accepts a set (TypeError since
            # Python 3.11); pick uniformly from a materialized tuple instead.
            node = random.choice(tuple(children))
            if node not in shortest_path:
                hyponymy_score = -1 if hyponymy_score is None else hyponymy_score - 1
                q = random.uniform(0, 1)
                if q <= break_probability:
                    break
            # update children: continue the walk from the chosen child.
            children = set(graph.successors(node)) - non_candidates
        if hyponymy_score is None:
            return None
        else:
            hyponymy_score = self.hyponymy_score(hypernym=node, hyponym=hyponym)
        return (node, hyponym, hyponymy_score)
class WordNetTaxonomy(BasicTaxonomy):
def __init__(self, hyponymy_dataset: Optional[HyponymyDataset] = None):
    """Build one taxonomy DAG per part-of-speech from a hyponymy dataset.

    NOTE(review): the default is None but the loop below iterates
    `hyponymy_dataset` unconditionally, so calling with None raises
    TypeError -- confirm whether a guard is intended.
    """
    # build taxonomy as for each part-of-speech tags as DAG
    dict_iter_hyponymy_pairs = defaultdict(list)
    dict_iter_trainset_pairs = defaultdict(list)
    # Group (hypernym, hyponym) pairs by part-of-speech tag. Every pair is
    # recorded for the trainset index; only direct pairs (distance == 1.0)
    # become DAG edges.
    for record in hyponymy_dataset:
        entity_type = record["pos"]
        entity_hyper = record["hypernym"]
        entity_hypo = record["hyponym"]
        dict_iter_trainset_pairs[entity_type].append((entity_hyper, entity_hypo))
        if record["distance"] == 1.0:
            dict_iter_hyponymy_pairs[entity_type].append((entity_hyper, entity_hypo))
    self.build_directed_acyclic_graph(dict_iter_hyponymy_pairs)
    self.record_ancestors_and_descendants(dict_iter_trainset_pairs)
    # n_nodes_max / _random_number_generator_iterator presumably come from
    # the BasicTaxonomy parent class -- TODO confirm
    self._random_number_generator = self._random_number_generator_iterator(max_value=self.n_nodes_max)
def build_directed_acyclic_graph(self, dict_iter_hyponymy_pairs: Dict[str, Iterable[Tuple[str, str]]]):
    """Create one directed graph per entity type from (hypernym, hyponym) edges.

    Also resets the active-entity-type state, the root-node cache, and the
    per-type node tuples.
    """
    self._dag = {}
    for entity_type, edge_iter in dict_iter_hyponymy_pairs.items():
        print(f"building graph. entity type: {entity_type}")
        digraph = nx.DiGraph()
        digraph.add_edges_from(edge_iter)
        # NOTE: acyclicity is not asserted here (the check was disabled).
        self._dag[entity_type] = digraph
    self._active_entity_type = None
    self._cache_root_nodes = {}
    self._nodes = {etype: tuple(graph.nodes) for etype, graph in self._dag.items()}
def record_ancestors_and_descendants(self, dict_iter_hyponymy_pairs):
    """Index the direct ancestors and descendants of every entity, per entity type."""
    ancestors = defaultdict(lambda: defaultdict(set))
    descendants = defaultdict(lambda: defaultdict(set))
    for entity_type, pairs in dict_iter_hyponymy_pairs.items():
        for hypernym, hyponym in pairs:
            ancestors[entity_type][hyponym].add(hypernym)
            descendants[entity_type][hypernym].add(hyponym)
    self._trainset_ancestors = ancestors
    self._trainset_descendants = descendants
def depth(self, entity, offset=1, not_exists=None, **kwargs):
    """Depth of `entity` after activating the entity type given by kwargs' part_of_speech."""
    self.ACTIVE_ENTITY_TYPE = kwargs.get("part_of_speech")
    return super().depth(entity, offset, not_exists)
def lowest_common_ancestor_depth(self, hypernym, hyponym, offset: int =1, dtype: Type = float, **kwargs):
    """LCA depth of the pair after activating the entity type given by kwargs' part_of_speech."""
    self.ACTIVE_ENTITY_TYPE = kwargs.get("part_of_speech")
    return super().lowest_common_ancestor_depth(hypernym, hyponym, offset, dtype)
def hyponymy_score_slow(self, hypernym, hyponym, dtype: Type = float, not_exists=None, **kwargs):
    """Exact hyponymy score; only valid when the active graph is a DAG."""
    self.ACTIVE_ENTITY_TYPE = kwargs.get("part_of_speech")
    if not nx.is_directed_acyclic_graph(self.dag):
        raise NotImplementedError(f"you can't use this method.")
    return super().hyponymy_score_slow(hypernym, hyponym, dtype)
def hyponymy_score(self, hypernym, hyponym, dtype: Type = float, **kwargs):
self.ACTIVE_ENTITY_TYPE = kwargs.get("part_of_speech", None)
return super().hyponymy_score(hypernym, | |
<reponame>elcarrion06/mcedit2<filename>src/mceditlib/multi_block.py<gh_stars>100-1000
import numpy
from mceditlib import relight
from mceditlib.blocktypes import BlockType
from mceditlib.fakechunklevel import GetBlocksResult
import logging
log = logging.getLogger(__name__)
def chunkPosArray(x, z):
    """
    Pack the 4-byte chunk coordinates derived from block coordinate arrays
    x and z into a single array of 8-byte integers, one per position.

    :type x: ndarray
    :type z: ndarray
    """
    cx = x >> 4
    cz = z >> 4
    packed = numpy.zeros(cx.shape, 'i8')
    # view the int64 buffer as pairs of int32 and fill (cx, cz) in place
    halves = packed.view('i4')
    halves.shape = cx.shape + (2,)
    halves[..., 0] = cx
    halves[..., 1] = cz
    return packed
def decodeChunkPos(cPos):
    """
    Inverse of chunkPosArray: view the packed 8-byte chunk positions as an
    array of (cx, cz) 4-byte integer pairs, without copying.
    """
    view = cPos.view('i4')
    # BUGFIX: use floor division -- under Python 3, `/` yields a float,
    # which is not a valid shape dimension.
    view.shape = view.shape[:-1] + (view.shape[-1] // 2, 2)
    return view
def coords_by_chunk(x, y, z):
    """
    Split the x, y, and z coordinate arrays according to chunk location. Return an iterator over tuples of the chunk's
    cx and cz coordinates and arrays of the x y z coordinates located in that chunk.
    Performance note: Implicitly sorts the elements of an intermediate array. May perform better when input arrays are
    short.
    :param x: Array of x coordinates
    :param y: Array of y coordinates
    :param z: Array of z coordinates
    :return: iterator over (cx, cz, x, y, z, mask) tuples, where x, y, z are chunk-local coordinate arrays,
        cx and cz are integers, and mask selects this chunk's positions in the broadcast input
    """
    # pack each (cx, cz) chunk coordinate pair into one int64 key
    cPos = chunkPosArray(x, z)
    # reduce x and z to chunk-local (0..15) coordinates
    x = x & 0xf
    z = z & 0xf
    x, y, z = numpy.broadcast_arrays(x, y, z)
    # unique chunk keys, plus each input position's index into them
    elements, inverse = numpy.unique(cPos, return_inverse=True)
    view = decodeChunkPos(elements)
    cxs, czs = view[..., 0], view[..., 1]
    for index, cx in numpy.ndenumerate(cxs):
        cz = czs[index]
        # positions belonging to this chunk
        # NOTE(review): `index` is a tuple; the comparison relies on
        # broadcasting against `inverse` -- confirm behavior for >1-d input.
        localMask = inverse == index
        localMask.shape = x.shape
        localX = x[localMask]
        localY = y[localMask]
        localZ = z[localMask]
        yield (cx, cz, localX, localY, localZ, localMask)
def getBlocks(world, x, y, z,
              return_Blocks=True,
              return_Data=False,
              return_BlockLight=False,
              return_SkyLight=False,
              return_Biomes=False):
    """
    High performance method for accessing multiple blocks and their lighting info.
    Requires `world` have a `getChunk(cx, cz)` method
    Requires that method return an object that has a `getBlocks` method that takes the same
    parameters as this one.
    Return the blocks at the requested locations as one or more ndarrays. Returns a
    tuple of ndarrays, one for each `return_` parameter with a True value, in the order
    that the parameters appear. Returns the blocks as raw Block IDs, which you can convert
    to BlockType instances using world.blocktype
    The `x`, `y`, and `z` parameters must have the same shape.
    :param x: Array of x coordinates
    :param y: Array of y coordinates
    :param z: Array of z coordinates
    :param return_Blocks:
    :param return_Data:
    :param return_BlockLight:
    :param return_SkyLight:
    :param return_Biomes:
    :return: GetBlocksResult
    """
    x = numpy.atleast_1d(x)
    y = numpy.atleast_1d(y)
    z = numpy.atleast_1d(z)
    # Preallocate one zero-filled output array per requested attribute;
    # each chunk fills in its own positions via the mask below.
    Blocks = Data = BlockLight = SkyLight = Biomes = None
    if return_Blocks:
        Blocks = numpy.zeros(shape=x.shape, dtype='uint16')
    if return_Data:
        Data = numpy.zeros(shape=x.shape, dtype='uint8')
    if return_BlockLight:
        BlockLight = numpy.zeros(shape=x.shape, dtype='uint8')
    if return_SkyLight:
        SkyLight = numpy.zeros(shape=x.shape, dtype='uint8')
    if return_Biomes:
        Biomes = numpy.zeros(shape=x.shape, dtype='uint8')
    result = GetBlocksResult(Blocks, Data, BlockLight, SkyLight, Biomes)
    # NOTE: x, y, z are deliberately rebound to per-chunk coordinate arrays
    # here; the preallocated result arrays above keep the full shape.
    for cx, cz, x, y, z, mask in coords_by_chunk(x, y, z):
        if not world.containsChunk(cx, cz):
            # missing chunks leave zeros in the output
            continue
        chunk = world.getChunk(cx, cz)
        arrays = getChunkBlocks(chunk, x, y, z,
                                return_Blocks,
                                return_Data,
                                return_BlockLight,
                                return_SkyLight,
                                return_Biomes)
        # copy each per-chunk array into the matching output positions
        for dest, source in zip(result, arrays):
            if dest is not None and source is not None:
                dest[mask] = source
    return result
def getChunkBlocks(chunk, x, y, z,
                   return_Blocks=True,
                   return_Data=False,
                   return_BlockLight=False,
                   return_SkyLight=False,
                   return_Biomes=False):
    """
    High performance method for accessing multiple blocks and their lighting info.
    Parameters are identical to getBlocks; x, y, z are coordinate arrays
    within this chunk's column.
    :type x: numpy.ndarray
    :type y: numpy.ndarray
    :type z: numpy.ndarray
    """
    Blocks = Data = BlockLight = SkyLight = Biomes = None
    if return_Blocks:
        Blocks = numpy.zeros(x.shape, 'uint16')
    if return_Data:
        Data = numpy.zeros(x.shape, 'uint8')
    if return_BlockLight:
        BlockLight = numpy.zeros(x.shape, 'uint8')
    if return_SkyLight:
        SkyLight = numpy.zeros(x.shape, 'uint8')
    if return_Biomes:
        Biomes = numpy.zeros(x.shape, 'uint8')
        # Biomes are stored per column: index by (x, z) only.
        # (This copy was previously duplicated after constructing the result;
        # result.Biomes is this same array, so a single copy suffices.)
        if hasattr(chunk, 'Biomes'):
            Biomes[:] = chunk.Biomes[x, z]
    result = GetBlocksResult(Blocks, Data, BlockLight, SkyLight, Biomes)
    for cy in chunk.sectionPositions():
        section = chunk.getSection(cy)
        if section is None:
            continue
        # positions whose y coordinate falls inside this 16-block section
        sectionMask = (y >> 4) == cy
        if not sectionMask.any():
            continue
        # boolean indexing yields copies, so the in-place &= below does not
        # modify the caller's arrays
        sx = x[sectionMask]
        sy = y[sectionMask]
        sz = z[sectionMask]
        sx &= 0xf
        sy &= 0xf
        sz &= 0xf
        arrays = getSectionBlocks(section, sx, sy, sz,
                                  return_Blocks,
                                  return_Data,
                                  return_BlockLight,
                                  return_SkyLight)
        for dest, src in zip(result, arrays):
            # src may be None when the section lacks the attribute; guard it
            # (matching getBlocks) instead of assigning None into the array.
            if dest is not None and src is not None:
                dest[sectionMask] = src
    return result
def getSectionBlocks(section, x, y, z,
                     return_Blocks=True,
                     return_Data=False,
                     return_BlockLight=False,
                     return_SkyLight=False,
                     ):
    """
    Return the blocks at the given positions. Returns a list of one or more arrays depending on which `return_*`
    parameters were passed; requested attributes the section lacks yield None.
    x, y, z must be in the range 0..15
    """
    requested = (('Blocks', return_Blocks),
                 ('Data', return_Data),
                 ('BlockLight', return_BlockLight),
                 ('SkyLight', return_SkyLight))
    return_arrays = []
    for attr, wanted in requested:
        if wanted and hasattr(section, attr):
            # section arrays are indexed [y, z, x]
            return_arrays.append(getattr(section, attr)[y, z, x])
        else:
            return_arrays.append(None)
    return return_arrays
def maskArray(array, mask):
    """Return array[mask]; pass through None and single-element (broadcastable) arrays unchanged."""
    if array is None:
        return None
    return array if array.shape == (1,) else array[mask]
def atleast_3d(ary):
    """
    Like numpy.atleast_3d, but new axes are appended at the end rather than
    placed on either side of a 1-d array's axis.
    """
    ary = numpy.asanyarray(ary)
    ndim = len(ary.shape)
    if ndim >= 3:
        return ary
    if ndim == 0:
        return ary.reshape(1, 1, 1)
    if ndim == 1:
        return ary[:, None, None]
    return ary[:, :, None]
def setBlocks(dimension, x, y, z,
              Blocks=None,
              Data=None,  # Deprecate soon
              BlockLight=None,
              SkyLight=None,
              Biomes=None,
              updateLights=True):
    """
    Change the blocks at the given positions. All parameters must be arrays of the same shape, or single values.
    `Blocks` may also hold BlockType instances or block-name strings, which
    are converted to IDs (and meta values) via dimension.blocktypes.
    NOTE(review): uses `basestring`, so this module targets Python 2.
    :type dimension: mceditlib.worldeditor.WorldEditorDimension
    """
    x = atleast_3d(x)
    y = atleast_3d(y)
    z = atleast_3d(z)
    # Normalize Blocks: accept a single BlockType/string/int or a sequence,
    # and split BlockType entries into separate ID and meta lists.
    if Blocks is not None:
        if isinstance(Blocks, (BlockType, basestring)):
            Blocks = [Blocks]
        _Blocks = []
        _Data = []
        if isinstance(Blocks, int):
            Blocks = [Blocks]
        for block in Blocks:
            if isinstance(block, basestring):
                block = dimension.blocktypes[block]
            if isinstance(block, BlockType):
                _Blocks.append(block.ID)
                _Data.append(block.meta)
            else:
                _Blocks.append(block)
        if len(_Blocks):
            Blocks = _Blocks
        if len(_Data):
            # NOTE(review): any BlockType in Blocks replaces caller-supplied Data
            Data = _Data
    if Blocks is None and Data is None:
        # nothing that affects lighting is changing
        updateLights = False
    Blocks = atleast_3d(Blocks) if Blocks is not None else None
    Data = atleast_3d(Data) if Data is not None else None
    BlockLight = atleast_3d(BlockLight) if BlockLight is not None else None
    SkyLight = atleast_3d(SkyLight) if SkyLight is not None else None
    Biomes = atleast_3d(Biomes) if Biomes is not None else None
    # Broadcast the coordinates and all supplied value arrays to one shape.
    arrays_to_broadcast = [x, y, z]
    if Blocks is not None:
        arrays_to_broadcast.append(Blocks)
    if Data is not None:
        arrays_to_broadcast.append(Data)
    if BlockLight is not None:
        arrays_to_broadcast.append(BlockLight)
    if SkyLight is not None:
        arrays_to_broadcast.append(SkyLight)
    if Biomes is not None:
        arrays_to_broadcast.append(Biomes)
    if any(a.size == 0 for a in (x, y, z)):
        return
    broadcasted_arrays = numpy.broadcast_arrays(*arrays_to_broadcast)
    x, y, z = broadcasted_arrays[:3]
    broadcasted_arrays = broadcasted_arrays[3:]
    # Pop the broadcast arrays back off in the same order they were appended.
    if Blocks is not None:
        Blocks, broadcasted_arrays = broadcasted_arrays[0], broadcasted_arrays[1:]
    if Data is not None:
        Data, broadcasted_arrays = broadcasted_arrays[0], broadcasted_arrays[1:]
    if BlockLight is not None:
        BlockLight, broadcasted_arrays = broadcasted_arrays[0], broadcasted_arrays[1:]
    if SkyLight is not None:
        SkyLight, broadcasted_arrays = broadcasted_arrays[0], broadcasted_arrays[1:]
    if Biomes is not None:
        Biomes, broadcasted_arrays = broadcasted_arrays[0], broadcasted_arrays[1:]
    # Write each chunk's slice of the data, creating chunks as needed.
    for cx, cz, sx, sy, sz, mask in coords_by_chunk(x, y, z):
        chunk = dimension.getChunk(cx, cz, create=True)
        if chunk is None:
            continue
        setChunkBlocks(chunk, sx, sy, sz,
                       maskArray(Blocks, mask),
                       maskArray(Data, mask),
                       maskArray(BlockLight, mask),
                       maskArray(SkyLight, mask),
                       maskArray(Biomes, mask))
        chunk.dirty = True
    if updateLights:
        relight.updateLightsByCoord(dimension, x, y, z)
def setChunkBlocks(chunk, x, y, z,
                   Blocks=None,
                   Data=None,
                   BlockLight=None,
                   SkyLight=None,
                   Biomes=None,
                   ):
    """
    Change the blocks at the given positions. All parameters must be arrays of the same shape, or single values.
    Chunk must have a `world` attribute and `getSection` function.
    Coordinates are reduced to section-local (0..15) values per 16-block section.
    """
    for cy in chunk.sectionPositions():
        section = chunk.getSection(cy)
        if section is None:
            continue
        # positions whose y coordinate falls inside this section
        sectionMask = (y >> 4 == cy)
        sx = x[sectionMask]
        if not len(sx):
            continue
        sy = y[sectionMask]
        sz = z[sectionMask]
        # boolean indexing yields copies, so &= does not alter the inputs
        sx &= 0xf
        sy &= 0xf
        sz &= 0xf
        setSectionBlocks(section, sx, sy, sz,
                         maskArray(Blocks, sectionMask),
                         maskArray(Data, sectionMask),
                         maskArray(BlockLight, sectionMask),
                         maskArray(SkyLight, sectionMask))
    # Biomes are stored per column: index by (x, z) only.
    if Biomes is not None and hasattr(chunk, 'Biomes'):
        chunk.Biomes[x & 0xf, z & 0xf] = Biomes
def setSectionBlocks(section, x, y, z,
Blocks=None,
Data=None,
BlockLight=None,
SkyLight=None,
):
"""
Change the blocks at the given positions. All parameters must be arrays of the same shape, or single | |
<filename>experiment.py
import json
import numpy as np
class RealVectorDPMechanism:
    """Base interface for differentially-private mechanisms on real-valued vectors."""

    def execute(self, query_output: np.ndarray, epsilon: float) -> np.ndarray:
        """Privatize `query_output` under privacy budget `epsilon`. Subclasses must override."""
        raise NotImplementedError()

    @staticmethod
    def infer_sensitivity_from_data(input_data_points: np.ndarray) -> float:
        """
        Compute sensitivity from the dimensionality of the data.
        Each value lies in [0, 1], so sensitivity = (max - min) * n_dims = n_dims.
        :param input_data_points: 2-dim array
        :return: sensitivity
        """
        n_dims = input_data_points.shape[1]
        print("Sensitivity\n", n_dims)
        return n_dims

    @property
    def name(self) -> str:
        """Class name, used as the key in the results dict."""
        return type(self).__name__
class NoPrivacyMechanism(RealVectorDPMechanism):
    """
    Identity mechanism: the input is returned unchanged, i.e. no privatization
    at all, which must lead to infinite privacy loss.
    """
    def execute(self, query_output: np.ndarray, epsilon: float) -> np.ndarray:
        return query_output
class CompletelyRandomMechanism(RealVectorDPMechanism):
    """
    Ignores the input entirely: every output value is an independent fair coin
    flip (0 or 1 with probability 0.5 each), regardless of the query output.
    """
    def execute(self, query_output: np.ndarray, epsilon: float) -> np.ndarray:
        coin_flips = np.random.rand(*query_output.shape)
        return coin_flips.round().astype(int)
class LaplaceMechanism(RealVectorDPMechanism):
    """Standard Laplace mechanism; sensitivity is inferred from the data dimensionality."""

    @staticmethod
    def laplace_inv_cdf_correct(no_samples: int) -> np.ndarray:
        """Draw `no_samples` zero-mean, unit-scale Laplace samples via the inverse CDF."""
        u = np.random.uniform(0, 1, no_samples)
        centered = u - 0.5
        return - np.sign(centered) * np.log(1 - 2 * np.abs(centered))

    def execute(self, input_data_points: np.ndarray, epsilon: float) -> np.ndarray:
        """Add Laplace noise with scale b = sensitivity / epsilon to every value."""
        # expect a 2-dim array: a list of n-dimensional data points
        assert input_data_points.ndim == 2
        sensitivity = self.infer_sensitivity_from_data(input_data_points)
        # sample one flat noise vector and reshape it to match the input
        noise = self.laplace_inv_cdf_correct(input_data_points.size).reshape(
            input_data_points.shape
        )
        b = sensitivity / epsilon
        # rescale and add; broadcasting handles the element-wise addition
        return input_data_points + b * noise
class LaplaceMechanismWrongSensitivity(LaplaceMechanism):
    # NOTE(review): despite the variable name, super().execute() returns the
    # *privatized data points*, not a sensitivity; this class therefore halves
    # the noisy output rather than using a smaller sensitivity. The comment
    # below also says "10-times smaller" while the factor is 0.5. Presumably a
    # deliberately broken baseline -- confirm intent.
    def execute(self, input_data_points: np.ndarray, epsilon: float) -> np.ndarray:
        sensitivity = super().execute(input_data_points, epsilon)
        # make it 10-times smaller
        return 0.5 * sensitivity
class LaplaceMechanismADePT(LaplaceMechanism):
    """Laplace mechanism using ADePT's fixed sensitivity of 1 instead of the inferred one."""

    def execute(self, input_data_points: np.ndarray, epsilon: float) -> np.ndarray:
        # expect a 2-dim array: a list of n-dimensional data points
        assert input_data_points.ndim == 2
        # ADePT's sensitivity was 2C -- from -1, to +1; so it's 1
        sensitivity = 1
        # sample one flat noise vector and reshape it to match the input
        noise = self.laplace_inv_cdf_correct(input_data_points.size).reshape(
            input_data_points.shape
        )
        # scale b = sensitivity over epsilon
        b = sensitivity / epsilon
        # rescale and add; broadcasting handles the element-wise addition
        return input_data_points + b * noise
class BeigiEtAlLaplaceMechanism(RealVectorDPMechanism):
    # Reproduces the Beigi et al. (DPText) sampling code, including its
    # apparent flaw -- see laplace_inv_cdf_dptext below.
    @staticmethod
    def laplace_inv_cdf_dptext(no_samples: int) -> np.ndarray:
        # _x is uniform in (0, 1), so np.sign(_x) is always +1 and, for
        # _x > 0.5, the log argument (1 - 2*|_x|) is negative, producing NaN.
        _x = np.random.uniform(0, 1, no_samples)
        result = - np.sign(_x) * np.log(1 - 2 * np.abs(_x))
        # print("with nan\n", result)
        # but we need to fix NaNs -- replace with zero, i.e. roughly half of
        # the samples receive no noise at all (in-place, copy=False)
        np.nan_to_num(result, False, nan=0.0)
        # print("after nan_to_0\n", result)
        return result
    def execute(self, input_data_points: np.ndarray, epsilon: float) -> np.ndarray:
        # we will sample a 1-d vector of list_size x dimensionality, will be "reshaped" back
        total_number_of_samples_required = input_data_points.size
        # draw zero-mean samples using inverse CDF and reshape to match input data
        zero_mean_samples = self.laplace_inv_cdf_dptext(total_number_of_samples_required).reshape(
            input_data_points.shape
        )
        # sensitivity is computed from the dimensionality of the data:
        # min value is 0, max is 1, so sensitivity = (max-min) * n-dims
        sensitivity = self.infer_sensitivity_from_data(input_data_points)
        # scale b = sensitivity over epsilon
        b = sensitivity / epsilon
        # rescale and add
        return input_data_points + b * zero_mean_samples
class Experiment:
    """
    Empirically estimates the privacy loss of a RealVectorDPMechanism by
    privatizing two maximally-different datasets (all-zeros D and all-ones D'),
    reconstructing the originals from the noisy output, and measuring how
    distinguishable the reconstructions are.
    """

    @staticmethod
    def privatize_data_points(input_data_points: np.ndarray, epsilon: float,
                              mechanism: RealVectorDPMechanism) -> np.ndarray:
        """Run `mechanism` and post-process its output back into a binary matrix."""
        # two dimensions: axis 0 are individual data points; axis 1 each data point's secret values vector
        assert input_data_points.ndim == 2
        print("Input data points\n", input_data_points)
        privatized = mechanism.execute(input_data_points, epsilon)
        print("Privatized\n", privatized)
        # double check output from DP mechanism
        assert input_data_points.shape == privatized.shape
        # round and truncate: anything below 0.5 becomes 0, the rest becomes 1
        truncated = np.where(privatized < 0.5, 0, 1)
        print("Truncated\n", truncated)
        # assert all values are either ones or zeros
        assert np.min(truncated) >= 0
        assert np.max(truncated) <= 1
        return truncated

    @staticmethod
    def compute_loss_from_frequency_matrix(frequency_matrix: np.ndarray) -> float:
        """
        Estimate the empirical privacy loss max |ln Pr(D|Y) - ln Pr(D'|Y)| from a
        2x2 contingency matrix (rows: true dataset D/D'; columns: reconstruction Y=0/Y=1).
        """
        print(frequency_matrix)
        # for all Y, estimate Pr(D | Y) / Pr(D' | Y)
        marginal_sum_y0 = np.sum(frequency_matrix[:, 0])
        marginal_sum_y1 = np.sum(frequency_matrix[:, 1])
        cond_d_given_y0 = frequency_matrix[0, 0] / marginal_sum_y0  # this is also precision for class D
        cond_d_prime_given_y0 = frequency_matrix[1, 0] / marginal_sum_y0  # this is also precision for class D'
        print("Some cond probs", cond_d_given_y0, cond_d_prime_given_y0)
        if cond_d_prime_given_y0 == 0:
            # 100% precision of reconstruction = infinity privacy loss
            # (np.inf, not np.infty: the latter was removed in numpy 2.0)
            privacy_loss_y0 = np.inf
        else:
            privacy_loss_y0 = np.max([np.log(cond_d_given_y0) - np.log(cond_d_prime_given_y0),
                                      (np.log(cond_d_prime_given_y0) - np.log(cond_d_given_y0))])
        print("privacy loss Y0", privacy_loss_y0)
        cond_d_given_y1 = frequency_matrix[0, 1] / marginal_sum_y1
        cond_d_prime_given_y1 = frequency_matrix[1, 1] / marginal_sum_y1
        if cond_d_given_y1 == 0:
            # 100% precision of reconstruction = infinity privacy loss
            privacy_loss_y1 = np.inf
        else:
            privacy_loss_y1 = np.max([np.log(cond_d_given_y1) - np.log(cond_d_prime_given_y1),
                                      (np.log(cond_d_prime_given_y1) - np.log(cond_d_given_y1))])
        print("privacy loss Y1", privacy_loss_y1)
        empirical_loss = np.max([privacy_loss_y0, privacy_loss_y1])
        print("Estimated empirical loss", empirical_loss)
        return empirical_loss

    @staticmethod
    def estimate_empirical_loss(dimensionality, epsilon, mechanism: RealVectorDPMechanism,
                                number_of_repeats=10_000_000) -> float:
        """
        Monte-Carlo estimate of the empirical privacy loss.
        :param dimensionality: width of each secret binary vector
        :param epsilon: privacy budget handed to the mechanism
        :param mechanism: mechanism under test
        :param number_of_repeats: instead of repeating N times with the same instance, we create
        them at once to utilize numpy vectorization
        :return: empirical privacy loss
        """
        frequency_matrix = np.zeros(shape=(2, 2), dtype=int)
        actual_number_of_data_points = number_of_repeats
        repetitions = 1
        # 10M samples at once is fine for small dimensions, but for bigger
        # dimensions it would fail on memory -- fall back to 10 batches of 1M
        if dimensionality > 16:
            repetitions = 10
            actual_number_of_data_points = 1_000_000
        for _ in range(repetitions):
            # create D and D' (where D = 0; and D' = 1)
            d_data_points = np.zeros(shape=(actual_number_of_data_points, dimensionality))
            dp_d_output = Experiment.privatize_data_points(d_data_points, epsilon, mechanism)
            # turn DP output (mixed 0 and 1) into all-zeros or all-ones -- reconstruct the original vector
            reconstructed_d = Experiment.reconstruct_original_vector(dp_d_output)
            # count correctly reconstructed zeros and ones.
            # BUGFIX: accumulate with += -- the previous plain assignment
            # discarded every batch but the last when repetitions > 1.
            frequency_matrix[0, 0] += np.sum(np.where(reconstructed_d == 0, 1, 0))
            frequency_matrix[0, 1] += np.sum(np.where(reconstructed_d == 1, 1, 0))
            d_prime_data_points = np.ones(shape=(actual_number_of_data_points, dimensionality))
            dp_d_prime_output = Experiment.privatize_data_points(d_prime_data_points, epsilon, mechanism)
            reconstructed_d_prime = Experiment.reconstruct_original_vector(dp_d_prime_output)
            frequency_matrix[1, 0] += np.sum(np.where(reconstructed_d_prime == 0, 1, 0))
            frequency_matrix[1, 1] += np.sum(np.where(reconstructed_d_prime == 1, 1, 0))
        return Experiment.compute_loss_from_frequency_matrix(frequency_matrix)

    @staticmethod
    def save_results(results_dict: dict) -> None:
        """Write the nested results dict to results.json (pretty-printed)."""
        # the with-statement already flushes and closes the file on exit
        with open('results.json', 'w') as f:
            json.dump(results_dict, f, indent=4)

    @staticmethod
    def main():
        """Run the full grid of mechanisms x epsilons x dimensionalities and save results."""
        np.random.seed(1234)
        # collecting results into JSON: {mechanism: {epsilon: {dimensionality: loss}}}
        results_dict = {}
        for mechanism in (LaplaceMechanismADePT(), LaplaceMechanismWrongSensitivity(),
                          LaplaceMechanism(), NoPrivacyMechanism(),
                          CompletelyRandomMechanism(),
                          BeigiEtAlLaplaceMechanism(),):
            for epsilon in (0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0):
                for dimensionality in (1, 2, 8, 32, 64, 128):
                    empirical_loss = Experiment.estimate_empirical_loss(dimensionality, epsilon, mechanism)
                    # update dict
                    per_mechanism = results_dict.setdefault(mechanism.name, dict())
                    per_mechanism.setdefault(epsilon, dict())[dimensionality] = empirical_loss
                    # save after every run so partial results survive interruption
                    Experiment.save_results(results_dict)

    @staticmethod
    def main_dptext_test():
        """Single small run of the Beigi et al. mechanism (sanity check)."""
        np.random.seed(1234)
        # collecting results into JSON
        results_dict = {}
        for mechanism in (BeigiEtAlLaplaceMechanism(),):
            for epsilon in (1.0,):
                for dimensionality in (1,):
                    empirical_loss = Experiment.estimate_empirical_loss(dimensionality, epsilon, mechanism,
                                                                        number_of_repeats=20)
                    per_mechanism = results_dict.setdefault(mechanism.name, dict())
                    per_mechanism.setdefault(epsilon, dict())[dimensionality] = empirical_loss
                    Experiment.save_results(results_dict)

    @staticmethod
    def reconstruct_original_vector(privatized_vectors: np.ndarray) -> np.ndarray:
        """
        Majority-vote each row back to an all-zeros or all-ones vector, e.g.
        (0, 0, 1, 0, 1) -> (0, 0, 0, 0, 0).
        """
        # compute mean along the 2-nd axis (which will be somewhere between 0 and 1)
        mean_value_for_each_data_point = np.mean(privatized_vectors, axis=1)
        print("Mean value\n", mean_value_for_each_data_point)
        # round the mean value
        reconstructed_value_for_each_data_point = np.round(mean_value_for_each_data_point).astype(int)
        # this is now a 1-D vector - one value per data point; extend it back
        # to 2-D by repeating the reconstructed value for all columns, e.g.
        # [0, 1, 1] ->
        # [0, 0, 0, 0, 0]
        # [1, 1, 1, 1, 1]
        # [1, 1, 1, 1, 1]
        # (if the input dimensionality were 5)
        assert reconstructed_value_for_each_data_point.ndim == 1
        # make it a column array
        expanded = np.expand_dims(reconstructed_value_for_each_data_point, axis=1)
        print("expanded", expanded)
        # and copy each value to all columns
        result = np.tile(expanded, (1, privatized_vectors.shape[1]))
        print(result)
        assert result.shape == privatized_vectors.shape
        print("result\n", result)
        return result

    @staticmethod
    def estimate_errors():
        """Repeat one configuration over 100 seeds and report mean/std of the loss."""
        # collecting results
        empirical_losses = []
        for i in range(100):
            for mechanism in (LaplaceMechanismADePT(),):
                for epsilon in (0.1,):
                    for dimensionality in (2,):
                        np.random.seed(i)
                        empirical_loss = Experiment.estimate_empirical_loss(dimensionality, epsilon, mechanism)
                        empirical_losses.append(empirical_loss)
        print("Empirical losses")
        # print(empirical_losses)
        print(np.mean(empirical_losses))
        print(np.std(empirical_losses))
| |
= '\n [\n' + '\n'.join(aList) + '\n ]'
# The following doesn't work well.
# val = g.objToString(aList, indent=' '*4)
else:
val = '<%s non-comment lines>' % len(aList)
elif isinstance(val, str) and val.startswith('<?xml'):
val = '<xml>'
key2 = '@%-6s %s' % (gs.kind, key)
yield key2, val, c, letter
#@+node:ekr.20171115062202.1: *3* gcm.valueInMyLeoSettings
def valueInMyLeoSettings(self, settingName):
    """Return the value of the setting, if any, in myLeoSettings.leo."""
    lm = g.app.loadManager
    # A GeneralSetting object, or None.
    gs = lm.globalSettingsDict.d.get(self.munge(settingName))
    if gs and gs.path.find('myLeoSettings.leo') > -1:
        return gs.val
    return None
#@-others
#@+node:ekr.20041118104831.1: ** class LocalConfigManager
class LocalConfigManager:
"""A class to hold config settings for commanders."""
#@+others
#@+node:ekr.20120215072959.12472: *3* c.config.Birth
#@+node:ekr.20041118104831.2: *4* c.config.ctor
def __init__(self, c, previousSettings=None):
    """Ctor for LocalConfigManager: bind the settings/shortcuts dicts and init all ivars."""
    self.c = c
    lm = g.app.loadManager
    #
    # c.__init__ and helpers set the shortcuts and settings dicts for local files.
    if previousSettings:
        self.settingsDict = previousSettings.settingsDict
        self.shortcutsDict = previousSettings.shortcutsDict
        assert isinstance(self.settingsDict, g.TypedDict), repr(self.settingsDict)
        assert isinstance(self.shortcutsDict,g.TypedDict), repr(self.shortcutsDict) # was TypedDictOfLists.
    else:
        # No previous settings: fall back to the global dicts
        # (these may still be None during startup).
        self.settingsDict = d1 = lm.globalSettingsDict
        self.shortcutsDict = d2 = lm.globalBindingsDict
        assert d1 is None or isinstance(d1, g.TypedDict), repr(d1)
        assert d2 is None or isinstance(d2, g.TypedDict), repr(d2) # was TypedDictOfLists.
    # Define these explicitly to eliminate a pylint warning.
    if 0:
        # No longer needed now that c.config.initIvar always sets
        # both c and c.config ivars.
        self.default_derived_file_encoding = g.app.config.default_derived_file_encoding
        self.redirect_execute_script_output_to_log_pane = \
            g.app.config.redirect_execute_script_output_to_log_pane
    # Seed font-size defaults from the global config.
    self.defaultBodyFontSize = g.app.config.defaultBodyFontSize
    self.defaultLogFontSize = g.app.config.defaultLogFontSize
    self.defaultMenuFontSize = g.app.config.defaultMenuFontSize
    self.defaultTreeFontSize = g.app.config.defaultTreeFontSize
    # Init every encoding ivar and every registered ivar from settings.
    for key in sorted(list(g.app.config.encodingIvarsDict.keys())):
        self.initEncoding(key)
    for key in sorted(list(g.app.config.ivarsDict.keys())):
        self.initIvar(key)
#@+node:ekr.20041118104414: *4* c.config.initEncoding
def initEncoding(self, key):
    """Init the encoding ivar named by the (munged) key, warning about invalid encodings."""
    # Important: the key is munged.
    gs = g.app.config.encodingIvarsDict.get(key)
    encodingName = gs.ivar
    encoding = self.get(encodingName, kind='string')
    if not encoding:
        # Use the global setting as a last resort.
        encoding = getattr(g.app.config, encodingName)
    setattr(self, encodingName, encoding)
    if encoding and not g.isValidEncoding(encoding):
        g.es('bad', f"{encodingName}: {encoding}")
#@+node:ekr.20041118104240: *4* c.config.initIvar
def initIvar(self, key):
    """Set both the commander ivar and the c.config ivar for the (munged) key."""
    c = self.c
    # Important: the key is munged.
    gs = g.app.config.ivarsDict.get(key)
    ivarName = gs.ivar
    val = self.get(ivarName, kind=None)
    if not val and hasattr(self, ivarName):
        return
    # Set *both* the commander ivar and the c.config ivar.
    setattr(self, ivarName, val)
    setattr(c, ivarName, val)
#@+node:ekr.20190831030206.1: *3* c.config.createActivesSettingsOutline (new: #852)
def createActivesSettingsOutline(self):
    """
    Create and open an outline, summarizing all presently active settings.
    The outline retains the organization of all active settings files.
    See #852: https://github.com/leo-editor/leo-editor/issues/852
    """
    # The constructor does all the work.
    ActiveSettingsOutline(self.c)
#@+node:ekr.20190901181116.1: *3* c.config.getSource (new)
def getSource(self, setting):
    """
    Return a string representing the source file of the given setting,
    one of ("local_file", "theme_file", "myLeoSettings", "leoSettings", "ignore", "error")
    """
    trace = False
    if not isinstance(setting, g.GeneralSetting):
        return "error"
    try:
        path = setting.path
    except Exception:
        return "error"
    val = g.truncate(repr(setting.val), 50)
    if not path:
        # Settings created in code have no path.
        return "local_file"
    path = path.lower()
    for tag in ('myLeoSettings.leo', 'leoSettings.leo'):
        if path.endswith(tag.lower()):
            if trace and setting.kind == 'color':
                g.trace('FOUND:', tag[: -len('.leo')], setting.kind, setting.ivar, val)
            # Slice off the '.leo' extension. (str.rstrip('.leo') would strip
            # a *character set*, not the suffix, and only works here by luck.)
            return tag[: -len('.leo')]
    theme_path = g.app.loadManager.theme_path
    if theme_path and g.shortFileName(theme_path.lower()) in path:
        if trace: g.trace('FOUND:', "theme_file", setting.kind, setting.ivar, val)
        return "theme_file"
    if path == 'register-command' or path.find('mode') > -1:
        return 'ignore'
    return "local_file"
#@+node:ekr.20120215072959.12471: *3* c.config.Getters
#@+node:ekr.20041123092357: *4* c.config.findSettingsPosition & helper
# This was not used prior to Leo 4.5.
def findSettingsPosition(self, setting):
    """Return the position for the setting in the @settings tree for c."""
    root = self.settingsRoot()
    if not root:
        return None
    target = g.app.config.munge(setting)
    for p in root.subtree():
        # munge returns None for an empty headline.
        head = g.app.config.munge(p.h) or ''
        if head.startswith(target):
            return p.copy()
    return None
#@+node:ekr.20041120074536: *5* c.config.settingsRoot
def settingsRoot(self, theme=False):
    """Return the position of the @settings tree, or of its inner @theme node when theme is True."""
    c = self.c
    for p in c.all_unique_positions():
        if p.h.rstrip() != "@settings":
            continue
        if not theme:
            return p.copy()
        # Look for an inner @theme node.
        for p2 in p.subtree():
            if g.match_word(p2.h, 0, '@theme'):
                return p2.copy()
    return None
#@+node:ekr.20120215072959.12515: *4* c.config.Getters
#@@nocolor-node
#@+at Only the following need to be defined.
#
# get (self,setting,theType)
# getAbbrevDict (self)
# getBool (self,setting,default=None)
# getButtons (self)
# getColor (self,setting)
# getData (self,setting)
# getDirectory (self,setting)
# getFloat (self,setting)
# getFontFromParams (self,family,size,slant,weight,defaultSize=12)
# getInt (self,setting)
# getLanguage (self,setting)
# getMenusList (self)
# getOutlineData (self)
# getOpenWith (self)
# getRatio (self,setting)
# getShortcut (self,commandName)
# getString (self,setting)
#@+node:ekr.20120215072959.12519: *5* c.config.get & allies
def get(self, setting, kind):
    """Get the setting and make sure its type matches the expected type."""
    d = self.settingsDict
    if not d:
        return None
    assert isinstance(d, g.TypedDict), repr(d)
    val, _ = self.getValFromDict(d, setting, kind)
    return val
#@+node:ekr.20120215072959.12520: *6* getValFromDict
def getValFromDict(self, d, setting, requestedType, warn=True):
    """
    Look up the setting in d. If warn is True, warn if the requested type
    does not (loosely) match the actual type.
    returns (val,exists)
    """
    gs = d.get(g.app.config.munge(setting))
    if not gs: return None, False
    assert isinstance(gs, g.GeneralSetting), repr(gs)
    val = gs.val
    # Treat explicit 'None'/'none'/'' values as existing-but-empty.
    isNone = val in ('None', 'none', '')
    if not self.typesMatch(gs.kind, requestedType):
        # New in 4.4: make sure the types match.
        # A serious warning: one setting may have destroyed another!
        # Important: this is not a complete test of conflicting settings:
        # The warning is given only if the code tries to access the setting.
        if warn:
            g.error('warning: ignoring', gs.kind, '', setting, 'is not', requestedType)
            g.error('there may be conflicting settings!')
        return None, False
    if isNone:
        return '', True
        # 2011/10/24: Exists, a *user-defined* empty value.
    return val, True
#@+node:ekr.20120215072959.12521: *6* typesMatch
def typesMatch(self, type1, type2):
"""
Return True if type1, the actual type, matches type2, the requeseted type.
The following equivalences are allowed:
- None matches anything.
- An actual type of string or strings matches anything *except* shortcuts.
- Shortcut matches shortcuts.
"""
# The shortcuts logic no longer uses the get/set code.
shortcuts = ('shortcut', 'shortcuts',)
if type1 in shortcuts or type2 in shortcuts:
g.trace('oops: type in shortcuts')
return (
type1 is None or type2 is None or
type1.startswith('string') and type2 not in shortcuts or
type1 == 'int' and type2 == 'size' or
(type1 in shortcuts and type2 in shortcuts) or
type1 == type2
)
#@+node:ekr.20120215072959.12522: *5* c.config.getAbbrevDict
def getAbbrevDict(self):
"""Search all dictionaries for the setting & check it's type"""
d = self.get('abbrev', 'abbrev')
return d or {}
#@+node:ekr.20120215072959.12523: *5* c.config.getBool
def getBool(self, setting, default=None):
"""Return the value of @bool setting, or the default if the setting is not found."""
val = self.get(setting, "bool")
if val in (True, False):
return val
return default
#@+node:ekr.20120215072959.12525: *5* c.config.getColor
def getColor(self, setting):
"""Return the value of @color setting."""
col = self.get(setting, "color")
while col and col.startswith('@'):
col = self.get(col[1:], "color")
return col
#@+node:ekr.20120215072959.12527: *5* c.config.getData
def getData(self, setting, strip_comments=True, strip_data=True):
"""Return a list of non-comment strings in the body text of @data setting."""
# 904: Add local abbreviations to global settings.
append = setting == 'global-abbreviations'
if append:
data0 = g.app.config.getData(setting,
strip_comments=strip_comments,
strip_data=strip_data,
)
data = self.get(setting, "data")
# New in Leo 4.11: parser.doData strips only comments now.
# New in Leo 4.12: parser.doData strips *nothing*.
if isinstance(data, str):
data = [data]
if data and strip_comments:
data = [z for z in data if not z.strip().startswith('#')]
if data and strip_data:
data = [z.strip() for z in data if z.strip()]
if append and data != data0:
if data:
data.extend(data0)
else:
data = data0
return data
#@+node:ekr.20131114051702.16542: *5* c.config.getOutlineData
def getOutlineData(self, setting):
"""Return the pastable (xml) text of the entire @outline-data tree."""
data = self.get(setting, "outlinedata")
if setting == 'tree-abbreviations':
# 904: Append local tree abbreviations to the global abbreviations.
data0 = g.app.config.getOutlineData(setting)
if data and data0 and data != data0:
assert isinstance(data0, str)
assert isinstance(data, str)
# We can't merge the data here: they are .leo files!
# abbrev.init_tree_abbrev_helper does the merge.
data = [data0, data]
return data
#@+node:ekr.20120215072959.12528: *5* c.config.getDirectory
def getDirectory(self, setting):
"""Return the value of @directory setting, or None if the directory does not exist."""
# Fix https://bugs.launchpad.net/leo-editor/+bug/1173763
theDir = self.get(setting, 'directory')
if g.os_path_exists(theDir) and g.os_path_isdir(theDir):
return theDir
return None
#@+node:ekr.20120215072959.12530: *5* c.config.getFloat
def getFloat(self, setting):
"""Return the value of @float setting."""
| |
x_,
),
cons59,
cons60,
cons61,
cons62,
cons4,
cons5,
cons57,
cons149,
)
rule753 = ReplacementRule(pattern753, replacement753)
pattern754 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons8,
cons19,
cons4,
cons5,
cons57,
cons496,
)
rule754 = ReplacementRule(pattern754, replacement754)
pattern755 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1)) * (x_ ** n_ * WC("b", S(1))) ** p_, x_
),
cons3,
cons8,
cons19,
cons4,
cons5,
cons501,
cons502,
)
rule755 = ReplacementRule(pattern755, replacement755)
pattern756 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (x_ ** WC("n", S(1)) * WC("b", S(1))) ** p_,
x_,
),
cons3,
cons8,
cons19,
cons4,
cons5,
cons501,
cons503,
)
rule756 = ReplacementRule(pattern756, replacement756)
pattern757 = Pattern(
Integral((c_ * x_) ** m_ * (x_ ** WC("n", S(1)) * WC("b", S(1))) ** p_, x_),
cons3,
cons8,
cons19,
cons4,
cons5,
cons21,
)
rule757 = ReplacementRule(pattern757, replacement757)
pattern758 = Pattern(
Integral(x_ ** WC("m", S(1)) * (a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons19,
cons4,
cons40,
cons504,
)
rule758 = ReplacementRule(pattern758, replacement758)
pattern759 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons19,
cons4,
cons5,
cons505,
cons68,
)
rule759 = ReplacementRule(pattern759, replacement759)
pattern760 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons8,
cons19,
cons4,
cons5,
cons57,
cons506,
cons68,
)
rule760 = ReplacementRule(pattern760, replacement760)
pattern761 = Pattern(
Integral(
x_ ** WC("m", S(1)) * (x_ ** n_ * WC("b", S(1)) + WC("a", S(0))) ** p_, x_
),
cons2,
cons3,
cons19,
cons4,
cons5,
cons502,
)
rule761 = ReplacementRule(pattern761, replacement761)
pattern762 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons19,
cons4,
cons5,
cons57,
cons507,
)
rule762 = ReplacementRule(pattern762, replacement762)
pattern763 = Pattern(
Integral((c_ * x_) ** m_ * (a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons8,
cons19,
cons4,
cons5,
cons502,
)
rule763 = ReplacementRule(pattern763, replacement763)
pattern764 = Pattern(
Integral(
(c_ * x_) ** m_
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons8,
cons19,
cons4,
cons5,
cons57,
cons507,
)
rule764 = ReplacementRule(pattern764, replacement764)
pattern765 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons19,
cons4,
cons130,
)
rule765 = ReplacementRule(pattern765, replacement765)
pattern766 = Pattern(
Integral(x_ ** m_ * (a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons19,
cons4,
cons5,
cons508,
cons68,
)
rule766 = ReplacementRule(pattern766, replacement766)
pattern767 = Pattern(
Integral(
x_ ** m_
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons19,
cons4,
cons5,
cons57,
cons509,
cons68,
)
rule767 = ReplacementRule(pattern767, replacement767)
pattern768 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons19,
cons4,
cons5,
cons508,
cons56,
)
rule768 = ReplacementRule(pattern768, replacement768)
pattern769 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons8,
cons19,
cons4,
cons5,
cons57,
cons509,
cons56,
)
rule769 = ReplacementRule(pattern769, replacement769)
pattern770 = Pattern(
Integral(x_ ** WC("m", S(1)) * (a_ + x_ ** n_ * WC("b", S(1))) ** p_, x_),
cons2,
cons3,
cons5,
cons150,
cons20,
CustomConstraint(With770),
)
rule770 = ReplacementRule(pattern770, replacement770)
pattern771 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons5,
cons57,
cons497,
cons20,
CustomConstraint(With771),
)
rule771 = ReplacementRule(pattern771, replacement771)
pattern772 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons150,
cons246,
cons165,
cons96,
cons510,
cons511,
)
rule772 = ReplacementRule(pattern772, replacement772)
pattern773 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons8,
cons19,
cons57,
cons497,
cons246,
cons165,
cons512,
cons513,
)
rule773 = ReplacementRule(pattern773, replacement773)
pattern774 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons19,
cons150,
cons246,
cons165,
cons514,
cons511,
)
rule774 = ReplacementRule(pattern774, replacement774)
pattern775 = Pattern(
Integral(x_ ** S(2) / (a_ + x_ ** S(4) * WC("b", S(1))) ** (S(5) / 4), x_),
cons2,
cons3,
cons468,
)
rule775 = ReplacementRule(pattern775, replacement775)
pattern776 = Pattern(
Integral(x_ ** m_ / (a_ + x_ ** S(4) * WC("b", S(1))) ** (S(5) / 4), x_),
cons2,
cons3,
cons468,
cons515,
)
rule776 = ReplacementRule(pattern776, replacement776)
pattern777 = Pattern(
Integral(x_ ** m_ / (a_ + x_ ** S(4) * WC("b", S(1))) ** (S(5) / 4), x_),
cons2,
cons3,
cons468,
cons516,
)
rule777 = ReplacementRule(pattern777, replacement777)
pattern778 = Pattern(
Integral(
sqrt(x_ * WC("c", S(1))) / (a_ + x_ ** S(2) * WC("b", S(1))) ** (S(5) / 4),
x_,
),
cons2,
cons3,
cons8,
cons468,
)
rule778 = ReplacementRule(pattern778, replacement778)
pattern779 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** m_
/ (a_ + x_ ** S(2) * WC("b", S(1))) ** (S(5) / 4),
x_,
),
cons2,
cons3,
cons8,
cons468,
cons517,
cons518,
)
rule779 = ReplacementRule(pattern779, replacement779)
pattern780 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** m_
/ (a_ + x_ ** S(2) * WC("b", S(1))) ** (S(5) / 4),
x_,
),
cons2,
cons3,
cons8,
cons468,
cons517,
cons96,
)
rule780 = ReplacementRule(pattern780, replacement780)
pattern781 = Pattern(
Integral(x_ ** S(2) / (a_ + x_ ** S(4) * WC("b", S(1))) ** (S(5) / 4), x_),
cons2,
cons3,
cons485,
)
rule781 = ReplacementRule(pattern781, replacement781)
pattern782 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons150,
cons246,
cons139,
cons519,
cons520,
cons511,
)
rule782 = ReplacementRule(pattern782, replacement782)
pattern783 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons8,
cons57,
cons497,
cons246,
cons139,
cons521,
cons522,
cons513,
)
rule783 = ReplacementRule(pattern783, replacement783)
pattern784 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons19,
cons150,
cons246,
cons139,
cons511,
)
rule784 = ReplacementRule(pattern784, replacement784)
pattern785 = Pattern(
Integral(
(x_ * WC("c", S(1))) ** WC("m", S(1))
* (a1_ + x_ ** n_ * WC("b1", S(1))) ** p_
* (a2_ + x_ ** n_ * WC("b2", S(1))) ** p_,
x_,
),
cons59,
cons60,
cons61,
cons62,
cons8,
cons19,
cons57,
cons497,
cons246,
cons139,
cons513,
)
rule785 = ReplacementRule(pattern785, replacement785)
pattern786 = Pattern(
Integral(x_ / (a_ + x_ ** S(3) * WC("b", S(1))), x_), cons2, cons3, cons69
)
rule786 = ReplacementRule(pattern786, replacement786)
pattern787 = Pattern(
Integral(x_ ** WC("m", S(1)) / (a_ + x_ ** n_ * WC("b", S(1))), x_),
| |
<filename>src/blc.py
#!/usr/bin/env python3
"""
A BLC parser/interpreter in python
:created: 2019-12-14 12:35:36 -0800
:copyright: (c) 2019, <NAME>
:license: MIT
"""
import sys
import os
import argparse
import tqdm
import numpy as np
import anytree
import numpy
from matplotlib import pyplot as plt
import multiprocessing
import ctypes
# Non-terminals: two-character encodings used in BLC source text.
NT_LAMBDA = '00'
NT_APPLY = '01'
NT_COMMENT_BEGIN = '#'
NT_NEWLINE = '\n'
NT_LAMBDA_LAM = '\\'
# Compiler output: header line written at the top of compiled output.
EVALUATION_HEADER = '#!/usr/bin/env blc'
# Language extensions
BLC = 'blc' # .blc files
LAM = 'lam' # .lam files
DEFAULT_LANG = BLC
# Terminals: internal token values produced by lex().
LAMBDA = 'λ'
APPLY = 'A'
COMMENT_BEGIN = 'COMMENT_BEGIN'
NEWLINE = 'NEWLINE'
NOP = None
# Grammar
# 1^{n+1}0 = int (unary-encoded de Bruijn index n)
# 00M = (LAMBDA, M)
# 01MN = [M, N]
# Shell settings
DEFAULT_PS1 = '> '
class CycleException(Exception):
    """Raised when evaluation revisits a previously-seen term (a reduction cycle)."""
    pass
def IS_VAR(two):
    """Return True when the two-character token *two* begins a de Bruijn variable."""
    return two in ('10', '11')
def int_to_debruijn(integer):
    """Encode de Bruijn index *integer* in unary BLC form: n+1 ones, then a zero."""
    ones = '1' * (integer + 1)
    return ones + '0'
def lex(source_string):
    """Tokenize BLC source text.

    Yields LAMBDA, APPLY, de Bruijn indexes (as ints), COMMENT_BEGIN,
    NEWLINE, or the raw character itself for anything unrecognized.
    Scans with two characters of lookahead: '00' is lambda, '01' is apply,
    '1'*(n+1) + '0' is the unary encoding of de Bruijn index n.
    """
    i = 0
    while True:
        # Two characters of lookahead; IndexError at end of input ends the
        # scan (note: a lone trailing character is silently dropped).
        try:
            two = source_string[i] + source_string[i + 1]
        except IndexError:
            break
        if two == NT_LAMBDA:
            i = i + 2
            yield LAMBDA
        elif two == NT_APPLY:
            i = i + 2
            yield APPLY
        elif IS_VAR(two):
            # Unary de Bruijn index: count '1's until the terminating '0'.
            n = source_string[i + 1]
            msg = "Must be either 0, 1. Got = '" + n + "'"
            assert n == '0' or n == '1', msg
            dbindex = 0
            while n != '0':
                dbindex += 1
                n = source_string[i + 1 + dbindex]
            i = i + 2 + dbindex
            yield dbindex
        elif two[0] == NT_COMMENT_BEGIN:
            i = i + 1
            yield COMMENT_BEGIN
        elif two[0] == NT_NEWLINE:
            i = i + 1
            yield NEWLINE
        else:
            # Unknown character: pass it through one character at a time.
            i = i + 1
            yield two[0]
            # assert False, "Not a valid character. {}".format(two)
def body(expr):
    """Return the body M of a parsed lambda, represented as the 2-tuple (LAMBDA, M)."""
    assert type(expr) == tuple
    lambda_body = expr[1]
    return lambda_body
def end_of_first_lambda(a):
    """Return the index just past the first "<int> <LAMBDA|APPLY>" token pair
    in token list *a*, or None when no such boundary exists.

    A de Bruijn index (int) immediately followed by a LAMBDA or APPLY token
    marks the end of the first lambda: the next token starts a new term.
    """
    # Bound the scan at len(a) - 1: the original iterated over range(len(a))
    # and read a[index + 1], which raised IndexError whenever the scan
    # reached the final token without finding a boundary. Returning None
    # explicitly also replaces the silent implicit-None fall-through.
    for index in range(len(a) - 1):
        tok = a[index]
        nxt = a[index + 1]
        if type(tok) == int and (nxt == LAMBDA or nxt == APPLY):
            return index + 1
    return None
def find_argument(a):
    """
    Return the index where the argument part b of an (a b) application
    starts: the start of the last lambda or variable, found by scanning the
    token list from right to left.
    """
    # Special case: a bare "APPLY <var> <var>" application.
    if len(a) == 3:
        op, first, second = a
        if op == APPLY and type(first) == int and type(second) == int:
            return 2
    for index in reversed(range(len(a))):
        here = a[index]
        before = a[index - 1]
        if (here == LAMBDA or here == APPLY) and type(before) == int:
            return index
    assert False, "Couldn't find a lambda or variable, malformed code."
def linecount(toklist):
    """Return the number of NEWLINE tokens in *toklist*."""
    total = 0
    for tok in toklist:
        if tok == NEWLINE:
            total += 1
    return total
def parse(tokens):
    """
    Parse a lex() token list into a tree of tuples (lambdas), lists
    (applications) and ints (de Bruijn indexes).

    Every time you generate a statement, backtrack to see if you have completed
    an application. Works right-to-left over the token stream; *stack* holds
    completed subtrees, *line*/*col* track position for error reporting.
    """
    revlist = list(reversed(tokens))
    stack = []
    line = linecount(revlist)
    col = 1
    for tok in revlist:
        col += 1
        try:
            if type(tok) == int:
                r = parse_var(tok, stack)
            elif tok == APPLY:
                r = parse_apply(tok, stack)
            elif tok == LAMBDA:
                r = parse_lambda(tok, stack)
            elif tok == NEWLINE:
                # Reverse iteration: newlines count the line number down.
                col = 1
                line -= 1
                r = NOP
            else:
                # Some other token that's unknown, probably a comment.
                r = NOP
            if r is not NOP:
                stack.insert(0, r)
        except IndexError:
            # pop(0) on an empty stack means the program is malformed.
            raise SyntaxError("Invalid syntax at: Line {}, Column {}"
                              .format(line, col))
    return stack.pop(0)
def parse_lambda(tok, context):
    """Build a lambda node: pop the lambda's body off *context* and wrap it."""
    return (LAMBDA, context.pop(0))
def parse_apply(tok, context):
    """Build an application node [lhs, rhs] from the next two parsed terms."""
    function_part = context.pop(0)
    argument_part = context.pop(0)
    return [function_part, argument_part]
def parse_var(tok, context):
    """A de Bruijn index is already its own parse node; return it unchanged."""
    return tok
def slurp(filepath):
    """Return the entire contents of *filepath*; '-' means read from stdin."""
    if filepath != '-':
        with open(filepath) as handle:
            return handle.read()
    return sys.stdin.read()
def spit(filepath, body, executable=False):
    """Write *body* to *filepath* ('-' means stdout); optionally mark it 0o755."""
    if filepath == '-':
        sys.stdout.write(body)
        sys.stdout.flush()
        return
    with open(filepath, 'w+') as handle:
        handle.write(body)
    if executable:
        os.chmod(filepath, 0o755)
def read(verbose=False):
    """Read an entire BLC program from stdin and return its parse tree."""
    raw_text = sys.stdin.read()
    if verbose:
        print("raw text: " + raw_text)
    return parse_blc_raw(raw_text)
def parse_blc_raw(raw_text):
    """Lex then parse BLC source text into a parse tree."""
    tokens = list(lex(raw_text))
    return parse(tokens)
def parse_lambda_raw(raw_text):
    """\\x.x-style lambda syntax is not supported; feed BLC text instead."""
    raise Exception("Can't parse \\x.x lambda use blc.")
def pprint(parse_tree):
    """Print *parse_tree* and pass it through unchanged (handy in pipelines)."""
    print(parse_tree)
    return parse_tree
def shift(tree, amt=1, depth=0):
    """Shift the free de Bruijn indexes of *tree* by *amt*.

    Only variables whose index is >= *depth* (i.e. not bound within this
    expression) are shifted; *depth* grows by one under each lambda.
    """
    kind = type(tree)
    if kind == int:
        return tree + amt if tree >= depth else tree
    if kind == tuple:
        return (LAMBDA, shift(tree[1], amt=amt, depth=depth + 1))
    if kind == list:
        left, right = tree
        return [shift(left, amt=amt, depth=depth),
                shift(right, amt=amt, depth=depth)]
    raise TypeError(tree)
def substitute(lhs, rhs, depth=0):
    """Perform capture-avoiding substitution of *rhs* into *lhs*.

    1. Replace the de Bruijn index bound by the outside lambda in lhs with rhs.
    2. Increment any indexes in rhs that are bound to lambdas outside of lhs;
       shift up according to how deep rhs is inserted into lhs.
    3. Drop the external lambda and decrement any free variables of lhs.
    """
    # Traverse the tree in pre-order (root, left, right)
    # Walk the entire tree to replace subterms.
    result = None
    if type(lhs) == int:
        if lhs + 1 == depth: # The depth we're looking for:
            result = rhs
        else:
            result = lhs
    elif type(lhs) == tuple:
        # we shift up rhs's free variables every time we push it into an
        # abstraction.
        result = (LAMBDA,
                  substitute(lhs[1],
                             shift(rhs, amt=+1),
                             depth=depth+1))
    elif type(lhs) == list:
        a, b = lhs
        # Applications substitute into both sides at the same depth.
        result = [substitute(a, rhs, depth=depth),
                  substitute(b, rhs, depth=depth)]
    else:
        raise TypeError("Unknown type for substitution {}."
                        .format(lhs))
    # Pop off the lambda for the top expression
    if depth == 0:
        result = shift(result[1], amt=-1)
    return result
def normal_order_reduction(tree, verbose=False):
    """Perform one step of normal-order (leftmost-first) beta-reduction."""
    # choose the left-most redex first
    # substitute in the left hand side then reduce the rest
    if type(tree) == list:
        a, b = tree
        if type(a) == list:
            # Head is itself an application: reduce inside it first.
            return [normal_order_reduction(a, verbose=verbose), b]
        elif type(a) == tuple:
            # Head is a lambda: (\x.M) N is a redex, contract it.
            return substitute(a, b)
        else: # int
            assert type(a) == int
            return [a, normal_order_reduction(b, verbose=verbose)]
    elif type(tree) == tuple:
        return (LAMBDA, normal_order_reduction(tree[1], verbose=verbose))
    elif type(tree) == int:
        return tree
    else:
        raise TypeError("Unknown type passed to reducer '{}': {}."
                        .format(type(tree), tree))
# Set reduction strategy here. beta_reduce is the single indirection point
# used by evaluate()/evaluate_generator(); rebinding it swaps the evaluation
# order globally.
beta_reduce = normal_order_reduction
def is_head_normal_form(parse_tree):
    """Return True unless the root of *parse_tree* is an application (a list)."""
    return not (type(parse_tree) == list)
def is_normal_form(tree, verbose=True):
    """Return True when *tree* contains no redex, i.e. no lambda appears in
    the function position of any application."""
    kind = type(tree)
    if kind == int:
        return True
    if kind == tuple:
        return is_normal_form(tree[1])
    if kind == list:
        fn, arg = tree
        if type(fn) == tuple:
            # A lambda in function position is a redex.
            return False
        return is_normal_form(fn) and is_normal_form(arg)
    raise Exception("Unknown type.")
def evaluate(parse_tree, until=is_normal_form, verbose=False,
             stop_if_looping=False, language=None,
             max_evaluations=200):
    """
    Repeatedly beta-reduce *parse_tree* until the *until* predicate holds,
    and return the final tree.

    until: a stopping predicate like `is_normal_form`.
    stop_if_looping: a silly way for us to kill the program if a cycle is
        detected (raises CycleException on a repeated state). No this
        doesn't work for all infinite loops, but it certainly does for most
        of them.
    max_evaluations: maximum number of steps we will do before halting.
    """
    idx = 0
    if stop_if_looping:
        # str(tree) of every state seen so far, for cycle detection.
        observed_states = {}
    while not until(parse_tree):
        redex = beta_reduce(parse_tree, verbose=verbose)
        if stop_if_looping and observed_states.get(str(redex)):
            raise CycleException("We're looping. Cycle length: {}"
                                 .format(len(observed_states)))
        if verbose:
            tree = tree_to_lang(parse_tree, language=LAM)
            print("eval{} {}".format(idx, tree))
        parse_tree = redex
        idx += 1
        if stop_if_looping:
            observed_states[str(redex)] = True
        if idx == max_evaluations:
            if verbose:
                sys.stderr.write("Reached maximum number of cycles.\n")
            break
    if verbose:
        # Show the final state too.
        tree = tree_to_lang(parse_tree, language=LAM)
        print("eval{} {}".format(idx, tree))
    return parse_tree
def evaluate_generator(parse_tree, until=is_normal_form,
                       stop_if_looping=False, language=None,
                       verbose=False,
                       max_evaluations=2000):
    """
    Like evaluate(), but yields the rendered tree after every reduction step
    instead of returning only the final tree.

    until: a stopping predicate like `is_normal_form`.
    stop_if_looping: a silly way for us to kill the program if a cycle is
        detected (raises CycleException on a repeated state). No this
        doesn't work for all infinite loops, but it certainly does for many
        of them.
    max_evaluations: maximum number of steps we will do before halting.
    """
    idx = 0
    if stop_if_looping:
        # str(tree) of every state seen so far, for cycle detection.
        observed_states = {}
    while not until(parse_tree):
        idx += 1
        if idx == max_evaluations:
            if verbose:
                sys.stderr.write("Reached maximum number of cycles.\n")
            break
        redex = beta_reduce(parse_tree)
        if stop_if_looping and observed_states.get(str(redex)):
            raise CycleException("We're looping. Cycle length: {}"
                                 .format(len(observed_states)))
        tree = tree_to_lang(parse_tree, language=language)
        yield tree
        parse_tree = redex
        if stop_if_looping:
            observed_states[str(redex)] = True
    # Yield the final (post-loop) state as the last element.
    tree = tree_to_lang(parse_tree, language=language)
    yield tree
def tree_to_lang(parse_tree, language=None):
    """Render *parse_tree* as source text in the requested language."""
    if language == BLC:
        return tree_to_blc(parse_tree)
    if language == LAM:
        return tree_to_lam(parse_tree)
    raise Exception("Unknown language: {}".format(language))
def tree_to_blc(parse_tree):
    """Serialize a parse tree back into binary-lambda-calculus text.

    Pre-order traversal (node, left, right): ints become unary de Bruijn
    encodings, tuples become '00' + body, lists become '01' + fn + arg.
    """
    if type(parse_tree) == int:
        return int_to_debruijn(parse_tree)
    if type(parse_tree) == tuple:
        return NT_LAMBDA + tree_to_blc(parse_tree[1])
    if type(parse_tree) == list:
        fn, arg = parse_tree
        return NT_APPLY + tree_to_blc(fn) + tree_to_blc(arg)
    raise TypeError("Unknown type for parse_tree {}"
                    .format(parse_tree))
def tree_to_lam(parse_tree, depth=0, lambda_sym=NT_LAMBDA_LAM, debruijn=False):
# currently supports only 26 | |
This is defined as 0xffff, the largest number e_phnum can
have, specifying where the actual number of program headers
is assigned.
uint16_t e_shentsize;
This member holds a sections header's size in bytes. A section
header is one entry in the section header table; all
entries are the same size.
uint16_t e_shnum;
This member holds the number of entries in the section header table.
Thus the product of e_shentsize and e_shnum gives the section
header table's size in bytes. If a file has no section header table,
e_shnum holds the value of zero.
If the number of entries in the section header table is larger than or
equal to SHN_LORESERVE (0xff00), e_shnum holds
the value zero and the real number of entries in the section
header table is held in the sh_size member of the initial
entry in section header table. Otherwise, the sh_size member of
the initial entry in the section header table holds
the value zero.
uint16_t e_shstrndx;
This member holds the section header table index of the entry
associated with the section name string table. If the
file has no section name string table, this member holds
the value SHN_UNDEF.
If the index of section name string table section is larger than
or equal to SHN_LORESERVE (0xff00), this member holds
SHN_XINDEX (0xffff) and the real index of the section name
string table section is held in the sh_link member of the
initial entry in section header table. Otherwise, the sh_link
member of the initial entry in section header table contains
the value zero.
'''
if self.bits == 32:
unpackedHeader = struct.unpack('< 2H I 3I I 6H', buffer_list[16:52])
elif self.bits == 64:
unpackedHeader = struct.unpack('< 2H I 3Q I 6H', buffer_list[16:64])
(
self.header.e_type,
self.header.e_machine,
self.header.e_version,
self.header.e_entry, # 32/64 bit!
self.header.e_phoff, # 32/64 bit!
self.header.e_shoff, # 32/64 bit!
self.header.e_flags,
self.header.e_ehsize,
self.header.e_phentsize,
self.header.e_phnum,
self.header.e_shentsize,
self.header.e_shnum,
self.header.e_shstrndx,
) = unpackedHeader
###############################################
# check if ELF is supported
'''
The sixth byte specifies the data encoding of the
processor-specific data in the file.
'''
if self.header.e_ident[5] == ElfN_Ehdr.EI_DATA.ELFDATANONE:
raise NotImplementedError("ELFDATANONE: Unknown data format.")
elif self.header.e_ident[5] == ElfN_Ehdr.EI_DATA.ELFDATA2MSB:
raise NotImplementedError("ELFDATA2MSB: Not yet supported.")
elif self.header.e_ident[5] != ElfN_Ehdr.EI_DATA.ELFDATA2LSB:
raise NotImplementedError("Unknown data format.")
'''
The version number of the ELF specification
'''
if self.header.e_ident[6] == ElfN_Ehdr.EI_VERSION.EV_NONE:
raise NotImplementedError("EV_NONE: Invalid version.")
elif self.header.e_ident[6] != ElfN_Ehdr.EI_VERSION.EV_CURRENT:
raise NotImplementedError("Invalid version.")
'''
This byte identifies the operating system and ABI to which the
object is targeted. Some fields in other ELF structures have flags
and values that have platform-specific meanings; the
interpretation of those fields is determined by the value of
this byte.
'''
if not (self.header.e_ident[7] == ElfN_Ehdr.EI_OSABI.ELFOSABI_NONE
or
self.header.e_ident[7] == ElfN_Ehdr.EI_OSABI.ELFOSABI_LINUX):
raise NotImplementedError("EI_OSABI not yet supported")
'''
This byte identifies the version of the ABI to which the object is
targeted. This field is used to distinguish among incompatible
versions of an ABI. The interpretation of this version number is
dependent on the ABI identified by the EI_OSABI field. Applications
conforming to this specification use the value 0.
'''
if self.header.e_ident[8] != 0:
raise NotImplementedError("EI_ABIVERSION not yet supported")
# check if e_type is supported at the moment
if not (self.header.e_type == ElfN_Ehdr.E_type.ET_EXEC
or self.header.e_type == ElfN_Ehdr.E_type.ET_DYN):
raise NotImplementedError("Only e_type ET_EXEC and ET_DYN " \
+ "are supported yet")
# check if e_machine is supported at the moment
expectedMachine = {
32: ElfN_Ehdr.E_machine.EM_386,
64: ElfN_Ehdr.E_machine.EM_X86_64,
}[self.bits]
if self.header.e_machine != expectedMachine:
try:
EM = ElfN_Ehdr.E_machine.reverse_lookup[self.header.e_machine]
except KeyError:
EM = hex(self.header.e_machine)
raise NotImplementedError("Only e_machine EM_386 for ELFCLASS32" \
+ " and EM_X86_64 for ELFCLASS64 are supported yet" \
+ " (file has {})".format(EM))
# check if only the header of the ELF file should be parsed
# for example to speed up the process for checking if a list of files
# are valid ELF files
if onlyParseHeader is True:
return
# mark file as completely parsed (actually it is just parsing
# but without this flag internal functions will not work)
self.fileParsed = True
###############################################
# parse section header table
'''
The section header has the following structure:
typedef struct { // differences in ELF64:
uint32_t sh_name;
uint32_t sh_type;
uint32_t sh_flags; // uint64_t
Elf32_Addr sh_addr; // Elf64_Addr
Elf32_Off sh_offset; // Elf64_Off
uint32_t sh_size; // uint64_t
uint32_t sh_link;
uint32_t sh_info;
uint32_t sh_addralign; // uint64_t
uint32_t sh_entsize; // uint64_t
} Elf32_Shdr; // } Elf64_Shdr;
'''
# create a list of the section_header_table
self.sections = list()
for i in range(self.header.e_shnum):
'''
uint32_t sh_name;
This member specifies the name of the section. Its value is an
index into the section header string table section, giving the
location of a null-terminated string.
uint32_t sh_type;
This member categorizes the section's contents and semantics.
uintN_t sh_flags; (N = 32/64)
Sections support one-bit flags that describe miscellaneous
attributes. If a flag bit is set in sh_flags, the attribute
is "on" for the section. Otherwise, the attribute is "off" or
does not apply. Undefined attributes are set to zero.
ElfN_Addr sh_addr; (N = 32/64)
If this section appears in the memory image of a process, this
member holds the address at which the section's first byte
should reside. Otherwise, the member contains zero.
ElfN_Off sh_offset; (N = 32/64)
This member's value holds the byte offset from the beginning
of the file to the first byte in the section. One section
type, SHT_NOBITS, occupies no space in the file, and its
sh_offset member locates the conceptual placement in the file.
uintN_t sh_size; (N = 32/64)
This member holds the section's size in bytes. Unless the section
type is SHT_NOBITS, the section occupies sh_size bytes
in the file. A section of type SHT_NOBITS may have a nonzero
size, but it occupies no space in the file.
uint32_t sh_link;
This member holds a section header table index link, whose
interpretation depends on the section type.
uint32_t sh_info;
This member holds extra information, whose interpretation
depends on the section type.
uintN_t sh_addralign; (N = 32/64)
Some sections have address alignment constraints. If a
section holds a doubleword, the system must ensure doubleword
alignment for the entire section. That is, the value of sh_addr
must be congruent to zero, modulo the value of
sh_addralign. Only zero and positive integral powers of two
are allowed. Values of zero or one mean the section has no
alignment constraints.
uintN_t sh_entsize; (N = 32/64)
Some sections hold a table of fixed-sized entries, such as a
symbol table. For such a section, this member gives the
size in bytes for each entry. This member contains zero if
the section does not hold a table of fixed-size entries.
'''
tempSectionEntry = ElfN_Shdr()
tempOffset = self.header.e_shoff + i*self.header.e_shentsize
if self.bits == 32:
fmt = '< 2I 4I 2I 2I'
elif self.bits == 64:
fmt = '< 2I 4Q 2I 2Q'
fmtSize = struct.calcsize(fmt)
assert fmtSize == self.header.e_shentsize
(
tempSectionEntry.sh_name,
tempSectionEntry.sh_type,
tempSectionEntry.sh_flags, # 32/64 bit!
tempSectionEntry.sh_addr, # 32/64 bit!
tempSectionEntry.sh_offset, # 32/64 bit!
tempSectionEntry.sh_size, # 32/64 bit!
tempSectionEntry.sh_link,
tempSectionEntry.sh_info,
tempSectionEntry.sh_addralign, # 32/64 bit!
tempSectionEntry.sh_entsize, # 32/64 bit!
) = struct.unpack(fmt, buffer_list[tempOffset:tempOffset+fmtSize])
del tempOffset
del fmtSize
# create new section and add to sections list
section = Section()
section.elfN_shdr = tempSectionEntry
self.sections.append(section)
###############################################
# parse section string table
# section string table first byte always 0 byte
# section string table last byte always 0 byte
# section string table holds null terminated strings
# empty section string table => sh_size of string table section = 0
# => Non-zero indexes to string table are invalid
# list of sections not empty => read whole string table
if self.sections:
nStart = self.sections[self.header.e_shstrndx].elfN_shdr.sh_offset
nEnd = nStart + self.sections[self.header.e_shstrndx].elfN_shdr.sh_size
stringtable_str = buffer_list[nStart:nEnd]
# get name from string table for each section
for i in range(len(self.sections)):
# check if string table exists => abort reading
if len(stringtable_str) == 0:
break
nStart = self.sections[i].elfN_shdr.sh_name
nEnd = stringtable_str.find('\x00', nStart)
# use empty string if string is not | |
0x19, 0xf6, 0x4a,
0x92, 0x55, 0xf8, 0x4a, 0x91, 0x43, 0xb2, 0x4a,
0x79, 0x24, 0xf8, 0x42, 0x92, 0x47, 0x82, 0xa9,
0xf9, 0x24, 0x48, 0x8f, 0x44, 0xf9, 0x24, 0x4a,
0x46, 0x0b, 0x2c, 0xd4, 0xc1, 0x56, 0x28, 0x19,
0x36, 0x48, 0x15, 0x34, 0x4a, 0x4c, 0x32, 0x4a,
0x1d, 0x24, 0xaf, 0x44, 0x18, 0xf1, 0x48, 0xa4,
0x41, 0x8b, 0xa4, 0x43, 0xf2, 0x48, 0x81, 0x43,
0x92, 0x82, 0x47, 0x82, 0x2d, 0x81, 0x65, 0xc8,
0x81, 0x6f, 0xa2, 0xc5, 0x81, 0x4d, 0xca, 0x1c,
0xf8, 0x24, 0x48, 0x86, 0xe1, 0x22, 0x64, 0x18,
0xbc, 0x64, 0x48, 0x1c, 0x74, 0x81, 0xcc, 0x42,
0x17, 0xcc, 0x85, 0xf2, 0x81, 0x44, 0x83, 0x74,
0x81, 0x74, 0x4a, 0x32, 0x81, 0x83, 0x54, 0x41,
0xad, 0x84, 0x11, 0xaf, 0x44, 0x0b, 0xab, 0x94,
0x41, 0x8b, 0x94, 0x48, 0x2b, 0x94, 0x45, 0x98,
0x12, 0x6d, 0x48, 0x98, 0x6d, 0x1a, 0xd7, 0xdb,
0x00, 0x80, 0x84, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x44, 0x40, 0x01, 0x00, 0x90, 0x12,
0x80, 0x08, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00,
0x00, 0x00, 0x00, 0x12, 0x00, 0x28, 0x00, 0x40,
0x08, 0x00, 0x00, 0xb0, 0xfb, 0x0c, 0x24, 0x14,
0x14, 0x81, 0x16, 0x28, 0xc2, 0x21, 0x24, 0x48,
0x50, 0x29, 0x12, 0x24, 0x13, 0x44, 0xc1, 0x1c,
0xb0, 0x41, 0x02, 0x27, 0x28, 0x44, 0x28, 0x4b,
0x42, 0x1c, 0x42, 0x04, 0x4b, 0x42, 0x22, 0x26,
0x85, 0x02, 0x90, 0x44, 0x98, 0x00, 0x40, 0x04,
0x44, 0x46, 0x4c, 0x72, 0x19, 0xe4, 0x24, 0x63,
0x21, 0x24, 0x00, 0xa4, 0x48, 0x50, 0x22, 0x8a,
0x11, 0x02, 0x20, 0x24, 0x42, 0xf2, 0x99, 0xd2,
0x00, 0x82, 0xc4, 0x22, 0x28, 0x91, 0x84, 0x1a,
0x52, 0x84, 0x89, 0x93, 0x42, 0x14, 0x17, 0x84,
0x85, 0xc4, 0x18, 0x29, 0x44, 0x4c, 0x42, 0x99,
0x58, 0x22, 0x81, 0x10, 0xc8, 0x22, 0x66, 0x54,
0x22, 0x81, 0x25, 0x52, 0x18, 0x25, 0x02, 0x28,
0xc3, 0x52, 0x12, 0x14, 0x40, 0x28, 0x12, 0x09,
0x2d, 0x1c, 0x40, 0x38, 0x48, 0x96, 0x68, 0x84,
0x1e, 0x88, 0x12, 0x94, 0x81, 0x4d, 0xf8, 0x84,
0x26, 0x64, 0xc8, 0x18, 0x4c, 0x69, 0x11, 0x43,
0xf8, 0x84, 0x95, 0x00, 0x42, 0x84, 0x89, 0x82,
0x82, 0x62, 0x82, 0x70, 0x42, 0x48, 0xd2, 0x94,
0x24, 0xf1, 0x21, 0x48, 0x47, 0x94, 0x16, 0x38,
0x1a, 0x84, 0x66, 0x41, 0x33, 0xc8, 0x25, 0x12,
0x18, 0x34, 0x19, 0x24, 0xa2, 0x4b, 0x42, 0x87,
0x22, 0x49, 0xd2, 0x18, 0x02, 0x31, 0x46, 0xc4,
0x96, 0x91, 0x11, 0x42, 0x84, 0x28, 0x95, 0x64,
0xa4, 0x2c, 0x91, 0x11, 0x8b, 0x25, 0x2d, 0x18,
0x67, 0x81, 0xa9, 0x64, 0x8b, 0x89, 0x71, 0x41,
0x58, 0x28, 0x8b, 0x92, 0x87, 0x21, 0x49, 0x22,
0x48, 0xb2, 0x14, 0xa4, 0x21, 0x43, 0xb1, 0xd8,
0x07, 0x24, 0x11, 0x45, 0x01, 0x89, 0x51, 0x81,
0x14, 0x1a, 0x12, 0xe4, 0x81, 0x42, 0x12, 0x41,
0x02, 0x14, 0x49, 0x44, 0x04, 0xa1, 0x12, 0x4c,
0x22, 0xa2, 0x24, 0x22, 0x4c, 0xd2, 0x24, 0x04,
0x21, 0x14, 0x25, 0x02, 0x80, 0x62, 0x82, 0x00,
0x10, 0x04, 0x40, 0x14, 0x4c, 0x0c, 0x48, 0x88,
0x00, 0x10, 0x02, 0x46, 0x26, 0x08, 0x45, 0x88,
0x21, 0x08, 0x86, 0xf2, 0x48, 0xfa, 0x40, 0x02,
0x60, 0x81, 0x18, 0x84, 0x42, 0x40, 0xc2, 0x28,
0x2f, 0x14, 0x08, 0x87, 0x14, 0x41, 0x24, 0x15,
0x18, 0x02, 0x18, 0x80, 0x21, 0x02, 0x00, 0x21,
0x42, 0x10, 0x58, 0x22, 0x00, 0x20, 0x82, 0x12,
0x12, 0x51, 0x14, 0x13, 0x11, 0x04, 0xa9, 0xc1,
0x28, 0x24, 0x1a, 0x24, 0xe4, 0x81, 0x44, 0x23,
0x81, 0x28, 0xd1, 0x62, 0x4a, 0x88, 0x08, 0x70,
0x84, 0x18, 0x08, 0x1f, 0xa6, 0x02, 0x27, 0x54,
0x11, 0x61, 0x24, 0xa0, 0x12, 0x25, 0x35, 0x4b,
0x63, 0x14, 0x04, 0x23, 0x02, 0x25, 0xd9, 0x81,
0x24, 0x14, 0x84, 0xd1, 0x1c, 0xf4, 0x12, 0x23,
0x1c, 0x44, 0x91, 0x48, 0x19, 0x76, 0x44, 0x69,
0x16, 0xc5, 0x92, 0x21, 0x5b, 0x68, 0x41, 0x50,
0x6a, 0x42, 0x47, 0x84, 0x27, 0x14, 0x2e, 0x41,
0x2f, 0x62, 0x01, 0x84, 0x21, 0xab, 0x52, 0x44,
0x29, 0xc1, 0x14, 0x8c, 0xaa, 0x28, 0x29, 0x22,
0x18, 0xc9, 0x4f, 0x89, 0xc8, 0x58, 0x2a, 0xf5,
0x24, 0x48, 0x2f, 0x28, 0x73, 0xb4, 0x08, 0x96,
0x64, 0x84, 0xdf, 0xb7, 0x0d, 0x24, 0x10, 0x36,
0x18, 0x80, 0x13, 0x72, 0x48, 0x21, 0x04, 0x39,
0x88, 0x12, 0x11, 0x47, 0x39, 0x4b, 0x80, 0x61,
0x84, 0x22, 0x29, 0x64, 0x12, 0x81, 0x22, 0x53,
0x04, 0x2c, 0x84, 0x52, 0xa4, 0x45, 0x02, 0x85,
0x22, 0x54, 0x24, 0x90, 0x21, 0x44, 0x21, 0x18,
0x10, 0x3a, 0x12, 0x2a, 0x01, 0x4a, 0x2a, 0x2a,
0x84, 0x98, 0x1a, 0x14, 0x8b, 0x81, 0x2b, 0x21,
0x23, 0x12, 0xa4, 0x92, 0x90, 0x8c, 0xc9, 0x81,
0xf4, 0xce, 0xf9, 0xd0, 0x43, 0x44, 0x01, 0x24,
0x12, 0x84, 0x46, 0x55, 0x22, 0x45, 0x72, 0x28,
0x24, 0x14, 0x32, 0x48, 0x84, 0x42, 0x24, 0x44,
0xd0, 0x41, 0x31, 0x13, 0x13, 0x01, 0x42, 0x1c,
0x14, 0xb8, 0x23, 0x54, 0x48, 0x11, 0x1b, 0xc2,
0x40, 0x42, 0x04, 0x8b, 0x24, 0x1b, 0x24, 0x27,
0x24, 0x1c, 0x53, 0x14, 0x91, 0x25, 0xa4, 0x34,
0x85, 0x54, 0x82, 0x16, 0x34, 0x1a, 0x46, 0xe2,
0x81, 0x12, 0x41, 0xc1, 0xcc, 0x21, 0x4e, 0x8c,
0x8b, 0x14, 0x2e, 0xc8, 0x21, 0x6d, 0xb8, 0x80,
0x18, 0xf8, 0xe5, 0xfc, 0x24, 0x40, 0x54, 0x83,
0x51, 0x28, 0x80, 0xd6, 0x22, 0x81, 0xc4, 0x14,
0x30, 0x48, 0x28, 0x85, 0x76, 0x18, 0x84, 0x42,
0x82, 0x51, 0x84, 0x21, 0x00, 0xc0, 0x41, 0x10,
0x04, 0x26, 0x82, 0x01, 0x13, 0x12, 0x31, 0x44,
0x10, 0x28, 0x34, 0x74, 0x90, 0x28, 0x2a, 0x44,
0x85, 0x04, 0x26, 0x64, 0x44, 0x98, 0x81, 0x82,
0xc3, 0x21, 0xe1, 0x21, 0x02, 0x27, 0x26, 0xe0,
0x22, 0xc4, 0x89, 0x80, 0xb4, 0x84, 0xf3, 0x4b,
0xc7, 0x14, 0x24, 0x42, 0x00, 0x25, 0x68, 0x32,
0x10, 0x81, 0x01, 0x49, 0x12, 0x91, 0x21, 0x46,
0x01, 0x41, 0x46, 0x04, 0x00, 0x24, 0xc6, 0x08,
0x4d, 0x32, 0x48, 0x81, 0x64, 0x46, 0x78, 0x28,
0x52, 0x48, 0x49, 0x51, 0x49, 0x40, 0x78, 0x28,
0x04, 0x29, 0x44, 0x98, 0x14, 0x21, 0x23, 0x41,
0x5d, 0xa2, 0x81, 0x24, 0x41, 0xc0, 0x28, 0x86,
0x12, 0x08, 0xc6, 0x18, 0x84, 0x11, 0x41, 0x62,
0x84, 0x80, 0xf1, 0x97, 0xd3, 0x80, 0x24, 0x01,
0x23, 0xd2, 0x61, 0x11, 0x44, 0x48, 0x08, 0x40,
0x41, 0x12, 0x08, 0x14, 0x87, 0x24, 0x70, 0x44,
0x13, 0x51, 0x24, 0x22, 0x40, 0x05, 0x40, 0x41,
0x09, 0x40, 0x02, 0x52, 0x21, 0x25, 0x42, 0xa3,
0x12, 0x80, 0x41, 0x41, 0x04, 0x20, 0x62, 0x11,
0x00, 0x62, 0x21, 0x20, 0x14, 0x04, 0x81, 0x2a,
0x38, 0x11, 0x53, 0x82, 0x38, 0x14, 0xf0, 0x48,
0x5b, 0x34, 0x40, 0x06, 0x69, 0xd2, 0xa6, 0xd3,
0x24, 0x82, 0x02, 0x1a, 0x84, 0xc1, 0x21, 0x10,
0x4a, 0x39, 0x1d, 0x64, 0x50, 0x32, 0x81, 0x65,
0x82, 0x22, 0x14, 0x24, 0x14, 0x52, 0x84, 0x51,
0x29, 0x92, 0x12, 0x44, 0x79, 0xd1, 0x2c, 0x31,
0x44, 0x25, 0xca, 0x21, 0x38, 0x63, 0x32, 0x18,
0x8f, 0x61, 0xd2, 0x24, 0xd2, 0x14, 0x51, 0x49,
0x8c, 0x72, 0x12, 0x01, 0x12, 0x28, 0x83, 0xd8,
0x24, 0x18, 0x38, 0x14, 0x25, 0xd8, 0x4a, 0x82,
0xa1, 0x12, 0x81, 0x84, 0x43, 0x11, 0xf4, 0xb8,
0xd8, 0xc0, 0x42, 0x12, 0x85, 0x11, 0x41, 0x11,
0x71, 0x48, 0x72, 0x11, 0x18, 0x42, 0x04, 0x1d,
0x48, 0x14, 0x49, 0x14, 0xe2, 0x82, 0x64, 0x44,
0x4b, 0x41, 0x54, 0x00, 0x86, 0x48, 0x31, 0x25,
0x42, 0x11, 0x81, 0x8c, 0x94, 0x28, 0x25, 0x88,
0x82, 0x04, 0x40, 0x94, 0x42, 0x12, 0x10, 0x21,
0x04, 0x46, 0x02, 0xc3, 0xc2, 0x44, 0x29, 0xc8,
0x48, 0x21, 0x16, 0xa8, 0x14, 0x22, 0x4a, 0x88,
0x94, 0xa2, 0x48, 0x2d, 0xa4, 0x8a, 0x24, 0x24,
0xfa, 0x3d, 0xd2, 0x00, 0x14, 0x40, 0x54, 0x42,
0x10, 0x04, 0x10, 0x04, 0x29, 0x44, 0x11, 0x48,
0x0c, 0x00, 0x42, 0x21, 0x84, 0x46, 0x08, 0x10,
0x31, 0x42, 0x30, 0x4a, 0x50, 0x48, 0x00, 0xc0,
0x21, 0xb0, 0x11, 0x16, 0x32, 0x24, 0x24, 0x26,
0x1c, 0x43, 0x22, 0x41, 0x34, 0x88, 0x90, 0x22,
0x89, 0x02, 0x88, 0xc2, 0x20, 0x48, 0xc8, 0x14,
0x23, 0x01, 0x00, 0xef, 0xfe, 0x06, 0x26, 0x14,
0x18, 0xac, 0x14, 0x22, 0x52, 0x89, 0x84, 0x74,
0x41, 0xa2, 0x41, 0x00, 0x83, 0x02, 0x70, 0x56,
0x08, 0x2b, 0x14, 0x60, 0x84, 0x20, 0x01, 0x48,
0x00, 0x11, 0x50, 0x14, 0x18, 0x14, 0x85, 0x94,
0x38, 0x46, 0x18, 0x41, 0x44, 0x02, 0x18, 0x80,
0x62, 0x84, 0x00, 0x29, 0x34, 0x48, 0x00, 0x20,
0x01, 0x30, 0x28, 0x43, 0xb7, 0x62, 0x28, 0x24,
0x02, 0x88, 0x9a, 0x08, 0x40, 0x08, 0x34, 0x16,
0x01, 0x90, 0x44, 0x81, 0x50, 0x82, 0x48, 0x29,
0x81, 0xc4, 0x24, 0x50, 0x48, 0x29, 0x54, 0xa4,
0x10, 0x04, 0x85, 0x41, 0x08, 0x12, 0x00, 0x41,
0x43, 0x14, 0x38, 0x43, 0x00, 0x28, 0x80, 0x02,
0x4a, 0x22, 0x04, 0x00, 0x21, 0x22, 0x18, 0x20,
0x01, 0x50, 0x82, 0x81, 0x21, | |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2020, <NAME>
#
# This module is part of anima and is released under the MIT
# License: http://www.opensource.org/licenses/MIT
import re
from anima import logger
from anima.ui.base import AnimaDialogBase, ui_caller
from anima.ui.lib import QtCore, QtWidgets
def UI(app_in=None, executor=None, **kwargs):
    """Create and show the Project dialog through :func:`ui_caller`.

    :param app_in: A Qt Application instance, which you can pass to let the UI
      be attached to the given applications event process.
    :param executor: Instead of calling app.exec_ the UI will call this given
      function. It also passes the created app instance to this executor.
    :param kwargs: Extra keyword arguments, passed to ``MainDialog`` as-is
      (e.g. ``project=...`` to open the dialog in *Update* mode).
    :return: whatever ``ui_caller`` returns for ``MainDialog``
    """
    return ui_caller(app_in, executor, MainDialog, **kwargs)
class MainDialog(QtWidgets.QDialog, AnimaDialogBase):
"""The Project Dialog
"""
max_project_name_length = 32
def __init__(self, parent=None, project=None):
    """Initialize the dialog.

    :param parent: the parent Qt widget
    :param project: an optional Stalker Project instance; when given the
      dialog starts in *Update* mode and is pre-filled with its data.
    """
    logger.debug("initializing the interface")
    super(MainDialog, self).__init__(parent)

    # nobody is logged in until show() resolves the user
    self.logged_in_user = None

    self.project = project
    # the mode only changes the dialog title text
    self.mode = 'Update' if self.project else 'Create'
    self.image_format = None

    self._setup_ui()
    self._setup_signals()
    self._set_defaults()

    if self.project:
        self.fill_ui_with_project(self.project)
def _setup_ui(self):
    """create UI elements

    Builds the whole dialog programmatically (no .ui file): a header
    label, a form layout with the project fields (rows 0-8), a second
    form layout with the client fields (rows 0-2) and the Ok/Cancel
    button box.  The numeric row indices passed to ``setWidget`` /
    ``setLayout`` below are therefore significant and must stay in sync.
    """
    self.resize(517, 545)
    self.vertical_layout = QtWidgets.QVBoxLayout(self)
    self.setWindowTitle("Project Dialog")
    # ----------------------
    # Dialog Label
    self.dialog_label = QtWidgets.QLabel(self)
    # shows "Create Project" or "Update Project" depending on the mode
    self.dialog_label.setText('%s Project' % self.mode)
    self.dialog_label.setStyleSheet(
        "color: rgb(71, 143, 202);\nfont: 18pt;"
    )
    self.vertical_layout.addWidget(self.dialog_label)
    # horizontal separator under the title
    self.line = QtWidgets.QFrame(self)
    self.line.setFrameShape(QtWidgets.QFrame.HLine)
    self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.vertical_layout.addWidget(self.line)
    # form layout holding all project fields; labels are right-aligned
    self.project_info_form_layout = QtWidgets.QFormLayout()
    self.project_info_form_layout.setLabelAlignment(
        QtCore.Qt.AlignRight |
        QtCore.Qt.AlignTrailing |
        QtCore.Qt.AlignVCenter
    )
    # ----------------------
    # Name Fields (form row 0)
    self.name_label = QtWidgets.QLabel(self)
    self.name_label.setText("Name")
    self.project_info_form_layout.setWidget(
        0, QtWidgets.QFormLayout.LabelRole, self.name_label
    )
    # the field role holds a sub-layout: the line edit is inserted
    # above the validator message label further below
    self.name_fields_vertical_layout = QtWidgets.QVBoxLayout()
    self.name_validator_label = QtWidgets.QLabel(self)
    self.name_validator_label.setText("Validator Message")
    self.name_validator_label.setStyleSheet("color: rgb(255, 0, 0);")
    self.name_fields_vertical_layout.addWidget(self.name_validator_label)
    self.project_info_form_layout.setLayout(
        0,
        QtWidgets.QFormLayout.FieldRole,
        self.name_fields_vertical_layout
    )
    # add name_line_edit
    from anima.ui.widgets import ValidatedLineEdit
    # ValidatedLineEdit writes its validation messages to the given label
    self.name_line_edit = ValidatedLineEdit(
        message_field=self.name_validator_label
    )
    # insert at index 0 so the line edit sits above the validator label
    self.name_fields_vertical_layout.insertWidget(
        0, self.name_line_edit
    )
    # ----------------------
    # Code Fields (form row 1, mirrors the Name field structure)
    self.code_label = QtWidgets.QLabel(self)
    self.code_label.setText("Code")
    self.project_info_form_layout.setWidget(
        1,
        QtWidgets.QFormLayout.LabelRole,
        self.code_label
    )
    self.code_fields_vertical_layout = QtWidgets.QVBoxLayout()
    self.code_validator_label = QtWidgets.QLabel(self)
    self.code_validator_label.setText("Validator Message")
    self.code_validator_label.setStyleSheet("color: rgb(255, 0, 0);")
    self.code_fields_vertical_layout.addWidget(self.code_validator_label)
    self.project_info_form_layout.setLayout(
        1,
        QtWidgets.QFormLayout.FieldRole,
        self.code_fields_vertical_layout
    )
    # add code_line_edit
    self.code_line_edit = ValidatedLineEdit(
        message_field=self.code_validator_label
    )
    self.code_fields_vertical_layout.insertWidget(
        0, self.code_line_edit
    )
    # ----------------------
    # Type Fields (form row 2)
    self.type_label = QtWidgets.QLabel(self)
    self.type_label.setText("Type")
    self.project_info_form_layout.setWidget(
        2,
        QtWidgets.QFormLayout.LabelRole,
        self.type_label
    )
    # editable: the user may type a new type name instead of picking one
    self.type_combo_box = QtWidgets.QComboBox(self)
    self.type_combo_box.setEditable(True)
    self.project_info_form_layout.setWidget(
        2,
        QtWidgets.QFormLayout.FieldRole,
        self.type_combo_box
    )
    # ----------------------
    # Date Fields (form row 3)
    self.date_label = QtWidgets.QLabel(self)
    self.date_label.setText("Date")
    self.project_info_form_layout.setWidget(
        3,
        QtWidgets.QFormLayout.LabelRole,
        self.date_label
    )
    self.date_date_edit = QtWidgets.QDateEdit(self)
    self.project_info_form_layout.setWidget(
        3,
        QtWidgets.QFormLayout.FieldRole,
        self.date_date_edit
    )
    # ----------------------
    # Image Format Fields (form row 4)
    # the widget manages its own label/field placement in the form layout
    from anima.ui.widgets.image_format import ImageFormatWidget
    self.image_format = ImageFormatWidget(
        parent=self,
        parent_form_layout=self.project_info_form_layout,
        parent_form_layout_index=4
    )
    # ----------------------
    # FPS Fields (form row 5)
    self.fps_label = QtWidgets.QLabel(self)
    self.fps_label.setText("FPS")
    self.project_info_form_layout.setWidget(
        5,
        QtWidgets.QFormLayout.LabelRole,
        self.fps_label
    )
    self.fps_spin_box = QtWidgets.QSpinBox(self)
    self.fps_spin_box.setMinimum(1)
    # default frame rate is 25 fps
    self.fps_spin_box.setProperty("value", 25)
    self.project_info_form_layout.setWidget(
        5,
        QtWidgets.QFormLayout.FieldRole,
        self.fps_spin_box
    )
    # ----------------------
    # Repository Fields (form row 6): combo box + Update/New buttons
    self.repository_label = QtWidgets.QLabel(self)
    self.repository_label.setText("Repository")
    self.project_info_form_layout.setWidget(
        6,
        QtWidgets.QFormLayout.LabelRole,
        self.repository_label
    )
    self.repository_horizontal_layout = QtWidgets.QHBoxLayout()
    self.repository_combo_box = QtWidgets.QComboBox(self)
    self.repository_horizontal_layout.addWidget(
        self.repository_combo_box)
    # Update Repository Push Button
    self.update_repository_push_button = QtWidgets.QPushButton(self)
    self.update_repository_push_button.setText("Update...")
    self.repository_horizontal_layout.addWidget(
        self.update_repository_push_button
    )
    # Create Repository Push Button
    self.create_repository_pushButton = QtWidgets.QPushButton(self)
    self.create_repository_pushButton.setText("New...")
    self.repository_horizontal_layout.addWidget(
        self.create_repository_pushButton
    )
    # let the combo box absorb the extra horizontal space
    self.repository_horizontal_layout.setStretch(0, 1)
    self.project_info_form_layout.setLayout(
        6,
        QtWidgets.QFormLayout.FieldRole,
        self.repository_horizontal_layout
    )
    # ----------------------
    # Structure Fields (form row 7): combo box + Update/New buttons
    self.structure_label = QtWidgets.QLabel(self)
    self.structure_label.setText("Structure")
    self.project_info_form_layout.setWidget(
        7,
        QtWidgets.QFormLayout.LabelRole,
        self.structure_label
    )
    self.structure_horizontal_layout = QtWidgets.QHBoxLayout()
    self.structure_combo_box = QtWidgets.QComboBox(self)
    self.structure_horizontal_layout.addWidget(self.structure_combo_box)
    # Update Structure Push Button
    self.update_structure_push_button = QtWidgets.QPushButton(self)
    self.update_structure_push_button.setText("Update...")
    self.structure_horizontal_layout.addWidget(
        self.update_structure_push_button
    )
    # Create Structure Push Button
    self.create_structure_push_button = QtWidgets.QPushButton(self)
    self.create_structure_push_button.setText("New...")
    self.structure_horizontal_layout.addWidget(
        self.create_structure_push_button
    )
    self.structure_horizontal_layout.setStretch(0, 1)
    self.project_info_form_layout.setLayout(
        7,
        QtWidgets.QFormLayout.FieldRole,
        self.structure_horizontal_layout
    )
    # ----------------------
    # Status Fields (form row 8)
    self.status_label = QtWidgets.QLabel(self)
    self.status_label.setText("Status")
    self.project_info_form_layout.setWidget(
        8,
        QtWidgets.QFormLayout.LabelRole,
        self.status_label
    )
    self.status_combo_box = QtWidgets.QComboBox(self)
    self.project_info_form_layout.setWidget(
        8,
        QtWidgets.QFormLayout.FieldRole,
        self.status_combo_box
    )
    self.vertical_layout.addLayout(self.project_info_form_layout)
    # ----------------------
    # Client Fields (second section header + separator)
    self.client_info_label = QtWidgets.QLabel(self)
    self.client_info_label.setText("Client Info")
    self.client_info_label.setStyleSheet(
        "color: rgb(71, 143, 202);\nfont: 18pt;"
    )
    self.vertical_layout.addWidget(self.client_info_label)
    self.line_2 = QtWidgets.QFrame(self)
    self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
    self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
    self.vertical_layout.addWidget(self.line_2)
    self.client_info_formLayout = QtWidgets.QFormLayout()
    self.client_info_formLayout.setLabelAlignment(
        QtCore.Qt.AlignRight |
        QtCore.Qt.AlignTrailing |
        QtCore.Qt.AlignVCenter
    )
    # Client Fields (client form row 0)
    self.client_label = QtWidgets.QLabel(self)
    self.client_label.setText("Client")
    self.client_info_formLayout.setWidget(
        0,
        QtWidgets.QFormLayout.LabelRole,
        self.client_label
    )
    self.client_combo_box = QtWidgets.QComboBox(self)
    self.client_combo_box.setEditable(True)
    self.client_info_formLayout.setWidget(
        0,
        QtWidgets.QFormLayout.FieldRole,
        self.client_combo_box
    )
    # Agency Fields (client form row 1)
    self.agency_label = QtWidgets.QLabel(self)
    self.agency_label.setText("Agency")
    self.client_info_formLayout.setWidget(
        1,
        QtWidgets.QFormLayout.LabelRole,
        self.agency_label
    )
    self.agency_combo_box = QtWidgets.QComboBox(self)
    self.agency_combo_box.setEditable(True)
    self.client_info_formLayout.setWidget(
        1,
        QtWidgets.QFormLayout.FieldRole,
        self.agency_combo_box
    )
    # Production Company Fields (client form row 2)
    self.production_company_label = QtWidgets.QLabel(self)
    # rich text label so the two words wrap onto two right-aligned lines
    self.production_company_label.setText(
        "<html><head/><body><p align=\"right\">Production<br/>"
        "Company</p></body></html>"
    )
    self.client_info_formLayout.setWidget(
        2,
        QtWidgets.QFormLayout.LabelRole,
        self.production_company_label
    )
    self.production_company_combo_box = QtWidgets.QComboBox(self)
    self.production_company_combo_box.setEditable(True)
    self.client_info_formLayout.setWidget(
        2,
        QtWidgets.QFormLayout.FieldRole,
        self.production_company_combo_box
    )
    self.vertical_layout.addLayout(self.client_info_formLayout)
    # Ok/Cancel buttons; wired to accept/reject in _setup_signals
    self.button_box = QtWidgets.QDialogButtonBox(self)
    self.button_box.setOrientation(QtCore.Qt.Horizontal)
    self.button_box.setStandardButtons(
        QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
    self.vertical_layout.addWidget(self.button_box)
    # give the project form twice the vertical stretch of the client form
    self.vertical_layout.setStretch(2, 2)
    self.vertical_layout.setStretch(5, 1)
def _setup_signals(self):
    """creates the signals

    All connections use the old-style (sender, SIGNAL, slot) API so the
    same code works on both PySide and PyQt4 through the anima Qt shim.
    """
    # (sender, SIGNAL signature, slot) triplets
    connections = (
        # dialog accept/reject
        (self.button_box, "accepted()", self.accept),
        (self.button_box, "rejected()", self.reject),
        # name_line_edit is changed
        (self.name_line_edit, 'textChanged(QString)',
         self.name_line_edit_changed),
        # code_line_edit is changed
        (self.code_line_edit, 'textChanged(QString)',
         self.code_line_edit_changed),
        # repository buttons
        (self.create_repository_pushButton, 'clicked()',
         self.create_repository_push_button_clicked),
        (self.update_repository_push_button, 'clicked()',
         self.update_repository_push_button_clicked),
        # structure buttons
        (self.create_structure_push_button, 'clicked()',
         self.create_structure_push_button_clicked),
        (self.update_structure_push_button, 'clicked()',
         self.update_structure_push_button_clicked),
    )
    for sender, signature, slot in connections:
        QtCore.QObject.connect(sender, QtCore.SIGNAL(signature), slot)
def _set_defaults(self):
    """setup the default values

    Applies size policies, invalidates the mandatory name/code fields,
    and fills the type, image format, repository, structure and status
    fields from the database.
    """
    # every combo box expands horizontally and keeps a fixed height
    # (self.name_line_edit is left at its default policy)
    for combo_box in (self.type_combo_box,
                      self.status_combo_box,
                      self.client_combo_box,
                      self.agency_combo_box,
                      self.production_company_combo_box):
        combo_box.setSizePolicy(
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Fixed
        )

    # invalidate the name and code fields by default
    self.name_line_edit.set_invalid('Enter a name')
    self.code_line_edit.set_invalid('Enter a code')

    # fill the type field with Project types, alphabetically
    from stalker import Type
    from stalker.db.session import DBSession
    project_types = DBSession\
        .query(Type.id, Type.name)\
        .filter(Type.target_entity_type == 'Project')\
        .order_by(Type.name)\
        .all()
    self.type_combo_box.clear()
    # first entry means "no type"
    self.type_combo_box.addItem('', -1)
    for type_id, type_name in project_types:
        self.type_combo_box.addItem(type_name, type_id)

    self.image_format.fill_combo_box()
    self.fill_repository_combo_box()
    self.fill_structure_combo_box()

    # fill status field with all statuses usable by Projects
    sql = """select
    "SimpleEntities".id,
    "SimpleEntities".name
from "Statuses"
join "SimpleEntities" on "Statuses".id = "SimpleEntities".id
join "StatusList_Statuses" on "Statuses".id = "StatusList_Statuses".status_id
join "StatusLists" on "StatusLists".id = "StatusList_Statuses".status_list_id
where "StatusLists".target_entity_type = 'Project'"""
    all_project_statuses = \
        DBSession.connection().execute(sql).fetchall()
    for st_id, st_name in all_project_statuses:
        self.status_combo_box.addItem(st_name, st_id)
def show(self):
    """overridden show method

    Resolves the logged in user first; when no user can be determined
    the dialog rejects itself and returns None instead of showing.
    """
    logger.debug('MainDialog.show is started')
    self.logged_in_user = self.get_logged_in_user()
    if self.logged_in_user:
        return_val = super(MainDialog, self).show()
    else:
        self.reject()
        return_val = None
    logger.debug('MainDialog.show is finished')
    return return_val
def fill_repository_combo_box(self):
    """fills the repository_combo_box with Repository instances

    Each item shows the repository name and stores its id as item data.
    """
    from stalker import Repository
    from stalker.db.session import DBSession

    self.repository_combo_box.clear()
    repos = DBSession\
        .query(Repository.id, Repository.name)\
        .order_by(Repository.name)\
        .all()
    for repo_id, repo_name in repos:
        self.repository_combo_box.addItem(repo_name, repo_id)
def fill_structure_combo_box(self):
    """fills the structure_combo_box with Structure instances

    Each item shows the structure name and stores its id as item data.
    """
    from stalker import Structure
    from stalker.db.session import DBSession

    self.structure_combo_box.clear()
    structures = DBSession\
        .query(Structure.id, Structure.name)\
        .order_by(Structure.name)\
        .all()
    for structure_id, structure_name in structures:
        self.structure_combo_box.addItem(structure_name, structure_id)
def name_line_edit_changed(self, text):
    """runs when the name_line_edit text has changed

    Validates the name field and mirrors the text into the code field.
    """
    # only letters, digits, dash, underscore and space are allowed
    if re.findall(r'[^a-zA-Z0-9\-_ ]+', text):
        self.name_line_edit.set_invalid('Invalid character')
    elif text == '':
        self.name_line_edit.set_invalid('Enter a name')
    else:
        self.name_line_edit.set_valid()

    # update code field also: keep only the characters a code accepts
    # (uppercase letters, digits and underscores)
    self.code_line_edit.setText(re.sub(r'[^A-Z0-9_]+', '', text))
def code_line_edit_changed(self, text):
    """runs when the code_line_edit text has changed

    Validates the code field: character set, non-empty, max length.
    """
    # only letters, digits and underscore are allowed in a code
    if re.findall(r'[^a-zA-Z0-9_]+', text):
        self.code_line_edit.set_invalid('Invalid character')
        return
    if text == '':
        self.code_line_edit.set_invalid('Enter a code')
        return
    if len(text) > self.max_project_name_length:
        self.code_line_edit.set_invalid(
            'Code is too long (>%s)' %
            self.max_project_name_length
        )
        return
    self.code_line_edit.set_valid()
def fill_ui_with_project(self, project):
    """fills the UI fields with the given project

    :param project: A Stalker Project instance
    :return:
    """
    if not project:
        return
    self.project = project

    self.name_line_edit.setText(project.name)
    self.name_line_edit.set_valid()
    self.code_line_edit.setText(project.code)
    self.code_line_edit.set_valid()

    # NOTE: QComboBox.findData/findText return -1 when nothing matches,
    # and 0 is a valid index, so all checks below test against -1
    # explicitly (``if index:`` would skip item 0 and accept -1).
    if project.type:
        index = self.type_combo_box.findData(project.type.id)
        if index != -1:
            self.type_combo_box.setCurrentIndex(index)

    if project.image_format:
        index = self.image_format.combo_box.findData(
            project.image_format.id
        )
        if index != -1:
            self.image_format.combo_box.setCurrentIndex(index)

    self.fps_spin_box.setValue(project.fps)

    if project.repository:
        # TODO: allow multiple repositories
        index = self.repository_combo_box.findText(
            project.repository.name,
            QtCore.Qt.MatchExactly
        )
        if index != -1:
            self.repository_combo_box.setCurrentIndex(index)

    if project.structure:
        index = self.structure_combo_box.findText(
            project.structure.name,
            QtCore.Qt.MatchExactly
        )
        if index != -1:
            self.structure_combo_box.setCurrentIndex(index)

    if project.status:
        index = self.status_combo_box.findText(
            project.status.name,
            QtCore.Qt.MatchExactly
        )
        if index != -1:
            self.status_combo_box.setCurrentIndex(index)
def create_repository_push_button_clicked(self):
    """runs when create_repository_pushButton is clicked

    Opens the repository dialog and, if accepted, refreshes the
    repository combo box and selects the newly created repository.
    """
    try:
        # PySide
        accepted = QtWidgets.QDialog.DialogCode.Accepted
    except AttributeError:
        # PyQt4
        accepted = QtWidgets.QDialog.Accepted

    from anima.ui import repository_dialog
    create_repository_dialog = \
        repository_dialog.MainDialog(parent=self)
    create_repository_dialog.exec_()
    result = create_repository_dialog.result()

    if result == accepted:
        repository = create_repository_dialog.repository

        # select the created repository
        self.fill_repository_combo_box()
        index = self.repository_combo_box.findData(repository.id)
        # findData returns -1 on a miss and 0 is a valid index, so test
        # against -1 (``if index:`` skipped index 0 and accepted -1)
        if index != -1:
            self.repository_combo_box.setCurrentIndex(index)

    create_repository_dialog.deleteLater()
def update_repository_push_button_clicked(self):
    """runs when update_repository_push_button is clicked

    Opens the repository dialog for the currently selected repository
    and, if accepted, refreshes the combo box and re-selects it.
    """
    try:
        # PySide
        accepted = QtWidgets.QDialog.DialogCode.Accepted
    except AttributeError:
        # PyQt4
        accepted = QtWidgets.QDialog.Accepted

    repo = self.get_current_repository()
    if not repo:
        return

    from anima.ui import repository_dialog
    update_repository_dialog = \
        repository_dialog.MainDialog(parent=self, repository=repo)
    update_repository_dialog.exec_()
    result = update_repository_dialog.result()

    if result == accepted:
        repository = update_repository_dialog.repository

        # select the updated repository
        self.fill_repository_combo_box()
        index = self.repository_combo_box.findData(repository.id)
        # findData returns -1 on a miss and 0 is a valid index, so test
        # against -1 (``if index:`` skipped index 0 and accepted -1)
        if index != -1:
            self.repository_combo_box.setCurrentIndex(index)

    update_repository_dialog.deleteLater()
def create_structure_push_button_clicked(self):
    """runs when create_structure_push_button is clicked

    Opens the structure dialog and, if accepted, refreshes the
    structure combo box and selects the newly created structure.
    """
    try:
        # PySide
        accepted = QtWidgets.QDialog.DialogCode.Accepted
    except AttributeError:
        # PyQt4
        accepted = QtWidgets.QDialog.Accepted

    from anima.ui import structure_dialog
    create_structure_dialog = \
        structure_dialog.MainDialog(parent=self)
    create_structure_dialog.exec_()
    result = create_structure_dialog.result()

    if result == accepted:
        structure = create_structure_dialog.structure

        # select the created structure
        self.fill_structure_combo_box()
        index = self.structure_combo_box.findData(structure.id)
        # findData returns -1 on a miss and 0 is a valid index, so test
        # against -1 (``if index:`` skipped index 0 and accepted -1)
        if index != -1:
            self.structure_combo_box.setCurrentIndex(index)

    create_structure_dialog.deleteLater()
def update_structure_push_button_clicked(self):
"""runs when update_structure_push_button is clicked
"""
try:
# PySide
accepted = QtWidgets.QDialog.DialogCode.Accepted
except AttributeError:
# PyQt4
accepted = QtWidgets.QDialog.Accepted
structure = self.get_current_structure()
if not structure:
return
from anima.ui import structure_dialog
update_structure_dialog = \
structure_dialog.MainDialog(parent=self, structure=structure)
update_structure_dialog.exec_()
result = update_structure_dialog.result()
if result == | |
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016-2019 <NAME>, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Maya imports.
import maya.OpenMaya as om
import maya.cmds as mc
import maya.mel as mel
import pymel.core as pm
# appleseedMaya imports.
from logger import logger
def createGlobalNodes():
    """Create the shared appleseedRenderGlobals node if it does not exist."""
    if mc.objExists("appleseedRenderGlobals"):
        return

    # node creation clobbers the selection even with skipSelect on some
    # paths, so remember the current selection and restore it afterwards
    previousSelection = mc.ls(sl=True)
    mc.createNode(
        "appleseedRenderGlobals",
        name="appleseedRenderGlobals",
        shared=True,
        skipSelect=True)
    # lock so the user cannot delete/rename the globals node
    mc.lockNode("appleseedRenderGlobals")
    mc.select(previousSelection, replace=True)
    logger.debug("Created appleseed render global node")
def createRenderTabsMelProcedures():
    """Define the global MEL procedures backing the render settings tabs.

    The "Common" tab reuses Maya's stock implementation plus a Python
    post-update hook; the appleseed specific tabs (Main, Lighting,
    Output, System) all forward to the ``g_appleseed*Tab`` objects in
    this module, so their create/update procedures are generated from a
    single template instead of eight copy-pasted MEL snippets.
    """
    pm.mel.source("createMayaSoftwareCommonGlobalsTab.mel")

    mel.eval('''
    global proc appleseedUpdateCommonTabProcedure()
    {
        updateMayaSoftwareCommonGlobalsTab();

        python("import appleseedMaya.renderGlobals");
        python("appleseedMaya.renderGlobals.postUpdateCommonTab()");
    }
    '''
    )

    # template for the appleseed-specific tab procedures; e.g. tab="Main"
    # and action="Create" yields appleseedCreateAppleseedMainTabProcedure
    # calling g_appleseedMainTab.create()
    melTemplate = '''
    global proc appleseed%(action)sAppleseed%(tab)sTabProcedure()
    {
        python("import appleseedMaya.renderGlobals");
        python("appleseedMaya.renderGlobals.g_appleseed%(tab)sTab.%(method)s()");
    }
    '''
    for tab in ("Main", "Lighting", "Output", "System"):
        for action, method in (("Create", "create"), ("Update", "update")):
            mel.eval(melTemplate % {
                "action": action,
                "tab": tab,
                "method": method
            })
def renderSettingsBuiltCallback(renderer):
    """Callback fired when Maya builds the render-settings window.

    Registers the five appleseed tabs (Common, appleseed main, Lighting,
    Output, System) with their MEL create/update procedures.

    Args:
        renderer: name of the renderer the settings were built for
            (unused; we always register for "appleseed").
    """
    logger.debug("appleseedRenderSettingsBuilt called!")
    # (tab label, create procedure, update procedure) — the procedure
    # names are the MEL wrappers emitted by createRenderTabsMelProcedures.
    tabs = [
        ("Common",
         "createMayaSoftwareCommonGlobalsTab",
         "appleseedUpdateCommonTabProcedure"),
        ("appleseed",
         "appleseedCreateAppleseedMainTabProcedure",
         "appleseedUpdateAppleseedMainTabProcedure"),
        ("Lighting",
         "appleseedCreateAppleseedLightingTabProcedure",
         "appleseedUpdateAppleseedLightingTabProcedure"),
        ("Output",
         "appleseedCreateAppleseedOutputTabProcedure",
         "appleseedUpdateAppleseedOutputTabProcedure"),
        ("System",
         "appleseedCreateAppleseedSystemTabProcedure",
         "appleseedUpdateAppleseedSystemTabProcedure"),
    ]
    for tab in tabs:
        pm.renderer("appleseed", edit=True, addGlobalsTab=tab)
# Callback IDs returned by om.MDGMessage.add*Callback; stored so the
# callbacks can be deregistered again in removeRenderGlobalsScriptJobs().
g_nodeAddedCallbackID = None
g_nodeRemovedCallbackID = None
# Names of the appleseed environment-light nodes currently in the scene;
# maintained by the __nodeAdded/__nodeRemoved callbacks below.
g_environmentLightsList = []
# Node types treated as environment lights for the env-light option menu.
APPLESEED_ENVIRONMENT_LIGHTS = [
    "appleseedSkyDomeLight",
    "appleseedPhysicalSkyLight"
]
# Layout constants (pixels) for the render-globals tab UI.
g_columnWidth = 400
g_subColumnWidth = g_columnWidth - 20
g_margin = 2
def __nodeAdded(node, data):
    """DG node-added callback: record newly created appleseed environment
    lights and refresh the env-light menu in the main tab."""
    fn = om.MFnDependencyNode(node)
    if fn.typeName() not in APPLESEED_ENVIRONMENT_LIGHTS:
        return
    logger.debug("Added or removed appleseed environment light")
    global g_environmentLightsList
    g_environmentLightsList.append(fn.name())
    g_appleseedMainTab.updateEnvLightControl()
def __nodeRemoved(node, data):
    """DG node-removed callback: drop deleted appleseed environment lights
    from the tracked list and refresh the env-light menu."""
    fn = om.MFnDependencyNode(node)
    if fn.typeName() not in APPLESEED_ENVIRONMENT_LIGHTS:
        return
    logger.debug("Removed appleseed environment light")
    global g_environmentLightsList
    g_environmentLightsList.remove(fn.name())
    g_appleseedMainTab.updateEnvLightControl()
def addRenderGlobalsScriptJobs():
    """Install the callbacks and script jobs that keep the appleseed render
    globals UI in sync.

    Registers node added/removed callbacks (for environment-light tracking)
    and script jobs that re-run currentRendererChanged() whenever the current
    renderer changes, a new scene is opened, or at startup. Must be paired
    with removeRenderGlobalsScriptJobs(); the asserts guard against double
    registration.
    """
    logger.debug("Adding render globals script jobs")
    global g_nodeAddedCallbackID
    assert g_nodeAddedCallbackID is None
    g_nodeAddedCallbackID = om.MDGMessage.addNodeAddedCallback(__nodeAdded)
    global g_nodeRemovedCallbackID
    assert g_nodeRemovedCallbackID is None
    g_nodeRemovedCallbackID = om.MDGMessage.addNodeRemovedCallback(
        __nodeRemoved)
    # This is evalDeferred so it doesn't get
    # called before createMayaSoftwareCommonGlobalsTab
    python_script = "import appleseedMaya.renderGlobals; appleseedMaya.renderGlobals.currentRendererChanged()"
    mc.scriptJob(
        attributeChange=[
            "defaultRenderGlobals.currentRenderer",
            lambda: mc.evalDeferred(python_script, lowestPriority=True),
        ]
    )
    # For fixing the render globals common tab when opening new scene
    # and the default renderer is appleseed
    mc.scriptJob(
        event=[
            'NewSceneOpened',
            lambda: mc.evalDeferred(python_script, lowestPriority=True),
        ]
    )
    # For fixing the render globals common tab on initial startup of maya
    # when the default renderer is appleseed
    mc.evalDeferred(python_script, lowestPriority=True)
def removeRenderGlobalsScriptJobs():
    """Deregister the node added/removed callbacks installed by
    addRenderGlobalsScriptJobs(); asserts that they were registered."""
    global g_nodeAddedCallbackID, g_nodeRemovedCallbackID
    assert g_nodeAddedCallbackID is not None
    om.MMessage.removeCallback(g_nodeAddedCallbackID)
    g_nodeAddedCallbackID = None
    assert g_nodeRemovedCallbackID is not None
    om.MMessage.removeCallback(g_nodeRemovedCallbackID)
    g_nodeRemovedCallbackID = None
    logger.debug("Removed render globals script jobs")
def imageFormatChanged():
    """Propagate appleseed's image-format choice to Maya's render globals.

    Since we only support two file formats at the moment the mapping is
    hardcoded: 51 is Maya's custom-image-format code (used for EXR) and
    32 is Maya's PNG code. The extension attribute (used in the file-name
    preview) is updated as well.
    """
    logger.debug("imageFormatChanged called")
    formatMap = {0: (51, "exr"), 1: (32, "png")}  # appleseed code -> (Maya code, ext)
    newFormat = mc.getAttr("appleseedRenderGlobals.imageFormat")
    try:
        mayaFormat, extension = formatMap[newFormat]
    except KeyError:
        raise RuntimeError("Unknown render global image file format")
    mc.setAttr("defaultRenderGlobals.imageFormat", mayaFormat)
    mc.setAttr("defaultRenderGlobals.imfkey", extension, type="string")
    mc.optionMenuGrp("imageMenuMayaSW", edit=True, select=newFormat + 1)
def currentRendererChanged():
    """React to Maya's current-renderer switch.

    When the renderer becomes appleseed: make sure the render globals node
    exists, create the (hidden) globals window if needed, and rebuild the
    common tab's image-format menu so it only offers the formats appleseed
    supports (EXR and PNG).
    """
    newRenderer = mel.eval("currentRenderer()")
    logger.debug("currentRendererChanged called, new renderer = %s", newRenderer)
    if newRenderer != "appleseed":
        return
    # Make sure our render globals node exists.
    createGlobalNodes()
    # If the render globals window does not exist, create it (kept hidden).
    if not mc.window("unifiedRenderGlobalsWindow", exists=True):
        mel.eval("unifiedRenderGlobalsWindow")
        if pm.versions.current() >= 2017000:
            mc.workspaceControl("unifiedRenderGlobalsWindow", edit=True, visible=False)
        else:
            mc.window("unifiedRenderGlobalsWindow", edit=True, visible=False)
    # This can happen if currentRendererChanged is called too soon during startup
    # and unifiedRenderGlobalsWindow isn't complete or delayed for some reason.
    # Known to happen if default renderer is appleseed and the scene is opened as
    # a commandline argument. In that case the NewSceneOpened scriptjob will call the
    # currentRendererChanged function again later.
    if not mc.optionMenuGrp('imageMenuMayaSW', q=True, ex=True):
        # logger.warn is a deprecated alias; use warning().
        logger.warning("imageMenuMayaSW does not exist yet")
        return
    # "Customize" the image formats menu.
    mc.setParent("unifiedRenderGlobalsWindow")
    mel.eval("setParentToCommonTab;")
    mc.setParent("imageFileOutputSW")
    mc.setParent("imageMenuMayaSW")
    mc.setParent("..")
    parent = mc.setParent(q=True)
    # Remove the menu callback and the menu items.
    mel.eval('optionMenuGrp -e -changeCommand "" imageMenuMayaSW;')
    items = mc.optionMenuGrp("imageMenuMayaSW", q=True, itemListLong=True)
    # itemListLong returns None when the menu has no items; guard the loop.
    if items:
        for item in items:
            mc.deleteUI(item)
    # Add the formats we support.
    menu = parent + "|imageMenuMayaSW|OptionMenu"
    mc.menuItem(parent=menu, label="OpenEXR (.exr)", data=0)
    mc.menuItem(parent=menu, label="PNG (.png)", data=1)
    # Connect the control to one internal attribute in our globals node
    # so that we can add a changed callback to it.
    mc.connectControl(
        "imageMenuMayaSW", "appleseedRenderGlobals.imageFormat", index=1)
    mc.connectControl(
        "imageMenuMayaSW", "appleseedRenderGlobals.imageFormat", index=2)
    # Add a callback when our internal attribute changes.
    # This callback gets the current value from our internal attribute and
    # uses it to update the original image format attribute (closing the circle.)
    mc.scriptJob(
        parent=parent,
        replacePrevious=True,
        attributeChange=[
            "appleseedRenderGlobals.imageFormat",
            "import appleseedMaya.renderGlobals; appleseedMaya.renderGlobals.imageFormatChanged()"]
    )
    # Update the image format controls now.
    imageFormatChanged()
def postUpdateCommonTab():
    """Called from the MEL common-tab update wrapper after the common tab
    refreshes; re-syncs the image-format menu with our globals node."""
    imageFormatChanged()
class AppleseedRenderGlobalsTab(object):
    """Base class for the appleseed render-settings tabs.

    Keeps a mapping from render-globals attribute names to the UI controls
    bound to them, and provides helpers to create those bindings.
    """

    def __init__(self):
        # attribute name -> UI control bound to that attribute.
        self._uis = {}

    def _addControl(self, ui, attrName, connectIndex=2):
        """Remember *ui* and connect it to appleseedRenderGlobals.<attrName>."""
        self._uis[attrName] = ui
        boundAttr = pm.Attribute("appleseedRenderGlobals." + attrName)
        pm.connectControl(ui, boundAttr, index=connectIndex)

    def _addFieldSliderControl(self, attrName, **kwargs):
        """Create a field+slider group bound to appleseedRenderGlobals.<attrName>."""
        boundAttr = pm.Attribute("appleseedRenderGlobals." + attrName)
        self._uis[attrName] = pm.attrFieldSliderGrp(
            attribute=boundAttr,
            **kwargs)

    def _getAttributeMenuItems(self, attrName):
        """Return (index, label) pairs for an enum attribute's values."""
        boundAttr = pm.Attribute("appleseedRenderGlobals." + attrName)
        return list(enumerate(boundAttr.getEnums().keys()))
class AppleseedRenderGlobalsMainTab(AppleseedRenderGlobalsTab):
def __adaptiveSamplerChanged(self, value):
    """Toggle the adaptive-sampling controls and reset the max-samples
    budget (adaptive sampling gets a higher default)."""
    for name in ("minPixelSamples", "batchSampleSize", "sampleNoiseThreshold"):
        self._uis[name].setEnable(value)
    mc.setAttr("appleseedRenderGlobals.samples", 256 if value else 32)
def __motionBlurChanged(self, value):
    """Enable or disable all motion-blur related controls as a group."""
    for name in ("mbCameraSamples", "mbTransformSamples", "mbDeformSamples",
                 "shutterOpen", "shutterClose"):
        self._uis[name].setEnable(value)
def __environmentLightSelected(self, envLight):
    """Connect the chosen environment light to the render globals node,
    replacing any previous connection; "<none>" clears the slot."""
    logger.debug("Environment light selected: %s" % envLight)
    existing = mc.listConnections(
        "appleseedRenderGlobals.envLight",
        plugs=True)
    if existing:
        mc.disconnectAttr(existing[0], "appleseedRenderGlobals.envLight")
    if envLight != "<none>":
        mc.connectAttr(
            envLight + ".globalsMessage",
            "appleseedRenderGlobals.envLight")
def updateEnvLightControl(self):
    """Rebuild the environment-light option menu from the tracked lights.

    The change callback is removed while repopulating so rebuilding the
    items does not trigger spurious selections, then restored at the end.
    """
    if "envLight" not in self._uis:
        return
    logger.debug("Updating env lights menu")
    uiName = self._uis["envLight"]
    # Return if the menu does not exist yet.
    if not pm.optionMenu(uiName, exists=True):
        return
    # Remove the callback.
    pm.optionMenu(uiName, edit=True, changeCommand="")
    # Delete the menu items. itemListLong returns None for an empty menu,
    # which would previously crash the for-loop.
    items = pm.optionMenu(uiName, query=True, itemListLong=True)
    if items:
        for item in items:
            pm.deleteUI(item)
    connections = mc.listConnections("appleseedRenderGlobals.envLight")
    # Rebuild the menu.
    pm.menuItem(parent=uiName, label="<none>")
    for envLight in g_environmentLightsList:
        pm.menuItem(parent=uiName, label=envLight)
    # Update the currently selected item.
    if connections:
        node = connections[0]
        if mc.nodeType(node) == "transform":
            shapes = mc.listRelatives(node, shapes=True)
            assert shapes
            node = shapes[0]
        pm.optionMenu(uiName, edit=True, value=node)
    else:
        pm.optionMenu(uiName, edit=True, value="<none>")
    # Restore the callback.
    pm.optionMenu(
        uiName, edit=True, changeCommand=self.__environmentLightSelected)
def __lockSamplingPatternChanged(self, value):
    """Enable the noise-seed field only while the sampling pattern is locked."""
    self._uis["noiseSeed"].setEnable(value)
def create(self):
# Create default render globals node if needed.
createGlobalNodes()
parentForm = pm.setParent(query=True)
pm.setUITemplate("renderGlobalsTemplate", pushTemplate=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
with pm.scrollLayout("appleseedScrollLayout", horizontalScrollBarThickness=0):
with pm.columnLayout("appleseedColumnLayout", adjustableColumn=True, width=g_columnWidth):
with pm.frameLayout("samplingFrameLayout", label="Sampling", collapsable=True, collapse=False):
with pm.columnLayout("samplingColumnLayout", adjustableColumn=True, width=g_subColumnWidth,
rowSpacing=2):
pm.separator(height=2)
self._addFieldSliderControl(
label="Render Passes",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=1,
fieldMinValue=1,
maxValue=100,
fieldMaxValue=1000000,
attrName="passes")
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Adaptive Sampling",
height=18,
columnAttach=(1, "right", 4),
changeCommand=self.__adaptiveSamplerChanged),
attrName="adaptiveSampling")
pm.separator(height=2)
adaptiveSampling = mc.getAttr("appleseedRenderGlobals.adaptiveSampling")
self._addFieldSliderControl(
label="Min Samples",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0,
fieldMinValue=0,
maxValue=256,
fieldMaxValue=1000000,
enable=adaptiveSampling,
attrName="minPixelSamples")
self._addFieldSliderControl(
label="Max Samples",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=1,
fieldMinValue=0,
maxValue=1024,
fieldMaxValue=1000000,
attrName="samples")
self._addFieldSliderControl(
label="Batch Sample Size",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=1,
fieldMinValue=1,
maxValue=128,
fieldMaxValue=1000000,
enable=adaptiveSampling,
attrName="batchSampleSize")
self._addFieldSliderControl(
label="Noise Threshold",
step=0.02,
precision=4,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0.0001,
fieldMinValue=0.0,
maxValue=2.0,
fieldMaxValue=25.0,
enable=adaptiveSampling,
attrName="sampleNoiseThreshold")
pm.separator(height=2)
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Pixel Filter",
columnAttach=(1, "right", 4),
enumeratedItem=self._getAttributeMenuItems("pixelFilter")),
attrName="pixelFilter")
self._addFieldSliderControl(
label="Pixel Filter Size",
sliderStep=0.5,
precision=1,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0.5,
fieldMinValue=0.5,
maxValue=4.0,
fieldMaxValue=20.0,
attrName="pixelFilterSize")
self._addFieldSliderControl(
label="Tile Size",
columnWidth=(3, 160),
columnAttach=(1, | |
{k: val for k, val in self.hyperparams.items() if k in
self.hyperparam_keys - self.nn_specific_keys - self.rf_specific_keys}
subcombo['model_type'] = [model_type]
subcombo['featurizer'] = [featurizer]
self.param_combos.extend(self.generate_combos(subcombo))
def generate_combos(self, params_dict):
    """Expand a dict of hyperparameter value lists into all combinations.

    Calls the sub-function generate_combo() to preprocess the dict, then
    takes the Cartesian product of the value lists with itertools.product.

    Args:
        params_dict: dict mapping hyperparameter names to lists of values.

    Returns:
        List of dicts, one per combination, mapping each hyperparameter
        name to a single value.
    """
    new_dict = self.generate_combo(params_dict)
    names = new_dict.keys()
    # dict(zip(...)) replaces the previous manual per-key loop; the unused
    # enumerate() index is gone as well.
    return [dict(zip(names, combo))
            for combo in itertools.product(*new_dict.values())]
def assemble_layers(self):
    """Reformat per-layer hyperparameter lists into a list of layer dicts.

    For each index i, collects the i-th entry of every list in self.layers
    into one candidate dict; the candidate is kept only when all of its
    entries have the same length. The kept dicts are stored under the
    'layers' hyperparameter.

    Returns:
        None
    """
    layer_dicts = []
    for i in range(min(len(v) for v in self.layers.values())):
        candidate = {key: value[i] for key, value in self.layers.items()}
        # The original used `assert` + bare `except: continue` here; the
        # assert would be stripped under `python -O`, so check explicitly.
        try:
            lengths = [len(entry) for entry in candidate.values()]
        except TypeError:
            # An entry without a length means a malformed combination;
            # preserve the old best-effort behavior and skip it.
            continue
        if not lengths or lengths.count(lengths[0]) != len(lengths):
            continue
        layer_dicts.append(candidate)
    self.hyperparams['layers'] = layer_dicts
    self.hyperparam_keys.add('layers')
def generate_assay_list(self):
    """Build self.assays: one (dataset_key, bucket, splitter, split_uuid)
    tuple per dataset/splitter combination to model.

    With use_shortlist the tuples come from the shortlist file; otherwise
    they come from the configured dataset, reusing an existing split when
    previously_split/split_uuid are set and generating one otherwise.

    Returns:
        None
    """
    if self.params.use_shortlist:
        self.assays = self.get_shortlist_df(split_uuids=True)
    else:
        params = self.params
        splitters = [params.splitter] if type(params.splitter) == str else params.splitter
        # True when a usable split has already been generated for the dataset.
        have_split = ('previously_split' in params.__dict__.keys()
                      and 'split_uuid' in params.__dict__.keys()
                      and params.previously_split
                      and params.split_uuid is not None)
        self.assays = []
        for splitter in splitters:
            if have_split:
                self.assays.append((params.dataset_key, params.bucket, splitter, params.split_uuid))
            else:
                try:
                    split_uuid = self.return_split_uuid(params.dataset_key, splitter=splitter)
                    self.assays.append((params.dataset_key, params.bucket, splitter, split_uuid))
                except Exception as e:
                    print(e)
                    print(traceback.print_exc())
                    sys.exit(1)
    # Normalize whitespace in every field.
    self.assays = [(t[0].strip(), t[1].strip(), t[2].strip(), t[3].strip()) for t in self.assays]
def get_dataset_metadata(self, assay_params, retry_time=60):
    """Gather the required metadata for a dataset from the datastore.

    Updates assay_params in place with id_col, response_cols, smiles_col,
    class_name, class_number, dataset_name and hyperparam_uuid, and
    records the row count in self.num_rows.

    Args:
        assay_params: dataset metadata dict, updated in place.
        retry_time: seconds to sleep between datastore retries.

    Returns:
        None (returns early when the datastore is disabled, and None when
        metadata could not be fetched after 5 retries).
    """
    if not self.params.datastore:
        return
    print(assay_params['dataset_key'])
    retry = True
    i = 0
    # TODO: need to catch if dataset doesn't exist versus 500 failure
    while retry:
        try:
            metadata = dsf.get_keyval(dataset_key=assay_params['dataset_key'], bucket=assay_params['bucket'])
            retry = False
        except Exception as e:
            if i < 5:
                print("Could not get metadata from datastore for dataset %s because of exception %s, sleeping..."
                      % (assay_params['dataset_key'], e))
                time.sleep(retry_time)
                i += 1
            else:
                print("Could not get metadata from datastore for dataset %s because of exception %s, exiting"
                      % (assay_params['dataset_key'], e))
                return None
    if 'id_col' in metadata.keys():
        assay_params['id_col'] = metadata['id_col']
    if 'response_cols' not in assay_params or assay_params['response_cols'] is None:
        # Most specific metadata key wins: response_cols > response_col > param.
        if 'param' in metadata.keys():
            assay_params['response_cols'] = [metadata['param']]
        if 'response_col' in metadata.keys():
            assay_params['response_cols'] = [metadata['response_col']]
        if 'response_cols' in metadata.keys():
            assay_params['response_cols'] = metadata['response_cols']
    if 'smiles_col' in metadata.keys():
        assay_params['smiles_col'] = metadata['smiles_col']
    if 'class_name' in metadata.keys():
        assay_params['class_name'] = metadata['class_name']
    if 'class_number' in metadata.keys():
        assay_params['class_number'] = metadata['class_number']
    if 'num_row' in metadata.keys():
        self.num_rows[assay_params['dataset_key']] = metadata['num_row']
    # Derive the dataset name from the file name. The previous
    # rstrip('.csv') stripped the character *set* {'.','c','s','v'} and
    # mangled names such as 'abc.csv' -> 'ab'; remove the suffix instead.
    dataset_name = assay_params['dataset_key'].split('/')[-1]
    if dataset_name.endswith('.csv'):
        dataset_name = dataset_name[:-len('.csv')]
    assay_params['dataset_name'] = dataset_name
    assay_params['hyperparam_uuid'] = self.hyperparam_uuid
def split_and_save_dataset(self, assay_params):
    """Split a dataset, save the split, and record its split_uuid.

    Args:
        assay_params: dataset metadata dict; updated in place with
            previously_split=True and the new split_uuid.

    Returns:
        None
    """
    self.get_dataset_metadata(assay_params)
    # TODO: check usage with defaults
    namespace_params = parse.wrapper(assay_params)
    # TODO: Don't want to recreate each time
    featurization = feat.create_featurization(namespace_params)
    dataset = model_datasets.create_model_dataset(namespace_params, featurization)
    dataset.get_featurized_data()
    dataset.split_dataset()
    dataset.save_split_dataset()
    assay_params['previously_split'] = True
    assay_params['split_uuid'] = dataset.split_uuid
def return_split_uuid(self, dataset_key, bucket=None, splitter=None, split_combo=None, retry_time=60):
    """Load a dataset, split it, save the split, and return the split_uuid.

    Args:
        dataset_key: key for dataset to split
        bucket: datastore-specific user group bucket
        splitter: type of splitter to use to split the dataset
        split_combo: tuple of form (split_valid_frac, split_test_frac)
        retry_time: seconds to sleep between datastore retries

    Returns:
        The split_uuid string, or None when the datastore could not be
        reached or the split could not be saved after 5 retries.
    """
    if bucket is None:
        bucket = self.params.bucket
    if splitter is None:
        splitter = self.params.splitter
    if split_combo is None:
        split_valid_frac = self.params.split_valid_frac
        split_test_frac = self.params.split_test_frac
    else:
        split_valid_frac = split_combo[0]
        split_test_frac = split_combo[1]
    retry = True
    i = 0
    # TODO: need to catch if dataset doesn't exist versus 500 failure
    while retry:
        try:
            metadata = dsf.get_keyval(dataset_key=dataset_key, bucket=bucket)
            retry = False
        except Exception as e:
            if i < 5:
                print("Could not get metadata from datastore for dataset %s because of exception %s, sleeping..." % (dataset_key, e))
                time.sleep(retry_time)
                i += 1
            else:
                print("Could not get metadata from datastore for dataset %s because of exception %s, exiting" % (dataset_key, e))
                return None
    assay_params = {'dataset_key': dataset_key, 'bucket': bucket, 'splitter': splitter,
                    'split_valid_frac': split_valid_frac, 'split_test_frac': split_test_frac}
    # Need a featurizer type to split the dataset, but since we only care
    # about getting the split_uuid, it does not matter which one is used.
    if type(self.params.featurizer) == list:
        assay_params['featurizer'] = self.params.featurizer[0]
    else:
        assay_params['featurizer'] = self.params.featurizer
    if 'id_col' in metadata.keys():
        assay_params['id_col'] = metadata['id_col']
    if 'response_cols' not in assay_params or assay_params['response_cols'] is None:
        # Most specific metadata key wins: response_cols > response_col > param.
        if 'param' in metadata.keys():
            assay_params['response_cols'] = [metadata['param']]
        if 'response_col' in metadata.keys():
            assay_params['response_cols'] = [metadata['response_col']]
        if 'response_cols' in metadata.keys():
            assay_params['response_cols'] = metadata['response_cols']
    if 'smiles_col' in metadata.keys():
        assay_params['smiles_col'] = metadata['smiles_col']
    if 'class_name' in metadata.keys():
        assay_params['class_name'] = metadata['class_name']
    if 'class_number' in metadata.keys():
        assay_params['class_number'] = metadata['class_number']
    # Derive the dataset name from the file name. The previous
    # rstrip('.csv') stripped the character *set* {'.','c','s','v'} and
    # mangled names such as 'abc.csv' -> 'ab'; remove the suffix instead.
    dataset_name = dataset_key.split('/')[-1]
    if dataset_name.endswith('.csv'):
        dataset_name = dataset_name[:-len('.csv')]
    assay_params['dataset_name'] = dataset_name
    assay_params['datastore'] = True
    assay_params['previously_featurized'] = self.params.previously_featurized
    try:
        assay_params['descriptor_key'] = self.params.descriptor_key
        assay_params['descriptor_bucket'] = self.params.descriptor_bucket
    except AttributeError:
        # Descriptor settings are optional; keep the original best-effort
        # behavior (which used a bare except) but only for missing attrs.
        print("")
    # TODO: check usage with defaults
    namespace_params = parse.wrapper(assay_params)
    # TODO: Don't want to recreate each time
    featurization = feat.create_featurization(namespace_params)
    data = model_datasets.create_model_dataset(namespace_params, featurization)
    retry = True
    i = 0
    while retry:
        try:
            data.get_featurized_data()
            data.split_dataset()
            data.save_split_dataset()
            return data.split_uuid
        except Exception as e:
            if i < 5:
                print("Could not get metadata from datastore for dataset %s because of exception %s, sleeping" % (dataset_key, e))
                time.sleep(retry_time)
                i += 1
            else:
                print("Could not save split dataset for dataset %s because of exception %s" % (dataset_key, e))
                return None
def generate_split_shortlist(self, retry_time=60):
    """Process a shortlist, generate splits for each dataset on the list,
    and upload a new shortlist file with the split_uuids included.

    Generates splits for the (valid, test) fraction combos
    [[0.1, 0.1], [0.1, 0.2], [0.2, 0.2]] with both the random and
    scaffold splitters.

    Args:
        retry_time: seconds to sleep between datastore retries.

    Returns:
        None
    """
    retry = True
    i = 0
    while retry:
        try:
            shortlist_metadata = dsf.retrieve_dataset_by_datasetkey(
                bucket=self.params.bucket, dataset_key=self.params.shortlist_key, return_metadata=True)
            retry = False
        except Exception as e:
            if i < 5:
                print("Could not retrieve shortlist %s from datastore because of exception %s, sleeping..." %
                      (self.params.shortlist_key, e))
                time.sleep(retry_time)
                i += 1
            else:
                print("Could not retrieve shortlist %s from datastore because of exception %s, exiting" %
                      (self.params.shortlist_key, e))
                return None
    datasets = self.get_shortlist_df()
    rows = []
    for assay, bucket in datasets:
        split_uuids = {'dataset_key': assay, 'bucket': bucket}
        for splitter in ['random', 'scaffold']:
            for split_combo in [[0.1, 0.1], [0.1, 0.2], [0.2, 0.2]]:
                split_name = "%s_%d_%d" % (splitter, split_combo[0]*100, split_combo[1]*100)
                try:
                    split_uuids[split_name] = self.return_split_uuid(assay, bucket, splitter, split_combo)
                except Exception as e:
                    print(e)
                    print("Splitting failed for dataset %s" % assay)
                    split_uuids[split_name] = None
                    continue
        rows.append(split_uuids)
    df = pd.DataFrame(rows)
    # Build the new shortlist key. The previous strip('.csv') stripped the
    # character set {'.','c','s','v'} from *both* ends of the key and could
    # mangle it; remove only the trailing '.csv' suffix instead.
    base_key = shortlist_metadata['dataset_key']
    if base_key.endswith('.csv'):
        base_key = base_key[:-len('.csv')]
    new_metadata = {}
    new_metadata['dataset_key'] = base_key + '_with_uuids.csv'
    new_metadata['has_uuids'] = True
    new_metadata['description'] = '%s, with UUIDs' % shortlist_metadata['description']
    retry = True
    i = 0
    while retry:
        try:
            dsf.upload_df_to_DS(df,
                                bucket=self.params.bucket,
                                filename=new_metadata['dataset_key'],
                                title=new_metadata['dataset_key'].replace('_', ' '),
                                description=new_metadata['description'],
                                tags=[],
                                key_values={},
                                dataset_key=new_metadata['dataset_key'])
            retry = False
        except Exception as e:
            if i < 5:
                print("Could not save new shortlist because of exception %s, sleeping..." % e)
                time.sleep(retry_time)
                i += 1
            else:
                # TODO: Add save to disk.
                print("Could not save new shortlist because of exception %s, exiting" % e)
                retry = False
def get_shortlist_df(self, split_uuids=False, retry_time=60):
"""
Args:
split_uuids: Boolean value saying if you want just datasets returned or the split_uuids as well
Returns:
The list of dataset_keys, along with their accompanying bucket, split type, and split_uuid if split_uuids is True
"""
if self.params.datastore:
retry = True
i = 0
while retry:
try:
df = dsf.retrieve_dataset_by_datasetkey(self.params.shortlist_key, self.params.bucket)
retry=False
except Exception as e:
if i < 5:
print("Could not retrieve shortlist %s because of exception %s, sleeping..." % (self.params.shortlist_key, e))
time.sleep(retry_time)
i += 1
else:
print("Could not retrieve shortlist %s because of exception %s, exiting" % (self.params.shortlist_key, e))
sys.exit(1)
else:
if not os.path.exists(self.params.shortlist_key):
return None
df = pd.read_csv(self.params.shortlist_key, index_col=False)
if df is None:
sys.exit(1)
if len(df.columns) == 1:
assays = df[df.columns[0]].values.tolist()
else:
if 'task_name' in df.columns:
col_name = 'task_name'
else:
col_name = 'dataset_key'
assays = df[col_name].values.tolist()
if 'bucket' in df.columns:
datasets = list(zip(assays, df.bucket.values.tolist()))
elif 'bucket_name' in df.columns:
datasets = list(zip(assays, df.bucket_name.values.tolist()))
else:
datasets = list(zip(assays, [self.params.bucket]))
datasets = [(d[0].strip(), d[1].strip()) for d in datasets]
if not split_uuids:
return datasets
if type(self.params.splitter) == str:
splitters = [self.params.splitter]
else:
splitters = self.params.splitter
assays = []
for splitter in splitters:
split_name = '%s_%d_%d' % (splitter, self.params.split_valid_frac*100, self.params.split_test_frac*100)
if split_name in df.columns:
for i, row in df.iterrows():
assays.append((datasets[i][0], datasets[i][1], splitter, row[split_name]))
else:
for assay, bucket in datasets:
try:
# do | |
import random
import time
from datetime import date
from colorama import *
import math
def pasirinkimas():
    """Main menu: ask which drill to run and dispatch to it.

    Offers multiplication (D), division (B), addition (S), subtraction (A),
    the multiplication table (L) and the statistics view (T); any other
    input prints the list of valid letters and re-prompts.
    """
    print('|------------------------------------------|')
    print(Fore.RED + '\t|- Ką nori mokytis? ' + Fore.LIGHTBLUE_EX + '(pasirink raidę)' + Fore.RED + ' -|')
    koks_veiksmas = input(Fore.GREEN + '\tDaugybą (' + Fore.LIGHTBLUE_EX +
                          'D' + Fore.GREEN + '), \n \tDalybą (' + Fore.LIGHTBLUE_EX +
                          'B' + Fore.GREEN + '), \n \tSudėtį (' + Fore.LIGHTBLUE_EX +
                          'S' + Fore.GREEN + '), \n \tAtimtį (' + Fore.LIGHTBLUE_EX +
                          'A' + Fore.GREEN + '), \n \tkartotis daugybos lentelę (' + Fore.LIGHTBLUE_EX +
                          'L' + Fore.GREEN + '), \n \tar peržiūrėti statistiką (' + Fore.LIGHTBLUE_EX +
                          'T' + Fore.GREEN + ')?- ').lower()
    if koks_veiksmas == 'd':
        return daugyba()
    if koks_veiksmas == 'b':
        return dalyba()
    elif koks_veiksmas == 's':
        return sudetis()
    elif koks_veiksmas == 'a':
        return atimtis()
    elif koks_veiksmas == 'l':
        return lentele()
    elif koks_veiksmas == 't':
        return statistika()
    else:
        # The old message only listed D, S and A although six options exist.
        print('\tAtsakymas turi būti: ' + Fore.RED + '"D", "B", "S", "A", "L"' + Fore.GREEN +
              ' arba ' + Fore.RED + '"T"' + Fore.GREEN + ' raidės')
        pasirinkimas()
def daugyba():
    """Multiplication drill.

    Asks for a round length, then poses random x*y questions (factors
    2..9) until the round is answered or a wrong answer ends it. Tracks
    elapsed time and the correct streak, offers a single hint (enter 0),
    and appends a finished round to statistika.txt.
    """
    try:
        print(Fore.RED + '\t|- Daugybos testas -|')
        bandymai = int(input(Fore.YELLOW + '\tKelis kartus nori spręsti?' + Fore.RED +
                             ' (10/25/50)' + Fore.YELLOW + '- '))
        # 2 is kept as an undocumented short round (useful for testing).
        if bandymai in [2, 10, 25, 50]:
            pagalba = 1          # one hint available per round
            spejimai = 0         # correct answers so far
            atsakyti = []        # correctly answered questions, for the recap
            testo_pradzia = time.time()
            kartojimas_teigiami = ['t', 'taip', 'teip', 'y', 'yes', 'ok']
            while True:
                x = random.randint(2, 9)
                y = random.randint(2, 9)
                teisingas = x * y
                # Elapsed time, rounded up to whole seconds.
                dabar = time.time()
                uzgaista = math.ceil(dabar - testo_pradzia)
                laiko_vertimas = int(uzgaista)
                minutes = laiko_vertimas // 60
                sekundes = laiko_vertimas % 60
                print(Style.RESET_ALL)
                print('|------------------------------------------|')
                print('\tKiek bus ' + str(x) + ' x ' + str(y) + '?')
                if pagalba == 1:
                    print(Fore.BLUE + '\tPaspaudus NULĮ (0) galėsi vieną karta pasinaudoti pagalba')
                atsakimas = int(input(Fore.BLUE + '\tĮrašyk atsakymą ir paspausk ENTER:- '))
                print('|------------------------------------------|')
                if atsakimas == 0 and pagalba == 1:
                    # 0 requests the one-time hint.
                    ar_pagalba = input(Fore.GREEN + '\tAr tikrai nori pasinaudoti pagalba? (T/N)- ')
                    if ar_pagalba in kartojimas_teigiami:
                        pagalba -= 1
                        print(f'\tTeisingas atsakymas yra: ' + Fore.RED + f'{teisingas}')
                    else:
                        continue
                elif atsakimas == 0 and pagalba == 0:
                    print('\tPagalba išnaudota')
                elif atsakimas == teisingas:
                    bandymai -= 1
                    spejimai += 1
                    atsakyti.append([f'{x} x {y} = {teisingas}'])
                    print(Fore.GREEN + '\tPuiku! Atsakei i ' + Fore.RED + f'{spejimai}' + Fore.GREEN + ' klausymų(-us)')
                    print('\tLiko ' + Fore.RED + f'{bandymai}' + Fore.GREEN + ' klausimų(-ai)')
                    print(Style.RESET_ALL)
                    if bandymai == 0:
                        # Round finished: show the recap and log the result.
                        print(Fore.MAGENTA + 'Atsakei teisingai ' + Fore.GREEN +
                              f'{spejimai}' + Fore.MAGENTA + ' kartųus iš eilės.')
                        print(Fore.YELLOW + 'Teisingi atsakymai buvo šie:')
                        print(*atsakyti, sep="\n")
                        print('Testas išspręstas per:' + Fore.RED + f' {minutes} min. {sekundes} sek.')
                        vardas = input('Koks tavo vardas?- ')
                        # `with` guarantees the stats file is closed even on error.
                        with open('statistika.txt', 'a+') as f:
                            f.write(f'\n[{date.today()}] Vardas: {vardas}. Veiksmas: Daugyba. Atsakymai: {spejimai}. Laikas: {minutes} min. {sekundes} sek.')
                        kartojimas = input(Fore.GREEN + '\nNori bandyti dar kartą? (T/N)- ').lower()
                        if kartojimas in kartojimas_teigiami:
                            daugyba()
                        else:
                            pasirinkimas()
                else:
                    # Wrong answer ends the round.
                    print('\nNETEISINGAI! ' + Fore.RED + 'Teisingas atsakymas: ' + Fore.GREEN + '{}'.format(teisingas))
                    print(Fore.RED + 'Atsakei teisingai ' + Fore.GREEN +
                          f'{spejimai}' + Fore.RED + ' kartą(-us) iš eilės.')
                    if spejimai > 0:
                        print(Fore.YELLOW + 'Teisingi atsakymai buvo šie:')
                        print(*atsakyti, sep="\n")
                        print('\nTestas neišspręstas. Užtrukai:' + Fore.RED + f' {minutes} minutes. {sekundes} sek.')
                        print(Style.RESET_ALL)
                    kartojimas = input(Fore.GREEN + 'Nori bandyti dar kartą? (T/N)- ').lower()
                    if kartojimas in kartojimas_teigiami:
                        daugyba()
                    else:
                        pasirinkimas()
        else:
            # The old message omitted 10 even though it is accepted.
            print(
                '\tGalima rinktis iš' + Fore.RED + ' 10, 25' + Fore.YELLOW +
                ' arba' + Fore.RED + ' 50' + Fore.YELLOW + ' spėjimų')
            daugyba()
    except ValueError:
        print('Atsakymas negali būti raidė!')
        daugyba()
def sudetis():
    """Addition drill.

    Asks for a round length, then poses random x+y questions (terms
    11..99) until the round is answered or a wrong answer ends it. Tracks
    elapsed time and the correct streak, offers a single hint (enter 0),
    and appends a finished round to statistika.txt.
    """
    try:
        print(Fore.RED + '\t|- Sudėties testas -|')
        bandymai = int(input(Fore.YELLOW + '\tKelis kartus nori spręsti?' + Fore.RED + ' (10/25/50)' + Fore.YELLOW + '- '))
        # 2 is kept as an undocumented short round (useful for testing).
        if bandymai in [2, 10, 25, 50]:
            pagalba = 1          # one hint available per round
            spejimai = 0         # correct answers so far
            atsakyti = []        # correctly answered questions, for the recap
            testo_pradzia = time.time()
            kartojimas_teigiami = ['t', 'taip', 'teip', 'y', 'yes', 'ok']
            while True:
                x = random.randint(11, 99)
                y = random.randint(11, 99)
                teisingas = x + y
                # Elapsed time, rounded up to whole seconds.
                dabar = time.time()
                uzgaista = math.ceil(dabar - testo_pradzia)
                laiko_vertimas = int(uzgaista)
                minutes = laiko_vertimas // 60
                sekundes = laiko_vertimas % 60
                print(Style.RESET_ALL)
                print('|------------------------------------------|')
                print('\tKiek bus ' + str(x) + ' + ' + str(y) + '?')
                if pagalba == 1:
                    print(Fore.BLUE + '\tPaspaudus NULĮ (0) galėsi vieną karta pasinaudoti pagalba')
                atsakimas = int(input(Fore.BLUE + '\tĮrašyk atsakymą ir paspausk ENTER:- '))
                print('|------------------------------------------|')
                if atsakimas == 0 and pagalba == 1:
                    # 0 requests the one-time hint.
                    ar_pagalba = input(Fore.GREEN + '\tAr tikrai nori pasinaudoti pagalba? (T/N)- ')
                    if ar_pagalba in kartojimas_teigiami:
                        pagalba -= 1
                        print(f'\tTeisingas atsakymas yra: ' + Fore.RED + f'{teisingas}')
                    else:
                        continue
                elif atsakimas == 0 and pagalba == 0:
                    print('\tPagalba išnaudota')
                elif atsakimas == teisingas:
                    bandymai -= 1
                    spejimai += 1
                    atsakyti.append([f'{x} + {y} = {teisingas}'])
                    print(Fore.GREEN + '\tPuiku! Atsakei i ' + Fore.RED + f'{spejimai}' + Fore.GREEN + ' klausymų(-us)')
                    print('\tLiko ' + Fore.RED + f'{bandymai}' + Fore.GREEN + ' klausimų(-ai)')
                    print(Style.RESET_ALL)
                    if bandymai == 0:
                        # Round finished: show the recap and log the result.
                        print(Fore.MAGENTA + 'Atsakei teisingai ' + Fore.GREEN +
                              f'{spejimai}' + Fore.MAGENTA + ' kartus iš eilės.')
                        print(Fore.YELLOW + 'Teisingi atsakymai buvo šie:')
                        print(*atsakyti, sep="\n")
                        print('Testas išspręstas per:' + Fore.RED + f' {minutes} min. {sekundes} sek.')
                        vardas = input('Koks tavo vardas?- ')
                        # `with` guarantees the stats file is closed even on error.
                        with open('statistika.txt', 'a+') as f:
                            f.write(f'\n[{date.today()}] Vardas: {vardas}. Veiksmas: Sudėtis. Atsakymai: {spejimai}. Laikas: {minutes} min. {sekundes} sek.')
                        kartojimas = input(Fore.GREEN + '\nNori bandyti dar kartą? (T/N)- ').lower()
                        if kartojimas in kartojimas_teigiami:
                            sudetis()
                        else:
                            pasirinkimas()
                else:
                    # Wrong answer ends the round.
                    print('\nNETEISINGAI! ' + Fore.RED + 'Teisingas atsakymas: ' + Fore.GREEN + '{}'.format(teisingas))
                    print(Fore.RED + 'Atsakei teisingai ' + Fore.GREEN +
                          f'{spejimai}' + Fore.RED + ' kartą(-us) iš eilės.')
                    if spejimai > 0:
                        print(Fore.YELLOW + 'Teisingi atsakymai buvo šie:')
                        print(*atsakyti, sep="\n")
                        print('\nTestas neišspręstas. Užtrukai:' + Fore.RED + f' {minutes} minutes. {sekundes} sek.')
                        print(Style.RESET_ALL)
                    kartojimas = input(Fore.GREEN + 'Nori bandyti dar kartą? (T/N)- ').lower()
                    if kartojimas in kartojimas_teigiami:
                        sudetis()
                    else:
                        pasirinkimas()
        else:
            # The old message omitted 10 even though it is accepted.
            print(
                '\tGalima rinktis iš' + Fore.RED + ' 10, 25' + Fore.YELLOW +
                ' arba' + Fore.RED + ' 50' + Fore.YELLOW + ' spėjimų')
            sudetis()
    except ValueError:
        print('Atsakymas negali būti raidė!')
        sudetis()
def atimtis():
try:
print(Fore.RED + '\t|- Atimties testas -|')
bandymai = int(input(Fore.YELLOW + '\tKelis kartus nori spręsti?' + Fore.RED +
' (10/25/50)' + Fore.YELLOW + '- '))
if bandymai in [2, 10, 25, 50]:
pagalba = 1
spejimai = 0
atsakyti = []
testo_pradzia = time.time()
kartojimas_teigiami = ['t', 'taip', 'teip', 'y', 'yes', 'ok']
while True:
x = random.randint(89, 199)
y = random.randint(11, 88)
teisingas = x - y
dabar = time.time()
uzgaista = math.ceil(dabar - testo_pradzia)
laiko_vertimas = int(uzgaista)
minutes = laiko_vertimas // 60
sekundes = laiko_vertimas % 60
print(Style.RESET_ALL)
print('|------------------------------------------|')
print('\tKiek bus ' + str(x) + ' - ' + str(y) + '?')
if pagalba == 1:
print(Fore.BLUE + '\tPaspaudus NULĮ (0) galėsi vieną karta pasinaudoti pagalba')
else:
pass
atsakimas = int(input(Fore.BLUE + '\tĮrašyk atsakymą ir paspausk ENTER:- '))
print('|------------------------------------------|')
if atsakimas == 0 and pagalba == 1:
ar_pagalba = input(Fore.GREEN + '\tAr tikrai nori pasinaudoti pagalba? (T/N)- ')
if ar_pagalba in kartojimas_teigiami:
pagalba -= 1
print(f'\tTeisingas atsakymas yra: ' + Fore.RED + f'{teisingas}')
else:
continue
elif atsakimas == 0 and pagalba == 0:
print('\tPagalba išnaudota')
elif atsakimas == teisingas:
bandymai -= 1
spejimai += 1
atsakyti.append([f'{x} - {y} = {teisingas}'])
print(Fore.GREEN + '\tPuiku! Atsakei i ' + Fore.RED + f'{spejimai}' + Fore.GREEN + ' klausymų(-us)')
print('\tLiko ' + Fore.RED + f'{bandymai}' + Fore.GREEN + ' klausimų(-ai)')
print(Style.RESET_ALL)
if bandymai == 0:
print(Fore.MAGENTA + 'Atsakei teisingai ' + Fore.GREEN +
f'{spejimai}' + Fore.MAGENTA + ' kartus iš eilės.')
print(Fore.YELLOW + 'Teisingi atsakymai buvo šie:')
print(*atsakyti, sep="\n")
print('Testas išspręstas per:' + Fore.RED + f' {minutes} min. {sekundes} sek.')
vardas = input('Koks tavo vardas?- ')
f = open('statistika.txt', 'a+')
f.write(f'\n[{date.today()}] Vardas: {vardas}. Veiksmas: Atimtis. Atsakymai: {spejimai}. Laikas: {minutes} min. {sekundes} sek.')
f.close()
kartojimas = input(Fore.GREEN + '\nNori bandyti dar kartą? (T/N)- ').lower()
if kartojimas in kartojimas_teigiami:
atimtis()
else:
pasirinkimas()
else:
pass
else:
print('\nNETEISINGAI! ' + Fore.RED + | |
2, 3],
'b': [4, 5, 6]
})
transformed_data1 = pd.DataFrame({
'a.out1': ['1', '2', '3'],
'b': [4, 5, 6]
})
transformer1 = Mock()
transformer2 = Mock()
transformer1.get_output_types.return_value = {
'a.out1': 'categorical'
}
transformer1.get_next_transformers.return_value = None
transformer1.transform.return_value = transformed_data1
get_transformer_instance_mock.side_effect = [transformer1]
ht = HyperTransformer()
ht._get_next_transformer = Mock()
ht._get_next_transformer.side_effect = [transformer2]
ht._multi_column_fields = Mock()
ht._multi_column_fields.get.return_value = ('a.out1', 'b.out1')
# Run
out = ht._fit_field_transformer(data, 'a', transformer1)
# Assert
expected = pd.DataFrame({
'a.out1': ['1', '2', '3'],
'b': [4, 5, 6]
})
assert ht._output_columns == []
pd.testing.assert_frame_equal(out, expected)
transformer1.fit.assert_called_once()
transformer1.transform.assert_called_once_with(data)
transformer2.fit.assert_not_called()
assert ht._transformers_sequence == [transformer1]
@patch('rdt.hyper_transformer.get_transformer_instance')
def test__fit_field_transformer_multi_column_field_ready(self, get_transformer_instance_mock):
    """Test the ``_fit_field_transformer`` method.

    This tests that the ``_fit_field_transformer`` behaves as expected.
    If the column is part of a multi-column field, and the other columns
    are present in the data, then it should fit the next transformer.
    It should also transform the data.

    Setup:
        - A mock for ``get_transformer_instance``.
        - A mock for the transformer returned by ``get_transformer_instance``.
          The ``get_output_types`` method will return one output that is part of
          a multi-column field.
        - A mock for ``_multi_column_fields`` to return the multi-column field.

    Input:
        - A DataFrame with the other columns in the multi-column field.
        - A column name to fit the transformer to.
        - A transformer.

    Output:
        - A DataFrame with columns that result from transforming the
          outputs of the original transformer.
        - ``_output_columns`` should add the column name of the output of
          the transformer used on the multi-column field.
    """
    # Setup
    data = pd.DataFrame({
        'a': [1, 2, 3],
        'b.out1': ['4', '5', '6']
    })
    transformed_data1 = pd.DataFrame({
        'a.out1': ['1', '2', '3'],
        'b.out1': ['4', '5', '6']
    })
    transformer1 = Mock()
    transformer2 = Mock()
    transformer1.get_output_types.return_value = {
        'a.out1': 'categorical'
    }
    transformer1.get_next_transformers.return_value = None
    transformer1.transform.return_value = transformed_data1
    # transformer2 consumes the complete multi-column field ('a.out1', 'b.out1').
    transformer2.get_output_types.return_value = {
        'a.out1#b.out1': 'numerical'
    }
    get_transformer_instance_mock.side_effect = [
        transformer1,
        transformer2
    ]
    ht = HyperTransformer()
    ht._get_next_transformer = Mock()
    ht._get_next_transformer.side_effect = [
        transformer2,
        None
    ]
    ht._multi_column_fields = Mock()
    ht._multi_column_fields.get.return_value = ('a.out1', 'b.out1')

    # Run
    out = ht._fit_field_transformer(data, 'a', transformer1)

    # Assert
    expected = pd.DataFrame({
        'a.out1': ['1', '2', '3'],
        'b.out1': ['4', '5', '6']
    })
    # Both columns of the field were present, so the second transformer was fitted
    # and its output column was recorded.
    assert ht._output_columns == ['a.out1#b.out1']
    pd.testing.assert_frame_equal(out, expected)
    transformer1.fit.assert_called_once()
    transformer1.transform.assert_called_once_with(data)
    transformer2.fit.assert_called_once()
    transformer2.transform.assert_called_once()
    assert ht._transformers_sequence == [transformer1, transformer2]
@patch('rdt.hyper_transformer.warnings')
def test__validate_all_fields_fitted(self, warnings_mock):
    """Test the ``_validate_all_fields_fitted`` method.

    A warning must be raised when ``field_transformers`` contains a field
    that never made it into ``_fitted_fields``.

    Setup:
        - A mock for warnings.
        - ``field_transformers`` with a misspelled field name ('intege').
        - ``_fitted_fields`` containing only the correctly spelled fields.

    Expected behavior:
        - A single warning is emitted.
    """
    # Setup: 'intege' is misspelled, so it can never be marked as fitted.
    mock_int = Mock()
    mock_float = Mock()
    transformers = {
        'integer': mock_int,
        'float': mock_float,
        'intege': mock_int
    }
    ht = HyperTransformer(field_transformers=transformers)
    ht._fitted_fields = {'integer', 'float'}

    # Run
    ht._validate_all_fields_fitted()

    # Assert
    warnings_mock.warn.assert_called_once()
def get_data(self):
    """Return a small fixture frame with one column per supported data type."""
    values = {
        'integer': [1, 2, 1, 3],
        'float': [0.1, 0.2, 0.1, 0.1],
        'categorical': ['a', 'a', 'b', 'a'],
        'bool': [False, False, True, False],
        'datetime': pd.to_datetime(['2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01']),
    }
    return pd.DataFrame(values)
def get_transformed_data(self, drop=False):
    """Return the expected transformed fixture.

    When ``drop`` is True, the original (pre-transform) columns and the
    intermediate ``integer.out`` column are removed, leaving only the final
    outputs.
    """
    frame = pd.DataFrame({
        'integer': [1, 2, 1, 3],
        'float': [0.1, 0.2, 0.1, 0.1],
        'categorical': ['a', 'a', 'b', 'a'],
        'bool': [False, False, True, False],
        'datetime': pd.to_datetime(['2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01']),
        'integer.out': ['1', '2', '1', '3'],
        'integer.out.value': [1, 2, 1, 3],
        'float.value': [0.1, 0.2, 0.1, 0.1],
        'categorical.value': [0.375, 0.375, 0.875, 0.375],
        'bool.value': [0.0, 0.0, 1.0, 0.0],
        'datetime.value': [
            1.2649824e+18,
            1.262304e+18,
            1.2649824e+18,
            1.262304e+18
        ]
    })
    if not drop:
        return frame
    raw_columns = [
        'integer',
        'float',
        'categorical',
        'bool',
        'datetime',
        'integer.out'
    ]
    return frame.drop(raw_columns, axis=1)
@patch('rdt.hyper_transformer.get_default_transformer')
def test_fit(self, get_default_transformer_mock):
    """Test the ``fit`` method.

    Tests that the ``fit`` method loops through the fields in ``field_transformers``
    and ``field_data_types`` that are in the data. It should try to find a transformer
    in ``default_data_type_transformers`` and then use the default if it doesn't find one
    when looping through ``field_data_types``. It should then call ``_fit_field_transformer``
    with the correct arguments.

    Setup:
        - A mock for ``_fit_field_transformer``.
        - A mock for ``_field_in_set``.
        - A mock for ``get_default_tranformer``.

    Input:
        - A DataFrame with multiple columns of different types.

    Expected behavior:
        - The ``_fit_field_transformer`` mock should be called with the correct
          arguments in the correct order.
    """
    # Setup
    int_transformer = Mock()
    int_out_transformer = Mock()
    float_transformer = Mock()
    categorical_transformer = Mock()
    bool_transformer = Mock()
    datetime_transformer = Mock()
    data = self.get_data()
    field_transformers = {
        'integer': int_transformer,
        'float': float_transformer,
        'integer.out': int_out_transformer
    }
    default_data_type_transformers = {
        'boolean': bool_transformer,
        'categorical': categorical_transformer
    }
    # 'datetime' has no explicit transformer, so the default lookup is mocked.
    get_default_transformer_mock.return_value = datetime_transformer
    ht = HyperTransformer(
        field_transformers=field_transformers,
        default_data_type_transformers=default_data_type_transformers
    )
    ht._fit_field_transformer = Mock()
    ht._fit_field_transformer.return_value = data
    ht._field_in_set = Mock()
    # _field_in_set answers True for the first two lookups — presumably the
    # 'integer'/'float' fields from field_transformers; verify against the
    # fit() implementation.
    ht._field_in_set.side_effect = [True, True, False, False, False]
    ht._validate_all_fields_fitted = Mock()

    # Run
    ht.fit(data)

    # Assert
    ht._fit_field_transformer.assert_has_calls([
        call(data, 'integer', int_transformer),
        call(data, 'float', float_transformer),
        call(data, 'categorical', categorical_transformer),
        call(data, 'bool', bool_transformer),
        call(data, 'datetime', datetime_transformer)
    ])
    ht._validate_all_fields_fitted.assert_called_once()
def test_transform(self):
    """Test the ``transform`` method.

    Tests that ``transform`` loops through the ``_transformers_sequence``
    and calls ``transformer.transform`` in the correct order.

    Setup:
        - The ``_transformers_sequence`` will be hardcoded with a list
          of transformer mocks.
        - The ``_input_columns`` will be hardcoded.
        - The ``_output_columns`` will be hardcoded.

    Input:
        - A DataFrame of multiple types.

    Output:
        - The transformed DataFrame with the correct columns dropped.
    """
    # Setup
    int_transformer = Mock()
    int_out_transformer = Mock()
    float_transformer = Mock()
    categorical_transformer = Mock()
    bool_transformer = Mock()
    datetime_transformer = Mock()
    data = self.get_data()
    transformed_data = self.get_transformed_data()
    # datetime_transformer is last in the sequence; presumably its return value
    # is the frame that transform() trims to _output_columns — confirm in the
    # implementation.
    datetime_transformer.transform.return_value = transformed_data
    ht = HyperTransformer()
    ht._fitted = True
    ht._transformers_sequence = [
        int_transformer,
        int_out_transformer,
        float_transformer,
        categorical_transformer,
        bool_transformer,
        datetime_transformer
    ]
    ht._input_columns = list(data.columns)
    expected = self.get_transformed_data(True)
    ht._output_columns = list(expected.columns)

    # Run
    transformed = ht.transform(data)

    # Assert
    pd.testing.assert_frame_equal(transformed, expected)
    int_transformer.transform.assert_called_once()
    int_out_transformer.transform.assert_called_once()
    float_transformer.transform.assert_called_once()
    categorical_transformer.transform.assert_called_once()
    bool_transformer.transform.assert_called_once()
    datetime_transformer.transform.assert_called_once()
def test_transform_raises_error_if_not_fitted(self):
    """Test that ``transform`` raises an error.

    Calling ``transform`` on a never-fitted instance must raise
    ``NotFittedError``.

    Setup:
        - A fresh ``HyperTransformer`` whose ``_fitted`` flag is still False.

    Input:
        - A DataFrame of multiple types.

    Expected behavior:
        - A ``NotFittedError`` is raised.
    """
    # Setup
    ht = HyperTransformer()
    frame = self.get_data()

    # Run / Assert
    with pytest.raises(NotFittedError):
        ht.transform(frame)
def test_fit_transform(self):
    """Test call fit_transform"""
    # Run: call the unbound method so the mock stands in for ``self``.
    instance = Mock()
    empty = pd.DataFrame()
    HyperTransformer.fit_transform(instance, empty)

    # Asserts: ``fit`` and ``transform`` each ran exactly once on the frame.
    assert instance.fit.call_count == 1
    pd.testing.assert_frame_equal(
        instance.fit.call_args[0][0],
        pd.DataFrame()
    )
    assert instance.transform.call_count == 1
    pd.testing.assert_frame_equal(
        instance.transform.call_args[0][0],
        pd.DataFrame()
    )
def test_reverse_transform(self):
    """Test the ``reverse_transform`` method.

    Tests that ``reverse_transform`` loops through the ``_transformers_sequence``
    in reverse order and calls ``transformer.reverse_transform``.

    Setup:
        - The ``_transformers_sequence`` will be hardcoded with a list
          of transformer mocks.
        - The ``_output_columns`` will be hardcoded.
        - The ``_input_columns`` will be hardcoded.

    Input:
        - A DataFrame of multiple types.

    Output:
        - The reverse transformed DataFrame with the correct columns dropped.
    """
    # Setup
    int_transformer = Mock()
    int_out_transformer = Mock()
    float_transformer = Mock()
    categorical_transformer = Mock()
    bool_transformer = Mock()
    datetime_transformer = Mock()
    data = self.get_transformed_data(True)
    reverse_transformed_data = self.get_transformed_data()
    # The sequence is walked in reverse, so int_transformer runs last;
    # presumably its return value is the frame trimmed to _input_columns —
    # confirm in the implementation.
    int_transformer.reverse_transform.return_value = reverse_transformed_data
    ht = HyperTransformer()
    ht._fitted = True
    ht._transformers_sequence = [
        int_transformer,
        int_out_transformer,
        float_transformer,
        categorical_transformer,
        bool_transformer,
        datetime_transformer
    ]
    ht._output_columns = list(data.columns)
    expected = self.get_data()
    ht._input_columns = list(expected.columns)

    # Run
    reverse_transformed = ht.reverse_transform(data)

    # Assert
    pd.testing.assert_frame_equal(reverse_transformed, expected)
    int_transformer.reverse_transform.assert_called_once()
    int_out_transformer.reverse_transform.assert_called_once()
    float_transformer.reverse_transform.assert_called_once()
    categorical_transformer.reverse_transform.assert_called_once()
    bool_transformer.reverse_transform.assert_called_once()
    datetime_transformer.reverse_transform.assert_called_once()
def test_reverse_transform_raises_error_if_not_fitted(self):
    """Test that ``reverse_transform`` raises an error.

    Calling ``reverse_transform`` on a never-fitted instance must raise
    ``NotFittedError``.

    Setup:
        - A fresh ``HyperTransformer`` whose ``_fitted`` flag is still False.

    Input:
        - A DataFrame of multiple types.

    Expected behavior:
        - A ``NotFittedError`` is raised.
    """
    # Setup
    ht = HyperTransformer()
    frame = self.get_transformed_data()

    # Run / Assert
    with pytest.raises(NotFittedError):
        ht.reverse_transform(frame)
def test_get_field_data_types(self):
    """Test the ``get_field_data_types`` method.

    This method should return the ``field_data_types`` attribute.

    Output:
        - Dict mapping fields to data types.
    """
    # Setup
    ht = HyperTransformer(
        field_data_types={'a': 'categorical', 'b': 'integer'}
    )

    # Run
    result = ht.get_field_data_types()

    # Assert
    assert result == {'a': 'categorical', 'b': 'integer'}
def test_update_field_data_types(self):
    """Test the ``update_field_data_types`` method.

    This method should update the ``field_data_types`` attribute and
    invalidate any previous fit.

    Setup:
        - A ``HyperTransformer`` initialized with a single, different entry.

    Input:
        - Dict mapping fields to data types.
    """
    # Setup
    ht = HyperTransformer(field_data_types={'a': 'float'})
    ht._transformers_sequence = [CategoricalTransformer()]
    ht._unfit = Mock()
    new_types = {
        'a': 'categorical',
        'b': 'integer'
    }

    # Run
    ht.update_field_data_types(new_types)

    # Assert
    assert ht.field_data_types == {'a': 'categorical', 'b': 'integer'}
    ht._unfit.assert_called_once()
def test_get_default_data_type_transformers(self):
"""Test the ``get_default_data_type_transformers`` method.
This method should return the ``default_data_type_transformers`` | |
ts_sorted.index[0], ts_sorted.index[0], 0, ts_sorted.iloc[0]
params = {'t0': t0, 't1': t1, 'integral': 0, 'threshold': thresh}
while i < len(ts_sorted) and integral <= capacity and (ts_sorted.iloc[0] - ts_sorted.iloc[i]) < rate_limit:
params = {'t0': pd.Timestamp(t0), 't1': pd.Timestamp(t1), 'threshold': thresh, 'integral': integral}
i += 1
times = ts_sorted.index[:i]
# print(times)
t0 = times.min()
t1 = times.max()
# print(ts_sorted.index[:3])
thresh = min(ts_sorted.iloc[:i])
integral = clipped_area(ts, thresh=thresh)
if integral <= capacity:
return {'t0': pd.Timestamp(t0), 't1': pd.Timestamp(t1), 'threshold': thresh, 'integral': integral}
return params
def square_off(series, time_delta=None, transition_seconds=1):
    """Insert samples in regularly sampled data to produce stairsteps from ramps when plotted.

    New samples are placed ``transition_seconds`` before the next sample time,
    to facilitate plotting and sorting.

    >>> square_off(pd.Series(range(3), index=pd.date_range('2014-01-01', periods=3, freq='15m')),
    ...            time_delta=5.5)  # doctest: +NORMALIZE_WHITESPACE
    2014-01-31 00:00:00           0
    2014-01-31 00:00:05.500000    0
    2015-04-30 00:00:00           1
    2015-04-30 00:00:05.500000    1
    2016-07-31 00:00:00           2
    2016-07-31 00:00:05.500000    2
    dtype: int64
    >>> square_off(pd.Series(range(2), index=pd.date_range('2014-01-01', periods=2, freq='15min')),
    ...            transition_seconds=2.5)  # doctest: +NORMALIZE_WHITESPACE
    2014-01-01 00:00:00           0
    2014-01-01 00:14:57.500000    0
    2014-01-01 00:15:00           1
    2014-01-01 00:29:57.500000    1
    dtype: int64
    """
    if time_delta:
        # A bare number is interpreted as a step size in seconds.
        if isinstance(time_delta, (int, float)):
            time_delta = datetime.timedelta(0, time_delta)
        shifted = series.index + time_delta
    else:
        # Derive per-sample deltas from the index itself, repeating the last gap.
        gaps = np.diff(series.index)
        shifted = series.index + np.append(gaps, [gaps[-1]])
    shifted = pd.DatetimeIndex(shifted) - datetime.timedelta(0, transition_seconds)
    doubled = pd.concat([series, pd.Series(series.values, index=shifted)])
    return doubled.sort_index()
def clipping_threshold(ts, capacity=100, rate_limit=10):
    """Start and end index (datetime) that clips the price/value of a time series the most

    Assumes that the integrated maximum includes the peak (instantaneous maximum).

    Arguments:
        ts (TimeSeries): Time series of prices or power readings to be "clipped" as much as possible.
        capacity (float): Total "funds" or "energy" available for clipping (in $ or Joules)
            The maximum allowed integrated area under time series and above the clipping threshold.
        rate_limit: Maximum rate at which funds or energy can be expended (in $/s or Watts)
            The clipping threshold is limitted to no less than the peak power (price rate) minus this rate_limit

    Returns:
        dict: Timestamp of the start and end of the period of the maximum clipped integrated increase

    >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
    ...      '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
    >>> import pandas as pd
    >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
    >>> clipping_threshold(ts, capacity=60000)  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    218.13...
    >>> clipping_threshold(ts, capacity=30000)  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    224.15...
    """
    params = clipping_params(ts, capacity=capacity, rate_limit=rate_limit)
    # clipping_params returns a falsy value when no clipping window was found.
    return params['threshold'] if params else None
def join_time_series(serieses, ignore_year=False, T_s=None, aggregator='mean'):
    """Combine a dict of pd.Series objects into a single pd.DataFrame with optional downsampling

    FIXME:
        For ignore_year and multi-year data, the index (in seconds) is computed assuming
        366 days per year (leap year). So 3 out of 4 years will have a 1-day (86400 s) gap

    Arguments:
        serieses (dict of Series): dictionary of named timestamp-indexed Series objects
        ignore_year (bool): ignore the calendar year, but not the season (day of year)
            If True, the DataFrame index will be seconds since the beginning of the
            year in each Series index, i.e. midnight Jan 1, 2014 will have index=0
            as will Jan 1, 2010 if two Series start on those two dates.
        T_s (float): sample period in seconds (for downsampling)
        aggregator (str or func): e.g. 'mean', 'sum', np.std

    Returns:
        pd.DataFrame: one column per input series, outer-joined on the shared index
    """
    if ignore_year:
        df = pd.DataFrame()
        # Bug fix: ``.iteritems()`` is Python-2-only; ``.items()`` works on both.
        for name, ts in serieses.items():
            # FIXME: deal with leap years
            # Second-of-day for each timestamp. Bug fix: on Python 3 ``map``
            # returns an iterator, which np.array() would wrap as a 0-d object
            # array; a list comprehension works on both versions.
            sod = np.array([tod.hour * 3600 + tod.minute * 60 + tod.second
                            for tod in ts.index.time])
            # Coerce soy to an integer so that merge/join operations identify same values
            # (floats don't equal!?)
            soy = (ts.index.dayofyear + 366 * (ts.index.year - ts.index.year[0])) * 3600 * 24 + sod
            ts2 = pd.Series(ts.values, index=soy)
            ts2 = ts2.dropna()
            ts2 = ts2.sort_index()
            # Bug fix: use the cleaned-up series' own index; indexing by the raw
            # ``soy`` mismatches lengths whenever ``dropna`` removed anything.
            df2 = pd.DataFrame({name: ts2.values}, index=ts2.index)
            df = df.join(df2, how='outer')
        if T_s and aggregator:
            df = df.groupby(lambda x: int(x / float(T_s))).aggregate(
                {name: aggregator for name in df.columns})
    else:
        df = pd.DataFrame(serieses)
        if T_s and aggregator:
            x0 = df.index[0]
            df = df.groupby(lambda x: int((x - x0).total_seconds() / float(T_s))).aggregate(
                {name: aggregator for name in df.columns})
    # FIXME: convert seconds since begninning of first year back into Timestamp instances
    return df
def simulate(t=1000, poly=(0.,), sinusoids=None, sigma=0, rw=0, irw=0, rrw=0):
    """Simulate a random signal with seasonal (sinusoids), linear and quadratic trend, RW, IRW, and RRW

    Arguments:
        t (int or list of float): number of samples or time vector, default = 1000
        poly (list of float): polynomial coefficients (in decreasing "order") passed to `numpy.polyval`
            i.e. poly[0]*x**(N-1) + ... + poly[N-1]
        sinusoids (list of list): [[period], [amplitude, period], or [ampl., period, phase]]
        sigma (float): std dev of i.i.d. Gaussian noise added to the signal
        rw (float): std dev of the increments of an added random walk (cumsum of noise)
        irw (float): std dev for an added integrated random walk (double cumsum)
        rrw (float): std dev for an added doubly-integrated random walk (triple cumsum)

    >>> len(simulate(poly=(0,),rrw=1))
    1000
    >>> simulate(t=range(3), poly=(1,2))  # doctest: +NORMALIZE_WHITESPACE
    0    2
    1    3
    2    4
    dtype: float64
    >>> all(simulate(t=50, sinusoids=((1,2,3),)) == simulate(t=range(50), sinusoids=((1,2,3),)))
    True
    >>> any(simulate(t=100))
    False
    >>> abs(simulate(sinusoids=42.42).values[1] + simulate(sinusoids=42.42).values[-1]) < 1e-10
    True
    >>> simulate(t=17,sinusoids=[42, 16]).min()
    -42.0
    >>> all((simulate(t=range(10), sinusoids=(1, 9, 4.5))+simulate(t=10, sinusoids=(1,9))).abs() < 1e-10)
    True
    """
    # An int ``t`` means "this many samples"; anything else is used as the time vector.
    if t and isinstance(t, int):
        t = np.arange(t, dtype=np.float64)
    else:
        t = np.array(t, dtype=np.float64)
    N = len(t)
    poly = poly or (0.,)
    poly = listify(poly)
    y = np.polyval(poly, t)
    sinusoids = listify(sinusoids or [])
    # A flat (A, T, P) spec of scalars is wrapped so it is treated as ONE sinusoid.
    if any(isinstance(ATP, (int, float)) for ATP in sinusoids):
        sinusoids = [sinusoids]
    for ATP in sinusoids:
        # default period is 1 more than the length of the simulated series (no values of the cycle are repeated)
        T = (t[-1] - t[0]) * N / (N - 1.)
        # default amplitude is 1 and phase is 0
        A, P = 1., 0
        # Try the 3-, then 2-, then 1-element spec; defaults fill the rest.
        try:
            A, T, P = ATP
        except (TypeError, ValueError):
            try:
                A, T = ATP
            except (TypeError, ValueError):
                # default period is 1 more than the length of the simulated series
                # (no values of the cycle are repeated)
                A = ATP[0]
        # print(A, T, P)
        # print(t[1] - t[0])
        y += A * np.sin(2 * np.pi * (t - P) / T)
    # Additive stochastic components: white noise, then 1 to 3 integrations of it.
    if sigma:
        y += np.random.normal(0.0, float(sigma), N)
    if rw:
        y += np.random.normal(0.0, float(rw), N).cumsum()
    if irw:
        y += np.random.normal(0.0, float(irw), N).cumsum().cumsum()
    if rrw:
        y += np.random.normal(0.0, float(rrw), N).cumsum().cumsum().cumsum()
    return pd.Series(y, index=t)
def normalize_symbols(symbols, *args, **kwargs):
    """Coerce into a list of uppercase strings like "GOOG", "$SPX, "XOM"

    Flattens nested lists in `symbols` and converts all list elements to strings

    Arguments:
        symbols (str or list of str): list of market ticker symbols to normalize
            If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols
        postprocess (func): function to apply to strings after they've been stripped
            default = str.upper

    FIXME:
        - list(set(list(symbols))) and `args` separately so symbols may be duplicated in symbols and args
        - `postprocess` should be a method to facilitate monkey-patching

    Returns:
        list of str: list of canonical ticker symbol strings (typically after .upper().strip())

    Examples:
        >> normalize_symbols("Goog,AAPL")
        ['GOOG', 'AAPL']
        >> normalize_symbols("  $SPX   ", " aaPL ")
        ['$SPX', 'AAPL']
        >> normalize_symbols("  $SPX   ", " aaPL ", postprocess=str)
        ['$SPX', 'aaPL']
        >> normalize_symbols(["$SPX", ["GOOG", "AAPL"]])
        ['GOOG', 'AAPL', '$SPX']
        >> normalize_symbols("$spy", ["GOOGL", "Apple"], postprocess=str)
        ['$spy', 'GOOGL', 'Apple']
    """
    postprocess = kwargs.get('postprocess', None) or str.upper
    # An empty (or all-falsy) container normalizes to an empty list.
    if ((hasattr(symbols, '__iter__') and not any(symbols))
            or (isinstance(symbols, (list, tuple, Mapping)) and (not symbols or not any(symbols)))):
        return []
    args = normalize_symbols(args, postprocess=postprocess)
    # NOTE(review): ``basestring`` is Python-2-only; replace with ``str`` when
    # porting this module to Python 3.
    if isinstance(symbols, basestring):
        try:
            return list(set(get_symbols_from_list(symbols))) + args
        except Exception:
            # Bug fix: this was a bare ``except:`` which also swallowed
            # SystemExit and KeyboardInterrupt.
            return [postprocess(s.strip()) for s in symbols.split(',')] + args
    else:
        ans = []
        for sym in list(symbols):
            ans += normalize_symbols(sym, postprocess=postprocess)
        return list(set(ans))
def series_bollinger(series, window=20, sigma=1., plot=False):
    """Compute Bollinger-band z-scores for a time series.

    Arguments:
        series (pd.Series): values to analyze
        window (int): rolling window length in samples
        sigma (float): band half-width in standard deviations (affects the plot only)
        plot (bool): if True, plot the series with its bands and the z-scores

    Returns:
        pd.Series: (series - rolling_mean) / rolling_std; the first
            ``window - 1`` entries are NaN.
    """
    # Bug fix: pd.rolling_mean/pd.rolling_std were removed in pandas 0.23;
    # Series.rolling() is the supported API (available since 0.18).
    rolling = series.rolling(window=window)
    mean = rolling.mean()
    std = rolling.std()
    df = pd.DataFrame({'value': series, 'mean': mean, 'upper': mean + sigma * std, 'lower': mean - sigma * std})
    # Reuse the rolling statistics instead of recomputing them a second time.
    bollinger_values = (series - mean) / std
    if plot:
        df.plot()
        pd.DataFrame({'bollinger': bollinger_values}).plot()
        plt.show()
    return bollinger_values
def frame_bollinger(df, window=20, sigma=1., plot=False):
    """Compute Bollinger z-scores for every column of a DataFrame.

    Arguments:
        df (pd.DataFrame): one series per column
        window (int): rolling window length, forwarded to series_bollinger
        sigma (float): band half-width, forwarded to series_bollinger
        plot (bool): accepted for interface symmetry; per-column plotting stays
            disabled (NOTE(review): confirm whether it should be forwarded)

    Returns:
        pd.DataFrame: z-scores, same columns as ``df``
    """
    bol = pd.DataFrame()
    for col in df.columns:
        # Bug fix: ``window`` and ``sigma`` were previously ignored and the
        # per-series defaults were silently used instead.
        bol[col] = series_bollinger(df[col], window=window, sigma=sigma, plot=False)
    return bol
def double_sinc(T_0=120, T_N=240, T_s=0.01, A=[1, .9], sigma=0.01, T_cyc=10, N_cyc=[3, 2], verbosity=0):
# T0, TN, A, sigma = np.array(T0), np.array(TN), np.array(A), np.array(sigma)
N = int(T_N / T_s)
t = np.arange(0, T_N, T_s)
# t_mid = 0.5 * (t[-1] + t[0])
e = sigma * np.random.randn(N)
x = A[0] * np.sinc(((t - T_0) * N_cyc[0] | |
import os
import json
import time
import logging
import functools
from typing import List, Any, Optional, Callable, Union, Tuple, Dict
from web3 import Web3
from web3.eth import Contract
from web3.contract import ContractFunction
from web3.types import (
TxParams,
Wei,
Address,
ChecksumAddress,
ENS,
Nonce,
HexBytes,
)
from eth_utils import is_same_address
from eth_typing import AnyAddress
# Sentinel meaning "ETH itself" (the zero address) rather than an ERC-20 token.
ETH_ADDRESS = "0x0000000000000000000000000000000000000000"

logger = logging.getLogger(__name__)

# TODO: Consider dropping support for ENS altogether and instead use AnyAddress
# Anything this module accepts as an address: raw bytes, checksummed hex, or an ENS name.
AddressLike = Union[Address, ChecksumAddress, ENS]
try:
class InvalidToken(Exception):
    """Raised when an address does not resolve to a valid token."""

    def __init__(self, address: Any) -> None:
        super().__init__(f"Invalid token address: {address}")
class InsufficientBalance(Exception):
    """Raised when the account balance cannot cover a requested amount."""

    def __init__(self, had: int, needed: int) -> None:
        super().__init__(f"Insufficient balance. Had {had}, needed {needed}")
def _load_abi(name: str) -> str:
    """Load a contract ABI from the package's ``assets`` directory.

    ``name`` is the file stem, e.g. ``"erc20"`` loads ``assets/erc20.abi``.

    NOTE(review): despite the ``str`` annotations, ``json.load`` returns the
    parsed JSON structure (typically a list of dicts) — confirm and correct
    the type hints.
    """
    path = f"{os.path.dirname(os.path.abspath(__file__))}/assets/"
    with open(os.path.abspath(path + f"{name}.abi")) as f:
        abi: str = json.load(f)
    return abi
def check_approval(method: Callable) -> Callable:
    """Decorator to check if user is approved for a token. It approves them if they
    need to be approved."""

    @functools.wraps(method)
    def approved(self: Any, *args: Any, **kwargs: Any) -> Any:
        # ETH itself never needs an ERC-20 approval, so map it to None.
        first = args[0]
        token = None if first == ETH_ADDRESS else first
        token_two = None
        # Trade methods take a second token that may also need approval.
        if method.__name__ in ("make_trade", "make_trade_output"):
            second = args[1]
            token_two = None if second == ETH_ADDRESS else second
        # Approve each real token that is not yet approved, first then second.
        for candidate in (token, token_two):
            if candidate and not self._is_approved(candidate):
                self.approve(candidate)
        return method(self, *args, **kwargs)

    return approved
def supports(versions: List[int]) -> Callable:
    """Decorator factory: restrict a method to the listed Uniswap versions."""

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def guarded(self: "Uniswap", *args: List, **kwargs: Dict) -> Any:
            # Reject calls on instances constructed for an unsupported version.
            if self.version not in versions:
                raise Exception(
                    "Function does not support version of Uniswap passed to constructor"
                )
            return func(self, *args, **kwargs)

        return guarded

    return decorator
def _str_to_addr(s: str) -> AddressLike:
    """Convert a string to an :data:`AddressLike`.

    ``0x``-prefixed hex becomes raw ``Address`` bytes; a ``*.eth`` name is
    kept as an ``ENS`` value.

    Raises:
        Exception: if the string is neither form.
    """
    if s.startswith("0x"):
        return Address(bytes.fromhex(s[2:]))
    elif s.endswith(".eth"):
        return ENS(s)
    else:
        # Bug fix: the message was a plain string, so ``{s}`` was never
        # interpolated (also fixes the "Could't" typo).
        raise Exception(f"Couldn't convert string {s} to AddressLike")
def _addr_to_str(a: AddressLike) -> str:
    """Normalize an :data:`AddressLike` to a checksummed ``0x...`` string.

    Raises:
        Exception: for ENS names, which cannot be checksummed here.
        InvalidToken: for a string without the ``0x`` prefix, or any
            unsupported input type.
    """
    if isinstance(a, bytes):
        # Address or ChecksumAddress
        addr: str = Web3.toChecksumAddress("0x" + bytes(a).hex())
        return addr
    elif isinstance(a, str):
        if a.endswith(".eth"):
            # Address is ENS
            raise Exception("ENS not supported for this operation")
        elif a.startswith("0x"):
            addr = Web3.toChecksumAddress(a)
            return addr
        else:
            raise InvalidToken(a)
    # Bug fix: non-bytes/non-str inputs previously fell through and returned
    # None implicitly; fail loudly instead.
    raise InvalidToken(a)
def _validate_address(a: AddressLike) -> None:
    """Fail if ``a`` cannot be normalized to a checksummed address string.

    NOTE(review): relies on ``assert``, which is stripped under ``python -O``
    — consider raising explicitly instead.
    """
    assert _addr_to_str(a)
# Network id -> human-readable network name; ids outside this map are rejected
# with an exception in ``Uniswap.__init__``.
_netid_to_name = {1: "mainnet", 4: "rinkeby"}
class Uniswap:
def __init__(
    self,
    address: Union[str, AddressLike],
    private_key: str,
    provider: str = None,
    web3: Web3 = None,
    version: int = 1,
    max_slippage: float = 0.1,
) -> None:
    """Wrap a Uniswap v1 or v2 deployment for the given account.

    Args:
        address: account address as a hex string, raw bytes, or ENS name.
        private_key: key used to sign transactions for ``address``.
        provider: RPC endpoint URL; used only when no ``web3`` instance is
            given, falling back to the ``PROVIDER`` environment variable.
        web3: pre-configured Web3 instance (takes precedence over ``provider``).
        version: Uniswap protocol version, 1 or 2.
        max_slippage: maximum accepted slippage fraction for trades.

    Raises:
        Exception: for an unknown network id or an unsupported ``version``.
    """
    self.address: AddressLike = _str_to_addr(address) if isinstance(
        address, str
    ) else address
    self.private_key = private_key
    self.version = version
    # TODO: Write tests for slippage
    self.max_slippage = max_slippage
    if web3:
        self.w3 = web3
    else:
        # Initialize web3. Extra provider for testing.
        self.provider = provider or os.environ["PROVIDER"]
        self.w3 = Web3(
            Web3.HTTPProvider(self.provider, request_kwargs={"timeout": 60})
        )
    # Resolve the connected chain to a known network name (see _netid_to_name).
    netid = int(self.w3.net.version)
    if netid in _netid_to_name:
        self.network = _netid_to_name[netid]
    else:
        raise Exception(f"Unknown netid: {netid}")
    logger.info(f"Using {self.w3} ('{self.network}')")
    self.last_nonce: Nonce = self.w3.eth.getTransactionCount(self.address)
    # This code automatically approves you for trading on the exchange.
    # max_approval is to allow the contract to exchange on your behalf.
    # max_approval_check checks that current approval is above a reasonable number
    # The program cannot check for max_approval each time because it decreases
    # with each trade.
    self.max_approval_hex = f"0x{64 * 'f'}"
    self.max_approval_int = int(self.max_approval_hex, 16)
    self.max_approval_check_hex = f"0x{15 * '0'}{49 * 'f'}"
    self.max_approval_check_int = int(self.max_approval_check_hex, 16)
    if self.version == 1:
        # v1 deploys a separate factory contract per network.
        factory_contract_addresses = {
            "mainnet": "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95",
            "ropsten": "0x9c83dCE8CA20E9aAF9D3efc003b2ea62aBC08351",
            "rinkeby": "0xf5D915570BC477f9B8D6C0E980aA81757A3AaC36",
            "kovan": "0xD3E51Ef092B2845f10401a0159B2B96e8B6c3D30",
            "görli": "0x6Ce570d02D73d4c384b46135E87f8C592A8c86dA",
        }
        self.factory_contract = self._load_contract(
            abi_name="uniswap-v1/factory",
            address=_str_to_addr(factory_contract_addresses[self.network]),
        )
    elif self.version == 2:
        # For v2 the address is the same on mainnet, Ropsten, Rinkeby, Görli, and Kovan
        # https://uniswap.org/docs/v2/smart-contracts/factory
        factory_contract_address_v2 = _str_to_addr(
            "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f"
        )
        self.factory_contract = self._load_contract(
            abi_name="uniswap-v2/factory", address=factory_contract_address_v2,
        )
        self.router_address: AddressLike = _str_to_addr(
            "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"
        )
        """Documented here: https://uniswap.org/docs/v2/smart-contracts/router02/"""
        self.router = self._load_contract(
            abi_name="uniswap-v2/router02", address=self.router_address,
        )
    else:
        raise Exception("Invalid version, only 1 or 2 supported")
    logger.info(f"Using factory contract: {self.factory_contract}")
@supports([1])
def get_all_tokens(self) -> List[dict]:
    """Return a ``{"name", "symbol"}`` dict for every token registered in the v1 factory.

    Raises:
        InvalidToken: propagated from :meth:`get_token` for broken token contracts.
    """
    # FIXME: This is a very expensive operation, would benefit greatly from caching
    token_count = self.factory_contract.functions.tokenCount().call()
    tokens = []
    for i in range(token_count):
        address = self.factory_contract.functions.getTokenWithId(i).call()
        # Consistency fix: compare against the module-level ETH_ADDRESS
        # constant instead of repeating the zero-address literal.
        if address == ETH_ADDRESS:
            # Token is ETH
            continue
        tokens.append(self.get_token(address))
    return tokens
@supports([1])
def get_token(self, address: AddressLike) -> dict:
    """Return ``{"name", "symbol"}`` for the ERC-20 contract at ``address``."""
    # FIXME: This function should always return the same output for the same input
    # and would therefore benefit from caching
    contract = self._load_contract(abi_name="erc20", address=address)
    try:
        symbol_value = contract.functions.symbol().call()
        name_value = contract.functions.name().call()
    except Exception as e:
        logger.warning(
            f"Exception occurred while trying to get token {_addr_to_str(address)}: {e}"
        )
        raise InvalidToken(address)
    return {"name": name_value, "symbol": symbol_value}
@supports([1])
def exchange_address_from_token(self, token_addr: AddressLike) -> AddressLike:
    """Look up the v1 exchange address for ``token_addr`` via the factory."""
    lookup = self.factory_contract.functions.getExchange(token_addr)
    ex_addr: AddressLike = lookup.call()
    # TODO: What happens if the token doesn't have an exchange/doesn't exist? Should probably raise an Exception (and test it)
    return ex_addr
@supports([1])
def token_address_from_exchange(self, exchange_addr: AddressLike) -> Address:
    """Return the ERC20 token address served by a given v1 exchange contract."""
    # NOTE(review): Uniswap v1's exchange `tokenAddress` is normally a
    # parameterless getter; passing `exchange_addr` as an argument looks
    # suspicious — confirm the bundled uniswap-v1/exchange ABI actually
    # declares an address parameter, otherwise this call will fail.
    token_addr: Address = (
        self.exchange_contract(ex_addr=exchange_addr)
        .functions.tokenAddress(exchange_addr)
        .call()
    )
    return token_addr
@functools.lru_cache()
@supports([1])
def exchange_contract(
    self, token_addr: AddressLike = None, ex_addr: AddressLike = None
) -> Contract:
    """Return the (memoized) v1 exchange contract for a token or exchange address.

    Either ``token_addr`` or ``ex_addr`` must be supplied; when only the
    token is known, its exchange address is resolved through the factory.

    Raises
    ------
    InvalidToken
        If no exchange address could be determined.
    """
    if token_addr and not ex_addr:
        ex_addr = self.exchange_address_from_token(token_addr)
    if ex_addr is None:
        raise InvalidToken(token_addr)
    contract = self._load_contract(abi_name="uniswap-v1/exchange", address=ex_addr)
    logger.info(f"Loaded exchange contract {contract} at {contract.address}")
    return contract
@functools.lru_cache()
def erc20_contract(self, token_addr: AddressLike) -> Contract:
    """Return a (memoized) ERC20 contract wrapper for ``token_addr``."""
    contract = self._load_contract(abi_name="erc20", address=token_addr)
    return contract
@functools.lru_cache()
@supports([2])
def get_weth_address(self) -> ChecksumAddress:
    """Return the canonical WETH address as reported by the v2 router."""
    # Contract calls should always return checksummed addresses
    weth: ChecksumAddress = self.router.functions.WETH().call()
    return weth
def _load_contract(self, abi_name: str, address: AddressLike) -> Contract:
    """Build a web3 contract object for ``address`` using the bundled ABI ``abi_name``."""
    abi = _load_abi(abi_name)
    return self.w3.eth.contract(address=address, abi=abi)
# ------ Exchange ------------------------------------------------------------------
@supports([1, 2])
def get_fee_maker(self) -> float:
    """Get the maker fee.

    Uniswap charges no maker fee; return 0.0 (a float) to honour the
    declared return type and mirror ``get_fee_taker``.
    """
    return 0.0
@supports([1, 2])
def get_fee_taker(self) -> float:
    """Get the taker fee (a flat 0.3% on both Uniswap v1 and v2)."""
    return 0.003
# ------ Market --------------------------------------------------------------------
@supports([1, 2])
def get_eth_token_input_price(self, token: AddressLike, qty: Wei) -> Wei:
    """Public price for ETH to Token trades with an exact input."""
    if self.version == 1:
        exchange = self.exchange_contract(token)
        price: Wei = exchange.functions.getEthToTokenInputPrice(qty).call()
    elif self.version == 2:
        # v2 routes ETH trades through WETH; last hop is the output amount.
        route = [self.get_weth_address(), token]
        price = self.router.functions.getAmountsOut(qty, route).call()[-1]
    return price
@supports([1, 2])
def get_token_eth_input_price(self, token: AddressLike, qty: int) -> int:
    """Public price for token to ETH trades with an exact input."""
    if self.version == 1:
        exchange = self.exchange_contract(token)
        price: int = exchange.functions.getTokenToEthInputPrice(qty).call()
    else:
        # v2: token -> WETH; last element is the ETH (WETH) amount out.
        route = [token, self.get_weth_address()]
        price = self.router.functions.getAmountsOut(qty, route).call()[-1]
    return price
@supports([2])
def get_token_token_input_price(
    self, token0: AnyAddress, token1: AnyAddress, qty: int
) -> int:
    """Public price for token to token trades with an exact input."""
    weth = self.get_weth_address()
    # If either leg is WETH this is really an ETH trade; delegate.
    # See: https://github.com/shanefontaine/uniswap-python/issues/22
    if is_same_address(token0, weth):
        return int(self.get_eth_token_input_price(token1, qty))
    if is_same_address(token1, weth):
        return int(self.get_token_eth_input_price(token0, qty))
    route = [token0, weth, token1]
    price: int = self.router.functions.getAmountsOut(qty, route).call()[-1]
    return price
@supports([1, 2])
def get_eth_token_output_price(self, token: AddressLike, qty: int) -> Wei:
    """Public price for ETH to Token trades with an exact output."""
    if self.version == 1:
        exchange = self.exchange_contract(token)
        price: Wei = exchange.functions.getEthToTokenOutputPrice(qty).call()
    else:
        # v2: first element of getAmountsIn is the required ETH input.
        route = [self.get_weth_address(), token]
        price = self.router.functions.getAmountsIn(qty, route).call()[0]
    return price
@supports([1, 2])
def get_token_eth_output_price(self, token: AddressLike, qty: Wei) -> int:
    """Public price for token to ETH trades with an exact output."""
    if self.version == 1:
        exchange = self.exchange_contract(token)
        price: int = exchange.functions.getTokenToEthOutputPrice(qty).call()
    else:
        # v2: first element of getAmountsIn is the required token input.
        route = [token, self.get_weth_address()]
        price = self.router.functions.getAmountsIn(qty, route).call()[0]
    return price
@supports([2])
def get_token_token_output_price(
    self, token0: AnyAddress, token1: AnyAddress, qty: int
) -> int:
    """Public price for token to token trades with an exact output."""
    weth = self.get_weth_address()
    # If either leg is WETH this is really an ETH trade; delegate.
    # See: https://github.com/shanefontaine/uniswap-python/issues/22
    # TODO: Will these equality checks always work? (Address vs ChecksumAddress vs str)
    if is_same_address(token0, weth):
        return int(self.get_eth_token_output_price(token1, qty))
    if is_same_address(token1, weth):
        return int(self.get_token_eth_output_price(token0, qty))
    route = [token0, weth, token1]
    price: int = self.router.functions.getAmountsIn(qty, route).call()[0]
    return price
# ------ Wallet balance ------------------------------------------------------------
def get_eth_balance(self) -> Wei:
    """Return the ETH balance (in wei) of the configured wallet address."""
    return self.w3.eth.getBalance(self.address)
def get_token_balance(self, token: AddressLike) -> int:
    """Return the balance of ``token`` held by the configured wallet.

    ETH (identified by the sentinel ETH_ADDRESS) is not an ERC20 token,
    so it falls back to the native balance lookup.
    """
    _validate_address(token)
    if _addr_to_str(token) == ETH_ADDRESS:
        return self.get_eth_balance()
    balance: int = self.erc20_contract(token).functions.balanceOf(self.address).call()
    return balance
# ------ ERC20 Pool ----------------------------------------------------------------
@supports([1])
def get_ex_eth_balance(self, token: AddressLike) -> int:
    """Get the balance of ETH in an exchange contract."""
    exchange_addr: AddressLike = self.exchange_address_from_token(token)
    return self.w3.eth.getBalance(exchange_addr)
@supports([1])
def get_ex_token_balance(self, token: AddressLike) -> int:
"""Get the balance of a token in an exchange contract."""
| |
<filename>InitProb.py
from __future__ import division
from copy import deepcopy
import random as r
import math as m
import numpy as np
import matplotlib.pyplot as plt
import copy
import sys
########################################
# General
########################################
class PolyNodeData:
    """Container for a planar polygon (outer chain plus optional holes).

    Attributes
    ----------
    PCC : list[int]
        PolyChainCount — number of edges in each polychain; the first entry
        is the main (outer) chain, subsequent entries are holes.
    polychains : list[list[int]]
        Node indices making up each chain.
    nodes : list[list[float]]
        Node records as read from the .node file (index plus coordinates).
    NC : int or None
        Node count.
    """

    def __init__(self):
        self.PCC = []
        self.polychains = [[]]
        self.nodes = []
        self.NC = None

    def loadPND(self, polyname, nodename):
        """Populate this object from a Triangle-style .poly/.node file pair."""
        with open(polyname) as p:
            polylines = p.readlines()
        ###Main Polychain###
        self.PCC.append(int(polylines[1].split()[0])) #PCC : PolyChainCount : A list of integers
        #Integers describe number of edges in polychains, with first being main and subsequent
        #being holes
        self.polychains = [[]]
        idx = 0
        for j in range(2, 2+self.PCC[0]):
            self.polychains[0].append(int(polylines[j].split()[1]))
        # No hole section present: file ends right after the main chain.
        if len(polylines) == self.PCC[0] + 2:
            return
        ###Hole Polychains###
        zer0 = self.PCC[0]+3
        self.PCC += [None for i in range(int(polylines[zer0-1].split()[0]))] #Number of Holes
        for j in range(0, len(self.PCC)-1):
            # p0s: running line offset of chain j+1 within the hole section.
            p0s = sum(self.PCC[1:j+1]) + j
            if zer0+p0s == len(polylines):
                break
            self.PCC[j+1] = int(polylines[zer0 + p0s].split()[0])
            self.polychains.append([])
            for k in range(zer0 + p0s + 1, zer0 + p0s + 1 + self.PCC[j+1]):
                self.polychains[j+1].append(int(polylines[k].split()[1]))
        ###Node Data###
        with open(nodename) as p:
            nodelines = p.readlines()
        for i in range(1, len(nodelines)):
            self.nodes.append([float(j) for j in nodelines[i].split()])
        self.NC = len(self.nodes)

    def exportPND(self, fileprefix):
        """Write this polygon back out as <fileprefix>.node and <fileprefix>.poly."""
        with open(fileprefix + ".node", "w") as f:
            f.write("{} 2 0 1\n".format(self.NC))
            for i in self.nodes:
                # Node records may carry extra fields; format ignores surplus args.
                f.write("{} {}\n".format(*[float(j) for j in i]))
        with open(fileprefix + ".poly", "w") as f:
            f.write("0 2 0 1\n")
            f.write("{} 1\n".format(int(self.PCC[0])))
            CC = self.polychains[0] #CurrentChain
            for i in range(1, len(self.polychains[0])):
                f.write("{1} {0} {1}\n".format(CC[i-1], CC[i]))
            # NOTE(review): the literal "TEST" line looks like leftover debug
            # output — it would corrupt the .poly format; confirm intent.
            f.write("TEST\n")
            # Closing edge of the chain (last node back to the first).
            f.write("{1} {0} {2}\n".format(CC[i], CC[i]+1, CC[0]))
            f.write("{}\n".format(int(len(self.PCC)-1)))
            for j in range(1, len(self.PCC)):
                f.write("{} 1\n".format(self.PCC[j]))
                CC = self.polychains[j] #CurrentChain
                for i in range(1, len(CC)):
                    f.write("{1} {0} {1}\n".format(CC[i-1], CC[i]))
                f.write("TEST\n")
                f.write("{1} {0} {2}\n".format(CC[i], CC[i]+1, CC[0]))
        return 0

    def loadOFF(self, filename):
        """Populate this object from an OFF file (2-D coordinates only)."""
        with open(filename) as f:
            lines = f.readlines()
        header = [int(i) for i in lines[1].split()]
        self.NC = header[0]
        NE = header[1]
        self.nodes = []
        for j in range(2, 2+self.NC):
            # Only x/y are kept; the OFF z coordinate is dropped.
            self.nodes.append([float(i) for i in lines[j].split()[0:2]])
        self.polychains = []
        for j in range(2+self.NC, 2+self.NC+NE):
            line = lines[j].split()
            self.polychains.append([int(i) for i in line[1:]])
            self.PCC.append(int(line[0]))

    def exportOFF(self, filename):
        """Write this polygon out in OFF format via the module-level helper."""
        exportToOFF(self.nodes, self.polychains, filename)

    def rechain(self):
        """Rebuild polychains as consecutive node index runs from PCC."""
        self.polychains = []
        for idx, item in enumerate(self.PCC):
            self.polychains.append([sum(self.PCC[0:idx]) + j for j in range(item)])

    def manual(self, nodeslist): #nodeslist being list of lists, lists being list of nodes repr chains
        """Populate this object directly from in-memory chains of nodes."""
        for i in nodeslist:
            self.PCC.append(len(i))
            self.nodes += i
        # NOTE(review): NC is set to the number of chains here, not the number
        # of nodes as in loadPND/loadOFF — confirm this is intended.
        self.NC = len(nodeslist)
        self.rechain()

    def nearestNode(self, tIdx): #target [Node] Index
        """Return [distance, index] of the node closest to node tIdx."""
        sD = float("inf") #shortest Distance
        sIdx = None #shortest [Node by] Index
        for cIdx, cNode in enumerate(self.nodes): #current Index current Node
            if cIdx == tIdx:
                continue
            D = dist(self.nodes[cIdx], self.nodes[tIdx])
            if D < sD:
                sD = D
                sIdx = cIdx
        return([sD, sIdx])

    def clipAcute(self, C=0.5):
        """Clip every acute corner, replacing each with two nearby nodes.

        C scales the clip depth relative to the nearest-node distance.
        """
        aL = identifyAcute(self)
        rO = 0 #rolling Offset
        for idx, angles in enumerate(aL):
            for A in angles:
                D = self.nearestNode(A)[0]
                # Previous, current and next node around the clipped corner
                # (indices shifted by the insertions made so far).
                trpl = [self.nodes[(A-i+rO)%sum(self.PCC[0:idx+1])] for i in range(-1,2)]
                quad = clipTrpl(trpl, D, C)
                print("clipTrpl({}, {}, {}) = \n\t{}\n".format(trpl, D, C, quad))
                self.nodes[A+rO] = quad[1]
                self.nodes.insert(A+rO, quad[2])
                self.PCC[idx]+=1
                rO+=1
        self.rechain()
def clipTrpl(trpl, D, C=0.5):
    """Clip the corner at trpl[1], returning a 4-point chain.

    The middle point is replaced by two points moved along the edges toward
    trpl[2] and trpl[0], each displaced by C*D relative to the edge length.
    """
    pts = [np.array(p) for p in deepcopy(trpl)]
    arrows = [np.array([vf[1], vf[3]]) for vf in vectorFormat(pts)]
    moved_a = pts[1] + C * D / dist(pts[0], pts[1]) * arrows[1]
    moved_b = pts[1] + C * D / dist(pts[1], pts[2]) * arrows[0]
    return [p.tolist() for p in (pts[0], moved_a, moved_b, pts[2])]
def printAngles(PND):
    """Compute the angle at every chain node and dump them to angles_lsup.txt.

    Returns the per-chain list of raw angles (radians) from angleDebug.
    """
    angleList = [[] for _ in range(len(PND.PCC))]
    for c_idx, chain in enumerate(PND.polychains):
        for offset in range(len(chain)):
            rotated = chain[offset:] + chain[:offset]
            trpl = [PND.nodes[n] for n in rotated[0:3]]
            angleList[c_idx].append(angleDebug(trpl))
    with open("angles_lsup.txt", "w") as f:
        for chain_angles in angleList:
            for a in chain_angles:
                # File gets the supplementary angle in degrees.
                f.write(str(180 - (a/(np.pi) * 180)) + "\n")
            f.write("\n")
    return angleList
def printAngleIndices(PND):
    """Write the node indices of every acute corner to anglesIndices_lsup.txt."""
    acute = identifyAcute(PND)
    with open("anglesIndices_lsup.txt", "w") as f:
        for chain in acute:
            for index in chain:
                f.write(str(index) + "\n")
            f.write("\n")
    return acute
def identifyAcute(PND):
    """Return, per polychain, the global node indices whose corner is acute."""
    acuteList = [[] for _ in range(len(PND.PCC))]
    for c_idx, chain in enumerate(PND.polychains):
        for offset in range(len(chain)):
            rotated = chain[offset:] + chain[:offset]
            trpl = [PND.nodes[n] for n in rotated[0:3]]
            if isAcute(trpl):
                # Offset of this chain's nodes within the global node list.
                prior = sum([PND.PCC[k] for k in range(c_idx)])
                acuteList[c_idx].append((offset + 1 + prior) % PND.NC)
    return acuteList
def averageDist(PND):
    """Sum distances over all ordered node pairs and divide by the node count.

    Each unordered pair contributes twice (once per ordering).
    """
    total = 0
    for a_idx, a_node in enumerate(PND.nodes):
        for b_idx, b_node in enumerate(PND.nodes):
            if a_idx == b_idx:
                continue
            total += dist(a_node, b_node)
    return total/PND.NC
def averageEdgeDist(PND):
    """Return the mean length of the edges listed in PND.polychains."""
    # NOTE(review): this indexes edgeIdx[0]/edgeIdx[1], i.e. it expects each
    # chain entry to be an (i, j) node-index pair, but loadPND/rechain/manual
    # fill polychains with plain ints — as written this would raise TypeError
    # on those objects. Confirm which representation callers pass in.
    S = [] #Shortest
    for polychain in PND.polychains:
        for edgeIdx in polychain:
            D = dist(PND.nodes[edgeIdx[0]],PND.nodes[edgeIdx[1]])
            S.append(D)
    return sum(S)/len(S)
def longestDist(PND):
    """Return the largest distance between any two nodes (the diameter)."""
    best = 0
    for a_idx, a_node in enumerate(PND.nodes):
        for b_idx, b_node in enumerate(PND.nodes):
            candidate = dist(a_node, b_node)
            if candidate > best:
                best = candidate
    return best
def dist(P1, P2):
    """Euclidean distance between 2-D points P1 and P2."""
    dx = P2[0] - P1[0]
    dy = P2[1] - P1[1]
    return (dx**2 + dy**2)**.5
def angleDebug(trpl):
    """Return the raw angle (as given by angle_between) at the middle node
    of a 3-node triple."""
    arrows = [np.array(v) for v in vectorFormat(trpl)]
    return angle_between(*arrows)
def isAcute(trpl):
    """Takes list len=3 of nodes and returns True if acute angle, false otherwise"""
    vd = vectorFormat(trpl)
    acute = angle_between(*vd) < np.pi/2
    if acute:
        return True
    return False
def vectorFormat(trpl):
    """trpl: [[p0x, p0y], [p1x, p1y], [p2x, p2y]]

    Builds the two edge vectors leaving the middle point (toward trpl[2]
    and toward trpl[0]) in [x0, x1, y0, y1] form, displaced to the origin.
    """
    toward_next = [trpl[1][0], trpl[2][0], trpl[1][1], trpl[2][1]]
    toward_prev = [trpl[1][0], trpl[0][0], trpl[1][1], trpl[0][1]]
    return [dispOrigin(v) for v in (toward_next, toward_prev)]
def robinsonAspectRatio(QVL):
    """ QuadVertexList : [[X0,Y0],...,[X3,Y3]] — aspect ratio of a quad.

    Uses the distances from the vertex centroid to two adjacent edge
    midpoints, corrected by the angle between those directions; returns
    the larger of the two axis ratios (1.0 for a square).
    """
    # Edge midpoints ("bisectors") and the vertex centroid.
    mids = [[.5*(QVL[i][0]+QVL[(i+1)%4][0]), .5*(QVL[i][1]+QVL[(i+1)%4][1])]
            for i in range(4)]
    cx = sum(.25*v[0] for v in QVL)
    cy = sum(.25*v[1] for v in QVL)
    r1_h1 = ((cx - mids[0][0])**2 + (cy - mids[0][1])**2)**.5
    r2_h1 = ((cx - mids[1][0])**2 + (cy - mids[1][1])**2)**.5
    n1 = ((mids[0][0] - cx) / r1_h1, (mids[0][1] - cy) / r1_h1)
    n2 = ((mids[1][0] - cx) / r2_h1, (mids[1][1] - cy) / r2_h1)
    theta = 180/m.pi * m.acos(n1[0]*n2[0] + n1[1]*n2[1])
    s = m.sin(theta * m.pi / 180)
    return max(max(r1_h1, s*r2_h1)/min(r1_h1, s*r2_h1),
               max(r2_h1, s*r1_h1)/min(r2_h1, s*r1_h1))
def exportToOFFDebug( vertices, filename ):
    # NOTE(review): broken debug variant of exportToOFF — it references
    # `edgelists` and `edges`, which are not defined in this function, so the
    # body always raises NameError (silently swallowed by the bare except).
    # NOTE(review): the `return 0` inside `finally` overrides the `return 1`
    # in the except clause, so the function reports success even on failure;
    # and if open() itself fails, `f` is unbound when finally runs.
    try:
        f = open(filename, "w")
        f.write("OFF\n")
        f.write("{} {} 0\n".format(str(len(vertices)), str( len(edgelists) )))
        for i, vertex in enumerate(vertices):
            f.write("{} {} 0.0\n".format(float(vertex[0]), float(vertex[1])))
        for i, vertex in enumerate(vertices):
            f.write("{}".format(len(edges)))
            f.write((" {}"*len(edges)).format(*edges))
            f.write("\n")
    except:
        print("exportQuadsToOFF(): FileError\n")
        return 1
    finally:
        f.close()
        return 0
def exportToOFF( vertices, edgelists, filename ):
    """Write a 2-D mesh as an OFF file (z coordinate fixed at 0.0).

    Parameters
    ----------
    vertices : list of [x, y]
    edgelists : list of node-index lists, one per face/chain
    filename : output path

    Returns
    -------
    int
        0 on success, 1 on failure.
    """
    # BUG FIX: the original closed the file in a `finally` block that ended
    # with `return 0`, which overrode the error `return 1` (and referenced an
    # unbound `f` when open() failed). Using a context manager fixes both.
    try:
        with open(filename, "w") as f:
            f.write("OFF\n")
            f.write("{} {} 0\n".format(str(len(vertices)), str(len(edgelists))))
            for i, vertex in enumerate(vertices):
                f.write("{} {} 0.0\n".format(float(vertex[0]), float(vertex[1])))
            for edges in edgelists:
                f.write("{}".format(len(edges)))
                f.write((" {}"*len(edges)).format(*edges))
                f.write("\n")
    except Exception:
        # Message corrected: it previously named exportQuadsToOFF.
        print("exportToOFF(): FileError\n")
        return 1
    return 0
def exportQuadsToOFF( quads, filename ):
    """ quads is a list of QVL's such as those given by unitQuad and assessed by robinsonAspectRatio : quads = [ [[X0,Y0],...,[X3,Y3]], ... , [[U0,V0],...,[U3,V3]] ]

    Writes the quads to an OFF file, each quad shifted right by 3*i so they
    do not overlap when viewed. Returns 0 on success, 1 on failure.
    """
    # BUG FIX: the original's `finally: f.close(); return 0` overrode the
    # error `return 1` (and `f` was unbound if open() failed); a context
    # manager closes the file and lets the correct status propagate.
    try:
        with open(filename, "w") as f:
            f.write("OFF\n")
            f.write("{} {} 0\n".format(str(4*len(quads)), str(len(quads))))
            for i, quad in enumerate(quads):
                for vertex in quad:
                    f.write("{} {} 0.0\n".format(float(vertex[0] + 3*i), float(vertex[1])))
            for i in range(len(quads)):
                f.write("4 {} {} {} {}\n".format(*[4*i + j for j in range(4)]))
    except Exception:
        print("exportQuadsToOFF(): FileError\n")
        return 1
    return 0
def convertOFFtoELENODE( offname ):
    """Converts an off in the same directory to ele node files"""
    with open(offname, "r") as OFF:
        OFFLines = OFF.readlines()
    OFFData = []
    for line in OFFLines:
        OFFData.append(line.split())
    # Header line (index 1) carries the vertex and face counts.
    numVertices = int(OFFData[1][0])
    numFaces = int(OFFData[1][1])
    # NOTE(review): the first face line should be at index 2+numVertices;
    # the extra +1 reads the SECOND face's vertex count — confirm whether
    # this off-by-one is intentional.
    numPerFace = int(OFFData[2+numVertices+1][0])
    outname = offname.split(".")[0] #To name the output files
    with open( outname + ".ele", "w") as ELE:
        ELE.write( "{}\t{}\t0\n".format(numFaces, numPerFace)) #Placing the number of elements, and taking the number of vertices in an element from the first element that appears in the off
        for i in range(2 + numVertices, 2 + numVertices + numFaces):
            temp = []
            # .ele files are 1-indexed, hence the +1 on every vertex index.
            for j in range( 1, 1+numPerFace):
                temp.append( int(OFFData[i][j]) + 1 )
            template = "{}\t" + "{}\t"*numPerFace + "\n"
            ELE.write( template.format( i-numVertices-1, *temp))
    with open( outname + ".node", "w") as NODE:
        NODE.write( "{}\t2\t0\t0\n".format(numVertices)) #Placing the number of elements, and taking the number of vertices in an element from the first element that appears in the off
        for i in range(2, 2 + numVertices):
            # Only index, x and y are written; surplus format args are ignored.
            template = "{}\t{}\t{}\n"
            NODE.write( template.format( i-1, *OFFData[i]))
    return
def listFilenameFormat( L ):
    """Turn a list's repr into a filename-safe token.

    Brackets at the ends, spaces and quotes are removed; commas become
    underscores (e.g. [1, 2, 'a'] -> "1_2_a").
    """
    text = str(L).strip("[]")
    for unwanted in (" ", "'"):
        text = text.replace(unwanted, "")
    return text.replace(",", "_")
#############################
#Perimarea
#############################
def PerimareaRatio(QVL):
    """Return a perimeter-normalised area quality measure for the quad QVL.

    For a square this appears to evaluate to 1, shrinking for more
    degenerate quads of the same perimeter.
    """
    # NOTE(review): assumes scale() (defined elsewhere in this file) rescales
    # QVL in place to unit perimeter — its return value is unused; confirm.
    scale(QVL, perimeter(QVL)**-1)
    return 16 * quadArea(QVL) / perimeter(QVL)
def perimeter(PVL):
    """Return the perimeter of the closed polygon with vertex list PVL."""
    n = len(PVL)
    total = 0
    for i in range(n):
        here = PVL[(i)%n]
        there = PVL[(i+1)%n]
        total += ((here[0] - there[0])**2 + (here[1] - there[1])**2)**.5
    return total
def quadArea(QVL):
area = 0
for | |
<reponame>ihmpdcc/hmp-qc-humann2
#!/usr/bin/env python3
'''
Description: This script is used to execute the HUMAnN2 docker pipeline
Author: <NAME>
Input:
- An SRR ID or a set of fastq files
- Mode: run qc only, humann2 only, both (qc & humann), or metaphlan only
    - S3 path (optional: bucket location for input/output files)
- AWS credentials
Output:
- Statistics file
- QC'ed files
- HUMAnN2 Output files: _humann2_genefamilies.tsv, _humann2_pathabundance.tsv, _humann2_pathcoverage.tsv, _metaphlan_bugs_list.tsv
'''
import argparse
import json
import pandas as pd
import os
import shutil
import logging
from subprocess import Popen, PIPE, STDOUT
import subprocess
import re
import sys
import boto3
from botocore.exceptions import ClientError
import hashlib
import gzip
import bz2
import ntpath
import time
import glob
def main():
parser = argparse.ArgumentParser( description='Execute Dockerized HUMAnN2 pipeline')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-x', '--srr', type=str, help="temp")
group.add_argument('-i', '--input', type=str, help='Prefix for a set of FASTQ files')
parser.add_argument('-s', '--samp', type=str, required=True, help='SRS ID or list file containing paths to sample files located either in an S3 bucket or locally')
parser.add_argument('-r', '--srr_list', type=str, required=True, help='SRR_list')
parser.add_argument('-p', '--input_pair', type=str, required=False, help='paired-file')
parser.add_argument('-m', '--mode', type=str, required=False, help='mode: qc, humann2, metaphlan, both',choices=['qc', 'humann2', 'metaphlan', 'both'])
parser.add_argument('-b', '--bucket', type=str, required=False, help='Path to S3 bucket')
global args
args = parser.parse_args()
global f
global l
global samp
global samp_id
global srr
global srr_2
global json_string
global result
l = open("log.txt", 'w')
json_string = []
os.mkdir("input_seqs")
samp = args.samp
#samp_id = samp.split('_', 1)[0]
samp_id = samp
#SRR List is not provided. Get list from SRA. Not working yet
if args.srr_list is None:
if not (re.match("SRS[0-9]{6,8}$", samp)):
print("SRS ID is invalid\n")
sys.exit(1)
#fetch SRS IDs
print("\nFetching SRR IDs for Sample " + samp + "\n")
pull_srs_cmd = "esearch -db sra -query " + samp + "| efetch -format runinfo | cut -f1 -d",""
p = Popen(pull_srs_cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
result = p.stdout.read()
print (result)
#download fastq files from SRA
srr_list = (args.srr_list).split("\n")
for srr in srr_list:
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed. Trying again in 1 minute...\n" + result.decode())
time.sleep(60)
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed. Trying again in 5 minutes...\n" + result.decode())
time.sleep(300)
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed. Trying again in 15 minutes...\n" + result.decode())
time.sleep(900)
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed after 4 tries. Exiting...\n" + result.decode())
sys.exit(1)
#SRR list with S3 bucket paths provided
elif args.srr_list.startswith('s3'):
summ_file = samp_id + "_summary_stats.txt"
f = open(summ_file, 'w')
srr_list = (args.srr_list).split(",")
print("\nDownloading files from S3 bucket...\n")
f.write("Downloading files from S3 bucket...\n")
for srr in srr_list:
failure = os.system("aws s3 cp " + srr + " input_seqs/")
print (failure)
if failure:
print ("Failed to download " + srr + " from S3 bucket")
sys.exit(1)
f.write(srr + " downloaded\n")
if srr.endswith('gz'):
os.system("gunzip input_seqs/" + ntpath.basename(srr))
elif srr.endswith('.bz2'):
os.system("bzip2 -d input_seqs/" + ntpath.basename(srr))
#SRR list with SRR IDs provided
elif args.srr_list.startswith('SRR'):
summ_file = samp_id + "_summary_stats.txt"
f = open(summ_file, 'w')
srr_list = (args.srr_list).split(",")
f.write("SRA Download:\n")
print("\nSRA Download starting...\n")
for srr in srr_list:
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed. Trying again in 1 minute...\n" + result.decode())
time.sleep(60)
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed. Trying again in 5 minutes...\n" + result.decode())
time.sleep(300)
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed. Trying again in 15 minutes...\n" + result.decode())
time.sleep(900)
fastq_download(srr)
if re.match('.*err.*',result.decode()):
print("SRA download failed after 4 tries. Exiting...\n" + result.decode())
sys.exit(1)
os.system("ls -l input_seqs")
if os.path.exists("input_seqs/"+ srr + ".sra_1.fastq"):
os.system("mv input_seqs/" + srr + ".sra_1.fastq input_seqs/" + srr + "_1.fastq")
os.system("mv input_seqs/" + srr + ".sra_2.fastq input_seqs/" + srr + "_2.fastq")
elif os.path.exists("input_seqs/"+ srr + ".sra.fastq"):
os.system("mv input_seqs/" + srr + ".sra.fastq input_seqs/" + srr + "_1.fastq")
if os.path.isdir("input_seqs/sra"):
os.system("rm -r input_seqs/sra")
print("\n" + srr + " Downloaded...\n")
sra_match = re.match('spots read\s+:\s+(\S+)\nreads read\s+:\s+(\S+)\nreads written\s+:\s+(\S+)',result.decode())
spots_read = sra_match.group(1).replace(",","")
reads_read = sra_match.group(2).replace(",","")
reads_written = sra_match.group(3).replace(",","")
#os.system("ls -l input_seqs/")
hash_sra1 = hashlib.sha256(open("input_seqs/" + srr + "_1.fastq",'rb').read()).hexdigest()
size_sra1 = os.path.getsize("input_seqs/" + srr + "_1.fastq")
bz_sra = bz2.compress(open("input_seqs/" + srr + "_1.fastq", 'rb').read())
sra1_bz = "input_seqs/" + srr + "_1.fastq.bz2"
fh = open(sra1_bz, "wb")
fh.write(bz_sra)
fh.close()
if os.path.exists("input_seqs/"+ srr + "_2.fastq"):
hash_sra2 = hashlib.sha256(open("input_seqs/" + srr + "_2.fastq",'rb').read()).hexdigest()
size_sra2 = os.path.getsize("input_seqs/" + srr + "_2.fastq")
bz_sra2 = bz2.compress(open("input_seqs/" + srr + "_2.fastq", 'rb').read())
sra2_bz = "input_seqs/" + srr + "_2.fastq.bz2"
fh = open(sra2_bz, "wb")
fh.write(bz_sra2)
fh.close()
size_sra2_bz = os.path.getsize("input_seqs/" + srr + "_2.fastq.bz2")
file_2_name = srr + "_2.fastq.bz2"
else:
hash_sra2 = "n/a"
size_sra2 = "n/a"
bz_sra2 = "n/a"
sra2_bz = "n/a"
size_sra2_bz = "n/a"
file_2_name = "n/a"
size_sra1_bz = os.path.getsize("input_seqs/" + srr + "_1.fastq.bz2")
download_cmd = "fasterq-dump --version"
p = Popen(download_cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
sra_version = p.stdout.read()
sra_version = sra_version.decode().replace("\n","").replace("\"","")
f.write("software: " + sra_version + "\nspots read: " + spots_read + "\nreads read: " + reads_read + \
"\nreads written: " + reads_written + \
"\n" + srr + "_1_file_name: " + srr + "_1.fastq.bz2" + \
"\n" + srr + "_1_file_size_uncompressed: " + str(size_sra1) + \
"\n" + srr + "_1_file_size_compressed: " + str(size_sra1_bz) + \
"\n" + srr + "_1_file_sha256: " + hash_sra1 + \
"\n" + srr + "_2_file_name: " + file_2_name + \
"\n" + srr + "_2_file_size_uncompressed: " + str(size_sra2) + \
"\n" + srr + "_2_file_size_compressed: " + str(size_sra2_bz) + \
"\n" + srr + "_2_file_sha256: " + hash_sra2 + "\n")
sra_download = {
'sra_download':{
'software': sra_version,
'spots_read': int(spots_read),
'reads_read': int(reads_read),
'reads_written': int(reads_written),
'file1':{
'file_name': srr + "_1.fastq",
'file_size': size_sra1,
'sha256': hash_sra1
},
'file2':{
'file_name': file_2_name,
'file_size': size_sra2,
'sha256': hash_sra2
}
}
}
json_string.append(sra_download)
#Input files are local
else:
summ_file = samp_id + "_summary_stats.txt"
f = open(summ_file, 'w')
srr_list = (args.srr_list).split(",")
for srr in srr_list:
failure = os.system("cp input/" + srr + " input_seqs/")
if failure:
print ("Failed to copy " + srr + " from local directory")
sys.exit(1)
if srr.endswith('gz'):
os.system("gunzip input_seqs/" + ntpath.basename(srr))
elif srr.endswith('.bz2'):
os.system("bzip2 -d input_seqs/" + ntpath.basename(srr))
#Concatenate downloaded files
if glob.glob("input_seqs/*_1.fastq"):
os.system("cat input_seqs/*_1.fastq > input_seqs/" + samp_id + "_1.fastq")
elif glob.glob("input_seqs/*_R1.fastq"):
os.system("cat input_seqs/*_R1.fastq > input_seqs/" + samp_id + "_1.fastq")
elif glob.glob("input_seqs/*_1.fq"):
os.system("cat input_seqs/*_1.fq > input_seqs/" + samp_id + "_1.fastq")
elif glob.glob("input_seqs/*_R1.fq"):
os.system("cat input_seqs/*_R1.q > input_seqs/" + samp_id + "_1.fastq")
elif args.mode == 'metaphlan':
os.system("cat input_seqs/* > input_seqs/" + samp_id + "_1.fastq")
else:
print("Input files do not follow naming convention *_1.fq, *_1.fastq, *_R1.fq or *_R1.fastq\n")
sys.exit(1)
if glob.glob("input_seqs/*_2.fastq"):
os.system("cat input_seqs/*_2.fastq > input_seqs/" + samp_id + "_2.fastq")
elif glob.glob("input_seqs/*_R2.fastq"):
os.system("cat input_seqs/*_R2.fastq > input_seqs/" + samp_id + "_2.fastq")
elif glob.glob("input_seqs/*_2.fq"):
os.system("cat input_seqs/*_2.fq > input_seqs/" + samp_id + "_2.fastq")
elif glob.glob("input_seqs/*_R2.fq"):
os.system("cat input_seqs/*_R2.q > input_seqs/" + samp_id + "_2.fastq")
#add to stats file, info for concatenated file
hash_samp1 = hashlib.sha256(open("input_seqs/" + samp_id + "_1.fastq",'rb').read()).hexdigest()
size_samp1 = os.path.getsize("input_seqs/" + samp_id + "_1.fastq")
bz_samp = bz2.compress(open("input_seqs/" + samp_id + "_1.fastq", 'rb').read())
samp1_bz = "input_seqs/" + samp_id + "_1.fastq.bz2"
fh = open(samp1_bz, "wb")
fh.write(bz_samp)
fh.close()
size_samp1_bz = os.path.getsize("input_seqs/" + samp_id + "_1.fastq.bz2")
if os.path.exists("input_seqs/"+ samp_id + "_2.fastq"):
hash_samp2 = hashlib.sha256(open("input_seqs/" + samp_id + "_2.fastq",'rb').read()).hexdigest()
size_samp2 = os.path.getsize("input_seqs/" + samp_id + "_2.fastq")
bz_samp2 = bz2.compress(open("input_seqs/" + samp_id + "_2.fastq", 'rb').read())
samp2_bz = "input_seqs/" + samp_id + "_2.fastq.bz2"
fh = open(samp2_bz, "wb")
fh.write(bz_samp2)
fh.close()
size_samp2_bz = os.path.getsize("input_seqs/" + samp_id + "_2.fastq.bz2")
file_2_name = samp_id + "_2.fastq.bz2"
else:
hash_samp2 = "n/a"
size_samp2 = "n/a"
bz_samp2 = "n/a"
samp2_bz = "n/a"
size_samp2_bz = "n/a"
file_2_name = "n/a"
f.write( "\nSample input:\n" + samp_id + "_1.fastq" + \
"\n" + samp_id + "_1_file_size_uncompressed: " + str(size_samp1) + \
"\n" + samp_id + "_1_file_size_compressed: " + str(size_samp1_bz) + \
"\n" + samp_id + "_1_file_sha256: " + hash_samp1 + \
"\n" + samp_id + "_2.fastq" + \
"\n" + samp_id + "_2_file_size_uncompressed: " + str(size_samp2) + \
"\n" + samp_id + "_2_file_size_compressed: " + str(size_samp2_bz) + \
"\n" + samp_id + "_2_file_sha256: " + hash_samp2 + "\n")
sample_input = {
'sample_input':{
'file1':{
'file_name': samp_id + "_1.fastq",
'file_size': size_samp1,
'sha256': hash_samp1
},
'file2':{
'file_name': file_2_name,
'file_size': size_samp2,
'sha256': hash_samp2
}
}
}
json_string.append(sample_input)
#Delete original files
os.system("mv input_seqs/" + samp_id + "_*.fastq .")
os.system("rm -r input_seqs/*")
os.system("mv " + samp_id + "_*.fastq input_seqs/")
# If SRR ID is provided, download data
if args.srr is None:
| |
<reponame>dstansby/EISpy
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# pylint: disable=E1101, C0330
"""
Utilities used in EIS calculations, corrections and fits.
"""
import os
import datetime as dt
import warnings
import urllib
import pathlib
from scipy.io import readsav
from scipy.interpolate import interp1d
import numpy as np
import astropy.constants as const
from astropy import units as u
import sunpy
# Cache of housekeeping .sav files already read, keyed by file name, so
# repeated lookups for the same month avoid re-reading from disk.
__housekeeping_memo__ = {}
# Constant and quadratic term values for long and short wavelengths
# (reference wavelength plus linear/quadratic dispersion coefficients per
# EIS detector band; presumably in Angstrom — confirm against EIS docs).
__detector__ = {'LONG': {'refwvl': 199.9389,
                         'dispersion' : 0.022332, 'dispersion_sq' : -1.329e-8},
                'SHORT': {'refwvl': 166.131,
                          'dispersion': 0.022317, 'dispersion_sq' : -1.268e-8}}
def get_dict_from_file(date, prefix="eis3"):
    """
    Reads an IDL .sav file containing EIS housekeeping data and returns its
    contents as a python dictionary. For speed, if the file has already been
    read, it may return the contents from a hidden memo. If the file is not
    found in the location specified it will attempt to download it once and
    save the file in the location originally specified.

    Parameters
    ----------
    date: date or datetime object
        Date of the observation required. If the file is present in the sunpy
        data directory, it will be read from there, or downloaded to that
        location if it isn't.
    prefix: str
        file prefix (eis3 for thermal correction, fpp1 for doppler shift)
    """
    key = '{0}_{1:%Y%m}.sav'.format(prefix, date)
    if key in __housekeeping_memo__:
        return __housekeeping_memo__[key]
    # BUG FIX: Path('~') is a literal directory named "~"; expanduser() is
    # required to resolve it to the user's home directory.
    local_path = (pathlib.Path('~') / 'EISpy' / 'eispy' / 'data' / key).expanduser()
    local_path.parent.mkdir(parents=True, exist_ok=True)
    try:
        file_dict = readsav(str(local_path), python_dict=True)
    except IOError:
        url = "http://sdc.uio.no/eis_wave_corr_hk_data/" + key
        urllib.request.urlretrieve(url, filename=str(local_path))
        file_dict = readsav(str(local_path), python_dict=True)
        warnings.warn("File was not found, so it was downloaded and " +
                      "placed at the given location", UserWarning)
    __housekeeping_memo__[key] = file_dict
    return file_dict
def get_hk_temperatures(time, _pos=None):
    """
    Given a housekeeping filename and a time, returns the array of temperature
    correction values for that time. If the time is out of range for that file
    the closest available time will be used (i.e. the very first or very last)

    Parameters
    ----------
    time: datetime object
        The date and time of the observation. Should be present even if it is
        overridden because it is used to read the appropriate file
    _pos: int
        The index of the desired time in the file's time field. This overrides
        the time argument, and should be used internally only.
    """
    file_dict = get_dict_from_file(time)
    if _pos is None:
        # Locate the housekeeping record closest to the requested time.
        timestamp = datetime_to_ssw_time(time)
        position = np.argmin(np.abs(file_dict['time'] - timestamp))
    else:
        position = _pos
    # Neighbouring records 5 steps before/after, clamped to the data range.
    pos_before = position - 5 if position > 5 else 0
    times = file_dict['time'].shape[0]
    pos_after = position + 5 if position + 5 < times else times - 1
    # 16 "main" temperatures at the target record plus 9 auxiliary
    # temperatures from each of the before/after records: 34 values total.
    temps = np.zeros(34)
    # Excuse the magic numbers, these are the temperatures we're interested in,
    # as defined in the IDL file eis_sts3_temp.pro. I have no idea why these
    # and not other values, but never mind...
    main_temps = [1, 5, 7, 10, 11, 13, 14, 15, 16, 17, 21, 22, 23, 24, 26, 28]
    aux_temps = [1, 10, 11, 13, 14, 22, 23, 24, 26]
    temps[:16] = [file_dict['data'][position].temp[i] for i in main_temps]
    temps[16:25] = [file_dict['data'][pos_before].temp[i] for i in aux_temps]
    temps[25:] = [file_dict['data'][pos_after].temp[i] for i in aux_temps]
    return temps
def correct_pixel(temps, time=None, slit2=False):
    """
    Calculates the orbital correction for a single pixel

    Parameters
    ----------
    temps: numpy ndarray
        The housekeeping temperature values at the given time
    time: datetime object
        The time at which the observation took place. If no time is given, then
        current system time is assumed (note this will most likely be wrong)
    slit2: boolean
        If set to true, the correction will assume the observation was done
        using the 2" slit
    """
    if time is None:
        time = dt.datetime.now()
        warnings.warn("Time not set, assuming current time.", UserWarning)
    # BUG FIX: _get_corr_parameters expects a time in SSW format (a float);
    # passing a datetime made its comparisons against SSW floats raise
    # TypeError, so convert first.
    correction_arr, pixel_ref = _get_corr_parameters(datetime_to_ssw_time(time))
    # The 2" slit is offset by a fixed number of pixels.
    slit2_offset = -8.2
    pixel_ref += slit2_offset if slit2 else 0
    return np.sum(correction_arr * (temps - 15.0) / 10.0) + pixel_ref
def _get_corr_parameters(sswtime):
    """
    Returns the correct correction parameters for the given time. They are
    different because of three adjustments that have been made to the device.
    The coefficients were calculated from S. Kamio's neural network approach.

    Parameters
    ----------
    sswtime: float
        The time of the observation, in SSW format

    Returns
    -------
    correction_arr: numpy ndarray
        34 weights applied to the housekeeping temperatures by correct_pixel.
    pixel_ref: float
        The reference pixel position for the matching instrument epoch.
    """
    # Heater adjustment time
    adj1 = datetime_to_ssw_time(dt.datetime(2007, 11, 29, 00, 00, 00))
    # slit focus adjustment
    adj2 = datetime_to_ssw_time(dt.datetime(2008, 8, 24, 00, 00, 00))
    # grating focus adjustment
    adj3 = datetime_to_ssw_time(dt.datetime(2008, 10, 21, 8, 00, 00))
    # Epoch 1: before the heater adjustment.
    if sswtime < adj1:
        correction_arr = np.array([ 4.10562e-01, 2.51204e+00, -7.03979e-01,
                                    1.21183e+00, -1.46165e+00, -2.03801e+00,
                                   -5.09189e+00, -3.31613e+00, 2.28654e-01,
                                    3.72455e+00, 8.19741e-01, 1.17212e+00,
                                    3.19226e+00, 2.21462e+00, -2.76307e+00,
                                   -7.75230e+00, 2.27707e+00, 8.62746e-02,
                                   -3.87772e+00, 8.50736e-01, 2.50457e-01,
                                   -4.62109e+00, -1.49986e+00, -9.98911e-01,
                                   -5.24012e+00, -4.88090e+00, 8.41629e-01,
                                    1.53231e+00, -5.56888e+00, 5.46359e+00,
                                    5.00476e+00, 6.83911e+00, 2.10491e+00,
                                    6.89056e+00])
        pixel_ref = 1.34524e+3
    # Epoch 2: between the heater and slit focus adjustments.
    elif adj1 < sswtime < adj2:
        correction_arr = np.array([-7.60169e+00, -1.46383e+00, 3.64224e+00,
                                    6.22838e+00, 1.02071e+00, -5.87856e+00,
                                   -7.07813e+00, -3.29145e+00, -2.68002e+00,
                                    6.44214e+00, -5.64250e+00, 9.41400e+00,
                                    1.02490e+01, 1.00514e+00, 1.54987e+01,
                                   -2.43897e+01, 6.93774e+00, 7.99804e+00,
                                   -4.24839e+00, 1.94191e+00, -4.11472e+00,
                                    2.67682e+00, 2.63193e+00, -1.58034e+00,
                                   -1.36976e+01, -1.78314e+00, -3.97698e+00,
                                   -5.86437e+00, 2.30465e+00, 1.23473e+01,
                                   -1.35947e+00, 1.85987e+00, 4.27904e+00,
                                   -4.35809e+00])
        pixel_ref = 1.34915e+3
    # Epoch 3: after the slit focus adjustment.
    else:
        correction_arr = np.array([-9.69118e-01, 2.12159e+00, -2.99428e+00,
                                    2.61100e+00, 1.41035e+00, -9.76397e-01,
                                   -1.61651e+01, -9.94312e-01, 1.04603e+00,
                                    8.57033e-01, 2.07951e+00, 4.80522e+00,
                                    8.65133e+00, -2.37848e-02, 1.09901e+00,
                                   -5.51204e+00, 1.58325e+00, 1.97708e+00,
                                   -3.42620e+00, 1.76606e+00, 6.50817e+00,
                                   -7.19983e+00, -3.21551e+00, -6.81840e-01,
                                   -5.75801e+00, -1.08458e-01, -3.76701e+00,
                                   -3.05294e+00, -4.01884e+00, 1.00570e+01,
                                    4.61089e-01, 6.69429e+00, -6.84122e-01,
                                    4.38880e+00])
        pixel_ref = 1.34281e+3
    # Between the slit and grating focus adjustments the reference pixel is
    # shifted by an extra constant offset.
    pixel_ref += 4.88 if adj2 < sswtime < adj3 else 0
    return correction_arr, pixel_ref
def calc_hk_thermal_corrections(times, slit2=False):
    """
    For a given filename (or month in the format 'yyyymm') and times of
    measurements, calculate the corrections needed on each of those times,
    interpolating if necessary when the file does not contain those exact times

    Parameters
    ----------
    times: numpy array of datetime objects # TODO why numpy array? not a list?
        Times the observations occurred
    slit2: boolean
        Whether the observation was made using the 2" slit
    """
    # TODO: include good and bad data samples
    measurement_times = get_dict_from_file(times[0])['time']
    sswtimes = [datetime_to_ssw_time(t) for t in times]
    # Indices of the housekeeping samples closest to the earliest and latest
    # requested times. The absolute value is essential: argmin/argmax of the
    # raw signed difference would always select the first/last sample of the
    # whole file instead of the nearest samples.
    min_wanted_index = np.argmin(np.abs(measurement_times - np.min(sswtimes)))
    max_wanted_index = np.argmin(np.abs(measurement_times - np.max(sswtimes)))
    pixels = np.zeros(max_wanted_index - min_wanted_index + 1)
    # Inclusive upper bound so the last element of `pixels` is filled too;
    # an exclusive bound would leave a spurious zero at the end of the window.
    for i in range(min_wanted_index, max_wanted_index + 1):
        temperatures = get_hk_temperatures(times[0], _pos=i)
        # Offset from the start of the window. The original reversed
        # subtraction (min_wanted_index - i) wrote the values backwards into
        # the array through negative indices.
        pixels[i - min_wanted_index] = correct_pixel(temperatures,
                                                     measurement_times[i],
                                                     slit2)
    # Interpolate only over the window actually computed, so the x and y
    # arrays passed to interp1d have matching lengths.
    window_times = measurement_times[min_wanted_index:max_wanted_index + 1]
    shifted_corrections_fun = interp1d(window_times, pixels)
    return shifted_corrections_fun(sswtimes)
def datetime_to_ssw_time(time):
    """
    Converts a datetime object into an SSW-format timestamp, which is the
    number of seconds elapsed since 1979-01-01 00:00:00.

    Parameters
    ----------
    time: datetime object
        The datetime object to convert.
    """
    ssw_epoch = dt.datetime(1979, 1, 1, 0, 0, 0)  # Solarsoft epoch
    return (time - ssw_epoch).total_seconds()
def calc_slit_tilt(y_window_start, n_y_pixels, date, band, slit):
    """
    Calculates the slit tilt correction, returning it as an array to be applied
    to each pixel in the observation, in units of wavelength.

    Parameters
    ----------
    y_window_start: int
        The pixel where the observation starts
    n_y_pixels: int
        The number of y pixels from the observation
    date:
        The date of the observation. This is used because the slit focus was
        adjusted on Aug 24, 2008.
    band: 'SHORT' or 'LONG'
        The corrections depend on whether the observation was done using the
        short or long wavelength modes of the instrument.
    slit: 1 or 2
        Which of the two slits was used in the observation.
    """
    slit_focus_adjustment = dt.datetime(2008, 8, 24)
    # Cubic polynomial coefficients (constant term first), one row per
    # combination of epoch, wavelength band and slit.
    coefficients = np.array(  # Before adjustment, short wl, 2" slit
        [[-2.7607165427059641e+00, 6.4390116579832180e-03,
          -2.7122949886142483e-06, 1.3035120912928136e-09],
         # Before adjustment, short wl, 1" slit
         [-5.5413445912854886e-01, 1.6348272018403623e-03,
          -1.1681674813813158e-06, 1.7382970471312863e-10],
         # Before adjustment, long wl, 2" slit
         [-2.5786595570389181e+00, 6.1481799132252490e-03,
          -3.1526317889607469e-06, 1.9165497210094085e-09],
         # Before adjustment, long wl, 1" slit
         [-3.8458103837911040e-01, 1.3331898117030505e-03,
          -1.5399093968859745e-06, 7.8727203402240153e-10],
         # After adjustment, short wl, 2" slit
         [-3.0444030416965404e+00, 6.4056986231720847e-03,
          -8.0170597073123649e-07, -1.8739881646780448e-10],
         # After adjustment, short wl, 1" slit
         [-8.5068355624974856e-01, 2.2398405213417405e-03,
          -1.0665536954454296e-06, -1.2311442746502073e-10],
         # After adjustment, long wl, 2" slit
         [-2.4247171434108168e+00, 5.0832726508360793e-03,
          -7.9727705770693547e-07, 2.3158597348832410e-10],
         # After adjustment, long wl, 1" slit
         [-2.0225535365170799e-01, 7.4854735225926561e-04,
          -7.6258247316829397e-07, 1.4085716859395248e-10]])
    # Select the row that matches the epoch, band and slit of this observation.
    row = 0
    if date > slit_focus_adjustment:
        row += 4
    if band == 'LONG':
        row += 2
    if slit == 1:
        row += 1
    selected_coefs = coefficients[row]
    ccd_rows = np.arange(y_window_start, y_window_start + n_y_pixels)
    # np.polyval wants the highest-order coefficient first, hence the reversal.
    tilt_in_pixels = np.polyval(selected_coefs[::-1], ccd_rows)
    dispersion_factor = 0.0223  # Angstrom per pixel
    return tilt_in_pixels * dispersion_factor * u.Angstrom
def calc_dispersion(wavelength):
"""
Calculates dispersion at a given wavelength.
Parameters
----------
wavelength: Astropy Quantity
The wavelength at which to calculate dispersion
"""
ccd_pix = wavelength_to_ccd_pixel(wavelength)
band = 'LONG' | |
/ (len(importances) * min_ch_rate * bandwidth + bandwidth**2)
lambd = BisectionSearchMonotoneDecr(PolicyValueLagr_NoObs, (importances, ch_rates, bandwidth), lambda_lb, lambda_ub, epsilon)
crawl_rates = np.zeros_like(importances, dtype=float)
for i in range(len(crawl_rates)):
crawl_rates[i] = (-ch_rates[i] + math.sqrt(ch_rates[i]**2 + 4 * importances[i] * ch_rates[i] / lambd)) / 2
return crawl_rates
# LambdaCrawl approximation for sources with incomplete change observations that assumes importance_w/change_rate_w = c for a fixed (unknown) c
# for all sources w.
#
# See Proposition 9 in the NeurIPS-2019 paper for details.
def LambdaCrawlApprox_IncomplObs(importances, ch_rates, bandwidth, epsilon):
    """Approximate LambdaCrawl for sources with incomplete observations.

    Assumes importance_w / change_rate_w equals the same (unknown) constant
    for every source, in which case the bandwidth is simply split in
    proportion to source importance (Proposition 9, NeurIPS-2019 paper).
    """
    total_importance = sum(importances)
    rates = [imp * bandwidth / total_importance for imp in importances]
    return np.array(rates, dtype=float)
# LambdaCrawl for sources with complete change observations.
#
# See the pseudocode in Algorithm 2 in the NeurIPS-2019 paper for details.
def LambdaCrawl_ComplObs(importances, ch_rates, bandwidth):
    """LambdaCrawl for sources with complete change observations.

    Repeatedly allocates crawl probabilities proportionally to importance;
    whenever a source's probability constraint p_w <= 1 saturates, that
    source is pinned at exactly 1 and the bandwidth it consumes is
    redistributed among the remaining sources. See the pseudocode in
    Algorithm 2 in the NeurIPS-2019 paper for details.
    """
    if bandwidth == 0:
        return np.zeros_like(importances, dtype=float)
    crawl_probs = np.zeros_like(importances, dtype=float)
    tentative = np.empty_like(importances, dtype=float)
    leftover = bandwidth
    while True:
        pinned_this_round = False
        # Total importance of the sources still undecided (crawl_probs == 0);
        # sources already pinned at 1 are excluded from the denominator.
        undecided_importance = 0
        for w in range(len(importances)):
            if crawl_probs[w] == 0:
                undecided_importance += importances[w]
        for w in range(len(importances)):
            # Any acceptable solution gives every source probability > 0, so
            # a zero entry marks a source whose value is still undetermined.
            if crawl_probs[w] != 0:
                continue
            p_hat = bandwidth * importances[w] / (ch_rates[w] * undecided_importance)
            if p_hat >= 1.0:
                # The constraint p_w <= 1 saturated: pin the source at 1 and
                # mark it (-1) so later rounds never reconsider it.
                crawl_probs[w] = 1.0
                tentative[w] = -1
                leftover -= (crawl_probs[w] * ch_rates[w])
                pinned_this_round = True
            else:
                tentative[w] = p_hat
        if not pinned_this_round:
            # Nothing saturated this round: the tentative values are final
            # for every source that was never pinned.
            for w in range(len(importances)):
                if tentative[w] != -1:
                    crawl_probs[w] = tentative[w]
            break
        # Otherwise redo the allocation with whatever bandwidth remains.
        bandwidth = leftover
    return crawl_probs
# Computes the harmonic policy cost for URLs with incomplete observations.
#
# See Proposition 1/Equation 4 in the NeurIPS-2019 paper.
def HarmonicPolicyCost_IncomplObs(importances, ch_rates, crawl_rates):
    """Harmonic policy cost for URLs with incomplete observations.

    See Proposition 1/Equation 4 in the NeurIPS-2019 paper. A crawl rate of
    zero for any source makes the cost infinite.
    """
    n = len(importances)
    if n == 0:
        return 0
    cost = 0.0
    for w in range(n):
        rate = crawl_rates[w]
        if rate == 0:
            return math.inf
        cost -= importances[w] * math.log(rate / (rate + ch_rates[w]))
    return cost / n
# Computes the harmonic policy cost for URLs with complete observations.
#
# See the formula in Proposition 4 in the NeurIPS-2019 paper.
def HarmonicPolicyCost_ComplObs(importances, ch_rates, crawl_probs):
    """Harmonic policy cost for URLs with complete observations.

    See the formula in Proposition 4 in the NeurIPS-2019 paper. A crawl
    probability of zero for any source makes the cost infinite.
    """
    n = len(importances)
    if n == 0:
        return 0
    cost = 0.0
    for w in range(n):
        prob = crawl_probs[w]
        if prob == 0:
            return math.inf
        cost -= importances[w] * math.log(prob)
    return cost / n
# Computes the harmonic policy cost for a mix of URLs with complete and incomplete observations.
#
# See the Equation 8 in the NeurIPS-2019 paper.
def HarmonicPolicyCost(crawl_rates_incompl_obs, crawl_probs_compl_obs, importances_incompl_obs, ch_rates_incompl_obs, \
                       importances_compl_obs, ch_rates_compl_obs):
    """Harmonic policy cost for a mix of URLs with complete and incomplete
    observations: the source-count-weighted average of the two partial
    costs (Equation 8 in the NeurIPS-2019 paper)."""
    n_incompl = len(importances_incompl_obs)
    n_compl = len(importances_compl_obs)
    cost_incompl = HarmonicPolicyCost_IncomplObs(importances_incompl_obs, ch_rates_incompl_obs, crawl_rates_incompl_obs)
    cost_compl = HarmonicPolicyCost_ComplObs(importances_compl_obs, ch_rates_compl_obs, crawl_probs_compl_obs)
    return (n_incompl * cost_incompl + n_compl * cost_compl) / (n_incompl + n_compl)
# Computes the binary policy cost for URLs with incomplete observations.
#
# See Equation 12 in the NeurIPS-2019 paper's supplement.
def BinaryPolicyCost_IncomplObs(importances, ch_rates, crawl_rates):
    """Binary policy cost for URLs with incomplete observations.

    See Equation 12 in the NeurIPS-2019 paper's supplement.
    """
    n = len(importances)
    if n == 0:
        return 0
    cost = 0.0
    for imp, ch, rate in zip(importances, ch_rates, crawl_rates):
        cost += imp * ch / (rate + ch)
    return cost / n
# Computes the binary policy cost for URLs with complete observations.
#
# See Equation 13 in the NeurIPS-2019 paper's supplement.
def BinaryPolicyCost_ComplObs(importances, ch_rates, crawl_probs):
    """Binary policy cost for URLs with complete observations.

    See Equation 13 in the NeurIPS-2019 paper's supplement.
    """
    n = len(importances)
    if n == 0:
        return 0
    cost = 0.0
    for imp, prob in zip(importances, crawl_probs):
        cost += imp * (1 - prob)
    return cost / n
# Computes the binary policy cost for a mix of URLs with complete and incomplete observations.
def BinaryPolicyCost(crawl_rates_incompl_obs, crawl_probs_compl_obs, importances_incompl_obs, ch_rates_incompl_obs, \
                     importances_compl_obs, ch_rates_compl_obs):
    """Binary policy cost for a mix of URLs with complete and incomplete
    observations: the source-count-weighted average of the two partial
    costs."""
    n_incompl = len(importances_incompl_obs)
    n_compl = len(importances_compl_obs)
    cost_incompl = BinaryPolicyCost_IncomplObs(importances_incompl_obs, ch_rates_incompl_obs, crawl_rates_incompl_obs)
    cost_compl = BinaryPolicyCost_ComplObs(importances_compl_obs, ch_rates_compl_obs, crawl_probs_compl_obs)
    return (n_incompl * cost_incompl + n_compl * cost_compl) / (n_incompl + n_compl)
# Computes the harmonic policy cost for a given bandwidth split across complete- and incomplete-change-history URLs.
#
# See Algorithm 3 in the NeurIPS-2019 paper. The return of the function in the pseudocode is the negative of SplitEval_JStar.
def SplitEval_JStar(bandwidth_compl_obs, solver_x_incompl_obs, importances_incompl_obs, ch_rates_incompl_obs, epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs, bandwidth):
    """Harmonic policy cost J* for a given split of the bandwidth between
    complete- and incomplete-change-history URLs.

    See Algorithm 3 in the NeurIPS-2019 paper; this returns the negative of
    the objective in the pseudocode, making it suitable for a minimizer.
    """
    if bandwidth_compl_obs > bandwidth:
        raise ValueError('SplitEval_JStar ERROR: bandwidth allocation to sources with complete observations exceeds total bandwidth! Bandwidth allocation to sources with complete observations: ', bandwidth_compl_obs, ", total bandwidth: ", bandwidth)
    # Cost contribution of the incomplete-observation sources; a zero crawl
    # rate anywhere makes the whole term infinite.
    crawl_rates_incompl_obs = solver_x_incompl_obs(importances_incompl_obs, ch_rates_incompl_obs, bandwidth - bandwidth_compl_obs, epsilon_incompl_obs)
    J_incompl_obs = 0
    for w, rate in enumerate(crawl_rates_incompl_obs):
        if rate == 0:
            J_incompl_obs = math.inf
            break
        J_incompl_obs -= (importances_incompl_obs[w] * math.log(rate / (rate + ch_rates_incompl_obs[w])))
    # Cost contribution of the complete-observation sources; a zero crawl
    # probability anywhere makes the whole term infinite.
    crawl_probs_compl_obs = LambdaCrawl_ComplObs(importances_compl_obs, ch_rates_compl_obs, bandwidth_compl_obs)
    J_compl_obs = 0
    for w, prob in enumerate(crawl_probs_compl_obs):
        if prob == 0:
            J_compl_obs = math.inf
            break
        J_compl_obs -= (importances_compl_obs[w] * math.log(prob))
    return J_incompl_obs + J_compl_obs
# Implements the LambdaCrawl family of algorithms.
#
# See Algorithm 3 in the NeurIPS-2019 paper. The implementation can use either the optimal LambdaCrawlApprox_IncomplObs or the approximate
# LambdaCrawlApprox_IncomplObs for handling the incomplete-change-observation sources.
def LambdaCrawl_X(solver_x_incompl_obs, importances_incompl_obs, ch_rates_incompl_obs, epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs, bandwidth):
    """LambdaCrawl family driver (Algorithm 3 in the NeurIPS-2019 paper).

    Searches for the bandwidth split between complete- and incomplete-
    observation sources that minimizes the harmonic policy cost, then solves
    each sub-problem at the optimal split. We use a minimization routine
    here, so SplitEval_JStar returns the value of J* for a given split,
    _not_ of \\overline{J}^* = -J^* as in LambdaCrawl's description in the
    paper.
    """
    # The complete-observation side can never usefully consume more than the
    # sum of its change rates (probabilities are capped at 1).
    upper_bound = min(bandwidth, sum(ch_rates_compl_obs))
    extra_args = (solver_x_incompl_obs, importances_incompl_obs, ch_rates_incompl_obs, epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs, bandwidth)
    result = sp.optimize.minimize_scalar(SplitEval_JStar, bounds=(0, upper_bound),
                                         args=extra_args, method='bounded',
                                         options={'xatol': 0.005 * bandwidth})
    if not result.success:
        raise ValueError('LambdaCrawl ERROR: bounded minimization failed')
    bandwidth_compl_obs = result.x
    crawl_rates_incompl_obs = solver_x_incompl_obs(importances_incompl_obs, ch_rates_incompl_obs, bandwidth - bandwidth_compl_obs, epsilon_incompl_obs)
    crawl_probs_compl_obs = LambdaCrawl_ComplObs(importances_compl_obs, ch_rates_compl_obs, bandwidth_compl_obs)
    return (crawl_rates_incompl_obs, crawl_probs_compl_obs)
# Implements LambdaCrawl proper. See LambdaCrawl_X for details.
def LambdaCrawl(importances_incompl_obs, ch_rates_incompl_obs, epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs, bandwidth):
    """LambdaCrawl proper: LambdaCrawl_X with the exact incomplete-observation
    solver LambdaCrawl_IncomplObs. See LambdaCrawl_X for details."""
    return LambdaCrawl_X(LambdaCrawl_IncomplObs, importances_incompl_obs, ch_rates_incompl_obs, epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs, bandwidth)
# Implements LambdaCrawlApprox, i.e., LambdaCrawl that uses the approximation from Proposition 9 in the NeurIPS-2019 paper to handle the
# incomplete-change-observation sources. See LambdaCrawl_X for details.
def LambdaCrawlApprox(importances_incompl_obs, ch_rates_incompl_obs, epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs, bandwidth):
    """LambdaCrawlApprox: LambdaCrawl_X using the Proposition-9 approximation
    (LambdaCrawlApprox_IncomplObs) for the incomplete-change-observation
    sources. See LambdaCrawl_X for details."""
    return LambdaCrawl_X(LambdaCrawlApprox_IncomplObs, importances_incompl_obs, ch_rates_incompl_obs, epsilon_incompl_obs, importances_compl_obs, ch_rates_compl_obs, bandwidth)
# Implements the BinaryLambdaCrawl family of algorithms.
#
# This is a generalization of the algorithm from <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. "Tractable near-optimal policies for crawling."
# PNAS-2018, which the NeurIPS-2019 paper refers to as BinaryLambdaCrawl. That algorithm optimizes the binary policy cost (see that paper for
# details) and as a result may fail to allocate any crawl rate to some of the sources. BinaryLambdaCrawl(epsilon) (see Subsection 9.3 in the
# supplement of the NeurIPS-2019 paper) is a modification of BinaryLambdaCrawl that forces it to allocate some bandwidth even to pages that
# BinaryLambdaCrawl would otherwise crawl-starve. In this family, BinaryLambdaCrawl(0.0) corresponds to the original BinaryLambdaCrawl.
# BinaryLambdaCrawl(0.4) has the best performance on the NeurIPS-2019 paper's dataset w.r.t. the harmonic policy cost, of all
# BinaryLambdaCrawl(epsilon) with epsilon in {0.0, 0.1,...,1}.
#
# The wrapper handles sources with complete and incomplete change observations. BinaryLambdaCrawl doesn't know how to handle the former in any special
# way; it simply treats them as if their observation history was incomplete. See LambdaCrawlBinary_Epsilon_Helper for most of this algorithm's logic.
def LambdaCrawlBinary_Epsilon(importances_incompl_obs, ch_rates_incompl_obs, epsilon, importances_compl_obs, ch_rates_compl_obs, bandwidth):
    """BinaryLambdaCrawl(epsilon) wrapper.

    Treats every source as having an incomplete change history; epsilon
    controls the minimum crawl rate granted to each source (epsilon = 0
    reproduces the original PNAS-2018 BinaryLambdaCrawl). Returns the pair
    (crawl_rates, []) to match the LambdaCrawl_X return shape.
    """
    if len(importances_compl_obs) > 0:
        sys.exit("ERROR: LambdaCrawlBinary_Epsilon doesn't know how to handle complete observation histories, but importances_compl_obs is nonempty")
    imps_and_chrates = np.column_stack((importances_incompl_obs, ch_rates_incompl_obs))
    n_sources = imps_and_chrates.shape[0]
    crawl_rates = np.zeros_like(imps_and_chrates[:, 0], dtype=float)
    # Guaranteed floor: an epsilon fraction of the per-source fair share.
    min_crawl_rate = bandwidth / n_sources * epsilon
    LambdaCrawlBinary_Epsilon_Helper(imps_and_chrates, crawl_rates, bandwidth, min_crawl_rate)
    return crawl_rates, []
# The main part of BinaryLambdaCrawl implementation.
#
# See Section 9.3 of the NeurIPS-2019 paper's supplement and <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. "Tractable near-optimal policies
# for crawling." PNAS-2018 for details.
def LambdaCrawlBinary_Epsilon_Helper(imps_and_chrates, crawl_rates, bandwidth, min_crawl_rate):
idxs_and_value_ratios = []
for w in range(imps_and_chrates.shape[0]):
idxs_and_value_ratios.append((imps_and_chrates[w, 0] * imps_and_chrates[w, 1] / (imps_and_chrates[w, 1] + min_crawl_rate)**2, w))
r = 0
for w in range(len(idxs_and_value_ratios)):
| |
# featured_code/blockudoku code/main.py
import numpy as np
from tkinter import Tk, Canvas, messagebox, Label, Button
import random
from time import sleep
import subprocess
# Fixed-size main window, centred on the screen.
root = Tk()
width = 470
height = 700
# Offsets that place the window in the middle of the screen.
offset1 = int(-width / 2 + root.winfo_screenwidth() / 2)
offset2 = int(-height / 2 + root.winfo_screenheight() / 2)
root.geometry(f'{width}x{height}+{offset1}+{offset2}')
root.resizable(width=False, height=False)
root.configure(bg='#ffffff')
# All playable pieces. Each piece is a list of (row, col) square offsets
# centred around (2, 2), i.e. within a 5x5 bounding box; Board.generate_moves
# adds these offsets to a candidate anchor cell to test placements.
piece_list = [[(2, 2)],
              [(2, 2), (3, 3)],
              [(2, 2), (3, 1)],
              [(2, 2), (2, 3)],
              [(2, 2), (3, 2)],
              [(2, 2), (1, 1), (3, 3)],
              [(2, 2), (3, 1), (1, 3)],
              [(2, 1), (2, 2), (2, 3)],
              [(2, 2), (1, 2), (3, 2)],
              [(2, 2), (1, 2), (2, 3)],
              [(2, 2), (1, 2), (2, 1)],
              [(2, 2), (3, 2), (2, 1)],
              [(2, 2), (3, 2), (2, 3)],
              [(2, 1), (2, 2), (2, 3), (2, 4)],
              [(2, 2), (1, 2), (3, 2), (4, 2)],
              [(2, 1), (2, 2), (2, 3), (3, 1)],
              [(2, 1), (2, 2), (2, 3), (1, 3)],
              [(2, 1), (2, 2), (2, 3), (1, 1)],
              [(2, 1), (2, 2), (2, 3), (3, 3)],
              [(2, 1), (2, 2), (2, 3), (1, 2)],
              [(2, 1), (2, 2), (2, 3), (3, 2)],
              [(2, 2), (1, 2), (3, 2), (3, 1)],
              [(2, 2), (1, 2), (3, 2), (1, 3)],
              [(2, 2), (1, 2), (3, 2), (1, 1)],
              [(2, 2), (1, 2), (3, 2), (3, 3)],
              [(2, 2), (1, 2), (3, 2), (2, 3)],
              [(2, 2), (1, 2), (3, 2), (2, 1)],
              [(2, 2), (3, 3), (2, 3), (3, 2)],
              [(2, 2), (2, 3), (3, 2), (3, 1)],
              [(2, 2), (3, 2), (2, 1), (3, 3)],
              [(2, 2), (2, 3), (3, 2), (1, 3)],
              [(2, 2), (3, 2), (2, 1), (1, 1)],
              [(0, 2), (1, 2), (2, 2), (3, 2), (4, 2)],
              [(2, 0), (2, 1), (2, 2), (2, 3), (2, 4)],
              [(2, 1), (1, 2), (2, 2), (3, 2), (2, 3)],
              [(3, 1), (2, 1), (2, 2), (2, 3), (3, 3)],
              [(1, 1), (2, 1), (2, 2), (2, 3), (1, 3)],
              [(1, 1), (1, 2), (2, 2), (3, 2), (3, 1)],
              [(1, 3), (1, 2), (2, 2), (3, 2), (3, 3)],
              [(3, 3), (1, 2), (2, 2), (3, 2), (3, 1)],
              [(1, 3), (1, 2), (2, 2), (3, 2), (1, 1)],
              [(1, 1), (2, 1), (2, 2), (2, 3), (3, 1)],
              [(3, 3), (2, 1), (2, 2), (2, 3), (1, 3)],
              [(1, 3), (2, 3), (3, 3), (3, 2), (3, 1)],
              [(3, 1), (2, 1), (1, 1), (1, 2), (1, 3)],
              [(3, 3), (2, 3), (1, 3), (1, 2), (1, 1)],
              [(1, 1), (2, 1), (3, 1), (3, 2), (3, 3)],
              ]
class Board:
    """17x17 Blockudoku game state.

    The playable 9x9 area occupies rows/columns 4..12; the surrounding
    border is padding so that piece offsets (0..4) never index out of
    bounds. A cell holds 1 when occupied and 0 when empty.
    """

    def __init__(self):
        # Full padded grid; only state[4:13, 4:13] is the visible board.
        self.state = np.zeros((17, 17))

    def small_board(self):
        """Return a copy of the visible 9x9 playing area."""
        # Slice-and-copy replaces the original element-by-element loop.
        return self.state[4:13, 4:13].copy()

    def print_board(self):
        """Print the visible 9x9 board to stdout (debugging aid)."""
        print(self.small_board())

    def generate_moves(self, piece):
        """Return every anchor position (i, j) where *piece* fits.

        *piece* is a list of (row, col) offsets. An anchor is legal when
        every offset lands inside the 9x9 playing area (padded indices
        4..12) on a currently empty cell.
        """
        result = []
        for i in range(13):
            for j in range(13):
                add_move = True
                for square in piece:
                    # Target cell must lie inside the playable window...
                    if 13 > i + square[0] > 3 and 13 > j + square[1] > 3:
                        # ...and must currently be empty.
                        if self.state[i + square[0]][j + square[1]] == 1:
                            add_move = False
                            break
                    else:
                        add_move = False
                        break
                if add_move:
                    result.append((i, j))
        return result

    def place_piece(self, piece, location):
        """Mark every cell covered by *piece* anchored at *location* occupied."""
        for square in piece:
            self.state[location[0] + square[0]][location[1] + square[1]] = 1

    def remove_piece(self, piece, location):
        """Undo place_piece: clear every cell covered by the piece."""
        for square in piece:
            self.state[location[0] + square[0]][location[1] + square[1]] = 0

    def clear_board(self):
        """Clear all full rows, columns and 3x3 blocks; return the bonus score.

        Each completed unit is worth 16 points. Cells are collected first and
        wiped afterwards, so overlapping units (e.g. a full row crossing a
        full column) are all detected before anything is erased.
        """
        to_clear = []
        bonus = 0
        # Full rows of the 9x9 area.
        for i in range(9):
            if all(self.state[4 + i][4 + j] != 0 for j in range(9)):
                bonus += 16
                to_clear.extend((4 + i, 4 + j) for j in range(9))
        # Full columns.
        for i in range(9):
            if all(self.state[4 + j][4 + i] != 0 for j in range(9)):
                bonus += 16
                to_clear.extend((4 + j, 4 + i) for j in range(9))
        # Full 3x3 blocks.
        for a in range(3):
            for b in range(3):
                if all(self.state[3 * a + 4 + j][3 * b + 4 + i] != 0
                       for i in range(3) for j in range(3)):
                    bonus += 16
                    to_clear.extend((3 * a + 4 + j, 3 * b + 4 + i)
                                    for i in range(3) for j in range(3))
        for square in to_clear:
            self.state[square[0]][square[1]] = 0
        return bonus

    def empty_board(self):
        """Reset every cell (including the padding) to empty."""
        # Vectorized reset replaces the original nested loops.
        self.state[:, :] = 0
class BruteForce:
    """Wrapper around an external Blockudoku engine executable.

    Communicates with the engine over its stdin/stdout pipes: the current
    pieces and board are written in, and the engine's chosen move is read
    back as a line of integers.
    """
    def __init__(self, path_engine: str = "Engine Path"):
        print("Log: engine started.")
        # Open the engine executable
        self.engine = subprocess.Popen(path_engine, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def _put(self, command):
        # Write commands to the engine (one line per command, flushed so the
        # engine sees it immediately)
        if not self.engine.stdin:
            raise BrokenPipeError()
        self.engine.stdin.write(f"{command}\n")
        self.engine.stdin.flush()

    def _read_line(self):
        # Read one line of engine output, stripped of the trailing newline
        if not self.engine.stdout:
            raise BrokenPipeError()
        return self.engine.stdout.readline().strip()

    def play_move(self, state, pieces):
        """Send the three pieces and the 9x9 board to the engine and return
        its chosen move as a list of ints, or the string "Dead position"
        when the engine reports the game cannot be saved."""
        # Pieces are sent as their 1-based index into piece_list.
        self._put(piece_list.index(pieces[0]) + 1)
        self._put(piece_list.index(pieces[1]) + 1)
        self._put(piece_list.index(pieces[2]) + 1)
        # Then the visible 9x9 region of the padded board, cell by cell.
        for i in range(4, 13):
            for j in range(4, 13):
                self._put(int(state.state[i][j]))
        # The engine streams lines until a terminator; the last line before
        # "Engine done." is the chosen move.
        last_line = ""
        while True:
            line = self._read_line()
            if line == "Engine done.":
                break
            if line == "Cannot avoid loss.":
                print(line)
                return "Dead position"
            last_line = line
        last_line = last_line.split(" ")
        last_line = [int(x) for x in last_line]
        return last_line

    def engine_kill(self):
        """Terminate the engine subprocess."""
        print("Log: engine terminated.")
        self.engine.terminate()
class Interface:
    def __init__(self, setup_board):
        """Build the game GUI around *setup_board* (a Board instance).

        Creates the 9x9 board canvas, the piece-selection strip, the score
        label and the engine button, and wires up the mouse bindings.
        """
        # Main 9x9 board canvas (50px cells).
        self.grey = Canvas(root, width='449', height='449', highlightthickness=2, highlightbackground='#001347')
        self.grey.place(x=10, y=75)
        # Last hovered board cell; (-4, -4) means "no hover preview".
        self.last = (-4, -4)
        # Strip showing the three selectable pieces (150px per piece).
        self.selection = Canvas(root, width='449', height='150', highlightthickness=2, highlightbackground='#001347',
                                bg='#ffffff')
        self.selection.place(x=10, y=540)
        self.score = 0
        self.score_text = Label(root, text='Score: ' + str(self.score), bg='#ffffff', font=("calibri bold", 30))
        self.score_text.place(x=10, y=8)
        self.state = setup_board
        # self.small_board = self.state.small_board()
        # The three pieces offered this round; indices of placed ones go into
        # masked_pieces; selected_piece == -1 means nothing selected.
        self.selectable_pieces = [(), (), ()]
        self.masked_pieces = []
        self.selected_piece = -1
        # show initial board
        self.display_board()
        self.selection.bind('<Button-1>', self.pick_piece)
        self.grey.bind('<Button-1>', self.drop_piece)
        self.grey.bind('<Motion>', self.movement)
        # generate engine terminal
        root.protocol("WM_DELETE_WINDOW", self.close)
        self.engine_player = BruteForce('blockudoku')
        self.start_engine = Button(root, text="Start Engine", bg='#ffffff', font=("calibri bold", 15), command=lambda: self.engine_move())
        self.start_engine.place(x=345, y=18)
    def close(self):
        """Window-close handler: confirm with the user, then shut down the
        engine subprocess and destroy the Tk root."""
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            self.engine_player.engine_kill()
            root.destroy()
    def display_board(self):
        """Redraw the whole UI: score, board cells, hover preview of the
        selected piece, occupied cells, and the selection strip."""
        self.score_text.config(text='Score: ' + str(self.score))
        self.grey.delete('all')
        self.selection.delete('all')
        local_small_board = self.state.small_board()
        # Checkerboard background: alternate shading per 3x3 block.
        for i in range(9):
            for j in range(9):
                if ((j // 3) % 2 + (i // 3) % 2) % 2 == 0:
                    self.grey.create_rectangle((50 * j, 50 * i, 50 + 50 * j, 50 + 50 * i),
                                               fill='#ffffff', outline='#d2d8df')
                else:
                    self.grey.create_rectangle((50 * j, 50 * i, 50 + 50 * j, 50 + 50 * i),
                                               fill='#e4e9ef', outline='#d2d8df')
        # Grey hover preview of the selected piece at the last hovered cell.
        if self.last != (-4, -4):
            for square in self.selectable_pieces[self.selected_piece]:
                j = square[1] + self.last[0]
                i = square[0] + self.last[1]
                if ((j // 3) % 2 + (i // 3) % 2) % 2 == 0:
                    self.grey.create_rectangle((50 * j, 50 * i, 50 + 50 * j, 50 + 50 * i),
                                               fill='#cdcccf', outline='#d2d8df')
                else:
                    self.grey.create_rectangle((50 * j, 50 * i, 50 + 50 * j, 50 + 50 * i),
                                               fill='#b6bbc1', outline='#d2d8df')
        # Occupied cells in blue.
        for i in range(9):
            for j in range(9):
                if local_small_board[j][i] == 1:
                    self.grey.create_rectangle((50 * i, 50 * j, 50 + 50 * i, 50 + 50 * j),
                                               fill='#2a57b4', outline='#001347')
        # Selection strip: skip already-placed (masked) pieces; pieces with no
        # legal placement are drawn in a faded colour.
        for i in range(3):
            if i in self.masked_pieces:
                continue
            color_piece = '#2a57b4'
            color_edge = '#001347'
            if len(self.state.generate_moves(self.selectable_pieces[i])) == 0:
                color_piece = '#d8e3ff'
                color_edge = '#cad2df'
            for square in self.selectable_pieces[i]:
                self.selection.create_rectangle((i * 150 + 13 + 25 * square[1], 13 + 25 * square[0],
                                                 i * 150 + 38 + 25 * square[1], 38 + 25 * square[0]),
                                                fill=color_piece, outline=color_edge)
        root.update()
def pick_piece(self, event):
if event.x // 150 not in self.masked_pieces:
self.selected_piece = event.x // 150
    def drop_piece(self, event):
        """Try to place the currently selected piece at the clicked cell."""
        # No piece selected: nothing to do.
        if not 3 > self.selected_piece >= 0:
            return
        # Convert the canvas click (50px cells) to padded-board coordinates;
        # the +2 compensates for the piece offsets being centred on (2, 2).
        move = (event.y // 50 + 2, event.x // 50 + 2)
        moves = self.state.generate_moves(self.selectable_pieces[self.selected_piece])
        if move in moves:
            print(move)
            self.state.place_piece(self.selectable_pieces[self.selected_piece], move)
            # Placed pieces are masked out of the selection strip.
            self.masked_pieces.append(self.selected_piece)
            # Score: one point per square placed plus any line/block bonus.
            self.score += self.state.clear_board() + len(self.selectable_pieces[self.selected_piece])
            # Clear the hover preview.
            self.last = (-4, -4)
            # All three pieces placed: deal a new round.
            if len(self.masked_pieces) == 3:
                self.reset()
            self.display_board()
            self.check_terminal()
            self.selected_piece = -1
        else:
            self.selected_piece = -1
    def reset(self):
        """Start a new round: clear the selection state and deal three new
        random pieces (with a short pause so the refresh is visible)."""
        self.masked_pieces = []
        self.selected_piece = -1
        self.selectable_pieces = [(), (), ()]
        # Redraw with an empty selection strip before dealing the new pieces.
        self.display_board()
        self.selectable_pieces = [random.choice(piece_list), random.choice(piece_list), random.choice(piece_list)]
        sleep(0.25)
        self.display_board()
def check_terminal(self):
terminal = True
for i in range(3):
if i not in self.masked_pieces:
if len(self.state.generate_moves(self.selectable_pieces[i])) != 0:
terminal = False
| |
#!/usr/bin/python
# Author: <NAME>
# CREATE DATE: 2014
# DESCRIPTION: extracts text from sqlite3 database tables and creates html or
# text outputs. Creates all output pages.
# NOTE: this script uses Python 2 syntax (print statements, the file()
# builtin below) and will not run unmodified under Python 3.
import sys,os
from optparse import OptionParser
import sqlite3 as lite
# Command-line options: paths, job identifiers and the motif matrices used
# for this TargetOrtho run.
parser = OptionParser()
parser.add_option('-i', '--output_dir', dest='output_dir',help='-d /data/newTargetOrtho/run/TargetOrtho_output/123232')
parser.add_option('-j', '--jobID',dest='jobID',help='-g 2012419')
parser.add_option('-g', '--jobTag',dest='jobTag',help='-j job_name')
parser.add_option( '-c', '--TargetOrtho_path', dest='TargetOrtho_path', help='-c /data/newTargetOrtho/run' )
parser.add_option('-m','--matrix_count',dest='matrix_count',help='matrix_count: -m 3')
parser.add_option('-n','--matrixNames',dest='matrixNamesList',help='-n name1*name2*name3')
parser.add_option('-q','--file',dest='queryFile_path',help='-f queryFile.txt')
parser.add_option('-w','--QueryOnly',dest='QueryOnly', help='=w True')
parser.add_option('-S','--speciesList',dest='speciesList',help='-S c_eleg-c_brig-c_bren-')
(options, args) = parser.parse_args()
# '-'-separated species codes; the trailing '-' produces an empty final
# element, hence the [:-1].
speciesList=options.speciesList
speciesList=speciesList.split('-')[:-1]
QueryOnly=options.QueryOnly
queryFile_path=options.queryFile_path
matrix_count=int(options.matrix_count)
TargetOrtho_path=options.TargetOrtho_path
output_dir=options.output_dir
jobID=options.jobID
jobTag=options.jobTag
# '*'-separated motif names; trailing '*' again yields an empty last element.
matrixNamesList=options.matrixNamesList.split('*')[:-1]
# Per-job scratch database; isolation_level=None gives autocommit mode.
conn = lite.connect('%s/run/sqlite_tmp_files/%s_TargetOrtho.db' %(TargetOrtho_path,jobID),isolation_level=None)
cursor = conn.cursor()
# Maps the short species code to (display name, genome directory name).
NameDic={'c_eleg':('C. elegans','c_elegans'),
         'c_bren':('C. brenneri','c_brenneri'),
         'c_brig':('C. briggae','c_briggsae'),
         'c_rema':('C. remanei','c_remanei'),
         'c_japo':('C. japonica','c_japonica'),
         'd_mel':('D. melanogaster','d_melanogaster'),
         'd_sec':('D. sechellia','d_sechellia'),
         'd_sim':('D. simulans','d_simulans'),
         'd_yak':('D. yakuba','d_yakuba'),
         'd_ere':('D. erecta','erecta')}
print speciesList,'speciesList'
def color(seq):
    """Return *seq* as HTML with each base wrapped in a coloured span.

    G=orange, A=red, C=blue, T=green, N/X=black.
    """
    colorDic={'G':'orange\">G','A':'red\">A','C':'blue\">C','T':'green\">T','N':'black\">N','X':'black\">X'}
    spans = []
    for base in seq:
        spans.append('<span style=\"color:%s</span>' % colorDic[base])
    return ''.join(spans)
def mvTablesFromSqltoTEXT(table,m,db):
    """Export one sqlite table to a tab-delimited text file.

    table: name of the table to export.
    m: 1-based matrix index selecting the per-matrix Results_text output
       directory, or '' to write into the job output directory directly.
    db: path of a database to temporarily attach as schema 'a', or '' to use
        only the already-connected database.

    Relies on the module-level `cursor`, `output_dir` and `matrixNamesList`.
    """
    if db!='':cursor.execute("attach '%s' as a" %(db))
    if m!='':dir="%s/%s/Results_text" %(output_dir,matrixNamesList[m-1])
    else:dir=output_dir
    def exportTable(table):
        # `file()` is the Python 2 builtin equivalent of open().
        outfile=file('%s/%s.txt' %(dir,table),'w')
        # Header row: column names from PRAGMA table_info.
        [outfile.write('%s\t' %str(i).strip()) for i in [col[1] for col in cursor.execute("PRAGMA table_info(%s)" %table)]]
        outfile.write('\n')
        # Ranked result tables are written ordered by rank; everything else
        # is dumped in natural order. (The list comprehensions are used for
        # their write() side effects.)
        if ('top_ranked_hits' in table or 'All_conserved' in table or 'QueryList' in table) and ('CRM' not in table):
            [([outfile.write('%s\t' %str(i).strip()) for i in row],outfile.write('\n')) for row in cursor.execute("select * from %s order by rank ASC " %(table))]
        else:
            [([outfile.write('%s\t' %str(i).strip()) for i in row],outfile.write('\n')) for row in cursor.execute("select * from %s" %(table))]
    exportTable(table)
    if db!='':cursor.execute("detach a")
def galaxy_results_file(matrix_count):
    """Write the top-level index.html linking all TargetOrtho result files.

    matrix_count -- number of input motifs; when >1 an extra CRM
    (cis-regulatory module) section is emitted before the per-motif
    sections.

    Relies on module-level globals: output_dir, jobTag, jobID,
    matrixNamesList, queryFile_path, QueryOnly, speciesList.

    Fix over the original: open() replaces the removed Python-2 file()
    builtin and the handle is now closed, so the HTML is reliably flushed.
    """
    outfile = open('%s/index.html' % (output_dir), 'w')
    outfile.write("<html>\n<head>\n<title>TargetOrthoV1.0 Results</title>\n<h1>TargetOrtho v1.0 Results</h1>\n<p>\n\
job name: %s <br>\n\
jobID: %s <br>\n\
motif names: %s<br>\n</p>\n</head>\n<body>\n<p>\n" %(jobTag,jobID,matrixNamesList))
    outfile.write("<a href=\"Download_%s.tar.gz\"><img src=\"download_icon.png\" alt=\"Download full results directory\" width=\"100\" height=\"100\"></a><br>" %(jobID))
    outfile.write("<a href=\"Download_%s.tar.gz\">Download full results directory (tar.gz) </a><br><br>\n" %(jobID))
    # pref is '.' only for the per-motif links when a CRM section exists.
    pref = ''
    if matrix_count > 1:
        ref1 = ''
        ref2 = '<a href=\"%s_CRM_results.html\">CRM_results.html (whole genome)</a>/<a href=\"%s_CRM_results.txt\">(text)</a>:<br> The subset of motif matches and their associated genes in which at least one motif match for each input motif is found in the same gene region. Each line in this table shows a unique combination of motif matches per gene. Each input motif match rank is shown where the ranks is based on the cumulative site score for the motif match in the CRM. Results are ordered by average motif rank. See <a href="http://hobertlab.org/TargetOrtho/5_OACisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria></a> for an explanation of column headers. <br><br>\n' %(jobID,jobID)
        ref3 = '<a href=\"%s_top_ranked_per_gene_CRM.html\">CRM_results_top_ranked_per_gene.html</a>/<a href=\"%s_top_ranked_per_gene_CRM.txt\">(text)</a>:<br> Data for the top ranked motif match per gene. For the complete set of motif match data, see the CRM_results table or the QueryListCRM table if the Query Only option was True. Each row shows data for the best ranked motif match per gene. Each input motif match rank is shown where the ranks is based on the cumulative site score for the motif match in the CRM. Results are ordered by average motif rank. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers. <br><br><br><br>\n' %(jobID,jobID)
        if queryFile_path != 'None' and queryFile_path != None:
            ref1 = '<a href=\"%s_QueryListResults_CRM.html\">CRM_QueryList_results.html</a>/<a href=\"%s_QueryListResults_CRM.txt\">(text)</a>:<br> The subset of result CRM data in which each associated gene is present in the user input query list file. The subset of motif matches and their associated genes in which at least one motif match for each input motif is found in the same gene region. Each line in this table shows a unique combination of motif matches per gene. Each input motif match rank is shown where the ranks is based on the cumulative site score for the motif match in the CRM. Results are ordered by average motif rank. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers. <br><br>\n' %(jobID,jobID)
        # Query-only runs have no whole-genome CRM table to link.
        if QueryOnly == 'True': ref2 = ''
        outfile.write('<h2>Cis Regulatory Module (CRM) Results</h2>\n')
        outfile.write('%s\n%s\n%s\n' %(ref1,ref2,ref3))
        outfile.write('<h3>Individual motif results</h3>\n')
        pref = '.'
    for n in range(matrix_count):
        outfile.write('<B>Results for motif %s</B>: %s<br>\n' %(n+1,matrixNamesList[n]))
        ref1 = ''
        ref2 = '<a href=\"%s%s_All_conserved_hits%s_ranked.html\">All_conserved_hits_ranked.html (whole genome)</a>/<a href=\"%s%s_All_conserved_hits%s_ranked.txt\">(text)</a>:<br> Each row shows data associated with one motif match so that data for any gene associated with multiple motif matches will be shown in separate rows. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers. <br><br>\n' %(pref,jobID,n+1,pref,jobID,n+1)
        ref3 = '<a href=\"%s%s_top_ranked_hits%s.html\">Top_ranked_hits_per_gene.html</a>/<a href=\"%s%s_top_ranked_hits%s.txt\">(text)</a>:<br>Data for the top ranked motif match per gene. For the complete set of motif match data, see the All conserved hits ranked table (or the QueryList Results table if the query only option was used). Each row shows data for the best ranked motif match per gene. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers. <br><br>\n' %(pref,jobID,n+1,pref,jobID,n+1)
        if queryFile_path != 'None' and queryFile_path != None:
            ref1 = '<a href=\"%s%s_QueryListResults%s.html\">Query List Results.html</a>/<a href=\"%s%s_QueryListResults%s.txt\">(text)</a>:<br>The subset of result data in which each associated gene is present in the user input query list file.Each row shows data associated with one motif match so that data for any gene associated with multiple motif matches will be shown in separate rows. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers. <br><br>\n' %(pref,jobID,n+1,pref,jobID,n+1)
        if QueryOnly == 'True': ref2 = ''
        outfile.write("%s%s%s\n" %(ref1,ref2,ref3))
        outfile.write("<a href=\"%s%s_%s_%s.bed\">GenomeBrowserFile.bed</a>:<br> genome browser track file. Each site is assigned a color score correlated with the log-odds score of the motif match so that stronger predicted binding sites are colored darker than weaker binding sites. Each motif match score is normalized between 0 and 1000 where 1000 represents the strongest possible binding site score. <br><br><a href=\"%s%s_ResultsSummary%s.html\">ResultsSummary.html</a>:<br> Summary of TargetOrtho results<br><br>\n" %(pref,jobID,speciesList[0],matrixNamesList[n],pref,jobID,n+1))
    outfile.write("<a href=\"%s_inputSummary.txt\">inputSummary.txt</a>:<br> Summary of TargetOrtho input parameters<br><br>\n" %(jobID))
    outfile.write("<a href=\"%sTargetOrthoStdout.out\">job_output.txt</a>: Standard output from job execution<br>\n" %(jobID))
    outfile.write("If you use TargetOrtho, please cite:\n <NAME>, <NAME>, <NAME> and <NAME>. TargetOrtho: A Phylogenetic Footprinting Tool to Identify Transcription Factor Targets. Genetics May 1, 2014 vol. 197 no. 1 61-76\n")
    # Close explicitly so the HTML is flushed to disk (was leaked before).
    outfile.close()
def mkHTMLtables(table,m,db):
if db !='':cursor.execute("attach '%s' as c" %(db))
if m !='':dir="%s/%s/Results_html" %(output_dir,matrixNamesList[m-1])
else:dir=output_dir
outfile=file('%s/%s.html' %(dir,table),'w')
if ('CRM' in table or 'per_gene_CRM' in table or 'QueryListResults_CRM' in table) and (matrix_count>1):
if matrix_count >2:rank3=' +motif3_site_rank'
else:rank3=''
if matrix_count >3:rank4=' +motif4_site_rank'
else:rank4=''
if matrix_count >4:rank5=' +motif5_site_rank'
else:rank5=''
command="""select * from %s order by (motif1_site_rank+motif2_site_rank%s%s%s) ASC"""%(table,rank3,rank4,rank5)
rows=list([row for row in cursor.execute(command)])
else:
if ('top_ranked_hits' in table or 'QueryList' in table or 'All_conserved' in table) and ('CRM' not in table):
command="""select * from %s order by rank ASC""" %table
rows=list([row[1:] for row in cursor.execute(command)])
else:
command="""select * from %s""" %table
rows=list([row for row in cursor.execute(command)])
outfile.write("<html>\n<head>\n<title>TargetOrthoV1.0 Results</title>\n<script src=\".sorttable.js\"></script>\n")
outfile.write("<script type=\"text/javascript\" src=\".jquery-1.8.2.min.js\"></script>\n")
num=table[-1]
try:matrixName="matrix_name: %s" %(matrixNamesList[int(num)-1])
except:matrixName=''
t=table
#assign table descriptions for html headers
if 'CRM' in t:
desc=''
if 'if top' in t:desc='Data for the top ranked motif match per gene. For the complete set of motif match data, see the CRM_results table or the QueryListCRM table if the Query Only option was True. Each row shows data for the best ranked motif match per gene. Each input motif match rank is shown where the ranks is based on the cumulative site score for the motif match in the CRM. Results are ordered by average motif rank. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers.'
elif 'Query' in t:desc='The subset of result CRM data in which each associated gene is present in the user input query list file. The subset of motif matches and their associated genes in which at least one motif match for each input motif is found in the same gene region. Each line in this table shows a unique combination of motif matches per gene. Each input motif match rank is shown where the ranks is based on the cumulative site score for the motif match in the CRM. Results are ordered by average motif rank. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers.'
else:
desc='The subset of motif matches and their associated genes in which at least one motif match for each input motif is found in the same gene region. Each line in this table shows a unique combination of motif matches per gene. Each input motif match rank is shown where the ranks is based on the cumulative site score for the motif match in the CRM. Results are ordered by average motif rank. See <a href="http://hobertlab.org/TargetOrtho/5_CisOrtho_RankingCriteria.pdf" target="_blank">TargetOrtho Ranking Criteria</a> for an explanation of column headers.'
else:
if 'ResultsSummary' in t:desc='Summary of TargetOrtho results'
if 'top_ranked' in t:desc='Data for the top ranked motif match per gene. For the complete set of motif match data, see the All conserved hits ranked table.. Each row shows data for the best ranked motif match per gene. See | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#add the path of the twitter egg
import sys
egg_path = '/home/users/web/........./cgi-bin/PyPkg/twitter-1.14.3-py2.7.egg'
sys.path.append(egg_path)
# Import the CGI, string, sys, and md5crypt modules
import json, urllib2, re, time, datetime, sys, cgi, os
import sqlite3
import MySQLdb as mdb
import string, random
from urlparse import urlparse
from twitter import *
from tempfile import TemporaryFile
from collections import *
from py_site_header import *
def thisPYfile():
    """Name of this CGI script, used as the HTML form's ACTION target."""
    return 'twit_analytics.py'
def define_keys():
    """Return the Twitter OAuth credentials as a 4-tuple.

    Order: (consumer key, consumer secret, access token, access token
    secret).  The values here are redacted placeholders.
    """
    consumer_key = "......................"
    consumer_secret = "...................."
    access_token = "..........................."
    access_token_secret = "...................................."
    return consumer_key, consumer_secret, access_token, access_token_secret
def start_database_to_store_tweets():
    """Open the MySQL database used to store tweets.

    Returns (cursor, True, connection) on success, or (error, False) on
    failure.  NOTE(review): the two outcomes have different tuple
    lengths; callers must check the boolean before unpacking.

    Fix: ``except mdb.Error, e`` is Python-2-only syntax; ``as e`` is
    valid on Python 2.6+ and Python 3.
    """
    dbhost = "......................" # Host name
    dbuser = "......." # Mysql username
    dbpswd = "......." # Mysql password
    dbname = '........' # MySql db
    try:
        conn = mdb.connect(host=dbhost, user=dbuser, passwd=dbpswd, db=dbname)
        c = conn.cursor()
        return c, True, conn
    except mdb.Error as e:
        return e, False
def site_header(st=''):
    """Emit the page header: HTTP headers + opening HTML, then the title.

    st -- title text forwarded to site_title().
    """
    site_start()
    # Close the "wrap" div opened inside site_start() before the banner.
    print '</div>'
    site_title(st)
def site_start():
    """Print the CGI Content-type header and the opening HTML boilerplate.

    Everything below is one literal block: response header, <head> with
    JS/CSS includes, and the opening <body> tag.
    """
    print '''
Content-type:text/html\r\n\r\n
<html>
<div class="wrap" id="wrap_id">
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Financial Models</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script type="text/javascript" src="../js/js_functions.js"></script>
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3.css">
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3-theme-indigo.css">
<link href='http://code.ionicframework.com/ionicons/2.0.1/css/ionicons.min.css' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.4.0/css/font-awesome.min.css">
<style>
a:link { text-decoration: none; }
a:visited { text-decoration: none; }
a:hover { text-decoration: none; }
a:active { text-decoration: none; }
</style>
</head>
<body>
'''
def site_title(s_title):
    """Print the jumbo title banner with *s_title* as its text."""
    print '''
<div id="site_title" class="w3-container w3-theme-d4 w3-center w3-padding-jumbo">
<p> </p>
<div class="w3-row w3-jumbo">
'''
    # The title text is emitted between the opening and closing markup.
    print s_title
    print '''
<br>
</div>
</div>
'''
def site_footer():
    """Print the page footer (copyright with current year, links) and
    close the form/body/wrap-div/html elements opened earlier."""
    import datetime
    # Current year keeps the copyright range up to date automatically.
    curr_year = datetime.datetime.now().strftime("%Y")
    print '<div class="w3-container w3-border-top" style="text-align:center">'
    print '<p> &copy 2013-'+curr_year+' | '
    print '<a>Contact Us</a> </p>'
    print '<p><a href="./termsofuse.py">Terms of Use</a> |',
    print '<a href="./home.py#aboutus">About Us</a> </p>'
    print '</div>'
    print '</form>'
    print ' </body>'
    print ' </div>' #for the div id = wrap
    print ' </html>'
def html_start():
    # Start the HTML block with this page's fixed title.
    site_header('Twitter Analytics')
def html_end():
    # Emit the shared footer / closing tags.
    site_footer()
def top_list(in_l, topx):
    """Return the *topx* most frequent items of *in_l* as (count, item)
    pairs, sorted descending (ties broken by reverse item order).

    Hand-rolled frequency count because collections.Counter is not
    available in Python 2.6.
    """
    freq_by_item = {}
    for item in in_l:
        freq_by_item[item] = freq_by_item.get(item, 0) + 1
    pairs = [(count, item) for item, count in freq_by_item.items()]
    pairs.sort(reverse=True)
    return pairs[:topx]
def text_sanitize(in_text):
    """Strip quoting/escape characters so tweet text can be embedded in
    the generated HTML and SQL strings.

    Removes all single quotes, then blanks out double quotes,
    backslashes and '=' signs.

    Fix: the original chained two further .replace calls on "''" and
    "' '", but those patterns can never occur after every single quote
    has been removed -- dead code, dropped.  Behavior is unchanged.
    """
    out_text = in_text.replace("'", "")
    out_text = out_text.replace("\"", " ").replace("\\", " ").replace("=", " ")
    return out_text
def generate_form():
    """Render the landing page: a form asking for a Twitter screen name.

    The hidden 'action=display' field routes the POST back to this
    script's display branch.
    """
    html_start()
    print '<div id="body_sty">'
    print '<p>Explore the world of Twitter and discover information about twitter users, their friends and followers as well as lexical analysis of the tweets.</p>'
    print '<TABLE style="display: block;" BORDER = 0>'
    print "<FORM METHOD = post ACTION=\'"+thisPYfile()+"\'>"
    print "<TR><TH align=\"left\">Screen Name:</TH><TD><INPUT type = text name=\"scn_name\"></TD><TR>"
    print "</TABLE>"
    print "<INPUT TYPE = hidden NAME = \"action\" VALUE = \"display\">"
    print "<INPUT TYPE = submit VALUE = \"Enter\">"
    print "</FORM>"
    print '</div>'
    html_end()
def user_public_info(find_id_for):
    """Look up *find_id_for* via the Twitter users/lookup endpoint and
    print their public profile data as an HTML list.

    Swallows all lookup errors and prints a generic message instead.
    """
    #html_start()
    #this line gets the public info for the user
    print '<h2>'+'\nUsers Public Info'+'</h2>'
    do_rest_of_module = 0
    try:
        # define_keys() returns (key, secret, token, token_secret);
        # OAuth wants (token, token_secret, key, secret) -- hence the
        # index order 2, 3, 0, 1.
        t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
        response = t.users.lookup(screen_name=find_id_for)
        do_rest_of_module = 1
    except:
        print '<p>', 'Error getting public data' ,'</p>'
    if do_rest_of_module == 1:
        print '<h3>'+'\nBasic Info for: ', find_id_for+'</h3>'
        print '<p>', '\tKey Data' ,'</p>'
        print '<ul>'
        print '<li>ID:',response[0]['id'],'</li>'
        print '<li>Screen Name:',response[0]['screen_name'],'</li>'
        print '<li>Name:',response[0]['name'] ,'</li>'
        print '<li>Location:',response[0]['location'] ,'</li>'
        print '<li>Friends:',response[0]['friends_count'] ,'</li>'
        print '<li>Followers:',response[0]['followers_count'] ,'</li>'
        print '<li>Messages posted:',response[0]['statuses_count'] ,'</li>'
        print '</ul>'
def get_last200_tweets(in_user):
    """Fetch up to 200 recent tweets for *in_user* and print usage stats.

    Writes HTML fragments to the module-global temp file ``t2`` (Python 2
    ``print >>`` redirection).  Returns a 3-tuple
    (top10users, base_twit_list, data_for_plots) on success.
    NOTE(review): the return sits inside the success branch, so the
    function implicitly returns None when the lookup fails or yields no
    tweets -- callers that unpack the result must handle that.
    """
    #this method will get the last 200 tweets of the user
    #rate limit is 180 requests per 15 min window
    #print '<h2>'+'\nAnalysis of Past Tweets for',in_user,'</h2>'
    do_rest_of_module = 0
    try:
        t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
        response=t.statuses.user_timeline(screen_name=in_user,count=200)
        #print '<p>', '\tResponses left:', response.headers['x-rate-limit-remaining'] ,'</p>'
        #print '<p>Line 201. Response length: ',len(response),'</p>'
        if len(response) > 0:
            do_rest_of_module = 1
        else:
            print '<p>', 'No info found for: ',in_user ,'</p>'
    except:
        print '<p>', 'Error getting tweets info for: ',in_user ,'</p>'
    if do_rest_of_module == 1:
        base_twit_list = []
        data_for_plots = []
        x = response
        #x = [element.lower() for element in response] #x is list - LOWER CASE
        hashtag_list = [] #start an empty list of hashtags
        at_list = [] #start an empty list of twitter IDs
        re_twt_list = [] #start a list of retweets
        #get the start and end dates
        # The slices below pick the day [8:10], month name [4:7] and year
        # [26:30] out of Twitter's fixed-width 'created_at' string.
        sdf = x[0]['created_at'] #get the full date of last tweet
        start_date = datetime.date(int(sdf[26:30]), int(time.strptime(sdf[4:7],'%b').tm_mon), int(sdf[8:10]))
        edf = x[len(x)-1]['created_at'] #get the full date of first tweet
        end_date = datetime.date(int(edf[26:30]), int(time.strptime(edf[4:7],'%b').tm_mon), int(edf[8:10]))
        #end_date = str(edf[8:10])+'-'+str(edf[4:7])+'-'+str(edf[26:30])
        twit_day_range = (start_date-end_date).days
        avg_twit_day = (1.0*len(x)/max(1,twit_day_range))
        print >> t2, '<h4>'+'Tweet Stats for ', in_user+'</h4>'
        #print x[0]
        #print '\tStats for last',len(x), 'tweets by',in_user
        # Fall back to the screen name when the display name is not ASCII.
        fix_nm = x[0]['user']['screen_name']
        try:
            if str(x[0]['user']['name']).decode('ascii'): fix_nm = str(x[0]['user']['name'])
        except:
            #print 'something wrong with the name for ', x[0]['user']['name']
            fix_nm = x[0]['user']['screen_name']
        print >> t2, '<ul>'
        print >> t2, '<li>Key Personal Data</li>'
        print >> t2, '<ul>'
        print >> t2, '<li>ID:',x[0]['user']['id'],'</li>'
        print >> t2, '<li>Screen Name:',x[0]['user']['screen_name'],'</li>'
        print >> t2, '<li>Name:',fix_nm,'</li>'
        #print '<li>Location:',x[0]['user']['location'],'</li>'
        print >> t2, '<li>Friends:',x[0]['user']['friends_count'] ,'</li>'
        print >> t2, '<li>Followers:',x[0]['user']['followers_count'] ,'</li>'
        print >> t2, '<li>Messages posted:',x[0]['user']['statuses_count'] ,'</li>'
        # max(1, ...) guards against division by zero for 0-friend users.
        foll_frnd_rat = 1.0*x[0]['user']['followers_count'] / max(1,x[0]['user']['friends_count'])
        print >> t2, '<li>Follower to Friend Ratio:', '%.1f' %(foll_frnd_rat),'</li>'
        print >> t2, '</ul>'
        print >> t2, '</ul>'
        print >> t2, '<ul>'
        print >> t2, '<li>',len(x),'tweets in past',twit_day_range,'days',
        print >> t2, '(',end_date,'to',start_date,')' ,'</li>'
        print >> t2, '<li>', 'Avg of ','%.1f' %(avg_twit_day),'tweets per day' ,'</li>'
        #add info to the data for charts list
        data_for_plots.extend([x[0]['user']['screen_name']])
        data_for_plots.extend([x[0]['user']['friends_count']])
        data_for_plots.extend([x[0]['user']['followers_count']])
        data_for_plots.extend([x[0]['user']['statuses_count']])
        data_for_plots.extend([twit_day_range])
        data_for_plots.extend([len(x)])
        for item in x:
            #the encode(ascii,ignore) will convert text to ascii and ignore other
            td = item['created_at']
            twt_date = datetime.date(int(td[26:30]), int(time.strptime(td[4:7],'%b').tm_mon), int(td[8:10]))
            fix_nm = item['user']['screen_name']
            try:
                if str(item['user']['name']).encode('utf8','ignore'): fix_nm = str(item['user']['name'])
            except:
                fix_nm = item['user']['screen_name']
            try:
                fix_text = text_sanitize(item['text'].encode('utf8','ignore'))
            except:
                #print 'something wrong with the text in tweet for: ',in_user
                fix_text = 'Did not process'
            #print fix_text,'\t',type(item['text']),'\t',len(item['text']),'\t',item['text'],
            twt_list_data = [twt_date] + [fix_nm.lower()] + [fix_text]
            try:
                base_twit_list.append(twt_list_data)
            except:
                print '<p>Unknown Error:', type(twt_list_data), twt_list_data, '</p>'
            textitem = fix_text
            newhastags = re.findall('[#]\w+',textitem)
            newatitems = re.findall('[@]\w+',textitem)
            re_tweets = re.findall('RT',textitem)
            #before adding to the final lists, convert the hashtags and atitems
            #to lower case. This will avoid issues of double counting same names
            newhastags = [hti.lower() for hti in newhastags]
            newatitems = [ati.lower() for ati in newatitems]
            #Now add to the list.
            #Use EXTEND function that adds elements to the list rather than another list.
            hashtag_list.extend(newhastags)
            at_list.extend(newatitems)
            re_twt_list.extend(re_tweets)
        #now try to find some patterns in the last 200 tweets
        #print 'use the collections library to find out the top 5'
        #Version 2.6 of python does not support Counters within collections
        #py2.6 hashcollect = collections.Counter(hashtag_list)
        #py2.6 atcollect = collections.Counter(at_list)
        totalretweets = len(re_twt_list)
        retwpercent = (1.0 * totalretweets / max(1,len(x)) ) * 100
        top10users = []
        #print '\n.............................' ,'</p>'
        print >> t2, '<li>', '\t',"%.2f%%" % retwpercent, 'are retweets (',totalretweets,'of a total of',len(x),'tweets)' ,'</li>'
        print >> t2, '<ul>'
        print >> t2, '<li>',(len(x)-totalretweets), 'tweets in ',twit_day_range,' days (without retweets)</li>'
        print >> t2, '<li>','Avg of ','%.1f' %( 1.0*(len(x)-totalretweets)/max(twit_day_range,1) ),'tweets per day (without retweets)</li>'
        print >> t2, '</ul></ul>'
        data_for_plots.extend([totalretweets])
        print >> t2, '<ul>'
        print >> t2, '<li>', '\tHastags referenced over past',len(x),'tweets = ',len(hashtag_list) ,'</li>'
        print >> t2, '<li>', '\t10 Most referenced hashtags' ,'</li>'
        print >> t2, '<ul>'
        #py2.6 for h_item in hashcollect.most_common(10): #can't use in python 2.6
        for h_item in top_list(hashtag_list,10):
            print >> t2, '<li>',text_sanitize(h_item[1]),'|',h_item[0] ,'</li>'
        print >> t2, '</ul></ul>'
        print >> t2, '<ul>'
        print >> t2, '<li>', '\tTwitter IDs referenced over past',len(x),'tweets = ',len(at_list) ,'</li>'
        print >> t2, '<li>', '\t10 Most referenced Tweeter IDs' ,'</li>'
        print >> t2, '<ul>'
        #py2.6 for at_item in atcollect.most_common(10):
        for at_item in top_list(at_list,10):
            print >> t2, '<li>', '\t\t',text_sanitize(at_item[1]),'|',at_item[0],'</li>'
            #add the list of users to the top10user list
            top10users.append(at_item[1].replace('@',''))
        print >> t2, '</ul></ul>'
        #print '<p>Twit list:',type(base_twit_list),'\t',len(base_twit_list),'</p>'
        return top10users, base_twit_list, data_for_plots
def display_data(scn_name):
html_start()
print '<div id="body_sty">'
print '<h4>Data shown for '+scn_name.upper()+' and 10 other users most referenced in '+scn_name.upper()+'\'s tweets.</h4><hr>'
user_to_check = scn_name
if user_to_check[0] == '@':
user_raw = user_to_check
user_to_check = user_raw.replace('@','')
# the following lines get the user info
# -- this is response limited to 180
#user_public_info(user_to_check)
max_items_to_show = 200
max_tweets_to_get = 200
#if temp file exists, close it
global t2
try:
t2.close()
except:
print ''
#open the temp file
t2=TemporaryFile()
print >> | |
# Copyright (c) 2015-2019 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
"""
Defines load zone parameters for the Switch model.
INPUT FILE INFORMATION
Import load zone data. The following tab-separated files are
expected in the input directory. Their index columns need to be on
the left, but the data columns can be in any order. Extra columns
will be ignored during import, and optional columns can be dropped.
Other modules (such as local_td) may look for additional columns in
some of these files. If you don't want to specify data for any
optional parameter, use a dot . for its value. Optional columns and
files are noted with a *.
load_zones.csv
LOAD_ZONE, zone_ccs_distance_km*, zone_dbid*
loads.csv
LOAD_ZONE, TIMEPOINT, zone_demand_mw
zone_coincident_peak_demand.csv*
LOAD_ZONE, PERIOD, zone_expected_coincident_peak_demand
"""
import os
from pyomo.environ import *
from switch_model.reporting import write_table
from switch_model.tools.graph import graph
dependencies = 'switch_model.timescales'
optional_dependencies = 'switch_model.transmission.local_td'
def define_dynamic_lists(mod):
    """Create the two dynamic load-balance registries on *mod*.

    ``Zone_Power_Injections`` and ``Zone_Power_Withdrawals`` start out as
    empty lists.  Other modules append component names to them -- each
    component must be indexed by [zone, timepoint] and be in MW -- and
    the power-balance constraint later equates the two sums for every
    zone and timepoint:

        sum(Zone_Power_Injections[z,t]) == sum(Zone_Power_Withdrawals[z,t])
    """
    for registry in ("Zone_Power_Injections", "Zone_Power_Withdrawals"):
        setattr(mod, registry, [])
def define_components(mod):
    """
    Augments a Pyomo abstract model object with sets and parameters that
    describe load zones and associated power balance equations. Unless
    otherwise stated, each set and parameter is mandatory.

    LOAD_ZONES is the set of load zones. Each zone is effectively modeled as a
    single bus connected to the inter-zonal transmission network (assuming
    transmission is enabled). If local_td is included, the central zonal bus,
    is connected to a "distributed bus" via local transmission and
    distribution that incurs efficiency losses and must be upgraded over time
    to always meet peak demand. Load zones are abbreviated as zone in
    parameter names and as z for indexes.

    zone_demand_mw[z,t] describes the power demand from the high voltage
    transmission grid each load zone z and timepoint t. This will either go
    into the Zone_Power_Withdrawals or the Distributed_Power_Withdrawals power
    balance equations, depending on whether the local_td module is included
    and has defined a distributed node for power balancing. If the local_td
    module is excluded, this value should be the total withdrawals from the
    central grid and should include any distribution losses. If the local_td
    module is included, this should be set to total end-use demand (aka sales)
    and should not include distribution losses. zone_demand_mw must be
    non-negative.

    zone_dbid[z] stores an external database id for each load zone. This
    is optional and defaults to the name of the load zone. It will be
    printed out when results are exported.

    zone_ccs_distance_km[z] describes the length of a pipeline in
    kilometers that would need to be built to transport CO2 from a load
    zones central bus to the nearest viable CCS reservoir. This
    parameter is optional and defaults to 0.

    EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS is a set of load zones and
    periods (z,p) that have zone_expected_coincident_peak_demand specified.

    zone_expected_coincident_peak_demand[z,p] is an optional parameter than can
    be used to externally specify peak load planning requirements in MW.
    Currently local_td and planning_reserves determine capacity requirements
    use zone_expected_coincident_peak_demand as well as load timeseries. Do not
    specify this parameter if you wish for the model to endogenously determine
    capacity requirements after accounting for both load and Distributed
    Energy Resources (DER).

    Derived parameters:

    zone_total_demand_in_period_mwh[z,p] describes the total energy demand
    of each load zone in each period in Megawatt hours.
    """
    mod.LOAD_ZONES = Set(dimen=1, input_file='load_zones.csv')
    mod.ZONE_TIMEPOINTS = Set(dimen=2,
        initialize=lambda m: m.LOAD_ZONES * m.TIMEPOINTS,
        doc="The cross product of load zones and timepoints, used for indexing.")
    mod.zone_demand_mw = Param(
        mod.ZONE_TIMEPOINTS,
        input_file="loads.csv",
        within=NonNegativeReals)
    mod.zone_ccs_distance_km = Param(
        mod.LOAD_ZONES,
        within=NonNegativeReals,
        input_file="load_zones.csv",
        default=0.0)
    mod.zone_dbid = Param(
        mod.LOAD_ZONES,
        input_file="load_zones.csv",
        default=lambda m, z: z)
    mod.min_data_check('LOAD_ZONES', 'zone_demand_mw')
    # When the optional local_td module is loaded it defines
    # Distributed_Power_Withdrawals; register demand there so distribution
    # losses are modeled.  Otherwise demand is withdrawn directly from the
    # central zonal bus.
    try:
        mod.Distributed_Power_Withdrawals.append('zone_demand_mw')
    except AttributeError:
        mod.Zone_Power_Withdrawals.append('zone_demand_mw')
    mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS = Set(
        dimen=2, within=mod.LOAD_ZONES * mod.PERIODS,
        input_file="zone_coincident_peak_demand.csv",
        input_optional=True,
        doc="Zone-Period combinations with zone_expected_coincident_peak_demand data.")
    mod.zone_expected_coincident_peak_demand = Param(
        mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS,
        input_file="zone_coincident_peak_demand.csv",
        within=NonNegativeReals)
    # Derived: per-period MWh obtained by weighting each timepoint's MW
    # demand by its duration weight.
    mod.zone_total_demand_in_period_mwh = Param(
        mod.LOAD_ZONES, mod.PERIODS,
        within=NonNegativeReals,
        initialize=lambda m, z, p: (
            sum(m.zone_demand_mw[z, t] * m.tp_weight[t]
                for t in m.TPS_IN_PERIOD[p])))
    # Make sure the model has duals enabled since we use the duals in post_solve()
    mod.enable_duals()
def define_dynamic_components(mod):
    """
    Adds components to a Pyomo abstract model object to enforce the
    first law of thermodynamics at the level of load zone buses. Unless
    otherwise stated, all terms describing power are in units of MW and
    all terms describing energy are in units of MWh.

    Zone_Energy_Balance[load_zone, timepoint] mandates conservation of
    energy in every load zone and timepoint: the sum over the components
    registered in Zone_Power_Injections must equal the sum over those in
    Zone_Power_Withdrawals (each indexed by (z, t), in MW).  The
    tp_duration_hrs factor cancels on both sides and is omitted.
    """
    def _zone_energy_balance_rule(m, z, t):
        injections = sum(
            getattr(m, name)[z, t] for name in m.Zone_Power_Injections)
        withdrawals = sum(
            getattr(m, name)[z, t] for name in m.Zone_Power_Withdrawals)
        return injections == withdrawals

    mod.Zone_Energy_Balance = Constraint(
        mod.ZONE_TIMEPOINTS, rule=_zone_energy_balance_rule)
def post_solve(instance, outdir):
    """
    Exports load_balance.csv, load_balance_annual_zonal.csv, and load_balance_annual.csv.
    Each component registered with Zone_Power_Injections and Zone_Power_Withdrawals will
    become a column in these .csv files. As such, each column represents a power injection
    or withdrawal and the sum of across all columns should be zero. Note that positive
    terms are net injections (e.g. generation) while negative terms are net withdrawals
    (e.g. load).

    load_balance.csv contains the energy balance terms for for every zone and timepoint.
    We also include a column called normalized_energy_balance_duals_dollar_per_mwh
    that is a proxy for the locational marginal pricing (LMP). This value represents
    the incremental cost per hour to increase the demand by 1 MW (or equivalently
    the incremental cost of providing one more MWh of energy). This is not a perfect
    proxy for LMP since it factors in build costs etc.

    load_balance_annual_zonal.csv contains the energy injections and withdrawals
    throughout a year for a given load zone.

    load_balance_annual.csv contains the energy injections and withdrawals
    throughout a year across all zones.
    """
    # Per-zone, per-timepoint balance.  Withdrawals are negated so every
    # row sums to zero; the dual is normalized to base-year dollars.
    write_table(
        instance, instance.LOAD_ZONES, instance.TIMEPOINTS,
        output_file=os.path.join(outdir, "load_balance.csv"),
        headings=("load_zone", "timestamp", "normalized_energy_balance_duals_dollar_per_mwh",) + tuple(
            instance.Zone_Power_Injections +
            instance.Zone_Power_Withdrawals),
        values=lambda m, z, t:
        (
            z,
            m.tp_timestamp[t],
            m.get_dual(
                "Zone_Energy_Balance",
                z, t,
                divider=m.bring_timepoint_costs_to_base_year[t]
            )
        )
        + tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections)
        + tuple(-getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals)
    )

    def get_component_per_year(m, z, p, component):
        """
        Returns the weighted sum of component across all timepoints in the given period.
        The components must be indexed by zone and timepoint.
        """
        return sum(getattr(m, component)[z, t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p])

    # Annual totals per zone.
    write_table(
        instance, instance.LOAD_ZONES, instance.PERIODS,
        output_file=os.path.join(outdir, "load_balance_annual_zonal.csv"),
        headings=("load_zone", "period",) + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals),
        values=lambda m, z, p:
        (z, p)
        + tuple(get_component_per_year(m, z, p, component) for component in m.Zone_Power_Injections)
        + tuple(-get_component_per_year(m, z, p, component) for component in m.Zone_Power_Withdrawals)
    )

    # Annual totals summed across all zones.
    write_table(
        instance, instance.PERIODS,
        output_file=os.path.join(outdir, "load_balance_annual.csv"),
        headings=("period",) + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals),
        values=lambda m, p:
        (p,)
        + tuple(sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES)
                for component in m.Zone_Power_Injections)
        + tuple(-sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES)
                for component in m.Zone_Power_Withdrawals)
    )
@graph(
    "energy_balance_duals",
    title="Energy balance duals per period",
    note="Note: Outliers and zero-valued duals are ignored."
)
def graph_energy_balance(tools):
    """Box-plot the non-zero energy-balance duals (cents/kWh) per period."""
    load_balance = tools.get_dataframe('load_balance.csv')
    load_balance = tools.transform.timestamp(load_balance)
    # $/MWh -> cents/kWh is a factor of 100 / 1000 = 1/10.
    load_balance["energy_balance_duals"] = tools.pd.to_numeric(
        load_balance["normalized_energy_balance_duals_dollar_per_mwh"], errors="coerce") / 10
    load_balance = load_balance[["energy_balance_duals", "time_row"]]
    load_balance = load_balance.pivot(columns="time_row", values="energy_balance_duals")
    # BUG FIX: the builtin sum() over a DataFrame iterates its column
    # labels, so the old `sum(load_balance == 0) / len(load_balance)`
    # did not produce the scalar required by the :.1f format below.
    # Compute the share of zero-valued duals among non-null entries.
    total_duals = int(load_balance.count().sum())
    zero_duals = int((load_balance == 0).sum().sum())
    percent_of_zeroes = 100.0 * zero_duals / total_duals if total_duals else 0.0
    # Don't include the zero-valued duals
    load_balance = load_balance.replace(0, tools.np.nan)
    if load_balance.count().sum() != 0:
        load_balance.plot.box(
            ax=tools.get_axes(note=f"{percent_of_zeroes:.1f}% of duals are zero"),
            xlabel='Period',
            ylabel='Energy balance duals (cents/kWh)',
            showfliers=False
        )
@graph(
    "daily_demand",
    title="Total daily demand",
    supports_multi_scenario=True
)
def demand(tools):
    """Plot mean hourly demand per season, time row and scenario."""
    loads = tools.get_dataframe("loads.csv", from_inputs=True, drop_scenario_info=False)
    loads = loads.groupby(["TIMEPOINT", "scenario_name"], as_index=False).sum()
    loads = tools.transform.timestamp(loads, key_col="TIMEPOINT", use_timepoint=True)
    loads = loads.groupby(["season", "hour", "scenario_name", "time_row"], as_index=False).mean()
    # Convert MW to GW for the y-axis.
    loads["zone_demand_mw"] /= 1e3
    pn = tools.pn
    figure = (
        pn.ggplot(loads)
        + pn.geom_line(pn.aes(x="hour", y="zone_demand_mw", color="scenario_name"))
        + pn.facet_grid("time_row ~ season")
        + pn.labs(x="Hour (PST)", y="Demand (GW)", color="Scenario")
    )
    tools.save_figure(figure.draw())
@graph(
"demand",
title="Total demand",
supports_multi_scenario=True
)
def yearly_demand(tools):
df = tools.get_dataframe("loads.csv", from_inputs=True, drop_scenario_info=False)
df = df.groupby(["TIMEPOINT", "scenario_name"], as_index=False).sum()
df = tools.transform.timestamp(df, key_col="TIMEPOINT", | |
update_fid_err_log(self, fid_err):
"""add an entry to the fid_err log"""
self.fid_err_log.append(fid_err)
if self.write_to_file:
if len(self.fid_err_log) == 1:
mode = 'w'
else:
mode = 'a'
f = open(self.fid_err_file, mode)
f.write("{}\n".format(fid_err))
f.close()
@property
def grad_norm_file(self):
if self._grad_norm_file is None:
fname = "{}-grad_norm_log.{}".format(self.fname_base,
self.dump_file_ext)
self._grad_norm_file = os.path.join(self.dump_dir, fname)
return self._grad_norm_file
def update_grad_norm_log(self, grad_norm):
"""add an entry to the grad_norm log"""
self.grad_norm_log.append(grad_norm)
if self.write_to_file:
if len(self.grad_norm_log) == 1:
mode = 'w'
else:
mode = 'a'
f = open(self.grad_norm_file, mode)
f.write("{}\n".format(grad_norm))
f.close()
def update_grad_log(self, grad):
"""add an entry to the grad log"""
self.grad_log.append(grad)
if self.write_to_file:
fname = "{}-fid_err_gradients{}.{}".format(self.fname_base,
len(self.grad_log),
self.dump_file_ext)
fpath = os.path.join(self.dump_dir, fname)
np.savetxt(fpath, grad, delimiter=self.data_sep)
    def writeout(self, f=None):
        """write all the logs and the summary out to file(s)

        Parameters
        ----------
        f : filename or filehandle
            If specified then all summary and object data will go in one file.
            If None is specified then type specific files will be generated
            in the dump_dir
            If a filehandle is specified then it must be a byte mode file
            as numpy.savetxt is used, and requires this.
        """
        # fall: stream receiving *all* data; fs: stream for the summary only.
        # closefall/closefs record whether this method owns those handles and
        # must therefore close them before returning.
        fall = None
        # If specific file given then write everything to it
        if hasattr(f, 'write'):
            if not 'b' in f.mode:
                raise RuntimeError("File stream must be in binary mode")
            # write all to this stream
            fall = f
            fs = f
            closefall = False
            closefs = False
        elif f:
            # Assume f is a filename
            fall = open(f, 'wb')
            fs = fall
            closefs = False
            closefall = True
        else:
            # No target given: each log type goes to its own file in dump_dir.
            self.create_dump_dir()
            closefall = False
            if self.dump_summary:
                fs = open(self.summary_file, 'wb')
                closefs = True
        if self.dump_summary:
            # Only the first summary record is preceded by the header line.
            for ois in self.iter_summary:
                if ois.idx == 0:
                    fs.write(asbytes("{}\n{}\n".format(
                        ois.get_header_line(self.summary_sep),
                        ois.get_value_line(self.summary_sep))))
                else:
                    fs.write(asbytes("{}\n".format(
                        ois.get_value_line(self.summary_sep))))
            if closefs:
                fs.close()
                logger.info("Optim dump summary saved to {}".format(
                    self.summary_file))
        if self.dump_fid_err:
            if fall:
                fall.write(asbytes("Fidelity errors:\n"))
                np.savetxt(fall, self.fid_err_log)
            else:
                np.savetxt(self.fid_err_file, self.fid_err_log)
        if self.dump_grad_norm:
            if fall:
                fall.write(asbytes("gradients norms:\n"))
                np.savetxt(fall, self.grad_norm_log)
            else:
                np.savetxt(self.grad_norm_file, self.grad_norm_log)
        if self.dump_grad:
            g_num = 0
            for grad in self.grad_log:
                g_num += 1
                if fall:
                    fall.write(asbytes("gradients (call {}):\n".format(g_num)))
                    np.savetxt(fall, grad)
                else:
                    # One separate file per gradient call when not combined.
                    fname = "{}-fid_err_gradients{}.{}".format(self.fname_base,
                                                               g_num,
                                                               self.dump_file_ext)
                    fpath = os.path.join(self.dump_dir, fname)
                    np.savetxt(fpath, grad, delimiter=self.data_sep)
        if closefall:
            fall.close()
            logger.info("Optim dump saved to {}".format(f))
        else:
            if fall:
                logger.info("Optim dump saved to specified stream")
            else:
                logger.info("Optim dump saved to {}".format(self.dump_dir))
class DynamicsDump(Dump):
    """
    A container for dumps of dynamics data.
    Mainly time evolution calculations

    Attributes
    ----------
    dump_summary : bool
        If True a summary is recorded
    evo_summary : list of :class:`tslotcomp.EvoCompSummary`
        Summary items are appended if dump_summary is True
        at each recomputation of the evolution.
    dump_amps : bool
        If True control amplitudes are dumped
    dump_dyn_gen : bool
        If True the dynamics generators (Hamiltonians) are dumped
    dump_prop : bool
        If True propagators are dumped
    dump_prop_grad : bool
        If True propagator gradients are dumped
    dump_fwd_evo : bool
        If True forward evolution operators are dumped
    dump_onwd_evo : bool
        If True onward evolution operators are dumped
    dump_onto_evo : bool
        If True onto (or backward) evolution operators are dumped
    evo_dumps : list of :class:`EvoCompDumpItem`
        A new dump item is appended at each recomputation of the evolution.
        That is if any of the calculation objects are to be dumped.
    """
    def __init__(self, dynamics, level='SUMMARY'):
        # Imported here to avoid a circular import at module load time.
        from qutip.control.dynamics import Dynamics
        if not isinstance(dynamics, Dynamics):
            raise TypeError("Must instantiate with {} type".format(
                Dynamics))
        self.parent = dynamics
        self._level = level
        self.reset()

    def reset(self):
        """Reset to the configured dump level with empty logs."""
        Dump.reset(self)
        self._apply_level()
        self.evo_dumps = []
        self.evo_summary = []
        self._fname_base = 'dyndump'

    def clear(self):
        """Discard all recorded dump items and summaries (config is kept)."""
        del self.evo_dumps[:]
        del self.evo_summary[:]

    @property
    def dump_any(self):
        """True if any of the calculation objects are to be dumped"""
        return bool(self.dump_amps or
                    self.dump_dyn_gen or
                    self.dump_prop or
                    self.dump_prop_grad or
                    self.dump_fwd_evo or
                    self.dump_onwd_evo or
                    self.dump_onto_evo)

    @property
    def dump_all(self):
        """True if all of the calculation objects are to be dumped"""
        dyn = self.parent
        # The onward / onto evolutions only count as missing when the
        # fidelity computer actually uses them, hence the == comparison:
        # (flag or flag == uses) is True when dumped, or not needed.
        # NOTE: the previous expression was mis-parenthesised so the 'or'
        # split the condition in two, letting the amps/dyn_gen/prop flags
        # be bypassed entirely.
        return bool(self.dump_amps and
                    self.dump_dyn_gen and
                    self.dump_prop and
                    self.dump_prop_grad and
                    self.dump_fwd_evo and
                    (self.dump_onwd_evo or
                     self.dump_onwd_evo == dyn.fid_computer.uses_onwd_evo) and
                    (self.dump_onto_evo or
                     self.dump_onto_evo == dyn.fid_computer.uses_onto_evo))

    def _apply_level(self, level=None):
        """Set the dump_* flags to match the given (or stored) dump level.

        Raises
        ------
        ValueError
            If *level* is not a string or not one of the known levels.
        """
        dyn = self.parent
        if level is None:
            level = self._level
        if not _is_string(level):
            raise ValueError("Dump level must be a string")
        level = level.upper()
        if level == 'CUSTOM':
            if self._level == 'CUSTOM':
                # dumping level has not changed - keep the same specific
                # config. (Previously this fell through to the level
                # dispatch below and raised ValueError.)
                return
            else:
                # Switching to custom, start from SUMMARY
                level = 'SUMMARY'
        if level == 'SUMMARY':
            self.dump_summary = True
            self.dump_amps = False
            self.dump_dyn_gen = False
            self.dump_prop = False
            self.dump_prop_grad = False
            self.dump_fwd_evo = False
            self.dump_onwd_evo = False
            self.dump_onto_evo = False
        elif level == 'FULL':
            self.dump_summary = True
            self.dump_amps = True
            self.dump_dyn_gen = True
            self.dump_prop = True
            self.dump_prop_grad = True
            self.dump_fwd_evo = True
            # Onward / onto evolutions are only dumped when the fidelity
            # computer actually generates them.
            self.dump_onwd_evo = dyn.fid_computer.uses_onwd_evo
            self.dump_onto_evo = dyn.fid_computer.uses_onto_evo
        else:
            raise ValueError("No option for dumping level '{}'".format(level))

    def add_evo_dump(self):
        """Add dump of current time evolution generating objects"""
        dyn = self.parent
        item = EvoCompDumpItem(self)
        item.idx = len(self.evo_dumps)
        self.evo_dumps.append(item)
        # Deep copies so later optimisation steps cannot mutate the record.
        if self.dump_amps:
            item.ctrl_amps = copy.deepcopy(dyn.ctrl_amps)
        if self.dump_dyn_gen:
            item.dyn_gen = copy.deepcopy(dyn._dyn_gen)
        if self.dump_prop:
            item.prop = copy.deepcopy(dyn._prop)
        if self.dump_prop_grad:
            item.prop_grad = copy.deepcopy(dyn._prop_grad)
        if self.dump_fwd_evo:
            item.fwd_evo = copy.deepcopy(dyn._fwd_evo)
        if self.dump_onwd_evo:
            item.onwd_evo = copy.deepcopy(dyn._onwd_evo)
        if self.dump_onto_evo:
            item.onto_evo = copy.deepcopy(dyn._onto_evo)
        if self.write_to_file:
            item.writeout()
        return item

    def add_evo_comp_summary(self, dump_item_idx=None):
        """add copy of current evo comp summary

        Parameters
        ----------
        dump_item_idx : int or None
            Index of the associated evo dump item, if any.

        Raises
        ------
        RuntimeError
            If the timeslot computer has no summary available.
        """
        dyn = self.parent
        if dyn.tslot_computer.evo_comp_summary is None:
            raise RuntimeError("Cannot add evo_comp_summary as not available")
        ecs = copy.copy(dyn.tslot_computer.evo_comp_summary)
        ecs.idx = len(self.evo_summary)
        ecs.evo_dump_idx = dump_item_idx
        if dyn.stats:
            ecs.iter_num = dyn.stats.num_iter
            ecs.fid_func_call_num = dyn.stats.num_fidelity_func_calls
            ecs.grad_func_call_num = dyn.stats.num_grad_func_calls
        self.evo_summary.append(ecs)
        if self.write_to_file:
            # Write header + first record on the first entry, append after;
            # the context manager closes the handle even if the write fails.
            if ecs.idx == 0:
                with open(self.summary_file, 'w') as f:
                    f.write("{}\n{}\n".format(
                        ecs.get_header_line(self.summary_sep),
                        ecs.get_value_line(self.summary_sep)))
            else:
                with open(self.summary_file, 'a') as f:
                    f.write("{}\n".format(
                        ecs.get_value_line(self.summary_sep)))
        return ecs

    def writeout(self, f=None):
        """write all the dump items and the summary out to file(s)

        Parameters
        ----------
        f : filename or filehandle
            If specified then all summary and object data will go in one file.
            If None is specified then type specific files will be generated
            in the dump_dir
            If a filehandle is specified then it must be a byte mode file
            as numpy.savetxt is used, and requires this.
        """
        # fall: stream receiving *all* data; fs: stream for the summary only.
        # closefall/closefs record whether this method owns those handles.
        fall = None
        # If specific file given then write everything to it
        if hasattr(f, 'write'):
            if 'b' not in f.mode:
                raise RuntimeError("File stream must be in binary mode")
            # write all to this stream
            fall = f
            fs = f
            closefall = False
            closefs = False
        elif f:
            # Assume f is a filename
            fall = open(f, 'wb')
            fs = fall
            closefs = False
            closefall = True
        else:
            # No target given: write type-specific files into dump_dir.
            self.create_dump_dir()
            closefall = False
            if self.dump_summary:
                fs = open(self.summary_file, 'wb')
                closefs = True
        if self.dump_summary:
            # Only the first summary record is preceded by the header line.
            for ecs in self.evo_summary:
                if ecs.idx == 0:
                    fs.write(asbytes("{}\n{}\n".format(
                        ecs.get_header_line(self.summary_sep),
                        ecs.get_value_line(self.summary_sep))))
                else:
                    fs.write(asbytes("{}\n".format(
                        ecs.get_value_line(self.summary_sep))))
            if closefs:
                fs.close()
                logger.info("Dynamics dump summary saved to {}".format(
                    self.summary_file))
        for di in self.evo_dumps:
            di.writeout(fall)
        if closefall:
            fall.close()
            logger.info("Dynamics dump saved to {}".format(f))
        else:
            if fall:
                logger.info("Dynamics dump saved to specified stream")
            else:
                logger.info("Dynamics dump saved to {}".format(self.dump_dir))
class DumpItem(object):
    """An item in a dump list."""

    def __init__(self):
        pass
class EvoCompDumpItem(DumpItem):
"""
A copy of all objects generated to calculate one time evolution
Note the attributes are only set if the corresponding
:class:`DynamicsDump` dump_ attribute is set.
"""
    def __init__(self, dump):
        # The parent must be a DynamicsDump container; this item records
        # the objects from one time-evolution computation within it.
        if not isinstance(dump, DynamicsDump):
            raise TypeError("Must instantiate with {} type".format(
                DynamicsDump))
        self.parent = dump
        self.reset()
def reset(self):
self.idx = None
# self.num_ctrls = None
# self.num_tslots = None
self.ctrl_amps = None
self.dyn_gen = None
self.prop = None
self.prop_grad = None
self.fwd_evo = None
self.onwd_evo = None
self.onto_evo = None
def writeout(self, f=None):
""" write all the objects out to files
Parameters
----------
f : filename or filehandle
If specified then all object data will go in one file.
If None is specified then type specific files will be generated
in the dump_dir
If a filehandle is specified then it must be a byte mode file
as numpy.savetxt is used, and requires this.
"""
dump = self.parent
fall = None
closefall = True
closef = False
# If specific file given then write everything to it
if hasattr(f, 'write'):
if not 'b' in f.mode:
raise RuntimeError("File stream must be in binary mode")
# write all to this stream
fall = f
closefall = False
f.write(asbytes("EVOLUTION COMPUTATION {}\n".format(self.idx)))
elif f:
| |
self.returnAST)
while True:
if (self.LA(1)==NEWLINE):
pass
self.match(NEWLINE)
else:
break
else:
break
_cnt135 += 1
if _cnt135 < 1:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
else:
raise antlr.NoViableAltException(self.LT(1), self.getFilename())
self.suiteEnd()
self.addASTChild(currentAST, self.returnAST)
initFunctionSuite_AST = currentAST.root
self.returnAST = initFunctionSuite_AST
    def classSuite(self):
        """ANTLR-generated rule: ':' suiteStart (classStatement NEWLINE*)+
        suiteEnd, with the colon token as AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        classSuite_AST = None
        pass
        tmp67_AST = None
        tmp67_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp67_AST)
        self.match(COLON)
        self.suiteStart()
        self.addASTChild(currentAST, self.returnAST)
        _cnt119= 0
        # Require at least one classStatement; trailing NEWLINEs are eaten.
        while True:
            if (_tokenSet_5.member(self.LA(1))):
                pass
                self.classStatement()
                self.addASTChild(currentAST, self.returnAST)
                while True:
                    if (self.LA(1)==NEWLINE):
                        pass
                        self.match(NEWLINE)
                    else:
                        break
            else:
                break
            _cnt119 += 1
        if _cnt119 < 1:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        self.suiteEnd()
        self.addASTChild(currentAST, self.returnAST)
        classSuite_AST = currentAST.root
        self.returnAST = classSuite_AST
    def interfaceSuite(self):
        """ANTLR-generated rule: ':' suiteStart (interfaceStatement
        NEWLINE*)+ suiteEnd; interface bodies may only contain 'def'."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        interfaceSuite_AST = None
        pass
        tmp69_AST = None
        tmp69_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp69_AST)
        self.match(COLON)
        self.suiteStart()
        self.addASTChild(currentAST, self.returnAST)
        _cnt124= 0
        # Require at least one interfaceStatement (guarded by LITERAL_def).
        while True:
            if (self.LA(1)==LITERAL_def):
                pass
                self.interfaceStatement()
                self.addASTChild(currentAST, self.returnAST)
                while True:
                    if (self.LA(1)==NEWLINE):
                        pass
                        self.match(NEWLINE)
                    else:
                        break
            else:
                break
            _cnt124 += 1
        if _cnt124 < 1:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        self.suiteEnd()
        self.addASTChild(currentAST, self.returnAST)
        interfaceSuite_AST = currentAST.root
        self.returnAST = interfaceSuite_AST
    def noCommaExpression(self):
        """ANTLR-generated rule: '(' expression ')' | calcExpression."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        noCommaExpression_AST = None
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [LPAREN]:
            pass
            self.match(LPAREN)
            self.expression()
            self.addASTChild(currentAST, self.returnAST)
            self.match(RPAREN)
            noCommaExpression_AST = currentAST.root
        elif la1 and la1 in [INT,FLOAT,STRING,SYMBOL,LITERAL_quote]:
            pass
            self.calcExpression()
            self.addASTChild(currentAST, self.returnAST)
            noCommaExpression_AST = currentAST.root
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        self.returnAST = noCommaExpression_AST
    def calcExpression(self):
        """ANTLR-generated rule: simpleExpression optionally followed by one
        binary operator (==, +, -, *, >, <) and a noCommaExpression; the
        operator token becomes the AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        calcExpression_AST = None
        pass
        self.simpleExpression()
        self.addASTChild(currentAST, self.returnAST)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [PLUS,MINUS,TIMES,GT,LT,EQUALSEQUALS]:
            pass
            la1 = self.LA(1)
            if False:
                pass
            elif la1 and la1 in [EQUALSEQUALS]:
                pass
                tmp73_AST = None
                tmp73_AST = self.astFactory.create(self.LT(1))
                self.makeASTRoot(currentAST, tmp73_AST)
                self.match(EQUALSEQUALS)
            elif la1 and la1 in [PLUS]:
                pass
                tmp74_AST = None
                tmp74_AST = self.astFactory.create(self.LT(1))
                self.makeASTRoot(currentAST, tmp74_AST)
                self.match(PLUS)
            elif la1 and la1 in [MINUS]:
                pass
                tmp75_AST = None
                tmp75_AST = self.astFactory.create(self.LT(1))
                self.makeASTRoot(currentAST, tmp75_AST)
                self.match(MINUS)
            elif la1 and la1 in [TIMES]:
                pass
                tmp76_AST = None
                tmp76_AST = self.astFactory.create(self.LT(1))
                self.makeASTRoot(currentAST, tmp76_AST)
                self.match(TIMES)
            elif la1 and la1 in [GT]:
                pass
                tmp77_AST = None
                tmp77_AST = self.astFactory.create(self.LT(1))
                self.makeASTRoot(currentAST, tmp77_AST)
                self.match(GT)
            elif la1 and la1 in [LT]:
                pass
                tmp78_AST = None
                tmp78_AST = self.astFactory.create(self.LT(1))
                self.makeASTRoot(currentAST, tmp78_AST)
                self.match(LT)
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
            self.noCommaExpression()
            self.addASTChild(currentAST, self.returnAST)
        elif la1 and la1 in [EOF,INT,DOT,FLOAT,STRING,LBRACE,RBRACE,LPAREN,RPAREN,RSQUBR,COMMA,SEMICOLON,SYMBOL,COLON,LITERAL_def,LITERAL_def_init,LITERAL_class,LITERAL_interface,LITERAL_while,LITERAL_import,LITERAL_quote,NEWLINE,DEDENT,LITERAL_return]:
            # Follow set: no operator present, expression is complete.
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        calcExpression_AST = currentAST.root
        self.returnAST = calcExpression_AST
    def commaExpression(self):
        """ANTLR-generated rule: lookupExpression (',' lookupExpression)*;
        the first comma token becomes the AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        commaExpression_AST = None
        pass
        self.lookupExpression()
        self.addASTChild(currentAST, self.returnAST)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [COMMA]:
            pass
            tmp79_AST = None
            tmp79_AST = self.astFactory.create(self.LT(1))
            self.makeASTRoot(currentAST, tmp79_AST)
            self.match(COMMA)
            self.lookupExpression()
            self.addASTChild(currentAST, self.returnAST)
            while True:
                if (self.LA(1)==COMMA):
                    pass
                    self.match(COMMA)
                    self.lookupExpression()
                    self.addASTChild(currentAST, self.returnAST)
                else:
                    break
        elif la1 and la1 in [EOF,INT,FLOAT,STRING,LBRACE,RBRACE,LPAREN,RPAREN,RSQUBR,SEMICOLON,SYMBOL,COLON,LITERAL_def,LITERAL_def_init,LITERAL_class,LITERAL_interface,LITERAL_while,LITERAL_import,LITERAL_quote,NEWLINE,DEDENT,LITERAL_return]:
            # Follow set: single lookupExpression, no comma list.
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        commaExpression_AST = currentAST.root
        self.returnAST = commaExpression_AST
    def lookupExpression(self):
        """ANTLR-generated rule: calcExpression ('.' calcExpression)?; the
        dot token becomes the AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        lookupExpression_AST = None
        pass
        self.calcExpression()
        self.addASTChild(currentAST, self.returnAST)
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [DOT]:
            pass
            tmp81_AST = None
            tmp81_AST = self.astFactory.create(self.LT(1))
            self.makeASTRoot(currentAST, tmp81_AST)
            self.match(DOT)
            self.calcExpression()
            self.addASTChild(currentAST, self.returnAST)
        elif la1 and la1 in [EOF,INT,FLOAT,STRING,LBRACE,RBRACE,LPAREN,RPAREN,RSQUBR,COMMA,SEMICOLON,SYMBOL,COLON,LITERAL_def,LITERAL_def_init,LITERAL_class,LITERAL_interface,LITERAL_while,LITERAL_import,LITERAL_quote,NEWLINE,DEDENT,LITERAL_return]:
            # Follow set: no attribute lookup.
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        lookupExpression_AST = currentAST.root
        self.returnAST = lookupExpression_AST
    def simpleExpression(self):
        """ANTLR-generated rule: INT | FLOAT | STRING | quotedCode |
        functionCallOrSymbol | arrayLookup. SYMBOL alternatives are
        disambiguated with 2-token lookahead ('[' means arrayLookup)."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        simpleExpression_AST = None
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [INT]:
            pass
            tmp82_AST = None
            tmp82_AST = self.astFactory.create(self.LT(1))
            self.addASTChild(currentAST, tmp82_AST)
            self.match(INT)
            simpleExpression_AST = currentAST.root
        elif la1 and la1 in [FLOAT]:
            pass
            tmp83_AST = None
            tmp83_AST = self.astFactory.create(self.LT(1))
            self.addASTChild(currentAST, tmp83_AST)
            self.match(FLOAT)
            simpleExpression_AST = currentAST.root
        elif la1 and la1 in [STRING]:
            pass
            tmp84_AST = None
            tmp84_AST = self.astFactory.create(self.LT(1))
            self.addASTChild(currentAST, tmp84_AST)
            self.match(STRING)
            simpleExpression_AST = currentAST.root
        elif la1 and la1 in [LITERAL_quote]:
            pass
            self.quotedCode()
            self.addASTChild(currentAST, self.returnAST)
            simpleExpression_AST = currentAST.root
        else:
            if (self.LA(1)==SYMBOL) and (_tokenSet_6.member(self.LA(2))):
                pass
                self.functionCallOrSymbol()
                self.addASTChild(currentAST, self.returnAST)
                simpleExpression_AST = currentAST.root
            elif (self.LA(1)==SYMBOL) and (self.LA(2)==LSQUBR):
                pass
                self.arrayLookup()
                self.addASTChild(currentAST, self.returnAST)
                simpleExpression_AST = currentAST.root
            else:
                raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        self.returnAST = simpleExpression_AST
    def functionCallOrSymbol(self):
        """ANTLR-generated rule: SYMBOL ('(' argumentsList ')')*; each
        LPAREN becomes an AST root over the preceding symbol/call."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        functionCallOrSymbol_AST = None
        pass
        tmp85_AST = None
        tmp85_AST = self.astFactory.create(self.LT(1))
        self.addASTChild(currentAST, tmp85_AST)
        self.match(SYMBOL)
        while True:
            if (self.LA(1)==LPAREN) and (_tokenSet_7.member(self.LA(2))):
                pass
                tmp86_AST = None
                tmp86_AST = self.astFactory.create(self.LT(1))
                self.makeASTRoot(currentAST, tmp86_AST)
                self.match(LPAREN)
                self.argumentsList()
                self.addASTChild(currentAST, self.returnAST)
                self.match(RPAREN)
            else:
                break
        functionCallOrSymbol_AST = currentAST.root
        self.returnAST = functionCallOrSymbol_AST
    def arrayLookup(self):
        """ANTLR-generated rule: SYMBOL '[' expression ']'; the '[' token
        becomes the AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        arrayLookup_AST = None
        pass
        tmp88_AST = None
        tmp88_AST = self.astFactory.create(self.LT(1))
        self.addASTChild(currentAST, tmp88_AST)
        self.match(SYMBOL)
        tmp89_AST = None
        tmp89_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp89_AST)
        self.match(LSQUBR)
        self.expression()
        self.addASTChild(currentAST, self.returnAST)
        self.match(RSQUBR)
        arrayLookup_AST = currentAST.root
        self.returnAST = arrayLookup_AST
    def quotedCode(self):
        """ANTLR-generated rule: 'quote' suite; the quote keyword becomes
        the AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        quotedCode_AST = None
        pass
        tmp91_AST = None
        tmp91_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp91_AST)
        self.match(LITERAL_quote)
        self.suite()
        self.addASTChild(currentAST, self.returnAST)
        quotedCode_AST = currentAST.root
        self.returnAST = quotedCode_AST
    def argumentsList(self):
        """ANTLR-generated rule: (noCommaExpression (',' noCommaExpression)*)?
        — an empty list is allowed when ')' follows immediately."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        argumentsList_AST = None
        pass
        la1 = self.LA(1)
        if False:
            pass
        elif la1 and la1 in [INT,FLOAT,STRING,LPAREN,SYMBOL,LITERAL_quote]:
            pass
            self.noCommaExpression()
            self.addASTChild(currentAST, self.returnAST)
            while True:
                if (self.LA(1)==COMMA):
                    pass
                    tmp92_AST = None
                    tmp92_AST = self.astFactory.create(self.LT(1))
                    self.addASTChild(currentAST, tmp92_AST)
                    self.match(COMMA)
                    self.noCommaExpression()
                    self.addASTChild(currentAST, self.returnAST)
                else:
                    break
        elif la1 and la1 in [RPAREN]:
            # Empty argument list.
            pass
        else:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        argumentsList_AST = currentAST.root
        self.returnAST = argumentsList_AST
    def suiteStart(self):
        """ANTLR-generated rule: NEWLINE+ INDENT NEWLINE* — opens an
        indented suite."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        suiteStart_AST = None
        pass
        _cnt104= 0
        # At least one NEWLINE must precede the INDENT.
        while True:
            if (self.LA(1)==NEWLINE):
                pass
                self.match(NEWLINE)
            else:
                break
            _cnt104 += 1
        if _cnt104 < 1:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        self.match(INDENT)
        # Optional blank lines after the INDENT.
        while True:
            if (self.LA(1)==NEWLINE):
                pass
                self.match(NEWLINE)
            else:
                break
        suiteStart_AST = currentAST.root
        self.returnAST = suiteStart_AST
    def suiteEnd(self):
        """ANTLR-generated rule: DEDENT — closes an indented suite."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        suiteEnd_AST = None
        pass
        self.match(DEDENT)
        suiteEnd_AST = currentAST.root
        self.returnAST = suiteEnd_AST
    def returnStatement(self):
        """ANTLR-generated rule: 'return' expression; the keyword becomes
        the AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        returnStatement_AST = None
        pass
        tmp97_AST = None
        tmp97_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp97_AST)
        self.match(LITERAL_return)
        self.expression()
        self.addASTChild(currentAST, self.returnAST)
        returnStatement_AST = currentAST.root
        self.returnAST = returnStatement_AST
    def varStatement(self):
        """ANTLR-generated rule: 'var' varSuite; the keyword becomes the
        AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        varStatement_AST = None
        pass
        tmp98_AST = None
        tmp98_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp98_AST)
        self.match(LITERAL_var)
        self.varSuite()
        self.addASTChild(currentAST, self.returnAST)
        varStatement_AST = currentAST.root
        self.returnAST = varStatement_AST
    def varSuite(self):
        """ANTLR-generated rule: ':' suiteStart (initialisation NEWLINE*)+
        suiteEnd, with the colon token as AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        varSuite_AST = None
        pass
        tmp99_AST = None
        tmp99_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp99_AST)
        self.match(COLON)
        self.suiteStart()
        self.addASTChild(currentAST, self.returnAST)
        _cnt140= 0
        # Require at least one initialisation inside the suite.
        while True:
            if (_tokenSet_1.member(self.LA(1))):
                pass
                self.initialisation()
                self.addASTChild(currentAST, self.returnAST)
                while True:
                    if (self.LA(1)==NEWLINE):
                        pass
                        self.match(NEWLINE)
                    else:
                        break
            else:
                break
            _cnt140 += 1
        if _cnt140 < 1:
            raise antlr.NoViableAltException(self.LT(1), self.getFilename())
        self.suiteEnd()
        self.addASTChild(currentAST, self.returnAST)
        varSuite_AST = currentAST.root
        self.returnAST = varSuite_AST
    def initialisation(self):
        """ANTLR-generated rule: expression SYMBOL '=' expression
        (type-expression, name, initial value); '=' becomes the AST root."""
        self.returnAST = None
        currentAST = antlr.ASTPair()
        initialisation_AST = None
        pass
        self.expression()
        self.addASTChild(currentAST, self.returnAST)
        tmp101_AST = None
        tmp101_AST = self.astFactory.create(self.LT(1))
        self.addASTChild(currentAST, tmp101_AST)
        self.match(SYMBOL)
        tmp102_AST = None
        tmp102_AST = self.astFactory.create(self.LT(1))
        self.makeASTRoot(currentAST, tmp102_AST)
        self.match(EQUALS)
        self.expression()
        self.addASTChild(currentAST, self.returnAST)
        initialisation_AST = currentAST.root
        self.returnAST = initialisation_AST
    def buildTokenTypeASTClassMap(self):
        """No custom AST classes are registered for any token type."""
        self.tokenTypeToASTClassMap = None
# Token-type display names (ANTLR-generated): the list index is the token
# type number; quoted entries are keyword literals from the grammar.
_tokenNames = [
    "<0>",
    "EOF",
    "<2>",
    "NULL_TREE_LOOKAHEAD",
    "WHITESPACE",
    "COMMENT",
    "DIGIT",
    "DIGITS",
    "INT",
    "DOT",
    "FLOAT",
    "INT_OR_DOT_OR_FLOAT",
    "TRIPLEDOUBLEQUOTE",
    "DOUBLEQUOTE",
    "DOUBLEQUOTESTRING",
    "TRIPLEDOUBLEQUOTESTRING",
    "STRING",
    "LBRACE",
    "RBRACE",
    "PIPE",
    "LPAREN",
    "RPAREN",
    "LSQUBR",
    "RSQUBR",
    "COMMA",
    "SEMICOLON",
    "STARTSYMBOLCHAR",
    "MIDSYMBOLCHAR",
    "SYMBOL",
    "PLUS",
    "MINUS",
    "TIMES",
    "PLUSEQUALS",
    "GT",
    "LT",
    "COLON",
    "EQUALS",
    "EQUALSEQUALS",
    "\"def\"",
    "\"def_init\"",
    "\"class\"",
    "\"interface\"",
    "\"while\"",
    "\"import\"",
    "\"quote\"",
    "NEWLINE",
    "INDENT",
    "DEDENT",
    "\"var\"",
    "\"return\""
]
### generate bit set
def mk_tokenSet_0():
    """Return the bit-set words for token set 0 (bit index = token type).

    The Python 2 long-literal suffix 'L' was removed: it is a syntax error
    on Python 3, and Python 2 auto-promotes large ints, so the value is
    unchanged on both.
    """
    ### var1
    data = [ 34360041604352, 0]
    return data
_tokenSet_0 = antlr.BitSet(mk_tokenSet_0())
### generate bit set
def mk_tokenSet_1():
    """Return the bit-set words for token set 1 (Python 2 'L' suffix
    removed for Python 3 compatibility; value unchanged)."""
    ### var1
    data = [ 17592455595264, 0]
    return data
_tokenSet_1 = antlr.BitSet(mk_tokenSet_1())
### generate bit set
def mk_tokenSet_2():
    """Return the bit-set words for token set 2 (Python 2 'L' suffix
    removed for Python 3 compatibility; value unchanged)."""
    ### var1
    data = [ 773982959109890, 0]
    return data
_tokenSet_2 = antlr.BitSet(mk_tokenSet_2())
### generate bit set
def mk_tokenSet_3():
    """Return the bit-set words for token set 3 (Python 2 'L' suffix
    removed for Python 3 compatibility; value unchanged)."""
    ### var1
    data = [ 773781611545858, 0]
    return data
_tokenSet_3 = antlr.BitSet(mk_tokenSet_3())
### generate bit set
def mk_tokenSet_4():
### var1
data = [ 773987265087234L, 0L]
return | |
*****************************************************
rcof=rt.get('run_correctness_output_files',[])
if len(rcof)==0:
rcof=meta.get('run_correctness_output_files',[])
rcvars=rt.get('run_correctness_vars',[])
if ccc['run_success_bool'] and len(rcof)>0 and i.get('skip_output_validation','')!='yes' and not b_min_run:
ck.out('')
ck.out(' (checking output correctness ...)')
# Prepare directory with output files
po=kcmd+'-'+dduid
if dfile!='':
if rt.get('run_correctness_extra_keys_from_dataset_file_json','')=='yes':
for q in sorted(dfile_keys):
po+='-'+str(env.get(q,''))
else:
po+='-'+dfile
if rt.get('output_invariant_of_repeat','')!='yes':
po+='-'+str(xrepeat)
# Check if output depends on extra vars
if len(rcvars)>0:
for q in rcvars:
po+='-'+str(env.get(q,''))
oruoa=i.get('output_validation_repo','')
pox=''
found=False
# Check if output from another program
program_output_uoa=duoa
if i.get('program_output_uoa','')!='':
program_output_uoa=i['program_output_uoa']
if rt.get('program_output_uoa','')!='':
program_output_uoa=rt['program_output_uoa']
# Check UID of program_output_uoa
rx=ck.access({'action':'find',
'module_uoa':work['self_module_uid'],
'data_uoa':program_output_uoa})
if rx['return']>0: return rx
program_output_uoa=rx['data_uid']
if i.get('overwrite_reference_output','')!='yes':
if o=='con':
ck.out(' * Searching directory with reference output "'+po+'" ...')
# Search related entries with outputs (can be multiple - in local and project repos!)
rx=ck.access({'action':'search',
'module_uoa':cfg['module_deps']['program.output'],
'data_uoa':'program-uid-'+program_output_uoa})
if rx['return']>0: return rx
dslst=rx['lst']
for q in dslst:
pox=os.path.join(q['path'],po)
if os.path.isdir(pox):
found=True
break
vfail=False
vo={}
if found:
if o=='con':
ck.out(' * Reference output found - validating ...')
ck.out(' * File: '+pox)
for fz in rcof:
vr=''
p1=os.path.join(cdir,fz)
if not os.path.isfile(p1):
vr='file not found'
vfail=True
else:
p2=os.path.join(pox,fz)
# If reference file doesn't exist (for example, we later updated meta),
# copy it to the reference ..
if not os.path.isfile(p2):
shutil.copyfile(p1,p2)
else:
if ck_check_output!=None:
r=ck_check_output({'ck_kernel':ck,
'file1':p1,
'file2':p2,
'meta':meta,
'env':env})
if r['return']>0:
vr=r['error']
vfail=True
elif r['failed']:
vr=r['fail_reason']
vfail=True
else:
import filecmp
vx=filecmp.cmp(p1,p2)
if not vx:
vr='exact match failed'
vfail=True
if vr!='':
if o=='con':
ck.out(' - check failed on "'+fz+'" ('+vr+')')
vo[fz]={'fail_reason':vr}
if not vfail and o=='con':
ck.out(' Validated successfully!')
# If at least one failed, fail pipeline
if vfail:
import json
misc['run_success']='no'
misc['run_success_bool']=False
misc['fail_reason']='output is not matching with the reference one: '+json.dumps(vo,indent=2)
ccc['run_success']=misc['run_success']
ccc['run_success_bool']=misc['run_success_bool']
ccc['fail_reason']=misc['fail_reason']
else:
if o=='con':
ck.out(' * Recording reference output ...')
# First create / update entry
potags=meta.get('tags',[])
if dalias!='':
potags.append(dalias)
if oruoa=='': oruoa='local' # avoid recording to existing repositories rather than local
# unless explictly specified (to avoid pulluting shared project repos)
ii={'action':'update',
'module_uoa':cfg['module_deps']['program.output'],
'data_uoa':'program-uid-'+program_output_uoa,
'data_name':dalias,
'repo_uoa':oruoa,
'ignore_update':'yes',
'tags':potags}
r=ck.access(ii)
if r['return']>0: return r
pd=r['path']
if o=='con':
ck.out(' * Directory with output: '+pd)
# Create sub-directory to hold correct output
pd1=os.path.join(pd,po)
if not os.path.isdir(pd1):
os.makedirs(pd1)
for fz in rcof:
p1=os.path.join(cdir,fz)
p2=os.path.join(pd,po,fz)
if not os.path.isfile(p1):
return {'return':1, 'error':'reference output file '+fz+' not found!'}
shutil.copyfile(p1,p2)
# Update stats with output check
svfail='no'
if vfail: svfail='yes'
misc['output_check_failed']=svfail
misc['output_check_failed_bool']=vfail
ccc['output_check_failed']=svfail
ccc['output_check_failed_bool']=vfail
if len(vo)>0:
misc['output_check_failures']=vo
ccc['output_check_failures']=vo
# Output final execution time
if o=='con' and rt.get('skip_print_execution_time','')!='yes':
ck.out('')
x='Execution time: '+('%.3f'%exec_time)+' sec.'
if repeat>1:
x+='; Repetitions: '+str(abs(repeat))+'; Normalized execution time: '+('%.9f'%(exec_time/abs(repeat)))+' sec.'
ck.out(x)
# Check to clean random directory
#if grtd=='yes' and sca!='yes':
# os.chdir(odir)
# try:
# shutil.rmtree(cdir, ignore_errors=True)
# except Exception as e:
# pass
if misc.get('run_success','')=='no' and o=='con':
ck.out('')
ck.out('Program execution likely failed ('+misc.get('fail_reason','')+')!')
ck.out('')
return {'return':0, 'tmp_dir':rcdir, 'misc':misc, 'characteristics':ccc, 'deps':deps}
##############################################################################
# clean program work and tmp files
def clean(i):
    """Clean program work and tmp files.

    Input: {
             see "process" API
           }

    Output: {
              return  - return code = 0, if successful
                        > 0, if error
              (error) - error text if return > 0
            }
    """
    i.update({'sub_action': 'clean'})
    return process(i)
##############################################################################
# compile program
def compile(i):
    """Compile program.

    See "process_in_dir" API
    """
    i.update({'sub_action': 'compile'})
    return process(i)
##############################################################################
# run program
def run(i):
    """Run program.

    See "process_in_dir" API.

    When 'treat_return_code_as_exit_code' is 'yes' and the pipeline itself
    succeeded, the program's own return code (and any fail reason) is
    propagated as the CK return code / error text.
    """
    i['sub_action'] = 'run'
    result = process(i)
    use_exit_code = i.get('treat_return_code_as_exit_code', '') == 'yes'
    if use_exit_code and result.get('return') == 0:
        characteristics = result['characteristics']
        result['return'] = characteristics['return_code']
        result['error'] = characteristics.get('fail_reason')
    return result
##############################################################################
# prepare and run program pipeline (clean, compile, run, etc)
def pipeline(i):
"""
Input: {
(repo_uoa) - program repo UOA
(module_uoa) - program module UOA
(data_uoa) - program data UOA
or
              (program_uoa)        - useful if universal pipeline is used, i.e. ck run pipeline:program program_uoa=...
or taken from .cm/meta.json from current directory
(random) - if 'yes', random selection of program, cmd, dataset uoa and dataset file
(to support collaborative program optimization)
(skip_local) - if 'yes', skip detection of program in a local path
(program_tags) - select programs by these tags
(program_dir) - force program directory
(target) - target machine added via 'ck add machine' with prepared target description
(useful to create farms of machines for crowd-benchmarking and crowd-tuning using CK)
(host_os) - host OS (detect, if omitted)
(target_os) - OS module to check (if omitted, analyze host)
(device_id) - device id if remote (such as adb)
(local_platform) - if 'yes', use host_os/target_os from the current platform
(useful when replaying experiments from another machine and even OS)
(prepare) - if 'yes', only prepare setup, but do not clean/compile/run program
(save_to_file) - if !='', save updated input/output (state) to this file
(skip_interaction) - if 'yes' and out=='con', skip interaction to choose parameters
(skip_device_init) - if 'yes', skip device init
(skip_info_collection) - if 'yes', skip info collection
(skip_device_info) - if 'yes', skip any device info -
useful to prepare experiment crowdsourcing packs for remote devices
Pipeline sections' settings:
(compute_platform_id) - if !='', set env['CK_COMPUTE_PLATFORM_ID']
(compute_device_id) - if !='', set env['CK_COMPUTE_DEVICE_ID']
(no_platform_features) - if 'yes', do not collect full platform features
(no_dataset_features) - if 'yes', do not search and extract data set features
(no_clean) - if 'yes', do not clean directory before compile/run
(no_compile) - if 'yes', do not compile program (useful when running the same program
under different system state conditions: CPU/GPU freq, cache/bus contentions, etc)
(compile_only_once) - if 'yes', compile only at first iteration
(no_compiler_description) - if 'yes', do not search for most close compiler description with flags ...
(no_run) - if 'yes', do not run program
useful when using autotuning to find bugs in compiler,
                                            or find differently generated code sequences, etc ...
(no_state_check) - do not check system/CPU state (frequency) over iterations ...
(generate_rnd_tmp_dir) - if 'yes', compile and run program in randomly generated temporal dir
or
(tmp_dir) - if !='', use this tmp_dir
(skip_clean_after) - if 'yes', do not remove run batch
(keep) - the same as skip_clean_after
(console) - if 'yes', output from program goes to console rather than file
(usually for testing/demos)
(cmd_key) - CMD key
(cmd_keys) - Select only from this list of available CMD keys
(dataset_uoa) - UOA of a dataset
(dataset_file) - dataset filename (if more than one inside one entry - suggest to have a UID in name)
(extra_dataset_tags) - list of extra data set tags (useful to set "small" during mobile phone crowdtuning)
(compiler_env_uoa) - env of a compiler
(compile_type) - static or dynamic (dynamic by default;
however takes compiler default_compile_type into account)
or
(static or dynamic)
(compiler_description_uoa) - compiler description UOA (module compiler),
if not set, there will be an attempt to detect the most close
by version
(compiler_vars) - dict with set up compiler flags (-Dvar=value) ->
they will update the ones defined as default in program description ...
(no_vars) - skip compiler vars (if you want to use default ones from the sources) ...
(remove_compiler_vars) - list of compiler vars to remove
(extra_env_for_compilation) - set environment variables before compiling program
(flags) - compile flags
(lflags) - link flags
(compiler_flags) - dict from compiler description (during autotuning),
if set, description should exist in input:choices_desc#compiler_flags# ...
(best_base_flag) - if 'yes', try to select best flag if available ...
(speed) - the same as above
(skip_best_base_flag) - if 'yes', do not use best base flag (useful for exploration of other levels -O2,-O1,etc)
(env_speed) - use environment flag for best optimization (CK_OPT_SPEED)
(shared_solution_cid) - CID-UID1-UID2 of the shared optimization solution at cKnowledge.org/repo
You can find it by clicking on a "Copy CID to clipboard" button of a given solution.
See example at http://cknowledge.org/repo/web.php?wcid=8289e0cf24346aa7:79bca2b76876b5c6
27bc42ee449e880e:79bca2b76876b5c6-8289e0cf24346aa7-f49649288ab0accd
(Ocid-uid1-uid2) Substituting compiler -Ox levels with shared solutions in above format
(-O27bc42ee449e880e:79bca2b76876b5c6-8289e0cf24346aa7-f49649288ab0accd)
(select_best_base_flag_for_first_iteration) - if 'yes' and autotuning_iteration=0
(env) - preset environment
(env.{KEY}) - set env[KEY]=value (user-friendly interface via CMD)
(deps.{KEY}) - set deps[KEY]["uoa']=value (user-friendly interface via CMD to set any given dependency)
(preset_deps) - dict with {"KEY":"UOA"} to preset dependencies
(remove_deps) [str] - a list of keys to remove from deps separated by comma.
Useful to run a given program workflow with an externally
installed dependency (compiler, library, model, tool).
(params) - dictionary with parameters passed via pre/post processing to third-party tools
for example, to configure ARM Workload | |
= df.read_metadata()
self.assertEqual(md['X-Timestamp'], normalize_timestamp(42))
def test_get_metadata_not_opened(self):
    """get_metadata() on a never-opened DiskFile raises DiskFileNotOpen."""
    unopened = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o')
    self.assertRaises(DiskFileNotOpen, unopened.get_metadata)
def test_not_opened(self):
    """Using the context manager without open() raises DiskFileNotOpen."""
    df = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o')
    raised = False
    try:
        with df:
            pass
    except DiskFileNotOpen:
        raised = True
    if not raised:
        self.fail("Expected DiskFileNotOpen exception")
def test_disk_file_default_disallowed_metadata(self):
    """Fast-POST metadata replaces user meta but preserves system meta."""
    # build an object carrying user meta plus a content type (ts 41)
    initial_meta = {'X-Object-Meta-Key1': 'Value1',
                    'Content-Type': 'text/garbage'}
    df = self._get_open_disk_file(ts=41, extra_metadata=initial_meta)
    with df.open():
        self.assertEqual('1024', df._metadata['Content-Length'])
    # overwrite with fresh metadata only (fast POST semantics, ts 42)
    df = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o')
    df.write_metadata({'X-Timestamp': normalize_timestamp(42),
                       'X-Object-Meta-Key2': 'Value2'})
    df = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o')
    with df.open():
        # non-fast-post updateable keys are preserved
        self.assertEqual('text/garbage', df._metadata['Content-Type'])
        # original fast-post updateable keys are removed
        self.assertNotIn('X-Object-Meta-Key1', df._metadata)
        # new fast-post updateable keys are added
        self.assertEqual('Value2', df._metadata['X-Object-Meta-Key2'])
def test_disk_file_reader_iter(self):
    """Iterating a reader yields the full body without quarantining."""
    df = self._create_test_file('1234567890')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    self.assertEqual('1234567890', ''.join(rdr))
    self.assertEqual([], msgs)
def test_disk_file_reader_iter_w_quarantine(self):
    """A size mismatch found while reading triggers the quarantine hook."""
    df = self._create_test_file('1234567890')

    def raise_dfq(msg):
        raise DiskFileQuarantined(msg)

    rdr = df.reader(_quarantine_hook=raise_dfq)
    # make the recorded object size disagree with what is on disk
    rdr._obj_size += 1
    self.assertRaises(DiskFileQuarantined, ''.join, rdr)
def test_disk_file_app_iter_corners(self):
    """app_iter_range with a None end reads to EOF from any start."""
    df = self._create_test_file('1234567890')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    self.assertEqual('1234567890', ''.join(rdr.app_iter_range(0, None)))
    self.assertEqual([], msgs)
    df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
    with df.open():
        rdr = df.reader()
        self.assertEqual('67890', ''.join(rdr.app_iter_range(5, None)))
def test_disk_file_app_iter_range_w_none(self):
    """A (None, None) range is treated as the whole object."""
    df = self._create_test_file('1234567890')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    self.assertEqual('1234567890',
                     ''.join(rdr.app_iter_range(None, None)))
    self.assertEqual([], msgs)
def test_disk_file_app_iter_partial_closes(self):
    """Exhausting a partial range closes the underlying file pointer."""
    df = self._create_test_file('1234567890')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    partial = rdr.app_iter_range(0, 5)
    self.assertEqual('12345', ''.join(partial))
    self.assertEqual([], msgs)
    self.assertIsNone(rdr._fp)
def test_disk_file_app_iter_ranges(self):
    """app_iter_ranges emits every requested range in the MIME body."""
    df = self._create_test_file('012345678911234567892123456789')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    body = ''.join(rdr.app_iter_ranges([(0, 10), (10, 20), (20, 30)],
                                       'plain/text',
                                       '\r\n--someheader\r\n', 30))
    for expected in ('0123456789', '1123456789', '2123456789'):
        self.assertIn(expected, body)
    self.assertEqual([], msgs)
def test_disk_file_app_iter_ranges_w_quarantine(self):
    """A full-object range read detects a size mismatch exactly once."""
    df = self._create_test_file('012345678911234567892123456789')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    # recorded size now disagrees with the bytes on disk
    rdr._obj_size += 1
    body = ''.join(rdr.app_iter_ranges([(0, 30)],
                                       'plain/text',
                                       '\r\n--someheader\r\n', 30))
    for expected in ('0123456789', '1123456789', '2123456789'):
        self.assertIn(expected, body)
    self.assertEqual(["Bytes read: 30, does not match metadata: 31"],
                     msgs)
def test_disk_file_app_iter_ranges_w_no_etag_quarantine(self):
    """A partial-range read does not trigger any quarantine."""
    df = self._create_test_file('012345678911234567892123456789')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    body = ''.join(rdr.app_iter_ranges([(0, 10)],
                                       'plain/text',
                                       '\r\n--someheader\r\n', 30))
    self.assertIn('0123456789', body)
    self.assertEqual([], msgs)
def test_disk_file_app_iter_ranges_edges(self):
    """Out-of-order edge ranges are each emitted verbatim."""
    df = self._create_test_file('012345678911234567892123456789')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    body = ''.join(rdr.app_iter_ranges([(3, 10), (0, 2)],
                                       'application/whatever',
                                       '\r\n--someheader\r\n', 30))
    self.assertIn('3456789', body)
    self.assertIn('01', body)
    self.assertEqual([], msgs)
def test_disk_file_large_app_iter_ranges(self):
    """Exercise the chunked code paths of app_iter_ranges on a large body."""
    long_str = '01234567890' * 65536
    target_strs = ['3456789', long_str[0:65590]]
    df = self._create_test_file(long_str)
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    it = rdr.app_iter_ranges([(3, 10), (0, 65590)], 'plain/text',
                             '5e816ff8b8b8e9a5d355497e5d9e0301', 655360)
    # app_iter_ranges yields only the body; prepend the multipart
    # header so the result parses as a complete MIME message.
    header = ('Content-Type: multipart/byteranges;boundary='
              '5e816ff8b8b8e9a5d355497e5d9e0301\r\n')
    value = header + ''.join(it)
    self.assertEqual([], msgs)
    parts = [p.get_payload(decode=True)
             for p in email.message_from_string(value).walk()][1:3]
    self.assertEqual(parts, target_strs)
def test_disk_file_app_iter_ranges_empty(self):
    """An empty list or None passed as ranges yields an empty body."""
    df = self._create_test_file('012345678911234567892123456789')
    msgs = []
    rdr = df.reader(_quarantine_hook=msgs.append)
    empty_it = rdr.app_iter_ranges([], 'application/whatever',
                                   '\r\n--someheader\r\n', 100)
    self.assertEqual('', ''.join(empty_it))
    df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
    with df.open():
        rdr = df.reader()
        none_it = rdr.app_iter_ranges(None, 'app/something',
                                      '\r\n--someheader\r\n', 150)
        self.assertEqual('', ''.join(none_it))
    self.assertEqual([], msgs)
def test_disk_file_mkstemp_creates_dir(self):
    """create() must recreate the device tmp dir if it went missing."""
    tmpdir = os.path.join(self.testdir, 'sda1', 'tmp')
    os.rmdir(tmpdir)
    df = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', 'o')
    with df.create():
        self.assertTrue(os.path.exists(tmpdir))
def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024,
                        csize=8, mark_deleted=False, prealloc=False,
                        ts=None, mount_check=False, extra_metadata=None):
    '''returns a DiskFile

    Builds an object of `fsize` zero bytes, optionally corrupts it
    according to `invalid_type`, then reopens and returns it.

    :param invalid_type: kind of corruption to inject after the write:
        'ETag', 'Content-Length', 'Bad-Content-Length',
        'Missing-Content-Length', 'Bad-X-Delete-At' (rewritten metadata),
        'Corrupt-Xattrs', 'Truncated-Xattrs', 'Missing-Name', 'Bad-Name'
        (raw xattr-level damage), or 'Zero-Byte' (truncated data file).
    :param mark_deleted: also write a tombstone at the same timestamp.
    :param prealloc: pass the object size to create() for preallocation.
    :param ts: explicit X-Timestamp; defaults to "now".
    :param csize: disk_chunk_size used when the file is reopened.
    '''
    df = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', obj_name)
    data = '0' * fsize
    etag = md5()
    if ts:
        timestamp = ts
    else:
        timestamp = normalize_timestamp(time())
    if prealloc:
        prealloc_size = fsize
    else:
        prealloc_size = None
    with df.create(size=prealloc_size) as writer:
        upload_size = writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(upload_size),
        }
        metadata.update(extra_metadata or {})
        writer.put(metadata)
        # metadata-level corruption: rewrite the stored metadata with a
        # deliberately wrong value while the write fd is still open
        if invalid_type == 'ETag':
            etag = md5()
            etag.update('1' + '0' * (fsize - 1))
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            diskfile.write_metadata(writer._fd, metadata)
        elif invalid_type == 'Content-Length':
            metadata['Content-Length'] = fsize - 1
            diskfile.write_metadata(writer._fd, metadata)
        elif invalid_type == 'Bad-Content-Length':
            metadata['Content-Length'] = 'zero'
            diskfile.write_metadata(writer._fd, metadata)
        elif invalid_type == 'Missing-Content-Length':
            del metadata['Content-Length']
            diskfile.write_metadata(writer._fd, metadata)
        elif invalid_type == 'Bad-X-Delete-At':
            metadata['X-Delete-At'] = 'bad integer'
            diskfile.write_metadata(writer._fd, metadata)
    if mark_deleted:
        df.delete(timestamp)
    # newest .data file(s) in the object's hash dir
    data_files = [os.path.join(df._datadir, fname)
                  for fname in sorted(os.listdir(df._datadir),
                                      reverse=True)
                  if fname.endswith('.data')]
    # xattr-level corruption: damage the serialized metadata directly
    if invalid_type == 'Corrupt-Xattrs':
        # We have to go below read_metadata/write_metadata to get proper
        # corruption.
        meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
        wrong_byte = 'X' if meta_xattr[0] != 'X' else 'Y'
        xattr.setxattr(data_files[0], "user.swift.metadata",
                       wrong_byte + meta_xattr[1:])
    elif invalid_type == 'Truncated-Xattrs':
        meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
        xattr.setxattr(data_files[0], "user.swift.metadata",
                       meta_xattr[:-1])
    elif invalid_type == 'Missing-Name':
        md = diskfile.read_metadata(data_files[0])
        del md['name']
        diskfile.write_metadata(data_files[0], md)
    elif invalid_type == 'Bad-Name':
        md = diskfile.read_metadata(data_files[0])
        md['name'] = md['name'] + 'garbage'
        diskfile.write_metadata(data_files[0], md)
    # rebuild the manager so csize/mount_check take effect, then reopen
    self.conf['disk_chunk_size'] = csize
    self.conf['mount_check'] = mount_check
    self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
    df = self.df_mgr.get_diskfile('sda1', '0', 'a', 'c', obj_name)
    df.open()
    if invalid_type == 'Zero-Byte':
        # truncate the data file after open() so reads see zero bytes
        fp = open(df._data_file, 'w')
        fp.close()
    df.unit_test_len = fsize
    return df
def test_keep_cache(self):
    """keep_cache suppresses drop_buffer_cache only for small objects."""
    # default read of a small file drops the buffer cache
    df = self._get_open_disk_file(fsize=65)
    with mock.patch("swift.obj.diskfile.drop_buffer_cache") as dbc:
        for _ in df.reader():
            pass
        self.assertTrue(dbc.called)
    # keep_cache=False behaves like the default
    df = self._get_open_disk_file(fsize=65)
    with mock.patch("swift.obj.diskfile.drop_buffer_cache") as dbc:
        for _ in df.reader(keep_cache=False):
            pass
        self.assertTrue(dbc.called)
    # keep_cache=True on a small object keeps it cached
    df = self._get_open_disk_file(fsize=65)
    with mock.patch("swift.obj.diskfile.drop_buffer_cache") as dbc:
        for _ in df.reader(keep_cache=True):
            pass
        self.assertFalse(dbc.called)
    # ... but a large object is dropped even with keep_cache=True
    df = self._get_open_disk_file(fsize=5 * 1024, csize=256)
    with mock.patch("swift.obj.diskfile.drop_buffer_cache") as dbc:
        for _ in df.reader(keep_cache=True):
            pass
        self.assertTrue(dbc.called)
def test_quarantine_valids(self):
    """Well-formed files are never quarantined, whatever the chunk size."""
    def verify(*args, **kwargs):
        try:
            reader = self._get_open_disk_file(**kwargs).reader()
            for _ in reader:
                pass
        except DiskFileQuarantined:
            self.fail(
                "Unexpected quarantining occurred: args=%r, kwargs=%r" % (
                    args, kwargs))

    verify(obj_name='1')
    verify(obj_name='2', csize=1)
    verify(obj_name='3', csize=100000)
def run_quarantine_invalids(self, invalid_type):
    """Drive quarantine checks for one kind of on-disk corruption.

    For the given ``invalid_type`` (see _get_open_disk_file) this opens
    the corrupted object with several chunk sizes and verifies that the
    corruption is either rejected at open() time or reported exactly
    once through the reader's quarantine hook during iteration.
    """
    def verify(*args, **kwargs):
        # these corruption kinds are expected to fail during open() ...
        open_exc = invalid_type in ('Content-Length', 'Bad-Content-Length',
                                    'Corrupt-Xattrs', 'Truncated-Xattrs',
                                    'Missing-Name', 'Bad-X-Delete-At')
        # ... while a bad name raises DiskFileCollision instead
        open_collision = invalid_type == 'Bad-Name'
        reader = None
        quarantine_msgs = []
        try:
            df = self._get_open_disk_file(**kwargs)
            reader = df.reader(_quarantine_hook=quarantine_msgs.append)
        except DiskFileQuarantined as err:
            if not open_exc:
                self.fail(
                    "Unexpected DiskFileQuarantine raised: %r" % err)
            return
        except DiskFileCollision as err:
            if not open_collision:
                self.fail(
                    "Unexpected DiskFileCollision raised: %r" % err)
            return
        else:
            if open_exc:
                self.fail("Expected DiskFileQuarantine exception")
        try:
            # iteration itself must never raise; mid-read corruption is
            # reported through the hook
            for chunk in reader:
                pass
        except DiskFileQuarantined as err:
            self.fail("Unexpected DiskFileQuarantine raised: :%r" % err)
        else:
            if not open_exc:
                self.assertEqual(1, len(quarantine_msgs))

    verify(invalid_type=invalid_type, obj_name='1')
    verify(invalid_type=invalid_type, obj_name='2', csize=1)
    verify(invalid_type=invalid_type, obj_name='3', csize=100000)
    verify(invalid_type=invalid_type, obj_name='4')

    def verify_air(params, start=0, adjustment=0):
        """verify (a)pp (i)ter (r)ange"""
        open_exc = invalid_type in ('Content-Length', 'Bad-Content-Length',
                                    'Corrupt-Xattrs', 'Truncated-Xattrs',
                                    'Missing-Name', 'Bad-X-Delete-At')
        open_collision = invalid_type == 'Bad-Name'
        reader = None
        try:
            df = self._get_open_disk_file(**params)
            reader = df.reader()
        except DiskFileQuarantined as err:
            if not open_exc:
                self.fail(
                    "Unexpected DiskFileQuarantine raised: %r" % err)
            return
        except DiskFileCollision as err:
            if not open_collision:
                self.fail(
                    "Unexpected DiskFileCollision raised: %r" % err)
            return
        else:
            if open_exc:
                self.fail("Expected DiskFileQuarantine exception")
        try:
            # range reads, including past-EOF adjustments, must not raise
            for chunk in reader.app_iter_range(
                    start,
                    df.unit_test_len + adjustment):
                pass
        except DiskFileQuarantined as err:
            self.fail("Unexpected DiskFileQuarantine raised: :%r" % err)

    verify_air(dict(invalid_type=invalid_type, obj_name='5'))
    verify_air(dict(invalid_type=invalid_type, obj_name='6'), 0, 100)
    verify_air(dict(invalid_type=invalid_type, obj_name='7'), 1)
    verify_air(dict(invalid_type=invalid_type, obj_name='8'), 0, -1)
    verify_air(dict(invalid_type=invalid_type, obj_name='8'), 1, 1)
def test_quarantine_corrupt_xattrs(self):
    """A flipped byte in the metadata xattr must quarantine the object."""
    self.run_quarantine_invalids('Corrupt-Xattrs')
def test_quarantine_truncated_xattrs(self):
    """A truncated metadata xattr must quarantine the object."""
    self.run_quarantine_invalids('Truncated-Xattrs')
def test_quarantine_invalid_etag(self):
    """A stored ETag that mismatches the data must quarantine."""
    self.run_quarantine_invalids('ETag')
def test_quarantine_invalid_missing_name(self):
    """Metadata missing the 'name' key must quarantine."""
    self.run_quarantine_invalids('Missing-Name')
def test_quarantine_invalid_bad_name(self):
    """A 'name' key that disagrees with the path must be rejected."""
    self.run_quarantine_invalids('Bad-Name')
def test_quarantine_invalid_bad_x_delete_at(self):
    """A non-integer X-Delete-At must quarantine."""
    self.run_quarantine_invalids('Bad-X-Delete-At')
def test_quarantine_invalid_content_length(self):
    """A Content-Length that mismatches the file size must quarantine."""
    self.run_quarantine_invalids('Content-Length')
def test_quarantine_invalid_content_length_bad(self):
    """A non-numeric Content-Length must quarantine."""
    self.run_quarantine_invalids('Bad-Content-Length')
def test_quarantine_invalid_zero_byte(self):
    """A truncated (zero-byte) data file must quarantine."""
    self.run_quarantine_invalids('Zero-Byte')
def test_quarantine_deleted_files(self):
    """Deletion wins over corruption: a tombstoned bad file is just gone."""
    # a live file with a bad Content-Length is quarantined on open ...
    try:
        self._get_open_disk_file(invalid_type='Content-Length')
    except DiskFileQuarantined:
        pass
    else:
        self.fail("Expected DiskFileQuarantined exception")
    # ... but once deleted, the same corruption reports "does not exist"
    try:
        self._get_open_disk_file(invalid_type='Content-Length',
                                 mark_deleted=True)
    except DiskFileQuarantined as err:
        self.fail("Unexpected DiskFileQuarantined exception"
                  " encountered: %r" % err)
    except DiskFileNotExist:
        pass
    else:
        self.fail("Expected DiskFileNotExist exception")
    # and a second open still yields DiskFileNotExist
    try:
        self._get_open_disk_file(invalid_type='Content-Length',
                                 mark_deleted=True)
    except DiskFileNotExist:
        pass
    else:
        self.fail("Expected DiskFileNotExist exception")
def test_quarantine_missing_content_length(self):
    """Metadata without Content-Length must quarantine on open."""
    self.assertRaises(
        DiskFileQuarantined,
        self._get_open_disk_file,
        invalid_type='Missing-Content-Length')
def test_quarantine_bad_content_length(self):
    """A non-numeric Content-Length must quarantine on open."""
    self.assertRaises(
        DiskFileQuarantined,
        self._get_open_disk_file,
        invalid_type='Bad-Content-Length')
def test_quarantine_fstat_oserror(self):
    """An OSError from os.fstat while opening must quarantine the file."""
    invocations = [0]
    orig_os_fstat = os.fstat

    def bad_fstat(fd):
        # fail only on the 4th call — empirically the one made during
        # DiskFile open/verification (see FIXME below); earlier calls
        # must succeed so the fixture can be built at all
        invocations[0] += 1
        if invocations[0] == 4:
            # FIXME - yes, this an icky way to get code coverage ... worth
            # it?
            raise OSError()
        return orig_os_fstat(fd)

    with mock.patch('os.fstat', bad_fstat):
        self.assertRaises(
            DiskFileQuarantined,
            self._get_open_disk_file)
def test_quarantine_hashdir_not_a_directory(self):
    """A plain file where the hash dir should be quarantines the object."""
    df = self._create_test_file('1234567890', account="abc",
                                container='123', obj='xyz')
    hashdir = df._datadir
    # replace the object's hash directory with a plain file
    rmtree(hashdir)
    with open(hashdir, 'w'):
        pass
    df = self.df_mgr.get_diskfile('sda', '0', 'abc', '123', 'xyz')
    self.assertRaises(DiskFileQuarantined, df.open)
    # make sure the right thing got quarantined; the suffix dir should not
    # have moved, as that could have many objects in it
    self.assertFalse(os.path.exists(hashdir))
| |
# <gh_stars>1-10  (scraper artifact, kept as a comment so the module parses)
#!/usr/bin/python
"""
Data structure:
Each task/step/ticket is an Item instance.
Each item may have one parent and several children nodes.
"""
import os
import sys
import time
import re
import sqlite3
from datetime import datetime
DATE_FORMAT = '%a %b %d %H:%M:%S %Y %Z'
__version__ = '0.3.0'
__author__ = "<NAME>"
__url__ = 'https://github.com/dudarev/progressio'
PROGRESS_DB_FILE_NAME = 'progress.db'
class Item(object):
    """A single task/step/ticket node.

    The following fields are stored in the database:
        pk (id)  - int
        children - str - a list of children ids, order is important
        title    - str - title
        added_at - datetime
        is_done  - boolean
        done_at  - datetime
    """

    def __init__(self, pk, children=None, title=None, added_at=None,
                 is_done=False, done_at=None):
        self.pk = int(pk)
        if children is not None:
            # parse "1,2,3" into [1, 2, 3]; empty fragments (e.g. from a
            # trailing comma) are skipped
            self.children = [int(c) for c in children.split(',') if c]
        else:
            self.children = []
        self.title = title
        self.added_at = added_at
        self.is_done = is_done
        self.done_at = done_at

    def __str__(self):
        return self.__unicode__()

    def __repr__(self):
        return self.__unicode__()

    def __unicode__(self):
        return '{} - {}'.format(self.pk, self.title)

    def __cmp__(self, other):
        # Python 2 ordering hook: items sort by primary key
        return cmp(int(self.pk), int(other.pk))

    @property
    def children_str(self):
        """Children ids as a comma-separated string.

        Deduplicates while preserving insertion order: the previous
        implementation ran the ids through set(), which produced a
        nondeterministic order even though the class docstring states
        that child order is important.
        """
        seen = set()
        ordered = []
        for child_pk in self.children:
            if child_pk not in seen:
                seen.add(child_pk)
                ordered.append(str(child_pk))
        return ','.join(ordered)
def _create_db_if_needed():
    """
    Checks if db file exists. Creates it if it does not exist.

    :returns: a string with message describing what happened.
    """
    if os.path.exists(PROGRESS_DB_FILE_NAME):
        return 'DB file exists'
    connection = sqlite3.connect(PROGRESS_DB_FILE_NAME)
    cursor = connection.cursor()
    cursor.execute(
        "CREATE TABLE item("
        "pk INTEGER PRIMARY KEY, children, title, added_at, "
        "is_done DEFAULT FALSE, done_at)")
    # root item that has pk=0 is always considered done
    cursor.execute(
        "INSERT INTO item(pk, children, title, is_done) "
        "values(0, '', 'root', 1)")
    connection.commit()
    connection.close()
    return 'DB file did not exist and was created.'
def count_items():
    """
    :returns: a dictionary with counts in fields 'total', 'done',
              'done_today' and 'done_yesterday'.
    """
    con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
    cur = con.cursor()
    # do not count root
    cur.execute("SELECT COUNT(*) FROM item WHERE pk<>0")
    total = cur.fetchone()[0]
    cur.execute("SELECT COUNT(*) FROM item WHERE is_done='TRUE' AND pk<>0")
    done = cur.fetchone()[0]
    # previously this connection was never closed (resource leak);
    # load_items() below opens its own connection
    con.close()
    done_items = load_items(is_done=True)
    done_today = 0
    done_yesterday = 0
    for i in done_items:
        date_item = datetime.strptime(i.done_at, DATE_FORMAT)
        date_now = datetime.now()
        if date_now.date() == date_item.date():
            done_today += 1
        if (date_now.date() - date_item.date()).days == 1:
            done_yesterday += 1
    return {
        'done': done,
        'total': total,
        'done_today': done_today,
        'done_yesterday': done_yesterday,
    }
def load_items(is_done=False):
    """Load items from the database.

    :param is_done: when True return finished items; when False (default)
                    return pending ones. (The old docstring claimed the
                    function always returned NOT-done items, which was
                    wrong for is_done=True.)
    :returns: a list of Item instances.
    """
    con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
    cur = con.cursor()
    # parameterized query; the stored flag values are the strings
    # 'TRUE'/'FALSE' written by done()/active()
    flag = 'TRUE' if is_done else 'FALSE'
    cur.execute("SELECT * FROM item WHERE is_done=?", (flag,))
    item_instances = [Item(*row) for row in cur.fetchall()]
    con.close()
    return item_instances
def parse_item_from_string(line):
    """
    :param line: format: pk - title
    :returns: Item with such pk and title.
    """
    item_re = re.compile(r'(\w+) - (.+)')
    pk, title = item_re.findall(line)[0]
    # BUG FIX: Item's second positional parameter is `children`, so the
    # old call Item(pk, title) fed the title into the children parser
    # (crashing on any non-numeric title); pass it by keyword instead.
    return Item(pk, title=title)
def get_item(pk):
    """
    :returns: Item for a given :param pk:, primary key.
    :returns: None if such item does not exist.
    """
    con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
    cur = con.cursor()
    # parameterized query instead of str.format (SQL injection / quoting)
    cur.execute('SELECT * FROM item WHERE pk=?', (pk,))
    item_data = cur.fetchone()
    # close before the early return: the old code leaked the connection
    # whenever the item was missing
    con.close()
    if item_data is None:
        return None
    return Item(*item_data)
def active(pk_active=None):
"""
Mark an item `pk_active` as active.
If item is not specified as a variable get it from stdin.
"""
_create_db_if_needed()
try:
if pk_active is None:
if len(sys.argv) > 2:
pk_active = sys.argv[2]
else:
print "Specify item to make active."
return
con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
cur = con.cursor()
query = "UPDATE item SET is_done='FALSE' WHERE pk={pk_active}".format(
pk_active=pk_active)
cur.execute(query)
con.commit()
con.close()
print "Item {} is marked as active.".format(pk_active)
except sqlite3.OperationalError, e:
print "Database error:", e
def add(item_title=None, parent_pk=0):
"""
Adds a item - step/task/goal...
Title is obtained from sys.argv.
If no parent_pk is specified item is added to root (pk=0).
"""
if not item_title:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-t", "--title", dest="title")
parser.add_option("-p", "--parent", dest="parent_pk")
(opts, args) = parser.parse_args(sys.argv[2:])
if not getattr(opts, "title"):
sys.stderr.write('Error: no title is specified (use flag -t)\n')
exit(1)
item_title = opts.title
if opts.parent_pk:
parent_pk = opts.parent_pk
# save new item and update its parent in database
_create_db_if_needed()
parent = get_item(parent_pk)
con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
cur = con.cursor()
added_at = time.strftime(DATE_FORMAT)
query = "INSERT INTO item(title, added_at) values('{title}', '{added_at}')".format(
title=item_title,
added_at=added_at)
cur.execute(query)
con.commit()
pk = cur.lastrowid
parent.children.append(pk)
children = ','.join(map(str, parent.children))
query = "UPDATE item SET children='{children}' WHERE pk={parent_pk}".format(
children=children, parent_pk=parent_pk)
cur.execute(query)
con.commit()
con.close()
print "Added item:"
item = get_item(pk)
print item
def count():
    """Print done/total counts plus today's and yesterday's progress."""
    counts = count_items()
    print "done: {}".format(counts['done'])
    print "total items: {}".format(counts['total'])
    print ""
    print "done today: {}".format(counts['done_today'])
    print "done yesterday: {}".format(counts['done_yesterday'])
def done(pk_done=None):
"""
Mark an item `pk_done` as done.
If item is not specified as a variable get it from stdin.
"""
_create_db_if_needed()
try:
if pk_done is None:
if len(sys.argv) > 2:
pk_done = sys.argv[2]
else:
print "Specify item done."
return
print "Marking item %s as done." % pk_done
con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
cur = con.cursor()
done_at = time.strftime(DATE_FORMAT)
query = "UPDATE item SET done_at='{done_at}', is_done='TRUE' WHERE pk={pk_done}".format(
done_at=done_at, pk_done=pk_done)
cur.execute(query)
con.commit()
con.close()
except sqlite3.OperationalError, e:
print "Database error:", e
def delete(pk_delete=None):
"""
Remove item with `pk_delete` from database.
"""
try:
if pk_delete is None:
if len(sys.argv) > 2:
pk_delete = sys.argv[2]
else:
print "Specify item to delete."
return
sys.stdout.write(
"Do you really want to delete item {}? y/n [n] ".format(pk_delete)
)
choice = raw_input().lower().strip()
if choice == 'y':
con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
cur = con.cursor()
query = "DELETE FROM item WHERE pk='{pk_delete}'".format(
pk_delete=pk_delete)
cur.execute(query)
con.commit()
con.close()
print 'Deleted item {}'.format(pk_delete)
except sqlite3.OperationalError, e:
print "Database error:", e
def help():
    """
    Prints usage help for all commands to stdout.
    (The name shadows the builtin help(); kept as the public command name.)
    """
    print "usage: p [COMMAND [ARGS]]"
    print ""
    print " add [-p id] -t TITLE - add an item with TITLE, flag -p points to parent id"
    print " count - count items done and to be done"
    print " delete n - delete item with id n"
    print " done n - mark item with id n as done"
    print " help - print help"
    print " log [-d] - log items, flag -d for done"
    print " move n -p m - move item n to parent m"
    print " version - version of the program (-v and --version also work)"
def log():
    """
    log [-d]

    Print pending items one per line; with -d print done items instead.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-d', dest='print_done', default=False, action='store_true')
    # options start at argv[2]: argv[1] is the 'log' command itself
    (opts, args) = parser.parse_args(sys.argv[2:])
    print "print done:", opts.print_done
    for i in load_items(opts.print_done):
        print str(i)
def move(item_pk=None, new_parent_pk=None):
    """
    Move item with `item_pk` to new parent with `new_parent_pk`.

    When not passed as arguments, the item id is read from sys.argv[2]
    and the new parent from the -p/--parent flag.
    """
    if item_pk is None:
        if len(sys.argv) > 2:
            try:
                item_pk = int(sys.argv[2])
            except ValueError:
                print "Incorrect item value"
                exit(1)
        else:
            print "Specify item to move."
            exit(1)
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-p", "--parent", dest="new_parent_pk")
    (opts, args) = parser.parse_args(sys.argv[2:])
    new_parent_pk = getattr(opts, "new_parent_pk")
    if new_parent_pk is None:
        sys.stderr.write('Error: no new parent is specified (use flag -p)\n')
        exit(1)
    items = load_items()
    queries = []
    # detach the item from its current parent.
    # NOTE(review): load_items() only returns pending items, so a parent
    # that is already done would keep a stale child id — confirm intended.
    for i in items:
        if item_pk in i.children:
            i.children.remove(item_pk)
            queries.append("UPDATE item SET children='{children}' WHERE pk={old_parent_pk}".format(
                children=i.children_str, old_parent_pk=i.pk
            ))
            break
    print 'new_parent_pk', new_parent_pk
    # attach the item to the new parent
    new_parent_item = get_item(new_parent_pk)
    new_parent_item.children.append(item_pk)
    print 'new_parent_imtem.children_str=', new_parent_item.children_str
    queries.append("UPDATE item SET children='{children}' WHERE pk={new_parent_pk}".format(
        children=new_parent_item.children_str, new_parent_pk=new_parent_pk
    ))
    con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
    cur = con.cursor()
    for q in queries:
        print 'q=', q
        cur.execute(q)
    con.commit()
    con.close()
    return
def show_one_item(item, items_dict={}, tab=''):
"""
Prints `item` and all its subitems that are in `items_dict`.
The item is tabulated with `tab` characters.
"""
print tab + str(item)
for pk in item.children:
if pk in items_dict:
show_one_item(items_dict[pk], items_dict, tab=tab + ' ')
def show_items():
    """
    Shows items in terminal.
    """
    items = load_items()
    items_dict = {}
    # ids that appear as someone's child are not first-level items
    not_first_level = set()
    for item in items:
        not_first_level.update(item.children)
        items_dict[item.pk] = item
    # print each top-level item along with its subtree
    for item in items:
        if item.pk not in not_first_level:
            show_one_item(item, items_dict)
def version():
    """
    Shows version of the program.
    """
    # banner: program version and project homepage
    print 'Progressio {version}'.format(version=__version__)
    print '<{url}>'.format(url=__url__)
def main():
# check if db exists and create it if confirmed
if not os.path.exists(PROGRESS_DB_FILE_NAME):
sys.stdout.write(
"{0} does not exist. Create? y/n [n] ".format(
PROGRESS_DB_FILE_NAME))
choice = raw_input().lower()
if choice == '' or choice == 'n':
return
_create_db_if_needed()
print "created %s file" % PROGRESS_DB_FILE_NAME
args = sys.argv
command = None
if len(args) > 1:
command = args[1]
if command == 'active':
active()
return
if command == 'add':
add()
return
if command == 'done':
done()
return
if command == 'delete':
delete()
return
if command == 'count':
count()
return
if command in | |
def __init__(self, request_id=None, success=None):
    # request_id: identifier of the API request this response answers
    self.request_id = request_id
    # success: success flag of the API call
    self.success = success
def validate(self):
    """Ensure the required response fields are present."""
    self.validate_required(self.request_id, 'request_id')
    self.validate_required(self.success, 'success')
def to_map(self):
    """Serialize this model into a dict keyed by wire-format names."""
    return {
        'RequestId': self.request_id,
        'Success': self.success,
    }
def from_map(self, map={}):
    """Populate fields from a wire-format dict; returns self for chaining."""
    # `map` shadows the builtin, but the name is part of the generated API
    for attr, key in (('request_id', 'RequestId'), ('success', 'Success')):
        setattr(self, attr, map.get(key))
    return self
class DescribeStoragePackagesRequest(TeaModel):
    """Request model for the DescribeStoragePackages API."""

    def __init__(self, region_id=None, page_size=None, use_utcdate_time=None,
                 page_number=None):
        self.region_id = region_id
        self.page_size = page_size
        self.use_utcdate_time = use_utcdate_time
        self.page_number = page_number

    def validate(self):
        # only the region is mandatory; paging fields are optional
        self.validate_required(self.region_id, 'region_id')

    def to_map(self):
        """Serialize to a dict keyed by wire-format names."""
        return {
            'RegionId': self.region_id,
            'PageSize': self.page_size,
            'UseUTCDateTime': self.use_utcdate_time,
            'PageNumber': self.page_number,
        }

    def from_map(self, map={}):
        """Populate fields from a wire-format dict; returns self."""
        self.region_id = map.get('RegionId')
        self.page_size = map.get('PageSize')
        self.use_utcdate_time = map.get('UseUTCDateTime')
        self.page_number = map.get('PageNumber')
        return self
class DescribeStoragePackagesResponse(TeaModel):
    """Paged response for the DescribeStoragePackages API."""

    def __init__(self, request_id=None, total_count=None, page_size=None, page_number=None, packages=None):
        self.request_id = request_id
        self.total_count = total_count
        self.page_size = page_size
        self.page_number = page_number
        # packages: DescribeStoragePackagesResponsePackages or None
        self.packages = packages

    def validate(self):
        """Every field is required; recurse into the nested packages model."""
        for value, name in ((self.request_id, 'request_id'),
                            (self.total_count, 'total_count'),
                            (self.page_size, 'page_size'),
                            (self.page_number, 'page_number'),
                            (self.packages, 'packages')):
            self.validate_required(value, name)
        if self.packages:
            self.packages.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary, recursing into packages."""
        result = {
            'RequestId': self.request_id,
            'TotalCount': self.total_count,
            'PageSize': self.page_size,
            'PageNumber': self.page_number,
        }
        result['Packages'] = self.packages.to_map() if self.packages is not None else None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.request_id = map.get('RequestId')
        self.total_count = map.get('TotalCount')
        self.page_size = map.get('PageSize')
        self.page_number = map.get('PageNumber')
        if map.get('Packages') is not None:
            self.packages = DescribeStoragePackagesResponsePackages().from_map(map['Packages'])
        else:
            self.packages = None
        return self
class DescribeStoragePackagesResponsePackagesPackage(TeaModel):
    """A single storage package entry of the DescribeStoragePackages response."""

    def __init__(self, start_time=None, storage_type=None, status=None, file_system_id=None, package_id=None, expired_time=None, size=None):
        self.start_time = start_time
        self.storage_type = storage_type
        self.status = status
        self.file_system_id = file_system_id
        self.package_id = package_id
        self.expired_time = expired_time
        self.size = size

    def validate(self):
        """All fields are mandatory."""
        for name in ('start_time', 'storage_type', 'status', 'file_system_id',
                     'package_id', 'expired_time', 'size'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to the wire-format dictionary."""
        return {
            'StartTime': self.start_time,
            'StorageType': self.storage_type,
            'Status': self.status,
            'FileSystemId': self.file_system_id,
            'PackageId': self.package_id,
            'ExpiredTime': self.expired_time,
            'Size': self.size,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        for attr, key in (('start_time', 'StartTime'),
                          ('storage_type', 'StorageType'),
                          ('status', 'Status'),
                          ('file_system_id', 'FileSystemId'),
                          ('package_id', 'PackageId'),
                          ('expired_time', 'ExpiredTime'),
                          ('size', 'Size')):
            setattr(self, attr, map.get(key))
        return self
class DescribeStoragePackagesResponsePackages(TeaModel):
    """Wrapper holding the list of storage-package entries.

    Bug fix: the original ``__init__`` silently discarded its ``package``
    argument and always started with an empty list; the argument is now
    honored (the empty-list default is kept so ``validate``/``to_map``
    can iterate safely).
    """

    def __init__(self, package=None):
        # list of DescribeStoragePackagesResponsePackagesPackage
        self.package = package if package is not None else []

    def validate(self):
        """The list itself is required; validate each non-empty entry."""
        self.validate_required(self.package, 'package')
        if self.package:
            for item in self.package:
                if item:
                    item.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary (None list stays None)."""
        result = {}
        if self.package is not None:
            result['Package'] = [item.to_map() if item else None for item in self.package]
        else:
            result['Package'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.package = []
        if map.get('Package') is not None:
            for item in map.get('Package'):
                self.package.append(
                    DescribeStoragePackagesResponsePackagesPackage().from_map(item))
        else:
            self.package = None
        return self
class DescribeFileSystemStatisticsRequest(TeaModel):
    """Paging request for the DescribeFileSystemStatistics API."""

    def __init__(self, page_size=None, page_number=None):
        self.page_size = page_size
        self.page_number = page_number

    def validate(self):
        """No required fields."""
        pass

    def to_map(self):
        """Serialize to the wire-format dictionary."""
        return {'PageSize': self.page_size, 'PageNumber': self.page_number}

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.page_size, self.page_number = map.get('PageSize'), map.get('PageNumber')
        return self
class DescribeFileSystemStatisticsResponse(TeaModel):
    """Paged response for the DescribeFileSystemStatistics API."""

    def __init__(self, request_id=None, total_count=None, page_size=None, page_number=None, file_system_statistics=None):
        self.request_id = request_id
        self.total_count = total_count
        self.page_size = page_size
        self.page_number = page_number
        # file_system_statistics: DescribeFileSystemStatisticsResponseFileSystemStatistics or None
        self.file_system_statistics = file_system_statistics

    def validate(self):
        """Every field is required; recurse into the nested statistics model."""
        for value, name in ((self.request_id, 'request_id'),
                            (self.total_count, 'total_count'),
                            (self.page_size, 'page_size'),
                            (self.page_number, 'page_number'),
                            (self.file_system_statistics, 'file_system_statistics')):
            self.validate_required(value, name)
        if self.file_system_statistics:
            self.file_system_statistics.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary, recursing into statistics."""
        result = {
            'RequestId': self.request_id,
            'TotalCount': self.total_count,
            'PageSize': self.page_size,
            'PageNumber': self.page_number,
        }
        result['FileSystemStatistics'] = (
            self.file_system_statistics.to_map()
            if self.file_system_statistics is not None else None)
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.request_id = map.get('RequestId')
        self.total_count = map.get('TotalCount')
        self.page_size = map.get('PageSize')
        self.page_number = map.get('PageNumber')
        if map.get('FileSystemStatistics') is not None:
            self.file_system_statistics = (
                DescribeFileSystemStatisticsResponseFileSystemStatistics()
                .from_map(map['FileSystemStatistics']))
        else:
            self.file_system_statistics = None
        return self
class DescribeFileSystemStatisticsResponseFileSystemStatisticsFileSystemStatistic(TeaModel):
    """Per-file-system-type statistics entry."""

    def __init__(self, file_system_type=None, total_count=None, metered_size=None, expired_count=None, expiring_count=None):
        self.file_system_type = file_system_type
        self.total_count = total_count
        self.metered_size = metered_size
        self.expired_count = expired_count
        self.expiring_count = expiring_count

    def validate(self):
        """All fields are mandatory."""
        for name in ('file_system_type', 'total_count', 'metered_size',
                     'expired_count', 'expiring_count'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to the wire-format dictionary."""
        return {
            'FileSystemType': self.file_system_type,
            'TotalCount': self.total_count,
            'MeteredSize': self.metered_size,
            'ExpiredCount': self.expired_count,
            'ExpiringCount': self.expiring_count,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        for attr, key in (('file_system_type', 'FileSystemType'),
                          ('total_count', 'TotalCount'),
                          ('metered_size', 'MeteredSize'),
                          ('expired_count', 'ExpiredCount'),
                          ('expiring_count', 'ExpiringCount')):
            setattr(self, attr, map.get(key))
        return self
class DescribeFileSystemStatisticsResponseFileSystemStatistics(TeaModel):
    """Wrapper holding the list of per-type file-system statistics.

    Bug fix: the original ``__init__`` silently discarded its
    ``file_system_statistic`` argument and always started with an empty
    list; the argument is now honored (the empty-list default is kept so
    ``validate``/``to_map`` can iterate safely).
    """

    def __init__(self, file_system_statistic=None):
        # list of ...FileSystemStatisticsFileSystemStatistic
        self.file_system_statistic = (
            file_system_statistic if file_system_statistic is not None else [])

    def validate(self):
        """The list itself is required; validate each non-empty entry."""
        self.validate_required(self.file_system_statistic, 'file_system_statistic')
        if self.file_system_statistic:
            for item in self.file_system_statistic:
                if item:
                    item.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary (None list stays None)."""
        result = {}
        if self.file_system_statistic is not None:
            result['FileSystemStatistic'] = [
                item.to_map() if item else None for item in self.file_system_statistic]
        else:
            result['FileSystemStatistic'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.file_system_statistic = []
        if map.get('FileSystemStatistic') is not None:
            for item in map.get('FileSystemStatistic'):
                self.file_system_statistic.append(
                    DescribeFileSystemStatisticsResponseFileSystemStatisticsFileSystemStatistic()
                    .from_map(item))
        else:
            self.file_system_statistic = None
        return self
class DescribeLogAnalysisRequest(TeaModel):
    """Request for the DescribeLogAnalysis API."""

    def __init__(self, region_id=None):
        self.region_id = region_id

    def validate(self):
        """region_id is required."""
        self.validate_required(self.region_id, 'region_id')

    def to_map(self):
        """Serialize to the wire-format dictionary."""
        return {'RegionId': self.region_id}

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.region_id = map.get('RegionId')
        return self
class DescribeLogAnalysisResponse(TeaModel):
    """Paged response for the DescribeLogAnalysis API."""

    def __init__(self, request_id=None, code=None, total_count=None, page_size=None, page_number=None, analyses=None):
        self.request_id = request_id
        self.code = code
        self.total_count = total_count
        self.page_size = page_size
        self.page_number = page_number
        # analyses: DescribeLogAnalysisResponseAnalyses or None
        self.analyses = analyses

    def validate(self):
        """Every field is required; recurse into the nested analyses model."""
        for value, name in ((self.request_id, 'request_id'),
                            (self.code, 'code'),
                            (self.total_count, 'total_count'),
                            (self.page_size, 'page_size'),
                            (self.page_number, 'page_number'),
                            (self.analyses, 'analyses')):
            self.validate_required(value, name)
        if self.analyses:
            self.analyses.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary, recursing into analyses."""
        result = {
            'RequestId': self.request_id,
            'Code': self.code,
            'TotalCount': self.total_count,
            'PageSize': self.page_size,
            'PageNumber': self.page_number,
        }
        result['Analyses'] = self.analyses.to_map() if self.analyses is not None else None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.request_id = map.get('RequestId')
        self.code = map.get('Code')
        self.total_count = map.get('TotalCount')
        self.page_size = map.get('PageSize')
        self.page_number = map.get('PageNumber')
        if map.get('Analyses') is not None:
            self.analyses = DescribeLogAnalysisResponseAnalyses().from_map(map['Analyses'])
        else:
            self.analyses = None
        return self
class DescribeLogAnalysisResponseAnalysesAnalysisMetaValue(TeaModel):
    """SLS log-store binding details of one analysis entry."""

    def __init__(self, logstore=None, region=None, project=None, role_arn=None):
        self.logstore = logstore
        self.region = region
        self.project = project
        self.role_arn = role_arn

    def validate(self):
        """All fields are mandatory."""
        for name in ('logstore', 'region', 'project', 'role_arn'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to the wire-format dictionary."""
        return {
            'Logstore': self.logstore,
            'Region': self.region,
            'Project': self.project,
            'RoleArn': self.role_arn,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        for attr, key in (('logstore', 'Logstore'), ('region', 'Region'),
                          ('project', 'Project'), ('role_arn', 'RoleArn')):
            setattr(self, attr, map.get(key))
        return self
class DescribeLogAnalysisResponseAnalysesAnalysis(TeaModel):
    """One analysis entry: a meta key plus its structured value."""

    def __init__(self, meta_key=None, meta_value=None):
        self.meta_key = meta_key
        # meta_value: DescribeLogAnalysisResponseAnalysesAnalysisMetaValue or None
        self.meta_value = meta_value

    def validate(self):
        """Both fields are required; recurse into the nested value model."""
        self.validate_required(self.meta_key, 'meta_key')
        self.validate_required(self.meta_value, 'meta_value')
        if self.meta_value:
            self.meta_value.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary, recursing into MetaValue."""
        result = {'MetaKey': self.meta_key}
        result['MetaValue'] = self.meta_value.to_map() if self.meta_value is not None else None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.meta_key = map.get('MetaKey')
        if map.get('MetaValue') is not None:
            self.meta_value = (
                DescribeLogAnalysisResponseAnalysesAnalysisMetaValue()
                .from_map(map['MetaValue']))
        else:
            self.meta_value = None
        return self
class DescribeLogAnalysisResponseAnalyses(TeaModel):
    """Wrapper holding the list of log-analysis entries.

    Bug fix: the original ``__init__`` silently discarded its ``analysis``
    argument and always started with an empty list; the argument is now
    honored (the empty-list default is kept so ``validate``/``to_map``
    can iterate safely).
    """

    def __init__(self, analysis=None):
        # list of DescribeLogAnalysisResponseAnalysesAnalysis
        self.analysis = analysis if analysis is not None else []

    def validate(self):
        """The list itself is required; validate each non-empty entry."""
        self.validate_required(self.analysis, 'analysis')
        if self.analysis:
            for item in self.analysis:
                if item:
                    item.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary (None list stays None)."""
        result = {}
        if self.analysis is not None:
            result['Analysis'] = [item.to_map() if item else None for item in self.analysis]
        else:
            result['Analysis'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.analysis = []
        if map.get('Analysis') is not None:
            for item in map.get('Analysis'):
                self.analysis.append(
                    DescribeLogAnalysisResponseAnalysesAnalysis().from_map(item))
        else:
            self.analysis = None
        return self
class DescribeMountedClientsRequest(TeaModel):
    """Request parameters for the DescribeMountedClients API."""

    def __init__(self, region_id=None, page_size=None, file_system_id=None, client_ip=None, mount_target_domain=None, page_number=None):
        self.region_id = region_id
        self.page_size = page_size
        self.file_system_id = file_system_id
        self.client_ip = client_ip
        self.mount_target_domain = mount_target_domain
        self.page_number = page_number

    def validate(self):
        """region_id, file_system_id and mount_target_domain are required."""
        for value, name in ((self.region_id, 'region_id'),
                            (self.file_system_id, 'file_system_id'),
                            (self.mount_target_domain, 'mount_target_domain')):
            self.validate_required(value, name)

    def to_map(self):
        """Serialize to the wire-format dictionary."""
        return {
            'RegionId': self.region_id,
            'PageSize': self.page_size,
            'FileSystemId': self.file_system_id,
            'ClientIP': self.client_ip,
            'MountTargetDomain': self.mount_target_domain,
            'PageNumber': self.page_number,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        for attr, key in (('region_id', 'RegionId'),
                          ('page_size', 'PageSize'),
                          ('file_system_id', 'FileSystemId'),
                          ('client_ip', 'ClientIP'),
                          ('mount_target_domain', 'MountTargetDomain'),
                          ('page_number', 'PageNumber')):
            setattr(self, attr, map.get(key))
        return self
class DescribeMountedClientsResponse(TeaModel):
    """Paged response for the DescribeMountedClients API."""

    def __init__(self, request_id=None, total_count=None, page_size=None, page_number=None, clients=None):
        self.request_id = request_id
        self.total_count = total_count
        self.page_size = page_size
        self.page_number = page_number
        # clients: DescribeMountedClientsResponseClients or None
        self.clients = clients

    def validate(self):
        """Every field is required; recurse into the nested clients model."""
        for value, name in ((self.request_id, 'request_id'),
                            (self.total_count, 'total_count'),
                            (self.page_size, 'page_size'),
                            (self.page_number, 'page_number'),
                            (self.clients, 'clients')):
            self.validate_required(value, name)
        if self.clients:
            self.clients.validate()

    def to_map(self):
        """Serialize to the wire-format dictionary, recursing into clients."""
        result = {
            'RequestId': self.request_id,
            'TotalCount': self.total_count,
            'PageSize': self.page_size,
            'PageNumber': self.page_number,
        }
        result['Clients'] = self.clients.to_map() if self.clients is not None else None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.request_id = map.get('RequestId')
        self.total_count = map.get('TotalCount')
        self.page_size = map.get('PageSize')
        self.page_number = map.get('PageNumber')
        if map.get('Clients') is not None:
            self.clients = DescribeMountedClientsResponseClients().from_map(map['Clients'])
        else:
            self.clients = None
        return self
class DescribeMountedClientsResponseClientsClient(TeaModel):
    """A single mounted client, identified by its IP address."""

    def __init__(self, client_ip=None):
        self.client_ip = client_ip

    def validate(self):
        """client_ip is required."""
        self.validate_required(self.client_ip, 'client_ip')

    def to_map(self):
        """Serialize to the wire-format dictionary."""
        return {'ClientIP': self.client_ip}

    def from_map(self, map={}):
        """Populate from a wire-format dictionary; return self."""
        self.client_ip = map.get('ClientIP')
        return self
class DescribeMountedClientsResponseClients(TeaModel):
def __init__(self, client=None):
self.client = []
def validate(self):
self.validate_required(self.client, 'client')
if self.client:
for k in self.client:
if k :
k.validate()
def to_map(self):
result = {}
result['Client'] = []
if self.client is not None:
for k in self.client:
result['Client'].append(k.to_map() if k else None)
else:
result['Client'] = None
| |
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "method": "bitcoin",
# "id": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
# "recipient": "mzb3NgX9Dr6jgGAu31L6jsPGB2zkaFxxyf",
# "confirmations": 3
# }
# }
#
# trade fee
#
# {
# "id": 21311221,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "paid_commission",
# "price": 0.0001,
# "fund_id": "BTCEUR",
# "order_id": 12832371,
# "trade_id": 12923212,
# "currency": "BTC",
# "transfer_detail": null
# }
#
id = self.safe_string(item, 'id')
referenceId = None
type = self.safe_string(item, 'type')
direction = self.parse_ledger_entry_direction(type)
type = self.parse_ledger_entry_type(type)
if type == 'trade' or type == 'fee':
referenceId = self.safe_string(item, 'trade_id')
currencyId = self.safe_string(item, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(item, 'price')
timestamp = self.parse8601(self.safe_string(item, 'date'))
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': None,
'referenceId': referenceId,
'referenceAccount': None,
'type': type,
'currency': code,
'amount': amount,
'before': None,
'after': None,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'page': 1,
# 'fund_id': 'ETHBTC', # filter by fund symbol
# 'currency': 'BTC', # filter by currency
# 'after': '2015-02-06T08:47:26Z', # filter after a certain timestamp
# 'before': '2015-02-06T08:47:26Z',
# 'type': 'withdraw',
# 'order_id': '12832371', # filter by a specific order ID
# 'trade_id': '12923212', # filter by a specific trade ID
# 'transfer_method': 'bitcoin', # wire_transfer, ripple, greenaddress, bitcoin, litecoin, namecoin, peercoin, dogecoin
# 'transfer_recipient': '1MAHLhJoz9W2ydbRf972WSgJYJ3Ui7aotm', # filter by a specific recipient(e.g. Bitcoin address, IBAN)
# 'transfer_id': '8261949194985b01985006724dca5d6059989e096fa95608271d00dd902327fa', # filter by a specific transfer ID(e.g. Bitcoin TX hash)
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['after'] = self.iso8601(since)
response = self.privateGetTransactions(self.extend(request, params))
#
# {
# "transactions": [
# {
# "id": 21311223,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "withdraw",
# "price": 103.00,
# "currency": "EUR",
# "fund_id": null,
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "method": "wire_transfer",
# "id": "F112DD3",
# "recipient": "IT123456789012",
# "confirmations": 0
# }
# },
# {
# "id": 21311222,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "atm_payment",
# "price": 2.01291,
# "currency": "BTC",
# "fund_id": "null",
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "method": "bitcoin",
# "id": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
# "recipient": "mzb3NgX9Dr6jgGAu31L6jsPGB2zkaFxxyf",
# "confirmations": 3
# }
# },
# {
# "id": 21311221,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "paid_commission",
# "price": 0.0001,
# "fund_id": "BTCEUR",
# "order_id": 12832371,
# "trade_id": 12923212,
# "currency": "BTC",
# "transfer_detail": null
# }
# ],
# "meta": {
# "total_count": 1221,
# "first": {"page": 1, "href": "https://api.therocktrading.com/v1/transactions?page=1"},
# "previous": null,
# "current": {"page": 1, "href": "https://api.therocktrading.com/v1/transactions?page=1"},
# "next": {"page": 2, "href": "https://api.therocktrading.com/v1/transactions?page=2"},
# "last": {"page": 1221, "href": "https://api.therocktrading.com/v1/transactions?page=1221"}
# }
# }
#
transactions = self.safe_value(response, 'transactions', [])
return self.parse_ledger(transactions, currency, since, limit)
def parse_transaction_type(self, type):
types = {
'withdraw': 'withdrawal',
'atm_payment': 'deposit',
}
return self.safe_string(types, type, type)
def parse_transaction(self, transaction, currency=None):
#
# fetchWithdrawals
#
# # fiat
#
# {
# "id": 21311223,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "withdraw",
# "price": 103.00,
# "currency": "EUR",
# "fund_id": null,
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "method": "wire_transfer",
# "id": "F112DD3",
# "recipient": "IT123456789012",
# "confirmations": 0
# }
# }
#
# {
# "id": 12564223,
# "date": "2017-08-07T08:13:50.023Z",
# "note": "GB7IDL401573388",
# "type": "withdraw",
# "price": 4345.93,
# "fund_id": null,
# "currency": "EUR",
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "id": "EXECUTEDBUTUNCHECKED",
# "method": "wire_transfer",
# "recipient": "GB7IDL401573388",
# "confirmations": 0
# }
# }
#
# # crypto
#
# {
# id: 20914695,
# date: '2018-02-24T07:13:23.002Z',
# type: 'withdraw',
# price: 2.70883607,
# currency: 'BCH',
# fund_id: null,
# order_id: null,
# trade_id: null,
# note: '1MAHLhJoz9W2ydbRf972WSgJYJ3Ui7aotm',
# transfer_detail: {
# method: 'bitcoin_cash',
# id: '8261949194985b01985006724dca5d6059989e096fa95608271d00dd902327fa',
# recipient: '1MAHLhJoz9W2ydbRf972WSgJYJ3Ui7aotm',
# confirmations: 0
# }
# }
#
#
# fetchDeposits
#
# # fiat
#
# {
# id: 16176632,
# date: '2017-11-20T21:00:13.355Z',
# type: 'atm_payment',
# price: 5000,
# currency: 'EUR',
# fund_id: null,
# order_id: null,
# trade_id: null,
# note: 'Mistral deposit',
# transfer_detail: {
# method: 'wire_transfer',
# id: '972JQ49337DX769T',
# recipient: null,
# confirmations: 0
# }
# }
#
# # crypto
#
# {
# "id": 21311222,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "atm_payment",
# "price": 2.01291,
# "currency": "BTC",
# "fund_id": "null",
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "method": "bitcoin",
# "id": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
# "recipient": "mzb3NgX9Dr6jgGAu31L6jsPGB2zkaFxxyf",
# "confirmations": 3
# }
# }
#
id = self.safe_string(transaction, 'id')
type = self.parse_transaction_type(self.safe_string(transaction, 'type'))
detail = self.safe_value(transaction, 'transfer_detail', {})
method = self.safe_string(detail, 'method')
txid = None
address = None
if method is not None:
if method != 'wire_transfer':
txid = self.safe_string(detail, 'id')
address = self.safe_string(detail, 'recipient')
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'price')
timestamp = self.parse8601(self.safe_string(transaction, 'date'))
status = 'ok'
network = self.safe_string(detail, 'method')
# todo parse tags
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'network': network,
'addressFrom': None,
'addressTo': address,
'address': address,
'tagFrom': None,
'tagTo': None,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
request = {
'type': 'withdraw',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
request = {
'type': 'atm_payment',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'page': 1,
# 'fund_id': 'ETHBTC', # filter by fund symbol
# 'currency': 'BTC', # filter by currency
# 'after': '2015-02-06T08:47:26Z', # filter after a certain timestamp
# 'before': '2015-02-06T08:47:26Z',
# 'type': 'withdraw',
# 'order_id': '12832371', # filter by a specific order ID
# 'trade_id': '12923212', # filter by a specific trade ID
# 'transfer_method': 'bitcoin', # wire_transfer, ripple, greenaddress, bitcoin, litecoin, namecoin, peercoin, dogecoin
# 'transfer_recipient': '1MAHLhJoz9W2ydbRf972WSgJYJ3Ui7aotm', # filter by a specific recipient(e.g. Bitcoin address, IBAN)
# 'transfer_id': '8261949194985b01985006724dca5d6059989e096fa95608271d00dd902327fa', # filter by a specific transfer ID(e.g. Bitcoin TX hash)
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['after'] = self.iso8601(since)
params = self.extend(request, params)
response = self.privateGetTransactions(params)
#
# {
# "transactions": [
# {
# "id": 21311223,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "withdraw",
# "price": 103.00,
# "currency": "EUR",
# "fund_id": null,
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "method": "wire_transfer",
# "id": "F112DD3",
# "recipient": "IT123456789012",
# "confirmations": 0
# }
# },
# {
# "id": 21311222,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "atm_payment",
# "price": 2.01291,
# "currency": "BTC",
# "fund_id": "null",
# "order_id": null,
# "trade_id": null,
# "transfer_detail": {
# "method": "bitcoin",
# "id": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
# "recipient": "mzb3NgX9Dr6jgGAu31L6jsPGB2zkaFxxyf",
# "confirmations": 3
# }
# },
# {
# "id": 21311221,
# "date": "2015-06-30T13:55:11.000Z",
# "type": "paid_commission",
# "price": 0.0001,
# "fund_id": "BTCEUR",
# "order_id": 12832371,
# "trade_id": 12923212,
# "currency": "BTC",
# "transfer_detail": null
# }
# ],
# "meta": {
# "total_count": 1221,
# "first": {"page": 1, "href": "https://api.therocktrading.com/v1/transactions?page=1"},
# "previous": null,
# "current": {"page": 1, "href": "https://api.therocktrading.com/v1/transactions?page=1"},
# "next": {"page": 2, "href": "https://api.therocktrading.com/v1/transactions?page=2"},
# "last": {"page": 1221, "href": "https://api.therocktrading.com/v1/transactions?page=1221"}
# }
# }
#
transactions = self.safe_value(response, 'transactions', [])
transactionTypes = ['withdraw', 'atm_payment']
depositsAndWithdrawals = self.filter_by_array(transactions, 'type', transactionTypes, False)
return self.parse_transactions(depositsAndWithdrawals, currency, since, limit)
def parse_order_status(self, status):
statuses = {
'active': 'open',
'executed': 'closed',
'deleted': 'canceled',
# don't know what self status means
# 'conditional': '?',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# "id": 4325578,
# "fund_id":"BTCEUR",
# "side":"buy",
# "type":"limit",
# "status":"executed",
# "price":0.0102,
# "amount": 50.0,
# "amount_unfilled": 0.0,
# "conditional_type": null,
# "conditional_price": null,
# "date":"2015-06-03T00:49:48.000Z",
# "close_on": nil,
# "leverage": 1.0,
# "position_id": null,
# "trades": [
# {
# "id":237338,
# "fund_id":"BTCEUR",
# "amount":50,
# "price":0.0102,
# "side":"buy",
# "dark":false,
# "date":"2015-06-03T00:49:49.000Z"
# }
# ]
| |
MOL 2 7.552 8.102 1.726 1.00 0.00 H1- \n',
'ATOM 194 H5 MOL 2 8.043 6.609 1.731 1.00 0.00 H1- \n',
'ATOM 195 H6 MOL 2 6.934 7.090 3.760 1.00 0.00 H1- \n',
'ATOM 196 H7 MOL 2 5.637 7.485 2.966 1.00 0.00 H1- \n',
'ATOM 197 N1 MOL 2 19.715 19.504 24.110 1.00 0.00 N3- \n',
'ATOM 198 C1 MOL 2 21.333 19.931 21.779 1.00 0.00 C \n',
'ATOM 199 C2 MOL 2 20.227 20.696 22.115 1.00 0.00 C \n',
'ATOM 200 C3 MOL 2 19.412 20.373 23.285 1.00 0.00 C \n',
'ATOM 201 C4 MOL 2 18.848 19.307 0.461 1.00 0.00 C \n',
'ATOM 202 C5 MOL 2 19.656 19.577 1.721 1.00 0.00 C \n',
'ATOM 203 C6 MOL 2 18.816 19.304 2.961 1.00 0.00 C \n',
'ATOM 204 H1 MOL 2 21.551 19.195 22.305 1.00 0.00 H1- \n',
'ATOM 205 H2 MOL 2 18.622 20.847 23.414 1.00 0.00 H1- \n',
'ATOM 206 H3 MOL 2 18.089 19.924 0.414 1.00 0.00 H1- \n',
'ATOM 207 H4 MOL 2 19.952 20.502 1.726 1.00 0.00 H1- \n',
'ATOM 208 H5 MOL 2 20.443 19.009 1.731 1.00 0.00 H1- \n',
'ATOM 209 H6 MOL 2 19.334 19.490 3.760 1.00 0.00 H1- \n',
'ATOM 210 H7 MOL 2 18.037 19.885 2.966 1.00 0.00 H1- \n',
'ATOM 211 N1 MOL 2 19.715 7.104 11.710 1.00 0.00 N3- \n',
'ATOM 212 C1 MOL 2 21.333 7.531 9.379 1.00 0.00 C \n',
'ATOM 213 C2 MOL 2 20.227 8.296 9.715 1.00 0.00 C \n',
'ATOM 214 C3 MOL 2 19.412 7.973 10.885 1.00 0.00 C \n',
'ATOM 215 C4 MOL 2 18.848 6.907 12.861 1.00 0.00 C \n',
'ATOM 216 C5 MOL 2 19.656 7.177 14.121 1.00 0.00 C \n',
'ATOM 217 C6 MOL 2 18.816 6.904 15.361 1.00 0.00 C \n',
'ATOM 218 H1 MOL 2 21.551 6.795 9.905 1.00 0.00 H1- \n',
'ATOM 219 H2 MOL 2 18.622 8.447 11.014 1.00 0.00 H1- \n',
'ATOM 220 H3 MOL 2 18.089 7.524 12.814 1.00 0.00 H1- \n',
'ATOM 221 H4 MOL 2 19.952 8.102 14.126 1.00 0.00 H1- \n',
'ATOM 222 H5 MOL 2 20.443 6.609 14.131 1.00 0.00 H1- \n',
'ATOM 223 H6 MOL 2 19.334 7.090 16.160 1.00 0.00 H1- \n',
'ATOM 224 H7 MOL 2 18.037 7.485 15.366 1.00 0.00 H1- \n',
'ATOM 225 N1 MOL 2 0.690 19.715 5.296 1.00 0.00 N3- \n',
'ATOM 226 C1 MOL 2 3.021 21.333 4.869 1.00 0.00 C \n',
'ATOM 227 C2 MOL 2 2.685 20.227 4.104 1.00 0.00 C \n',
'ATOM 228 C3 MOL 2 1.515 19.412 4.427 1.00 0.00 C \n',
'ATOM 229 C4 MOL 2 24.339 18.848 5.493 1.00 0.00 C \n',
'ATOM 230 C5 MOL 2 23.079 19.656 5.223 1.00 0.00 C \n',
'ATOM 231 C6 MOL 2 21.839 18.816 5.496 1.00 0.00 C \n',
'ATOM 232 H1 MOL 2 2.495 21.551 5.605 1.00 0.00 H1- \n',
'ATOM 233 H2 MOL 2 1.386 18.622 3.953 1.00 0.00 H1- \n',
'ATOM 234 H3 MOL 2 24.386 18.089 4.876 1.00 0.00 H1- \n',
'ATOM 235 H4 MOL 2 23.074 19.952 4.298 1.00 0.00 H1- \n',
'ATOM 236 H5 MOL 2 23.069 20.443 5.791 1.00 0.00 H1- \n',
'ATOM 237 H6 MOL 2 21.040 19.334 5.310 1.00 0.00 H1- \n',
'ATOM 238 H7 MOL 2 21.834 18.037 4.915 1.00 0.00 H1- \n',
'ATOM 239 N1 MOL 2 0.690 7.315 17.696 1.00 0.00 N3- \n',
'ATOM 240 C1 MOL 2 3.021 8.933 17.269 1.00 0.00 C \n',
'ATOM 241 C2 MOL 2 2.685 7.827 16.504 1.00 0.00 C \n',
'ATOM 242 C3 MOL 2 1.515 7.012 16.827 1.00 0.00 C \n',
'ATOM 243 C4 MOL 2 24.339 6.448 17.893 1.00 0.00 C \n',
'ATOM 244 C5 MOL 2 23.079 7.256 17.623 1.00 0.00 C \n',
'ATOM 245 C6 MOL 2 21.839 6.416 17.896 1.00 0.00 C \n',
'ATOM 246 H1 MOL 2 2.495 9.151 18.005 1.00 0.00 H1- \n',
'ATOM 247 H2 MOL 2 1.386 6.222 16.353 1.00 0.00 H1- \n',
'ATOM 248 H3 MOL 2 24.386 5.689 17.276 1.00 0.00 H1- \n',
'ATOM 249 H4 MOL 2 23.074 7.552 16.698 1.00 0.00 H1- \n',
'ATOM 250 H5 MOL 2 23.069 8.043 18.191 1.00 0.00 H1- \n',
'ATOM 251 H6 MOL 2 21.040 6.934 17.710 1.00 0.00 H1- \n',
'ATOM 252 H7 MOL 2 21.834 5.637 17.315 1.00 0.00 H1- \n',
'ATOM 253 N1 MOL 2 13.090 19.715 17.696 1.00 0.00 N3- \n',
'ATOM 254 C1 MOL 2 15.421 21.333 17.269 1.00 0.00 C \n',
'ATOM 255 C2 MOL 2 15.085 20.227 16.504 1.00 0.00 C \n',
'ATOM 256 C3 MOL 2 13.915 19.412 16.827 1.00 0.00 C \n',
'ATOM 257 C4 MOL 2 11.939 18.848 17.893 1.00 0.00 C \n',
'ATOM 258 C5 MOL 2 10.679 19.656 17.623 1.00 0.00 C \n',
'ATOM 259 C6 MOL 2 9.439 18.816 17.896 1.00 0.00 C \n',
'ATOM 260 H1 MOL 2 14.895 21.551 18.005 1.00 0.00 H1- \n',
'ATOM 261 H2 MOL 2 13.786 18.622 16.353 1.00 0.00 H1- \n',
'ATOM 262 H3 MOL 2 11.986 18.089 17.276 1.00 0.00 H1- \n',
'ATOM 263 H4 MOL 2 10.674 19.952 16.698 1.00 0.00 H1- \n',
'ATOM 264 H5 MOL 2 10.669 20.443 18.191 1.00 0.00 H1- \n',
'ATOM 265 H6 MOL 2 8.640 19.334 17.710 1.00 0.00 H1- \n',
'ATOM 266 H7 MOL 2 9.434 18.037 17.315 1.00 0.00 H1- \n',
'ATOM 267 N1 MOL 2 13.090 7.315 5.296 1.00 0.00 N3- \n',
'ATOM 268 C1 MOL 2 15.421 8.933 4.869 1.00 0.00 C \n',
'ATOM 269 C2 MOL 2 15.085 7.827 4.104 1.00 0.00 C \n',
'ATOM 270 C3 MOL 2 13.915 7.012 4.427 1.00 0.00 C \n',
'ATOM 271 C4 MOL 2 11.939 6.448 5.493 1.00 0.00 C \n',
'ATOM 272 C5 MOL 2 10.679 7.256 5.223 1.00 0.00 C \n',
'ATOM 273 C6 MOL 2 9.439 6.416 5.496 1.00 0.00 C \n',
'ATOM 274 H1 MOL 2 14.895 9.151 5.605 1.00 0.00 H1- \n',
'ATOM 275 H2 MOL 2 13.786 6.222 3.953 1.00 0.00 H1- \n',
'ATOM 276 H3 MOL 2 11.986 5.689 4.876 1.00 0.00 H1- \n',
'ATOM 277 H4 MOL 2 10.674 7.552 4.298 1.00 0.00 H1- \n',
'ATOM 278 H5 MOL 2 10.669 8.043 5.791 1.00 0.00 H1- \n',
'ATOM 279 H6 MOL 2 8.640 6.934 5.310 1.00 0.00 H1- \n',
'ATOM 280 H7 MOL 2 9.434 5.637 4.915 1.00 0.00 H1- \n',
'ATOM 281 N1 MOL 2 13.090 5.085 7.104 1.00 0.00 N3- \n',
'ATOM 282 C1 MOL 2 15.421 3.467 7.531 1.00 0.00 C \n',
'ATOM 283 C2 MOL 2 15.085 4.573 8.296 1.00 0.00 C \n',
'ATOM 284 C3 MOL 2 13.915 5.388 7.973 1.00 0.00 C \n',
'ATOM 285 C4 MOL 2 11.939 5.952 6.907 1.00 0.00 C \n',
'ATOM 286 C5 MOL 2 10.679 5.144 7.177 1.00 0.00 C \n',
'ATOM 287 C6 MOL 2 9.439 5.984 6.904 1.00 0.00 C \n',
'ATOM 288 H1 MOL 2 14.895 3.249 6.795 1.00 0.00 H1- \n',
'ATOM 289 H2 MOL 2 13.786 6.178 8.447 1.00 0.00 H1- \n',
'ATOM 290 H3 MOL 2 11.986 6.711 7.524 1.00 0.00 H1- \n',
'ATOM 291 H4 MOL 2 10.674 4.848 8.102 1.00 0.00 H1- \n',
'ATOM 292 H5 MOL 2 10.669 4.357 6.609 1.00 0.00 H1- \n',
'ATOM 293 H6 MOL 2 8.640 5.466 7.090 1.00 0.00 H1- \n',
'ATOM 294 H7 MOL 2 9.434 6.763 7.485 1.00 0.00 H1- \n',
'ATOM 295 N1 MOL 2 13.090 17.485 19.504 1.00 0.00 N3- \n',
'ATOM 296 C1 MOL 2 15.421 15.867 19.931 1.00 0.00 C \n',
'ATOM 297 C2 MOL 2 15.085 16.973 20.696 1.00 0.00 C \n',
'ATOM 298 C3 MOL 2 13.915 17.788 20.373 1.00 0.00 C \n',
'ATOM 299 C4 MOL 2 11.939 18.352 19.307 1.00 0.00 C | |
from ophyd import ( Component as Cpt, ADComponent, Signal,
EpicsSignal, EpicsSignalRO, EpicsSignalWithRBV,
ROIPlugin, StatsPlugin, ImagePlugin,
SingleTrigger, PilatusDetector, Device)
from ophyd.areadetector.filestore_mixins import FileStoreBase,FileStoreHDF5,FileStoreIterativeWrite
from ophyd.areadetector.plugins import HDF5Plugin
from ophyd.utils import set_and_wait
from databroker.assets.handlers_base import HandlerBase
from ophyd.device import Staged
from pathlib import Path
import os,time,threading
from types import SimpleNamespace
from enum import Enum
class PilatusTriggerMode(Enum):
    """Values written to the Pilatus cam 'TriggerMode' PV (camserver modes)."""
    soft = 0       # Software-triggered: each frame started by caput on acquire
    ext = 2        # ExtTrigger in camserver
    ext_multi = 3  # ExtMTrigger in camserver
class LiXFileStorePluginBase(FileStoreBase):
    """LiX variant of ophyd's FileStorePluginBase.

    The intended difference from upstream is that the plugin file number is
    *not* reset to 0 during stage() (see the commented-out set_and_wait on
    file_number below), so numbering continues across scans.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Signals applied to the areaDetector plugin when staged.
        self.stage_sigs.update([('auto_increment', 'Yes'),
                                ('array_counter', 0),
                                ('auto_save', 'Yes'),
                                ('num_capture', 0),
                                ])
        self._fn = None  # full data-file name computed at stage() time
        self._fp = None  # read path (directory) of the staged file

    def make_filename(self):
        '''Make a filename.
        This is a hook so that the read and write paths can either be modified
        or created on disk prior to configuring the areaDetector plugin.
        Returns
        -------
        filename : str
            The start of the filename
        read_path : str
            Path that ophyd can read from
        write_path : str
            Path that the IOC can write to
        '''
        # NOTE(review): new_short_uid and datetime are expected to be provided
        # by the surrounding startup environment; they are not imported here.
        filename = new_short_uid()
        formatter = datetime.now().strftime
        write_path = formatter(self.write_path_template)
        read_path = formatter(self.read_path_template)
        return filename, read_path, write_path

    def stage(self):
        # Make a filename.
        filename, read_path, write_path = self.make_filename()
        # Ensure we do not have an old file open.
        if self.file_write_mode != 'Single':
            set_and_wait(self.capture, 0)
        # These must be set before parent is staged (specifically
        # before capture mode is turned on. They will not be reset
        # on 'unstage' anyway.
        self.file_path.set(write_path).wait()
        set_and_wait(self.file_name, filename)
        #set_and_wait(self.file_number, 0) # only reason to redefine the pluginbase
        super().stage()
        # AD does this same templating in C, but we can't access it
        # so we do it redundantly here in Python.
        self._fn = self.file_template.get() % (read_path,
                                               filename,
                                               # file_number is *next* iteration
                                               self.file_number.get() - 1)
        self._fp = read_path
        if not self.file_path_exists.get():
            raise IOError("Path %s does not exist on IOC."
                          "" % self.file_path.get())
class LiXFileStoreHDF5(LiXFileStorePluginBase):
    """HDF5 file-store mixin: streams frames to .h5 files and registers an
    'AD_HDF5' resource document for databroker."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.filestore_spec = 'AD_HDF5'  # spec name stored in resource doc
        # Stream mode with capture enabled; file name pattern name_number.h5.
        self.stage_sigs.update([('file_template', '%s%s_%6.6d.h5'),
                                ('file_write_mode', 'Stream'),
                                ('capture', 1)
                                ])

    def get_frames_per_point(self):
        """Frames recorded per trigger point (stored in the resource doc)."""
        num_capture = self.num_capture.get()
        # If num_capture is 0, then the plugin will capture however many frames
        # it is sent. We can get how frames it will be sent (unless
        # interrupted) by consulting num_images on the detector's camera.
        if num_capture == 0:
            return self.parent.cam.num_images.get()
        # Otherwise, a nonzero num_capture will cut off capturing at the
        # specified number.
        return num_capture

    def stage(self):
        # Stage the plugin first, then emit the resource document describing
        # the file that staging just configured.
        super().stage()
        res_kwargs = {'frame_per_point': self.get_frames_per_point()}
        self._generate_resource(res_kwargs)
class LIXhdfPlugin(HDF5Plugin, LiXFileStoreHDF5):
    """HDF5 plugin whose file names come from the current sample name and the
    detector id, instead of a random short uid."""
    run_time = Cpt(EpicsSignalRO, "RunTime")
    # Optional sub-directory under data_path; class-level, shared by all instances.
    sub_directory = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fnbr = 0  # last used file number (see commented-out stage/unstage)

    def make_filename(self):
        ''' replaces FileStorePluginBase.make_filename()
        Returns
        -------
        filename : str
            The start of the filename
        read_path : str
            Path that ophyd can read from
        write_path : str
            Path that the IOC can write to
        '''
        # data_path and current_sample are globals maintained elsewhere in the
        # beamline startup code.
        global data_path,current_sample
        filename = f"{current_sample}_{self.parent.detector_id}"
        #write_path = f"/nsls2/data/lix/legacy/{self.parent.name}/{current_cycle}/{proposal_id}/{run_id}/{current_sample}/"
        write_path = data_path if self.sub_directory is None else f"{data_path}/{self.sub_directory}"
        read_path = write_path # might want to handle this differently, this shows up in res/db
        #read_path = self.parent.cbf_file_path.get()
        return filename, read_path, write_path

    #def stage(self):
    #    """ need to set the number of images to collect and file path
    #    """
    #    super().stage()
    #    if not self.parent.parent.reset_file_number:
    #        set_and_wait(self.file_number, self.fnbr+1)
    #        filename, read_path, write_path = self.make_filename()
    #        self._fn = self.file_template.get() % (read_path, filename, self.fnbr)
    #        set_and_wait(self.full_file_name, self._fn)

    #def unstage(self):
    #    self.fnbr = self.file_number.get()
    #    super().unstage()

    def get_frames_per_point(self):
        # In ext trigger mode the whole image series belongs to one trigger
        # point; otherwise one frame per point.
        if self.parent.trigger_mode is PilatusTriggerMode.ext:
            return self.parent.parent._num_images
        else:
            return 1
class LIXPilatus(PilatusDetector):
    """One Pilatus detector at LiX: HDF5 file plugin plus CBF files written to
    the detector-server ramdisk.

    Instances are components of LiXDetectors (reachable as self.parent), which
    supplies _num_images, _num_repeats and the shared trigger bookkeeping.
    """
    hdf = Cpt(LIXhdfPlugin, suffix="HDF1:",
              write_path_template="", root='/')
    # camserver CBF output location/naming (paths are on the detector server)
    cbf_file_path = ADComponent(EpicsSignalWithRBV, 'cam1:FilePath', string=True)
    cbf_file_name = ADComponent(EpicsSignalWithRBV, 'cam1:FileName', string=True)
    cbf_file_number = ADComponent(EpicsSignalWithRBV, 'cam1:FileNumber')
    HeaderString = Cpt(EpicsSignal, "cam1:HeaderString")
    ThresholdEnergy = Cpt(EpicsSignal, "cam1:ThresholdEnergy")
    armed = Cpt(EpicsSignal, "cam1:Armed")
    flatfield = Cpt(EpicsSignal, "cam1:FlatFieldFile")
    ff_minv = Cpt(EpicsSignal, "cam1:MinFlatField")
    ff_valid = Cpt(EpicsSignalRO, "cam1:FlatFieldValid")

    def __init__(self, *args, hostname, detector_id, **kwargs):
        """hostname: detector server; detector_id: short tag used in file names."""
        self.detector_id = detector_id
        self.hostname = hostname
        super().__init__(*args, **kwargs)
        self._acquisition_signal = self.cam.acquire
        self._counter_signal = self.cam.array_counter
        self.set_cbf_file_default(f"/ramdisk/{self.name}/", "current")
        self.ts = []  # timestamps collected during exposure
        if self.hdf.run_time.get()==0: # first time using the plugin
            self.hdf.warmup()

    def set_flatfield(self, fn, minV=100):
        """ do some changing first
        make sure that the image size is correct and the values are reasonable
        documentation on the PV:
        Name of a file to be used to correct for the flat field. If this record does not point to a valid
        flat field file then no flat field correction is performed. The flat field file is simply a TIFF
        or CBF file collected by the Pilatus that is used to correct for spatial non-uniformity in the
        response of the detector. It should be collected with a spatially uniform intensity on the detector
        at roughly the same energy as the measurements being corrected. When the flat field file is read,
        the average pixel value (averageFlatField) is computed using all pixels with intensities
        >PilatusMinFlatField. All pixels with intensity <PilatusMinFlatField in the flat field are replaced
        with averageFlatField. When images are collected before the NDArray callbacks are performed the following
        per-pixel correction is applied:
        ImageData[i] = (averageFlatField * ImageData[i])/flatField[i];
        or
        ImageData[i] *= averageFlatField/flatField[i]
        """
        self.flatfield.put(fn, wait=True)
        self.ff_minv.put(minV, wait=True)
        # Give the IOC a moment to read and validate the flat-field file.
        time.sleep(0.5)
        if not self.ff_valid.get():
            print("Unable to set flat field!")
        else:
            # Record the active flat field in the RunEngine metadata.
            if not "flat_field" in RE.md['pilatus'].keys():
                RE.md['pilatus']["flat_field"] = {}
            RE.md['pilatus']["flat_field"][self.name] = self.flatfield.get(as_string=True)

    def set_cbf_file_default(self, path, fn):
        # Where camserver writes CBF frames when not using the HDF5 plugin.
        self.cbf_file_path.put(path, wait=True)
        self.cbf_file_name.put(fn, wait=True)

    def set_thresh(self, ene):
        """ set threshold
        Refuses to change the threshold while acquiring or armed; in that case
        it only reports the current threshold.
        """
        # cam.armed comes from ophyd's PilatusDetectorCam; the class-level
        # 'armed' Cpt above reads the same PV directly.
        if self.cam.acquire.get()==0 and self.cam.armed.get()==0:
            self.ThresholdEnergy.put(ene, wait=True)
            self.cam.threshold_apply.put(1)
        else:
            ene = pseudoE.energy.position/1000
            eth = self.ThresholdEnergy.get()
            print(f"Threshold is not set for {self.name} due to active data collection.")
            # NOTE(review): "enegy" below is a typo in the user-facing message.
            print(f"x-ray enegy = 2x {ene/2:.2f} keV, threshold is at {eth:.2f} keV")

    def stage(self, trigger_mode):
        # NOTE(review): non-standard signature -- ophyd's stage() takes no
        # arguments; presumably invoked by the parent device, not by bluesky
        # directly. TODO confirm against LiXDetectors.
        if self._staged == Staged.yes:
            return
        self.trigger_mode = trigger_mode
        # In ext mode all repeats are collected within one armed sequence.
        if trigger_mode is PilatusTriggerMode.ext:
            self.cam.num_images.put(self.parent._num_images*self.parent._num_repeats,
                                    wait=True)
        else:
            self.cam.num_images.put(self.parent._num_images, wait=True)
        print(self.name, f" staging for {trigger_mode}")
        self.cam.trigger_mode.put(trigger_mode.value, wait=True)
        super().stage()
        print(self.name, "super staged")
        if trigger_mode is PilatusTriggerMode.soft:
            self._acquisition_signal.subscribe(self.parent._acquire_changed)
        else: # external triggering
            self._counter_signal.put(0)
            time.sleep(.1)
            print(self.name, "checking armed status")
            # Start acquisition now; frames are produced by hardware triggers.
            self._acquisition_signal.put(1) #, wait=True)
            # Block until camserver reports the detector armed.
            while self.armed.get() != 1:
                time.sleep(0.1)
        self.ts = []
        print(self.name, "staged")

    def unstage(self):
        """Wait for disarm, then restore software triggering and single-image mode."""
        if self._staged == Staged.no:
            return
        print(self.name, "unstaging ...")
        print(self.name, "checking detector Armed status:", end="")
        # Wait until the detector disarms before touching trigger settings.
        while self.armed.get():
            time.sleep(0.1)
        print(" unarmed.")
        if self.parent.trigger_mode is PilatusTriggerMode.soft:
            self._acquisition_signal.clear_sub(self.parent._acquire_changed)
        else:
            self._acquisition_signal.put(0, wait=True)
        self.cam.trigger_mode.put(0, wait=True) # always set back to software trigger
        self.cam.num_images.put(1, wait=True)
        super().unstage()
        print(self.name, "unstaging completed.")

    def trigger(self):
        # In soft mode start an acquisition; in external modes the hardware
        # produces the frames, so only the image event is dispatched here.
        # NOTE(review): returns None, while the ophyd convention is to return
        # a status object -- confirm callers do not wait on the result.
        if self._staged != Staged.yes:
            raise RuntimeError("This detector is not ready to trigger."
                               "Call the stage() method before triggering.")
        print(self.name+" trigger")
        if self.trigger_mode is PilatusTriggerMode.soft:
            self._acquisition_signal.put(1, wait=False)
        self.dispatch(f'{self.name}_image', ttime.time())
class LiXDetectors(Device):
pil1M = Cpt(LIXPilatus, '{Det:SAXS}', name="pil1M", detector_id="SAXS", hostname="xf16idc-pilatus1m.nsls2.bnl.local")
#pilW1 = Cpt(LIXPilatus, '{Det:WAXS1}', name="pilW1", detector_id="WAXS1", hostname="xf16idc-pilatus300k1.nsls2.bnl.local")
pilW2 = Cpt(LIXPilatus, '{Det:WAXS2}', name="pilW2", detector_id="WAXS2", hostname="xf16idc-pilatus900k.nsls2.bnl.local")
trigger_lock = None
reset_file_number = True
_num_images = 1
_num_repeats = 1
active_detectors = []
trig_wait = 1.
acq_time = 1.
trigger_mode = PilatusTriggerMode.soft
    def __init__(self, prefix):
        """Group device for the LiX Pilatus detectors; instance is always named "pil"."""
        super().__init__(prefix=prefix, name="pil")
        self.dets = {"pil1M": self.pil1M, "pilW2": self.pilW2} # "pilW1": self.pilW1,
        if self.trigger_lock is None:
            self.trigger_lock = threading.Lock()
        for dname,det in self.dets.items():
            det.name = dname
            det.read_attrs = ['hdf'] #['file']
        # All known detectors start out active; use activate() to narrow the set.
        self.active_detectors = list(self.dets.values())
        self.trigger_time = Signal(name="pilatus_trigger_time")
        # Zebra soft input used to fan out hardware triggers.
        self._trigger_signal = EpicsSignal('XF:16IDC-ES{Zeb:1}:SOFT_IN:B0')
        self._exp_completed = 0
        # Record per-detector flat-field state in the RunEngine metadata
        # (RE is the global RunEngine from the startup environment).
        RE.md['pilatus'] = {}
        RE.md['pilatus']["flat_field"] = {}
        for det in self.active_detectors:
            RE.md['pilatus']["flat_field"][det.name] = det.flatfield.get(as_string=True)
        # ver 0, or none at all: filename template must be set by CBF file handler
        # ver 1: filename template is already revised by the file plugin
        #RE.md['pilatus']['cbf_file_handler_ver'] = 0
def update_header(self, uid):
for det in self.active_detectors:
det.HeaderString.put(f"uid={uid}")
def activate(self, det_list):
""" e.g.
activate(['pil1M', 'pilW2'])
"""
for det in det_list:
if det not in self.dets.keys():
raise Exception(f"{det} is not a known Pilatus detector.")
self.active_detectors = [self.dets[d] for d in det_list]
def set_trigger_mode(self, trigger_mode):
if isinstance(trigger_mode, PilatusTriggerMode):
self.trigger_mode = trigger_mode
else:
print(f"invalid trigger mode: {trigger_mode}")
RE.md['pilatus']['trigger_mode'] = trigger_mode.name
    def set_num_images(self, num, rep=1):
        # num frames per trigger, repeated rep times; also recorded in run metadata.
        self._num_images = num
        self._num_repeats = rep
        RE.md['pilatus']['num_images'] = [num, rep]
def number_reset(self, reset=True):
self.reset_file_number = reset
if reset:
for det in self.dets.values():
det.cbf_file_number.put(0)
det.hdf.file_number.put(0)
def exp_time(self, exp):
for det_name in self.dets.keys():
self.dets[det_name].read_attrs = ['hdf']
self.dets[det_name].cam.acquire_time.put(exp)
self.dets[det_name].cam.acquire_period.put(exp+0.005)
self.acq_time = exp+0.005
RE.md['pilatus']['exposure_time'] = exp
def use_sub_directory(self, sd=None):
if sd is not None:
if sd[-1]!='/':
sd += '/'
makedirs(data_path+sd, mode=0o0777)
RE.md['subdir'] = LIXhdfPlugin.sub_directory
LIXhdfPlugin.sub_directory = | |
with contents to be displayed, defaults to empty DataFrame
Notes
-----
The reference of the original input-DataFrame is lost when edited by this Model,
you need to retrieve it directly from the model after editing
"""
    def __init__(self, data=None, **kwargs):
        # Delegates entirely to the base pandas model; data defaults to an
        # empty DataFrame there.
        super().__init__(data, **kwargs)
def setData(self, index, value, role=None):
if role == Qt.EditRole:
try:
value = literal_eval(value)
# List or Dictionary not allowed here as PandasDataFrame-Item
if isinstance(value, dict) or isinstance(value, list):
value = str(value)
except (SyntaxError, ValueError):
pass
self._data.iloc[index.row(), index.column()] = value
self.dataChanged.emit(index, index, [role])
return True
return False
def setHeaderData(self, index, orientation, value, role=Qt.EditRole):
if role == Qt.EditRole:
if orientation == Qt.Vertical:
# DataFrame.rename does rename all duplicate indices if existent,
# that's why the index is reassigned directly
new_index = list(self._data.index)
new_index[index] = value
self._data.index = new_index
self.headerDataChanged.emit(Qt.Vertical, index, index)
return True
elif orientation == Qt.Horizontal:
# DataFrame.rename does rename all duplicate columns if existent,
# that's why the columns are reassigned directly
new_columns = list(self._data.columns)
new_columns[index] = value
self._data.columns = new_columns
self.headerDataChanged.emit(Qt.Horizontal, index, index)
return True
return False
    def flags(self, index=QModelIndex()):
        # Every cell is editable on top of the default item flags.
        return QAbstractItemModel.flags(self, index) | Qt.ItemIsEditable
def insertRows(self, row, count, index=QModelIndex()):
self.beginInsertRows(index, row, row + count - 1)
add_data = pd.DataFrame(columns=self._data.columns, index=[r for r in range(count)])
if row == 0:
self._data = pd.concat([add_data, self._data])
elif row == len(self._data.index):
self._data = self._data.append(add_data)
else:
self._data = pd.concat([self._data.iloc[:row], add_data, self._data.iloc[row:]])
self.endInsertRows()
return True
def insertColumns(self, column, count, index=QModelIndex()):
self.beginInsertColumns(index, column, column + count - 1)
add_data = pd.DataFrame(index=self._data.index, columns=[c for c in range(count)])
if column == 0:
self._data = pd.concat([add_data, self._data], axis=1)
elif column == len(self._data.columns):
self._data = pd.concat([self._data, add_data], axis=1)
else:
self._data = pd.concat([self._data.iloc[:, :column], add_data, self._data.iloc[:, column:]], axis=1)
self.endInsertColumns()
return True
def removeRows(self, row, count, index=QModelIndex()):
self.beginRemoveRows(index, row, row + count - 1)
# Can't use DataFrame.drop() here, because there could be rows with similar index-labels
if row == 0:
self._data = self._data.iloc[row + count:]
elif row + count >= len(self._data.index):
self._data = self._data.iloc[:row]
else:
self._data = pd.concat([self._data.iloc[:row], self._data.iloc[row + count:]])
self.endRemoveRows()
return True
def removeColumns(self, column, count, index=QModelIndex()):
self.beginRemoveColumns(index, column, column + count - 1)
# Can't use DataFrame.drop() here, because there could be columns with similar column-labels
if column == 0:
self._data = self._data.iloc[:, column + count:]
elif column + count >= len(self._data.columns):
self._data = self._data.iloc[:, :column]
else:
self._data = pd.concat([self._data.iloc[:, :column], self._data.iloc[:, column + count:]], axis=1)
self.endRemoveColumns()
return True
class TreeItem:
    """TreeItem as in https://doc.qt.io/qt-5/qtwidgets-itemviews-simpletreemodel-example.html

    Holds a list of column values (_data), a reference to its parent item and
    a list of child items.
    """
    def __init__(self, data, parent=None):
        self._data = data
        self._parent = parent
        self._children = list()

    def child(self, number):
        """Return the child at *number*, or None if out of range."""
        if 0 <= number < len(self._children):
            return self._children[number]

    def childCount(self):
        """Number of direct children."""
        return len(self._children)

    def row(self):
        """This item's position within its parent (0 for the root)."""
        if self._parent:
            return self._parent._children.index(self)
        return 0

    def columnCount(self):
        """Number of data columns stored on this item."""
        return len(self._data)

    def data(self, column):
        """Return the value for *column*, or None if out of range."""
        if 0 <= column < len(self._data):
            return self._data[column]

    def setData(self, column, value):
        """Set *column* to *value*; returns False if the column is out of range."""
        if 0 <= column < len(self._data):
            self._data[column] = value
            return True
        return False

    def insertChild(self, position):
        """Insert a placeholder child at *position* (an existing slot)."""
        if 0 <= position < len(self._children):
            self._children.insert(position, TreeItem([f'__new__{len(self._children)}'], self))
            return True
        return False

    def removeChild(self, position):
        """Remove the child at *position*; returns False if out of range."""
        if 0 <= position < len(self._children):
            # Bug fix: list.remove() deleted the first *equal* child, which is
            # not necessarily the one at *position*; delete by position.
            del self._children[position]
            return True
        return False

    def insertColumn(self, position):
        """Insert a placeholder column at *position*, recursively on all children."""
        if 0 <= position < len(self._data):
            self._data.insert(position, f'__new__{len(self._data)}')
            for child in self._children:
                # Bug fix: was spelled 'insertColumns', a nonexistent method,
                # so any item with children raised AttributeError.
                child.insertColumn(position)
            return True
        return False

    def removeColumn(self, position):
        """Remove the column at *position*, recursively on all children."""
        if 0 <= position < len(self._data):
            # Bug fix: list.remove() deleted the first *equal* value; delete
            # by position so duplicate column values are handled correctly.
            del self._data[position]
            for child in self._children:
                # Bug fix: was spelled 'removeColumns' (nonexistent method).
                child.removeColumn(position)
            return True
        return False
class TreeModel(QAbstractItemModel):
    """Tree-Model as in https://doc.qt.io/qt-5/qtwidgets-itemviews-simpletreemodel-example.html

    Builds a TreeItem hierarchy from a (possibly nested) dict; dict keys go
    into column 0 and any extra columns are left blank.
    """
    def __init__(self, data, n_columns=1, headers=None, parent=None):
        super().__init__(parent)
        self._data = data
        self._n_columns = n_columns
        # Normalize headers to exactly n_columns entries.
        if headers is None:
            headers = ['' for i in range(n_columns)]
        elif len(headers) < n_columns:
            # Bug fix: was 'n_columns - headers' (int minus list), which
            # raised TypeError whenever headers were too short.
            headers += ['' for i in range(n_columns - len(headers))]
        elif len(headers) > n_columns:
            headers = headers[:n_columns]
        self._headers = headers
        self._parent = parent
        self.root_item = self.dict_to_items(self._data)

    def dict_to_items(self, datadict, parent=None):
        """Recursively convert *datadict* into TreeItems attached to *parent*."""
        if parent is None:
            parent = TreeItem(self._headers)
        for key, value in datadict.items():
            data = [key] + ['' for i in range(self._n_columns - 1)]
            tree_item = TreeItem(data, parent)
            if isinstance(value, dict):
                # Bug fix: the recursive call already attaches the nested
                # items to tree_item and returns tree_item itself; appending
                # the return value made the item a child of itself, creating
                # a cycle that hangs any full traversal.
                self.dict_to_items(value, tree_item)
            parent._children.append(tree_item)
        return parent

    # noinspection PyMethodMayBeStatic
    def getData(self, index):
        """Value stored at *index*, or None for an invalid index."""
        if index.isValid():
            item = index.internalPointer()
            return item.data(index.column())

    def data(self, index: QModelIndex, role: int = ...) -> object:
        if role == Qt.DisplayRole:
            return self.getData(index)

    def index(self, row: int, column: int, parent: QModelIndex = ...) -> QModelIndex:
        if self.hasIndex(row, column, parent):
            if parent.isValid():
                parentItem = parent.internalPointer()
            else:
                parentItem = self.root_item
            childItem = parentItem.child(row)
            if childItem is not None:
                return self.createIndex(row, column, childItem)
        return QModelIndex()

    def parent(self, child: QModelIndex) -> QModelIndex:
        if child.isValid():
            childItem = child.internalPointer()
            parentItem = childItem._parent
            if parentItem != self.root_item:
                return self.createIndex(parentItem.row(), 0, parentItem)
        return QModelIndex()

    def rowCount(self, parent: QModelIndex = ...) -> int:
        if parent.column() > 0:
            return 0
        if parent.isValid():
            parentItem = parent.internalPointer()
        else:
            parentItem = self.root_item
        return parentItem.childCount()

    def columnCount(self, parent: QModelIndex = ...) -> int:
        if parent.isValid():
            return parent.internalPointer().columnCount()
        return self.root_item.columnCount()

    def flags(self, index: QModelIndex) -> Qt.ItemFlags:
        if index.isValid():
            return QAbstractItemModel.flags(self, index)
        return Qt.NoItemFlags

    def headerData(self, section: int, orientation: Qt.Orientation, role: int = ...) -> object:
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            # Bug fix: the header value was computed but never returned,
            # so all headers rendered empty.
            return self.root_item.data(section)
class AddFilesModel(BasePandasModel):
    """Pandas model for the file-import table; the 'Empty-Room?' column is
    rendered as a checkbox instead of text."""
    def __init__(self, data, **kwargs):
        super().__init__(data, **kwargs)

    def data(self, index, role=None):
        column = self._data.columns[index.column()]
        is_checkbox = column == 'Empty-Room?'
        if role == Qt.DisplayRole:
            # The checkbox column shows no text.
            return '' if is_checkbox else str(self.getData(index))
        if role == Qt.CheckStateRole and is_checkbox:
            return Qt.Checked if self.getData(index) else Qt.Unchecked

    def setData(self, index, value, role=None):
        """Accept checkbox toggles for the 'Empty-Room?' column only."""
        if role != Qt.CheckStateRole:
            return False
        if self._data.columns[index.column()] != 'Empty-Room?':
            return False
        self._data.iloc[index.row(), index.column()] = 1 if value == Qt.Checked else 0
        self.dataChanged.emit(index, index, [role])
        return True

    def flags(self, index=QModelIndex()):
        base = QAbstractItemModel.flags(self, index)
        if self._data.columns[index.column()] == 'Empty-Room?':
            base = base | Qt.ItemIsUserCheckable
        return base

    def removeRows(self, row, count, index=QModelIndex()):
        """Remove *count* rows starting at *row* (positional; duplicate index
        labels may exist, so DataFrame.drop is avoided)."""
        self.beginRemoveRows(index, row, row + count - 1)
        if row == 0:
            kept = self._data.iloc[row + count:]
        elif row + count >= len(self._data.index):
            kept = self._data.iloc[:row]
        else:
            kept = pd.concat([self._data.iloc[:row], self._data.iloc[row + count:]])
        self._data = kept
        self.endRemoveRows()
        return True
class FileManagementModel(BasePandasModel):
    """A model for the Pandas-DataFrames containing information about the existing files.

    Cells hold a status string ('exists', 'possible_conflict',
    'critical_conflict'), a datetime, or a file size in bytes (float); each
    kind is rendered with matching text, icon and background color.
    """
    def __init__(self, data, **kwargs):
        super().__init__(data, **kwargs)

    def data(self, index, role=None):
        value = self.getData(index)
        if role == Qt.DisplayRole:
            # Bug fix: the guard said 'existst', so 'exists' was never matched
            # here (it only rendered blank because it also fell through every
            # other branch). Status cells show an icon, no text.
            if pd.isna(value) or value in ['exists', 'possible_conflict', 'critical_conflict']:
                pass
            elif isinstance(value, datetime):
                return value.strftime('%d.%m.%y %H:%M')
            elif isinstance(value, float):
                if value == 0:
                    pass  # zero size: icon only
                elif value / 1024 < 1000:
                    return f'{int(value / 1024)} KB'
                else:
                    return f'{int(value / (1024 ** 2))} MB'
        if role == Qt.DecorationRole:
            if pd.isna(value) or value == 0:
                return get_std_icon('SP_DialogCancelButton')
            elif value == 'exists':
                return get_std_icon('SP_DialogApplyButton')
            elif value == 'possible_conflict':
                return get_std_icon('SP_MessageBoxQuestion')
            elif value == 'critical_conflict':
                return get_std_icon('SP_MessageBoxWarning')
        elif role == Qt.BackgroundRole:
            if pd.isna(value) or value == 0:
                return QBrush(Qt.darkRed)
            elif value == 'exists':
                return QBrush(Qt.green)
            elif value == 'possible_conflict':
                return QBrush(Qt.lightGray)
            elif value == 'critical_conflict':
                return QBrush(Qt.darkYellow)
class CustomFunctionModel(QAbstractListModel):
    """A Model for the Pandas-DataFrames containing information about new custom functions/their paramers
    to display only their name and if they are ready
    Parameters
    ----------
    data : DataFrame
        add_pd_funcs or add_pd_params
    """
    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)
        self._data = data

    def getData(self, index=QModelIndex()):
        """Return the DataFrame index label for the given model row."""
        return self._data.index[index.row()]

    def updateData(self, new_data):
        """Swap in a new DataFrame and notify all attached views."""
        self._data = new_data
        self.layoutChanged.emit()

    def data(self, index, role=None):
        if role == Qt.DisplayRole:
            return str(self.getData(index))
        if role == Qt.DecorationRole:
            # Green check when the function is marked ready, red cross otherwise.
            ready = self._data.loc[self.getData(index), 'ready']
            icon_name = 'SP_DialogApplyButton' if ready else 'SP_DialogCancelButton'
            return get_std_icon(icon_name)

    def rowCount(self, index=QModelIndex()):
        return len(self._data.index)
class RunModel(QAbstractListModel):
"""A model for the items/functions of a Pipeline-Run
"""
    def __init__(self, data, mode):
        # data: ordered dict of pipeline items; mode controls value layout --
        # 'object' means each entry is a dict carrying 'status'/'type' keys,
        # any other mode means the entry itself is the state value.
        super().__init__()
        self._data = data
        self.mode = mode
    def getKey(self, index=QModelIndex()):
        # Row number -> dict key (dicts preserve insertion order).
        return list(self._data.keys())[index.row()]
    def getValue(self, index=QModelIndex()):
        # In 'object' mode each entry is a dict holding a 'status' field;
        # otherwise the entry itself is the state value.
        if self.mode == 'object':
            return self._data[self.getKey(index)]['status']
        else:
            return self._data[self.getKey(index)]
    def getType(self, index=QModelIndex()):
        # Only meaningful in 'object' mode, where entries carry a 'type' field.
        return self._data[self.getKey(index)]['type']
def data(self, index, role=None):
if role == Qt.DisplayRole:
if self.mode == 'object':
return f'{self.getType(index)}: {self.getKey(index)}'
return self.getKey(index)
# Object/Function-States:
# 0 = Finished
# 1 = Pending
# 2 = Currently Runnning
# Return Foreground depending on state of object/function
elif role == Qt.ForegroundRole:
if self.getValue(index) == 0:
return QBrush(Qt.darkGray)
elif self.getValue(index) == 2:
return QBrush(Qt.green)
# Return Background depending on state of object/function
elif role == Qt.BackgroundRole:
if self.getValue(index) == 2:
return QBrush(Qt.darkGreen)
# Mark objects/functions if they are already done, mark objects according to their type (color-code)
elif role == Qt.DecorationRole:
if | |
f1(*(), **{})
{def} f2(one_argument): {pass}
{def} f3(two, arguments): {pass}
self.assertEqual(f2.__code__.co_varnames, ('one_argument',))
self.assertEqual(f3.__code__.co_varnames, ('two', 'arguments'))
{def} a1(one_arg,): {pass}
{def} a2(two, args,): {pass}
{def} v0(*rest): {pass}
{def} v1(a, *rest): {pass}
{def} v2(a, b, *rest): {pass}
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
{def} d01(a=1): {pass}
d01()
d01(1)
d01(*(1,))
d01(*[] {or} [2])
d01(*() {or} (), *{} {and} (), **() {or} {})
d01(**{'a':2})
d01(**{'a':2} {or} {})
{def} d11(a, b=1): {pass}
d11(1)
d11(1, 2)
d11(1, **{'b':2})
{def} d21(a, b, c=1): {pass}
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
{def} d02(a=1, b=2): {pass}
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
{def} d12(a, b=1, c=2): {pass}
d12(1)
d12(1, 2)
d12(1, 2, 3)
{def} d22(a, b, c=1, d=2): {pass}
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
{def} d01v(a=1, *rest): {pass}
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
{def} d11v(a, b=1, *rest): {pass}
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
{def} d21v(a, b, c=1, *rest): {pass}
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
{def} d02v(a=1, b=2, *rest): {pass}
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
{def} d12v(a, b=1, c=2, *rest): {pass}
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
{def} d22v(a, b, c=1, d=2, *rest): {pass}
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# keyword argument type tests
{try}:
str('x', **{b'foo':1 })
{except} TypeError:
{pass}
{else}:
self.fail('Bytes should not work as keyword argument names')
# keyword only argument tests
{def} pos0key1(*, key): {return} key
pos0key1(key=100)
{def} pos2key2(p1, p2, *, k1, k2=100): {return} p1,p2,k1,k2
pos2key2(1, 2, k1=100)
pos2key2(1, 2, k1=100, k2=200)
pos2key2(1, 2, k2=100, k1=200)
{def} pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): {return} p1,p2,k1,k2,kwarg
pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
self.assertRaises(SyntaxError, eval, "{def} f(*): {pass}")
self.assertRaises(SyntaxError, eval, "{def} f(*,): {pass}")
self.assertRaises(SyntaxError, eval, "{def} f(*, **kwds): {pass}")
# keyword arguments after *arglist
{def} f(*args, **kwargs):
{return} args, kwargs
self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertEqual(f(1, *(2,3), 4), ((1, 2, 3, 4), {}))
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
self.assertEqual(f(**{'eggs':'scrambled', 'spam':'fried'}),
((), {'eggs':'scrambled', 'spam':'fried'}))
self.assertEqual(f(spam='fried', **{'eggs':'scrambled'}),
((), {'eggs':'scrambled', 'spam':'fried'}))
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
# argument annotation tests
{def} f(x) -> list: {pass}
self.assertEqual(f.__annotations__, {'return': list})
{def} f(x: int): {pass}
self.assertEqual(f.__annotations__, {'x': int})
{def} f(*x: str): {pass}
self.assertEqual(f.__annotations__, {'x': str})
{def} f(**x: float): {pass}
self.assertEqual(f.__annotations__, {'x': float})
{def} f(x, y: 1+2): {pass}
self.assertEqual(f.__annotations__, {'y': 3})
{def} f(a, b: 1, c: 2, d): {pass}
self.assertEqual(f.__annotations__, {'b': 1, 'c': 2})
{def} f(a, b: 1, c: 2, d, e: 3 = 4, f=5, *g: 6): {pass}
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6})
{def} f(a, b: 1, c: 2, d, e: 3 = 4, f=5, *g: 6, h: 7, i=8, j: 9 = 10,
**k: 11) -> 12: {pass}
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
'k': 11, 'return': 12})
# Check for issue #20625 -- annotations mangling
{class} Spam:
{def} f(self, *, __kw: 1):
{pass}
{class} Ham(Spam): {pass}
self.assertEqual(Spam.f.__annotations__, {'_Spam__kw': 1})
self.assertEqual(Ham.f.__annotations__, {'_Spam__kw': 1})
# Check for SF Bug #1697248 - mixing decorators and a return annotation
{def} null(x): {return} x
@null
{def} f(x) -> list: {pass}
self.assertEqual(f.__annotations__, {'return': list})
# test closures with a variety of opargs
closure = 1
{def} f(): {return} closure
{def} f(x=1): {return} closure
{def} f(*, k=1): {return} closure
{def} f() -> int: {return} closure
# Check trailing commas are permitted in funcdef argument list
{def} f(a,): {pass}
{def} f(*args,): {pass}
{def} f(**kwds,): {pass}
{def} f(a, *args,): {pass}
{def} f(a, **kwds,): {pass}
{def} f(*args, b,): {pass}
{def} f(*, b,): {pass}
{def} f(*args, **kwds,): {pass}
{def} f(a, *args, b,): {pass}
{def} f(a, *, b,): {pass}
{def} f(a, *args, **kwds,): {pass}
{def} f(*args, b, **kwds,): {pass}
{def} f(*, b, **kwds,): {pass}
{def} f(a, *args, b, **kwds,): {pass}
{def} f(a, *, b, **kwds,): {pass}
    {def} test_lambdef(self):
        ### lambdef: 'lambda' [varargslist] ':' test
        # Note: brace-delimited tokens are keyword placeholders substituted
        # by the test harness via str.format.
        l1 = {lambda} : 0
        self.assertEqual(l1(), 0)
        l2 = {lambda} : a[d] # XXX just testing the expression
        l3 = {lambda} : [2 < x {for} x {in} [-1, 3, 0]]
        self.assertEqual(l3(), [0, 1, 0])
        l4 = {lambda} x = {lambda} y = {lambda} z=1 : z : y() : x()
        self.assertEqual(l4(), 1)
        l5 = {lambda} x, y, z=2: x + y + z
        self.assertEqual(l5(1, 2), 5)
        self.assertEqual(l5(1, 2, 3), 6)
        check_syntax_error(self, "{lambda} x: x = 2")
        check_syntax_error(self, "{lambda} (None,): None")
        l6 = {lambda} x, y, *, k=20: x+y+k
        self.assertEqual(l6(1,2), 1+2+20)
        self.assertEqual(l6(1,2,k=10), 1+2+10)
        # check that trailing commas are permitted
        l10 = {lambda} a,: 0
        l11 = {lambda} *args,: 0
        l12 = {lambda} **kwds,: 0
        l13 = {lambda} a, *args,: 0
        l14 = {lambda} a, **kwds,: 0
        l15 = {lambda} *args, b,: 0
        l16 = {lambda} *, b,: 0
        l17 = {lambda} *args, **kwds,: 0
        l18 = {lambda} a, *args, b,: 0
        l19 = {lambda} a, *, b,: 0
        l20 = {lambda} a, *args, **kwds,: 0
        l21 = {lambda} *args, b, **kwds,: 0
        l22 = {lambda} *, b, **kwds,: 0
        l23 = {lambda} a, *args, b, **kwds,: 0
        l24 = {lambda} a, *, b, **kwds,: 0
### stmt: simple_stmt | compound_stmt
# Tested below
    {def} test_simple_stmt(self):
        ### simple_stmt: small_stmt (';' small_stmt)* [';']
        # Semicolon-separated statement lists, with and without a trailing one.
        x = 1; {pass}; {del} x
        {def} foo():
            # verify statements that end with semi-colons
            x = 1; {pass}; {del} x;
        foo()
### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
# Tested below
    {def} test_expr_stmt(self):
        # (exprlist '=')* exprlist
        # Bare expressions and chained/tuple assignments must all parse;
        # assignment to an arithmetic expression must not.
        1
        1, 2, 3
        x = 1
        x = 1, 2, 3
        x = y = z = 1, 2, 3
        x, y, z = 1, 2, 3
        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
        check_syntax_error(self, "x + 1 = 1")
        check_syntax_error(self, "a + 1 = b + 2")
# Check the heuristic for print & exec covers significant cases
# As well as placing some limits on false positives
    {def} test_former_statements_refer_to_builtins(self):
        # Former Python 2 statements must produce the helpful custom error
        # for plain calls, but a generic SyntaxError for the variants below.
        keywords = "{print}", "{exec}"
        # Cases where we want the custom error
        cases = [
            "{} foo",
            "{} {{1:foo}}",
            "{if} 1: {} foo",
            "{if} 1: {} {{1:foo}}",
            "{if} 1:\n {} foo",
            "{if} 1:\n {} {{1:foo}}",
        ]
        {for} keyword {in} keywords:
            custom_msg = "call to '{}'".format(keyword)
            {for} case {in} cases:
                source = case.format(keyword)
                {with} self.subTest(source=source):
                    {with} self.assertRaisesRegex(SyntaxError, custom_msg):
                        {exec}(source)
                # With an invalid expression the heuristic must not fire.
                source = source.replace("foo", "(foo.)")
                {with} self.subTest(source=source):
                    {with} self.assertRaisesRegex(SyntaxError, "invalid syntax"):
                        {exec}(source)
    {def} test_del_stmt(self):
        # 'del' exprlist
        # Deleting single names, name lists and nested targets must all parse.
        abc = [1,2,3]
        x, y, z = abc
        xyz = x, y, z
        {del} abc
        {del} x, y, (z, xyz)
    {def} test_pass_stmt(self):
        # 'pass'
        # A bare no-op statement must be accepted.
        {pass}
# flow_stmt: {break}_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
    {def} test_break_stmt(self):
        # 'break'
        # break must immediately terminate the enclosing loop.
        {while} 1: {break}
    {def} test_continue_stmt(self):
        # 'continue'
        i = 1
        {while} i: i = 0; {continue}
        # continue inside try must skip the rest of the try body without
        # entering the except handler.
        msg = ""
        {while} {not} msg:
            msg = "ok"
            {try}:
                {continue}
                msg = "continue failed to continue inside try"
            {except}:
                msg = "continue inside try called except block"
        {if} msg != "ok":
            self.fail(msg)
        # continue inside try must still run the finally block.
        msg = ""
        {while} {not} msg:
            msg = "finally block not called"
            {try}:
                {continue}
            {finally}:
                msg = "ok"
        {if} msg != "ok":
            self.fail(msg)
{def} test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, | |
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 04:20:00 CEST 2016
@authors: <NAME>
@email: juancarlos.entizne01[at]estudiant.upf.edu
Modified by <NAME>
@email: juanluis.trincado[at]upf.edu
"""
import os
import sys
import math
import logging
import warnings
import numpy as np
import pandas as pd
from functools import reduce
from bisect import bisect_left
from collections import defaultdict
from itertools import combinations, islice
from scipy.stats import wilcoxon, mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.distributions.empirical_distribution import ECDF
def progressbar(prefix, i, lst_len):
    """Render an in-place (carriage-return) progress line on stdout."""
    counter = "%d / %d. " % (i + 1, lst_len)
    percent = "%.2f%% completed." % ((i / lst_len) * 100)
    print(prefix, " ", counter, percent, end="\r", flush=True)
def flatten(d):
    """Concatenate each value's list-of-sequences into a single sequence.

    d maps keys to a list of lists (or a list of tuples); the result maps each
    key to the concatenation of those inner sequences, preserving list/tuple
    type per key.

    Bug fix: the original tried ``sum(v, [])`` for the whole dict and then
    ``sum(v, ())``, swallowing errors with bare excepts; if both attempts
    failed (e.g. mixed list/tuple values) it raised NameError on ``fd``.
    This version decides the seed per key, so mixed dicts work too.
    """
    fd = {}
    for k, v in d.items():
        # Pick the concatenation seed from the first inner sequence's type.
        seed = () if (v and isinstance(v[0], tuple)) else []
        fd[k] = sum(v, seed)
    return fd
def create_dict(arg):
    """Parse a whitespace-delimited table (first line = header, first column =
    event id) into a flat dict ``event_id -> [float values]``.

    Non-numeric cells become NaN.  Bug fix: the bare ``except`` around
    ``float(val)`` is narrowed to ``ValueError`` so unrelated errors are no
    longer silently converted into NaNs.
    """
    d = defaultdict(list)
    with open(arg) as fh:
        next(fh)  # skip the header row
        for event in fh:
            line = event.split()
            event_id = line[0]
            event_vals = []
            for val in line[1:]:
                try:
                    event_vals.append(float(val))
                except ValueError:
                    # e.g. 'na' placeholders in the input table
                    event_vals.append(float('nan'))
            d[event_id].append(event_vals)
    return flatten(d)
def get_psi_values(dict1, dict2):
    """Group the PSI lists of both conditions under one shared event key."""
    merged = defaultdict(list)
    for condition in (dict1, dict2):
        for event_id, vals in condition.items():
            merged[event_id].append(vals)
    return merged
def get_proportion_nans(psi_list):
    """Return the fraction of NaN entries in psi_list (1.0 for an empty list)."""
    if not psi_list:
        return 1.00
    nan_count = sum(1 for value in psi_list if math.isnan(value))
    return float(nan_count) / float(len(psi_list))
def calculate_delta_psi(psi_values, median, nan_th):
    """Compute per-event dPSI = condition2 - condition1.

    psi_values: dict event -> [cond1_psi_list, cond2_psi_list]
    median:     truthy -> use per-condition medians, else means
    nan_th:     maximum tolerated fraction of NaNs per condition
    Returns (abs_dpsi_dict, dpsi_dict, discarded_dict); a discarded event
    carries the placeholder pair [nan, 1.0] (unknown dPSI, p-value 1).
    """
    abs_dt, dt, discarded_events = (defaultdict(list) for _ in range(3))
    for event in psi_values:
        # Get the proportion of missing values per condition for this event
        prop0 = get_proportion_nans(psi_values[event][0])
        prop1 = get_proportion_nans(psi_values[event][1])
        # event will be excluded if any of the proportions overtake the nan_threshold
        if nan_th < prop0 or nan_th < prop1:
            discarded_events[event].append([float("nan"), 1.0000000000])
        else:
            # if it passes the threshold, remove all the nan values from each list
            psi_values_0 = [x for x in psi_values[event][0] if str(x) != 'nan']
            psi_values_1 = [x for x in psi_values[event][1] if str(x) != 'nan']
            if median:
                abs_dpsi_val = abs(np.nanmedian(psi_values_1) - np.nanmedian(psi_values_0))
                abs_dt[event].append(abs_dpsi_val)
                dpsi_val = np.nanmedian(psi_values_1) - np.nanmedian(psi_values_0)
                dt[event].append(dpsi_val)
            else:
                # NOTE(review): the mean branch uses the unfiltered lists
                # (nanmean skips NaNs anyway); only the median branch uses the
                # filtered copies above.
                # Ignore empty slice warning when calculating the mean/median
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', r'Mean of empty slice')
                    abs_dpsi_val = abs(np.nanmean(psi_values[event][1]) - np.nanmean(psi_values[event][0]))
                    abs_dt[event].append(abs_dpsi_val)
                    dpsi_val = np.nanmean(psi_values[event][1]) - np.nanmean(psi_values[event][0])
                    dt[event].append(dpsi_val)
    # Flatten the single-element value lists of the dictionaries into scalars
    dpsi_abs_values = {k: sum(v) for k, v in abs_dt.items()}
    dpsi_values = {k: sum(v) for k, v in dt.items()}
    return dpsi_abs_values, dpsi_values, flatten(discarded_events)
def get_events_transcripts(ioe):
    """Map each event id (column 3 of the .ioe file) to its comma-separated
    total-transcript list (column 5).

    Minor fix: each line was split twice; the split is now done once per line.
    """
    td = defaultdict(list)
    with open(ioe) as fh_ioe:
        for line in fh_ioe:
            fields = line.split()
            event_id_ioe = fields[2]
            tot_transcripts = fields[4].split(',')
            td[event_id_ioe].append(tot_transcripts)
    return flatten(td)
def get_tpm_values(tpm1_values, tpm2_values, transcripts_values):
    """Collect, per event, the TPM value lists of its transcripts for both
    conditions: result[event] == [cond1_transcript_vals, cond2_transcript_vals].

    Transcripts absent from a condition's TPM table are silently skipped, so
    that event simply gets a shorter list (original behavior).  Fixes: the
    bare ``except`` is narrowed to ``KeyError``, and the write-only
    ``discarded_transcript_events`` list (never read or returned) is removed.
    """
    tpm_values = defaultdict(list)
    for tpm_dt in (tpm1_values, tpm2_values):
        for event in transcripts_values:
            transcript_vals = []
            for transcript in transcripts_values[event]:
                try:
                    transcript_vals.append(tpm_dt[transcript])
                except KeyError:
                    # transcript not quantified in this condition
                    pass
            tpm_values[event].append(transcript_vals)
    return tpm_values
def calculate_transcript_abundance(tpm_values, tpm_th):
    """Average log10(total TPM) per event across both conditions.

    tpm_values: dict event -> [cond1_transcript_vals, cond2_transcript_vals],
    where each transcript_vals is a list of per-replicate value lists.
    tpm_th: TPM threshold; 0 disables filtering (threshold -> -inf).
    Returns dict event -> between-condition average log10(TPM), keeping only
    events that pass the threshold in both conditions.

    Fixes: bare ``except`` narrowed to the errors log10/division can raise;
    float literal ``2.0`` replaced by the integer length comparison.
    """
    if tpm_th != 0:
        tpm_th_log10 = math.log10(tpm_th)
    else:
        tpm_th_log10 = -float('Inf')
    temp_between_conditions_logtpm = defaultdict(list)
    for event in tpm_values:
        conditions_average_logtpm = []
        for transcript_vals in tpm_values[event]:
            # Group the TPMs according to their replicate of origin
            replicates_transcript_values = list(zip(*transcript_vals))
            try:
                replicates_logtpms = [math.log10(sum(rep_tpms)) for rep_tpms in replicates_transcript_values]
                average_replicate_transcript_abundance = sum(replicates_logtpms)/len(replicates_logtpms)
                if average_replicate_transcript_abundance >= tpm_th_log10:
                    conditions_average_logtpm.append(average_replicate_transcript_abundance)
            except (ValueError, ZeroDivisionError):
                # log10 of a non-positive TPM sum, or no replicates at all
                pass
        # Keep only events with a valid abundance in both conditions
        if len(conditions_average_logtpm) == 2:
            between_conditions_average_transcript_abundance = 0.5 * sum(conditions_average_logtpm)
            if between_conditions_average_transcript_abundance >= tpm_th_log10:
                temp_between_conditions_logtpm[event].append(between_conditions_average_transcript_abundance)
    # Flatten the single-element value lists.
    # NOTE(review): `if v[0]` also drops events whose average is exactly 0.0 -
    # preserved here because downstream code may rely on it.
    between_conditions_avglogtpm = {k: sum(v) for k, v in temp_between_conditions_logtpm.items() if v[0]}
    return between_conditions_avglogtpm
def merge_dict(d1, d2):
    """Pair values found under the same key in both dicts; keys present in
    only one input are dropped.  Result: key -> [value_from_d1, value_from_d2]."""
    collected = defaultdict(list)
    for source in (d1, d2):
        for key, value in source.items():
            collected[key].append(value)
    paired = defaultdict(list)
    for key, values in collected.items():
        # keep only keys that appeared in both inputs
        if len(values) == 2:
            paired[key].append(values)
    return flatten(paired)
def get_closest_number(lst, n):
    """
    Assumes lst is sorted; return the element closest to n.
    Ties are resolved towards the smaller element.
    Source: http://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-value/
    """
    pos = bisect_left(lst, n)
    if pos == 0:
        return lst[0]
    if pos == len(lst):
        return lst[-1]
    lo, hi = lst[pos - 1], lst[pos]
    # strict < keeps the tie on the smaller side
    return hi if hi - n < n - lo else lo
def slice_list(lst, index, slice_len):
    """Return a window of about slice_len+1 items centred on index,
    shifted (not shrunk) when it would overrun either end of lst."""
    half_len = int(slice_len * 0.5)
    lower = index - half_len
    upper = index + half_len
    if lower < 0:
        # window sticks out on the left: pin it to the start
        left_bound, right_bound = 0, upper - lower + 1
    elif upper >= len(lst):
        # window sticks out on the right: shift it back by the overshoot
        overshoot = upper - len(lst) + 1
        left_bound, right_bound = lower - overshoot, upper + 1
    else:
        left_bound, right_bound = lower, upper + 1
    return lst[left_bound:right_bound]
def calculate_empirical_pvalue(local_area, dpsi_abs_value):
    """One-tailed empirical p-value of |dPSI| against the local null
    distribution of replicate deltas."""
    abs_deltas = [abs(val) for val in local_area]
    ecdf = ECDF(abs_deltas)
    # Halved because the ECDF is built from absolute deltas, making this a
    # one-tailed test
    return (1.0 - ecdf(dpsi_abs_value)) * 0.5
def calculate_between_conditions_distribution(cond1, cond2, tpm1, tpm2, ioe, save_tpm, median, tpm_th, nan_th, output):
    """Orchestrate the between-conditions statistics.

    Reads the two PSI tables, the two TPM tables and the .ioe event file,
    then returns a 6-tuple: (merged |dPSI|/logTPM dict, psi_values,
    tpm_values, abs dPSI dict, dPSI dict, discarded events dict).
    When save_tpm is truthy the per-event average log10(TPM) table is written
    to '<output>_avglogtpm.tab'.

    Fix: the optional output file is now written via a context manager so the
    handle is closed even if a write fails.
    """
    cond1_psi_values = create_dict(cond1)
    cond2_psi_values = create_dict(cond2)
    psi_values = get_psi_values(cond1_psi_values, cond2_psi_values)
    dpsi_abs_values, dpsi_values, discarded_events = calculate_delta_psi(psi_values, median, nan_th)
    transcripts_values = get_events_transcripts(ioe)
    tpm1_values = create_dict(tpm1)
    tpm2_values = create_dict(tpm2)
    tpm_values = get_tpm_values(tpm1_values, tpm2_values, transcripts_values)
    between_conditions_avglogtpm = calculate_transcript_abundance(tpm_values, tpm_th)
    if save_tpm:
        # Persist the between_conditions_avglogtpm object
        print("Saving between_conditions_avglogtpm...")
        output = output + "_avglogtpm.tab"
        with open(output, 'w') as outFile:
            for key in between_conditions_avglogtpm.keys():
                line = key + "\t" + str(between_conditions_avglogtpm[key]) + "\n"
                outFile.write(line)
        print("Saved "+output)
    between_conditions_absdpsi_logtpm = merge_dict(dpsi_abs_values, between_conditions_avglogtpm)
    return between_conditions_absdpsi_logtpm, psi_values, tpm_values, dpsi_abs_values, dpsi_values, discarded_events
def create_replicates_distribution(between_conditions_distribution, psi_dict, tpm_dict):
    """Build the within-condition replicate null distribution.

    For every event kept in between_conditions_distribution, each pair of
    replicates inside the same condition contributes one
    (replicate dPSI, pair-average log10 TPM) point.
    Returns a numpy array of those points sorted by the log10(TPM) column,
    as required by get_closest_number / get_local_distribution.
    """
    unsorted_replicates_distribution, unsorted_rep_dist_for_plot = ([] for _ in range(2))
    for event in between_conditions_distribution.keys():
        conds_psi_rep_tpms = list(zip(psi_dict[event], tpm_dict[event]))
        for cond_psi_trans in conds_psi_rep_tpms:
            psis = cond_psi_trans[0]
            trans_tpms = cond_psi_trans[1]
            # Group the TPMs according to their replicate of origin
            rep_trans = list(zip(*trans_tpms))
            rep_psi_trans_lst = list(zip(psis, rep_trans))
            cond_psi_trans_lst = []
            for psi_trans in rep_psi_trans_lst:
                rep_psi_val = psi_trans[0]
                trans = psi_trans[1]
                try:
                    rep_logtpm = math.log10(sum(trans))
                    rep_psi_logtpm_pair = (rep_psi_val, rep_logtpm)
                    cond_psi_trans_lst.append(rep_psi_logtpm_pair)
                except Exception as e:
                    # log10 fails on a non-positive TPM sum: skip this replicate
                    pass
            psi_trans_paired = list(combinations(cond_psi_trans_lst, r=2))
            for pair in psi_trans_paired:
                # A rep_pair contains (replicate_psi_value, replicate_avg_log10_tpm_value)
                rep1_pair = pair[0]
                rep2_pair = pair[1]
                try:
                    rep_delta_psi = rep2_pair[0] - rep1_pair[0]
                    rep_pair_avg_logtpm = (rep1_pair[1] + rep2_pair[1]) * 0.5
                    unsorted_replicates_distribution.append((rep_delta_psi, rep_pair_avg_logtpm))
                    unsorted_rep_dist_for_plot.append((event, rep_delta_psi, rep_pair_avg_logtpm))
                except Exception as e:
                    pass
    # It's important to sort because get_closest_number assumes a sorted list
    replicates_distribution = sorted(unsorted_replicates_distribution, key=lambda x: x[1])
    # List converted to numpy array for better performance
    return np.array(replicates_distribution)
def get_local_distribution(ev_logtpm, replicates_distribution, replicates_logtpms, windows_len):
    """Window of replicate (dPSI, logTPM) points whose expression is closest
    to the event's log10(TPM)."""
    nearest_logtpm = get_closest_number(replicates_logtpms, ev_logtpm)
    anchor = replicates_logtpms.index(nearest_logtpm)
    return slice_list(replicates_distribution, anchor, windows_len)
def calculate_events_pvals(between_conditions_distribution,
                           replicates_distribution, area, abs_dpsi_dict, cutoff):
    """Empirical p-value per event.

    Events whose |dPSI| lies inside (-cutoff, cutoff) get p = 1.0 directly;
    the rest are scored against the local replicate distribution (window of
    ``area`` points around the event's log10 TPM).
    Returns (event_list, uncorrected_pvals) in iteration order.

    Fix: the local distribution was computed for *every* event even when the
    cutoff branch discarded it; it is now only computed when actually used.
    """
    replicates_logtpms = [event[1] for event in replicates_distribution]
    lst_len = len(between_conditions_distribution)
    uncorrected_pvals, event_lst = ([] for _ in range(2))
    for i, event in enumerate(between_conditions_distribution):
        progressbar("Calculating events empirical p-value:", i, lst_len)
        between_cond_obs_dpsi = abs_dpsi_dict[event]
        if -cutoff < between_cond_obs_dpsi < cutoff:
            # below the significance cutoff: trivially non-significant
            event_pval = 1.0
        else:
            ev_logtpm = between_conditions_distribution[event][1]
            local_dist = get_local_distribution(ev_logtpm, replicates_distribution, replicates_logtpms, area)
            local_dpsi = [e[0] for e in local_dist]
            event_pval = calculate_empirical_pvalue(local_dpsi, between_cond_obs_dpsi)
        uncorrected_pvals.append(event_pval)
        event_lst.append(event)
    print("\nDone!\n")
    return event_lst, uncorrected_pvals
def nan_eliminator(lst1, lst2, paired):
    """Remove NaNs from two value lists.

    paired=True: drop positions where *either* list is NaN (returns tuples,
    or two empty lists when nothing survives).
    paired=False: filter each list independently (returns lists).

    Fix: the bare ``except`` is narrowed to ``ValueError``, which is what
    ``zip(*[])`` unpacking raises when every pair was NaN.
    """
    if paired:
        z = list(zip(lst1, lst2))
        try:
            l1, l2 = zip(*[e for e in z if not math.isnan(e[0]) and not math.isnan(e[1])])
        except ValueError:
            # no pair survived: unpacking an empty zip fails
            l1, l2 = [], []
    else:
        l1 = [e for e in lst1 if not math.isnan(e)]
        l2 = [e for e in lst2 if not math.isnan(e)]
    return l1, l2
def pval_multiple_test_corrector(pval_dict, alpha):
    """Benjamini-Hochberg FDR correction of a dict of raw p-values.
    Returns a dict with the same keys and the corrected p-values."""
    events = list(pval_dict)
    raw_pvals = [pval_dict[event] for event in events]
    _, pvals_corrected, _, _ = multipletests(raw_pvals, method='fdr_bh', alpha=alpha)
    return {event: corrected for event, corrected in zip(events, pvals_corrected)}
def write_temp_output_files(dpsi_pval_dict, output, i, cond1_name, cond2_name):
    """Write one '<output>.dpsi.temp.<i>' table with a header line and one
    'event<TAB>dPSI<TAB>p-value' row per event, sorted alphabetically."""
    cond_id = cond1_name+"-"+cond2_name
    header = "Event_id\t%s_dPSI\t%s_p-val\n" % (cond_id, cond_id)
    with open("%s.dpsi.temp.%d" % (output, i), 'w+') as fh:
        fh.write(header)
        # alphabetical order by event id
        for event_id, vals in sorted(dpsi_pval_dict.items(), key=lambda x: x[0]):
            fh.write("%s\t%.10f\t%.10f\n" % (event_id, vals[0], vals[1]))
def merge_temp_output_files(output):
    """Outer-join every '<output>.dpsi.temp.N' table on the event id into a
    single '<output>.dpsi' file (NaN rendered as 'nan'), delete the temp
    files, and return the absolute path of the merged file.
    """
    # Set working directory
    if os.path.isabs(output):
        current_path = os.path.dirname(output)+"/"
    else:
        current_path = os.getcwd()+"/"
    dpsi_files = []
    for fl in os.listdir(current_path):
        if ".dpsi.temp." in fl:
            dpsi_files.append(current_path+fl)
    # NOTE(review): sorting by the last character only orders suffixes 0-9;
    # with more than ten temp files the column order would be wrong - confirm
    # the number of comparisons is bounded upstream.
    dpsi_files.sort(key=lambda x: x[-1])
    df_lst = []
    for lst in dpsi_files:
        df = pd.read_table(lst, sep='\t', index_col=0, header=0)
        df_lst.append(df)
    # Pairwise outer merges keep every event seen in any file
    merged_dpsi_results = reduce(lambda left, right: pd.merge(left, right,
                                 left_index=True, right_index=True,
                                 how='outer'), df_lst)
    header = merged_dpsi_results.columns.values
    # Write the header first, then append the data rows
    with open("%s.dpsi" % output, "w+") as fh:
        ln = "\t".join(header)
        fh.write(ln+"\n")
    with open("%s.dpsi" % output, "a") as fh:
        merged_dpsi_results.to_csv(fh, sep="\t", na_rep="nan", header=False)
    # Delete temp files
    for fl in os.listdir(current_path):
        if ".dpsi.temp." in fl:
            os.remove(current_path+fl)
    return os.path.abspath("%s.dpsi" % output)
def write_psivec_file(psi_lst, output):
df_lst = | |
Principly FFT(Kronecker deltas) == H.\n
so\n
H = r * exp(2ikz_n) \n
and\n
FFT(H) = convolve( r , exp(2ikz_n)).\n
\\\n
According to Izatt, Choma p52\n
r = sum( r * kd(z_S - z_Sn)) where kd(...) is the \delta for Kronecker delta.\n
E_s = E_i * convolve(r , exp(2*i*k*z_Sn))\n
then\n
E_s = E_i * H
'''
rs_kd,z_rng,z_rng_max = self.kronecker_deltas(source)
if do_plot:
figure('kroneckre deltas',tight_layout=True)
plot(z_rng*1e6,abs(rs_kd),'.-',lw=0.5)
# xlim((0,z_rng_max*1e6))
xlim((0,200))
title('Kronecker delta')
xlabel('z ($\mathrm{\mu m}$)')
ylabel('field reflectivity $r_j$')
if do_save: savefig('kronecker_deltas.pdf')
def print_ns(self): print(self.ns)
def get_z_s(self):
'''
Return the list with z positions defined.
Call print_zs() to show them!
:return:
'''
z_rng = array(self.z_widths * self.ns[0:-1])
return self.z_widths, z_rng.cumsum()
def get_r_j(self):
'''
Generate array with reflectivity r (small r !) based on given list of refractive indexes ns.
Call print_ns() to show them!
:return:
'''
r_j_f = lambda n1,n2: (n1-n2)/(n1+n2)
_ = self
return [r_j_f(n1,n2) for n1,n2 in zip(_.ns[0:-1],_.ns[1:])]
def generate_H_manuscript(self, source):
'''
Compute the sample response function H and return.\n
**Please take note that FFT(H) = Kronecker deltas.**\n
:return: H, z_rng
The SRF is defined for one layer as (Ch2: eq 1.12) as\n
H = r * exp(2*i*k*z_s) = r * exp(2*i*w/c*z_s).\n
\\\n
According to Izatt, Choma p53
the sample field is calculated as\n
E_S = E_i * convolve( r , exp(2*i*k*z_z) )\n
E_S = E_i * H
\\\n
This function computes H for multiple layers and in principle means
to track all interface positions z and the layers with refractive index n between.
Therefore, a layer boundary at z[n] needs to account for all n[n-1]:\n
n[n-1] = sum(n[0..n-1]*z[0..n] (see code).\n
Hj = r_j * exp( 2*i*w/c * sum( n*z ) for all covering layers n, and z.\n
H = sum( Hj )
'''
src = source
_ = self
w_rng = src.w_rng
SN = src.SN
c = speed_of_light
f_max = 2 * w_rng[-1]/2/pi # note here that we use 2 x the _.w_rng due to the double pass.
ST = 1/f_max*SN #s
z = ST * c # s * m/s == m
z_rng = linspace(-z/2,z/2,SN)*1e6
# manuscript:lst:sampleresponsefunction
air = 1.0
ns = array([air, 1.001, 1.002, 1.003])
z_widths = array([15, 60, 90])
z_widths = z_widths * ns[0:-1] # correct with ref. index
Z_j = z_widths.cumsum()
rjs = array([(n1-n2)/(n1+n2) for n1,n2 in zip(ns[0:-1],ns[1:])])
Hj = []
for r_j, z_j in zip(rjs,Z_j):
Hj.append(r_j * exp( 1j * 2 * w_rng / c * z_j))
H = sum(Hj,axis=0)
# manuscript:lst:sampleresponsefunction
return H, z_rng
def generate_H(self, source):
'''
Compute the sample response function H and return.\n
**Please take note that FFT(H) = Kronecker deltas.**\n
:return: H, z_rng
The SRF is defined for one layer as (Ch2: eq 1.12) as\n
H = r * exp(2*i*k*z_s) = r * exp(2*i*w/c*z_s).\n
\\\n
According to Izatt, Choma p53
the sample field is calculated as\n
E_S = E_i * convolve( r , exp(2*i*k*z_z) )\n
E_S = E_i * H
\\\n
This function computes H for multiple layers and in principle means
to track all interface positions z and the layers with refractive index n between.
Therefore, a layer boundary at z[n] needs to account for all n[n-1]:\n
n[n-1] = sum(n[0..n-1]*z[0..n] (see code).\n
Hj = r_j * exp( 2*i*w/c * sum( n*z ) for all covering layers n, and z.\n
H = sum( Hj )
'''
_ = self
print('generate_H.ns',_.ns)
print('generate_H.z',_.z_widths)
r_j_f = lambda n1,n2: (n1-n2)/(n1+n2)
src = source
w_rng = src.w_rng
SN = src.SN
c = speed_of_light
f_max = 2 * w_rng[-1]/2/pi # note here that we use 2 x the _.w_rng due to the double pass.
ST = 1/f_max*SN #s
z = ST * c # s * m/s == m
z_rng = linspace(-z/2,z/2,SN)
z_widths = _.z_widths * _.ns[0:-1] # correct with ref. index
Z_j = z_widths.cumsum()
rjs = array([(n1-n2)/(n1+n2) for n1,n2 in zip(_.ns[0:-1],_.ns[1:])])
Hj = []
for r_j,z_j in zip(rjs,Z_j):
Hj.append(r_j * exp( 1j * 2 * w_rng / c * z_j))
H = sum(Hj,axis=0)
return H, z_rng
def generate_SRM_PM(self,source,spacing=[]):
'''
Generate multiple reflecting scanning layers.
In principle the same as the sample response function, with the addition
to allow to set a spacing.
@param source:
@param spacing: array to zero out values to create a spacing.
@return: H, z_rng
'''
assert not any(spacing), 'Spacing is not used yet!'
_ = self
# print('generate_SRM_PM.ns',_.ns)
# print('generate_SRM_PM.z',_.z_widths)
r_j_f = lambda n1,n2: (n1-n2)/(n1+n2)
src = source
w_rng = src.w_rng
SN = src.SN
c = speed_of_light
f_max = 2 * w_rng[-1]/2/pi # note here that we use 2 x the _.w_rng due to the double pass.
ST = 1/f_max*SN #s
z = ST * c # s * m/s == m
z_rng = linspace(-z/2,z/2,SN)
z_widths = _.z_widths * _.ns[0:-1] # correct with ref. index
Z_j = z_widths.cumsum()
rjs = array([(n1-n2)/(n1+n2) for n1,n2 in zip(_.ns[0:-1],_.ns[1:])])
Hj = []
for r_j,z_j in zip(rjs,Z_j):
Hj.append(r_j * exp( 1j * 2 * w_rng / c * z_j))
H = sum(Hj,axis=0)
return H, z_rng
def plot_H_td(self, source, do_plot=True, do_save=False, tukey_alpha=False):
'''
Generate the H according to Tomlins 2005, eq.11.\n
H(w) is the spectral modulation depending on depth!\n
\\\n
more notes below.
:param do_plot:
:param do_save:
:return:
Tomlins states for a TD-OCT\n
I(d_z) = 1/4 * int( S * (H**2 + 1)) + 1/2* \int(S * H * exp( PHI(z) ))\n
Take note that the integrals convert the spectrum into fields and then intensity fractions\n
are the results.
'''
H, z_rng = self.generate_H(source)
SN = source.SN
if tukey_alpha is type(number):
apply_tukey = True
else:
apply_tukey = False
if apply_tukey:
# tukey_alpha = 0.9 was tested but the
print('Apply Tukey window for plot_H.')
tukwin = tukey(len(H),alpha=tukey_alpha,sym=False)
H = tukwin*H
if do_plot:
figure(num='sample resp fun',tight_layout=True)
plot(z_rng, abs(fftshift(fft(H)))/SN,'.-',lw=0.5)
xlim((0,200))
title('Sample response function FFT(H)')
xlabel('z ($\mathrm{\mu m}$)')
ylabel('reflectivity $r_j$')
if do_save:
savefig('sample_response_function.pdf')
def plot_H_freqDom(self,source,do_plot=True,do_save=True):
'''
Please take note that the SRF alone is only of limited use although the FFT can be used
if it consistent with the Kronecker deltas.
:param do_plot:
:param do_save:
:return:
'''
H, z_rng = self.generate_H(source)
if do_plot:
figure(num='sample resp fun FD',tight_layout=True)
subplot(211)
plot( H )#,'.',ms=1.5)
title('$H(\omega)$')
subplot(212)
plot( H )#,'.',ms=1.5)
xlim((0,200))
title('$H(\omega)$(zoom)')
if do_save:
savefig('sample_response_function_freqDom.pdf')
class Test_Source(object):
    '''
    This class does collect function calls that provide views on different mathematical representations
    of the source, such as a spectrum and as a field.
    Instantiating it runs all demo plots immediately.
    '''
    def __init__(self):
        # Plot all representations of the source
        src = Source()
        # S(w-w0) for lambda / wave length
        src.plot_WL()
        # S(w-w0) for omega (w)
        src.plot_circFreq()
        # E_i = FFT( S )
        src.plot_E_i_td()
        # FFT( S ) should be the same as src.get_E_i()
        S,w_rng = src.S(src.w_rng, src.w_0, src.sigma_w, mode=src.SRF)[0:2]
        E_i,z_rng = src.get_E_i_td()
        # Side-by-side comparison: spectrum, FFT of the spectrum, get_E_i_td()
        figure(tight_layout=True)
        subplot(221)
        plot(w_rng,S),title('Spectrum\n $S(\omega-\omega_0)$')
        subplot(222)
        plot(z_rng, fftshift(fft(S))),title('$E_i$ field\n FFT(S)= s * exp(2ikz_s)')
        subplot(223)
        plot(z_rng,E_i),title('$E_i$ using\n get_E_i()')
# Test_Source()
class Test_Sample(object):
    '''
    Demonstrate the different variants to create the sample.
    Instantiating it runs the demo plots immediately.
    '''
    def __init__(self):
        src = Source()
        sample = Sample()
        # use Ctrl + Q to get explanation: Izatt
        # Just show the layers
        sample.plot_kronecker(src)
        # sample.kronecker_deltas() #get the data
        # use Ctrl + Q: see Tomlins to use of H and S
        # Just show the layers by FFT(Kron)
        sample.plot_H_td(src)
        # sample.sample_response_H() #get the data
        # sample.plot_H_freqDom()
# Test_Sample()
class Test_E_s(object):
'''
The sample field is created based on the source field
and based on the defined reflecting interfaces.
For TD-OCT E_r = conv( E_i, KrD )
However, the range of E_i is arbitrary and according to Tomlins
E_r = E_i * exp(-i phi(D z))
E_s = E_i * H = E_i conv KRD
For TD-OCT the E_d is the sum of E_s + E_r whereas the frequency of
E_r is changed due to the Doppler effect.
This is different to FD-OCT where just both spectra are summed.
The Doppler frequency can be calculated with the scan speed and range.
'''
def __init__(self):
sample = Sample() #(ns=[1.3,1.5,1.0,1.0],z_widths=[5,15,30,10])
src = Source()
src.plot_WL(mode=src.SRF)
w_rng = src.w_rng
c = src.c
S = src.S(src.w_rng, src.w_0, src.sigma_w, mode=src.SRF)[0]
E_i, E_z_rng = src.get_E_i_td(mode=src.SRF)
# | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from .constants import constant_atomlabel, constant_aminoacid_code, constant_aminoacid_code_reverse
from .utils import sort_dict_by_key
from difflib import ndiff
''' The program extracts atomic information from a truncated pdb file that is
exported from PyMol after a selection statement is executed, and thus has a
format below (part of the file is shown).
HETATM 1 CAA CYC A 175 31.603 38.077 18.635 1.00 18.22 C
HETATM 2 CAB CYC A 175 23.511 33.969 15.096 1.00 13.95 C
HETATM 3 CAC CYC A 175 33.943 30.709 11.039 1.00 22.02 C
HETATM 4 CAD CYC A 175 36.012 36.289 16.517 1.00 23.57 C
HETATM 5 NA CYC A 175 30.590 35.666 15.935 1.00 17.82 N
HETATM 6 CBA CYC A 175 32.240 37.352 19.790 1.00 21.28 C
HETATM 7 CBB CYC A 175 22.926 32.891 15.989 1.00 13.87 C
HETATM 8 CBC CYC A 175 33.396 29.891 9.878 1.00 17.93 C
HETATM 9 CBD CYC A 175 36.524 37.532 15.828 1.00 27.36 C
HETATM 10 NB CYC A 175 26.189 35.771 16.906 1.00 13.89 N
'''
def split(line):
    ''' Break a single PDB record line into a list of typed fields.
        Column layout follows the Protein Data Bank format:
        https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html
    '''
    record  = line[0 : 6].strip()   # 0  ATOM or HETATM
    serial  = line[6 :12].strip()   # 1  Atom serial number (1..<whatever>)
    name    = line[12:16].strip()   # 2  Atom name (CA)
    altloc  = line[16].strip()      # 3  Alternate location indicator
    resname = line[17:21].strip()   # 4  Residue name
    chain   = line[21].strip()      # 5  Chain identifier
    resseq  = int(line[22:26])      # 6  Residue sequence number
    icode   = line[26].strip()      # 7  Code for insertions of residues
    x       = float(line[30:38])    # 8  X
    y       = float(line[38:46])    # 9  Y
    z       = float(line[46:54])    # 10 Z
    occ     = float(line[54:60])    # 11 Occupancy
    bfactor = float(line[60:66])    # 12 Temperature factor
    segid   = line[72:76].strip()   # 13 Segment identifier
    element = line[76:78].strip()   # 14 Element symbol
    return [record, serial, name, altloc, resname, chain, resseq, icode,
            x, y, z, occ, bfactor, segid, element]
def read(file):
    ''' Extract atomic information for every atom in a PDB file.
        Only ATOM and HETATM coordinate records are kept; each becomes a
        field list as produced by split().
    '''
    atoms = []
    with open(file, 'r') as fh:
        for line in fh.readlines():
            # Keep coordinate records only
            if line[0:6].strip() in ("ATOM", "HETATM"):
                atoms.append(split(line))
    return atoms
def readonly(file):
    ''' Extract atomic information for every atom and save them in tuples.
        Users are not encouraged to change values in the results returned.
    '''
    # DRY fix: this previously duplicated read()'s record filter and line
    # splitting; delegate so the parsing logic lives in one place.
    return tuple(tuple(atom) for atom in read(file))
def spec():
    ''' Print out the PDB format formating rule.
        The triple-quoted text below is written to stdout verbatim.
    '''
    print( '''
    The PDB formating rule:
    _______________________
    0 ATOM or HETATM
    1 Atom serial number (1..<whatever>)
    2 Atom name (CA)
    3 Alternate location indicator
    4 Residue name
    5 Chain identifier
    6 Residue sequence number
    7 Code for insertions of residues
    8 X
    9 Y
    10 Z
    11 Occupancy
    12 Temperature factor
    13 Segment identifier
    14 Element symbol
    '''
    )
def create_lookup_table(atoms_pdb):
    ''' Create a nested lookup table for atomic records.
    ```
    chain_dict = create_lookup_table(atoms_pdb)
    chain_dict["A"][1002]["CA"]
    ```
    Keys are chain id -> residue number -> atom name; atom names are unique
    within a residue, so each leaf holds exactly one record.
    '''
    chain_dict = {}
    for atom in atoms_pdb:
        # Field positions follow split(): name, chain, residue number
        name, chain, resi = atom[2], atom[5], atom[6]
        chain_dict.setdefault(chain, {}).setdefault(resi, {})[name] = atom
    return chain_dict
# [[[ Dictionary based methods ]]]
def filter_by_resi(chain_dict, nterm, cterm):
    ''' Keep only residues whose number lies in [nterm, cterm].
        Use chain_dict = atom_dict[chain] prior to using this function.
    '''
    selected = {}
    for resi, atoms in chain_dict.items():
        if nterm <= resi <= cterm:
            selected[resi] = atoms
    return selected
def filter_by_resn(chain_dict, resn):
    ''' Keep only residues whose CA atom carries the residue name `resn`.
        Use chain_dict = atom_dict[chain] prior to using this function.
    '''
    _missing = object()
    selected = {}
    for resi, atoms in chain_dict.items():
        ca = atoms.get("CA", _missing)
        if ca is not _missing and ca[4] == resn:
            selected[resi] = atoms
    return selected
def resi_to_resn(chain_dict):
    ''' Extract the resi -> one-letter resn mapping (like the sequence viewer
        on PyMol).  Residues whose name is not a known amino acid code
        (e.g. ligands) are skipped.  The result is sorted by residue number.
    '''
    aa_dict = constant_aminoacid_code_reverse()
    mapping = {}
    for resi, atom_dict in chain_dict.items():
        # The residue name is read from the first atom of the residue
        first_atom = list(atom_dict.keys())[0]
        resn = atom_dict[first_atom][4]
        if resn in aa_dict:
            mapping[resi] = aa_dict[resn]
    return sort_dict_by_key(mapping)
def seqi_to_resi_debug(chain_dict, tar_seq, nseqi, cseqi):
    ''' Debug companion to seqi_to_resi: return the gap-free bound target
        sequence and the sequence reconstructed from the PDB, so the two can
        be compared by eye.
        Fix: removed two unused locals (the pre-initialized mapping dict and
        the gapped bound sequence) that the debug return never used.
    '''
    # Extract the resi -> resn mapping (like the sequence viewer on PyMol);
    # non amino acid residues (like ligands) are bypassed
    resi_to_resn_dict = resi_to_resn(chain_dict)
    # Bound the target sequence by nseqi/cseqi and drop alignment gaps
    tar_seq_bound_continous = tar_seq[nseqi : cseqi + 1].replace('-', '')
    # Obtain the original sequence from the PDB...
    # May have overhead in the beginning or the end
    seq_orig = ''.join([ v for v in resi_to_resn_dict.values() ])
    return tar_seq_bound_continous, seq_orig
def seqi_to_resi(chain_dict, tar_seq, nseqi, cseqi):
    ''' Map seqi to resi for the sequence tar_seq bound by nseqi and cseqi.
        resi is infered by resi_to_resn_dict.
        Key step is to recognize the lower bound resi that corresponds to
        nseqi.
        Returns {seqi: resi or None}; every seqi maps to None when the bound
        target sequence cannot be located in the PDB-derived sequence, and
        gap ('-') positions stay None.
    '''
    # Initialize mapping...
    seqi_to_resi_dict = { k : None for k in range(nseqi, cseqi + 1) }
    # Extract resn to resi mapping (like the sequence viewer on PyMol)...
    # Non amino acid residue (like ligand) are bypassed
    resi_to_resn_dict = resi_to_resn(chain_dict)
    # Select the bound sequence by nseqi and cseqi...
    tar_seq_bound = tar_seq[nseqi : cseqi + 1]
    tar_seq_bound_continous = tar_seq[nseqi : cseqi + 1].replace('-', '')
    # Obtain the original sequence from PDB...
    # May have overhead in the beginning or the end
    seq_orig = ''.join([ v for v in resi_to_resn_dict.values() ])
    # Obtain the starting index by string match...
    lb_term = seq_orig.find(tar_seq_bound_continous)
    # Warn user about illegal input sequence...
    if lb_term == -1:
        print(f"Illegal input sequence!!!")
        print(f"-------------------------")
        ## print(f"seq in alignment file: {tar_seq[nseqi : cseqi + 1].replace('-', '')}")
        print(f"seq in alignment file: {tar_seq_bound_continous}")
        print(f"-------------------------")
        print(f"seq in this pdb: {seq_orig}")
        print(f"")
        # All-None mapping signals the failed match to the caller
        return seqi_to_resi_dict
    # Obtain the ending index by the length of the coutinous (no '-') sequence...
    ub_term = lb_term + len(tar_seq_bound_continous)
    # Obtain list of resi bound by nseqi and cseqi...
    resi_list = [ v for v in resi_to_resn_dict.keys() ]
    resi_bound_list = resi_list[lb_term : ub_term]
    # Counter to go through the bound sequence by nseqi and cseqi...
    res_counter = 0
    # Loop through
    for i, seqi in enumerate(range(nseqi, cseqi + 1)):
        # Skip the '-' residue...
        if tar_seq_bound[i] == '-': continue
        # Access the resi...
        resi = resi_bound_list[res_counter]
        # Record the mapping...
        seqi_to_resi_dict[seqi] = resi
        # Increment the residue counter...
        res_counter += 1
    return seqi_to_resi_dict
def extract_xyz_by_seq(atoms_to_extract, chain_dict, tar_seq, nseqi, cseqi):
''' Use super_seq as framework to extract coordinates. Each tar_seq is
considered as a subset of super_seq.
Sequence alignment directly determines the structure distance matrix.
'''
# Obtain the seq to resi mapping...
seqi_to_resi_dict = seqi_to_resi(chain_dict, tar_seq, nseqi, cseqi)
# Obtain size of the seqstr...
len_chain = cseqi - nseqi + 1
# Obtain chain info...
len_res = len(atoms_to_extract)
len_seq = len_chain * len_res
# Preallocate memory for storing coordinates...
xyzs = np.zeros((len_seq, 3)) # Initialize coordinate matrix
xyzs[:] = np.nan # np.nan for any missing residue
# Go through each seqi...
for i, (seqi, resi) in enumerate(seqi_to_resi_dict.items()):
# From each atom
for j, atm in enumerate(atoms_to_extract):
# Derive the matrix index...
mat_i = i * len_res + j
# Assign coordinates to matrix at index mat_i...
if resi in chain_dict:
if atm in chain_dict[resi]:
| |
import numpy as np
import pytest
from hypothesis import given, settings, assume, example
from hypothesis import strategies as st
from hypothesis import HealthCheck
from hypothesis.extra.numpy import arrays
from ..histogram import histogram1d, histogram2d, histogramdd
# NOTE: for now we don't test the full range of floating-point values in the
# tests below, because Numpy's behavior isn't always deterministic in some
# of the extreme regimes. We should add manual (non-hypothesis and not
# comparing to Numpy) test cases.
@given(values=arrays(dtype='<f8', shape=st.integers(0, 200),
                     elements=st.floats(-1000, 1000), unique=True),
       nx=st.integers(1, 10),
       xmin=st.floats(-1e10, 1e10),
       xmax=st.floats(-1e10, 1e10),
       weights=st.booleans(),
       dtype=st.sampled_from(['>f4', '<f4', '>f8', '<f8']))
@settings(max_examples=500)
def test_1d_compare_with_numpy(values, nx, xmin, xmax, weights, dtype):
    # A degenerate range cannot be histogrammed
    if xmax <= xmin:
        return
    values = values.astype(dtype)
    size = len(values) // 2
    w = values[:size] if weights else None
    x = values[size:size * 2]
    try:
        reference = np.histogram(x, bins=nx, weights=w, range=(xmin, xmax))[0]
    except ValueError:
        if 'f4' not in str(x.dtype):
            raise
        # Numpy has a bug in certain corner cases
        # https://github.com/numpy/numpy/issues/11586
        return
    # Sanity-check the Numpy result itself - it occasionally doesn't make
    # sense (https://github.com/numpy/numpy/issues/9435)
    # FIXME: for now use < since that's what our algorithm does
    inside = (x < xmax) & (x >= xmin)
    if weights:
        assume(np.allclose(np.sum(w[inside]), np.sum(reference)))
    else:
        assume(np.sum(inside) == np.sum(reference))
    fast = histogram1d(x, bins=nx, weights=w, range=(xmin, xmax))
    # Numpy accumulates 32-bit inputs in 32-bit (1D only) while histogram1d
    # sums in 64-bit, so loosen the tolerance for float32 inputs.
    rtol = 1e-7 if (x.dtype.kind == 'f' and x.dtype.itemsize == 4) else 1e-14
    np.testing.assert_allclose(fast, reference, rtol=rtol)
    # A single-axis histogramdd call must agree with histogram1d exactly
    fastdd = histogramdd((x,), bins=nx, weights=w, range=[(xmin, xmax)])
    np.testing.assert_array_equal(fast, fastdd)
@given(values=arrays(dtype='<f8', shape=st.integers(0, 300),
                     elements=st.floats(-1000, 1000), unique=True),
       nx=st.integers(1, 10),
       xmin=st.floats(-1e10, 1e10), xmax=st.floats(-1e10, 1e10),
       ny=st.integers(1, 10),
       ymin=st.floats(-1e10, 1e10), ymax=st.floats(-1e10, 1e10),
       weights=st.booleans(),
       dtype=st.sampled_from(['>f4', '<f4', '>f8', '<f8']))
@settings(max_examples=500)
def test_2d_compare_with_numpy(values, nx, xmin, xmax, ny, ymin, ymax, weights, dtype):
    """Property-based check that histogram2d agrees with np.histogram2d."""
    if xmax <= xmin or ymax <= ymin:
        # Degenerate range in either dimension - nothing to compare.
        return
    values = values.astype(dtype)
    # Thirds of the generated array: optional weights, x data, y data.
    size = len(values) // 3
    if weights:
        w = values[:size]
    else:
        w = None
    x = values[size:size * 2]
    y = values[size * 2:size * 3]
    try:
        reference = np.histogram2d(x, y, bins=(nx, ny), weights=w,
                                   range=((xmin, xmax), (ymin, ymax)))[0]
    except Exception:
        # If Numpy fails, we skip the comparison since this isn't our fault
        return
    # First, check the Numpy result because it sometimes doesn't make sense. See
    # bug report https://github.com/numpy/numpy/issues/9435.
    # FIXME: for now use < since that's what our algorithm does
    inside = (x < xmax) & (x >= xmin) & (y < ymax) & (y >= ymin)
    if weights:
        # Discard (don't fail on) examples where Numpy's total disagrees
        # with a direct count/sum of in-range samples.
        assume(np.allclose(np.sum(w[inside]), np.sum(reference)))
    else:
        n_inside = np.sum(inside)
        assume(n_inside == np.sum(reference))
    fast = histogram2d(x, y, bins=(nx, ny), weights=w,
                       range=((xmin, xmax), (ymin, ymax)))
    # Looser tolerance for single-precision input.
    if x.dtype.kind == 'f' and x.dtype.itemsize == 4:
        rtol = 1e-7
    else:
        rtol = 1e-14
    np.testing.assert_allclose(fast, reference, rtol=rtol)
    # The 2-D case of histogramdd must agree exactly with histogram2d.
    fastdd = histogramdd((x, y), bins=(nx, ny), weights=w,
                         range=((xmin, xmax), (ymin, ymax)))
    np.testing.assert_array_equal(fast, fastdd)
@given(values=arrays(dtype='<f8', shape=st.integers(0, 1000),
                     elements=st.floats(-1000, 1000), unique=True),
       hist_size=st.integers(1, 1e5),
       bins=arrays(elements=st.integers(1, 10), shape=(10,), dtype=np.int32),
       ranges=arrays(elements=st.floats(1e-10, 1e5), dtype='<f8',
                     shape=(10,), unique=True),
       weights=st.booleans(),
       dtype=st.sampled_from(['>f4', '<f4', '>f8', '<f8']))
@settings(max_examples=200, suppress_health_check=[HealthCheck.too_slow], deadline=None)
def test_dd_compare_with_numpy(values, hist_size, bins, ranges, weights, dtype):
    """Property-based check that histogramdd agrees with np.histogramdd."""
    # To avoid generating huge histograms that take a long time, we only take
    # as many dimensions as we can such that the total hist_size is still within the
    # limit. If `hist_size = 1`, we will take all the leading ones in `bins`.
    _bins = []
    accum_size = 1
    for i in range(10):
        if bins[i] * accum_size > hist_size:
            break
        _bins.append(bins[i])
        accum_size *= bins[i]
    ndim = len(_bins)
    values = values.astype(dtype)
    ranges = ranges.astype(dtype)
    ranges = ranges[:ndim]
    # Ranges are symmetric because otherwise the probability of samples falling inside
    # is just too small and we would just be testing a bunch of empty histograms.
    ranges = np.vstack((-ranges, ranges)).T
    # Split the generated values into optional weights plus ndim coordinate arrays.
    size = len(values) // (ndim + 1)
    if weights:
        w = values[:size]
    else:
        w = None
    sample = tuple(values[size*(i+1):size*(i+2)] for i in range(ndim))
    # for simplicity using the same range in all dimensions
    try:
        reference = np.histogramdd(sample, bins=_bins, weights=w, range=ranges)[0]
    except Exception:
        # If Numpy fails, we skip the comparison since this isn't our fault
        return
    # First, check the Numpy result because it sometimes doesn't make sense. See
    # bug report https://github.com/numpy/numpy/issues/9435.
    # FIXME: for now use < since that's what our algorithm does
    inside = (sample[0] < ranges[0][1]) & (sample[0] >= ranges[0][0])
    if ndim > 1:
        for i in range(ndim - 1):
            inside = inside & (sample[i+1] < ranges[i+1][1]) & (sample[i+1] >= ranges[i+1][0])
    if weights:
        # Discard examples where Numpy's own total is inconsistent.
        assume(np.allclose(np.sum(w[inside]), np.sum(reference)))
    else:
        n_inside = np.sum(inside)
        assume(n_inside == np.sum(reference))
    fast = histogramdd(sample, bins=_bins, weights=w, range=ranges)
    # Looser tolerance for single-precision input.
    if sample[0].dtype.kind == 'f' and sample[0].dtype.itemsize == 4:
        rtol = 1e-7
    else:
        rtol = 1e-14
    np.testing.assert_allclose(fast, reference, rtol=rtol)
def test_nd_arrays():
    """Multidimensional input must histogram the same as its flattened form."""
    flat_x = np.random.random(1000)
    cube_x = flat_x.reshape((10, 10, 10))

    expected = histogram1d(flat_x, bins=10, range=(0, 1))
    np.testing.assert_equal(expected, histogram1d(cube_x, bins=10, range=(0, 1)))
    np.testing.assert_equal(expected, histogramdd((cube_x,), bins=10, range=((0, 1), )))

    flat_y = np.random.random(1000)
    cube_y = flat_y.reshape((10, 10, 10))

    expected = histogram2d(flat_x, flat_y, bins=(10, 10), range=[(0, 1), (0, 1)])
    np.testing.assert_equal(
        expected,
        histogram2d(cube_x, cube_y, bins=(10, 10), range=[(0, 1), (0, 1)]))
    np.testing.assert_equal(
        expected,
        histogramdd((cube_x, cube_y), bins=(10, 10), range=[(0, 1), (0, 1)]))
def test_list():
    """Plain Python lists must be accepted and match the array result."""
    data = [1.4, 2.1, 4.2]
    as_array = np.array(data)

    np.testing.assert_equal(
        histogram1d(data, bins=10, range=(0, 10)),
        histogram1d(as_array, bins=10, range=(0, 10)))

    np.testing.assert_equal(
        histogramdd(data, bins=10, range=((0, 10),)),
        histogramdd(as_array, bins=10, range=((0, 10),)))
def test_histogramdd_interface():
    """Check that histogramdd accepts the same sample layouts as np.histogramdd.

    np.histogramdd accepts a single 1D array/list, a (D, N) sequence of
    arrays or lists, and an (N, D) array; for each layout our result must
    equal Numpy's (which returns a (hist, edges) tuple, hence the unpack).
    The eight original copy-pasted comparison blocks are folded into one
    helper so each layout is a single line.
    """
    x_list = [1.4, 2.1, 4.2, 8.7, 5.1]
    x_arr = np.array(x_list)
    y_list = [6.6, 3.2, 2.9, 3.9, 0.1]
    y_arr = np.array(y_list)

    def _check(sample, rng):
        # Compare our histogramdd against Numpy for one sample layout.
        result_np, _ = np.histogramdd(sample, bins=10, range=rng)
        result_fh = histogramdd(sample, bins=10, range=rng)
        np.testing.assert_equal(result_np, result_fh)

    # test 1D (needs special handling in case the sample is a list)
    _check(x_arr, ((0, 10),))
    _check(x_list, ((0, 10),))

    # test (D, N) array_like: tuples/lists of arrays or lists
    rng_2d = ((0, 10), (0, 10))
    _check((x_arr, y_arr), rng_2d)
    _check([x_arr, y_arr], rng_2d)
    _check((x_list, y_list), rng_2d)
    _check([x_list, y_list], rng_2d)

    # test (N, D) array
    _check(np.vstack([x_arr, y_arr]).T, rng_2d)
    _check(np.vstack([x_list, y_list]).T, rng_2d)
def test_non_contiguous():
x = np.random.random((10, 10, 10))[::2, ::3, :]
y = np.random.random((10, 10, 10))[::2, fc00:db20:35b:7399::5, :]
z = np.random.random((10, 10, 10))[::2, fc00:db20:35b:7399::5, :]
w = np.random.random((10, 10, 10))[::2, fc00:db20:35b:7399::5, :]
assert not x.flags.c_contiguous
assert not x.flags.f_contiguous
result_1 = histogram1d(x, bins=10, range=(0, 1))
result_2 = histogram1d(x.copy(), bins=10, range=(0, 1))
np.testing.assert_equal(result_1, result_2)
result_1 = histogram1d(x, bins=10, range=(0, 1), weights=w)
result_2 = histogram1d(x.copy(), bins=10, range=(0, 1), weights=w)
np.testing.assert_equal(result_1, result_2)
result_1 = histogram2d(x, y, bins=(10, 10), range=[(0, 1), (0, 1)])
result_2 = histogram2d(x.copy(), y.copy(), bins=(10, 10),
range=[(0, 1), (0, 1)])
np.testing.assert_equal(result_1, result_2)
result_1 = histogram2d(x, y, bins=(10, 10), range=[(0, 1), (0, 1)], weights=w)
result_2 = histogram2d(x.copy(), y.copy(), bins=(10, 10),
range=[(0, 1), (0, 1)], weights=w)
np.testing.assert_equal(result_1, result_2)
result_1 = histogramdd((x, y, z), bins=(10, 10, 10), range=[(0, 1), (0, 1), (0, 1)])
result_2 = histogramdd((x.copy(), y.copy(), z.copy()), bins=(10, 10, 10),
range=[(0, 1), (0, 1), (0, 1)])
np.testing.assert_equal(result_1, result_2)
result_1 = histogramdd((x, y, z), bins=(10, 10, 10), range=[(0, 1), (0, 1), (0, 1)],
weights=w)
| |
# -*- coding: utf-8 -*-
"""
hdu_api.models
--------------
This module contains the primary object that power hdu_api.
"""
from __future__ import unicode_literals
import re
import time
from bs4 import BeautifulSoup
from hdu_api.config import CARD_URLS, EXAM_URLS, HOME_URLS, COURSE_URLS, PERSON_URLS, PUBLIC_URLS
from hdu_api.config import DEFAULT_DICTIONARY
from hdu_api.sessions import CardSession, TeachingSession, StudentSession, IHDUPhoneSession, BaseSession, IHDUSession
class BaseModel(object):
    """Common base class for all hdu_api model objects."""
class CardBaseModel(BaseModel):
    """Base model for the one-card (campus card) system."""

    def __init__(self, session: CardSession):
        # Guard clause: only a CardSession is acceptable here.
        if not isinstance(session, CardSession):
            raise ValueError('session must be CardSession.')
        self.session = session
        # __VIEWSTATE is an ASP.NET form field that must first be scraped
        # from the page; once obtained it can be cached per URL and reused
        # for later form submissions.
        self.reuse_form_data = {
            '__VIEWSTATE': {},
        }
class TeachingBaseModel(BaseModel):
    """Base model for the teaching-administration system."""

    def __init__(self, session: TeachingSession):
        # Guard clause: only a TeachingSession is acceptable here.
        if not isinstance(session, TeachingSession):
            raise ValueError('session must be TeachingSession.')
        self.session = session
        self.username = session.username
        self.realname = session.realname
        # ASP.NET form fields scraped once per URL and cached for reuse.
        self.reuse_form_data = {
            '__VIEWSTATE': {},
            '__EVENTVALIDATION': {},
        }
        self.session.headers.update({'referer': HOME_URLS['teaching'].format(username=self.username)})
class StudentBaseModel(BaseModel):
    """Base model for the student-administration system."""

    def __init__(self, session: StudentSession):
        # Guard clause: only a StudentSession is acceptable here.
        if not isinstance(session, StudentSession):
            raise ValueError('session must be StudentSession.')
        self.session = session
class IHDUBaseModel(BaseModel):
    """Base model for the ihdu portal."""

    def __init__(self, session: IHDUSession):
        # Guard clause: only an IHDUSession is acceptable here.
        if not isinstance(session, IHDUSession):
            raise ValueError('session must be IHDUSession')
        self.session = session
class IHDUPhoneBaseModel(BaseModel):
    """Base model for the mobile version of the ihdu portal."""

    def __init__(self, session: IHDUPhoneSession):
        # Guard clause: only an IHDUPhoneSession is acceptable here.
        if not isinstance(session, IHDUPhoneSession):
            raise ValueError('session must be IHDUPhoneSession')
        self.session = session
class Card(CardBaseModel):
    """One-card (campus card) queries.

    Card.account - account information
    Card.balance - current balance
    Card.consume - consumption records of a given month
    Card.consume_today - today's consumption records
    Card.statistics - monthly transaction statistics
    """
    # TODO: provide an option to return completely raw data

    def account(self, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Fetch one-card account information.

        :param raw: keep the original (Chinese) table keys when True,
            otherwise translate them through ``dictionary``.
        :param dictionary: key-translation mapping.
        :return: a list containing one dict of account fields, or None on
            failure (non-200 response or unparsable page).
        """
        results = None
        rsp = self.session.get(CARD_URLS['account'], allow_redirects=False)
        # TODO: a unified success/failure/empty check
        if rsp.status_code == 200:
            results = []
            try:
                soup = BeautifulSoup(rsp.text, 'lxml')
                result = {}
                rows = soup.find('table', id='Table16').find_all('tr')
                for row in rows:
                    tds = row.find_all('td')
                    # Cells alternate key / value within each row.
                    for i in range(len(tds) // 2):
                        key = tds[i * 2].get_text(strip=True)
                        if not raw:
                            key = dictionary[key]
                        result.update({
                            key: tds[i * 2 + 1].get_text(strip=True),
                        })
                results.append(result)
            except Exception:
                return None
        return results

    def balance(self, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query the one-card balance.

        The balance shown on the balance page (rsp1) can disagree with the
        real value, while the transfer page (rsp2) shows it correctly, so
        both pages are fetched and the card balance is taken from the
        transfer page.

        :return: a list containing one dict of balance fields, or None on failure.
        """
        results = None
        rsp1 = self.session.get(CARD_URLS['balance'][0], allow_redirects=False)
        rsp2 = self.session.get(CARD_URLS['balance'][1], allow_redirects=False)
        if rsp1.status_code == 200 and rsp2.status_code == 200:
            results = []
            result = {}
            # Balance page: generic key/value table.
            soup = BeautifulSoup(rsp1.text, 'lxml')
            rows = soup.find('div', id='Panel0').find('table', cellspacing=1).find_all('tr')
            for row in rows:
                tds = row.find_all('td')
                for i in range(len(tds) // 2):
                    key = tds[i * 2].get_text(strip=True)
                    if not raw:
                        key = dictionary[key]
                    result.update({
                        key: tds[i * 2 + 1].get_text(strip=True),
                    })
            # Transfer page: read the accurate card balance.
            soup = BeautifulSoup(rsp2.text, 'lxml')
            row = soup.find('table', id='Table13').next_sibling.next_sibling.find_all('tr')[-1]
            tds = row.find_all('td')
            key = '卡余额'
            if not raw:
                key = dictionary[key]
            result.update({
                key: tds[1].get_text(strip=True),
            })
            results.append(result)
        return results

    def consume(self, year, month, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query the consumption records of a given year and month.

        :param year: year to query.
        :param month: month to query.
        :param raw: keep the original table keys when True.
        :param dictionary: key-translation mapping.
        :return: list of record dicts, or None on failure.
        """
        results = None
        payload = self._prepare_payload(CARD_URLS['history'], year, month)
        rsp = self.session.post(CARD_URLS['history'], data=payload, allow_redirects=False)
        # .get() instead of [] - a missing Location header (the POST did not
        # redirect) must return None rather than raise KeyError.
        if rsp.headers.get('location') == '/zytk32portal/Cardholder/QueryhistoryDetail.aspx':
            rsp = self.session.get(CARD_URLS['history_detail'], allow_redirects=False)
        # TODO: distinguish success / failure / no data
        # (e.g. None = failure, [] = success without data, [...] = data)
        if rsp.status_code == 200:
            results = self._process_consume_data(rsp, raw=raw, dictionary=dictionary)
        return results

    def consume_today(self, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query today's consumption records.

        Usage::
            >>> Card.consume_today()
            [{'流水号': '232205811', '帐号': '30003086', '卡片类型': 'M1', '交易类型': '卡户存款', '商户': '',
              '站点': '校付宝', '终端号': '0', '交易额': '50', '到帐时间': '2019-01-30 19:48', '钱包名称': '1号钱包',
              '卡余额': 'N/A'}]

        :return: list of record dicts, or None on failure.
        """
        results = None
        rsp = self.session.get(CARD_URLS['today'], allow_redirects=False)
        if rsp.status_code == 200:
            # TODO: the "no records" case is not handled separately here
            results = self._process_consume_data(rsp, raw=raw, dictionary=dictionary)
        return results

    def consume_week(self, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query this week's consumption records.

        Not implemented yet; kept as a placeholder for API symmetry.
        """
        pass

    def statistics(self, year, month, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query the transaction statistics of a given month.

        :param year: year to query.
        :param month: month to query.
        :param raw: keep the original table keys when True.
        :param dictionary: key-translation mapping.
        :return: list of per-category dicts, or None on failure.
        """
        results = None
        payload = self._prepare_payload(CARD_URLS['statistics'], year, month)
        rsp = self.session.post(CARD_URLS['statistics'], data=payload, allow_redirects=False)
        # .get() instead of [] - see consume() above.
        if rsp.headers.get('location') == '/zytk32portal/Cardholder/QueryMonthResult.aspx':
            rsp = self.session.get(CARD_URLS['statistics_result'], allow_redirects=False)
        if rsp.status_code == 200:
            results = []
            soup = BeautifulSoup(rsp.text, 'lxml')
            rows = soup.find('table', id='Table13').next_sibling.next_sibling.find_all('tr')[2].find(
                'table').find_all('tr')
            keys = []
            # First row is the header; its cells become the result keys.
            row = rows.pop(0)
            tds = row.find_all('td')[1:]
            for td in tds:
                key = td.get_text(strip=True)
                if not raw:
                    key = dictionary[key]
                keys.append(key)
            for row in rows:
                result = {}
                tds = row.find_all('td')[1:]
                for key, td in zip(keys, tds):
                    result.update({key: td.get_text(strip=True)})
                results.append(result)
        return results

    def _prepare_payload(self, url, year, month):
        """Build the ASP.NET form payload for ``url``.

        __VIEWSTATE is scraped once per URL and cached in
        ``self.reuse_form_data`` so subsequent queries skip the extra GET.

        :param url: form URL to prepare data for.
        :param year: year form value.
        :param month: month form value.
        :return: dict ready to be POSTed.
        """
        if url not in self.reuse_form_data['__VIEWSTATE']:
            rsp = self.session.get(url, allow_redirects=False)
            soup = BeautifulSoup(rsp.text, 'lxml')
            viewstate = soup.find('input', id='__VIEWSTATE')['value']
            self.reuse_form_data['__VIEWSTATE'].update({url: viewstate})
        payload = {
            '__VIEWSTATE': self.reuse_form_data['__VIEWSTATE'][url],
            'ddlYear': year,
            'ddlMonth': month,
            'txtMonth': month,
            # Coordinates of the image-button click, required by the form.
            'ImageButton1.x': 33,
            'ImageButton1.y': 5,
        }
        return payload

    @staticmethod
    def _process_consume_data(rsp, raw, dictionary):
        """Parse the consumption table (id ``dgShow``) into a list of dicts.

        :return: [] when the page explicitly reports no matching records,
            None when parsing fails for any other reason, otherwise the
            list of record dicts.
        """
        results = []
        soup = BeautifulSoup(rsp.text, 'lxml')
        try:
            rows = soup.find('table', id='dgShow').find_all('tr')
            keys = []
            # First row is the header; its cells become the result keys.
            row = rows.pop(0)
            tds = row.find_all('td')
            for td in tds:
                key = td.get_text(strip=True)
                if not raw:
                    key = dictionary[key]
                keys.append(key)
            for row in rows:
                result = {}
                tds = row.find_all('td')
                for key, td in zip(keys, tds):
                    result.update({key: td.get_text(strip=True)})
                results.append(result)
        except Exception:  # narrowed from a bare except
            if "<script>alert('没有检索到符合的记录!');</script>" in rsp.text:
                return results
            else:
                return None
        return results
class Exam(TeachingBaseModel):
    """Exam-related queries: term grades, level-exam grades, schedules."""

    def grade(self, year, term, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query the grades of a given academic year and term.

        :param year: academic year string.
        :param term: term number.
        :param raw: keep the original table keys when True.
        :param dictionary: key-translation mapping.
        :return: list of per-course dicts, or None on failure.
        """
        results = None
        url = EXAM_URLS['grade'].format(username=self.username, realname=self.realname)
        payload = self._prepare_payload(url, year, term)
        rsp = self.session.post(url, data=payload, allow_redirects=False)
        if rsp.status_code == 200:
            results = self._process_data(rsp, raw=raw, dictionary=dictionary)
        return results

    def grade_current(self, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query this term's grades."""
        year, term = get_current_term()
        return self.grade(year, term, raw=raw, dictionary=dictionary)

    def level_grade(self, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query level-examination grades.

        :return: list of result dicts, or None on failure.
        """
        results = None
        url = EXAM_URLS['level_grade'].format(username=self.username, realname=self.realname)
        rsp = self.session.get(url, allow_redirects=False)
        if rsp.status_code == 200:
            results = self._process_data(rsp, raw=raw, dictionary=dictionary)
        return results

    def schedule(self, year, term, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query the exam schedule of a given academic year and term.

        :param year: academic year string.
        :param term: term number.
        :param raw: keep the original table keys when True.
        :param dictionary: key-translation mapping.
        :return: list of exam dicts, or None on failure.
        """
        results = None
        url = EXAM_URLS['schedule'].format(username=self.username, realname=self.realname)
        payload = self._prepare_payload(url, year, term)
        rsp = self.session.post(url, data=payload, allow_redirects=False)
        if rsp.status_code == 200:
            results = self._process_data(rsp, raw=raw, dictionary=dictionary)
        return results

    def schedule_current(self, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query this term's exam schedule."""
        year, term = get_current_term()
        return self.schedule(year, term, raw=raw, dictionary=dictionary)

    def schedule_make_up(self, term, raw=False, dictionary=DEFAULT_DICTIONARY):
        """Query the make-up exam schedule (year is not needed by this form)."""
        results = None
        url = EXAM_URLS['schedule_make_up'].format(username=self.username, realname=self.realname)
        payload = self._prepare_payload(url, None, term)
        rsp = self.session.post(url, data=payload, allow_redirects=False)
        if rsp.status_code == 200:
            results = self._process_data(rsp, raw=raw, dictionary=dictionary)
        return results

    def _prepare_payload(self, url, year, term):
        """Build the ASP.NET form payload for ``url``.

        __VIEWSTATE / __EVENTVALIDATION are scraped once per URL and cached
        in ``self.reuse_form_data`` for reuse. If scraping fails, the later
        lookup raises KeyError (original behavior preserved).
        """
        if url not in self.reuse_form_data['__VIEWSTATE'] or url not in self.reuse_form_data['__EVENTVALIDATION']:
            rsp = self.session.get(url, allow_redirects=False)
            if rsp.status_code == 200:
                soup = BeautifulSoup(rsp.text, 'lxml')
                try:
                    viewstate = soup.find('input', id='__VIEWSTATE')['value']
                    eventvalidation = soup.find('input', id='__EVENTVALIDATION')['value']
                    self.reuse_form_data['__VIEWSTATE'].update({url: viewstate})
                    self.reuse_form_data['__EVENTVALIDATION'].update({url: eventvalidation})
                except Exception:  # narrowed from a bare except
                    pass
        # The payload mixes the fields of two different forms; the server
        # ignores the extra ones.
        payload = {  # renamed from the original misspelling "paylaod"
            '__EVENTTARGET': '',
            '__EVENTARGUMENT': '',
            '__LASTFOCUS': '',
            '__VIEWSTATE': self.reuse_form_data['__VIEWSTATE'][url],
            '__EVENTVALIDATION': self.reuse_form_data['__EVENTVALIDATION'][url],
            'xnd': year,
            'xqd': term,
            'ddlxn': year,
            'ddlxq': term,
            'btnCx': ' 查 询 ',
        }
        return payload

    @staticmethod
    def _process_data(rsp, raw, dictionary):
        """Parse the result table (id ``DataGrid1``) into a list of dicts.

        :return: None when the response is a redirect page ("Object moved
            to", i.e. the session expired), otherwise the (possibly empty)
            list of row dicts.
        """
        results = []
        # NOTE(review): this uses the 'xml' parser while sibling models use
        # 'lxml' for the same kind of HTML table - confirm it is intentional.
        soup = BeautifulSoup(rsp.text, 'xml')
        try:
            rows = soup.find('table', id='DataGrid1').find_all('tr')
            keys = []
            # First row is the header; its cells become the result keys.
            row = rows.pop(0)
            tds = row.find_all('td')
            for td in tds:
                key = td.get_text(strip=True)
                if not raw:
                    key = dictionary[key]
                keys.append(key)
            for row in rows:
                result = {}
                tds = row.find_all('td')
                for key, td in zip(keys, tds):
                    result.update({key: td.get_text(strip=True)})
                results.append(result)
        except Exception:  # narrowed from a bare except
            if 'Object moved to' in rsp.text:
                return None
        return results
class Course(TeachingBaseModel):
"""
"""
def selected(self, year, term, raw=False, dictionary=DEFAULT_DICTIONARY):
results = None
url = COURSE_URLS['selected'].format(username=self.username, realname=self.realname)
payload = self._prepare_payload(url, year, term)
rsp = self.session.post(url, data=payload, allow_redirects=False)
if rsp.status_code == 200:
results = self._process_data(rsp, raw=raw, dictionary=dictionary)
return results
@staticmethod
def _process_data(rsp, raw, dictionary):
results = []
soup = BeautifulSoup(rsp.text, 'lxml')
try:
rows = soup.find('table', id='DBGrid').find_all('tr')
keys = []
row = rows.pop(0)
tds = row.find_all('td')[:-4] # 这里把后面 4 项省去了
for td in tds:
key = td.get_text(strip=True)
if not raw:
key = dictionary[key]
keys.append(key)
for row in rows:
result = {}
tds = row.find_all('td')[:-4]
for key, td in zip(keys, tds):
result.update({key: td.get('title') if td.get('title') else td.get_text(strip=True)})
results.append(result)
except:
if 'Object moved to' in rsp.text:
return None
return results
def selected_current(self, raw=False, dictionary=DEFAULT_DICTIONARY):
year, term = get_current_term()
return self.selected(year, term, raw, dictionary)
def schedule(self, year, term, raw=False, dictionary=DEFAULT_DICTIONARY):
"""
查询某学期的课表。
:param year:
:param term:
:param raw:
:param dictionary:
:return:
"""
results = None
# url = COURSE_URLS['schedule'].format(username=self.username, realname=self.realname)
# payload = self._prepare_payload(url, year, term)
#
# rsp = self.session.post(url, data=payload, allow_redirects=False)
# if rsp.status_code == 200:
# results = self._process_course_data(rsp)
selected = self.selected(year, term, raw=True, dictionary=dictionary)
if selected:
results = self._process_course_data(selected, raw=raw, dictionary=dictionary)
return results
@staticmethod
def _process_course_data(selected, raw, dictionary):
"""
处理课表数据。
:param selected:
:param raw:
:param dictionary:
:return:
"""
results = []
for s in selected:
name = s.get('课程名称')
teacher = s.get('教师姓名')
classroom = s.get('上课地点').split(';')
classtime = s.get('上课时间').split(';')
weekday = None
start_section = None
end_section = None
start_week = None
end_week = None
| |
__authors__ = ""
__copyright__ = "(c) 2014, pymal"
__license__ = "BSD License"
__contact__ = "Name Of Current Guardian of this file <email@address>"
from urllib import request
import time
import singleton_factory
from pymal import consts
from pymal import decorators
from pymal import exceptions
__all__ = ['MyAnime']
class MyAnime(object, metaclass=singleton_factory.SingletonFactory):
"""
Saves an account data about anime.
:ivar my_enable_discussion: boolean
:ivar my_id: int
:ivar my_status: int. #TODO: put the dictionary here.
:ivar my_score: int.
:ivar my_start_date: string as mmddyyyy.
:ivar my_end_date: string as mmddyyyy.
:ivar my_priority: int.
:ivar my_storage_type: int. #TODO: put the dictanary here.
:ivar my_storage_value: float.
:ivar my_is_rewatching: boolean.
:ivar my_completed_episodes: int.
:ivar my_download_episodes: int.
:ivar my_times_rewatched: int.
:ivar my_rewatch_value: int.
:ivar my_tags: frozenset.
:ivar my_comments: string
:ivar my_fan_sub_groups: string.
"""
__TAG_SEPARATOR = ';'
__MY_MAL_URL = request.urljoin(
consts.HOST_NAME, 'editlist.php?type=anime&id={0:d}')
__MY_MAL_DELETE_URL = request.urljoin(
consts.HOST_NAME, 'api/animelist/delete/{0:d}.xml')
__MY_MAL_UPDATE_URL = request.urljoin(
consts.HOST_NAME, 'api/animelist/update/{0:d}.xml')
    def __init__(self, mal_id: int, my_mal_id, account):
        """Initialize account-specific data for one anime.

        :param mal_id: a MAL anime id, or an already-built ``anime.Anime``
            object (reused as-is in that case).
        :param my_mal_id: the id of this entry within the account's list.
        :param account: the owning account object.
        """
        # Imported here to avoid a circular import with pymal.anime.
        from pymal import anime
        if isinstance(mal_id, anime.Anime):
            self.obj = mal_id
        else:
            self.obj = anime.Anime(mal_id)

        # URL of this entry's edit page on MAL.
        self.__my_mal_url = self.__MY_MAL_URL.format(self.obj.id)

        # Lazy-load flag; presumably decorators.my_load checks it and fills
        # the fields below on first property access - confirm in decorators.
        self._is_my_loaded = False
        self._account = account

        self.__my_mal_id = my_mal_id
        self.__my_status = 0
        self.my_enable_discussion = False
        self.__my_score = 0.0
        self.__my_start_date = ''
        self.__my_end_date = ''
        self.__my_priority = 0
        self.__my_storage_type = 0
        self.__my_storage_value = 0.0
        self.__my_comments = ''
        self.__my_fan_sub_groups = ''
        self.__my_tags = frozenset()

        self.__my_is_rewatching = None
        self.__my_completed_episodes = None
        self.__my_download_episodes = 0
        self.__my_times_rewatched = 0
        self.__my_rewatch_value = None
@property
def my_id(self) -> int:
"""
:return: the id in the account.
:rtype: int
"""
return self.__my_mal_id
@property
@decorators.my_load
def my_status(self) -> int:
"""
:return: the status as number between 1 to 6.
:rtype: int
"""
return self.__my_status
@my_status.setter
def my_status(self, status: int):
"""
:param status: the value to put in status. must be between 1 to 6.
:type: int
"""
if not (1 <= status <= 6):
raise RuntimeError("value of my_statue can be 1 to 6")
self.__my_status = status
    @property
    @decorators.my_load
    def my_score(self) -> int:
        """
        :return: The score as int between 0 to 10.
        :rtype: int
        """
        # NOTE(review): the backing field is initialized to 0.0 (a float) in
        # __init__, so this may return a float before the first assignment.
        return self.__my_score

    @my_score.setter
    def my_score(self, score: int):
        """
        :param score: The score. Must be between 0 to 10.
        :type: int
        :raise RuntimeError: if the value is out of range.
        """
        if not (0 <= score <= 10):
            raise RuntimeError("score must be between 0 to 10")
        self.__my_score = score
    @property
    @decorators.my_load
    def my_start_date(self) -> str:
        """
        :return: the start date of watching, as mmddyyyy.
        :rtype: str
        """
        return self.__my_start_date

    @my_start_date.setter
    def my_start_date(self, start_date_string: str):
        """
        :param start_date_string: An string that look like {@link consts.MALAPI_FORMAT_TIME}".
        :type: str
        :raise ValueError: if the string does not match the format.
        """
        # strptime is used purely for validation; the raw string is stored.
        time.strptime(start_date_string, consts.MALAPI_FORMAT_TIME)
        self.__my_start_date = start_date_string

    @property
    @decorators.my_load
    def my_end_date(self) -> str:
        """
        :return: the end date of watching, as mmddyyyy.
        :rtype: str
        """
        return self.__my_end_date

    @my_end_date.setter
    def my_end_date(self, end_date_string: str):
        """
        :param end_date_string: An string that look like {@link consts.MALAPI_FORMAT_TIME}".
        :type: str
        :raise ValueError: if the string does not match the format.
        """
        # Validation only; the string is stored verbatim.
        time.strptime(end_date_string, consts.MALAPI_FORMAT_TIME)
        self.__my_end_date = end_date_string
    @property
    @decorators.my_load
    def my_priority(self) -> int:
        """
        :return: The priority value as int between 0 to 3
        :rtype: int
        """
        return self.__my_priority

    @my_priority.setter
    def my_priority(self, priority: int):
        """
        :param priority: priority must be between 0 to 3.
        :type: int
        :raise RuntimeError: if the value is out of range.
        """
        if not (0 <= priority <= 3):
            raise RuntimeError("priority can be 0 to 3")
        self.__my_priority = priority
    @property
    @decorators.my_load
    def my_storage_type(self) -> int:
        """
        :return: The storage type of the downloaded episodes. Between 0 to 7.
        :rtype: int
        """
        return self.__my_storage_type

    @my_storage_type.setter
    def my_storage_type(self, storage_type: int):
        """
        :param storage_type: int between 0 to 7.
        :type: int
        :raise RuntimeError: if the value is out of range.
        """
        if not (0 <= storage_type <= 7):
            raise RuntimeError("value of my_storage_type can be 0 to 7")
        self.__my_storage_type = storage_type

    @property
    @decorators.my_load
    def my_storage_value(self) -> float:
        """
        :return: the storage value (the size you saved) - float but a real number!
        :rtype: float
        """
        return self.__my_storage_value

    @my_storage_value.setter
    def my_storage_value(self, storage_value: float):
        """
        :param storage_value: the storage value (the size you saved) - float but a real number!
        :type: float
        :raise TypeError, ValueError: if the value is not numeric.
        """
        # int() is called only to validate that the value is numeric (it
        # raises otherwise); the original value is stored unchanged.
        int(storage_value)
        self.__my_storage_value = storage_value
    @property
    @decorators.my_load
    def my_is_rewatching(self) -> bool:
        """
        :return: a flag to know if rewatching now.
        :rtype: bool
        """
        return self.__my_is_rewatching

    @my_is_rewatching.setter
    def my_is_rewatching(self, is_rewatching: bool):
        """
        :param is_rewatching: a flag to know if rewatching now.
        :type: bool
        """
        # No validation: the value is stored verbatim.
        self.__my_is_rewatching = is_rewatching

    @property
    @decorators.my_load
    def my_completed_episodes(self) -> int:
        """
        :return: the number of completed episodes.
        :rtype: int
        """
        return self.__my_completed_episodes

    @my_completed_episodes.setter
    def my_completed_episodes(self, completed_episodes: int):
        """
        :param completed_episodes: the number of completed episodes. Between 0 to number of episodes.
        :type: int
        :raise RuntimeError: if the value is out of range.
        """
        # NOTE(review): self.episodes is not defined in this class;
        # presumably attribute access is delegated to self.obj (the Anime
        # object) - confirm, otherwise this raises AttributeError.
        if not (0 <= completed_episodes <= self.episodes):
            raise RuntimeError("value of my_completed_episodes can be 0 to self.episodes")
        self.__my_completed_episodes = completed_episodes
    @property
    @decorators.my_load
    def my_download_episodes(self) -> int:
        """
        :return: the number of downloaded episodes.
        :rtype: int
        """
        return self.__my_download_episodes

    @my_download_episodes.setter
    def my_download_episodes(self, downloaded_episodes: int):
        """
        :param downloaded_episodes: the number of downloaded episodes. Between 0 to number of episodes.
        :type: int
        :raise RuntimeError: if the value is out of range.
        """
        # NOTE(review): self.episodes is not defined in this class;
        # presumably delegated to self.obj (the Anime object) - confirm.
        if not (0 <= downloaded_episodes <= self.episodes):
            raise RuntimeError("downloaded episodes can be 0 to self.episodes")
        self.__my_download_episodes = downloaded_episodes

    @property
    @decorators.my_load
    def my_times_rewatched(self) -> int:
        """
        :return: The times of rewatching is a positive value.
        :rtype: int
        """
        return self.__my_times_rewatched

    @my_times_rewatched.setter
    def my_times_rewatched(self, times_rewatched: int):
        """
        :param times_rewatched: the times of rewatching must be a positive value.
        :type: int
        :raise RuntimeError: if the value is negative.
        """
        if not (0 <= times_rewatched):
            raise RuntimeError("value of my_times_rewatched can be 0 or more")
        self.__my_times_rewatched = times_rewatched
    @property
    @decorators.my_load
    def my_rewatch_value(self) -> int:
        """
        :return: The rewatching is between 0 to 5.
        :rtype: int
        """
        return self.__my_rewatch_value

    @my_rewatch_value.setter
    def my_rewatch_value(self, rewatch_value: int):
        """
        :param rewatch_value: The rewatching must be between 0 to 5.
        :type: int
        :raise RuntimeError: if the value is out of range.
        """
        if not (0 <= rewatch_value <= 5):
            raise RuntimeError("rewatch value can be 0 to 5")
        self.__my_rewatch_value = rewatch_value

    @property
    @decorators.my_load
    def my_tags(self):
        """Read-only: the account's tags for this anime (set by my_reload).

        :rtype: frozenset
        """
        return self.__my_tags

    @property
    @decorators.my_load
    def my_comments(self):
        """Read-only: the account's comment about the anime.

        :rtype: str
        """
        return self.__my_comments

    @property
    @decorators.my_load
    def my_fan_sub_groups(self):
        """Read-only: the fan sub groups.

        :rtype: str
        """
        return self.__my_fan_sub_groups
def my_reload(self):
"""
Reloading data from MAL.
"""
from pymal import global_functions
# Getting content wrapper <div>
content_wrapper_div = global_functions.get_content_wrapper_div(
self.__my_mal_url, self._account.auth_connect)
bas_result = content_wrapper_div.find(name='div',
attrs={'class': 'badresult'})
if bas_result is not None:
raise exceptions.FailedToReloadError(bas_result)
# Getting content <td>
content_div = content_wrapper_div.find(
name="div", attrs={"id": "content"}, recursive=False)
if content_div is None:
raise exceptions.FailedToReloadError(content_wrapper_div)
content_td = content_div.table.tr.td
if content_td is None:
raise exceptions.FailedToReloadError(content_div)
# Getting content rows <tr>
content_form = content_td.find(name="form", attrs={'id': "myAnimeForm"})
if content_form is None:
raise exceptions.FailedToReloadError(content_td)
content_rows = content_form.table.tbody.findAll(
name="tr", recursive=False)
contents_divs_index = 2
# Getting my_status
status_select = content_rows[contents_divs_index].find(
name="select", attrs={"id": "status", "name": "status"})
if status_select is None:
raise exceptions.FailedToReloadError(content_rows)
# TODO: make this look better
status_selected_options = list(filter(
lambda x: 'selected' in x.attrs,
status_select.findAll(name="option")
))
if 1 != len(status_selected_options):
raise exceptions.FailedToReloadError(status_selected_options)
self.__my_status = int(status_selected_options[0]['value'])
is_rewatch_node = content_rows[contents_divs_index].find(
name="input", attrs={"id": "rewatchingBox"})
if is_rewatch_node is None:
raise exceptions.FailedToReloadError(content_rows)
self.__my_is_rewatching = bool(is_rewatch_node['value'])
contents_divs_index += 1
# Getting watched episodes
watched_input = content_rows[contents_divs_index].\
find(name="input", attrs={"id": "completedEpsID",
"name": "completed_eps"})
if watched_input is None:
raise exceptions.FailedToReloadError(content_rows)
self.__my_completed_episodes = int(watched_input['value'])
contents_divs_index += 1
# Getting my_score
score_select = content_rows[contents_divs_index].find(
name="select", attrs={"name": "score"})
if score_select is None:
raise exceptions.FailedToReloadError(content_rows)
score_selected_option = score_select.find(
name="option", attrs={"selected": ""})
if score_selected_option is None:
raise exceptions.FailedToReloadError(score_select)
self.__my_score = int(float(score_selected_option['value']))
contents_divs_index += 1
# Getting my_tags...
tag_content = content_rows[contents_divs_index]
tag_textarea = tag_content.find(
name="textarea", attrs={"name": "tags"})
self.__my_tags = frozenset(tag_textarea.text.split(self.__TAG_SEPARATOR))
contents_divs_index += 1
# Getting start date
start_month_date_node = content_rows[contents_divs_index].find(
name="select", attrs={"name": "startMonth"})
if start_month_date_node is None:
raise exceptions.FailedToReloadError(content_rows)
start_month_date = start_month_date_node.find(
name="option", attrs={"selected": ""})
start_day_date_node = content_rows[contents_divs_index].find(
name="select", attrs={"name": "startDay"})
if start_day_date_node is None:
raise exceptions.FailedToReloadError(content_rows)
start_day_date = start_day_date_node.find(
name="option", attrs={"selected": ""})
start_year_date_node = content_rows[contents_divs_index].find(
name="select", attrs={"name": "startYear"})
if start_year_date_node is None:
raise exceptions.FailedToReloadError(content_rows)
start_year_date = start_year_date_node.find(
name="option", attrs={"selected": ""})
start_month_date = str(start_month_date['value']).zfill(2)
start_day_date = str(start_day_date['value']).zfill(2)
start_year_date = str(start_year_date['value']).zfill(2)
self.__my_start_date = start_month_date + \
start_day_date + start_year_date
contents_divs_index += 1
# Getting end date
end_month_date_node = content_rows[contents_divs_index].find(
name="select", attrs={"name": "endMonth"})
if end_month_date_node is None:
raise exceptions.FailedToReloadError(content_rows)
end_month_date = end_month_date_node.find(
name="option", attrs={"selected": ""})
end_day_date_node = content_rows[contents_divs_index].find(
name="select", attrs={"name": "endDay"})
if end_day_date_node is None:
raise exceptions.FailedToReloadError(content_rows)
end_day_date = end_day_date_node.find(
name="option", attrs={"selected": ""})
end_year_date_node = content_rows[contents_divs_index].find(
name="select", attrs={"name": "endYear"})
if end_year_date_node is None:
raise exceptions.FailedToReloadError(content_rows)
end_year_date = end_year_date_node.find(
name="option", attrs={"selected": ""})
end_month_date = str(end_month_date['value']).zfill(2)
end_day_date = str(end_day_date['value']).zfill(2)
| |
from amuse.couple import bridge
from amuse.community.bhtree.interface import BHTree
from amuse.community.hermite0.interface import Hermite
from amuse.community.fi.interface import Fi
from amuse.community.octgrav.interface import Octgrav
from amuse.community.gadget2.interface import Gadget2
from amuse.community.phiGRAPE.interface import PhiGRAPE
from amuse.ic import plummer
from amuse.ic import gasplummer
from amuse.units import units
from amuse.units import constants
from amuse.units import quantities
from amuse.units import nbody_system
from optparse import OptionParser
import numpy
import time
try:
import pylab
except ImportError:
pylab = None
class GasPlummerModelExternalField(object):
    """
    Analytic Plummer-sphere gravitational field, usable as one side of a
    bridge coupling.  Provides the minimal interface bridge requires
    (get_gravity_at_point / get_potential_at_point) plus no-op
    bookkeeping hooks (stop, zero energies).
    """
    def __init__(self, position=[0, 0, 0] | units.parsec,
                 radius=1000. | units.parsec,
                 total_mass=1.6e10 | units.MSun):
        self.radius = radius
        self.total_mass = total_mass
        self.gravity_constant = constants.G
        self.position = position
        self.radius_squared = self.radius ** 2

    def get_gravity_at_point(self, eps, x, y, z):
        """Return the (ax, ay, az) acceleration components at (x, y, z)."""
        offset_x = x - self.position.x
        offset_y = y - self.position.y
        offset_z = z - self.position.z
        distance_sq = offset_x ** 2 + offset_y ** 2 + offset_z ** 2
        softened_sq = distance_sq + self.radius_squared
        # -G*M / (r**2 + a**2)**1.5; multiplied by the offset vector below.
        radial_factor = -self.gravity_constant * self.total_mass / softened_sq ** 1.5
        return radial_factor * offset_x, radial_factor * offset_y, radial_factor * offset_z

    def get_potential_at_point(self, eps, x, y, z):
        """Return the potential at (x, y, z).

        NOTE(review): this returns twice the standard Plummer potential
        -G*M/sqrt(r**2 + a**2); the factor of 2 is preserved from the
        original implementation -- confirm it is intended.
        """
        offset_x = x - self.position.x
        offset_y = y - self.position.y
        offset_z = z - self.position.z
        distance_sq = offset_x ** 2 + offset_y ** 2 + offset_z ** 2
        softened_distance = (distance_sq + self.radius_squared) ** 0.5
        magnitude = self.gravity_constant * self.total_mass / softened_distance
        return -magnitude * 2

    def stop(self):
        pass

    @property
    def kinetic_energy(self):
        return quantities.zero

    @property
    def potential_energy(self):
        return quantities.zero

    @property
    def thermal_energy(self):
        return quantities.zero
class AbstractStarAndGasPlummerCode(object):
    # Shared machinery for the star+gas Plummer experiments below:
    # unit conversion, initial-condition generation and live
    # energy / core-radius plotting.  Subclasses must set self.code
    # before evolve_model() runs; update_plot() additionally reads
    # self.star_code.
    def __init__(self,
            nstars = 10,
            ngas = -1,
            endtime = 10,
            total_mass = 1000,
            gas_fraction = 0.9,
            rscale = 1.0,
            star_smoothing_fraction = 0.001,
            gas_smoothing_fraction = 0.05,
            seed = -1,
            ntimesteps = 10,
            must_do_plot = True
        ):
        if seed >= 0:
            numpy.random.seed(seed)
        if ngas < 0:
            # Default: ten gas particles per star particle.
            ngas = nstars * 10
        self.must_do_plot = must_do_plot
        self.line = None   # energy-vs-time plot line, created lazily
        self.line2 = None  # core-radius-vs-time plot line
        self.ntimesteps = ntimesteps
        self.ngas = ngas
        self.nstars = nstars
        self.total_mass = total_mass | units.MSun
        self.gas_fraction = gas_fraction
        self.star_fraction = 1.0 - self.gas_fraction
        self.rscale = rscale | units.parsec
        # Softening lengths as fractions of the scale radius.
        self.star_epsilon = star_smoothing_fraction * self.rscale
        self.gas_epsilon = gas_smoothing_fraction * self.rscale
        self.star_mass = self.star_fraction * self.total_mass
        self.gas_mass = self.gas_fraction * self.total_mass
        self.converter = nbody_system.nbody_to_si(self.total_mass, self.rscale)
        self.endtime = self.converter.to_si(endtime | nbody_system.time)
        self.delta_t = self.endtime / self.ntimesteps

    def update_plot(self, time, code):
        # Append one (time, energy) and (time, core radius) sample to the
        # two live plot lines, creating them on first call.
        #
        # NOTE(review): the trailing comma below makes `time` a 1-tuple;
        # the numpy.concatenate calls rely on that, but pylab.plot([time],
        # ...) then receives a nested sequence -- confirm this is intended.
        time = self.converter.to_nbody(time).value_in(nbody_system.time),
        sum_energy = code.kinetic_energy + code.potential_energy + code.thermal_energy
        energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
        coreradius = self.star_code.particles.virial_radius().value_in(self.rscale.to_unit())
        #kicke = self.converter.to_nbody(code.kick_energy).value_in(nbody_system.energy)
        if self.line is None:
            # First sample: create both subplots and remember the lines.
            pylab.ion()
            pylab.subplot(1,2,1)
            self.line = pylab.plot([time], [energy])[0]
            pylab.xlim(0, self.converter.to_nbody(self.endtime).value_in(nbody_system.time))
            pylab.ylim(energy * 0.8, energy * 1.2)
            pylab.subplot(1,2,2)
            self.line2 = pylab.plot([time], [coreradius])[0]
            #self.line2 = pylab.plot([time], [kicke])[0]
            pylab.xlim(0, self.converter.to_nbody(self.endtime).value_in(nbody_system.time))
            pylab.ylim(0,3)
            #pylab.ylim(-0.1, 0.1)
        else:
            # Subsequent samples: extend both lines and redraw.
            xdata = self.line.get_xdata()
            ydata = self.line.get_ydata()
            xdata = numpy.concatenate( (xdata, time) )
            ydata = numpy.concatenate( (ydata, [energy]) )
            self.line.set_xdata(xdata)
            self.line.set_ydata(ydata)
            xdata = self.line2.get_xdata()
            ydata = self.line2.get_ydata()
            xdata = numpy.concatenate( (xdata, time) )
            #ydata = numpy.concatenate( (ydata, [kicke]) )
            ydata = numpy.concatenate( (ydata, [coreradius]) )
            self.line2.set_xdata(xdata)
            self.line2.set_ydata(ydata)
            pylab.draw()

    def new_particles_cluster(self):
        # Plummer sphere of equal-mass star particles in SI units.
        particles=plummer.new_plummer_model(self.nstars,convert_nbody=self.converter)
        particles.radius= self.star_epsilon
        particles.mass = (1.0/self.nstars) * self.star_mass
        return particles

    def new_gas_cluster(self):
        # Plummer sphere of equal-mass SPH gas particles.
        particles=gasplummer.new_plummer_gas_model(self.ngas,convert_nbody=self.converter)
        particles.h_smooth= self.gas_epsilon
        particles.mass = (1.0/self.ngas) * self.gas_mass
        return particles

    def new_particles_cluster_as_gas(self):
        # Gas distribution represented as collisionless particles, for
        # pure-gravity codes standing in for the gas.
        particles=plummer.new_plummer_model(self.ngas,convert_nbody=self.converter)
        particles.radius= self.gas_epsilon
        particles.mass = (1.0/self.ngas) * self.gas_mass
        return particles

    def stop(self):
        pass

    def evolve_model(self):
        # Advance self.code to self.endtime in ntimesteps equal steps,
        # printing the model time and optionally updating the plot.
        if self.must_do_plot:
            self.update_plot(time = 0 * self.delta_t, code = self.code)
        for time in self.delta_t * range(1,self.ntimesteps+1):
            self.code.evolve_model(time)
            print self.converter.to_nbody(self.code.time)
            if self.must_do_plot:
                self.update_plot(time = self.code.time, code = self.code)
class BridgeStarAndGasPlummerCode(AbstractStarAndGasPlummerCode):
def __init__(self,
nstars = 10,
ngas = -1,
endtime = 10,
total_mass = 1000,
gas_fraction = 0.9,
rscale = 1.0,
star_code = 'hermite',
gas_code = 'field',
star_smoothing_fraction = 0.001,
gas_smoothing_fraction = 0.05,
seed = -1,
ntimesteps = 10,
interaction_timestep = 0.01,
must_do_plot = True,
gas_to_star_interaction_code = 'none',
star_to_gas_interaction_code = 'none',
**ignored_options
):
AbstractStarAndGasPlummerCode.__init__(
self,
nstars,
ngas,
endtime,
total_mass,
gas_fraction,
rscale,
star_smoothing_fraction,
gas_smoothing_fraction,
seed,
ntimesteps,
must_do_plot
)
self.interaction_timestep = self.converter.to_si(interaction_timestep| nbody_system.time)
self.create_codes(
gas_code,
star_code,
gas_to_star_interaction_code,
star_to_gas_interaction_code,
)
self.create_bridge()
self.code = self.bridge_system
time = 0
sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
coreradius = self.star_code.particles.virial_radius().value_in(self.rscale.to_unit())
print "Time :", time
print "Energy :", energy
print "Virial radius :", coreradius
self.evolve_model()
if must_do_plot:
pylab.show()
pylab.savefig(
"{0}-{1}-{2}-{3}.png".format(
star_code,
gas_code,
nstars,
ngas
)
)
time = self.converter.to_nbody(self.code.time).value_in(nbody_system.time)
sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
coreradius = self.star_code.particles.virial_radius().value_in(self.rscale.to_unit())
print "Time :", time
print "Energy :", energy
print "Virial radius :", coreradius
self.stop()
if must_do_plot:
raw_input('Press enter...')
def create_codes(self, gas_code, star_code, gas_to_star_interaction_code, star_to_gas_interaction_code):
self.star_code = getattr(self,'new_star_code_'+star_code)()
self.gas_code = getattr(self, 'new_gas_code_'+gas_code)()
self.gas_to_star_codes = getattr(self, 'new_gas_to_star_interaction_codes_'+gas_to_star_interaction_code)(self.gas_code)
self.star_to_gas_codes = getattr(self, 'new_star_to_gas_interaction_codes_'+star_to_gas_interaction_code)(self.star_code)
def create_bridge(self):
bridge_code1 = bridge.GravityCodeInField(
self.gas_code, self.star_to_gas_codes
)
bridge_code2 = bridge.GravityCodeInField(
self.star_code, self.gas_to_star_codes
)
self.bridge_system = bridge.Bridge(
timestep = self.interaction_timestep,
use_threading = False
)
self.bridge_system.add_code(bridge_code2)
self.bridge_system.add_code(bridge_code1)
def stop(self):
self.star_code.stop()
self.gas_code.stop()
def new_gas_to_star_interaction_codes_self(self, gas_code):
return [gas_code]
def new_star_to_gas_interaction_codes_self(self, star_code):
return [star_code]
def new_gas_to_star_interaction_codes_none(self, gas_code):
return []
def new_star_to_gas_interaction_codes_none(self, gas_code):
return []
def new_gas_to_star_interaction_codes_octgrav(self, gas_code):
def new_octgrav():
result = Octgrav(self.converter)
result.parameters.epsilon_squared = self.gas_epsilon ** 2
return result
return [bridge.CalculateFieldForCodes(new_octgrav, [gas_code])]
def new_gas_to_star_interaction_codes_bhtree(self, gas_code):
def new_bhtree():
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
return result
return [bridge.CalculateFieldForCodes(new_bhtree, [gas_code])]\
def new_gas_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = True
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
#result.parameters.adaptive_smoothing_flag = True
#result.parameters.epsilon_squared = self.gas_epsilon ** 2
#result.parameters.eps_is_h_flag = False
result.parameters.integrate_entropy_flag = False
#result.parameters.self_gravity_flag = False
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_star_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = False
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_gas_code_gadget(self):
result = Gadget2(self.converter)
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_gas_code_field(self):
result = GasPlummerModelExternalField(
radius = self.rscale,
total_mass = self.gas_mass
)
return result
def new_gas_code_hermite(self):
result = Hermite(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster_as_gas())
result.commit_particles()
return result
def new_star_code_hermite(self):
result = Hermite(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_phigrape(self):
result = PhiGRAPE(self.converter, mode="gpu")
result.parameters.initialize_gpu_once = 1
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_bhtree(self):
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_star_code_octgrav(self):
result = Octgrav(self.converter)
result.parameters.epsilon_squared = self.star_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster())
result.commit_particles()
return result
def new_gas_code_bhtree(self):
result = BHTree(self.converter)
result.parameters.epsilon_squared = self.gas_epsilon ** 2
result.parameters.timestep = 0.125 * self.interaction_timestep
result.particles.add_particles(self.new_particles_cluster_as_gas())
result.commit_particles()
return result
class AllInOneStarAndGasPlummerCode(AbstractStarAndGasPlummerCode):
def __init__(self,
nstars = 10,
ngas = -1,
endtime = 10,
total_mass = 1000,
gas_fraction = 0.9,
rscale = 1.0,
sph_code = 'fi',
star_smoothing_fraction = 0.001,
gas_smoothing_fraction = 0.05,
seed = -1,
ntimesteps = 10,
must_do_plot = True,
interaction_timestep = 0.01,
**ignored_options
):
AbstractStarAndGasPlummerCode.__init__(
self,
nstars,
ngas,
endtime,
total_mass,
gas_fraction,
rscale,
star_smoothing_fraction,
gas_smoothing_fraction,
seed,
ntimesteps,
must_do_plot
)
self.interaction_timestep = self.converter.to_si(interaction_timestep| nbody_system.time)
self.create_code(sph_code)
sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
coreradius = self.code.dm_particles.virial_radius().value_in(self.rscale.to_unit())
print "Time:", 0
print "Energy:", energy
print "Virial radius:", coreradius
self.evolve_model()
if must_do_plot:
pylab.show()
pylab.savefig(
"{0}-{1}-{2}.png".format(
sph_code,
nstars,
ngas
)
)
time = self.converter.to_nbody(self.code.model_time).value_in(nbody_system.time)
sum_energy = self.code.kinetic_energy + self.code.potential_energy + self.code.thermal_energy
energy = self.converter.to_nbody(sum_energy).value_in(nbody_system.energy)
coreradius = self.code.dm_particles.virial_radius().value_in(self.rscale.to_unit())
print "Time:", time
print "Energy:", energy
print "Virial radius:", coreradius
self.stop()
if must_do_plot:
raw_input('Press enter...')
def evolve_model(self):
if self.must_do_plot:
self.update_plot(time = 0 * self.delta_t, code = self.code)
for time in self.delta_t * range(1,self.ntimesteps+1):
self.code.evolve_model(time)
print self.converter.to_nbody(self.code.model_time)
if self.must_do_plot:
self.update_plot(time = self.code.time, code = self.code)
def create_code(self, name):
self.code = getattr(self, 'new_sph_code_'+name)()
def stop(self):
self.code.stop()
def new_sph_code_fi(self):
result = Fi(self.converter)
result.parameters.self_gravity_flag = True
result.parameters.use_hydro_flag = True
result.parameters.radiation_flag = False
result.parameters.periodic_box_size = 500 | units.parsec
result.parameters.timestep = 0.125 * self.interaction_timestep
#result.parameters.adaptive_smoothing_flag = True
#result.parameters.epsilon_squared = self.gas_epsilon ** 2
#result.parameters.eps_is_h_flag = False
result.parameters.integrate_entropy_flag = False
result.dm_particles.add_particles(self.new_particles_cluster())
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_sph_code_gadget(self):
result = Gadget2(self.converter)
result.dm_particles.add_particles(self.new_particles_cluster())
result.gas_particles.add_particles(self.new_gas_cluster())
result.commit_particles()
return result
def new_option_parser():
result = OptionParser()
result.add_option(
"-n", "--nstar",
default = 10,
dest="nstars",
help="number of star particles",
| |
# coding: utf-8
"""
PassHub API
PassHub API documentation # noqa: E501
OpenAPI spec version: 1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from sightseeingtech_passhub_api.models.rule import Rule # noqa: F401,E501
from sightseeingtech_passhub_api.models.user_dto import UserDTO # noqa: F401,E501
from sightseeingtech_passhub_api.models.vendor_dto import VendorDTO # noqa: F401,E501
from sightseeingtech_passhub_api.models.voucher_pattern import VoucherPattern # noqa: F401,E501
class SimpleConnectedProductDTO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_recharge': 'bool',
'country_code': 'str',
'customer_type': 'str',
'description': 'str',
'id': 'int',
'location_code': 'str',
'location_name': 'str',
'max_validations_in_pass': 'int',
'modified_by': 'UserDTO',
'modified_date': 'str',
'name': 'str',
'package_product_id': 'str',
'price': 'float',
'print_type': 'str',
'product_id': 'str',
'recharge_price': 'float',
'retail_price': 'float',
'sku': 'str',
'sub_product_id': 'str',
'tags': 'list[str]',
'timezone': 'str',
'type': 'str',
'valid_value': 'int',
'validation_rules': 'list[Rule]',
'validation_tag': 'str',
'vendor': 'VendorDTO',
'vendor_account_id': 'str',
'vendor_sku': 'str',
'voucher_pattern': 'VoucherPattern',
'voucher_type': 'str'
}
attribute_map = {
'allow_recharge': 'allowRecharge',
'country_code': 'countryCode',
'customer_type': 'customerType',
'description': 'description',
'id': 'id',
'location_code': 'locationCode',
'location_name': 'locationName',
'max_validations_in_pass': 'maxValidationsInPass',
'modified_by': 'modifiedBy',
'modified_date': 'modifiedDate',
'name': 'name',
'package_product_id': 'packageProductId',
'price': 'price',
'print_type': 'printType',
'product_id': 'productId',
'recharge_price': 'rechargePrice',
'retail_price': 'retailPrice',
'sku': 'sku',
'sub_product_id': 'subProductId',
'tags': 'tags',
'timezone': 'timezone',
'type': 'type',
'valid_value': 'validValue',
'validation_rules': 'validationRules',
'validation_tag': 'validationTag',
'vendor': 'vendor',
'vendor_account_id': 'vendorAccountId',
'vendor_sku': 'vendorSku',
'voucher_pattern': 'voucherPattern',
'voucher_type': 'voucherType'
}
    def __init__(self, allow_recharge=None, country_code=None, customer_type=None, description=None, id=None, location_code=None, location_name=None, max_validations_in_pass=None, modified_by=None, modified_date=None, name=None, package_product_id=None, price=None, print_type=None, product_id=None, recharge_price=None, retail_price=None, sku=None, sub_product_id=None, tags=None, timezone=None, type=None, valid_value=None, validation_rules=None, validation_tag=None, vendor=None, vendor_account_id=None, vendor_sku=None, voucher_pattern=None, voucher_type=None):  # noqa: E501
        """SimpleConnectedProductDTO - a model defined in Swagger"""  # noqa: E501
        # All backing fields default to None; only explicitly provided
        # arguments are assigned, and assignment goes through the property
        # setters defined below (which may validate, e.g. customer_type).
        self._allow_recharge = None
        self._country_code = None
        self._customer_type = None
        self._description = None
        self._id = None
        self._location_code = None
        self._location_name = None
        self._max_validations_in_pass = None
        self._modified_by = None
        self._modified_date = None
        self._name = None
        self._package_product_id = None
        self._price = None
        self._print_type = None
        self._product_id = None
        self._recharge_price = None
        self._retail_price = None
        self._sku = None
        self._sub_product_id = None
        self._tags = None
        self._timezone = None
        self._type = None
        self._valid_value = None
        self._validation_rules = None
        self._validation_tag = None
        self._vendor = None
        self._vendor_account_id = None
        self._vendor_sku = None
        self._voucher_pattern = None
        self._voucher_type = None
        self.discriminator = None
        if allow_recharge is not None:
            self.allow_recharge = allow_recharge
        if country_code is not None:
            self.country_code = country_code
        if customer_type is not None:
            self.customer_type = customer_type
        if description is not None:
            self.description = description
        if id is not None:
            self.id = id
        if location_code is not None:
            self.location_code = location_code
        if location_name is not None:
            self.location_name = location_name
        if max_validations_in_pass is not None:
            self.max_validations_in_pass = max_validations_in_pass
        if modified_by is not None:
            self.modified_by = modified_by
        if modified_date is not None:
            self.modified_date = modified_date
        if name is not None:
            self.name = name
        if package_product_id is not None:
            self.package_product_id = package_product_id
        if price is not None:
            self.price = price
        if print_type is not None:
            self.print_type = print_type
        if product_id is not None:
            self.product_id = product_id
        if recharge_price is not None:
            self.recharge_price = recharge_price
        if retail_price is not None:
            self.retail_price = retail_price
        if sku is not None:
            self.sku = sku
        if sub_product_id is not None:
            self.sub_product_id = sub_product_id
        if tags is not None:
            self.tags = tags
        if timezone is not None:
            self.timezone = timezone
        if type is not None:
            self.type = type
        if valid_value is not None:
            self.valid_value = valid_value
        if validation_rules is not None:
            self.validation_rules = validation_rules
        if validation_tag is not None:
            self.validation_tag = validation_tag
        if vendor is not None:
            self.vendor = vendor
        if vendor_account_id is not None:
            self.vendor_account_id = vendor_account_id
        if vendor_sku is not None:
            self.vendor_sku = vendor_sku
        if voucher_pattern is not None:
            self.voucher_pattern = voucher_pattern
        if voucher_type is not None:
            self.voucher_type = voucher_type
    # --- Generated accessors (swagger-codegen) --------------------------
    # Plain pass-throughs to the backing fields, except customer_type,
    # which validates against a fixed set of allowed values.

    @property
    def allow_recharge(self):
        """Gets the allow_recharge of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The allow_recharge of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: bool
        """
        return self._allow_recharge

    @allow_recharge.setter
    def allow_recharge(self, allow_recharge):
        """Sets the allow_recharge of this SimpleConnectedProductDTO.

        :param allow_recharge: The allow_recharge of this SimpleConnectedProductDTO.  # noqa: E501
        :type: bool
        """
        self._allow_recharge = allow_recharge

    @property
    def country_code(self):
        """Gets the country_code of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The country_code of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._country_code

    @country_code.setter
    def country_code(self, country_code):
        """Sets the country_code of this SimpleConnectedProductDTO.

        :param country_code: The country_code of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        self._country_code = country_code

    @property
    def customer_type(self):
        """Gets the customer_type of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The customer_type of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._customer_type

    @customer_type.setter
    def customer_type(self, customer_type):
        """Sets the customer_type of this SimpleConnectedProductDTO.

        :param customer_type: The customer_type of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        # NOTE(review): setting None raises ValueError here; __init__ only
        # routes non-None values through this setter, but later callers
        # cannot clear the field -- confirm that is intended.
        allowed_values = ["ADULT", "CHILD", "SENIOR"]  # noqa: E501
        if customer_type not in allowed_values:
            raise ValueError(
                "Invalid value for `customer_type` ({0}), must be one of {1}"  # noqa: E501
                .format(customer_type, allowed_values)
            )
        self._customer_type = customer_type

    @property
    def description(self):
        """Gets the description of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The description of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this SimpleConnectedProductDTO.

        :param description: The description of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        self._description = description

    @property
    def id(self):
        """Gets the id of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The id of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this SimpleConnectedProductDTO.

        :param id: The id of this SimpleConnectedProductDTO.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def location_code(self):
        """Gets the location_code of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The location_code of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._location_code

    @location_code.setter
    def location_code(self, location_code):
        """Sets the location_code of this SimpleConnectedProductDTO.

        :param location_code: The location_code of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        self._location_code = location_code

    @property
    def location_name(self):
        """Gets the location_name of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The location_name of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._location_name

    @location_name.setter
    def location_name(self, location_name):
        """Sets the location_name of this SimpleConnectedProductDTO.

        :param location_name: The location_name of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        self._location_name = location_name

    @property
    def max_validations_in_pass(self):
        """Gets the max_validations_in_pass of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The max_validations_in_pass of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: int
        """
        return self._max_validations_in_pass

    @max_validations_in_pass.setter
    def max_validations_in_pass(self, max_validations_in_pass):
        """Sets the max_validations_in_pass of this SimpleConnectedProductDTO.

        :param max_validations_in_pass: The max_validations_in_pass of this SimpleConnectedProductDTO.  # noqa: E501
        :type: int
        """
        self._max_validations_in_pass = max_validations_in_pass

    @property
    def modified_by(self):
        """Gets the modified_by of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The modified_by of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: UserDTO
        """
        return self._modified_by

    @modified_by.setter
    def modified_by(self, modified_by):
        """Sets the modified_by of this SimpleConnectedProductDTO.

        :param modified_by: The modified_by of this SimpleConnectedProductDTO.  # noqa: E501
        :type: UserDTO
        """
        self._modified_by = modified_by

    @property
    def modified_date(self):
        """Gets the modified_date of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The modified_date of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._modified_date

    @modified_date.setter
    def modified_date(self, modified_date):
        """Sets the modified_date of this SimpleConnectedProductDTO.

        :param modified_date: The modified_date of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        self._modified_date = modified_date

    @property
    def name(self):
        """Gets the name of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The name of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this SimpleConnectedProductDTO.

        :param name: The name of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def package_product_id(self):
        """Gets the package_product_id of this SimpleConnectedProductDTO.  # noqa: E501

        :return: The package_product_id of this SimpleConnectedProductDTO.  # noqa: E501
        :rtype: str
        """
        return self._package_product_id

    @package_product_id.setter
    def package_product_id(self, package_product_id):
        """Sets the package_product_id of this SimpleConnectedProductDTO.

        :param package_product_id: The package_product_id of this SimpleConnectedProductDTO.  # noqa: E501
        :type: str
        """
        self._package_product_id = package_product_id
@property
def price(self):
"""Gets the price of this SimpleConnectedProductDTO. # noqa: E501
:return: The price of this SimpleConnectedProductDTO. # | |
where processed glyphs
are saved in a layer, and processing of a glyph is skip[ped if has already been processed.
"""
error_doc = """
The following is list of the checkOutlines error messages, and a brief
explanation of each. The error messages here may be just the first part
of the actual message, with additional info and coordinates given in the
rest of the actual message.
The phrase 'subpath' means what in FontLab is a contour: a complete
closed line.
The phrase 'path element' means a line segment from one on-curve node to the
next on a contour.
The phrase 'point' means an on-curve node.
The phrase 'control point' means an off-curve bezier curve control
handle end-point.
"Near-miss on orthogonal transition between (path segment1) and
(pathsegment2)"
These two path segments are so nearly at right angles, that I
suspect that they were intended to be a right angle.
"Need to delete incomplete subpath at (path segment)"
This contour doesn't have enough points to make a closed contour.
Need to delete orphan subpath with moveto at (path segment)"
This contour is outside the font BBox, and is therefore was probably
drawn accidentally.
"May need to insert extremum in element (path segment)"
Looks like the path element contains a horizontal or vertical
extreme; if you want this hinted, you need a control point there.
"Need to remove double-sharp element"
"Need to clip sharp angle between two path segments)"
"Extremely sharp angle between (two path segments)"
Very sharp angles can cause PostScript interpreters to draw long
spikes. Best to blunt these with a short line-segment across the end
of the angle.
"Need to convert straight curve"
This path element is a curve type, but is straight - might as well
make it a line.
"Need to delete tiny subpath at (path segment)"
This contour is so small, it must be have been drawn accidentally.
"Need to fix coincident control points"
Two control points lie on top of each other. This can act like an
intersection.
"Need to fix control point outside of endpoints"
"Need to fix point(s) outside triangle by %d units in X and %d units in Y"
The Bezier control points for a curve segment must lie on the same
side of the curve segment, and within lines orthogonal to the curve
segment at the end points. When this is not the case, there are
vertical and horizontal extremes with no points. This is
often the result of an accident while drawing.
"Need to fix wrong orientation on subpath with original moveto at
Direction of subpath is wrong in this contour.
"Need to inspect coincident paths: with original moveto at ..."
Two line or curve segments either coincide, or are so close they
will look like they coincide. checkOutline will generate false
positives if both paths have the same bounding box; for example, if
an 'x' is drawn with two symmetrical bars crossing each other, they
will have the same bounding box, and checkOutlines will report them
as being coincident paths.
"Need to inspect for possible loop/inflection (path segment)"
This looks like a single path element that makes a loop. If so, it is very
bad for rasterizers.
"Need to inspect unsmoothed transition between (two path segments)"
There is an angle between two path elements which is so shallow that
it was probably meant to be smooth.
"Need to join colinear lines"
Two straight lines are lined up, or very nearly so. These
should be replaced by a single line segment
"Need to remove one-unit closepath"
A one-unit-long line is silly. Make the previous segment extend to
the end point. No important consequences.
"Need to remove zero-length element"
Two neighboring points lie on top of each other. This
confuses rasterizers. Get rid of one.
"Warning: (number) intersections found. Please inspect."
Two path elements cross. This must be fixed.
This is usually caused by two subpaths that intersect, but it can
also be caused by a single path element that forms a loop. To find the
latter, run checkOutlines with the -V option, and look for errors that
did not appear when intersections were being removed.
"Outline's bounding-box (X:%d %d Y:%d %d) looks too large."
Bounding box of this contour exceeds the font-bounding box, so there
is probably some error.
"Warning: path #%d with original moveto at %d %d has no extreme-points
on its curveto's. Does it need to be curvefit? Please inspect",
This contour does not have points at all of its vertical and
horizontal extremes.
"Warning: Subpath with only 2 graphic elements at (path segment)"
This contour has only two nodes; it is either two superimposed line
segments, or at least one curve without any point at extremes.
"""
# Methods:
# Parse args. If glyphlist is from file, read in entire file as single string,
# and remove all whitespace, then parse out glyph-names and GID's.
import sys
import os
import re
import time
from fontTools.ttLib import TTFont, getTableModule
import traceback
import tempfile
import FDKUtils
from BezTools import *
import ufoTools
import shutil
haveFocus = 1
debug = 0
kDefaultLogFileName = "checkOutlines.log"
gLogFile = None  # opened by the driver; logMsg() mirrors output here when set
kSrcGLIFHashMap = "com.adobe.type.checkOutlinesHashMap"
# NOTE(review): tempfile.mktemp() is deprecated and race-prone (the name
# can be claimed by another process before use); consider tempfile.mkstemp()
# or NamedTemporaryFile.  Preserved as-is here.
kTempFilepath = tempfile.mktemp()
kTempFilepathOut = kTempFilepath + ".new"
kTempCFFSuffix = ".temp.ac.cff"
class focusOptions:
    """Run-time options for the outline checker, with their defaults."""
    def __init__(self):
        self.filePath = None
        self.outFilePath = None
        self.logFilePath = None
        self.glyphList = []  # glyphs to process; empty list means all glyphs
        self.allowChanges = 0
        self.beVerbose = 0
        self.doOverlapCheck = 1
        self.skipInspectionTests = 0
        self.doSmoothnessTest = 0
        self.doSpikeTest = 0
        self.doTriangleTest = 0
        self.doNearlyVH = 0
        self.doPathDirectionTest = 1
        self.doCoincidentPathTest = 1
        # Tolerances kept as strings; empty string presumably means
        # "use the tool's default" -- TODO confirm against the consumer.
        self.curveTolerance = ""
        self.lineTolerance = ""
        self.arcTolerance = ""
        self.pathTolerance = ""
        self.emSquare = ""
        self.skipIfUnchanged = False
        self.checkAll = False  # overrides skipIfUnchanged: forces all glyphs to be processed even if src hasn't changed.
class FDKEnvironmentError(AttributeError):
    # Raised by CheckEnvironment() when the FDK helper programs ('tx',
    # 'checkoutlinesexe') cannot be found on PATH.
    # NOTE(review): subclassing AttributeError looks arbitrary -- confirm no
    # caller relies on catching AttributeError here.
    pass
class focusOptionParseError(KeyError):
    # Raised by getOptions() for malformed or unknown command-line arguments.
    pass
class focusFontError(KeyError):
    # Presumably raised for font-level processing failures; it is not raised
    # anywhere within this part of the file -- verify against the rest of it.
    pass
kProgressChar = "."
def logMsg(*args):
for arg in args:
msg = str(arg).strip()
if not msg:
print
sys.stdout.flush()
if gLogFile:
gLogFile.write(os.linesep)
gLogFile.flush()
return
msg = re.sub(r"[\r\n]", " ", msg)
if msg[-1] == ",":
msg = msg[:-1]
if msg == kProgressChar:
sys.stdout.write(msg) # avoid space, which is added by 'print'
print msg,
sys.stdout.flush()
if gLogFile:
gLogFile.write(msg)
gLogFile.flush()
else:
print msg
sys.stdout.flush()
if gLogFile:
gLogFile.write(msg + os.linesep)
gLogFile.flush()
def CheckEnvironment():
    """Verify that the FDK helper programs 'tx' and 'checkoutlinesexe' are
    reachable on PATH; raise FDKEnvironmentError otherwise."""
    txPath = 'tx'
    # 'tx -u' prints its usage text; its presence proves the tool runs.
    report = FDKUtils.runShellCmd("%s -u 2>&1" % (txPath))
    txError = 0 if "options" in report else 1
    if txError:
        logMsg("Please re-install the FDK. The path to the program 'tx' is not in the environment variable PATH.")
        raise FDKEnvironmentError
    # Same probe for the outline-checker backend.
    report = FDKUtils.runShellCmd("checkoutlinesexe -u 2>&1")
    if "version" not in report:
        logMsg("Please re-install the FDK. The path to the program 'checkoutlinesexe' is not in the environment variable PATH.")
        raise FDKEnvironmentError
    return
# Alias map from working glyph name to original name; populated by
# expandNames() (e.g. '.notdef' -> 'cid00000') and consulted by aliasName().
# NOTE(review): a 'global' statement at module scope is a no-op.
global nameAliasDict
nameAliasDict = {}
def aliasName(glyphName):
    """Translate a working glyph name back through nameAliasDict.

    Returns the mapped name when an alias exists, otherwise the name itself.
    """
    if not nameAliasDict:
        return glyphName
    return nameAliasDict.get(glyphName, glyphName)
def expandNames(glyphName):
    """Normalize one glyph-list token.

    Ranges ("a-b") are expanded recursively on each endpoint.  CID notations
    "/123" and short "cidNNN" forms are zero-padded to the canonical
    "cidNNNNN"; CID 0 becomes ".notdef" and the alias is recorded in the
    module-level nameAliasDict.
    """
    global nameAliasDict
    endpoints = glyphName.split("-")
    if len(endpoints) > 1:
        # Re-normalize each side of the range independently.
        return "%s-%s" % (expandNames(endpoints[0]), expandNames(endpoints[1]))
    if glyphName[0] == "/":
        glyphName = "cid" + glyphName[1:].zfill(5)
        if glyphName == "cid00000":
            glyphName = ".notdef"
            nameAliasDict[glyphName] = "cid00000"
    elif glyphName.startswith("cid") and (len(glyphName) < 8):
        glyphName = "cid" + glyphName[3:].zfill(5)
        if glyphName == "cid00000":
            glyphName = ".notdef"
            nameAliasDict[glyphName] = "cid00000"
    return glyphName
def parseGlyphListArg(glyphString):
    """Split a whitespace/comma separated glyph string into expanded names.

    Every run of separators collapses to a single comma before splitting;
    each token is normalized via expandNames() and empty tokens are dropped.
    """
    normalized = re.sub(r"[ \t\r\n,]+", ",", glyphString)
    # map/filter kept deliberately: under Python 2 this yields a list.
    return filter(None, map(expandNames, normalized.split(",")))
def getOptions():
global debug
options = focusOptions()
i = 1
numOptions = len(sys.argv)
while i < numOptions:
arg = sys.argv[i]
if options.filePath and arg[0] == "-":
raise focusOptionParseError("Option Error: All file names must follow all other options <%s>." % arg)
if arg == "-u":
print __usage__
sys.exit()
elif arg == "-h":
print __help__
sys.exit()
elif arg == "-he":
print error_doc
sys.exit()
elif arg == "-d":
debug = 1
elif arg == "-g":
i = i +1
glyphString = sys.argv[i]
if glyphString[0] == "-":
raise focusOptionParseError("Option Error: it looks like the first item in the glyph list following '-g' is another option.")
options.glyphList += parseGlyphListArg(glyphString)
elif arg == "-gf":
i = i +1
filePath = sys.argv[i]
if filePath[0] == "-":
raise focusOptionParseError("Option Error: it looks like the the glyph list file following '-gf' is another option.")
try:
gf = file(filePath, "rt")
glyphString = gf.read()
gf.close()
except (IOError,OSError):
raise focusOptionParseError("Option Error: could not open glyph list file <%s>." % filePath)
options.glyphList += parseGlyphListArg(glyphString)
elif arg == "-e":
options.allowChanges = 1
skipIfUnchanged = True
elif arg == "-v":
options.beVerbose = 1
elif arg == "-V":
options.doOverlapCheck = 0
elif arg == "-I":
options.skipInspectionTests = 1
elif arg == "-i":
options.skipInspectionTests = 0
options.doSmoothnessTest = 1
options.doSpikeTest = 1
options.doTriangleTest = 1
options.doNearlyVH = 1
elif arg == "-s":
options.doSmoothnessTest = 1
elif arg == "-x":
options.doSpikeTest = 1
elif arg == "-3":
options.doTriangleTest = 1
elif arg == "-r":
options.doNearlyVH = 1
elif arg == "-O":
options.doPathDirectionTest = 0
elif arg == "-k":
options.doCoincidentPathTest = 0
elif arg == "-C":
i = i + 1
options.curveTolerance = sys.argv[i]
elif arg == "-L":
i = i + 1
options.lineTolerance = sys.argv[i]
elif arg == "-S":
i = i + 1
options.arcTolerance = sys.argv[i]
elif arg == "-K":
i = i + 1
options.pathTolerance = sys.argv[i]
elif arg == "-b":
i = i + 1
options.emSquare = sys.argv[i]
elif arg == "-o":
i = i + 1
options.outFilePath = sys.argv[i]
elif arg == "-log":
i = i + 1
options.logFilePath = sys.argv[i]
elif arg == "-all":
options.checkAll = True
elif arg[0] == "-":
raise focusOptionParseError("Option Error: Unknown option <%s>." % arg)
else:
if options.filePath:
raise focusOptionParseError("Option Error: You cannot specify more than | |
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
# The optional C-accelerated (de)serializer; fall back to pure Python when it
# is not built.  BUGFIX: narrowed the bare 'except:' (which also swallowed
# KeyboardInterrupt/SystemExit) to the ImportError it is meant to handle.
try:
    from thrift.protocol import fastbinary
except ImportError:
    fastbinary = None
class Result:
    """
    Thrift struct carrying a result code, message and key.

    Attributes:
    - code
    - msg
    - key
    """

    # Wire-format table: (field id, thrift type, name, nested spec, default).
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'code', None, None, ),  # 1
        (2, TType.STRING, 'msg', None, None, ),  # 2
        (3, TType.STRING, 'key', None, None, ),  # 3
    )

    def __init__(self, code=None, msg=None, key=None,):
        self.code = code
        self.msg = msg
        self.key = key

    def read(self, iprot):
        # Fast path: the C decoder handles the whole struct in one call when
        # the protocol, transport and compiled module all support it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Pure-Python path: walk fields until STOP, skipping anything with an
        # unexpected id or type (forward compatibility).
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.code = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.msg = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.key = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirror of read(); otherwise emit each non-None field.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Result')
        if self.code is not None:
            oprot.writeFieldBegin('code', TType.I32, 1)
            oprot.writeI32(self.code)
            oprot.writeFieldEnd()
        if self.msg is not None:
            oprot.writeFieldBegin('msg', TType.STRING, 2)
            oprot.writeString(self.msg)
            oprot.writeFieldEnd()
        if self.key is not None:
            oprot.writeFieldBegin('key', TType.STRING, 3)
            oprot.writeString(self.key)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated stub: no required-field constraints are checked.
        return

    def __hash__(self):
        # 31-multiplier accumulation over every field, Java-style.
        value = 17
        value = (value * 31) ^ hash(self.code)
        value = (value * 31) ^ hash(self.msg)
        value = (value * 31) ^ hash(self.key)
        return value

    def __repr__(self):
        # Reflective repr over the instance dict (Python 2 iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class RestMessage:
    """
    Thrift struct describing one deferred/asynchronous REST call.

    Attributes:
    - type
    - mode
    - url
    - params
    - headers
    - timestamp
    - expire
    - timeout
    - retryCnt
    """

    # Wire-format table: (field id, thrift type, name, nested spec, default).
    # type/mode default to 1 and retryCnt to 3 per the IDL.
    thrift_spec = (
        None,  # 0
        (1, TType.BYTE, 'type', None, 1, ),  # 1
        (2, TType.BYTE, 'mode', None, 1, ),  # 2
        (3, TType.STRING, 'url', None, None, ),  # 3
        (4, TType.MAP, 'params', (TType.STRING,None,TType.STRING,None), None, ),  # 4
        (5, TType.MAP, 'headers', (TType.STRING,None,TType.STRING,None), None, ),  # 5
        (6, TType.I64, 'timestamp', None, None, ),  # 6
        (7, TType.I64, 'expire', None, None, ),  # 7
        (8, TType.I64, 'timeout', None, None, ),  # 8
        (9, TType.I32, 'retryCnt', None, 3, ),  # 9
    )

    # Defaults for type, mode and retryCnt are pulled from thrift_spec above.
    def __init__(self, type=thrift_spec[1][4], mode=thrift_spec[2][4], url=None, params=None, headers=None, timestamp=None, expire=None, timeout=None, retryCnt=thrift_spec[9][4],):
        self.type = type
        self.mode = mode
        self.url = url
        self.params = params
        self.headers = headers
        self.timestamp = timestamp
        self.expire = expire
        self.timeout = timeout
        self.retryCnt = retryCnt

    def read(self, iprot):
        # Fast path: C-accelerated decode when protocol/transport allow it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Pure-Python path: walk fields until STOP; unknown fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.BYTE:
                    self.type = iprot.readByte();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BYTE:
                    self.mode = iprot.readByte();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.url = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.MAP:
                    # string->string map of request parameters
                    self.params = {}
                    (_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
                    for _i4 in xrange(_size0):
                        _key5 = iprot.readString();
                        _val6 = iprot.readString();
                        self.params[_key5] = _val6
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.MAP:
                    # string->string map of HTTP headers
                    self.headers = {}
                    (_ktype8, _vtype9, _size7 ) = iprot.readMapBegin()
                    for _i11 in xrange(_size7):
                        _key12 = iprot.readString();
                        _val13 = iprot.readString();
                        self.headers[_key12] = _val13
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.I64:
                    self.timestamp = iprot.readI64();
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.I64:
                    self.expire = iprot.readI64();
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.I64:
                    self.timeout = iprot.readI64();
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                if ftype == TType.I32:
                    self.retryCnt = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path mirror of read(); otherwise emit each non-None field.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('RestMessage')
        if self.type is not None:
            oprot.writeFieldBegin('type', TType.BYTE, 1)
            oprot.writeByte(self.type)
            oprot.writeFieldEnd()
        if self.mode is not None:
            oprot.writeFieldBegin('mode', TType.BYTE, 2)
            oprot.writeByte(self.mode)
            oprot.writeFieldEnd()
        if self.url is not None:
            oprot.writeFieldBegin('url', TType.STRING, 3)
            oprot.writeString(self.url)
            oprot.writeFieldEnd()
        if self.params is not None:
            oprot.writeFieldBegin('params', TType.MAP, 4)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.params))
            for kiter14,viter15 in self.params.items():
                oprot.writeString(kiter14)
                oprot.writeString(viter15)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.headers is not None:
            oprot.writeFieldBegin('headers', TType.MAP, 5)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))
            for kiter16,viter17 in self.headers.items():
                oprot.writeString(kiter16)
                oprot.writeString(viter17)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.timestamp is not None:
            oprot.writeFieldBegin('timestamp', TType.I64, 6)
            oprot.writeI64(self.timestamp)
            oprot.writeFieldEnd()
        if self.expire is not None:
            oprot.writeFieldBegin('expire', TType.I64, 7)
            oprot.writeI64(self.expire)
            oprot.writeFieldEnd()
        if self.timeout is not None:
            oprot.writeFieldBegin('timeout', TType.I64, 8)
            oprot.writeI64(self.timeout)
            oprot.writeFieldEnd()
        if self.retryCnt is not None:
            oprot.writeFieldBegin('retryCnt', TType.I32, 9)
            oprot.writeI32(self.retryCnt)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated stub: no required-field constraints are checked.
        return

    def __hash__(self):
        # NOTE(review): hashing self.params/self.headers raises TypeError
        # when they are populated dicts (unhashable); this generated __hash__
        # is only safe while the map fields are None.
        value = 17
        value = (value * 31) ^ hash(self.type)
        value = (value * 31) ^ hash(self.mode)
        value = (value * 31) ^ hash(self.url)
        value = (value * 31) ^ hash(self.params)
        value = (value * 31) ^ hash(self.headers)
        value = (value * 31) ^ hash(self.timestamp)
        value = (value * 31) ^ hash(self.expire)
        value = (value * 31) ^ hash(self.timeout)
        value = (value * 31) ^ hash(self.retryCnt)
        return value

    def __repr__(self):
        # Reflective repr over the instance dict (Python 2 iteritems).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Message:
"""
Attributes:
- topic
- key
- value
- hashId
- tags
- partitionId
- body
- version
"""
# Wire-format table: (field id, thrift type, name, nested spec, default).
# partitionId (field 6) defaults to -1 per the IDL.
thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'topic', None, None, ),  # 1
    (2, TType.STRING, 'key', None, None, ),  # 2
    (3, TType.STRING, 'value', None, None, ),  # 3
    (4, TType.I64, 'hashId', None, None, ),  # 4
    (5, TType.STRING, 'tags', None, None, ),  # 5
    (6, TType.I32, 'partitionId', None, -1, ),  # 6
    (7, TType.STRING, 'body', None, None, ),  # 7
    (8, TType.STRING, 'version', None, None, ),  # 8
)
# partitionId's default (-1) is taken from thrift_spec rather than hard-coded.
def __init__(self, topic=None, key=None, value=None, hashId=None, tags=None, partitionId=thrift_spec[6][4], body=None, version=None,):
    self.topic = topic
    self.key = key
    self.value = value
    self.hashId = hashId
    self.tags = tags
    self.partitionId = partitionId
    self.body = body
    self.version = version
def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
        fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
        return
    # Pure-Python path: walk fields until STOP; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 1:
            if ftype == TType.STRING:
                self.topic = iprot.readString();
            else:
                iprot.skip(ftype)
        elif fid == 2:
            if ftype == TType.STRING:
                self.key = iprot.readString();
            else:
                iprot.skip(ftype)
        elif fid == 3:
            if ftype == TType.STRING:
                self.value = iprot.readString();
            else:
                iprot.skip(ftype)
        elif fid == 4:
            if ftype == TType.I64:
                self.hashId = iprot.readI64();
            else:
                iprot.skip(ftype)
        elif fid == 5:
            if ftype == TType.STRING:
                self.tags = iprot.readString();
            else:
                iprot.skip(ftype)
        elif fid == 6:
            if ftype == TType.I32:
                self.partitionId = iprot.readI32();
            else:
                iprot.skip(ftype)
        elif fid == 7:
            if ftype == TType.STRING:
                self.body = iprot.readString();
            else:
                iprot.skip(ftype)
        elif fid == 8:
            if ftype == TType.STRING:
                self.version = iprot.readString();
            else:
                iprot.skip(ftype)
        else:
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
    # Fast path mirror of read(); otherwise emit each non-None field.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
    oprot.writeStructBegin('Message')
    if self.topic is not None:
        oprot.writeFieldBegin('topic', TType.STRING, 1)
        oprot.writeString(self.topic)
        oprot.writeFieldEnd()
    if self.key is not None:
        oprot.writeFieldBegin('key', TType.STRING, 2)
        oprot.writeString(self.key)
        oprot.writeFieldEnd()
    if self.value is not None:
        oprot.writeFieldBegin('value', TType.STRING, 3)
        oprot.writeString(self.value)
        oprot.writeFieldEnd()
    if self.hashId is not None:
        oprot.writeFieldBegin('hashId', TType.I64, 4)
        oprot.writeI64(self.hashId)
        oprot.writeFieldEnd()
    if self.tags is not None:
        oprot.writeFieldBegin('tags', TType.STRING, 5)
        oprot.writeString(self.tags)
        oprot.writeFieldEnd()
    if self.partitionId is not None:
        oprot.writeFieldBegin('partitionId', TType.I32, 6)
        oprot.writeI32(self.partitionId)
        oprot.writeFieldEnd()
    if self.body is not None:
        oprot.writeFieldBegin('body', TType.STRING, 7)
        oprot.writeString(self.body)
        oprot.writeFieldEnd()
    if self.version is not None:
        oprot.writeFieldBegin('version', TType.STRING, 8)
        oprot.writeString(self.version)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    # Generated stub: no required-field constraints are checked.
    return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.topic)
value = (value * 31) ^ hash(self.key)
value = | |
# standard lib imports
import zipfile
import os
import shutil
import re
from xml.etree import cElementTree as ET
# local lib imports
from .database import index2address
def xml_namespace(file):
    """
    Takes an xml file and returns the root namespace as a dict

    :param str file: xml file path
    :return dict: dictionary of root namespace
    """

    collected = []
    # Only "start-ns" events carry (prefix, uri) pairs; the other requested
    # events are deliberately ignored.  An empty prefix is the document's
    # default namespace and is keyed as 'default'.
    for event, node in ET.iterparse(file, ("start", "start-ns", "end-ns")):
        if event == "start-ns":
            prefix, uri = node
            collected.append(("default" if prefix == "" else prefix, uri))
    return dict(collected)
def writexl(db, path):
    """
    Writes an excel file from pylightxl.Database

    :param pylightxl.Database db: database contains sheetnames, and their data
    :param str path: file output path
    :return: None
    """

    if os.path.isfile(path):
        # write to existing excel
        # TODO: handle for when the file is opened by user
        alt_writer(db, path)
    else:
        # write to new excel
        new_writer(db, path)
def alt_writer(db, path):
    """
    Writes to an existing excel file. Only injects cell overwrites or new/removed sheets

    :param pylightxl.Database db: database contains sheetnames, and their data
    :param str path: file output path
    :return: None
    """

    # Files touched and why:
    # app.xml: number of sheets and sheet names
    # xl/_rels/.rels: rId# order doesnt matter just needs to match on workbook.xml and sheet location
    # workbook.xml: rId# match .rels, order_id, sheet name
    # sharedStrings.xml: count/uniqueCount, strings (this has to be parsed before sheet#.xml are worked to populate string IDs
    #   if one doesnt exist, create one
    # sheet#.xml: cell values
    # [Content_Types].xml: add/remove sheet#.xml locations and sharedStrings.xml

    # NOTE(review): 'pylightxl_temp' is a fixed directory relative to the CWD,
    # so concurrent writes (or a crash mid-way) can collide -- confirm callers
    # never run this in parallel.

    # have to extract all first to modify
    with zipfile.ZipFile(path, 'r') as f:
        f.extractall('pylightxl_temp')
    # rewrite app.xml so sheet count/names match the database
    text = alt_app_text(db, 'pylightxl_temp/docProps/app.xml')
    with open('pylightxl_temp/docProps/app.xml', 'w') as f:
        f.write(text)
    # workbook.xml is regenerated from scratch
    text = new_workbook_text(db)
    with open('pylightxl_temp/xl/workbook.xml', 'w') as f:
        f.write(text)
    # rename sheet#.xml to temp to prevent overwriting
    for file in os.listdir('pylightxl_temp/xl/worksheets'):
        if '.xml' in file:
            old_name = 'pylightxl_temp/xl/worksheets/' + file
            new_name = 'pylightxl_temp/xl/worksheets/' + 'temp_' + file
            os.rename(old_name, new_name)
    # get filename to xml rId associations
    dir_path = '/'.join(path.split('/')[:-1])
    sheetref = alt_getsheetref(dir_path)
    existing_sheetnames = [d['name'] for d in sheetref.values()]
    # sheets are renumbered 1..N in database order
    for shID, sheet_name in enumerate(db.ws_names, 1):
        if sheet_name in existing_sheetnames:
            # get the original sheet
            for subdict in sheetref.values():
                if subdict['name'] == sheet_name:
                    filename = 'temp_' + subdict['filename']
            # rewrite the sheet as if it was new
            text = new_worksheet_text(db, sheet_name)
            # feed altered text to new sheet based on db indexing order
            with open('pylightxl_temp/xl/worksheets/sheet{}.xml'.format(shID), 'w') as f:
                f.write(text)
            # remove temp xml sheet file
            os.remove('pylightxl_temp/xl/worksheets/{}'.format(filename))
        else:
            # this sheet is new, create a new sheet
            text = new_worksheet_text(db, sheet_name)
            with open('pylightxl_temp/xl/worksheets/sheet{shID}.xml'.format(shID=shID), 'w') as f:
                f.write(text)
    # this has to come after sheets for db._sharedStrings to be populated
    text = new_workbookrels_text(db)
    with open('pylightxl_temp/xl/_rels/workbook.xml.rels', 'w') as f:
        f.write(text)
    if os.path.isfile('pylightxl_temp/xl/sharedStrings.xml'):
        # sharedStrings is always recreated from db._sharedStrings since all sheets are rewritten
        os.remove('pylightxl_temp/xl/sharedStrings.xml')
    text = new_sharedStrings_text(db)
    with open('pylightxl_temp/xl/sharedStrings.xml', 'w') as f:
        f.write(text)
    text = new_content_types_text(db)
    with open('pylightxl_temp/[Content_Types].xml', 'w') as f:
        f.write(text)
    # cleanup files that would cause a "repair" workbook
    try:
        shutil.rmtree('./pylightxl_temp/xl/ctrlProps')
    except FileNotFoundError:
        pass
    try:
        shutil.rmtree('./pylightxl_temp/xl/drawings')
    except FileNotFoundError:
        pass
    try:
        shutil.rmtree('./pylightxl_temp/xl/printerSettings')
    except FileNotFoundError:
        pass
    # remove existing file
    os.remove(path)
    filename = path.split('/')[-1]
    # log old wd before changing it to temp folder for zipping
    old_dir = os.getcwd()
    # wd must be change to be within the temp folder to get zipfile to prevent the top level temp folder
    # from being zipped as well
    os.chdir('pylightxl_temp')
    with zipfile.ZipFile(filename, 'w') as f:
        for root, dirs, files in os.walk('.'):
            for file in files:
                # top level "with" statement already creates a excel file that is seen by os.walk
                # this check skips that empty zip file from being zipped as well
                if file != filename:
                    f.write(os.path.join(root, file))
    # move the zipped up file out of the temp folder
    # NOTE(review): shutil.move fails if a same-named file already exists in
    # old_dir on some platforms -- verify, since path was just removed above.
    shutil.move(filename, old_dir)
    os.chdir(old_dir)
    # remove temp folder
    shutil.rmtree('./pylightxl_temp')
def alt_app_text(db, filepath):
    """
    Takes a docProps/app.xml and returns a db altered text version of the xml

    :param pylightxl.Database db: pylightxl database that contains data to update xml file
    :param str filepath: file path for docProps/app.xml
    :return str: returns the updated xml text
    """

    # extract text from existing app.xml
    # register the file's own namespaces so serialization keeps its prefixes
    ns = xml_namespace(filepath)
    for prefix, uri in ns.items():
        ET.register_namespace(prefix,uri)
    tree = ET.parse(filepath)
    root = tree.getroot()
    # sheet sizes: both the HeadingPairs count and the TitlesOfParts vector
    # size must equal the number of sheets in the database
    tag_i4 = root.findall('./default:HeadingPairs//vt:i4', ns)[0]
    tag_i4.text = str(len(db.ws_names))
    tag_titles_vector = root.findall('./default:TitlesOfParts/vt:vector', ns)[0]
    tag_titles_vector.set('size', str(len(db.ws_names)))
    # sheet names, remove them then add new ones
    for sheet in root.findall('./default:TitlesOfParts//vt:lpstr', ns):
        root.find('./default:TitlesOfParts/vt:vector', ns).remove(sheet)
    for sheet_name in db.ws_names:
        element = ET.Element('vt:lpstr')
        element.text = sheet_name
        root.find('./default:TitlesOfParts/vt:vector', ns).append(element)
    # reset default namespace (register_namespace above bound it to 'default')
    ET.register_namespace('', ns['default'])
    # roll up entire xml file as text, re-adding the xml declaration
    text = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n' + ET.tostring(root, encoding='unicode')
    return text
def alt_workbookrels_text(db, filepath):
    """
    Takes a xl/_rels/workbook.xml.rels and returns a db altered text version of the xml

    :param pylightxl.Database db: pylightxl database that contains data to update xml file
    :param str filepath: file path for xl/_rels/workbook.xml.rels
    :return str: returns the updated xml text
    """

    # extract text from existing rels file; register its namespaces so
    # serialization keeps the original prefixes
    ns = xml_namespace(filepath)
    for prefix, uri in ns.items():
        ET.register_namespace(prefix,uri)
    tree = ET.parse(filepath)
    root = tree.getroot()
    # hold existing non-sheet relations (calcChain, sharedStrings, etc.)
    elements_nonsheet = []
    # sheet type that is replaced by actual xml read sheet type
    element_sheet_type = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet'
    # book keeping to check if a sharedStrings was read in the elements_nonsheet
    # (if no and db has sharedStrings then we need to add a sharedStrings in)
    bool_sharedStrings = False
    for element in root.findall('./default:Relationship', ns):
        if 'worksheets/sheet' not in element.get('Target'):
            if 'sharedStrings.xml' == element.get('Target'):
                # there already is a sharedStrings.xml tag in this rels file, dont add another
                bool_sharedStrings = True
            # log existing non-sheet elements to append at the end of rId#s after sheets
            elements_nonsheet.append(element)
            root.remove(element)
        else:
            # sheet names, remove them then add new ones
            element_sheet_type = element.get('Type')
            root.remove(element)
    # these rId's have to match rId's on workbook.xml
    for sheet_num, sheet_name in enumerate(db.ws_names, 1):
        element = ET.Element("Relationship")
        element.set('Target', 'worksheets/sheet{sheet_num}.xml'.format(sheet_num=sheet_num))
        element.set('Type', element_sheet_type)
        element.set('Id', 'rId{sheet_num}'.format(sheet_num=sheet_num))
        root.append(element)
    # these rId's are not referenced on any of the xml files, they are incremented after sheets
    for i, element in enumerate(elements_nonsheet, 1):
        rId = len(db.ws_names) + i
        element.set('Id', 'rId{rId}'.format(rId=rId))
        root.append(element)
    # BUGFIX: the attribute was misspelled 'db._sharderStrings' (AttributeError
    # at runtime) and the freshly built Relationship was never appended to
    # root, so the sharedStrings relationship was silently dropped.
    if bool_sharedStrings is False and db._sharedStrings:
        element = ET.Element('Relationship')
        element.set('Target', 'sharedStrings.xml')
        element.set('Type', 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings')
        element.set('Id', 'rId{rId}'.format(rId = len(db.ws_names) + len(elements_nonsheet) + 1))
        root.append(element)
    # reset default namespace
    ET.register_namespace('', ns['default'])
    # roll up entire xml file as text
    text = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n' + ET.tostring(root, encoding='unicode')
    return text
def alt_workbook_text(db, filepath):
    """Return xl/workbook.xml text with its <sheets> list rebuilt from the db.

    :param pylightxl.Database db: pylightxl database that contains data to update xml file
    :param str filepath: file path for xl/workbook.xml
    :return str: returns the updated xml text
    """

    # register every namespace found in the file so ET keeps the prefixes
    ns = xml_namespace(filepath)
    for prefix, uri in ns.items():
        ET.register_namespace(prefix, uri)
    root = ET.parse(filepath).getroot()
    sheets_tag = root.find('./default:sheets', ns)
    # drop all existing <sheet> entries before rebuilding the list
    for stale_sheet in root.findall('./default:sheets/default:sheet', ns):
        sheets_tag.remove(stale_sheet)
    # since all 'r' namespace tags were deleted, write r:id through a QName so
    # the linked namespace is emitted correctly
    qname_r_id = ET.QName(ns['r'], 'id')
    # one <sheet> per database worksheet, numbered from 1 to match the rels
    for sheet_num, sheet_name in enumerate(db.ws_names, 1):
        fresh_sheet = ET.Element('sheet')
        fresh_sheet.set(qname_r_id, 'rId{}'.format(sheet_num))
        fresh_sheet.set('sheetId', str(sheet_num))
        fresh_sheet.set('name', str(sheet_name))
        sheets_tag.append(fresh_sheet)
    # restore the default (unprefixed) namespace before serializing
    ET.register_namespace('', ns['default'])
    header = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
    return header + ET.tostring(root, encoding='unicode')
def alt_getsheetref(path):
"""
Takes a file path for the temp pylightxl uncompressed excel xml files and returns the un-altered
filenames and rIds
:param str path: file path to pylightxl_temp
:return dict: dictionary of filenames {rId: {name: '', filename: ''}}
"""
sheetref = {}
# -------------------------------------------------------------
# get worksheet filenames and Ids
ns = xml_namespace(path + 'pylightxl_temp/xl/_rels/workbook.xml.rels')
for prefix, uri in ns.items():
ET.register_namespace(prefix,uri)
tree = ET.parse(path + 'pylightxl_temp/xl/_rels/workbook.xml.rels')
root = tree.getroot()
for element in root.findall('./default:Relationship', ns):
if 'worksheets/sheet' in element.get('Target'):
Id = element.get('Id')
filename = element.get('Target').split('/')[1].replace('"', '')
sheetref.update({Id: {'name': '', 'filename': filename}})
# -------------------------------------------------------------
# get custom worksheet names
ns | |
noqa: E501
response_type = 'LabelResource'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/labels', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_labels_put(self, datacenter_id, key, label, **kwargs):  # noqa: E501
    """Modify a Label of Data Center  # noqa: E501

    Updates the value stored under *key* on the given data center.  The call
    is synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.  # noqa: E501

    >>> thread = api.datacenters_labels_put(datacenter_id, key, label, async_req=True)
    >>> result = thread.get()

    :param datacenter_id: The unique ID of the Data Center (required)
    :type datacenter_id: str
    :param key: The key of the Label (required)
    :type key: str
    :param label: Modified Label (required)
    :type label: LabelResource
    :param pretty: Controls whether response is pretty-printed (with indentation and new lines)
    :type pretty: bool
    :param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
    :type depth: int
    :param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
    :type x_contract_number: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object (or the request thread when async).
    :rtype: LabelResource
    """
    # This convenience wrapper always strips the (status, headers) envelope
    # and returns just the deserialized body.
    kwargs['_return_http_data_only'] = True
    return self.datacenters_labels_put_with_http_info(datacenter_id, key, label, **kwargs)  # noqa: E501
def datacenters_labels_put_with_http_info(self, datacenter_id, key, label, **kwargs):  # noqa: E501
    """Modify a Label of Data Center  # noqa: E501

    This will modify the value of the label on a data center.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.datacenters_labels_put_with_http_info(datacenter_id, key, label, async_req=True)
    >>> result = thread.get()

    :param datacenter_id: The unique ID of the Data Center (required)
    :type datacenter_id: str
    :param key: The key of the Label (required)
    :type key: str
    :param label: Modified Label (required)
    :type label: LabelResource
    :param pretty: Controls whether response is pretty-printed (with indentation and new lines)
    :type pretty: bool
    :param depth: Controls the details depth of response objects. Must be between 0 and 10.
    :type depth: int
    :param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
    :type x_contract_number: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(LabelResource, status_code(int), headers(HTTPHeaderDict))
    """
    # Positional arguments first; recognized keyword arguments are merged
    # in below, and anything unknown is rejected immediately.
    local_var_params = {
        'self': self,
        'datacenter_id': datacenter_id,
        'key': key,
        'label': label,
    }

    all_params = [
        'datacenter_id', 'key', 'label',
        'pretty', 'depth', 'x_contract_number',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout', '_request_auth', 'response_type',
    ]

    for param_name, param_val in six.iteritems(kwargs):
        if param_name not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method datacenters_labels_put" % param_name
            )
        local_var_params[param_name] = param_val

    if self.api_client.client_side_validation:
        # Required-parameter checks (absent or explicitly None both fail).
        if local_var_params.get('datacenter_id') is None:
            raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_labels_put`")  # noqa: E501
        if local_var_params.get('key') is None:
            raise ApiValueError("Missing the required parameter `key` when calling `datacenters_labels_put`")  # noqa: E501
        if local_var_params.get('label') is None:
            raise ApiValueError("Missing the required parameter `label` when calling `datacenters_labels_put`")  # noqa: E501
        # Range checks on the optional `depth` query parameter.
        if 'depth' in local_var_params and local_var_params['depth'] > 10:
            raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_labels_put`, must be a value less than or equal to `10`")  # noqa: E501
        if 'depth' in local_var_params and local_var_params['depth'] < 0:
            raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_labels_put`, must be a value greater than or equal to `0`")  # noqa: E501

    collection_formats = {}

    # Path placeholders.
    path_params = {}
    for wire_name, arg_name in (('datacenterId', 'datacenter_id'), ('key', 'key')):
        if arg_name in local_var_params:
            path_params[wire_name] = local_var_params[arg_name]

    # Optional query parameters are skipped when absent or None.
    query_params = [(qp, local_var_params[qp])
                    for qp in ('pretty', 'depth')
                    if local_var_params.get(qp) is not None]

    # Header parameters.
    header_params = {}
    if 'x_contract_number' in local_var_params:
        header_params['X-Contract-Number'] = local_var_params['x_contract_number']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The (modified) Label payload travels in the request body.
    body_params = local_var_params.get('label')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Basic Authentication', 'Token Authentication']  # noqa: E501

    # Allow the caller to override the deserialization target.
    response_type = kwargs.get('response_type', 'LabelResource')

    return self.api_client.call_api(
        '/datacenters/{datacenterId}/labels/{key}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=response_type,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def datacenters_servers_labels_delete(self, datacenter_id, server_id, key, **kwargs): # noqa: E501
"""Delete a Label from Server # noqa: E501
This will remove a label from the server. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_servers_labels_delete(datacenter_id, server_id, key, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the Datacenter (required)
:type datacenter_id: str
:param server_id: The unique ID of the Server (required)
:type server_id: str
:param key: The key of the Label (required)
:type key: str
:param pretty: Controls whether response is pretty-printed (with indentation and new lines)
:type pretty: bool
:param depth: Controls the details depth of response objects. Eg. GET /datacenters/[ID] - depth=0: only direct properties are included. Children (servers etc.) are not included - depth=1: direct properties and children references are included - depth=2: direct properties and children properties are included - depth=3: direct properties and children properties and children's children are included - depth=... and so on
:type depth: int
:param x_contract_number: Users having more than 1 contract need to provide contract number, against which all API requests should be executed
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will | |
# encoding: UTF-8
'''
vn.xspeed的gateway接入
'''
import os
import json
import time
from copy import copy
from vnxspeedmd import MdApi
from vnxspeedtd import TdApi
from xspeedDataType import *
from vtGateway import *
# Mappings between VT constants and the XSPEED (DFITC) API constants.

# Price/order type
priceTypeMap = {
    PRICETYPE_LIMITPRICE: defineDict["DFITC_LIMITORDER"],
    PRICETYPE_MARKETPRICE: defineDict["DFITC_MKORDER"],
}
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}

# Direction (buy/sell)
directionMap = {
    DIRECTION_LONG: defineDict['DFITC_SPD_BUY'],
    DIRECTION_SHORT: defineDict['DFITC_SPD_SELL'],
}
directionMapReverse = {v: k for k, v in directionMap.items()}

# Offset (open/close). OFFSET_CLOSEYESTERDAY shares the plain close code,
# so in the reverse map DFITC_SPD_CLOSE resolves to one of the two keys.
offsetMap = {
    OFFSET_OPEN: defineDict['DFITC_SPD_OPEN'],
    OFFSET_CLOSE: defineDict['DFITC_SPD_CLOSE'],
    OFFSET_CLOSETODAY: defineDict['DFITC_SPD_CLOSETODAY'],
    OFFSET_CLOSEYESTERDAY: defineDict['DFITC_SPD_CLOSE'],
}
offsetMapReverse = {v: k for k, v in offsetMap.items()}

# Exchange codes
exchangeMap = {
    EXCHANGE_CFFEX: defineDict['DFITC_EXCHANGE_CFFEX'],
    EXCHANGE_SHFE: defineDict['DFITC_EXCHANGE_SHFE'],
    EXCHANGE_CZCE: defineDict['DFITC_EXCHANGE_CZCE'],
    EXCHANGE_DCE: defineDict['DFITC_EXCHANGE_DCE'],
    EXCHANGE_UNKNOWN: '',
}
exchangeMapReverse = {v: k for k, v in exchangeMap.items()}

# Order status
orderStatusMap = {
    STATUS_ALLTRADED: defineDict["DFITC_SPD_FILLED"],
    STATUS_PARTTRADED: defineDict["DFITC_SPD_PARTIAL"],
    STATUS_NOTTRADED: defineDict["DFITC_SPD_IN_QUEUE"],
    STATUS_CANCELLED: defineDict["DFITC_SPD_CANCELED"],
}
orderStatusMapReverse = {v: k for k, v in orderStatusMap.items()}
# A partially-filled-then-cancelled order is reported as cancelled.
orderStatusMapReverse[defineDict["DFITC_SPD_PARTIAL_CANCELED"]] = STATUS_CANCELLED
########################################################################
class XspeedGateway(VtGateway):
    """VT gateway implementation for the XSPEED (DFITC) counter."""

    #----------------------------------------------------------------------
    def __init__(self, eventEngine, gatewayName='XSPEED'):
        """Constructor"""
        super(XspeedGateway, self).__init__(eventEngine, gatewayName)

        self.mdApi = XspeedMdApi(self)     # market-data API
        self.tdApi = XspeedTdApi(self)     # trading API

        self.mdConnected = False           # md API connected; True after login completes
        self.tdConnected = False           # td API connected

        self.qryEnabled = False            # whether the periodic query loop runs

    #----------------------------------------------------------------------
    def connect(self):
        """Read the JSON connection settings and connect both APIs."""
        fileName = self.gatewayName + '_connect.json'
        fileName = os.getcwd() + '/xspeedGateway/' + fileName

        try:
            # Use a context manager so the settings file is always closed;
            # the original Python-2-only file() handle was never closed.
            with open(fileName) as f:
                setting = json.load(f)
        except IOError:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'读取连接配置出错,请检查'
            self.onLog(log)
            return

        try:
            accountID = str(setting['accountID'])
            password = str(setting['password'])
            tdAddress = str(setting['tdAddress'])
            mdAddress = str(setting['mdAddress'])
        except KeyError:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'连接配置缺少字段,请检查'
            self.onLog(log)
            return

        # Connect the market-data and trading endpoints.
        self.mdApi.connect(accountID, password, mdAddress)
        self.tdApi.connect(accountID, password, tdAddress)

        # Initialize and start the periodic query loop (if enabled).
        self.initQuery()

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data."""
        self.mdApi.subscribe(subscribeReq)

    #----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order; returns the vtOrderID string."""
        return self.tdApi.sendOrder(orderReq)

    #----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel a working order."""
        self.tdApi.cancelOrder(cancelOrderReq)

    #----------------------------------------------------------------------
    def qryAccount(self):
        """Query account capital."""
        self.tdApi.qryAccount()

    #----------------------------------------------------------------------
    def qryPosition(self):
        """Query open positions."""
        self.tdApi.qryPosition()

    #----------------------------------------------------------------------
    def close(self):
        """Close whichever API connections are up."""
        if self.mdConnected:
            self.mdApi.close()
        if self.tdConnected:
            self.tdApi.close()

    #----------------------------------------------------------------------
    def initQuery(self):
        """Initialize the periodic account/position query loop."""
        if self.qryEnabled:
            # Query functions polled in round-robin fashion.
            self.qryFunctionList = [self.qryAccount, self.qryPosition]

            self.qryCount = 0           # timer ticks since the last query
            self.qryTrigger = 2         # ticks needed to trigger a query
            self.qryNextFunction = 0    # index of the next query function

            self.startQuery()

    #----------------------------------------------------------------------
    def query(self, event):
        """Timer-event handler driving the round-robin queries."""
        self.qryCount += 1

        if self.qryCount > self.qryTrigger:
            # Reset the countdown.
            self.qryCount = 0

            # Run the scheduled query function.
            function = self.qryFunctionList[self.qryNextFunction]
            function()

            # Advance to the next function, wrapping at the end of the list.
            self.qryNextFunction += 1
            if self.qryNextFunction == len(self.qryFunctionList):
                self.qryNextFunction = 0

    #----------------------------------------------------------------------
    def startQuery(self):
        """Register the query handler on the timer event."""
        self.eventEngine.register(EVENT_TIMER, self.query)

    #----------------------------------------------------------------------
    def setQryEnabled(self, qryEnabled):
        """Enable or disable the periodic query loop."""
        self.qryEnabled = qryEnabled
########################################################################
class XspeedMdApi(MdApi):
    """Market-data API implementation for XSPEED."""

    #----------------------------------------------------------------------
    def __init__(self, gateway):
        """Constructor"""
        super(XspeedMdApi, self).__init__()

        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name

        self.reqID = EMPTY_INT                  # request sequence number

        self.connectionStatus = False           # connection status
        self.loginStatus = False                # login status

        self.subscribedSymbols = set()          # cached subscription requests

        self.accountID = EMPTY_STRING           # account
        self.password = EMPTY_STRING            # password
        self.address = EMPTY_STRING             # server address

    #----------------------------------------------------------------------
    def connect(self, accountID, password, address):
        """Store credentials and connect (or just log in if connected)."""
        self.accountID = accountID
        self.password = password
        self.address = address

        # No server connection yet: create one; a successful connection
        # triggers onFrontConnected, which performs the login.
        if not self.connectionStatus:
            self.createDFITCMdApi()
            self.init(self.address)
        # Already connected: log in directly if not yet logged in.
        else:
            if not self.loginStatus:
                self.login()

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data for one instrument.

        If called before login completes, the request is only cached and
        is replayed automatically after login (see onRspUserLogin).
        """
        if self.loginStatus:
            self.reqID += 1
            self.subscribeMarketData(str(subscribeReq.symbol), self.reqID)
        self.subscribedSymbols.add(subscribeReq)

    #----------------------------------------------------------------------
    def login(self):
        """Log in, provided an account ID and password have been set."""
        if self.accountID and self.password:
            self.reqID += 1
            req = {}
            req['accountID'] = self.accountID
            # Fix: this field had been replaced by an anonymization
            # placeholder ('<PASSWORD>', a syntax error); send the stored
            # password alongside the account ID.
            req['passwd'] = self.password
            req['lRequestID'] = self.reqID
            self.reqUserLogin(req)

    #----------------------------------------------------------------------
    def close(self):
        """Shut down the market-data API."""
        self.exit()

    #----------------------------------------------------------------------
    def onFrontConnected(self):
        """Callback: server connection established; log and log in."""
        self.connectionStatus = True

        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'行情服务器连接成功'
        self.gateway.onLog(log)

        self.login()

    #----------------------------------------------------------------------
    def onFrontDisconnected(self, i):
        """Callback: server connection lost; reset state flags."""
        self.connectionStatus = False
        self.loginStatus = False
        self.gateway.mdConnected = False

        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'行情服务器连接断开'
        self.gateway.onLog(log)

    #----------------------------------------------------------------------
    def onRspUserLogin(self, data, error):
        """Login callback: on success replay the cached subscriptions."""
        if error['nErrorID'] == 0:
            self.loginStatus = True
            self.gateway.mdConnected = True

            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'行情服务器登录完成'
            self.gateway.onLog(log)

            # Re-subscribe everything requested before/between logins.
            for subscribeReq in self.subscribedSymbols:
                self.subscribe(subscribeReq)
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['nErrorID']
            err.errorMsg = error['errorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserLogout(self, data, error):
        """Logout callback."""
        if error['nErrorID'] == 0:
            self.loginStatus = False
            # Fix: this previously cleared gateway.tdConnected — a
            # copy-paste slip from the trading API; the market-data flag
            # is the one owned by this class.
            self.gateway.mdConnected = False

            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'行情服务器登出完成'
            self.gateway.onLog(log)
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['nErrorID']
            err.errorMsg = error['errorMsg'].decode('gbk')
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspError(self, error):
        """Generic error callback: forward the error to the gateway."""
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = error['nErrorID']
        err.errorMsg = error['errorMsg'].decode('gbk')
        self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspSubMarketData(self, data, error):
        """Subscription ack: ignored."""
        pass

    #----------------------------------------------------------------------
    def onRspUnSubMarketData(self, data, error):
        """Unsubscription ack: ignored."""
        pass

    #----------------------------------------------------------------------
    def onRspSubForQuoteRsp(self, data, error):
        """For-quote subscription ack: ignored."""
        pass

    #----------------------------------------------------------------------
    def onRspUnSubForQuoteRsp(self, data, error):
        """For-quote unsubscription ack: ignored."""
        pass

    #----------------------------------------------------------------------
    def onMarketData(self, data):
        """Tick callback: convert the raw dict into a VtTickData event."""
        tick = VtTickData()
        tick.gatewayName = self.gatewayName

        tick.symbol = data['instrumentID']
        tick.exchange = exchangeMapReverse.get(data['exchangeID'], u'未知')
        tick.vtSymbol = tick.symbol  # '.'.join([tick.symbol, EXCHANGE_UNKNOWN])

        tick.lastPrice = data['lastPrice']
        tick.volume = data['Volume']
        tick.openInterest = data['openInterest']
        # NOTE(review): Python-2 integer division by 100 keeps only tenths
        # of a second from UpdateMillisec — confirm before a Py3 port.
        tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
        tick.date = data['tradingDay']

        tick.openPrice = data['openPrice']
        tick.highPrice = data['highestPrice']
        tick.lowPrice = data['lowestPrice']
        tick.preClosePrice = data['preClosePrice']

        tick.upperLimit = data['upperLimitPrice']
        tick.lowerLimit = data['lowerLimitPrice']

        tick.bidPrice1 = data['BidPrice1']
        tick.bidVolume1 = data['BidVolume1']
        tick.askPrice1 = data['AskPrice1']
        tick.askVolume1 = data['AskVolume1']

        self.gateway.onTick(tick)

    #----------------------------------------------------------------------
    def onCustomMarketData(self, data):
        """Custom market-data callback: ignored."""
        pass

    #----------------------------------------------------------------------
    def onRtnForQuoteRsp(self, data):
        """For-quote notification: ignored."""
        pass

    #----------------------------------------------------------------------
    def onRspTradingDay(self, data):
        """Trading-day response: ignored."""
        pass
########################################################################
class XspeedTdApi(TdApi):
"""XSPEED交易API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
    """Constructor: wire up the owning gateway and zero all session state."""
    super(XspeedTdApi, self).__init__()

    self.gateway = gateway                  # owning gateway object
    self.gatewayName = gateway.gatewayName  # gateway name used in events

    self.reqID = EMPTY_INT                  # request sequence number
    self.localID = EMPTY_INT                # local order sequence number

    self.connectionStatus = False           # connection status
    self.loginStatus = False                # login status

    self.accountID = EMPTY_STRING           # account
    self.password = EMPTY_STRING            # password
    self.address = EMPTY_STRING             # trading server address
    self.sessionID = EMPTY_INT              # session ID assigned at login

    self.posDict = {}                       # cache of position data
    self.orderDict = {}                     # cache of order data
    self.spdOrderDict = {}                  # local order ID -> counter (spd) order ID
#----------------------------------------------------------------------
def connect(self, accountID, password, address):
    """Store the credentials, then open the connection or just log in."""
    self.accountID = accountID
    self.password = password
    self.address = address

    if self.connectionStatus:
        # Already connected: only authenticate if needed.
        if not self.loginStatus:
            self.login()
    else:
        # First call: create the API instance and start the connection;
        # success triggers onFrontConnected, which performs the login.
        self.createDFITCTraderApi()
        self.init(self.address)
#----------------------------------------------------------------------
def login(self):
    """Log in to the trading server (requires accountID and password)."""
    if self.accountID and self.password:
        self.reqID += 1
        req = {}
        req['accountID'] = self.accountID
        # Fix: this field had been replaced by an anonymization
        # placeholder ('<PASSWORD>', a syntax error); send the stored
        # password alongside the account ID.
        req['passwd'] = self.password
        req['lRequestID'] = self.reqID
        self.reqUserLogin(req)
#----------------------------------------------------------------------
def qryAccount(self):
    """Request the account capital snapshot."""
    self.reqID += 1
    self.reqQryCustomerCapital({
        'lRequestID': self.reqID,
        'accountID': self.accountID,
    })
#----------------------------------------------------------------------
def qryPosition(self):
    """Request the current open positions."""
    self.reqID += 1
    self.reqQryPosition({
        'lRequestID': self.reqID,
        'accountID': self.accountID,
    })
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
    """Place an order; returns the vtOrderID string ('' if unsupported)."""
    self.reqID += 1
    self.localID += 1

    req = {
        'instrumentID': orderReq.symbol,
        'insertPrice': orderReq.price,
        'orderAmount': orderReq.volume,
    }

    # Combinations this API cannot express are rejected by returning ''.
    try:
        req['orderType'] = priceTypeMap[orderReq.priceType]
        req['buySellType'] = directionMap[orderReq.direction]
        req['openCloseType'] = offsetMap[orderReq.offset]
    except KeyError:
        return ''

    req['localOrderID'] = self.localID
    req['accountID'] = self.accountID
    req['speculator'] = defineDict['DFITC_SPD_SPECULATOR']  # speculative order
    req['minMatchAmount'] = 1                               # minimum fill amount is 1
    req['lRequestID'] = self.reqID

    self.reqInsertOrder(req)

    # Return the order ID string so algorithms can track the order.
    return '.'.join([self.gatewayName, str(self.localID)])
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
    """Cancel a working order."""
    self.reqID += 1
    localID = int(cancelOrderReq.orderID)

    req = {
        'instrumentID': cancelOrderReq.symbol,
        'accountID': self.accountID,
        'lRequestID': self.reqID,
    }

    # Prefer the counter-assigned (spd) order ID when one has been seen;
    # otherwise fall back to our local order ID.
    if localID in self.spdOrderDict:
        req['spdOrderID'] = self.spdOrderDict[localID]
    else:
        req['localOrderID'] = localID

    self.reqCancelOrder(req)
#----------------------------------------------------------------------
def close(self):
    """Shut down the trading API connection."""
    self.exit()
#----------------------------------------------------------------------
def onFrontConnected(self):
    """Callback: trading server connection is up; log it and log in."""
    self.connectionStatus = True

    log = VtLogData()
    log.gatewayName, log.logContent = self.gatewayName, u'交易服务器连接成功'
    self.gateway.onLog(log)

    self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self, i):
    """Callback: trading server connection lost; clear all status flags."""
    self.connectionStatus = False
    self.loginStatus = False
    self.gateway.tdConnected = False

    log = VtLogData()
    log.gatewayName, log.logContent = self.gatewayName, u'交易服务器连接断开'
    self.gateway.onLog(log)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error):
    """Login callback: on success store the session ID, mark the gateway
    connected and query the instrument list; otherwise forward the error."""
    if error['nErrorID'] != 0:
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = error['nErrorID']
        err.errorMsg = error['errorMsg'].decode('gbk')
        self.gateway.onError(err)
        return

    self.sessionID = data['sessionID']
    self.loginStatus = True
    self.gateway.tdConnected = True

    log = VtLogData()
    log.gatewayName = self.gatewayName
    log.logContent = u'交易服务器登录完成'
    self.gateway.onLog(log)

    # Query the tradable instruments right after login.
    self.reqID += 1
    self.reqQryExchangeInstrument({'lRequestID': self.reqID})
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error) :
"""登出回报"""
# 如果登出成功,推送日志信息
if error['nErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登出完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = | |
+ 2):
sqarg = bd.array(eps_array[il] * bd.square(oms) - bd.square(gk),
dtype=bd.complex)
chi = bd.where(bd.real(sqarg) >= 0, bd.sqrt(sqarg),
1j * bd.sqrt(-sqarg))
chis.append(chi)
return bd.array(chis, dtype=bd.complex)
def _get_rad(self, gkr, omr, pol, clad):
    """Radiative-mode quantities over wave vectors 'gkr' at frequency
    'omr', for polarization 'pol', out-going into cladding 'clad'.

    Returns the (Xs, Ys) radiative-mode coefficients together with the
    per-layer chi values.
    """
    chis = self._get_chi(gkr, omr, self.eps_array)
    Xs, Ys = rad_modes(omr, gkr, self.eps_array, self.d_array, pol, clad)
    return (Xs, Ys, chis)
def _z_to_lind(self, z):
    """Return the index of the layer containing position ``z``.

    Index 0 is the lower cladding, 1..N_layers are the slab layers, and
    N_layers + 1 is the upper cladding.
    """
    layer_top = self.phc.claddings[0].z_max
    lind = 0
    # Walk upward through the slab layers until z lies below the top of
    # the current layer (or the layers are exhausted).
    while z > layer_top and lind < self.N_layers:
        lind += 1
        layer_top = self.phc.layers[lind - 1].z_max
    # Above the top-most slab layer: z is in the upper cladding.
    if lind == self.N_layers and z > layer_top:
        lind += 1
    return lind
def _compute_guided(self, g_array):
    """Compute the guided modes over the g-points in ``g_array`` using the
    slab_modes module, reshape the results per mode index, and append them
    to self.omegas_te/tm and self.coeffs_te/tm.

    Note: ``g_array`` is modified in place (boundary expansion below).
    """
    # Expand boundaries a bit to make sure we get all the modes
    # Note that the 'exact' computation still uses interpolation,
    # but the grid is defined by the actual gk values
    g_array -= 1e-6
    g_array[-1] += 2e-6
    self.g_array.append(g_array)

    # Even gmode_inds are TE modes, odd are TM modes.
    self.gmode_te = self.gmode_inds[np.remainder(self.gmode_inds, 2) == 0]
    self.gmode_tm = self.gmode_inds[np.remainder(self.gmode_inds, 2) != 0]

    # Transpose a ragged list-of-lists, dropping the None padding that
    # zip_longest inserts.
    reshape_list = lambda x: [list(filter(lambda y: y is not None, i))
                              for i in zip_longest(*x)]

    # 'exact' gradients differentiate through these arrays; 'approx' uses
    # the *_val copies (presumably detached values — confirm upstream).
    if self.gradients == 'exact':
        (e_a, d_a) = (self.eps_array, self.d_array)
    elif self.gradients == 'approx':
        (e_a, d_a) = (self.eps_array_val, self.d_array_val)

    if self.gmode_te.size > 0:
        (omegas_te, coeffs_te) = guided_modes(
            g_array, e_a, d_a, step=self.gmode_step,
            n_modes=1 + np.amax(self.gmode_te) // 2,
            tol=self.gmode_tol, pol='TE')
        # (Removed a dead 'omte = reshape_list(omegas_te)' temporary that
        # was computed and immediately discarded.)
        self.omegas_te.append(reshape_list(omegas_te))
        self.coeffs_te.append(reshape_list(coeffs_te))

    if self.gmode_tm.size > 0:
        (omegas_tm, coeffs_tm) = guided_modes(
            g_array, e_a, d_a, step=self.gmode_step,
            n_modes=1 + np.amax(self.gmode_tm) // 2,
            tol=self.gmode_tol, pol='TM')
        self.omegas_tm.append(reshape_list(omegas_tm))
        self.coeffs_tm.append(reshape_list(coeffs_tm))
def _compute_ft_tbt(self):
    """Compute the unique FT coefficients of the permittivity, eps(g-g'),
    for every layer in the PhC, assuming a TBT-initialized reciprocal
    lattice. Fills self.T1/self.T2 and stores the corresponding g-vectors
    in self.G1/self.G2.
    """
    (n1max, n2max) = (self.n1g, self.n2g)
    G1 = - self.gvec + self.gvec[:, [0]]
    G2 = np.zeros((2, n1max * n2max))
    I1 = - self._inds + self._inds[:, [0]]
    I2 = np.zeros((2, n1max * n2max), dtype=np.int_)

    # Initialize the FT coefficient lists; in the end the length of these
    # will be equal to the total number of layers in the PhC
    self.T1 = []
    self.T2 = []

    for ind1 in range(n1max):
        G2[:, ind1 * n2max:(ind1 + 1) * n2max] = \
            - self.gvec[:, [ind1 * n2max]] + self.gvec[:, range(n2max)]
        I2[:, ind1 * n2max:(ind1 + 1) * n2max] = \
            - self._inds[:, [ind1 * n2max]] + self._inds[:, range(n2max)]

    def _store(dest, T):
        # Append T, downcast to real when the imaginary part is negligible.
        # (This logic was previously duplicated for both layer types.)
        if bd.amax(bd.abs(bd.imag(T))) < 1e-10 * bd.amax(bd.abs(bd.real(T))):
            dest.append(bd.real(T))
        else:
            dest.append(T)

    for layer in [self.phc.claddings[0]] + self.phc.layers + \
            [self.phc.claddings[1]]:
        if layer.layer_type == 'shapes':
            # Shape layers are sampled at the raw g-vector differences.
            _store(self.T1, layer.compute_ft(G1))
            _store(self.T2, layer.compute_ft(G2))
        elif layer.layer_type == 'freeform':
            # Freeform layers are sampled at integer index differences.
            _store(self.T1, layer.compute_ft(I1))
            _store(self.T2, layer.compute_ft(I2))

    # Store the g-vectors to which T1 and T2 correspond
    self.G1 = G1
    self.G2 = G2
def _compute_ft_abs(self):
    """Compute the unique FT coefficients of the permittivity, eps(g-g'),
    for every layer in the PhC, assuming an abs-initialized reciprocal
    lattice. Fills self.eps_ft with one (Ng, Ng) matrix per layer.
    """
    # All pairwise differences g - g', flattened to a (2, Ng*Ng) grid.
    gx = self.gvec[0, :]
    gy = self.gvec[1, :]
    ggridx = (gx[np.newaxis, :] - gx[:, np.newaxis]).ravel()
    ggridy = (gy[np.newaxis, :] - gy[:, np.newaxis]).ravel()
    ng = gx.size

    self.eps_ft = []
    all_layers = [self.phc.claddings[0]] + self.phc.layers + \
        [self.phc.claddings[1]]
    for layer in all_layers:
        ft_flat = layer.compute_ft(np.vstack((ggridx, ggridy)))
        self.eps_ft.append(bd.reshape(ft_flat, (ng, ng)))
def _construct_mat(self, kind):
    """
    Construct the Hermitian matrix for diagonalization for a given k-point
    (index ``kind`` into self.kpoints). Fix: a leftover debug
    ``print(mat.dtype)`` has been removed.
    """
    # G + k vectors
    gkx = self.gvec[0, :] + self.kpoints[0, kind] + 1e-10
    gky = self.gvec[1, :] + self.kpoints[1, kind]
    gk = np.sqrt(np.square(gkx) + np.square(gky))

    # Compute the guided modes over gk if using the 'exact' method
    if self.gmode_compute.lower() == 'exact':
        t = time.time()
        g_array = np.sort(gk)
        self._compute_guided(g_array)
        self.t_guided += time.time() - t

    # Unit vectors in the propagation direction; we add a tiny component
    # in the x-direction to avoid problems at gk = 0
    pkx = gkx / gk
    pky = gky / gk

    # Unit vectors in-plane orthogonal to the propagation direction
    qkx = gky / gk
    qky = -gkx / gk

    pp = np.outer(pkx, pkx) + np.outer(pky, pky)
    pq = np.outer(pkx, qkx) + np.outer(pky, qky)
    qq = np.outer(qkx, qkx) + np.outer(qky, qky)

    # Loop over modes and build the matrix block-by-block
    modes_numg = []

    # Find the gmode_inds that actually enter the computation (due to the
    # gmax cutoff, only a finite number of mode indexes can enter)
    # Note: we might need to have a gmode_include for every kind
    gmode_include = []
    ik = 0 if self.gmode_compute.lower() == 'interp' else kind
    for mode in self.gmode_inds:
        if (mode % 2 == 0 and len(self.omegas_te[ik]) > mode // 2) \
                or (mode % 2 == 1 and len(self.omegas_tm[ik]) > mode // 2):
            gmode_include.append(mode)
    if gmode_include == []:
        raise RuntimeError("No guided modes were found for k-index %d. "
                           "One possibility is "
                           "that the effective permittivity of all layers is smaller than "
                           "that of at least one cladding. Reconsider your structure, or "
                           "try changing 'eps_eff' from 'average' to 'background' in "
                           "the options to GuidedModeExp.run()." % kind)
    else:
        self.gmode_include.append(np.array(gmode_include))

    # We now construct the matrix block by block
    mat_blocks = [[] for i in range(self.gmode_include[-1].size)]

    # NOTE(review): unlike _compute_guided, the 'approx' branch pairs
    # eps_array_val with the *differentiable* d_array — confirm this
    # asymmetry is intentional.
    if self.gradients == 'exact':
        (e_a, d_a) = (self.eps_array, self.d_array)
    elif self.gradients == 'approx':
        (e_a, d_a) = (self.eps_array_val, self.d_array)

    for im1 in range(self.gmode_include[-1].size):
        mode1 = self.gmode_include[-1][im1]
        (indmode1, oms1, As1, Bs1, chis1) = \
            self._get_guided(gk, kind, mode1)
        modes_numg.append(indmode1.size)

        # Zero-pad the row for the lower-triangular blocks (im2 < im1),
        # which are filled in by the Hermitian completion at the end.
        if len(modes_numg) > 1:
            mat_blocks[im1].append(bd.zeros((modes_numg[-1],
                                             bd.sum(modes_numg[:-1]))))

        for im2 in range(im1, self.gmode_include[-1].size):
            mode2 = self.gmode_include[-1][im2]
            (indmode2, oms2, As2, Bs2, chis2) = \
                self._get_guided(gk, kind, mode2)

            # Pick the matrix-element routine for the TE/TM pairing.
            if mode1 % 2 + mode2 % 2 == 0:
                mat_block = matrix_elements.mat_te_te(
                    e_a, d_a,
                    self.eps_inv_mat, indmode1, oms1,
                    As1, Bs1, chis1, indmode2, oms2, As2, Bs2,
                    chis2, qq)
            elif mode1 % 2 + mode2 % 2 == 2:
                mat_block = matrix_elements.mat_tm_tm(
                    e_a, d_a,
                    self.eps_inv_mat, gk, indmode1, oms1,
                    As1, Bs1, chis1, indmode2, oms2, As2, Bs2,
                    chis2, pp)
            elif mode1 % 2 == 0 and mode2 % 2 == 1:
                mat_block = matrix_elements.mat_te_tm(
                    e_a, d_a,
                    self.eps_inv_mat, indmode1, oms1,
                    As1, Bs1, chis1, indmode2, oms2, As2, Bs2,
                    chis2, pq.transpose())
            elif mode1 % 2 == 1 and mode2 % 2 == 0:
                mat_block = matrix_elements.mat_tm_te(
                    e_a, d_a,
                    self.eps_inv_mat, indmode1, oms1,
                    As1, Bs1, chis1, indmode2, oms2, As2, Bs2,
                    chis2, pq)

            mat_blocks[im1].append(mat_block)

    # Store how many modes total were included in the matrix
    self.N_basis.append(np.sum(modes_numg))
    # Store a list of how many g-points were used for each mode index
    self.modes_numg.append(modes_numg)

    # Stack all the blocks together
    mat = bd.vstack([bd.hstack(mb) for mb in mat_blocks])

    # If the matrix is within numerical precision to real symmetric,
    # make it explicitly so. This will speed up the diagonalization and
    # will often be the case, specifically when there is in-plane
    # inversion symmetry in the PhC elementary cell.
    if bd.amax(bd.abs(bd.imag(mat))) < 1e-10*bd.amax(bd.abs(bd.real(mat))):
        mat = bd.real(mat)

    # Make the matrix Hermitian (note that only the upper part of the
    # blocks, i.e. im2 >= im1, was computed above).
    return bd.triu(mat, 1) + bd.transpose(bd.conj(bd.triu(mat, 1))) + \
        bd.real(bd.diag(bd.diag(mat)))
def compute_eps_inv(self):
"""
Construct the inverse FT matrices for the permittivity in each layer
"""
try:
self.eps_inv_mat
except AttributeError:
# List of inverse permittivity matrices for every layer
self.eps_inv_mat = []
if self.truncate_g == 'tbt':
for it, T1 in enumerate(self.T1):
self.hom_layer = []
# For now we just use the numpy inversion. Later on we could
# implement the Toeplitz-Block-Toeplitz inversion (faster)
if bd.sum(bd.abs(T1[1:])) < 1e-10:
self.eps_inv_mat.append(bd.eye(T1.size, T1.size) / T1[0])
self.hom_layer.append(True)
else:
eps_mat = bd.toeplitz_block(self.n1g, T1, self.T2[it])
self.eps_inv_mat.append(bd.inv(eps_mat))
self.hom_layer.append(False)
elif self.truncate_g == 'abs':
for eps_mat in self.eps_ft:
self.eps_inv_mat.append(bd.inv(eps_mat))
def set_run_options(self, gmode_compute='exact', gmode_inds: list = [0],
gmode_npts: int = 1000,
gmode_step: float = 1e-2, gmode_tol: float = 1e-10, numeig: int = 10,
compute_im: bool = True, | |
win = gaussian(win_length, std=(float(win_length) * stdev))
b = win / win.sum()
if np.nansum(win) == 0:
raise RuntimeError('window to short for time interval')
print('win_length', str(win_length))
print('stddev', str(stdev))
print('win', str(win))
filtered = filtfilt(b, a, xr_in.data, axis=timedim, padtype=None, padlen=0)
out = xr.DataArray(filtered, dims=xr_in.dims, coords=xr_in.coords,
attrs=xr_in.attrs)
out.attrs.update({'filterlength': (steps, step_spec),
'filtertype': filtertype})
if xr_in.name:
out.name = xr_in.name + '_lowpassed'
return out
def extractBox(da, box, xdim='lon', ydim='lat'):
    """Extract a lon/lat box from `da` (deprecated; use `extractBox_dict`).

    Parameters
    ----------
    da : xarray object to subset
    box : sequence of four values [x0, x1, y0, y1]
    xdim, ydim : names of the x and y dimensions

    Returns
    -------
    Subset of `da`, obtained via `extractBox_dict` with wrapping enabled.
    """
    print('This is deprecated. Use extractBox_dict')
    # BUG FIX: the old code used `box[0, 1]` / `box[2, 3]`, which tuple-indexes
    # a 1-D box and fails. `extractBox_dict` expects a (start, stop) pair per
    # dimension, so build exactly that.
    box_dict = {xdim: (box[0], box[1]),
                ydim: (box[2], box[3])}
    return extractBox_dict(da, box_dict, concat_wrap=True)
def extractBox_dict(ds, box, concat_wrap=True):
    """Advanced box extraction from xarray Dataset"""
    # Allow a single bool to apply uniformly to every dimension of `box`.
    if not isinstance(concat_wrap, dict):
        concat_wrap = {dim: concat_wrap for dim in box.keys()}
    ds = ds.copy()
    for dim, ind in box.items():
        wrap = concat_wrap[dim]
        if np.diff(ind) < 0:
            # The box is defined across a coordinate discontinuity (e.g. a
            # longitude wrap): slice the two pieces and concatenate them.
            coord_vals = ds[dim].data
            upper_edge = coord_vals[coord_vals > ind[0]].max()
            lower_edge = coord_vals[coord_vals < ind[1]].min()
            upper_part = ds.loc[{dim: slice(ind[0], upper_edge)}]
            lower_part = ds.loc[{dim: slice(lower_edge, ind[1])}]
            if wrap:
                pieces = (upper_part, lower_part)
            else:
                pieces = (lower_part, upper_part)
            ds = xr.concat(pieces, dim)
        else:
            ds = ds.loc[{dim: slice(ind[0], ind[1])}]
    return ds
# This will be deprecated
def extractBoxes(da, bo, xname=None, yname=None, xdim='lon', ydim='lat'):
    """Removed implementation; kept only to redirect callers to `extractBox_dict`."""
    raise RuntimeWarning('Hard deprecated. Please use extractBox_dict instead')
# def extractBoxes(da, bo, xname=None, yname=None, xdim='lon', ydim='lat'):
# """ Extracts boxes from DataArray
#
#
# Keyword arguments:
# da -- xarray dataarray
# bo -- dict with box name as keys and box corner
# values as numpy array ([x0,x1,y0,y1])
# xdim -- dimension name for x (default: 'lon')
# ydim -- dimension name for y (default: 'lat')
#
#
# xname -- coordinate name for x (default: 'None')
# yname -- coordinate name for y (default: 'None')
#     xname and yname have to be specified if coordinates are of different shape
# """
# raise RuntimeError("this function is hellla slow! DO NOT use on \
# large datasets")
#
# if not type(xname) == type(yname):
# raise RuntimeError('xname and yname need to be the same type')
#
# timeseries = []
# for ii, bb in enumerate(bo.keys()):
# box = bo[bb]
# if xname is None:
# box_dict = {xdim: slice(box[0], box[1]),
# ydim: slice(box[2], box[3])}
# temp = da.loc[box_dict]
# else:
# mask = np.logical_and(np.logical_and(da[xname] > box[0],
# da[xname] < box[1]),
# np.logical_and(da[yname] > box[2],
# da[yname] < box[3]))
# temp = da.where(mask)
#
# timeseries.append(temp)
# boxname_dim = concat_dim_da(list(bo.keys()), 'boxname')
# out = xr.concat(timeseries, boxname_dim)
# return out
# Mapping related stuff
def dll_dist(dlon, dlat, lon, lat):
    """Converts lat/lon differentials into distances

    PARAMETERS
    ----------
    dlon : xarray.DataArray longitude differentials
    dlat : xarray.DataArray latitude differentials
    lon : xarray.DataArray longitude values
    lat : xarray.DataArray latitude values

    RETURNS
    -------
    dx : xarray.DataArray distance inferred from dlon
    dy : xarray.DataArray distance inferred from dlat
    """
    # Meters per degree of latitude (approximate, spherical Earth).
    dll_factor = 111000.0
    # FIX: `xr.ufuncs` was removed from xarray. Plain numpy ufuncs dispatch
    # correctly on DataArrays, and this now also works on bare numpy inputs.
    dx = dlon * np.cos(np.deg2rad(lat)) * dll_factor
    # Multiply by a ones-array on lon's grid so dy broadcasts to dx's dims.
    dy = ((lon * 0) + 1) * dlat * dll_factor
    return dx, dy
# TODO: This needs a test and perhaps I can refactor it into a 'budget tools
# Module'
def convert_flux_array(da, da_full, dim, top=True, fillval=0):
    """Pad `da` with `fillval` along `dim` so it matches `da_full`'s grid.

    `da`'s data is placed at the first (`top=True`) or last (`top=False`)
    position along `dim`; every other position is filled with `fillval`.
    Note: mutates `da`'s `dim` coordinate in place (as the original did).
    """
    filler = xr.DataArray(ones_like(da_full.data) * fillval,
                          coords=da_full.coords,
                          dims=da_full.dims)
    if top:
        # Pin `da` to the first coordinate value; fill everything after it.
        da.coords[dim] = da_full[dim][0]
        return xr.concat([da, filler[{dim: slice(1, None)}]], dim=dim)
    # Pin `da` to the last coordinate value; fill everything before it.
    da.coords[dim] = da_full[dim][-1]
    return xr.concat([filler[{dim: slice(0, -1)}], da], dim=dim)
def composite(data, index, bounds):
    """
    Composites Dataarray according to index

    Parameters
    ----------
    data : xarray.Dataarray
    index : xarray.Dataarray
        Timeseries matching one dimension of 'data'. Values below/between/
        above 'bounds' are composited into a new coordinate.
    bounds : int or array_like
        Two thresholds splitting 'index' into ['low','neutral','high'].
        A scalar is interpreted as [-std(index), std(index)] * bounds.

    Returns
    -------
    composited_array : array_like
        Like data, with an additional 'composite' coordinate
        ['high','neutral','low'] and per-class member 'counts'.
    """
    if isinstance(bounds, int):
        bounds = float(bounds)
    if isinstance(bounds, float):
        spread = np.std(index)
        bounds = [-bounds * spread, bounds * spread]
    if len(bounds) != 2:
        raise RuntimeError('bounds can only have 1 or two elements')
    comp_name = 'composite'
    # Ordered masks: above upper bound, between bounds, below lower bound.
    masks = {
        'high': index >= bounds[1],
        'neutral': np.logical_and(index < bounds[1], index >= bounds[0]),
        'low': index < bounds[0],
    }
    out = xr.concat([data.where(m) for m in masks.values()], comp_name)
    out[comp_name] = list(masks.keys())
    # Record how many samples fell into each class.
    counts = np.array([m.sum().data for m in masks.values()])
    out.coords['counts'] = xr.DataArray(counts, coords=[out[comp_name]])
    out.attrs['IndexName'] = index.name
    out.attrs['CompositeBounds'] = bounds
    return out
def corrmap(a, b, shifts=0,
            a_x_dim='i', a_y_dim='j',
            a_x_coord=None, a_y_coord=None,
            b_x_dim='i', b_y_dim='j',
            b_x_coord=None, b_y_coord=None,
            t_dim='time', debug=True):
    """Pointwise linear regression/correlation of `a` against (shifted) `b`.

    For every (x, y) grid point of `a`, the local timeseries is regressed
    onto `b` — either a single timeseries (`b` 1-D) or the spatially nearest
    timeseries of a 3-D `b` — for each time shift in `shifts`.

    Parameters
    ----------
    a : xarray.DataArray
        3-D input field (time plus two spatial dims `a_x_dim`/`a_y_dim`).
    b : xarray.DataArray
        Regression target; 1-D timeseries or 3-D field.
    shifts : int or list of int
        Time shifts applied to `b` (each produces one 'timeshifts' slice).
    a_x_coord/a_y_coord, b_x_coord/b_y_coord : str, optional
        Coordinate names used when position lives in coordinates rather
        than dimensions (only partially implemented for `b`).
    t_dim : str
        Name of `a`'s time dimension.
    debug : bool
        Unused here — presumably a leftover; confirm with callers.

    Returns
    -------
    (corr, p_value, slope) : xarray.DataArray
        Correlation coefficient, p-value and slope maps, each stacked along
        a 'timeshifts' dimension.

    Notes
    -----
    This is slow (pure-python double loop); it could most likely be
    rewritten with numpy.apply_along_axis.
    """
    from scipy.stats import linregress
    # x-coord and y-coord must be given together (or not at all).
    if not type(a_x_coord) == type(a_y_coord):
        raise RuntimeError('a_x_coord and a_y_coord need to be the same type')
    if not type(b_x_coord) == type(b_y_coord):
        raise RuntimeError('a_x_coord and a_y_coord need to be the same type')
    if isinstance(shifts, int):
        shifts = [shifts]
    # determine if the timseries is a timeseries or a 3d array
    if len(b.shape) == 3:
        arrayswitch = True
    elif len(b.shape) == 1:
        arrayswitch = False
    else:
        raise RuntimeWarning('this only works with a timseries \
or map of timeseries')
    # shift timeseries
    slope = []
    corr = []
    p_value = []
    for sh, shift in enumerate(shifts):
        shifted_b = b.shift(time=shift)
        # Template maps (same grid as `a`, time-collapsed), prefilled with NaN.
        s = a.mean(dim=t_dim).copy()
        s[:] = np.nan
        s.name = a.name + ' regressed onto ' + b.name
        c = a.mean(dim=t_dim).copy()
        c[:] = np.nan
        c.name = 'Corr coeff ' + a.name + '/' + b.name
        p = a.mean(dim=t_dim).copy()
        p[:] = np.nan
        p.name = 'p value ' + a.name + '/' + b.name
        for ii in range(len(a[a_x_dim])):
            for jj in range(len(a[a_y_dim])):
                # Define the 'input' (position in a) correctly, accounting for
                # the possibility that the
                # lat/lon position can be defined in the coordinates
                # or dimensions
                # interp timeseries onto the data.time
                in_a = a[{a_x_dim: ii, a_y_dim: jj}]
                if arrayswitch:
                    # 3-D `b`: find the b-timeseries nearest to this a-point.
                    if not a_x_coord:
                        in_x = in_a[a_x_dim].data
                        in_y = in_a[a_y_dim].data
                    else:
                        in_x = in_a[a_x_coord].data
                        in_y = in_a[a_y_coord].data
                    # rename the dimensions so it can be reindexed
                    if not b_x_coord:
                        in_b = xr.DataArray(shifted_b.data,
                                            coords={'xdim':
                                                    shifted_b[b_x_dim].data,
                                                    'ydim':
                                                    shifted_b[b_y_dim].data,
                                                    'time':
                                                    shifted_b.time.data},
                                            dims=['time', 'ydim', 'xdim'])
                    else:
                        raise RuntimeError('Not implemented yet')
                        # This would have to be acomplished by a mask of some
                        # sort
                        # (with some tolerance around the input position)
                    # extract the matching timeseries
                    in_b = in_b.sel(xdim=in_x, ydim=in_y, method='nearest')
                    reindexed_b = in_b.reindex_like(in_a.time,
                                                    method='nearest')
                else:
                    reindexed_b = shifted_b.reindex_like(in_a.time,
                                                         method='nearest')
                x = reindexed_b.data
                y = in_a.data
                # Regress only over times where both series are valid.
                idx = np.logical_and(~np.isnan(y), ~np.isnan(x))
                if y[idx].size:
                    s[{a_x_dim: ii, a_y_dim: jj}], _, c[{a_x_dim: ii,
                                                         a_y_dim: jj}],\
                        p[{a_x_dim: ii, a_y_dim: jj}], _ = linregress(x[idx],
                                                                      y[idx])
        slope.append(s)
        corr.append(c)
        p_value.append(p)
    out_s = xr.concat(slope, 'timeshifts')
    out_s['timeshifts'] = shifts
    # NOTE(review): the original author suspected a bug around these concat
    # calls ("I think this is a bug...this should be possible with" — the
    # note was left unfinished); verify the 'timeshifts' assignment.
    out_c = xr.concat(corr, 'timeshifts')
    out_c['timeshifts'] = shifts
    out_p = xr.concat(p_value, 'timeshifts',)
    out_p['timeshifts'] = shifts
    return out_c, out_p, out_s
def _coord_remapping_interp(x, y, y_target, remap):
"""Remap dataarray onto new dimension. E.g. express y(x) as y(remap)
using interpolation"""
idx = np.logical_or(np.isnan(y_target), np.isnan(y))
if sum(~idx) < 2:
y_remapped = remap * np.nan
else:
y_remapped = interpolate.interp1d(y_target[~idx], y[~idx],
bounds_error=False)(remap)
return y_remapped
def coord_remapping(x, y, y_target, remap, x_dim=None, remap_dim=None):
"""
remaps datasets/dataarray `y` with coordinate `x` to new coordinate
`y_target` with values specified in `remap`
E.g. a dataset with coordinate depth(x) will be remapped to coordinate
temp(y_target) as a vertical coordinate, with spacing given by `remap`)
Parameters
----------
x: xr.DataArray
The original dim/coordinate used for remapping
y: {xr.DataArray, xr.Dataset}
the data to be remapped
y_target: xr.DataArray
The new coordinate used for remapping
remap: {range, np.array, xr.DataArray}
Values of `y_target` used as new coordinate.
Returns
-------
remapped_y: xr.Dataset
dataset with remapped variables of y and the remapped position of
x (e.g. depth of the temperature values given in remap)
"""
# infer dim from input
if x_dim is None:
if len(x.dims) != 1:
raise RuntimeError('if x_dim is not specified, \
x must be a 1D array.')
dim = x.dims[0]
else:
dim = x_dim
if remap_dim is not None:
raise RuntimeError('multidim remap is not implemented yet.')
# if dataset is passed drop all data_vars that dont contain dim
if isinstance(y, xr.Dataset):
drop_vars = [a for a in y.data_vars if dim not in y[a].dims]
if drop_vars:
print('Found incompatible | |
import math
import numpy as np
from ._helpers import _check_value_any, broadcast_and_tile, raise_dimension_error
from .shape import check, check_value_any
# Public API of this vector-utilities module; `from <module> import *`
# exposes exactly these names. Some (e.g. `cross`, `dot`, `apex`) are
# defined further down in the file.
__all__ = [
    "normalize",
    "perpendicular",
    "project",
    "scalar_projection",
    "reject",
    "reject_axis",
    "magnitude",
    "euclidean_distance",
    "angle",
    "signed_angle",
    "rotate",
    "scale_factor",
    "aligned_with",
    "almost_zero",
    "almost_unit_length",
    "almost_collinear",
    "almost_equal",
    "principal_components",
    "major_axis",
    "apex",
    "apex_and_opposite",
    "argapex",
    "nearest",
    "farthest",
    "basis",
    "within",
    "average",
    "cross",
    "dot",
]
def normalize(vector):
    """
    Return the vector scaled to unit length. A `kx3` input is treated as a
    stack of vectors, each normalized independently.
    """
    if vector.ndim == 1:
        return vector / np.linalg.norm(vector)
    if vector.ndim == 2:
        lengths = np.linalg.norm(vector, axis=1)
        return vector / lengths[:, np.newaxis]
    raise_dimension_error(vector)
def perpendicular(v1, v2, normalized=True):
    """
    Given two noncollinear vectors, return a vector perpendicular to both,
    following the right-hand rule: with the right index finger along `v1`
    and middle finger along `v2`, the thumb points along the result.

    Stacked inputs are handled elementwise: when `v1` and `v2` are both
    `kx3`, `result[k]` is perpendicular to `v1[k]` and `v2[k]`.

    Args:
        v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
        v2 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors. If
            stacked, the shape must be the same as `v1`.
        normalized (bool): When `True`, the result vector is guaranteed to
            be unit length.

    Return:
        np.arraylike: An array with the same shape as `v1` and `v2`.

    See also:
        - https://en.wikipedia.org/wiki/Cross_product#Definition
        - https://commons.wikimedia.org/wiki/File:Right_hand_rule_cross_product.svg
    """
    perp = cross(v1, v2)
    if normalized:
        return normalize(perp)
    return perp
def project(vector, onto):
    """
    Compute the vector projection of `vector` onto the vector `onto`.
    `onto` need not be normalized.
    """
    if vector.ndim not in (1, 2):
        raise_dimension_error(vector)
    lengths = scalar_projection(vector, onto=onto)
    if vector.ndim == 1:
        return lengths * normalize(onto)
    # Stacked input: broadcast each scalar length over its unit direction.
    return lengths[:, np.newaxis] * normalize(onto)
def scalar_projection(vector, onto):
    """
    Compute the scalar projection of `vector` onto the vector `onto`.
    `onto` need not be normalized.
    """
    # `check` validates shapes by looking names up in locals(), so the
    # parameter names "vector" and "onto" must be passed unchanged.
    if vector.ndim == 1:
        check(locals(), "vector", (3,))
        check(locals(), "onto", (3,))
    else:
        k = check(locals(), "vector", (-1, 3))
        expected_onto = (3,) if onto.ndim == 1 else (k, 3)
        check(locals(), "onto", expected_onto)
    return dot(vector, normalize(onto))
def reject(vector, from_v):
    """
    Compute the vector rejection of `vector` from `from_v` -- i.e. the
    component of `vector` perpendicular to `from_v`.
    `from_v` need not be normalized.
    """
    parallel_component = project(vector, onto=from_v)
    return vector - parallel_component
def reject_axis(vector, axis, squash=False):
    """
    Compute the component of `vector` perpendicular to the basis vector
    given by `axis` (0 = x, 1 = y, 2 = z) — i.e. a copy of `vector` with
    that component zeroed.

    When `squash` is True the component is dropped instead of zeroed, so an
    R3 input maps to a point in R2.

    (N.B. Don't be misled: this meaning of `axis` is pretty different from
    the typical meaning in numpy.)
    """
    if axis not in (0, 1, 2):
        raise ValueError("axis should be 0, 1, or 2")
    if squash:
        kept_dims = [d for d in (0, 1, 2) if d != axis]
        if vector.ndim == 1:
            return vector[kept_dims]
        if vector.ndim == 2:
            return vector[:, kept_dims]
        raise_dimension_error(vector)
    zeroed = vector.copy()
    if vector.ndim == 1:
        zeroed[axis] = 0.0
    elif vector.ndim == 2:
        zeroed[:, axis] = 0.0
    else:
        raise_dimension_error(vector)
    return zeroed
def magnitude(vector):
    """
    Compute the magnitude (Euclidean norm) of `vector`, or of each vector in
    a stacked input.

    Args:
        vector (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.

    Returns:
        object: A `float` for a `(3,)` input; a `(k,)` array for a `kx3`
        input.
    """
    if vector.ndim == 1:
        return np.linalg.norm(vector)
    if vector.ndim == 2:
        return np.linalg.norm(vector, axis=1)
    raise_dimension_error(vector)
# Alias because angle()'s parameter shadows the name.
# NOTE(review): none of the functions visible here actually has a parameter
# named `normalize`; the alias is presumably used by code outside this view —
# confirm before removing.
_normalize = normalize
def euclidean_distance(v1, v2):
    """
    Compute the straight-line (Euclidean) distance between two points,
    either individually or pairwise over stacks of points.

    Args:
        v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
        v2 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors. If
            stacks are provided for both `v1` and `v2` they must have the
            same shape.

    Returns:
        object: When both inputs are `(3,)`, a `float` with the distance.
        Otherwise a `(k,)` array.
    """
    k = check_value_any(v1, (3,), (-1, 3), name="v1")
    check_value_any(
        v2,
        (3,),
        (-1 if k is None else k, 3),
        name="v2",
    )
    squared_diff = np.square(v2 - v1)
    if v1.ndim == 1 and v2.ndim == 1:
        return np.sqrt(np.sum(squared_diff))
    return np.sqrt(np.sum(squared_diff, axis=1))
def angle(v1, v2, look=None, assume_normalized=False, units="deg"):
    """
    Compute the unsigned angle between two vectors (pairwise for stacked
    input).

    When `look` is provided it is treated as the normal of a viewing plane
    and the angle is measured within that plane; otherwise the angle is
    measured in 3-space.

    Args:
        v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
        v2 (np.arraylike): A vector or stack of vectors with the same shape
            as `v1`.
        look (np.arraylike): A `(3,)` viewing-plane normal, or `None`.
        assume_normalized (bool): When `True`, skip the length division;
            only correct if the inputs really are unit length.
        units (str): `'deg'` or `'rad'`.

    Return:
        object: A `float` for `(3,)` inputs; a `(k,)` array for stacks.
    """
    if units not in ["deg", "rad"]:
        raise ValueError(f"Unrecognized units {units}; expected deg or rad")
    if look is not None:
        # This is a simple approach. Since this is working in two dimensions,
        # a smarter approach could reduce the amount of computation needed.
        v1 = reject(v1, from_v=look)
        v2 = reject(v2, from_v=look)
    dot_products = np.einsum("ij,ij->i", v1.reshape(-1, 3), v2.reshape(-1, 3))
    if assume_normalized:
        cosines = dot_products
    else:
        cosines = dot_products / magnitude(v1) / magnitude(v2)
    # Clip, because the dot product can slip past 1 or -1 due to rounding and
    # we can't compute arccos(-1.00001).
    angles = np.arccos(np.clip(cosines, -1.0, 1.0))
    if units == "deg":
        angles = np.degrees(angles)
    if v1.ndim == 1 and v2.ndim == 1:
        return angles[0]
    return angles
def signed_angle(v1, v2, look, units="deg"):
    """
    Compute the signed angle between two vectors (pairwise for stacked
    input).

    Results lie in [-180, 180] (or [-pi, pi]). Positive means a clockwise
    sweep from `v1` to `v2` as seen along `look`; negative means
    counterclockwise.

    Args:
        v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
        v2 (np.arraylike): A vector or stack of vectors with the same shape
            as `v1`.
        look (np.arraylike): A `(3,)` viewing-plane normal.
        units (str): `'deg'` or `'rad'`.

    Returns:
        object: A `float` for `(3,)` inputs; a `(k,)` array for stacks.
    """
    # The sign of (v1 x v2) . look gives the rotation sense:
    # > 0 clockwise, < 0 counterclockwise.
    signs = np.array(np.sign(np.cross(v1, v2).dot(look)))
    # 0 means collinear (0 or 180 degrees); treat that as clockwise.
    signs[signs == 0] = 1
    return signs * angle(v1, v2, look, units=units)
def rotate(vector, around_axis, angle, units="deg", assume_normalized=False):
"""
Rotate a point or vector around a given axis. The direction of rotation
around `around_axis` is determined by the right-hand rule.
Args:
vector (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
around_axis (np.arraylike): A `(3,)` vector specifying the axis of rotation.
assume_normalized (bool): When `True`, assume `around_axis` is unit
length. This improves performance marginally, however
when the inputs are not normalized, setting this will cause an
incorrect results.
units (str): `'deg'` to specify `angle` in degrees or `'rad'` to specify
radians.
Returns:
np.arraylike: The transformed point or points. This has the same shape as
`vector`.
See also:
- https://en.wikipedia.org/wiki/Cross_product#Definition
- https://commons.wikimedia.org/wiki/File:Right_hand_rule_cross_product.svg
"""
if units == "deg":
angle = math.radians(angle)
elif units != "rad":
raise ValueError(f'Unknown units "{units}"; expected "deg" | |
self.rescale_plot(fig, ax)
return fig, ax
def plot_surface(self, fig, ax, color='blue', alpha=0.30, colorbar=False, Nu=50, Nv=50):
# Get the surface coordinates
u = np.linspace(0.00, 1.00, Nu)
v = np.linspace(0.00, 1.00, Nv)
[uu, vv] = np.meshgrid(u, v, indexing='ij')
u = uu.flatten()
v = vv.flatten()
if self.ndim == 1:
# Get the values
Z = np.real(self.get_value(u, v)).reshape((Nu, Nv))
u = u.reshape((Nu, Nv))
v = v.reshape((Nu, Nv))
# Plot the surface with a plain color
ax.plot_surface(u, v, Z,
color=color,
# edgecolor='blue',
linewidth=0,
alpha=alpha,
shade=False,
antialiased=True,
zorder=0,
ccount=Nv,
rcount=Nu)
if self.ndim == 2:
# Get the values
u = np.linspace(0, 1, Nu)
v = np.linspace(0, 1, Nv)
# Get the coordinates of the boundaries
x1, y1 = self.get_value(u, 0*u)
x2, y2 = self.get_value(1 + 0*v, v)
x3, y3 = self.get_value(u[::-1], 1 + 0*u)
x4, y4 = self.get_value(0 * v, v[::-1])
x = np.concatenate((x1, x2, x3, x4))
y = np.concatenate((y1, y2, y3, y4))
# Plot a filled polygon
ax.fill(x,y, color=color, alpha=alpha)
if self.ndim == 3:
# Get the coordinates
X, Y, Z = np.real(self.get_value(u, v)).reshape((3, Nu, Nv))
# Plot the surface
if color == 'mean_curvature':
# Define a colormap based on the curvature values
mean_curvature, _ = np.real(self.get_curvature(u, v))
curvature = np.reshape(mean_curvature, (Nu, Nv))
curvature_normalized = (curvature - np.amin(curvature)) / (np.amax(curvature) - np.amin(curvature))
curvature_colormap = mpl.cm.viridis(curvature_normalized)
# Plot the surface with a curvature colormap
surf_handle = ax.plot_surface(X, Y, Z,
# color='blue',
# edgecolor='blue',
# cmap = 'viridis',
facecolors=curvature_colormap,
linewidth=0.75,
alpha=1,
shade=False,
antialiased=True,
zorder=2,
ccount=Nu,
rcount=Nv)
if colorbar:
fig.set_size_inches(7, 5)
surf_handle.set_clim(np.amin(curvature), np.amax(curvature))
cbar = fig.colorbar(surf_handle, ax=ax, orientation='vertical', pad=0.15, fraction=0.03, aspect=20)
cbar.set_label(color)
elif color == 'gaussian_curvature':
# Define a colormap based on the curvature values
_, gaussian_curvature= np.real(self.get_curvature(u, v))
curvature = np.reshape(gaussian_curvature, (Nu, Nv))
curvature_normalized = (curvature - np.amin(curvature)) / (np.amax(curvature) - np.amin(curvature))
curvature_colormap = mpl.cm.viridis(curvature_normalized)
# Plot the surface with a curvature colormap
surf_handle = ax.plot_surface(X, Y, Z,
# color='blue',
# edgecolor='blue',
# cmap = 'viridis',
facecolors=curvature_colormap,
linewidth=0.75,
alpha=1,
shade=False,
antialiased=True,
zorder=2,
ccount=Nu,
rcount=Nv)
if colorbar:
fig.set_size_inches(7, 5)
surf_handle.set_clim(np.amin(curvature), np.amax(curvature))
cbar = fig.colorbar(surf_handle, ax=ax, orientation='vertical', pad=0.15, fraction=0.03, aspect=20)
cbar.set_label(color)
else:
# Plot the surface with a plain color
ax.plot_surface(X, Y, Z,
color=color,
# edgecolor='blue',
linewidth=0,
alpha=alpha,
shade=False,
antialiased=True,
zorder=0,
ccount=Nv,
rcount=Nu)
def plot_boundary(self, fig, ax, color='black', linewidth=1.00, linestyle='-',
south=True, north=True, east=True, west=True):
""" Plot the isoparametric curves at the boundary """
# Create the isoparametric NURBS curves and plot them on the current figure
if east: self.get_isocurve_u(u0=0.00).plot_curve(fig, ax, linestyle=linestyle, linewidth=linewidth, color=color)
if west: self.get_isocurve_u(u0=1.00).plot_curve(fig, ax, linestyle=linestyle, linewidth=linewidth, color=color)
if south: self.get_isocurve_v(v0=0.00).plot_curve(fig, ax, linestyle=linestyle, linewidth=linewidth, color=color)
if north: self.get_isocurve_v(v0=1.00).plot_curve(fig, ax, linestyle=linestyle, linewidth=linewidth, color=color)
def plot_isocurve_u(self, fig, ax, u_values, color='black', linewidth=1.00, linestyle='-'):
""" Plot isoparametric curves in the u-direction """
for u in u_values: self.get_isocurve_u(u0=u).plot_curve(fig, ax, color=color, linewidth=linewidth, linestyle=linestyle)
def plot_isocurve_v(self, fig, ax, v_values, color='black', linewidth=1.00, linestyle='-'):
""" Plot isoparametric curves in the v-direction """
for v in v_values: self.get_isocurve_v(v0=v).plot_curve(fig, ax, color=color, linewidth=linewidth, linestyle=linestyle)
def plot_control_points(self, fig, ax, color='red', linewidth=1.00, linestyle='-', markersize=5, markerstyle='o'):
""" Plot the control points """
if self.ndim == 1:
# Plot the control net
Px = np.linspace(0, 1, np.shape(self.P)[1])
Py = np.linspace(0, 1, np.shape(self.P)[2])
Px, Py = np.meshgrid(Px, Py, indexing='ij')
Pz = np.real(self.P)[0, :, :]
ax.plot_wireframe(Px, Py, Pz,
edgecolor=color,
linewidth=linewidth,
linestyles=linestyle,
alpha=1.0,
antialiased=True,
zorder=1)
# Plot the control points
points, = ax.plot(Px.flatten(), Py.flatten(), Pz.flatten())
points.set_linewidth(linewidth)
points.set_linestyle(' ')
points.set_marker(markerstyle)
points.set_markersize(markersize)
points.set_markeredgewidth(linewidth)
points.set_markeredgecolor(color)
points.set_markerfacecolor('w')
points.set_zorder(4)
# points.set_label(' ')
if self.ndim == 2:
# Plot the control net
Px, Py = np.real(self.P)
ax.plot(Px, Py,
color=color, linewidth=linewidth, linestyle='-', marker=markerstyle, markersize=markersize,
markeredgewidth=linewidth, markeredgecolor=color, markerfacecolor='w', zorder=4)
ax.plot(Px.transpose(), Py.transpose(),
color=color, linewidth=linewidth, linestyle='-', marker=markerstyle, markersize=markersize,
markeredgewidth=linewidth, markeredgecolor=color, markerfacecolor='w', zorder=4)
if self.ndim == 3:
# Plot the control net
Px, Py, Pz = np.real(self.P)
ax.plot_wireframe(Px, Py, Pz,
edgecolor=color,
linewidth=linewidth,
linestyles=linestyle,
alpha=1.0,
antialiased=True,
zorder=1)
# Plot the control points
points, = ax.plot(Px.flatten(), Py.flatten(), Pz.flatten())
points.set_linewidth(linewidth)
points.set_linestyle(' ')
points.set_marker(markerstyle)
points.set_markersize(markersize)
points.set_markeredgewidth(linewidth)
points.set_markeredgecolor(color)
points.set_markerfacecolor('w')
points.set_zorder(4)
# points.set_label(' ')
def plot_normals(self, fig, ax, number_u=10, number_v=10, scale=0.075):
""" Plot the normal vectors """
# Compute the surface coordinates and normal vectors
h = 1e-6 # Add a small offset to avoid poles at the extremes [0, 1]
u = np.linspace(0.00+h, 1.00-h, number_u)
v = np.linspace(0.00+h, 1.00-h, number_v)
[u, v] = np.meshgrid(u, v, indexing='xy')
u = u.flatten()
v = v.flatten()
S = np.real(self.get_value(u, v))
N = np.real(self.get_normals(u, v))
# Scale the normal vectors and plot them
Lu = self.get_isocurve_u(u0=0.50).get_arclength()
Lv = self.get_isocurve_v(v0=0.50).get_arclength()
length_scale = scale*np.real(np.amax([Lu, Lv]))
N = length_scale * N
ax.quiver(S[0, :], S[1, :], S[2, :], N[0, :], N[1, :], N[2, :], color='black', length=np.abs(scale), normalize=True)
def rescale_plot(self, fig, ax):
""" Adjust the aspect ratio of the figure """
# Set axes aspect ratio
ax.autoscale(enable=True)
x_min, x_max = ax.get_xlim()
y_min, y_max = ax.get_ylim()
z_min, z_max = ax.get_zlim()
x_mid = (x_min + x_max) / 2
y_mid = (y_min + y_max) / 2
z_mid = (z_min + z_max) / 2
L = np.max((x_max - x_min, y_max - y_min, z_max - z_min)) / 2
ax.set_xlim3d(x_mid - 1.0 * L, x_mid + 1.0 * L)
ax.set_ylim3d(y_mid - 1.0 * L, y_mid + 1.0 * L)
ax.set_zlim3d(z_mid - 1.0 * L, z_mid + 1.0 * L)
# Adjust pad
plt.tight_layout(pad=5.0, w_pad=None, h_pad=None)
    def plot_curvature(self, fig=None, ax=None, curvature_type='mean'):
        """ Plot the mean or Gaussian curvature as a surface over the
        (u, v) parameter plane.

        Parameters
        ----------
        fig, ax : existing matplotlib figure/3D axes, or None to create new
            ones. NOTE(review): if `fig` is given but `ax` is None, `ax`
            stays None and the plotting calls below will fail — callers
            apparently must pass both or neither; confirm.
        curvature_type : 'mean' or 'gaussian'.

        Returns
        -------
        (fig, ax) : the figure and axes used.
        """
        # Prepare the plot
        if fig is None:
            fig = mpl.pyplot.figure(figsize=(6, 5))
            ax = fig.add_subplot(111, projection='3d')
            ax.view_init(azim=-105, elev=30)
            ax.grid(False)
            # Transparent panes with black edges
            ax.xaxis.pane.fill = False
            ax.yaxis.pane.fill = False
            ax.zaxis.pane.fill = False
            ax.xaxis.pane.set_edgecolor('k')
            ax.yaxis.pane.set_edgecolor('k')
            ax.zaxis.pane.set_edgecolor('k')
            ax.xaxis.pane._alpha = 0.9
            ax.yaxis.pane._alpha = 0.9
            ax.zaxis.pane._alpha = 0.9
            ax.set_xlabel('$x$ axis', fontsize=11, color='k', labelpad=18)
            ax.set_ylabel('$y$ axis', fontsize=11, color='k', labelpad=18)
            ax.set_zlabel('$z$ axis', fontsize=11, color='k', labelpad=18)
            # ax_xy.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))
            # ax_xy.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))
            # ax_xy.zaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))
            for t in ax.xaxis.get_major_ticks(): t.label.set_fontsize(8)
            for t in ax.yaxis.get_major_ticks(): t.label.set_fontsize(8)
            for t in ax.zaxis.get_major_ticks(): t.label.set_fontsize(8)
            ax.xaxis.set_rotate_label(False)
            ax.yaxis.set_rotate_label(False)
            ax.zaxis.set_rotate_label(False)
            # ax_xy.set_xticks([])
            # ax_xy.set_yticks([])
            # ax_xy.set_zticks([])
            # ax_xy.axis('off')
        # (u,v) parametrization for the plot: flattened 50 x 50 grid
        Nu, Nv = 50, 50
        u = np.linspace(0.00, 1.00, Nu)
        v = np.linspace(0.00, 1.00, Nv)
        [uu, vv] = np.meshgrid(u, v, indexing='ij')
        u = uu.flatten()
        v = vv.flatten()
        # Get the curvature (get_curvature returns the (mean, gaussian) pair)
        if curvature_type == 'mean':
            curvature, _ = np.real(self.get_curvature(u, v))
        elif curvature_type == 'gaussian':
            _, curvature = np.real(self.get_curvature(u, v))
        else:
            raise Exception("Choose a valid curvature type: 'mean' or 'gaussian'")
        # Represent the curvature as a carpet plot or as a surface plot
        ax.set_xlabel('$u$', fontsize=11, color='k', labelpad=10)
        ax.set_ylabel('$v$', fontsize=11, color='k', labelpad=10)
        ax.set_zlabel(r'$\kappa$' + ' ' + curvature_type, fontsize=11, color='k', labelpad=20)
        curvature = np.reshape(curvature, (Nu, Nv))
        ax.plot_surface(uu, vv, curvature,
                        color='blue',
                        # edgecolor='blue',
                        # cmap = 'viridis',
                        # facecolors=curvature_colormap,
                        linewidth=0.75,
                        alpha=0.50,
                        shade=False,
                        antialiased=True,
                        zorder=0,
                        ccount=50,
                        rcount=50)
        # Adjust pad
        plt.tight_layout(pad=5.0, w_pad=None, h_pad=None)
        return fig, ax
# ---------------------------------------------------------------------------------------------------------------- #
# Define the point projection problem class (Pygmo's user-defined problem)
# ---------------------------------------------------------------------------------------------------------------- #
def project_point_to_surface(self, P, algorithm_name='lbfgs'):
""" Solve the point projection problem for the prescribed point `P` """
# Import pygmo
import pygmo as pg
# Create the optimization algorithm
myAlgorithm = pg.algorithm(pg.nlopt(algorithm_name))
myAlgorithm.extract(pg.nlopt).xtol_rel = 1e-6
myAlgorithm.extract(pg.nlopt).ftol_rel = 1e-6
myAlgorithm.extract(pg.nlopt).xtol_abs = 1e-6
myAlgorithm.extract(pg.nlopt).ftol_abs = 1e-6
myAlgorithm.extract(pg.nlopt).maxeval = 100
myAlgorithm.set_verbosity(0)
# Create the optimization problem
myProblem = pg.problem(self.PointToSurfaceProjectionProblem(self.get_value, self.get_derivative, P))
# Create the population
myPopulation = pg.population(prob=myProblem, size=1)
# Create a list with the different starting points
U0 = self.U[0:-1] + 1/ 2 * (self.U[1:] - self.U[0:-1])
V0 = self.V[0:-1] + 1 / 2 * (self.V[1:] - self.V[0:-1])
U0, V0 = np.meshgrid(U0, V0)
U0, V0 = U0.flatten(), V0.flatten()
for u0, v0 in zip(U0, V0):
myPopulation.push_back([u0, v0])
# Solve the optimization problem (evolve the population in Pygmo's jargon)
myPopulation = myAlgorithm.evolve(myPopulation)
# Get the optimum
u, v = myPopulation.champion_x
return u, v
class PointToSurfaceProjectionProblem:
def __init__(self, S, dS, P):
""" Solve point inversion problem: min(u,v) ||S(u,v) - P|| """
self.S_func = S
self.dS_func = dS
self.P = np.reshape(P, (P.shape[0], 1))
@staticmethod
def get_bounds():
""" Set the bounds for the optimization problem """
return [0.00, 0.00], [1.00, 1.00]
def fitness(self, x):
""" Evaluate the deviation between the prescribed point and the parametrized point """
u = np.asarray([x[0]])
v = np.asarray([x[1]])
S = self.S_func(u, v)
P = self.P
return np.asarray([np.sum(np.sum((S - P) ** 2, | |
_cfg_dir = os.path.dirname(_cfg)
_cfg_name = os.path.basename(_cfg)
_cfg_cache_dir = os.path.join(_cfg_dir, '.cache')
os.makedirs(_cfg_cache_dir, exist_ok=True)
cfg_cache_path = os.path.join(_cfg_cache_dir, _cfg_name + '.cache')
if enable_cache and os.path.exists(cfg_cache_path):
cfg_cache_mtime = os.path.getmtime(cfg_cache_path)
cfg_mtime = os.path.getmtime(_cfg)
# print('cfg_mtime: {}'.format(cfg_mtime))
# print('cfg_cache_mtime: {}'.format(cfg_cache_mtime))
if cfg_mtime <= cfg_cache_mtime:
try:
print('Loading cfg data from {:s}'.format(cfg_cache_path))
with open(cfg_cache_path, 'rb') as f:
nodes, nodes_by_fullname, _sections, file_args, file_args_offset, root_sec_name = pickle.load(f)
except:
pass
else:
return nodes, nodes_by_fullname, _sections, file_args, file_args_offset, root_sec_name
# cfg_cache_mtime_local = time.ctime(cfg_cache_mtime)
# cfg_mtime_local = time.ctime(cfg_mtime)
# print('cfg_cache_mtime_local: {}'.format(cfg_cache_mtime_local))
# print('cfg_mtime_local: {}'.format(cfg_mtime_local))
print('Reading parameters from {:s}'.format(_cfg))
file_args = [k.strip() for k in open(_cfg, 'r').readlines()]
file_args_offset = 0
if not file_args[0].startswith('##'):
file_args.insert(0, '##')
file_args_offset = 1
_sections = [(k.lstrip('#').strip(), i, k.count('#') - 1, 0)
for i, k in enumerate(file_args) if k.startswith('##')]
n_file_args = len(file_args)
n_sections = len(_sections)
"""parent specific sections"""
# _parent_specific_section_ids = [k[1] for k in _sections if k[0].endswith(' __')]
# _sections = [(k[0].rstrip(' __'), k[1], k[2], k[3]) if k[1] in _parent_specific_section_ids else k
# for k in _sections]
_sections = [k if k[0] else ('__common__', k[1], k[2], k[3]) for k in _sections]
"""common sections"""
_sections = [k if k[0] else ('__common__', k[1], k[2], k[3]) for k in _sections]
"""add section end IDs as the start IDs of the next section"""
_sections = [(k[0], k[1], _sections[i + 1][1] if i < n_sections - 1 else n_file_args, k[2], k[3])
for i, k in enumerate(_sections)]
# """default sections
# """
# _default_sections, _default_section_ids = zip(*[
# (k[0].rstrip('__').strip(), i) for i, k in enumerate(_sections) if k[0].endswith('__')])
# for i, j in enumerate(_default_section_ids):
# k = _sections[i]
# _sections[j] = (_default_sections[i], k[1], k[2])
"""template sections"""
_curr_template_id = 1
# _sec_id_orig = _sec_id_temp = 0
# for i, _sec in enumerate(_sections):
_all_temp_sections = {}
pass_id = 0
while True:
n_sections = len(_sections)
curr_root = Node()
temp_nodes = {} # :type dict(tuple, Node)
_find_children(temp_nodes, None, _sections, 0, 0, curr_root, n_sections)
_temp_sections = []
# _added_sections = {}
found_new_sections = 0
for i, _sec in enumerate(_sections):
# if _sec_id_temp >= len(_temp_sections):
# if _sec_id_orig >= len(_sections):
# break
# else:
# _sec = _sections[_sec_id_orig]
# _sec_id_orig += 1
# else:
# _sec = _temp_sections[_sec_id_temp]
# _sec_id_temp += 1
# if (_sec[1], _sec[2]) in _added_sections:
# """this section has already been added as a descendant of a previous template section
# """
# continue
# _added_sections[(_sec[1], _sec[2])] = 1
_sec_name = _sec[0]
prelim_node = temp_nodes[i]
if prelim_node.added:
"""this section has already been added in this pass as a descendant of a
previous template section
"""
continue
prelim_node.added = 1
"""range based section names
"""
_templ_sec_names = []
# any(map(_sec_name.startswith, ['(', '[', 'range(', 'irange(']))
if _sec_name.startswith('(') or _sec_name.startswith('[') or ':' in _sec_name or \
_sec_name.startswith('range(') or _sec_name.startswith('irange('):
# assert ',' not in _sec_name, \
# "Combining template and range sections is not supported currently"
"""in case there are multiple ranges or lists"""
# in_range_sec_names = _sec_name.split('+')
# for in_range_sec_name in in_range_sec_names:
in_range_sec_name = _sec_name
range_tokens = in_range_sec_name.split('_')
"""in case there are multiple ranges or lists"""
range_tuples = tuple(map(str_to_tuple_multi, range_tokens))
def _get_sec_names(_sec_names, _tuples, _id, _nums):
for _num in _tuples[_id]:
__nums = _nums[:]
if isinstance(_num, str):
__nums.append(_num)
else:
if _num < 0:
__nums.append('n' + str(abs(_num)))
else:
__nums.append(str(_num))
if _id < len(_tuples) - 1:
_get_sec_names(_sec_names, _tuples, _id + 1, __nums)
else:
__sec_name = '_'.join(__nums)
_sec_names.append(__sec_name)
_out_range_sec_names = []
_get_sec_names(_out_range_sec_names, range_tuples, 0, [])
_templ_sec_names += _out_range_sec_names
elif ',' in _sec_name:
_templ_sec_names = _sec_name.split(',')
if not _templ_sec_names:
_temp_sections.append(_sec)
continue
found_new_sections = 1
descendants = prelim_node.get_descendants()
if descendants:
# def is_template(__sec_name):
# return __sec_name.startswith('(') or __sec_name.startswith(
# '[') or ':' in __sec_name or \
# __sec_name.startswith('range(') or __sec_name.startswith(
# 'irange(') or ',' in __sec_name
for k_id, k in enumerate(_templ_sec_names):
_temp_sections.append((k, _sec[1], _sec[2], _sec[3], _curr_template_id))
for descendant in descendants: # type: Node
_temp_sections.append(
(descendant.name, descendant.line_id, descendant.end_id, descendant.curr_level, 0))
descendant.added = 1
# if k_id == 0:
# # assert not is_template(descendant.name), \
# # f"template section: {descendant.name} used as a descendant of " \
# # f"another: {_sec_name}"
# _added_sections[(descendant.line_id, descendant.curr_level)] = 1
else:
_temp_sections += [(k, _sec[1], _sec[2], _sec[3], _curr_template_id) for k in _templ_sec_names]
_curr_template_id += 1
if not found_new_sections:
break
_sections = _temp_sections
_all_temp_sections[pass_id] = _temp_sections
pass_id += 1
time_stamp = datetime.now().strftime("%y%m%d_%H%M%S")
root_sec_name = "__root_{}__".format(time_stamp)
curr_root = Node(root_sec_name)
n_sections = len(_sections)
nodes = {} # :type dict(tuple, Node)
nodes_by_fullname = defaultdict(list)
_find_children(nodes, nodes_by_fullname, _sections, 0, 0, curr_root, n_sections)
nodes_by_fullname = dict(nodes_by_fullname)
if enable_cache:
with open(cfg_cache_path, 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump([nodes, nodes_by_fullname, _sections, file_args, file_args_offset, root_sec_name], f)
return nodes, nodes_by_fullname, _sections, file_args, file_args_offset, root_sec_name
def process_dict(params, *args, **kwargs):
    """Run :func:`process` on a plain dict instead of an object.

    The dict entries are copied onto a throwaway namespace object, that
    object is processed, and its (possibly updated) attributes are written
    back into ``params`` in place.

    :param dict params: parameter name -> value mapping, updated in place
    :param args: positional arguments forwarded to :func:`process`
    :param kwargs: keyword arguments forwarded to :func:`process`
    :return: None
    """

    class _Namespace:
        """Minimal attribute container mirroring the dict entries."""

        def __init__(self, entries):
            # `help` must always exist because process() inspects obj.help.
            self.help = {}
            self.__dict__.update(entries)

    proxy = _Namespace(params)
    process(proxy, *args, **kwargs)
    # Reflect every attribute (including any added by process) back into
    # the caller's dict.
    params.update(proxy.__dict__)
def process(obj, args_in=None, cmd=True, cfg='', cfg_root='', cfg_ext='',
prog='', usage='%(prog)s [options]', allow_unknown=0, cfg_cache=1,
cmd_args=None, verbose=0):
"""
:param obj:
:param list | None args_in:
:param bool cmd: enable command line argument processing
:param str cfg:
:param str cfg_root:
:param str cfg_ext:
:param str prog:
:param str | None usage:
:param int allow_unknown:
:param int cfg_cache:
:return:
"""
arg_dict = {}
if prog:
arg_dict['prog'] = prog
if usage is None:
arg_dict['usage'] = argparse.SUPPRESS
elif usage:
arg_dict['usage'] = usage
if hasattr(obj, 'help') and '__desc__' in obj.help:
arg_dict['description'] = obj.help['__desc__']
parser = argparse.ArgumentParser(**arg_dict)
member_to_type = {}
doc_dict = {}
_add_params_to_parser(parser, obj, member_to_type, doc_dict, verbose=verbose)
obj_doc_dict = doc_dict[type(obj)]
try:
short_description, long_description = obj_doc_dict['__description__']
except KeyError:
pass
else:
if short_description is not None:
parser.description = short_description
if long_description is not None:
parser.description += '\n' + long_description
elif long_description is not None:
parser.description = long_description
if args_in is None:
if cmd_args is None:
cmd_args = sys.argv[1:]
argv_id = 0
if not cfg:
# check for cfg files specified at command line
if cmd and len(cmd_args) > 0 and ('--cfg' in cmd_args[0] or cmd_args[0].startswith('cfg=')):
_, arg_val = cmd_args[0].split('=')
cfg = arg_val
argv_id += 1
if hasattr(obj, 'cfg'):
obj.cfg = cfg
cfg = getattr(obj, 'cfg', cfg)
if not cfg_root:
cfg_root = getattr(obj, 'cfg_root', cfg_root)
if not cfg_ext:
cfg_ext = getattr(obj, 'cfg_ext', cfg_ext)
if not cfg and hasattr(obj, 'cfg'):
obj.cfg = cfg
if isinstance(cfg, str):
if ',' not in cfg:
cfg = '{},'.format(cfg)
cfg = [k for k in cfg.split(',') if k]
"""pre-process raw cfg strings to extract and refine cfg files and sections"""
cfg_file_list = []
for _cfg in cfg:
_cfg_sec = []
if ':' not in _cfg:
"""no explicit section specified for the CFG so read its common sections"""
_cfg = '{}:__common__'.format(_cfg)
"""alternate specification for parent specific sections for ease of selecting child section"""
_cfg = _cfg.replace('-', '')
_cfg = _cfg.split(':')
_cfg_sec = [k for k in list(_cfg[1:]) if k]
_cfg = _cfg[0]
"""optional leading and trailing for better visible discrimination between cfg files and sections
in commands stored in syntax highlighted markdown files"""
if _cfg.startswith('_') and _cfg.endswith('_'):
_cfg = _cfg.strip('_')
if cfg_ext:
_cfg = '{}.{}'.format(_cfg, cfg_ext)
if cfg_root:
_cfg = os.path.join(cfg_root, _cfg)
if not os.path.isfile(_cfg):
if _cfg:
raise IOError('cfg file does not exist: {:s}'.format(os.path.abspath(_cfg)))
repeated_cfgs = []
repeated_sec_ids = [__sec_id for __sec_id, __sec in enumerate(_cfg_sec) if '+' in __sec]
excluded_ids = []
for i, __sec_id in enumerate(repeated_sec_ids):
_exclude_common_secs = 0
if _cfg_sec[__sec_id].startswith('++'):
if _cfg_sec[__sec_id].startswith('+++'):
_exclude_common_secs = 1
_cfg_name_start_pos = 3
else:
_cfg_name_start_pos = 2
_exclude_common_secs = 0
"""these sections are exclusive to the repeated cfg files so excluded from the default one"""
_exclusive_secs = 1
__sec_names = _cfg_sec[__sec_id][_cfg_name_start_pos:].split('+')
repeat_sec_names = __sec_names
_cfg_sec[__sec_id] = ''
else:
_exclusive_secs = 0
__sec_names = _cfg_sec[__sec_id].split('+')
repeat_sec_names = __sec_names[1:]
_cfg_sec[__sec_id] = __sec_names[0]
start_include_id = __sec_id + 1
end_include_id = repeated_sec_ids[i + 1] if i < len(repeated_sec_ids) - 1 else len(_cfg_sec)
for __name in repeat_sec_names:
included_secs = [__name, ] + _cfg_sec[start_include_id:end_include_id]
repeated_cfgs.append((_cfg, included_secs, 1, _exclude_common_secs))
if _exclusive_secs:
excluded_ids += list(range(start_include_id, end_include_id))
_cfg_sec = [k for i, k in enumerate(_cfg_sec) if i not in excluded_ids and k]
cfg_file_list.append((_cfg, _cfg_sec, 0, 0))
cfg_file_list += repeated_cfgs
"""process each cfg file and its sections"""
args_in = | |
line = lines[-1].strip()
for field in line.split("."):
if field in ("IB1","IG","TRIBL","IB2","TRIBL2"):
opts += "-a " + field
elif field in ("M","C","D","DC","L","J","N","I","O"):
opts += " -m " + field
elif field in ("nw","gr","ig","x2","sv"):
opts += " -w " + field
elif field in ("Z","IL") or field[0:3] == "ED:":
opts += " -d " + field
elif len(field) >= 2 and field[0] == "L" and field[1:].isdigit():
opts += " -L " + field[1:]
elif len(field) >= 2 and field[0] == "k" and field[1:].isdigit():
opts += " -k " + field[1:]
return opts
def processresult(out_best, oof_senses, id, lemma, pos, targetlang, bestsense, distribution, distance, divergencefrombestoutputthreshold):
    """Write the best-sense line for one test instance and queue it for OOF output.

    All senses whose score is within `divergencefrombestoutputthreshold` of the
    top score are emitted (in descending score order). Note that `bestsense`
    itself is not used here: the winners are re-derived from `distribution`.
    (Python 2 code: relies on the `unicode` builtin and print-chevron syntax.)
    """
    top_score = max(distribution.values())
    cutoff = top_score * divergencefrombestoutputthreshold
    ranked = sorted(distribution.items(), key=lambda x: x[1] * -1)
    winners = []
    for sense, score in ranked:
        if score >= cutoff:
            winners.append(sense)
    joined = ';'.join(winners)
    if not isinstance(joined, unicode):
        joined = unicode(joined, 'utf-8')
    out_best.write(lemma + "." + pos + "." + targetlang + ' ' + str(id) + ' :: ' + joined + ';\n')
    # Keep the full distribution around; the out-of-five file is produced
    # later by processresult_final().
    oof_senses.append((id, lemma, pos, targetlang, distribution, distance))
    print >>sys.stderr, "<-- Timbl output for " + lemma.encode('utf-8') + '.' + pos + " @" + str(id) + ": " + repr(distribution)
def processresult_final(out_oof, oof_senses):
    """Write the out-of-five (OOF) file: five sense candidates per instance.

    Each instance contributes its own five highest-scoring senses; if it has
    fewer than five, the gap is filled from a global baseline built by summing
    each sense's scores over all instances. (Python 2 code.)
    """
    # Aggregate scores per sense across every classified instance.
    aggregate = {}
    for id, lemma, pos, targetlang, distribution, distance in oof_senses:
        for sense, score in distribution.items():
            aggregate[sense] = aggregate.get(sense, 0) + score
    print >>sys.stderr, "Aggregated senses for OOF baseline: ",
    print >>sys.stderr, sorted(aggregate.items(), key=lambda x: -1 * x[1])[:5]
    oof_baseline = [sense for sense, score in sorted(aggregate.items(), key=lambda x: -1 * x[1])[:5]]
    for id, lemma, pos, targetlang, distribution, distance in oof_senses:
        candidates = [sense for sense, score in sorted(distribution.items(), key=lambda x: -1 * x[1])[:5]]
        # Pad with baseline senses (highest aggregate score first) until
        # five candidates are available.
        for fallback in oof_baseline:
            if len(candidates) == 5:
                break
            if not fallback in candidates:
                candidates.append(fallback)
        joined = ';'.join(candidates)
        if not isinstance(joined, unicode):
            joined = unicode(joined, 'utf-8')
        if not isinstance(lemma, unicode):
            lemma = unicode(lemma, 'utf-8')
        out_oof.write(lemma + "." + pos + "." + targetlang + ' ' + str(id) + ' ::: ' + joined + ';\n')
class CLWSD2Tester(object):
    """Cross-lingual WSD tester (Python 2 code).

    For every target word it extracts features from the test instances,
    classifies them with a per-lemma Timbl classifier, writes ``*.best`` and
    ``*.oof`` output files (plus ``*.votertest`` files when voting is
    enabled), and optionally scores the output against the gold standard.

    Fixes applied in this revision: ``run()`` previously read the module-level
    globals ``sourcetagger``, ``targetlang`` and ``DOVOTER`` instead of the
    values stored on ``self``, and ``score()`` mixed ``self.outputdir`` with
    the global ``outputdir``.
    """

    def __init__(self, testdir, outputdir, targetlang, targetwordsfile, sourcetagger, timbloptions, contextsize, DOPOS, DOLEMMAS, bagofwords, DOVOTER, divergencefrombestoutputthreshold=1, variableconfiguration=None, constrainsenses=False, DOSCORE=True):
        """Set up the tester.

        :param testdir: directory holding the ``<lemma>.data`` test files
        :param outputdir: directory for classifiers, bags and result files
        :param targetlang: target language code used in output filenames
        :param targetwordsfile: file listing the (lemma, pos) target words
        :param sourcetagger: tagger object with a ``process(words)`` method
        :param timbloptions: base Timbl option string
        :param contextsize: number of context tokens on each side of the focus
        :param DOPOS/DOLEMMAS: include PoS-tag / lemma features
        :param bagofwords: enable global-context bag-of-word features
        :param DOVOTER: also write per-instance voter test files
        :param divergencefrombestoutputthreshold: senses scoring within this
            fraction of the best score are all emitted
        :param variableconfiguration: optional per-lemma
            (contextsize, DOPOS, DOLEMMAS, bagofwords) overrides
        :param constrainsenses: currently unused here
        :param DOSCORE: run the scorer after classification
        """
        self.sourcetagger = sourcetagger
        print >>sys.stderr, "Loading Target Words " + targetwordsfile
        self.targetwords = loadtargetwords(targetwordsfile)
        self.classifiers = {}
        self.targetlang = targetlang
        self.contextsize = contextsize
        self.DOPOS = DOPOS
        self.DOLEMMAS = DOLEMMAS
        self.DOVOTER = DOVOTER
        # NOTE(review): 'exemplarweights' is not a constructor parameter; it
        # is read from module globals. Confirm it is always defined before
        # this class is instantiated.
        self.exemplarweights = exemplarweights
        self.outputdir = outputdir
        self.DOSCORE = DOSCORE
        self.testdir = testdir
        # Collect the per-lemma test files that actually exist on disk.
        testfiles = []
        for lemma, pos in self.targetwords:
            if os.path.exists(testdir + "/" + lemma + '.data'):
                testfiles.append(testdir + "/" + lemma + '.data')
            else:
                print >>sys.stderr, "WARNING: No testfile found for " + lemma + " (tried " + testdir + "/" + lemma + '.data)'
        self.testset = TestSet(testfiles)
        self.divergencefrombestoutputthreshold = divergencefrombestoutputthreshold
        self.timbloptions = timbloptions
        self.bagofwords = bagofwords
        self.bags = {}
        self.variableconfiguration = variableconfiguration
        if self.bagofwords or self.variableconfiguration:
            # Load the previously computed bag-of-word keys per (lemma, pos).
            for bagfile in glob.glob(outputdir + "/*.bag"):
                print >>sys.stderr, "Loading bag " + bagfile
                focuslemma, focuspos, _, _ = os.path.basename(bagfile).split(".")
                focuslemma = unicode(focuslemma, 'utf-8')
                self.bags[(focuslemma, focuspos)] = []
                f = codecs.open(bagfile, 'r', 'utf-8')
                for line in f:
                    fields = line.split("\t")
                    if not (fields[0], fields[1]) in self.bags[(focuslemma, focuspos)]:
                        self.bags[(focuslemma, focuspos)].append((fields[0], fields[1]))
                f.close()

    def run(self):
        """Extract features and classify every test instance of every lemma."""
        global WSDDIR
        if self.variableconfiguration:
            # Fail early if any lemma lacks a configuration entry.
            for lemma, pos in self.testset.lemmas():
                if not lemma in self.variableconfiguration:
                    raise Exception("No variable configuration passed for " + lemma)
        print >>sys.stderr, "Extracting features from testset"
        for lemma, pos in self.testset.lemmas():
            print >>sys.stderr, "Processing " + lemma.encode('utf-8')
            if self.variableconfiguration:
                print >>sys.stderr, "Loading variable configuration for " + lemma.encode('utf-8')
                self.contextsize, self.DOPOS, self.DOLEMMAS, self.bagofwords = self.variableconfiguration[lemma]
                print >>sys.stderr, "contextsize: ", self.contextsize
                print >>sys.stderr, "pos: ", self.DOPOS
                print >>sys.stderr, "lemma: ", self.DOLEMMAS
                print >>sys.stderr, "bag: ", self.bagofwords
            # Extend the base Timbl options with any parameter-search result.
            timbloptions = self.timbloptions
            if os.path.exists(self.outputdir + '/' + lemma + '.' + pos + '.' + self.targetlang + '.train.paramsearch'):
                o = paramsearch2timblargs(self.outputdir + '/' + lemma + '.' + pos + '.' + self.targetlang + '.train.paramsearch')
                timbloptions += " " + o
                print >>sys.stderr, "Parameter optimisation loaded: " + o
            else:
                print >>sys.stderr, "NOTICE: No parameter optimisation found!"
            print >>sys.stderr, "Instantiating classifier " + lemma.encode('utf-8') + " with options: " + timbloptions
            classifier = timbl.TimblClassifier(self.outputdir + '/' + lemma + '.' + pos + '.' + self.targetlang, timbloptions)
            out_best = codecs.open(self.outputdir + '/' + lemma + '.' + pos + '.best', 'w', 'utf-8')
            out_oof = codecs.open(self.outputdir + '/' + lemma + '.' + pos + '.oof', 'w', 'utf-8')
            oof_senses = []
            if self.DOVOTER:
                out_votertest = codecs.open(self.outputdir + '/' + lemma + '.' + pos + '.votertest', 'w', 'utf-8')
            for instancenum, (id, (leftcontext, head, rightcontext)) in enumerate(self.testset.instances(lemma, pos)):
                print >>sys.stderr, "--> " + lemma.encode('utf-8') + '.' + pos + " @" + str(instancenum + 1) + ": " + leftcontext.encode('utf-8') + " *" + head.encode('utf-8') + "* " + rightcontext.encode('utf-8')
                sourcewords_pretagged = leftcontext + ' ' + head + ' ' + rightcontext
                # BUGFIX: use the tagger handed to the constructor instead of
                # the module-level global 'sourcetagger'.
                sourcewords, sourcepostags, sourcelemmas = self.sourcetagger.process(sourcewords_pretagged.split(' '))
                sourcepostags = [x[0].lower() if x else "?" for x in sourcepostags]
                # Find new head position (may have moved due to tokenisation):
                # pick the occurrence of the head word closest to its original
                # index.
                origindex = len(leftcontext.split(' '))
                mindistance = 9999
                focusindex = -1
                for i, word in enumerate(sourcewords):
                    if word == head:
                        distance = abs(origindex - i)
                        if distance <= mindistance:
                            focusindex = i
                            mindistance = distance
                if focusindex == -1:
                    raise Exception("Focus word not found after tagging! This should not happen! head=" + head.encode('utf-8') + ",words=" + ' '.join(sourcewords).encode('utf-8'))
                sourcelemma = lemma  # sourcelemmas[focusindex] #tagger may be wrong
                sourcepos = 'n'  # sourcepostags[focusindex] #tagger may be wrong
                # Grab local context features in a window around the focus.
                features = []
                for j in range(focusindex - self.contextsize, focusindex + 1 + self.contextsize):
                    # NOTE(review): 'j > 0' (not 'j >= 0') means token 0 is
                    # always emitted as {NULL}; this must mirror the
                    # training-time extraction, so confirm before changing.
                    if j > 0 and j < len(sourcewords):
                        features.append(sourcewords[j])
                        if self.DOPOS:
                            if sourcepostags[j]:
                                features.append(sourcepostags[j])
                            else:
                                features.append("?")
                        if self.DOLEMMAS:
                            if sourcelemmas[j]:
                                features.append(sourcelemmas[j])
                            else:
                                features.append("?")
                    else:
                        features.append("{NULL}")
                        if self.DOPOS: features.append("{NULL}")
                        if self.DOLEMMAS: features.append("{NULL}")
                if self.bagofwords:
                    if (sourcelemma, sourcepos) in self.bags:
                        # One binary feature per bag key: does the (lemma, pos)
                        # key occur anywhere in the sentence besides the focus?
                        for keylemma, keypos in self.bags[(sourcelemma, sourcepos)]:
                            found = False
                            for j, w in enumerate(sourcewords):
                                if j != focusindex:
                                    if sourcelemmas[j] == keylemma and sourcepostags[j] == keypos:
                                        found = True
                                        break
                            # Write bag-of-word features
                            if found:
                                features.append("1")
                            else:
                                features.append("0")
                    else:
                        print >>sys.stderr, 'NOTICE: ' + sourcelemma.encode('utf-8') + ' ' + sourcepos + ' has no bag'
                print >>sys.stderr, " -- Classifier features: " + repr(features)
                bestsense, distribution, distance = classifier.classify(features)
                if not isinstance(bestsense, unicode): bestsense = unicode(bestsense, 'utf-8')
                # BUGFIX: pass self.targetlang rather than the module-level
                # global 'targetlang'.
                processresult(out_best, oof_senses, id, lemma, pos, self.targetlang, bestsense, distribution, distance, self.divergencefrombestoutputthreshold)
                if self.DOVOTER:
                    out_votertest.write(str(id) + "\t" + sourcewords[focusindex] + "\t" + bestsense + "\n")
            out_best.close()
            processresult_final(out_oof, oof_senses)
            out_oof.close()
            # BUGFIX: was 'if DOVOTER:' (module global); out_votertest only
            # exists when self.DOVOTER is set.
            if self.DOVOTER:
                out_votertest.close()
        if self.DOSCORE:
            self.score()

    def score(self):
        """Score the ``*.best`` and ``*.oof`` output with the task scorer."""
        global WSDDIR
        print >>sys.stderr, "Scoring"
        for lemma, pos in self.testset.lemmas():
            print >>sys.stderr, "Scoring " + lemma.encode('utf-8')
            # BUGFIX: use self.outputdir consistently (the original mixed in
            # the module-level global 'outputdir').
            cmd = 'perl ' + WSDDIR + '/ScorerTask3.pl ' + self.outputdir + '/' + lemma + '.' + pos + '.best' + ' ' + self.testdir + '/' + self.targetlang + '/' + lemma + '_gold.txt 2> ' + self.outputdir + '/' + lemma + '.' + pos + '.best.scorerr'
            r = os.system(cmd)
            if r != 0:
                print >>sys.stderr, "ERROR: SCORER FAILED! INSPECT " + self.outputdir + '/' + lemma + '.' + pos + '.best.scorerr -- Command was: ' + cmd
            cmd = 'perl ' + WSDDIR + '/ScorerTask3.pl ' + self.outputdir + '/' + lemma + '.' + pos + '.oof' + ' ' + self.testdir + '/' + self.targetlang + '/' + lemma + '_gold.txt -t oof 2> ' + self.outputdir + '/' + lemma + '.' + pos + '.oof.scorerr'
            r = os.system(cmd)
            if r != 0:
                print >>sys.stderr, "ERROR: SCORER FAILED! INSPECT " + self.outputdir + '/' + lemma + '.' + pos + '.oof.scorerr -- Command was: ' + cmd
        scorereport(self.outputdir)
def scorereport(outputdir):
f = codecs.open(outputdir + '/results','w','utf-8')
f.write('BEST RESULTS\n-------------\n')
rlist = []
plist = []
for filename in glob.glob(outputdir + '/*.best.results'):
lemma,pos = os.path.basename(filename).split('.')[:2]
f_in = open(filename,'r')
for line in f_in:
if line[:12] == "precision = ":
p = float(line[12:line.find(',')] )
r = float(line[line.find('recall | |
import argparse
import json
import logging
import subprocess
import yaml
import re
import collections
import os
import time
import requests
import urllib
from distutils.spawn import find_executable
import teuthology
from . import misc
from . import provision
from .config import config
from .lockstatus import get_status
log = logging.getLogger(__name__)
# Don't need to see connection pool INFO messages
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARNING)
def is_vpm(name):
    """Return True if *name* refers to a VPM (virtual) machine.

    A machine counts as virtual when the substring ``'vpm'`` appears
    anywhere in its (possibly canonicalized) hostname. Defined with ``def``
    rather than an assigned lambda (PEP 8 E731); the callable name and
    behavior are unchanged.
    """
    return 'vpm' in name
def get_distro_from_downburst():
    """
    Return a table of valid distros.

    If downburst is in path use it. If either downburst is unavailable,
    or if downburst is unable to produce a json list, then use a default
    table.
    """
    default_table = {u'rhel_minimal': [u'6.4', u'6.5'],
                     u'fedora': [u'17', u'18', u'19', u'20'],
                     u'centos': [u'6.3', u'6.4', u'6.5', u'7.0'],
                     u'opensuse': [u'12.2'],
                     u'rhel': [u'6.3', u'6.4', u'6.5', u'7.0', u'7beta'],
                     u'centos_minimal': [u'6.4', u'6.5'],
                     u'ubuntu': [u'8.04(hardy)', u'9.10(karmic)',
                                 u'10.04(lucid)', u'10.10(maverick)',
                                 u'11.04(natty)', u'11.10(oneiric)',
                                 u'12.04(precise)', u'12.10(quantal)',
                                 u'13.04(raring)', u'13.10(saucy)',
                                 u'14.04(trusty)', u'utopic(utopic)'],
                     u'sles': [u'11-sp2'],
                     u'debian': [u'6.0', u'7.0']}
    executable_cmd = find_executable('downburst')
    if executable_cmd:
        # Ask downburst itself for the authoritative distro table.
        try:
            return json.loads(
                subprocess.check_output([executable_cmd, 'list-json']))
        except (subprocess.CalledProcessError, OSError):
            log.exception("Error calling downburst!")
    else:
        log.warn("Downburst not found!")
    # Either downburst is missing or it failed: fall back to the table above.
    log.info('Using default values for supported os_type/os_version')
    return default_table
def vps_version_or_type_valid(machine_type, os_type, os_version):
    """
    Check os-type and os-version parameters when locking a vps.
    Os-type will always be set (defaults to ubuntu).

    In the case where downburst does not handle list-json (an older version
    of downburst, for instance), a message is printed and this checking
    is skipped (so that this code should behave as it did before this
    check was added).
    """
    # Only vps machines need distro validation.
    if machine_type != 'vps':
        return True
    # We'll use the defaults provided by provision.create_if_vm later on
    # during provisioning when either value is unset.
    if os_type is None or os_version is None:
        return True
    distro_table = get_distro_from_downburst()
    if os_type not in distro_table:
        log.error("os-type '%s' is invalid", os_type)
        return False
    if not validate_distro_version(os_version, distro_table[os_type]):
        log.error("os-version '%s' is invalid", os_version)
        return False
    return True
def validate_distro_version(version, supported_versions):
    """
    Return True if the version is valid, False otherwise.

    For Ubuntu, supported version entries may be of the form
    '12.04(precise)', in which case either the release number ('12.04')
    or the codename ('precise') is accepted.

    Fix: the original fell off the end and returned an implicit None when
    no entry matched; it now returns an explicit False.
    """
    # Exact match against a plain entry (e.g. '6.4').
    if version in supported_versions:
        return True
    for entry in supported_versions:
        parts = entry.split('(')
        if len(parts) == 2:
            # 'number(codename)': accept either half.
            number = parts[0]
            codename = parts[1].rstrip(')')
            if version == number or version == codename:
                return True
    return False
def get_statuses(machines):
    """Return lock-server status dicts.

    When *machines* is non-empty, each machine is canonicalized and looked
    up individually (unknown machines are logged and skipped); otherwise the
    status of every lock known to the server is returned.
    """
    if not machines:
        return list_locks()
    statuses = []
    for machine in machines:
        machine = misc.canonicalize_hostname(machine)
        status = get_status(machine)
        if status:
            statuses.append(status)
        else:
            # The lock server has no record of this machine; report it but
            # keep processing the rest.
            log.error("Lockserver doesn't know about machine: %s" %
                      machine)
    return statuses
def main(ctx):
    """Entry point for the lock CLI (Python 2 code).

    Dispatches on the parsed arguments in *ctx*: listing/brief/target output,
    summary, locking, unlocking, bulk locking (--num-to-lock), and updating
    lock description/status. Returns a process exit code (0 on success).

    :param ctx: argparse-style namespace with the command-line options
    :return: int exit status
    """
    if ctx.verbose:
        teuthology.log.setLevel(logging.DEBUG)
    misc.read_config(ctx)
    ret = 0
    user = ctx.owner
    # Canonicalize every machine name given on the command line.
    machines = [misc.canonicalize_hostname(m, user=False)
                for m in ctx.machines]
    machines_to_update = []
    # Optionally merge in machines from a targets YAML file.
    # NOTE(review): uses the Python 2 'file' builtin and dict.iterkeys.
    if ctx.targets:
        try:
            with file(ctx.targets) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    if 'targets' in new:
                        for t in new['targets'].iterkeys():
                            machines.append(t)
        except IOError as e:
            raise argparse.ArgumentTypeError(str(e))
    # Validate mutually-exclusive / required option combinations up front.
    if ctx.f:
        assert ctx.lock or ctx.unlock, \
            '-f is only supported by --lock and --unlock'
    if machines:
        assert ctx.lock or ctx.unlock or ctx.list or ctx.list_targets \
            or ctx.update, \
            'machines cannot be specified with that operation'
    else:
        assert ctx.num_to_lock or ctx.list or ctx.list_targets or \
            ctx.summary or ctx.brief, \
            'machines must be specified for that operation'
    if ctx.all:
        assert ctx.list or ctx.list_targets or ctx.brief, \
            '--all can only be used with --list, --list-targets, and --brief'
        assert ctx.owner is None, \
            '--all and --owner are mutually exclusive'
        assert not machines, \
            '--all and listing specific machines are incompatible'
    if ctx.num_to_lock:
        assert ctx.machine_type, \
            'must specify machine type to lock'
    if ctx.brief or ctx.list or ctx.list_targets:
        assert ctx.desc is None, '--desc does nothing with --list/--brief'

        # we may need to update host keys for vms. Don't do it for
        # every vm; however, update any vms included in the list given
        # to the CLI (machines), or any owned by the specified owner or
        # invoking user if no machines are specified.
        vmachines = []
        statuses = get_statuses(machines)
        owner = ctx.owner or misc.get_user()
        for machine in statuses:
            if machine['is_vm'] and machine['locked'] and \
                    (machines or machine['locked_by'] == owner):
                vmachines.append(machine['name'])
        if vmachines:
            log.info("updating host keys for %s", ' '.join(sorted(vmachines)))
            do_update_keys(vmachines)
            # get statuses again to refresh any updated keys
            statuses = get_statuses(machines)
        if statuses:
            # Narrow the status list by each filter the user supplied.
            if ctx.machine_type:
                statuses = [_status for _status in statuses
                            if _status['machine_type'] == ctx.machine_type]
            # Default to the invoking user's locks unless machines or --all
            # were given explicitly.
            if not machines and ctx.owner is None and not ctx.all:
                ctx.owner = misc.get_user()
            if ctx.owner is not None:
                statuses = [_status for _status in statuses
                            if _status['locked_by'] == ctx.owner]
            if ctx.status is not None:
                statuses = [_status for _status in statuses
                            if _status['up'] == (ctx.status == 'up')]
            if ctx.locked is not None:
                statuses = [_status for _status in statuses
                            if _status['locked'] == (ctx.locked == 'true')]
            if ctx.desc is not None:
                statuses = [_status for _status in statuses
                            if _status['description'] == ctx.desc]
            if ctx.desc_pattern is not None:
                statuses = [_status for _status in statuses
                            if _status['description'] is not None and
                            _status['description'].find(ctx.desc_pattern) >= 0]
            # When listing, only show the vm_host's name, not every detail
            for s in statuses:
                if not s.get('is_vm', False):
                    continue
                vm_host_name = s.get('vm_host', dict())['name']
                if vm_host_name:
                    s['vm_host'] = vm_host_name
            if ctx.list:
                # Full JSON dump of the (filtered) statuses.
                print json.dumps(statuses, indent=4)
            elif ctx.brief:
                # One human-readable line per machine, sorted by name.
                for s in sorted(statuses, key=lambda s: s.get('name')):
                    locked = "un" if s['locked'] == 0 else " "
                    mo = re.match('\w+@(\w+?)\..*', s['name'])
                    host = mo.group(1) if mo else s['name']
                    print '{host} {locked}locked {owner} "{desc}"'.format(
                        locked=locked, host=host,
                        owner=s['locked_by'], desc=s['description'])
            else:
                # --list-targets: emit a YAML 'targets' fragment.
                frag = {'targets': {}}
                for f in statuses:
                    frag['targets'][f['name']] = f['ssh_pub_key']
                print yaml.safe_dump(frag, default_flow_style=False)
        else:
            log.error('error retrieving lock statuses')
            ret = 1
    elif ctx.summary:
        do_summary(ctx)
        return 0
    elif ctx.lock:
        # Refuse to lock vps machines with an invalid distro request.
        if not vps_version_or_type_valid(ctx.machine_type, ctx.os_type,
                                         ctx.os_version):
            log.error('Invalid os-type or version detected -- lock failed')
            return 1
        for machine in machines:
            if not lock_one(machine, user, ctx.desc):
                ret = 1
                # Without -f, stop at the first failure.
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
                provision.create_if_vm(ctx, machine)
    elif ctx.unlock:
        if ctx.owner is None and user is None:
            user = misc.get_user()
        # If none of them are vpm, do them all in one shot
        if not filter(is_vpm, machines):
            res = unlock_many(machines, user)
            return 0 if res else 1
        for machine in machines:
            if not unlock_one(ctx, machine, user):
                ret = 1
                # Without -f, stop at the first failure.
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
    elif ctx.num_to_lock:
        # Bulk-lock N machines of the requested type.
        result = lock_many(ctx, ctx.num_to_lock, ctx.machine_type, user,
                           ctx.desc, ctx.os_type, ctx.os_version)
        if not result:
            ret = 1
        else:
            machines_to_update = result.keys()
            if ctx.machine_type == 'vps':
                shortnames = ' '.join(
                    [misc.decanonicalize_hostname(name) for name in
                     result.keys()]
                )
                if len(result) < ctx.num_to_lock:
                    # Partial success counts as failure: release what we got.
                    log.error("Locking failed.")
                    for machn in result:
                        unlock_one(ctx, machn)
                    ret = 1
                else:
                    log.info("Successfully Locked:\n%s\n" % shortnames)
                    log.info(
                        "Unable to display keys at this time (virtual " +
                        "machines are booting).")
                    log.info(
                        "Please run teuthology-lock --list-targets %s once " +
                        "these machines come up.",
                        shortnames)
            else:
                print yaml.safe_dump(
                    dict(targets=result),
                    default_flow_style=False)
    elif ctx.update:
        assert ctx.desc is not None or ctx.status is not None, \
            'you must specify description or status to update'
        assert ctx.owner is None, 'only description and status may be updated'
        machines_to_update = machines
    # Apply description/status updates to everything we locked, unlocked,
    # or were asked to update.
    if ctx.desc is not None or ctx.status is not None:
        for machine in machines_to_update:
            update_lock(machine, ctx.desc, ctx.status)
    return ret
def lock_many(ctx, num, machine_type, user=None, description=None,
os_type=None, os_version=None, arch=None):
if user is None:
user = misc.get_user()
if not vps_version_or_type_valid(ctx.machine_type, os_type, os_version):
log.error('Invalid os-type or version detected -- lock failed')
return
# In the for loop below we can safely query for all bare-metal machine_type
# values at once. So, if we're being asked for 'plana,mira,burnupi', do it
# all in one shot. If we are passed 'plana,mira,burnupi,vps', do one query
# for 'plana,mira,burnupi' and one for 'vps'
machine_types_list = misc.get_multi_machine_types(machine_type)
if machine_types_list == ['vps']:
machine_types = machine_types_list
elif 'vps' in machine_types_list:
machine_types_non_vps = list(machine_types_list)
machine_types_non_vps.remove('vps')
machine_types_non_vps = '|'.join(machine_types_non_vps)
machine_types = [machine_types_non_vps, 'vps']
else:
machine_types_str = '|'.join(machine_types_list)
machine_types = [machine_types_str, ]
for machine_type in machine_types:
uri = os.path.join(config.lock_server, 'nodes', 'lock_many', '')
data = dict(
locked_by=user,
count=num,
machine_type=machine_type,
description=description,
)
# Only query for os_type/os_version if non-vps, since in that case we
# just create them.
if machine_type != 'vps':
if os_type:
data['os_type'] = os_type
if os_version:
data['os_version'] = os_version
if arch:
data['arch'] = arch
log.debug("lock_many request: %s", repr(data))
response = requests.post(
uri,
data=json.dumps(data),
headers={'content-type': | |
"jolt",
"reforms",
"gucci",
"overcoat",
"bishops",
"conscientious",
"venetian",
"evey",
"calder",
"logistics",
"ahmet",
"dieter",
"rampant",
"were-",
"waverly",
"whitehall",
"hock",
"chrome",
"sandman",
"balancing",
"canals",
"oxen",
"superhuman",
"ein",
"rogues",
"'don",
"dossier",
"pikachu",
"waxed",
"infested",
"benz",
"kolchak",
"folklore",
"double-cross",
"turban",
"carriages",
"warm-up",
"lanka",
"ade",
"recycled",
"alden",
"jean-pierre",
"busiest",
"blur3",
"courteous",
"baptize",
"bowing",
"c.o.",
"ventura",
"pamphlets",
"amiss",
"michaela",
"hex",
"swapping",
"detergent",
"afflicted",
"dethklok",
"overalls",
"reboot",
"pining",
"nazareth",
"insubordination",
"sputters",
"slammer",
"vices",
"dungeons",
"binary",
"hesitating",
"attendants",
"fined",
"sanford",
"volkswagen",
"excrement",
"dictates",
"epstein",
"swims",
"ethic",
"boulders",
"lockwood",
"bucharest",
"twos",
"cornelia",
"founders",
"tb",
"locksmith",
"circulating",
"seine",
"rattlesnake",
"toying",
"cardiff",
"chitchat",
"chimpanzee",
"envied",
"nocturnal",
"peppino",
"galleries",
"partisan",
"cluck",
"falco",
"hobson",
"clandestine",
"doctorate",
"blackadder",
"smudge",
"bona",
"pressuring",
"kindest",
"moray",
"cuddy",
"beastly",
"agile",
"sodas",
"visas",
"portia",
"specter",
"det",
"nabbed",
"integration",
"granville",
"bayou",
"sorceress",
"harvesting",
"bakers",
"litigation",
"misa",
"disruption",
"shellfish",
"kerala",
"grasping",
"prakash",
"apostles",
"kraft",
"extermination",
"arsonist",
"morphin",
"gathers",
"tsui",
"tubby",
"refreshment",
"bony",
"fifty-fifty",
"tuttle",
"diligent",
"handiwork",
"disguises",
"sneaks",
"installing",
"betrays",
"malhotra",
"transvestite",
"darth",
"secular",
"newer",
"watery",
"pecker",
"surrendering",
"intestine",
"md",
"repentance",
"push-ups",
"burdens",
"mont",
"plums",
"kolya",
"sulk",
"i.v.",
"arden",
"cleavage",
"blunder",
"panty",
"detox",
"thyself",
"anticipating",
"scoffing",
"recognizing",
"up-tempo",
"judgments",
"headmistress",
"goddamnit",
"telepathic",
"spineless",
"astrology",
"reza",
"dopey",
"drummond",
"'bye",
"trusty",
"loveliest",
"ditching",
"reassigned",
"elly",
"faber",
"pusher",
"col",
"chucky",
"dashed",
"sisko",
"cashmere",
"cams",
"etcetera",
"mistletoe",
"sorcery",
"teas",
"innkeeper",
"maidens",
"seol",
"delinda",
"tranquility",
"mais",
"indicator",
"phaser",
"nutritious",
"rb",
"mutated",
"varied",
"in-house",
"heightened",
"blatant",
"proclamation",
"guerrillas",
"trident",
"keepin",
"collin",
"corvette",
"ticklish",
"gaping",
"ideally",
"oars",
"scandals",
"vengeful",
"widowed",
"brit",
"emmanuel",
"post-mortem",
"salted",
"surpassed",
"g.i.",
"exert",
"poorest",
"systematically",
"evac",
"radically",
"purify",
"vibrate",
"announces",
"transgender",
"lofty",
"sequences",
"nance",
"touché",
"addy",
"tulips",
"illustrated",
"unlocking",
"shudders",
"nurturing",
"merrill",
"p.i.",
"generates",
"disloyal",
"aggressively",
"skywalker",
"asbestos",
"haru",
"improv",
"ems",
"'lord",
"hayat",
"unpunished",
"18-year-old",
"displaced",
"andersen",
"bowen",
"joão",
"mi5",
"thundering",
"semi",
"crutch",
"silo",
"screw-up",
"yokohama",
"rubs",
"dipshit",
"levy",
"symmetry",
"inflammation",
"unaccounted",
"nikola",
"licorice",
"applauds",
"homeroom",
"kabaddi",
"grievous",
"way-",
"counselling",
"ortega",
"trample",
"nani",
"tipsy",
"rigorous",
"brewer",
"hurdle",
"chisel",
"kiran",
"bistro",
"hao",
"wainwright",
"chests",
"newkirk",
"bottomless",
"combustion",
"erupted",
"autobots",
"clifton",
"forwarded",
"katrine",
"tidings",
"stubbornness",
"dix",
"pelican",
"matchmaker",
"howell",
"geiger",
"moles",
"mucho",
"binge",
"piping",
"eight-year-old",
"tiff",
"penalties",
"enables",
"irrigation",
"hunchback",
"margins",
"provenza",
"relocated",
"centres",
"pitches",
"five-year",
"vaults",
"hyacinth",
"gable",
"swirling",
"filly",
"putty",
"revisit",
"fanatics",
"concerto",
"double-crossed",
"unbeatable",
"rupaul",
"matey",
"kemal",
"carthage",
"loophole",
"replies",
"beeper",
"injure",
"cookbook",
"align",
"sanctity",
"spilt",
"whence",
"geographic",
"admirers",
"mislead",
"a-team",
"treadmill",
"ferrara",
"fated",
"resurrect",
"rina",
"fetal",
"coca",
"qaeda",
"nona",
"hoots",
"tampons",
"crept",
"mania",
"unplug",
"veggie",
"'oeuvres",
"keyes",
"subordinate",
"timely",
"telugu",
"proofs",
"ivanovich",
"appropriately",
"vendors",
"recap",
"roo",
"dinky",
"accountants",
"archaeology",
"mikael",
"strangler",
"genghis",
"freshmen",
"nic",
"nothingness",
"katarina",
"authorised",
"brightness",
"maximus",
"payson",
"bogart",
"urinating",
"dok",
"violets",
"compromises",
"defences",
"ibm",
"scanlon",
"corp",
"compulsory",
"seasoning",
"taft",
"famously",
"surfaced",
"gibraltar",
"cuckold",
"y-yeah",
"renée",
"uneven",
"reopened",
"blip",
"valhalla",
"midori",
"similarity",
"frodo",
"gizmo",
"swindler",
"indulgence",
"wat",
"co-op",
"topper",
"alligators",
"garry",
"dives",
"minneapolis",
"reeling",
"bene",
"outlets",
"siamese",
"rupee",
"unrealistic",
"endangering",
"gonzalez",
"gist",
"adventurer",
"prosthetic",
"bayonet",
"julianne",
"bismarck",
"dresden",
"paro",
"next-door",
"encoded",
"ruff",
"luce",
"supermodel",
"pollock",
"calmer",
"chestnuts",
"hélène",
"parsley",
"thought-",
"overgrown",
"storming",
"rahl",
"run-in",
"alfa",
"sicker",
"maru",
"ohhhh",
"'where",
"abbas",
"inge",
"resilient",
"ly",
"monet",
"abominable",
"empires",
"revise",
"vets",
"arches",
"condor",
"apaches",
"blackouts",
"advertised",
"quixote",
"labored",
"yee-haw",
"excavation",
"salud",
"ironed",
"francie",
"bartholomew",
"laboratories",
"twenty-three",
"riddled",
"flung",
"yanked",
"sadder",
"neurons",
"licenses",
"ding-dong",
"communicated",
"nutshell",
"scofield",
"savor",
"cbc",
"bedrock",
"tempered",
"1920s",
"kenzi",
"bunty",
"pol",
"cleansed",
"favourites",
"50th",
"rigging",
"dawned",
"inconclusive",
"graph",
"twigs",
"callous",
"tyr",
"bosch",
"embezzlement",
"decapitated",
"seinfeld",
"bannister",
"mccain",
"clusters",
"annex",
"samuels",
"royalties",
"seville",
"admittedly",
"gimmick",
"two-thirds",
"lobos",
"verb",
"'yes",
"fightin",
"mistaking",
"abrasions",
"frazer",
"stow",
"unchanged",
"andes",
"neglecting",
"daggers",
"hypnotic",
"distributing",
"stonehenge",
"hailing",
"vapor",
"clemens",
"blanchard",
"waive",
"lavish",
"cultivated",
"sunscreen",
"buffoon",
"scooby-doo",
"hooded",
"obstructing",
"bonuses",
"rapture",
"olympia",
"seenu",
"grate",
"inconsiderate",
"armchair",
"floppy",
"loitering",
"clement",
"drainage",
"sho",
"mongolia",
"woodland",
"looted",
"scruples",
"frisky",
"rubies",
"sovereignty",
"ofthis",
"s2",
"recovers",
"betrayer",
"promoter",
"ugliness",
"cancellation",
"bribing",
"merged",
"aggie",
"barons",
"regression",
"susana",
"punishable",
"ganges",
"bachelors",
"forgetful",
"asha",
"pilate",
"disgruntled",
"fireflies",
"deceitful",
"splat",
"movers",
"mended",
"plating",
"misunderstandings",
"iittle",
"cryptic",
"him.",
"monika",
"jefe",
"marmalade",
"coals",
"ignores",
"rhythms",
"sculpted",
"vicente",
"pounce",
"lesley",
"all-star",
"completes",
"pim",
"assert",
"ht",
"favored",
"curled",
"pelvic",
"pansy",
"requiring",
"entities",
"tenor",
"measly",
"tino",
"hoodlums",
"indie",
"ladders",
"tasteful",
"lassiter",
"shaker",
"maritime",
"absorbing",
"mclaren",
"j.b.",
"splendor",
"sax",
"denounced",
"antics",
"hailed",
"meanest",
"sprinkles",
"trampoline",
"swag",
"roaches",
"othello",
"profiling",
"lifesaver",
"slurring",
"platonic",
"hoi",
"cretin",
"hyenas",
"harshly",
"camila",
"krypton",
"autobiography",
"humorous",
"seamus",
"hussy",
"nodding",
"stinkin",
"comp",
"lyman",
"co-operate",
"keenan",
"fiscal",
"baltic",
"hallways",
"gracefully",
"comforted",
"promenade",
"champs",
"exorcist",
"courtesan",
"clubbing",
"alloy",
"drive-in",
"right-handed",
"guitarist",
"suede",
"roasting",
"kun",
"simran",
"plaid",
"rollers",
"nyu",
"flannel",
"seventy-five",
"niche",
"census",
"mata",
"braver",
"verona",
"hysterically",
"shriek",
"klan",
"bathrobe",
"atrocious",
"calleigh",
"borough",
"chalice",
"frazier",
"airspace",
"hesitant",
"aborted",
"jeweler",
"rhetoric",
"brice",
"strickland",
"seamstress",
"feverish",
"saffron",
"storing",
"mla",
"regeneration",
"augusta",
"scorpio",
"algiers",
"forbade",
"hushed",
"suitors",
"governing",
"dotty",
"matured",
"tropics",
"threes",
"trapping",
"chantal",
"swann",
"warrick",
"slots",
"dune",
"horseshit",
"extinguish",
"juggle",
"yon",
"goguryeo",
"mobilize",
"trucking",
"berman",
"hakeem",
"framework",
"kan",
"fräulein",
"attackers",
"prenup",
"orphaned",
"parry",
"respective",
"dario",
"prerogative",
"chipper",
"ramiro",
"woulda",
"attenborough",
"mortified",
"gérard",
"bitcoin",
"crusoe",
"axes",
"pickpocket",
"on-line",
"flammable",
"apos",
"kickin",
"leicester",
"archery",
"blanc",
"henchmen",
"talon",
"hardison",
"t.v.",
"purses",
"glucose",
"whims",
"muff",
"cauldron",
"roundabout",
"gorman",
"estranged",
"phobia",
"aang",
"vanishes",
"watchers",
"banning",
"navel",
"ascension",
"piazza",
"goth",
"projectile",
"fueled",
"brothels",
"jitters",
"crybaby",
"upstream",
"notification",
"bennie",
"a-bomb",
"fabricated",
"hi.",
"grumbles",
"ck",
"ish",
"regis",
"inga",
"textile",
"realism",
"dominance",
"circulate",
"autistic",
"sickly",
"ambulances",
"sac",
"siri",
"spiritually",
"recreational",
"forwarding",
"tetanus",
"fliers",
"prowl",
"quot",
"atop",
"itches",
"meningitis",
"post-op",
"mcgarrett",
"emailed",
"dimes",
"suburb",
"hartman",
"aztec",
"mocha",
"accursed",
"dmitri",
"sarajevo",
"kern",
"launcher",
"improvised",
"defying",
"sofie",
"mohammad",
"halley",
"ducking",
"alexandre",
"uglier",
"mitt",
"taurus",
"warheads",
"apps",
"inspected",
"stuck-up",
"undoing",
"counties",
"perm",
"shuffling",
"half-brother",
"vanya",
"shifu",
"takumi",
"touya",
"there`s",
"bengali",
"weakling",
"grampa",
"humid",
"infants",
"barman",
"straits",
"annulment",
"qiu",
"mapping",
"spotting",
"eyed",
"zing",
"decode",
"umbilical",
"blackboard",
"probes",
"cutlery",
"booger",
"hasta",
"shoelaces",
"redeemed",
"morgana",
"bleedin",
"hue",
"exterminator",
"aristocracy",
"1ch00ffffff",
"wavelength",
"merry-go-round",
"replacements",
"bongo",
"fittest",
"glum",
"farnon",
"deanna",
"gloucester",
"drive-by",
"perilous",
"snooze",
"commandos",
"wasps",
"abhi",
"jimmie",
"won`t",
"full-scale",
"processor",
"pendleton",
"ascertain",
"deadliest",
"reproductive",
"prophecies",
"instinctively",
"commencing",
"mined",
"reassured",
"dislikes",
"jokers",
"phasers",
"strayed",
"feller",
"templeton",
"all-powerful",
"curling",
"winthrop",
"monoxide",
"xin",
"rankin",
"rolls-royce",
"armada",
"overreact",
"twirl",
"kits",
"'reilly",
"hanks",
"fountains",
"sawdust",
"firefly",
"interstellar",
"subdue",
"massimo",
"fabrics",
"simulate",
"libraries",
"arbitrary",
"petrov",
"ratchet",
"vascular",
"poldark",
"coos",
"baby-sitter",
"handmade",
"ahjumma",
"bluebell",
"five-star",
"ultraviolet",
"obesity",
"sensations",
"deem",
"higgs",
"charissa",
"reigns",
"dub",
"bounces",
"nicaragua",
"dawes",
"self-confidence",
"shrek",
"basque",
"burrow",
"pawned",
"boardwalk",
"poultry",
"scrapbook",
"fantasize",
"watered",
"offending",
"roswell",
"magdalena",
"overkill",
"sodom",
"antelope",
"discord",
"henna",
"pigsty",
"aristocrats",
"downstream",
"undies",
"dashboard",
"chez",
"inconsistent",
"wickedness",
"bok",
"disagreed",
"susannah",
"piccolo",
"vila",
"soto",
"would-be",
"pauper",
"oxide",
"cauliflower",
"roxton",
"fiddler",
"ivo",
"matteo",
"right-wing",
"nitro",
"guacamole",
"branson",
"cosette",
"pawns",
"cardinals",
"tailed",
"verde",
"vd",
"rikers",
"fiji",
"extinguished",
"enquire",
"prude",
"lik",
"boundless",
"climbers",
"'s-his-name",
"romantically",
"hefner",
"strands",
"arrivals",
"hula",
"combed",
"sou",
"corresponding",
"kindred",
"repel",
"pil",
"worst-case",
"genoa",
"durham",
"squish",
"shopkeeper",
"grownups",
"nik",
"showroom",
"warts",
"overdoing",
"laxmi",
"laundromat",
"small-town",
"humanoid",
"dreadfully",
"cheery",
"shards",
"shenanigans",
"std",
"rhythmically",
"françoise",
"ronaldo",
"hbo",
"atticus",
"bligh",
"masons",
"encourages",
"barrage",
"blockhead",
"sealing",
"i.a.",
"annihilated",
"glover",
"harvested",
"trisha",
"overloaded",
"narc",
"tulsa",
"persia",
"spills",
"there.",
"timo",
"taxpayer",
"ser",
"menacing",
"clippers",
"flirted",
"spiderman",
"foreboding",
"exhilarating",
"understudy",
"cv",
"maman",
"castile",
"tact",
"schematics",
"roseanne",
"smarts",
"provision",
"mccartney",
"manga",
"housed",
"doghouse",
"césar",
"milhouse",
"anchovies",
"ny",
"fab",
"warbling",
"ofcourse",
"huntington",
"ghouls",
"applicants",
"tigger",
"stringer",
"critters",
"topher",
"yemen",
"vantage",
"canadians",
"abandonment",
"theology",
"commemorate",
"compressions",
"nerdy",
"aegisshi",
"raleigh",
"frenchmen",
"ld",
"nourishment",
"sender",
"nursed",
"three-day",
"gulls",
"argentine",
"oan",
"consciously",
"tenner",
"waiver",
"magnetism",
"woodwork",
"remission",
"nils",
"hooligan",
"dysfunction",
"hijacking",
"dickinson",
"vanquished",
"widen",
"mobility",
"rafi",
"microphones",
"provisional",
"hamper",
"mcallister",
"membrane",
"fund-raiser",
"anarchists",
"horst",
"hahaha",
"lecturer",
"tampon",
"maxi",
"sanatorium",
"tangle",
"hearted",
"gabriella",
"succumb",
"scab",
"dissatisfied",
"cali",
"butting",
"lain",
"presentable",
"prompted",
"nietzsche",
"'tjust",
"boating",
"eichmann",
"relates",
"bashir",
"swedes",
"trumps",
"yuk",
"hypothermia",
"sasquatch",
"binder",
"unlocks",
"lookie",
"potions",
"insure",
"bohemian",
"made-up",
"ilya",
"skylar",
"petrovich",
"jester",
"excites",
"uniquely",
"cris",
"regulate",
"factions",
"irreplaceable",
"pied",
"smithers",
"unofficially",
"canterbury",
"shank",
"alpine",
"prowess",
"fernandez",
"trashy",
"blower",
"tenderly",
"depicted",
"but-but",
"carlyle",
"disturbs",
"mussels",
"mali",
"sigma",
"croissant",
"charades",
"mince",
"kilograms",
"homies",
"boasting",
"dined",
"xanax",
"high-class",
"passageway",
"left-hand",
"ranting",
"pixie",
"pharaohs",
"nitrate",
"hyperspace",
"insurgents",
"ferengi",
"yiddish",
"trends",
"elevation",
"figaro",
"litres",
"pastime",
"altering",
"fricking",
"ussr",
"brie",
"spec",
"abolished",
"earthlings",
"evict",
"janeiro",
"pricey",
"mending",
"pesky",
"turbulent",
"attributes",
"dο",
"nessa",
"downey",
"gerda",
| |
idxs, dnums, slice_sizes: lax.gather(op, idxs, dimension_numbers=dnums, slice_sizes=slice_sizes),
[RandArg(shape, np.float32),
idxs, StaticArg(dnums), StaticArg(slice_sizes)])
for shape, idxs, dnums, slice_sizes in [
((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
((10, 5), np.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
]
)
def _make_scatter_harness(name, *, shape=(5,), f_lax=lax.scatter_min,
                          indices_are_sorted=False, unique_indices=False,
                          scatter_indices=np.array([[0], [2]]),
                          update_shape=(2,), dtype=np.float32,
                          dimension_numbers=((), (0,), (0,))):
  """Builds a Harness exercising a lax scatter-style primitive.

  Args:
    name: short label distinguishing this group of harnesses.
    shape: shape of the operand to scatter into.
    f_lax: the scatter variant under test (e.g. lax.scatter_min).
    indices_are_sorted / unique_indices: flags forwarded to f_lax.
    scatter_indices: the static index array for the scatter.
    update_shape: shape of the updates array.
    dtype: dtype of both operand and updates.
    dimension_numbers: 3-tuple unpacked into lax.ScatterDimensionNumbers.
  """
  dnums = lax.ScatterDimensionNumbers(*dimension_numbers)
  # Build the descriptive harness name; spaces are stripped so it remains a
  # single token usable as a test id.
  name_parts = [
      name,
      f"fun={f_lax.__name__}",
      f"shape={jtu.format_shape_dtype_string(shape, dtype)}",
      f"scatterindices={scatter_indices.tolist()}",
      f"updateshape={update_shape}",
      f"updatewindowdims={dnums.update_window_dims}",
      f"insertedwindowdims={dnums.inserted_window_dims}",
      f"scatterdimstooperanddims={dnums.scatter_dims_to_operand_dims}",
      f"indicesaresorted={indices_are_sorted}",
      f"uniqueindices={unique_indices}",
  ]
  harness_name = "_".join(name_parts).replace(" ", "")
  # Bind the flag arguments once; the harness supplies the array arguments.
  scatter_fn = partial(f_lax, indices_are_sorted=indices_are_sorted,
                       unique_indices=unique_indices)
  return Harness(harness_name,
                 scatter_fn,
                 [RandArg(shape, dtype), StaticArg(scatter_indices),
                  RandArg(update_shape, dtype), StaticArg(dnums)],
                 f_lax=f_lax,
                 shape=shape,
                 dtype=dtype,
                 scatter_indices=scatter_indices,
                 update_shape=update_shape,
                 dimension_numbers=dnums,
                 indices_are_sorted=indices_are_sorted,
                 unique_indices=unique_indices)
# Scatter coverage data: a flat tuple of Harness objects, built by
# concatenating one group per parameter being varied. Each group is labeled
# by the trailing comment on its `tuple(` call; the harness names and
# parameter sets here are test-suite data, so their exact values matter.
lax_scatter = tuple( # Validate dtypes
  _make_scatter_harness("dtypes", dtype=dtype)
  for dtype in jtu.dtypes.all
) + tuple( # Validate f_lax/update_jaxpr
  _make_scatter_harness("update_function", f_lax=f_lax)
  # We explicitly decide against testing lax.scatter, as its reduction function
  # is lambda x, y: y, which is not commutative and thus makes results
  # non-deterministic when an index into the operand is updated several times.
  for f_lax in [
    lax.scatter_add,
    lax.scatter_max,
    lax.scatter_mul
  ]
) + tuple( # Validate shapes, dimension numbers and scatter indices
  _make_scatter_harness("shapes_and_dimension_numbers", shape=shape,
                        update_shape=update_shape,
                        scatter_indices=np.array(scatter_indices),
                        dimension_numbers=dimension_numbers)
  for shape, scatter_indices, update_shape, dimension_numbers in [
    ((10,), [[0], [0], [0]], (3, 2), ((1,), (), (0,))),
    ((10, 5), [[0], [2], [1]], (3, 3), ((1,), (0,), (0,)))
  ]
) + tuple ( # Validate sorted indices
  [_make_scatter_harness("indices_are_sorted", indices_are_sorted=True)]
) + tuple( # Validate unique_indices
  _make_scatter_harness("unique_indices", unique_indices=unique_indices)
  # `unique_indices` does not affect correctness, only performance, and thus
  # does not need to be tested here. If/when it will make sense to add a test
  # with `unique_indices` = True, particular care will have to be taken with
  # regards to the choice of parameters, as the results are only predictable
  # when all the indices to be updated are pairwise non-overlapping. Identifying
  # such cases is non-trivial.
  for unique_indices in [False]
)
# Harnesses for paths where XLA is disabled: lax.pad with a negative edge
# pad, which the non-XLA lowering must handle specially.
disable_xla = tuple(
  Harness("_pad",
          lax.pad,
          [RandArg(shape, dtype), np.array(0, dtype), StaticArg(pads)],
          shape=shape,
          dtype=dtype,
          pads=pads)
  for shape in [(2, 3)]
  for dtype in [np.float32]
  for pads in [
    [(-1, 0, 0), (0, 0, 0)]
  ]
)
# lax.pad coverage over all dtypes; each `pads` entry is a per-dimension
# (low, high, interior) triple. Small random values keep numeric noise down.
lax_pad = tuple(
  Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_pads={pads}",
          lax.pad,
          [RandArg(arg_shape, dtype), np.array(0, dtype), StaticArg(pads)],
          rng_factory=jtu.rand_small,
          arg_shape=arg_shape, dtype=dtype, pads=pads)
  for arg_shape in [(2, 3)]
  for dtype in jtu.dtypes.all
  for pads in [
    [(0, 0, 0), (0, 0, 0)], # no padding
    [(1, 1, 0), (2, 2, 0)], # only positive edge padding
    [(1, 2, 1), (0, 1, 0)], # edge padding and interior padding
    [(0, 0, 0), (-1, -1, 0)], # negative padding
    [(0, 0, 0), (-2, -2, 4)], # add big dilation then remove from edges
    [(0, 0, 0), (-2, -3, 1)], # remove everything in one dimension
  ]
)
def _make_cumreduce_harness(name, *, f_jax=lax_control_flow.cummin,
                            shape=(8, 9), dtype=np.float32,
                            axis=0, reverse=False):
  """Builds a Harness for a cumulative reduction (cummin/cummax/cumsum/...).

  Args:
    name: short label distinguishing this group of harnesses.
    f_jax: the cumulative-reduction function under test.
    shape: operand shape.
    dtype: operand dtype.
    axis: axis along which to reduce.
    reverse: whether the reduction runs back-to-front.
  """
  shape_str = jtu.format_shape_dtype_string(shape, dtype)
  harness_name = (f"{name}_f={f_jax.__name__}_shape={shape_str}"
                  f"_axis={axis}_reverse={reverse}")
  harness_args = [RandArg(shape, dtype), StaticArg(axis), StaticArg(reverse)]
  return Harness(harness_name, f_jax, harness_args,
                 f_jax=f_jax, shape=shape, dtype=dtype,
                 axis=axis, reverse=reverse)
# Cumulative-reduction coverage: one group per varied parameter (dtype, axis,
# reverse), each exercising cummin/cummax/cumsum/cumprod.
lax_control_flow_cumreduce = tuple( # Validate dtypes for each function
  _make_cumreduce_harness("dtype_by_fun", dtype=dtype, f_jax=f_jax)
  for f_jax in [
    lax_control_flow.cummin,
    lax_control_flow.cummax,
    lax_control_flow.cumsum,
    lax_control_flow.cumprod
  ]
  # bool is excluded from the dtype sweep.
  for dtype in [dtype for dtype in jtu.dtypes.all if dtype != np.bool_]
) + tuple( # Validate axis for each function
  _make_cumreduce_harness("axis_by_fun", axis=axis, f_jax=f_jax, shape=shape)
  for shape in [(8, 9)]
  for f_jax in [
    lax_control_flow.cummin,
    lax_control_flow.cummax,
    lax_control_flow.cumsum,
    lax_control_flow.cumprod
  ]
  for axis in range(len(shape))
) + tuple( # Validate reverse for each function
  _make_cumreduce_harness("reverse", reverse=reverse, f_jax=f_jax)
  for f_jax in [
    lax_control_flow.cummin,
    lax_control_flow.cummax,
    lax_control_flow.cumsum,
    lax_control_flow.cumprod
  ]
  for reverse in [True]
)
def _make_top_k_harness(name, *, operand=None, shape=(5, 3), dtype=np.float32,
                        k=2):
  """Builds a Harness for lax.top_k.

  Args:
    name: short label distinguishing this group of harnesses.
    operand: explicit input array; when None a random array of
      shape/dtype is used instead.
    shape, dtype: describe the default random operand.
    k: number of top entries requested (may be deliberately invalid).
  """
  # Fall back to a random operand of the requested shape/dtype.
  operand = RandArg(shape, dtype) if operand is None else operand
  in_str = jtu.format_shape_dtype_string(operand.shape, operand.dtype)
  return Harness(f"{name}_inshape={in_str}_k={k}",
                 lax.top_k,
                 [operand, StaticArg(k)],
                 shape=operand.shape,
                 dtype=operand.dtype,
                 k=k)
# top_k coverage: dtypes, an invalid (negative) k, and operands that probe
# sort semantics — ties for stability and inf/nan ordering.
lax_top_k = tuple( # Validate dtypes
  _make_top_k_harness("dtypes", dtype=dtype)
  for dtype in jtu.dtypes.all
) + tuple( # Validate k
  _make_top_k_harness("k", k=k)
  for k in [-2]
) + tuple( # Validate implicit properties of the sort
  _make_top_k_harness(name, operand=operand, k=k)
  for name, operand, k in [
    ("stability", np.array([5, 7, 5, 8, 8, 5], dtype=np.int32), 3),
    ("sort_inf_nan", np.array([+np.inf, np.nan, -np.nan, -np.inf, 3],
                              dtype=np.float32), 5)
  ]
)
def _make_sort_harness(name, *, operands=None, shape=(5, 7), dtype=np.float32,
                       dimension=0, is_stable=False, nb_arrays=1):
  """Builds a Harness that binds lax.sort_p directly.

  Args:
    name: short label distinguishing this group of harnesses.
    operands: explicit list of input arrays; when None, nb_arrays random
      arrays of shape/dtype are generated.
    shape, dtype: describe the default random operands.
    dimension: axis to sort along.
    is_stable: whether a stable sort is requested.
    nb_arrays: number of co-sorted arrays (variadic sort).
  """
  if operands is None:
    operands = [RandArg(shape, dtype) for _ in range(nb_arrays)]
  # In the lambda, args[:-2] are the operands and the last two positions carry
  # the static dimension and is_stable values; num_keys is fixed at 1.
  return Harness(f"{name}_nbarrays={nb_arrays}_shape={jtu.format_shape_dtype_string(operands[0].shape, operands[0].dtype)}_axis={dimension}_isstable={is_stable}",
                 lambda *args: lax.sort_p.bind(*args[:-2], dimension=args[-2],
                                               is_stable=args[-1], num_keys=1),
                 [*operands, StaticArg(dimension), StaticArg(is_stable)],
                 shape=operands[0].shape,
                 dimension=dimension,
                 dtype=operands[0].dtype,
                 is_stable=is_stable,
                 nb_arrays=nb_arrays)
# sort coverage: dtypes, a non-default dimension, stable sort, inf/nan edge
# cases, and variadic (multi-array) sorting.
lax_sort = tuple( # Validate dtypes
  _make_sort_harness("dtypes", dtype=dtype)
  for dtype in jtu.dtypes.all
) + tuple( # Validate dimensions
  [_make_sort_harness("dimensions", dimension=1)]
) + tuple( # Validate stable sort
  [_make_sort_harness("is_stable", is_stable=True)]
) + tuple( # Potential edge cases
  _make_sort_harness("edge_cases", operands=operands, dimension=dimension)
  for operands, dimension in [
    ([np.array([+np.inf, np.nan, -np.nan, -np.inf, 2], dtype=np.float32)], -1)
  ]
) + tuple( # Validate multiple arrays
  _make_sort_harness("multiple_arrays", nb_arrays=nb_arrays, dtype=dtype)
  for nb_arrays, dtype in [
    (2, np.float32), # equivalent to sort_key_val
    (2, np.bool_), # unsupported
    (3, np.float32), # unsupported
  ]
)
# Cholesky decomposition: all inexact dtypes x shapes including batched,
# large, and zero-sized matrices; binds the primitive directly.
lax_linalg_cholesky = tuple(
  Harness(f"_shape={jtu.format_shape_dtype_string(shape, dtype)}",
          lambda *args: lax.linalg.cholesky_p.bind(*args),
          [RandArg(shape, dtype)],
          shape=shape,
          dtype=dtype)
  for dtype in jtu.dtypes.all_inexact
  for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (1000, 0, 0)]
)
# QR decomposition: floating/complex dtypes x shapes (incl. non-square and
# batched) x full_matrices.
lax_linalg_qr = tuple(
  Harness(f"multi_array_shape={jtu.format_shape_dtype_string(shape, dtype)}_fullmatrices={full_matrices}",
          lax.linalg.qr,
          [RandArg(shape, dtype), StaticArg(full_matrices)],
          shape=shape,
          dtype=dtype,
          full_matrices=full_matrices)
  for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
  for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)]
  for full_matrices in [False, True]
)
def _make_fft_harness(name, *, shape=(14, 15, 16, 17), dtype=np.float32,
                      fft_type=xla_client.FftType.FFT, fft_lengths=(17,)):
  """Builds a Harness that binds lax.fft_p directly.

  Args:
    name: short label distinguishing this group of harnesses.
    shape: operand shape.
    dtype: operand dtype.
    fft_type: one of the xla_client.FftType variants (FFT/IFFT/RFFT/IRFFT).
    fft_lengths: lengths of the transformed trailing dimensions.
  """
  # Integer and boolean inputs use small random values to keep the FFT
  # round-off errors small; everything else uses the default generator.
  integral_dtypes = (jtu.dtypes.all_integer + jtu.dtypes.all_unsigned +
                     jtu.dtypes.boolean)
  rng_factory = jtu.rand_small if dtype in integral_dtypes else jtu.rand_default
  fft_fn = lambda *args: lax.fft_p.bind(args[0], fft_type=args[1],
                                        fft_lengths=args[2])
  shape_str = jtu.format_shape_dtype_string(shape, dtype)
  return Harness(f"{name}_shape={shape_str}_ffttype={fft_type}_fftlengths={fft_lengths}",
                 fft_fn,
                 [RandArg(shape, dtype), StaticArg(fft_type),
                  StaticArg(fft_lengths)],
                 rng_factory=rng_factory,
                 shape=shape,
                 dtype=dtype,
                 fft_type=fft_type,
                 fft_lengths=fft_lengths)
# FFT coverage. First group: all dtypes per FFT type over the last axis.
# Second group: 1-3 trailing dimensions per FFT type with a fixed dtype.
# Note IRFFT takes fft_lengths of 2 * (last_input_dim - 1) so the
# transform's output size round-trips with RFFT.
lax_fft = tuple( # Validate dtypes per FFT type
  _make_fft_harness("dtypes", shape=shape, dtype=dtype, fft_type=fft_type,
                    fft_lengths=fft_lengths)
  for shape in [(14, 15, 16, 17)]
  # FFT, IFFT, RFFT, IRFFT
  for fft_type in list(map(xla_client.FftType, [0, 1, 2, 3]))
  for dtype in (jtu.dtypes.floating if fft_type == xla_client.FftType.RFFT
                else jtu.dtypes.complex)
  for fft_lengths in [
    (shape[-1],) if fft_type != xla_client.FftType.IRFFT else
    ((shape[-1] - 1) * 2,)
  ]
) + tuple( # Validate dimensions per FFT type
  _make_fft_harness("dims", shape=shape, fft_type=fft_type,
                    fft_lengths=fft_lengths, dtype=dtype)
  for shape in [(14, 15, 16, 17)]
  for dims in [1, 2, 3]
  for fft_type in list(map(xla_client.FftType, [0, 1, 2, 3]))
  for dtype in [np.float32 if fft_type == xla_client.FftType.RFFT
                else np.complex64]
  for fft_lengths in [
    shape[-dims:] if fft_type != xla_client.FftType.IRFFT else
    shape[-dims:-1] + ((shape[-1] - 1) * 2,)
  ]
)
# SVD: all inexact dtypes x shapes (incl. batched and non-square) x
# full_matrices x compute_uv; binds the primitive directly.
lax_linalg_svd = tuple(
  Harness(f"shape={jtu.format_shape_dtype_string(shape, dtype)}_fullmatrices={full_matrices}_computeuv={compute_uv}",
          lambda *args: lax.linalg.svd_p.bind(args[0], full_matrices=args[1],
                                              compute_uv=args[2]),
          [RandArg(shape, dtype), StaticArg(full_matrices), StaticArg(compute_uv)],
          shape=shape,
          dtype=dtype,
          full_matrices=full_matrices,
          compute_uv=compute_uv)
  for dtype in jtu.dtypes.all_floating + jtu.dtypes.complex
  for shape in [(2, 2), (2, 7), (29, 29), (2, 3, 53), (2, 3, 29, 7)]
  for full_matrices in [False, True]
  for compute_uv in [False, True]
)
# General (non-symmetric) eigendecomposition: all inexact dtypes x shapes
# (incl. empty and batched) x left/right eigenvector combinations.
lax_linalg_eig = tuple(
  Harness(f"_shape={jtu.format_shape_dtype_string(shape, dtype)}_computelefteigenvectors={compute_left_eigenvectors}_computerighteigenvectors={compute_right_eigenvectors}",
          lax.linalg.eig,
          [RandArg(shape, dtype), StaticArg(compute_left_eigenvectors),
           StaticArg(compute_right_eigenvectors)],
          shape=shape,
          dtype=dtype,
          compute_left_eigenvectors=compute_left_eigenvectors,
          compute_right_eigenvectors=compute_right_eigenvectors)
  for dtype in jtu.dtypes.all_inexact
  for shape in [(0, 0), (5, 5), (2, 6, 6)]
  for compute_left_eigenvectors in [False, True]
  for compute_right_eigenvectors in [False, True]
)
# Symmetric/Hermitian eigendecomposition: inexact dtypes (minus float16,
# which JAX does not implement here) x shapes x lower/upper triangle.
# NOTE(review): the third StaticArg(False) is an unnamed positional flag of
# lax.linalg.eigh — presumably symmetrize_input; confirm against its signature.
lax_linalg_eigh = tuple(
  Harness(f"_shape={jtu.format_shape_dtype_string(shape, dtype)}_lower={lower}",
          lax.linalg.eigh,
          [RandArg(shape, dtype), StaticArg(lower), StaticArg(False)],
          shape=shape,
          dtype=dtype,
          lower=lower)
  for dtype in jtu.dtypes.all_inexact
  for shape in [(0, 0), (50, 50), (2, 20, 20)]
  for lower in [False, True]
  # Filter out cases where implementation is missing in JAX
  if dtype != np.float16
)
# LU decomposition: all inexact dtypes x square, batched, and non-square
# shapes.
lax_linalg_lu = tuple(
  Harness(f"_shape={jtu.format_shape_dtype_string(shape, dtype)}",
          lax.linalg.lu,
          [RandArg(shape, dtype)],
          shape=shape,
          dtype=dtype)
  for dtype in jtu.dtypes.all_inexact
  for shape in [
    (5, 5), # square
    (3, 5, 5), # batched
    (3, 5), # non-square
  ]
)
def _make_triangular_solve_harness(name, *, left_side=True, lower=False,
                                   ab_shapes=((4, 4), (4, 1)), dtype=np.float32,
                                   transpose_a=False, conjugate_a=False,
                                   unit_diagonal=False):
  """Builds a Harness that binds lax.linalg.triangular_solve_p directly.

  Args:
    name: short label distinguishing this group of harnesses.
    left_side: solve a @ x = b (True) or x @ a = b (False).
    lower: whether `a` is lower-triangular.
    ab_shapes: pair of (a_shape, b_shape).
    dtype: dtype of both operands.
    transpose_a / conjugate_a / unit_diagonal: flags forwarded to the
      primitive.
  """
  a_shape, b_shape = ab_shapes
  f_lax = lambda a, b: (lax.linalg.triangular_solve_p.bind(
    a, b, left_side=left_side, lower=lower, transpose_a=transpose_a,
    conjugate_a=conjugate_a, unit_diagonal=unit_diagonal))
  return Harness(f"_{name}_a={jtu.format_shape_dtype_string(a_shape, dtype)}_b={jtu.format_shape_dtype_string(b_shape, dtype)}_leftside={left_side}_lower={lower}_transposea={transpose_a}_conjugatea={conjugate_a}_unitdiagonal={unit_diagonal}",
                 f_lax,
                 [RandArg(a_shape, dtype), RandArg(b_shape, dtype)],
                 dtype=dtype,
                 a_shape=a_shape,
                 b_shape=b_shape,
                 left_side=left_side,
                 lower=lower,
                 # NOTE(review): "tranpose_a" looks like a typo of
                 # "transpose_a". Confirm no consumer reads the misspelled
                 # Harness key before renaming it.
                 tranpose_a=transpose_a,
                 conjugate_a=conjugate_a,
                 unit_diagonal=unit_diagonal)
lax_linalg_triangular_solve = tuple( # Validate dtypes
# This first harness runs the tests for all dtypes using default values for
# all the other parameters, except unit_diagonal (to ensure that
# tf.linalg.set_diag works reliably for all dtypes). Variations of other
# parameters can thus safely skip testing their corresponding default value.
# Note that this validates solving on the left.
_make_triangular_solve_harness("dtypes", dtype=dtype,
unit_diagonal=unit_diagonal)
for dtype in jtu.dtypes.all_inexact
for unit_diagonal in [False, True]
) + tuple( # Validate shapes when solving on the right
_make_triangular_solve_harness("shapes_right", ab_shapes=ab_shapes,
left_side=False)
for ab_shapes in [
((4, 4), (1, 4)), # standard
((2, 8, 8), (2, 10, 8)), # | |
in range(len(path[:-1])):
print_lines.append(f"{spaces}at state {path[i]}:")
print_lines.append(f"{spaces} - navigate to '{elements[i]}'")
print_lines.append(
f"{spaces} - fire any one of these actions below to get to state {path[i + 1]}:")
for action in actions[i]:
print_lines.append(f"{spaces} - '{action}'")
return print_lines
    def _elements_to_fix(self, potential_improvements, user):
        """Finds elements and the states that would become accessible if the
        elements were accessible

        For each possible new edge reported by find_all_accessible(), the
        element on that edge is recorded together with the states the edge
        would unlock and the shortest path (in the user's graph) to the edge's
        source state. Results are also stored in
        self.report_data["accessibility"][user].

        Args:
            potential_improvements: dict output from find_all_accessible()
            user: str denoting a crawl user
        Returns:
            improve_elements_dict: dictionary of (element,state_ids) pairs
            print_lines : list of lines to print to the report
        """
        # Getting the user graph
        user_graph = self.users["crawl_users"][user]["graph"]
        # finding elements that can be fixed
        improve_elements_dict = dict()
        for state_id, analysis_dict in potential_improvements.items():
            for i in analysis_dict['possible_edges']:
                # i is an edge key triple into the full (all-user) multigraph.
                new_edge = self.full_graph[i[0]][i[1]][i[2]]
                # Only consider edges whose source state this user can reach.
                if user_graph.has_node(i[0]) and nx.has_path(user_graph, 0, i[0]):
                    shortest_path = nx.shortest_path(user_graph, 0, i[0])
                    if new_edge['element'] in improve_elements_dict:
                        improve_elements_dict[new_edge['element']]['new_states_included'].update(analysis_dict['new_states_included'])
                        improve_elements_dict[new_edge['element']]['paths'].append(shortest_path)
                    else:
                        improve_elements_dict[new_edge['element']] = {'new_states_included': set(analysis_dict['new_states_included'])}
                        improve_elements_dict[new_edge['element']]['paths'] = [shortest_path]
        # pretty-print the results
        print_lines = list()
        print_lines.append(f'**{len(improve_elements_dict.items())}** inaccessible elements for this user:\n')
        for problem_element in improve_elements_dict.keys():
            print_lines.append(f' * `{problem_element}`')
            # Convert the accumulated set to a list (e.g. for serialization
            # into report_data below).
            improve_elements_dict[problem_element]["new_states_included"] = list(improve_elements_dict[problem_element]["new_states_included"])
        for problem_element, new_states_dict in improve_elements_dict.items():
            print_lines.append(f'\n#### `{problem_element}`')
            print_lines.append(f'\nMake `{problem_element}` accessible and the following states will become accessible:\n')
            for state_id in sorted(list(new_states_dict['new_states_included'])):
                # Link to the saved DOM snapshot when available.
                if self.dom_path is not None:
                    print_lines.append(f' * {self._state_link(state_id, self.dom_path)}')
                else:
                    print_lines.append(f' * {state_id}')
            # Paths longer than one state mean the element only becomes
            # unreachable after some navigation; show those progressions.
            if any([len(path) > 1 for path in new_states_dict['paths']]):
                print_lines.append(f'\n`{problem_element}` appears inaccessible after the following progressions:\n')
                for path in new_states_dict['paths']:
                    if len(path) > 1:
                        if self.dom_path is not None:
                            print_lines.append(f'--> {self._state_link(path[-1], self.dom_path)}:\n')
                        else:
                            print_lines.append(f'--> state {path[-1]}:\n')
                        print_lines += self._print_user_path(user_graph, path, 4)
                        print_lines.append('')
            else:
                if self.dom_path is not None:
                    print_lines.append(f'\n`{problem_element}` only appears inaccessible at {self._state_link(0, self.dom_path)}.')
                else:
                    print_lines.append(f'\n`{problem_element}` only appears inaccessible at state 0.')
        self.report_data["accessibility"][user] = improve_elements_dict
        return improve_elements_dict, print_lines
@staticmethod
def _state_link(state_idx, path):
"""Returns a markdown hyperlink to open a state dom link for a given state
Args:
state_idx: int of the state id
path: path where the doms are stored
Returns:
md_link: str for a dom state file link
"""
url = pathlib.Path(path) / f"state-{state_idx}.html"
# windows link
if os.name == 'nt':
md_link = f"[state {state_idx}]({url.absolute()})"
# unix/linux link
else:
md_link = f"[state {state_idx}](file://{url.absolute()})"
return md_link
@staticmethod
def _get_xpath_sim_score(xpath1, xpath2):
"""Get similarity score [0,1] between two xpaths. A score of 0 refers to
No similarity in top-down parents. A score of 1 represents identitical
xpaths.
Args:
xpath1: str representing one xpath
xpath2: str representing another xpath
Returns:
score: float representing the similarity of the two xpaths
"""
# Parse xpaths into lists and save them as the longer or shorter one
xpath1_parsed = xpath1.split("/")
xpath2_parsed = xpath2.split("/")
if len(xpath1_parsed) >= len(xpath2_parsed):
longer = xpath1_parsed
shorter = xpath2_parsed
else:
shorter = xpath1_parsed
longer = xpath2_parsed
# Find the number of parents that are the same
num_same_parents = 0
for i in range(len(shorter)):
# Equal elements, save the number of parents that are the same
if longer[i] == shorter[i]:
num_same_parents = i + 1
# Unequal. End iterating
else:
break
# Return ratio to bound scores between [0,1]
score = num_same_parents / len(longer)
return score
def _add_xpath_edges_for_node1(self, nodes_compare_set, xpath_scores_dict,
                               G, min_weight, xpath_node1, unique_out_nodes,
                               source_node_out_edges):
    """Helper method for _add_xpath_edge_weights() to find the max xpath
    score between xpath_node1 and all other nodes that source_node points to
    (in _add_xpath_edge_weights()). Then insert that max xpath score into
    the weight for an xpath_edge.
    Args:
        nodes_compare_set: set of node two-tuples that have already been
            compared
        xpath_scores_dict: dict of pairs of xpaths and their scores
        G: networkx graph to add xpath weights to
        min_weight: minimum weight to add to any edge
        xpath_node1: int ID of the first node to get xpaths from
        unique_out_nodes: set of node IDs that source_node points to
        source_node_out_edges: list of edges that come from source_node
    Returns:
        G: networkx graph (updated) with additional edges and edge weights.
        nodes_compare_set: set (updated) of node two-tuples that have
            already been compared
        xpath_scores_dict: dict (updated) of pairs of xpaths and their
            scores
    """
    # get unique xpaths for source_node to xpath_node1
    els_for_n = {edge[2]["element"] for edge in source_node_out_edges if
                 edge[1] == xpath_node1}
    for xpath_node2 in unique_out_nodes - {xpath_node1}:
        # canonical (sorted) pair so (a, b) and (b, a) are compared once
        nodes_pair = tuple(sorted((xpath_node1, xpath_node2)))
        if nodes_pair in nodes_compare_set:
            continue
        nodes_compare_set.add(nodes_pair)
        # get unique xpaths for source_node to xpath_node2
        els_for_other_n = {edge[2]["element"] for edge in
                           source_node_out_edges if
                           edge[1] == xpath_node2}
        # compare all possible pairs of xpaths and record highest score
        # (max_score starts at min_weight, so it is also the floor)
        max_score = min_weight
        for el1, el2 in itertools.product(els_for_n,
                                          els_for_other_n):
            # Get the xpath_score if we have already computed it,
            # otherwise compute it and store it to reduce future
            # computations
            xpath_tuple = tuple(sorted([el1, el2]))
            if xpath_tuple in xpath_scores_dict:
                score = xpath_scores_dict[xpath_tuple]
            else:
                score = self._get_xpath_sim_score(el1, el2)
                xpath_scores_dict[xpath_tuple] = score
            if score > max_score:
                max_score = score
        # If no edges exist between xpath_node1 and xpath_node2, add an edge
        # with that weight, otherwise, update the score if the max_score is
        # greater than the existing score.
        # NOTE(review): iterating get_edge_data() and indexing
        # G[node1][node2][edge] assumes G is a MultiDiGraph whose edge data
        # is keyed per parallel edge -- confirm against the caller.
        for node1, node2 in [[xpath_node1, xpath_node2], [xpath_node2, xpath_node1]]:
            edges = G.get_edge_data(node1, node2)
            if edges is None:
                G.add_edge(node1, node2, is_xpath_edge=True,
                           xpath_edge_weight=max_score)
            # Else, update the xpath_edge_weight for all edges between
            # node1 and node2
            else:
                for edge in edges:
                    if G[node1][node2][edge]["xpath_edge_weight"] < max_score:
                        G[node1][node2][edge]["xpath_edge_weight"] = max_score
    return nodes_compare_set, xpath_scores_dict, G
def _add_xpath_edge_weights(self, G, min_weight=0.2):
    """Add edges to a graph (G) for nodes that have edges from the same
    incoming node.

    Edge weights are max(xpath_score, min_weight), where xpath scores are
    computed between elements that live in the same state but transition
    to different states.  Pre-existing edges are given min_weight.

    Args:
        G: networkx graph to add xpath weights to
        min_weight: minimum weight to add to any edge
    Returns:
        G: networkx graph with additional edges and edge weights.
    """
    # Memo of xpath-pair scores, shared across all node comparisons.
    xpath_scores_dict = dict()
    # Node pairs that already received an xpath edge.
    nodes_compare_set = set()
    # Mark every pre-existing edge and give it the floor weight.
    nx.set_edge_attributes(G, False, "is_xpath_edge")
    nx.set_edge_attributes(G, min_weight, "xpath_edge_weight")
    for src in list(G.nodes()):
        # Only the original (non-xpath) edges leaving src matter here,
        # so freshly added xpath edges are filtered back out.
        real_out_edges = [
            edge for edge in G.out_edges(nbunch=src, data=True)
            if edge[2]["is_xpath_edge"] is False
        ]
        targets = {edge[1] for edge in real_out_edges}
        for target in targets:
            # Connect target with the other nodes src points to, weighted
            # by their max xpath similarity; carry the accumulators along.
            (nodes_compare_set,
             xpath_scores_dict,
             G) = self._add_xpath_edges_for_node1(nodes_compare_set,
                                                  xpath_scores_dict,
                                                  G,
                                                  min_weight,
                                                  target,
                                                  targets,
                                                  real_out_edges)
    return G
def _save_network_layouts(self):
    """Save network layouts to self.full_graph. These will be outputted to
    the analyzed gml file.

    For each minimum xpath-edge weight, computes Fruchterman-Reingold and
    Kamada-Kawai layouts on a weighted copy of self.full_graph, stores the
    x/y coordinates as node attributes on self.full_graph, and writes one
    PNG per layout under <output_path>/network_layouts/.
    """
    # Set values for network layouts and minimum edge weights
    funcs = [nx.fruchterman_reingold_layout, nx.kamada_kawai_layout]
    min_weights = [0.0, 0.2, 0.4, 0.6, 0.8]
    # Iterate through the values of xpath_edge_weight, creating xpath_edge_weight
    for min_weight in min_weights:
        G_copy = self.full_graph.copy()
        G_copy = self._add_xpath_edge_weights(G_copy, min_weight)
        coords_list = []
        titles = []
        # Iterate through the network layout functions
        for func in funcs:
            # Getting x,y positions
            pos = func(G_copy, weight="xpath_edge_weight")
            coords_list.append(pos)
            # Saving titles for plots and saved node attribute
            if func == nx.fruchterman_reingold_layout:
                title = f"fr_{min_weight}"
            else:
                title = f"kk_{min_weight}"
            titles.append(title)
            # Saving x,y pairs to the full graph; the "0." is stripped so
            # attribute names stay gml-friendly (e.g. x_fr_2 for 0.2)
            x = {k: v[0] for k, v in pos.items()}
            y = {k: v[1] for k, v in pos.items()}
            nx.set_node_attributes(self.full_graph, x,
                                   f'x_{title.replace("0.", "")}')
            nx.set_node_attributes(self.full_graph, y,
                                   f'y_{title.replace("0.", "")}')
        # Plotting network layouts
        out_path = pathlib.Path(self.output_path) / "network_layouts/"
        out_path.mkdir(parents=True, exist_ok=True)
        for func, pos, title in zip(funcs, coords_list, titles):
            plt.figure()
            plt.title(title)
            nx.draw(self.full_graph, pos=pos, with_labels=True)
            plt.savefig(str(out_path / f"{title}.png"))
            # Fix: release the figure after saving. Without this, the ten
            # figures created per call stay open, leaking memory and
            # triggering matplotlib's "too many open figures" warning.
            plt.close()
@staticmethod
def parse_color_string(color_str):
""" Gets the colors from a style string
Find the color strings then return the numbers as a list
E.g., '2px solid rgb(123,12,4)' => ['123', '12', '4']
Args:
color_str: The style string that contains a color
Returns:
List of the style numbers. Note it will have len 3 for rgb and len 4 for rgba
"""
# Matches strings of the type rgb(ddd, dd, d), and accounts for their
# being 1-3 | |
# file: dh.py
# dh.py
#
# digital history module from Turkel & MacEachern,
# The Programming Historian (2007-08)
#
# contents:
#
# stopwords LIST
# normalizeFrenchAccents(STRING) -> STRING
# stripTags(HTML_STRING) -> TEXT_STRING
# stripNonAlphaNum(TEXT_STRING) -> ALPHANUMERIC_STRING
# webPageToText(URL_STRING) -> LOWERCASE_TEXT_STRING
# removeStopwords(WORD_LIST, STOPWORD_LIST) -> WORD_LIST
# wordListToFreqDict(WORD_LIST) -> DICTIONARY(WORD -> FREQUENCY)
# sortFreqDict(DICTIONARY(WORD -> FREQUENCY)) -> SORTED_DICTIONARY(WORD -> FREQUENCY)
# reSortFreqDictAlpha(DICTIONARY(WORD -> FREQUENCY)) -> SORTED_DICTIONARY(WORD -> FREQUENCY)
# wrapStringInHTML(STRING, URL_STRING, STRING) -> HTML_STRING
# keywordListToGoogleSearchLink(WORD_LIST, STRING) -> HTML_STRING
# undecoratedHyperlink(URL_STRING, STRING) -> HTML_CSS_STRING
# getNGrams(WORD_LIST, INTEGER) -> NGRAM_LIST
# nGramsToKWICDict(NGRAM_LIST) -> DICTIONARY(KEYWORD_STRING -> KWIC_LIST)
# prettyPrintKWIC(KWIC_LIST) -> STRING
# defaultCSSDiv(STRING, STRING) -> HTML_CSS_STRING
# scaledFontSizeSpan(STRING, INTEGER) -> HTML_CSS_STRING
# scaledFontShadeSpan(STRING, INTEGER) -> HTML_CSS_STRING
# scaledFontHeatmapSpan(STRING, INTEGER) -> HTML_CSS_STRING
# getFileNames(STRING) -> LIST
# localWebPageToText(HTML_STRING) -> STRING
# replaceStopwords(WORD_LIST, STOPWORD_LIST, STRING) -> WORD_LIST
# English stop word list used by removeStopwords()/replaceStopwords() below.
# Built up in short chunks so every source line stays narrow.
# NOTE(review): 'three' appears in two consecutive chunks; the duplicate is
# harmless for membership tests but could be removed.
stopwords = ['a', 'about', 'above', 'across', 'after', 'afterwards']
stopwords += ['again', 'against', 'all', 'almost', 'alone', 'along']
stopwords += ['already', 'also', 'although', 'always', 'am', 'among']
stopwords += ['amongst', 'amoungst', 'amount', 'an', 'and', 'another']
stopwords += ['any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere']
stopwords += ['are', 'around', 'as', 'at', 'back', 'be', 'became']
stopwords += ['because', 'become', 'becomes', 'becoming', 'been']
stopwords += ['before', 'beforehand', 'behind', 'being', 'below']
stopwords += ['beside', 'besides', 'between', 'beyond', 'bill', 'both']
stopwords += ['bottom', 'but', 'by', 'call', 'can', 'cannot', 'cant']
stopwords += ['co', 'computer', 'con', 'could', 'couldnt', 'cry', 'de']
stopwords += ['describe', 'detail', 'did', 'do', 'done', 'down', 'due']
stopwords += ['during', 'each', 'eg', 'eight', 'either', 'eleven', 'else']
stopwords += ['elsewhere', 'empty', 'enough', 'etc', 'even', 'ever']
stopwords += ['every', 'everyone', 'everything', 'everywhere', 'except']
stopwords += ['few', 'fifteen', 'fifty', 'fill', 'find', 'fire', 'first']
stopwords += ['five', 'for', 'former', 'formerly', 'forty', 'found']
stopwords += ['four', 'from', 'front', 'full', 'further', 'get', 'give']
stopwords += ['go', 'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her']
stopwords += ['here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers']
stopwords += ['herself', 'him', 'himself', 'his', 'how', 'however']
stopwords += ['hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed']
stopwords += ['interest', 'into', 'is', 'it', 'its', 'itself', 'keep']
stopwords += ['last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made']
stopwords += ['many', 'may', 'me', 'meanwhile', 'might', 'mill', 'mine']
stopwords += ['more', 'moreover', 'most', 'mostly', 'move', 'much']
stopwords += ['must', 'my', 'myself', 'name', 'namely', 'neither', 'never']
stopwords += ['nevertheless', 'next', 'nine', 'no', 'nobody', 'none']
stopwords += ['noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'of']
stopwords += ['off', 'often', 'on','once', 'one', 'only', 'onto', 'or']
stopwords += ['other', 'others', 'otherwise', 'our', 'ours', 'ourselves']
stopwords += ['out', 'over', 'own', 'part', 'per', 'perhaps', 'please']
stopwords += ['put', 'rather', 're', 's', 'same', 'see', 'seem', 'seemed']
stopwords += ['seeming', 'seems', 'serious', 'several', 'she', 'should']
stopwords += ['show', 'side', 'since', 'sincere', 'six', 'sixty', 'so']
stopwords += ['some', 'somehow', 'someone', 'something', 'sometime']
stopwords += ['sometimes', 'somewhere', 'still', 'such', 'system', 'take']
stopwords += ['ten', 'than', 'that', 'the', 'their', 'them', 'themselves']
stopwords += ['then', 'thence', 'there', 'thereafter', 'thereby']
stopwords += ['therefore', 'therein', 'thereupon', 'these', 'they']
stopwords += ['thick', 'thin', 'third', 'this', 'those', 'though', 'three']
stopwords += ['three', 'through', 'throughout', 'thru', 'thus', 'to']
stopwords += ['together', 'too', 'top', 'toward', 'towards', 'twelve']
stopwords += ['twenty', 'two', 'un', 'under', 'until', 'up', 'upon']
stopwords += ['us', 'very', 'via', 'was', 'we', 'well', 'were', 'what']
stopwords += ['whatever', 'when', 'whence', 'whenever', 'where']
stopwords += ['whereafter', 'whereas', 'whereby', 'wherein', 'whereupon']
stopwords += ['wherever', 'whether', 'which', 'while', 'whither', 'who']
stopwords += ['whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with']
stopwords += ['within', 'without', 'would', 'yet', 'you', 'your']
stopwords += ['yours', 'yourself', 'yourselves']
# Given a string containing French accented characters
# in Unicode or HTML, return normalized lowercase.
def normalizeFrenchAccents(str):
    # NOTE(review): Python 2 only -- 'unicode' does not exist in Python 3.
    # The parameter name also shadows the builtin 'str'.  The decode/encode
    # round-trip yields a latin-1 byte string; characters with no latin-1
    # mapping become '?' due to the 'replace' error handler.
    newstr = unicode(str, 'utf-8').encode('latin-1', 'replace')
    newstr = newstr.lower()
    # Normalize the typographic apostrophe, then fold uppercase accented
    # characters (both raw escapes and literal UTF-8 forms) to their
    # lowercase latin-1 escapes.
    newstr = newstr.replace('’', '\'')
    newstr = newstr.replace('\xc0', '\xe0') # a grave
    newstr = newstr.replace('à', '\xe0') # a grave
    newstr = newstr.replace('\xc2', '\xe2') # a circumflex
    newstr = newstr.replace('â', '\xe2') # a circumflex
    newstr = newstr.replace('\xc4', '\xe4') # a diaeresis
    newstr = newstr.replace('ä', '\xe4') # a diaeresis
    newstr = newstr.replace('\xc6', '\xe6') # ae ligature
    newstr = newstr.replace('æ', '\xe6') # ae ligature
    newstr = newstr.replace('\xc8', '\xe8') # e grave
    newstr = newstr.replace('è', '\xe8') # e grave
    newstr = newstr.replace('\xc9', '\xe9') # e acute
    newstr = newstr.replace('é', '\xe9') # e acute
    newstr = newstr.replace('\xca', '\xea') # e circumflex
    newstr = newstr.replace('ê', '\xea') # e circumflex
    newstr = newstr.replace('\xcb', '\xeb') # e diaeresis
    newstr = newstr.replace('ë', '\xeb') # e diaeresis
    newstr = newstr.replace('\xce', '\xee') # i circumflex
    newstr = newstr.replace('î', '\xee') # i circumflex
    newstr = newstr.replace('\xcf', '\xef') # i diaeresis
    newstr = newstr.replace('ï', '\xef') # i diaeresis
    newstr = newstr.replace('\xd4', '\xf4') # o circumflex
    newstr = newstr.replace('ô', '\xf4') # o circumflex
    newstr = newstr.replace('œ', 'oe') # oe ligature
    newstr = newstr.replace('\xd9', '\xf9') # u grave
    newstr = newstr.replace('ù', '\xf9') # u grave
    newstr = newstr.replace('\xdb', '\xfb') # u circumflex
    newstr = newstr.replace('û', '\xfb') # u circumflex
    newstr = newstr.replace('\xdc', '\xfc') # u diaeresis
    newstr = newstr.replace('ü', '\xfc') # u diaeresis
    newstr = newstr.replace('\xc7', '\xe7') # c cedilla
    newstr = newstr.replace('ç', '\xe7') # c cedilla
    newstr = newstr.replace('ÿ', '\xff') # y diaeresis
    return newstr
# Given a string containing HTML, remove all characters
# between matching pairs of angled brackets, inclusive.
def stripTags(html):
    in_tag = False
    text_chars = []
    for ch in html:
        if ch == '<':
            # Entering a tag: everything is dropped until the closing '>'.
            in_tag = True
        elif in_tag:
            # Inside a tag: drop the character; a '>' ends the tag.
            if ch == '>':
                in_tag = False
        else:
            text_chars.append(ch)
    return ''.join(text_chars)
# Given a text string, remove all non-alphanumeric
# characters (using Unicode definition of alphanumeric).
def stripNonAlphaNum(text):
    import re
    # Split on runs of non-word characters, yielding the list of words
    # (with empty strings at the boundaries when the text starts or ends
    # with punctuation).
    return re.split(r'\W+', text, flags=re.UNICODE)
# Given a URL, return string of lowercase text from page.
def webPageToText(url):
    # NOTE(review): urllib2 is Python 2 only; under Python 3 this would be
    # urllib.request. Confirm the intended interpreter before porting.
    import urllib2
    response = urllib2.urlopen(url)
    html = response.read()
    # NOTE(review): this replace() swaps a space for a space and is a no-op
    # as written -- presumably the first argument was once a non-breaking
    # space (or '&nbsp;') that was mangled in transit; verify against the
    # original source before "fixing".
    text = stripTags(html).replace(' ', ' ')
    return text.lower()
# Given a list of words, remove any that are
# in a list of stop words.
def removeStopwords(wordlist, stopwords):
    """Return the words from wordlist that do not appear in stopwords.

    Builds a set once so each membership test is O(1) instead of scanning
    the stopword list for every word (O(n*m) in the original).
    """
    stopset = set(stopwords)
    return [w for w in wordlist if w not in stopset]
# Given a list of words, return a dictionary of
# word-frequency pairs.
def wordListToFreqDict(wordlist):
    """Return {word: frequency} for the words in wordlist.

    Uses collections.Counter for a single O(n) pass; the original called
    wordlist.count() once per word, which is quadratic.
    """
    from collections import Counter
    return dict(Counter(wordlist))
# Sort a dictionary of word-frequency pairs in
# order of descending frequency.
def sortFreqDict(freqdict):
    # Flip each (word, count) pair so frequency sorts first, then the
    # word breaks ties; highest frequency comes out on top.
    pairs = [(count, word) for word, count in freqdict.items()]
    return sorted(pairs, reverse=True)
# Given a dictionary of frequency-word pairs sorted
# in order of descending frequency, re-sort so it is
# in alphabetical order by word.
def reSortFreqDictAlpha(sorteddict):
    import operator
    # Sort the (frequency, word) pairs by their word (index 1); sorted()
    # copies the input, matching the original's list-then-sort behavior.
    return sorted(sorteddict, key=operator.itemgetter(1))
# Given name of calling program, a url and a string to wrap,
# output string in HTML body with basic metadata
# and open in Firefox tab.
def wrapStringInHTML(program, url, body):
    """Write <program>.html wrapping body in a minimal HTML page whose
    title records the program name and a timestamp, and whose body links
    to url. The browser-tab launch is left disabled (commented out)."""
    import datetime
    # from webbrowser import open_new_tab
    now = datetime.datetime.today().strftime("%Y%m%d-%H%M%S")
    filename = program + '.html'
    wrapper = """<html>
<head>
<title>%s output - %s</title>
</head>
<body><p>URL: <a href=\"%s\">%s</a></p><p>%s</p></body>
</html>"""
    whole = wrapper % (program, now, url, url, body)
    # Fix: context manager guarantees the file is closed even if write()
    # raises; the original open()/close() pair leaked the handle on error.
    with open(filename, 'w') as f:
        f.write(whole)
    # open_new_tab(filename)
# Given a list of keywords and a link name, return an
# HTML link to a Google search for those terms.
def keywordListToGoogleSearchLink(keywords, linkname):
    # Keywords are joined with '+' as Google expects in the query string.
    query = '+'.join(keywords)
    search_url = 'http://www.google.com/search?q=' + query
    return undecoratedHyperlink(search_url, linkname)
# Given a url and link name, return a string containing
# HTML and inline CSS for an undecorated hyperlink.
def undecoratedHyperlink(url, linkname):
    # The template suppresses the default underline via inline CSS; its
    # whitespace is preserved exactly as in the original.
    template = """<a
style=\"text-decoration:none\" href=\"%s\">%s</a>
"""
    return template % (url, linkname)
# Given a list of words and a number n, return a list
# of n-grams.
def getNGrams(wordlist, n):
    # Slide a window of width n across the word list; each slice is one
    # n-gram. range() is empty when the list is shorter than n.
    ngrams = []
    for start in range(len(wordlist) - n + 1):
        ngrams.append(wordlist[start:start + n])
    return ngrams
# Given a list of n-grams, return a dictionary of KWICs,
# indexed by keyword.
def nGramsToKWICDict(ngrams):
    """Group n-grams by their middle word (the KWIC keyword).

    Returns {keyword: [ngram, ...]} preserving the input order within
    each group. The keyword index is taken from the first n-gram's length.
    """
    # Fix: the original indexed ngrams[0] unconditionally and raised
    # IndexError on an empty input; an empty dict is the sensible result.
    if not ngrams:
        return {}
    kwicdict = {}
    keyindex = len(ngrams[0]) // 2
    for ngram in ngrams:
        # setdefault collapses the original if/else branch into one call.
        kwicdict.setdefault(ngram[keyindex], []).append(ngram)
    return kwicdict
# Given a KWIC, return a string that is formatted for
# pretty printing.
def prettyPrintKWIC(kwic):
    width = 10
    keyindex = len(kwic) // 2
    # Left context is right-justified into a fixed-width column, the
    # keyword is centered with 3 spaces of padding either side, and the
    # right context follows plainly.
    left = ' '.join(kwic[:keyindex]).rjust(width * keyindex)
    middle = str(kwic[keyindex]).center(len(kwic[keyindex]) + 6)
    right = ' '.join(kwic[(keyindex + 1):])
    return left + middle + right
# Given the body of a div and an optional string of
# property-value pairs, return string containing HTML
# and inline CSS for default div.
def defaultCSSDiv(divbody, opt=''):
    # Extra CSS declarations in `opt` are appended after the defaults;
    # the template's whitespace is preserved exactly as in the original.
    template = """<div style=\"
width: 560px;
background-color: rgb(250,250,250);
border: 1px grey solid;
text-align: center;
%s\">%s</div>
"""
    return template % (opt, divbody)
# Given the body of a span and a scaling factor, return
# string containing HTML span with scaled font size.
def scaledFontSizeSpan(body, scalingfactor):
    import math
    # Map the scaling factor onto the 24px..54px range; math.floor keeps
    # the same rounding behavior as the original for any input.
    min_px = 24
    max_px = 54
    pixels = int(min_px + math.floor((max_px - min_px) * scalingfactor))
    return '<span style=\"font-size:%spx;\">%s</span>' % (str(pixels), body)
# Given the body of a span | |
regular key tag.
This method never returns None.
'''
updated_tagstring_s = None # our return value
# 1. clean up whitespace and None in our tagstring parameter
tagstring_s = ', '.join(tags_sl).strip() if tags_sl else ''
key_tag_s = db.create_key_tag_s(issue_key) \
if issue_key != None else ComicBook.CVDBSKIP
if key_tag_s and tagstring_s:
# 2. we have both a new key tag AND a non-empty tagstring; find and
# replace the existing tag (if it exists in the tagtring)
# with the new one.
matches = False
prev_issue_key = db.parse_key_tag(tagstring_s)
if prev_issue_key:
prev_key_tag_s = db.create_key_tag_s(prev_issue_key)
if prev_key_tag_s:
regexp = re.compile(r"(?i)" + prev_key_tag_s)
matches = regexp.search(tagstring_s)
if matches:
# 2a. yup, found an existing key tag--replace it with the new one
updated_tagstring_s = re.sub(regexp, key_tag_s, tagstring_s)
else:
# 2b. nope, no previous key tag found--just append the new key tag
if tagstring_s[-1] == ",":
tagstring_s = tagstring_s[:-1]
updated_tagstring_s = tagstring_s +", " + key_tag_s
elif key_tag_s:
# 3. no previous tagstring, so the key tag *becomes* the new tagstring
updated_tagstring_s = key_tag_s
else:
# 4. there's no key tag available, so don't change the tagstring.
updated_tagstring_s = tagstring_s
return updated_tagstring_s.split(", ")
#===========================================================================
def __add_key_to_notes(self, notestring_s, issue_key):
    '''
    Returns a copy of the given comic book note string, but with a "key tag"
    for the given issue_key appended onto the end (iff key tags are
    supported by the current database implementation.) If the given
    notes string already contains a valid key tag, the existing tag will be
    REPLACED with the new one.
    If the given issue_key is None, the note string will be updated with the
    magic CVDBSKIP tag instead of a regular key tag.
    This method never returns None.
    '''
    updated_notestring_s = None # our return value
    # 1. clean up whitespace and None in our notestring parameter
    notestring_s = notestring_s.strip() if notestring_s else ''
    # Create a key-note that looks like either:
    # 1) Scraped metadata from ComicVine [CVDB9999].
    # 2) Scraped metadata from ComicVine [CVDB9999] on 2013.05.14 21:43:06.
    # NOTE(review): db.create_key_tag_s presumably returns None/'' when the
    # database has no key-tag support; that drives branch 4 below -- confirm.
    key_tag_s = db.create_key_tag_s(issue_key) \
        if issue_key != None else ComicBook.CVDBSKIP
    # the " on <timestamp>" suffix is only added when the user enabled the
    # note_scrape_date_b config option
    date_s = " on "+strftime(r'%Y.%m.%d %X') \
        if self.__scraper.config.note_scrape_date_b else ""
    key_note_s = 'Scraped metadata from {0} [{1}]{2}.'.format(
        "ComicVine", key_tag_s, date_s) if key_tag_s else ''
    if key_note_s and notestring_s:
        # 2. we have both a new key-note (based on the key tag), AND a
        # non-empty notestring; find and replace the existing key-note (if
        # it exists in the notestring) with the new one.
        matches = False
        prev_issue_key = db.parse_key_tag(notestring_s)
        if prev_issue_key:
            prev_key_tag = db.create_key_tag_s(prev_issue_key)
            if prev_key_tag:
                # matches a full key-note: "Scraped ... [TAG]." either with
                # or without the trailing "on <date> <time>." timestamp
                regexp = re.compile( r"(?i)Scraped.*?" + prev_key_tag \
                    + "(]\.|.*?[\d\.]{8,} [\d:]{6,}\.)")
                matches = regexp.search(notestring_s)
        if matches:
            # 2a. yup, found an existing key-note--replace it with the new one
            updated_notestring_s = re.sub(regexp, key_note_s, notestring_s)
        else:
            # 2b. nope, no previous key-note found. try looking for the key
            # tag on it's own (i.e. if the user added it by hand)
            if prev_issue_key:
                prev_key_tag = db.create_key_tag_s(prev_issue_key)
                if prev_key_tag:
                    regexp = re.compile(r"(?i)" + prev_key_tag)
                    matches = regexp.search(notestring_s)
            if matches:
                # 2c. yup, found a key tag--replace it with the new one
                updated_notestring_s = re.sub(regexp, key_tag_s, notestring_s)
            else:
                # 2b. nope, no previous key found--just append the new key-note
                updated_notestring_s = notestring_s + "\n\n" + key_note_s
    elif key_note_s:
        # 3. no previous notestring, so the key-note *becomes* the new string
        updated_notestring_s = key_note_s
    else:
        # 4. there's no key tag available, so don't change the tagstring.
        updated_notestring_s = notestring_s
    return updated_notestring_s
#===========================================================================
@staticmethod
def __massage_new_string(
      label, new_value, old_value, update, overwrite, ignoreblanks):
    '''
    Returns a string value that should be copied into our backing ComicBook
    object, IFF that string value is not None. Uses a number of rules to
    decide what value to return.
    label - a human readable description of the given string being changed.
    new_value - the proposed new string value to copy over.
    old_value - the original value that the new value would copy over.
    update - if false, this method always returns None
    overwrite - whether it's acceptable to overwrite the old value with the
        new value when the old value is non-blank.
    ignoreblanks - if true, we'll never overwrite an old non-blank value
        with a new, blank value..
    '''
    # first, a little housekeeping so that we stay really robust
    if new_value is None:
        new_value = ''
    if old_value is None:
        old_value = ''
    # NOTE(review): 'basestring' exists only in Python 2 / IronPython; this
    # module clearly targets that runtime.
    if not isinstance(new_value, basestring) or \
          not isinstance(old_value,basestring):
        raise TypeError("wrong types for this method (" + label +")")
    old_value = old_value.strip();
    new_value = new_value.strip();
    # now decide about whether or not to actually do the update
    # only update if all of the following are true:
    # 1) the update option is turned on for this particular field
    # 2) we can overwrite the existing value, or there is no existing value
    # 3) we're not overwriting with a blank value unless we're allowed to
    retval = None;
    if update and (overwrite or not old_value) and \
          not (ignoreblanks and not new_value):
        retval = new_value
        # a '*' in the debug log marks a value that actually changed
        marker = ' '
        if old_value != new_value:
            marker = '*'
        # truncate long values so the debug log stays readable
        chars = retval.replace('\n', ' ')
        if len(chars) > 70:
            chars = chars[:70] + " ..."
        log.debug("--> ", marker, label.ljust(15), ": ", chars)
    else:
        log.debug("--> ", label.ljust(15), ": --- skipped ---")
    return retval
#===========================================================================
@staticmethod
def __massage_new_string_list(
      label, new_list, old_list, update, overwrite, ignoreblanks):
    '''
    Returns a string list [] that should be copied into our backing ComicBook
    object, IFF that string list is not None. Uses a number of rules to
    decide what value to return.
    label - a human readable description of the given string being changed.
    new_list - the proposed new string list to copy over.
    old_list - the original list that the new list would copy over.
    update - if false, this method always returns None
    overwrite - whether it's acceptable to overwrite the old list with the
        new list when the old list is not empty.
    ignoreblanks - if true, we'll never overwrite an old non-empty list
        with a new, empty one.
    '''
    # first, a little housekeeping so that we stay really robust
    # (copy both lists so callers' lists are never mutated, and drop
    # None/blank entries)
    new_list = [] if new_list is None else list(new_list)
    new_list = [x for x in new_list if x != None and len(x.strip())>0]
    old_list = [] if old_list is None else list(old_list)
    old_list = [x for x in old_list if x != None and len(x.strip())>0]
    # now decide about whether or not to actually do the update
    # only update if all of the following are true:
    # 1) the update option is turned on for this particular field
    # 2) we can overwrite the existing value, or there is no existing value
    # 3) we're not overwriting with a blank value unless we're allowed to
    retval = None;
    if update and (overwrite or len(old_list)==0) and \
          (not ignoreblanks or len(new_list) > 0):
        retval = new_list
        # a '*' in the debug log marks a list that actually changed
        marker = ' '
        if old_list != new_list:
            marker = '*'
        # truncate long values so the debug log stays readable
        chars = ', '.join(retval).replace('\n', ' ')
        if len(chars) > 70:
            chars = chars[:70] + " ..."
        log.debug("--> ", marker, label.ljust(15), ": ", chars)
    else:
        log.debug("--> ", label.ljust(15), ": --- skipped ---")
    return retval
#===========================================================================
@staticmethod
def __massage_new_number(label, new_value, old_value, update, overwrite, \
ignoreblanks, blank_value, is_valid=None, remap_invalid=None):
'''
Returns an number (int or float) value that should be copied into our
backing ComicBook object, IFF that value is not None. Uses a number of
rules to decide what value to return.
label - a human readable description of | |
for ((a, b), i), cs in both.transitions.items() if i == Move.EMPTY and both.end in cs}
transitions[(Start(), Move.EMPTY)] = {Middle(s) for s in midpoints}
nfa = NFA(Start(), Middle(nfa1.end), transitions, captures)
nfa.remove_redundant_states()
return nfa
def MatchSubtractInside(nfa1: NFA, nfa2: NFA, proper: bool, replace: Optional[NFA] = None) -> NFA:
    """Handles: A->B, A->>B

    Builds an NFA matching A with one match of B removed from the inside
    (optionally spliced with a match of `replace`). When `proper` is set,
    the removed span must be surrounded by at least one consumed symbol on
    each side.
    """
    # like MatchContains, but link (2) and (4)/(5) using partial intersection
    LeftFirst, Left, Replace, RightFirst, Right = new_states("->l1", "->l2", "->m", "->r1", "->r2")
    t1, t1e, c1, t3, c3, t3e, t4, t4e, c4 = {}, {}, {}, {}, {}, {}, {}, {}, {}
    if proper:
        # (1) a copy of A that must consume at least one symbol before the
        # main Left copy: empty moves stay inside LeftFirst, consuming moves
        # hand off to Left
        t1 = {(LeftFirst(s), i): {LeftFirst(t) for t in ts} for (s, i), ts in nfa1.transitions.items() if i == Move.EMPTY}
        t1e = {(LeftFirst(s), i): {Left(t) for t in ts} for (s, i), ts in nfa1.transitions.items() if i != Move.EMPTY}
        c1 = {(LeftFirst(s), i): cs for (s, i), cs in nfa1.captures.items()}
    # (2) the main left copy of A
    t2 = {(Left(s), i): {Left(t) for t in ts} for (s, i), ts in nfa1.transitions.items()}
    c2 = {(Left(s), i): cs for (s, i), cs in nfa1.captures.items()}
    t2es = []
    if replace:
        # (3) one copy of `replace` per A-state s, so after the replacement
        # we can resume A from the exact state where the B-match started
        t3 = {(Replace(s, q), i): {Replace(s, t) for t in ts} for (q, i), ts in replace.transitions.items() for s in nfa1.states}
        c3 = {(Replace(s, q), i): cs for (q, i), cs in replace.captures.items() for s in nfa1.states}
        t3e = {(Replace(s, replace.end), Move.EMPTY): {(RightFirst(s) if proper else Right(s))} for s in nfa1.states}
    for s in nfa1.states:
        # Partial intersection of A (started at s) with B: new_end holds
        # the Left states from which a B-match can begin, new_start the
        # A-states where that B-match finishes (i.e. where to resume).
        both = MatchBoth(nfa1, nfa2, start_from={(s, nfa2.start)}, stop_at={(a, nfa2.end) for a in nfa1.states})
        new_end = {a for a, _ in both.transitions.get((both.start, Move.EMPTY), set())}
        new_start = {a[0] for (a, i), cs in both.transitions.items() if i == Move.EMPTY and both.end in cs}
        t2es.append(
            {(Left(e), Move.EMPTY): {(Replace(s, replace.start) if replace else RightFirst(s) if proper else Right(s)) for s in new_start} for e in new_end}
        )
    if proper:
        # (4) like (1), but for the right-hand side: force at least one
        # consumed symbol after the removed span
        t4 = {(RightFirst(s), i): {RightFirst(t) for t in ts} for (s, i), ts in nfa1.transitions.items() if i == Move.EMPTY}
        t4e = {(RightFirst(s), i): {Right(t) for t in ts} for (s, i), ts in nfa1.transitions.items() if i != Move.EMPTY}
        c4 = {(RightFirst(s), i): cs for (s, i), cs in nfa1.captures.items()}
    # (5) the main right copy of A, which carries the accepting state
    t5 = {(Right(s), i): {Right(t) for t in ts} for (s, i), ts in nfa1.transitions.items()}
    c5 = {(Right(s), i): cs for (s, i), cs in nfa1.captures.items()}
    transitions = merge_trans(t1, t1e, t2, *t2es, t3, t3e, t4, t4e, t5)
    captures = merge_trans(c1, c2, c3, c4, c5)
    nfa = NFA(LeftFirst(nfa1.start) if proper else Left(nfa1.start), Right(nfa1.end), transitions, captures)
    nfa.remove_redundant_states()
    return nfa
def MatchSubtractOutside(nfa1: NFA, nfa2: NFA, proper: bool) -> NFA:
    """Handles: A-<B, A-<<B

    Builds an NFA matching the middle of an A-match after a B-match prefix
    and a B-match suffix have been stripped. When `proper` is set, both the
    stripped prefix and suffix must be non-empty.
    """
    # Use partial intersections to generate collections of alternatives.
    # both_start: intersections of a prefix of A with B (stripped prefix);
    # both_end: intersections of a suffix of A with B (stripped suffix).
    both_start = MatchBoth(nfa1, nfa2, stop_at={(a, b) for a in nfa1.states for b in nfa2.states})
    both_end = MatchBoth(nfa1, nfa2, start_from={(a, b) for a in nfa1.states for b in nfa2.states})
    both_start_end = {s for (s, i), cs in both_start.transitions.items() if i == Move.EMPTY and both_start.end in cs}
    both_end_start = both_end.transitions.get((both_end.start, Move.EMPTY), set())
    if proper:
        # ensure partial intersections are (potentially) non-empty by
        # intersecting each with "length >= 1"
        both_start_proper = MatchBoth(both_start, MatchLength(1))
        both_start_end = {
            s[0] for (s, i), cs in both_start_proper.transitions.items() if i == Move.EMPTY and both_start_proper.end in cs and s[0] != both_start.end
        }
        both_end_proper = MatchBoth(both_end, MatchLength(1))
        both_end_start = {s[0] for s in both_end_proper.transitions.get((both_end_proper.start, Move.EMPTY), set()) if s[0] != both_end.start}
    nfas: List[NFA] = []
    # midpoints: B-states reachable both at the end of a stripped prefix
    # and at the start of a stripped suffix; each yields one alternative
    midpoints = {b for a, b in both_start_end if any(b == b2 for a2, b2 in both_end_start)}
    for m in midpoints:
        Start, Middle, End = new_states("-<a", "-<m", "-<z")
        # the alternative runs a copy of A between the two strip points
        transitions: Transitions = {(Middle(s), i): {Middle(t) for t in ts} for (s, i), ts in nfa1.transitions.items()}
        captures: Captures = {(Middle(s), i): cs for (s, i), cs in nfa1.captures.items()}
        transitions[Start(), Move.EMPTY] = {Middle(a) for a, b in both_start_end if b == m}
        for a in {a for a, b in both_end_start if b == m}:
            transitions.setdefault((Middle(a), Move.EMPTY), set()).add(End())
        nfa = NFA(Start(), End(), transitions, captures)
        nfa.remove_redundant_states()
        nfas.append(nfa)
    # the overall result is the union over all midpoints
    return MatchEither(*nfas)
def MatchSubtractAlternating(nfa1: NFA, nfa2: NFA, ordered: bool, from_right: bool = True) -> NFA:
    """Handles: A-#B, A_-#B, A-##B

    Build an NFA over (A-state, B-state) pairs in which every non-empty
    A-transition is immediately followed by one transition taken from the
    A&B intersection, so consumed characters alternate between the kept and
    subtracted halves.  ``ordered``/``from_right`` select which half moves
    first (both orders are allowed when ``ordered`` is false).
    """
    # Expand transitions in A with one from A&B (tracking both A and B states)
    both = MatchBoth(nfa1, nfa2, stop_at={(a, b) for a in nfa1.states for b in nfa2.states}, start_from={(a, b) for a in nfa1.states for b in nfa2.states})
    transitions: Transitions = {}
    captures: Captures = {}
    for (s, i), ts in nfa1.transitions.items():
        for b in nfa2.states:
            if i == Move.EMPTY:
                # Empty moves in A leave the tracked B state untouched.
                states = {(t, b) for t in ts}
            else:
                # After consuming in A, chase one non-empty A&B transition.
                ts = nfa1.expand_epsilons(ts)
                states = {u for (r, i), us in both.transitions.items() for t in ts if r == (t, b) and i != Move.EMPTY for u in us}
            transitions[((s, b), i)] = states
            if (s, i) in nfa1.captures:
                captures[((s, b), i)] = nfa1.captures[(s, i)]
            if b == nfa2.end and nfa1.end in ts:
                # Allow finishing once both component NFAs can reach their ends.
                transitions.setdefault(((s, b), i), set()).add((nfa1.end, nfa2.end))
    # Propagate B's empty transitions with the A state held fixed.
    for (b, i), cs in nfa2.transitions.items():
        for s in nfa1.states:
            if i == Move.EMPTY:
                transitions[((s, b), i)] = {(s, c) for c in cs}
    start_state = set()
    if not ordered or not from_right:
        # Starting with a subtracted (A&B) character is permitted.
        ts = {(nfa1.start, nfa2.start)}
        ts = both.expand_epsilons(ts)
        start_state |= {u for (s, i), us in both.transitions.items() if s in ts and i != Move.EMPTY for u in us}
    if not ordered or from_right:
        # Starting with a kept (A) character is permitted.
        start_state |= {(nfa1.start, nfa2.start)}
    if len(start_state) == 1:
        nfa = NFA(first(start_state), (nfa1.end, nfa2.end), transitions, captures)
    else:
        # Multiple possible starts: join them behind a fresh start state "a".
        transitions[("a", Move.EMPTY)] = start_state
        nfa = NFA("a", (nfa1.end, nfa2.end), transitions, captures)
    nfa.remove_redundant_states()
    return nfa
def MatchSubtractInterleaved(nfa1: NFA, nfa2: NFA, proper: bool, from_right: bool = True) -> NFA:
    """Handles: A-^B, A-^^B, A_-^^B

    Build an NFA over (A-state, B-state) pairs in which A's own transitions
    are freely mixed with empty jumps through the A&B intersection, i.e. a
    B-match is subtracted from anywhere inside an A-match.  ``proper``
    requires kept characters both before and after the subtracted part;
    ``from_right`` picks which construction enforces that.
    """
    # Combine transitions from A with empty transitions from A&B (tracking both A and B states)
    both = MatchBoth(nfa1, nfa2, stop_at={(a, b) for a in nfa1.states for b in nfa2.states}, start_from={(a, b) for a in nfa1.states for b in nfa2.states})
    transitions: Transitions = {}
    captures: Captures = {}
    for (a, i), ts in nfa1.transitions.items():
        for b in nfa2.states:
            transitions[(a, b), i] = {(t, b) for t in ts}
            if (a, i) in nfa1.captures:
                captures[(a, b), i] = nfa1.captures[(a, i)]
    # A&B moves are folded in as epsilon jumps between state pairs.
    for (ab, i), tus in both.transitions.items():
        if ab != both.start:
            transitions.setdefault((ab, Move.EMPTY), set()).update(tus - {both.end})
    if not proper:
        # Any interleaving is fine: just route the joint end to a fresh end "z".
        transitions[((nfa1.end, nfa2.end), Move.EMPTY)] = {"z"}
        nfa = NFA((nfa1.start, nfa2.start), "z", transitions, captures)
    elif from_right:
        # Sandwich the interleaved machine between two plain copies of A so at
        # least one kept character occurs before and after the subtraction.
        First, Middle, Last = new_states("-^a", "-^m", "-^z")
        t1 = {(First(s), i): {First(t) for t in ts} for (s, i), ts in nfa1.transitions.items() if i == Move.EMPTY}
        t1e = {(First(s), i): {Middle((t, nfa2.start)) for t in ts} for (s, i), ts in nfa1.transitions.items() if i != Move.EMPTY}
        c1 = {(First(s), i): cs for (s, i), cs in nfa1.captures.items()}
        t2 = {(Middle(s), i): {Middle(t) for t in ts} for (s, i), ts in transitions.items()}
        c2 = {(Middle(s), i): cs for (s, i), cs in captures.items()}
        t2e = {(Middle((s, nfa2.end)), i): {Last(t) for t in ts} for (s, i), ts in nfa1.transitions.items() if i != Move.EMPTY}
        t3 = {(Last(s), i): {Last(t) for t in ts} for (s, i), ts in nfa1.transitions.items()}
        c3 = {(Last(s), i): cs for (s, i), cs in nfa1.captures.items()}
        transitions = merge_trans(t1, t1e, t2, t2e, t3)
        captures = merge_trans(c1, c2, c3)
        nfa = NFA(First(nfa1.start), Last(nfa1.end), transitions, captures)
    else:
        # Proper-from-left: force one non-empty A&B move right after the start
        # and one right before the end, via fresh states "a" and "z".
        ts = both.expand_epsilons({(nfa1.start, nfa2.start)})
        start_states = {u for (s, i), us in both.transitions.items() if s in ts and i != Move.EMPTY for u in us}
        ts = set()
        for t in both.states:
            if (nfa1.end, nfa2.end) in both.expand_epsilons({t}):
                ts.add(t)
        end_states = {s for (s, i), us in both.transitions.items() if any(u in ts for u in us) and i != Move.EMPTY}
        transitions[("a", Move.EMPTY)] = start_states
        for s in end_states:
            transitions[(s, Move.EMPTY)] = {"z"}
        nfa = NFA("a", "z", transitions, captures)
    nfa.remove_redundant_states()
    return nfa
def MatchReversed(nfa: NFA) -> NFA:
"""Handles: (?r:A)"""
# just reverse the edges (with special handling for *-transitions)
transitions: Transitions = {}
captures: Captures = {}
(Extra,) = new_states("r")
for (s, i), ts in nfa.transitions.items():
for t in ts:
if i == Move.ALL:
if any(r != s and t in vs for (r, j), vs in nfa.transitions.items()):
extra_state = Extra(s, t)
transitions.setdefault((t, Move.EMPTY), set()).add(extra_state)
t = extra_state
for (r, j), _ | |
# -*- coding: utf-8 -*-
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import argparse
import cv2
import xlrd
import torch
import imageio
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from utils.Percept import percept
from utils.Det_crash import det_crash
from utils.Vehicle import Vehicle_S12, Vehicle_S3, deci_S3, deci_EB
from utils.Inj_Pre import RNN
from utils.Con_est import Collision_cond
warnings.filterwarnings('ignore')
__author__ = "<NAME>"
def load_para(file, Num):
    '''Load the reconstructed information of real-world accidents.

    Parameters
    ----------
    file : str
        Path of the Excel workbook holding the reconstructed accident data.
    Num : int
        Case number; vehicle/occupant 1 lives on row ``Num + 1`` of the first
        sheet and vehicle/occupant 2 on row ``Num + 1 + 51``.

    Returns
    -------
    veh_param : tuple
        ``(veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m)`` — two-element lists
        (one entry per vehicle).
    occ_param : tuple
        ``(age, belt, sex, airbag, mass_r)`` — two-element lists of integer
        codes (one entry per occupant/vehicle).
    '''
    # Load the data file and fetch the two rows once instead of re-reading
    # them for every field (the original re-read the sheet 12+ times).
    para_data = xlrd.open_workbook(file).sheet_by_index(0)
    row_1 = para_data.row_values(Num + 1)        # vehicle/occupant 1
    row_2 = para_data.row_values(Num + 1 + 51)   # vehicle/occupant 2
    # Vehicle parameters; lengths/widths are divided by 1000 — presumably
    # stored in mm and converted to m (TODO confirm against the workbook).
    veh_l = [row_1[3] / 1000, row_2[3] / 1000]
    veh_w = [row_1[4] / 1000, row_2[4] / 1000]
    veh_cgf = [row_1[6], row_2[6]]
    veh_cgs = [0.5 * veh_w[0], 0.5 * veh_w[1]]
    veh_m = [row_1[2], row_2[2]]
    veh_I = [row_1[5], row_2[5]]
    # Radius of gyration derived from yaw inertia and mass.
    veh_k = [np.sqrt(veh_I[0] / veh_m[0]), np.sqrt(veh_I[1] / veh_m[1])]
    veh_param = (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m)
    # Occupant parameters.
    age = [row_1[8], row_2[8]]
    sex = [row_1[7], row_2[7]]
    belt = [row_1[9], row_2[9]]
    airbag = [row_1[10], row_2[10]]
    # Bin ages into four groups: <20 -> 0, 20-44 -> 1, 45-64 -> 2, 65+ -> 3.
    for i in range(2):
        if age[i] < 20:
            age[i] = 0
        elif age[i] < 45:
            age[i] = 1
        elif age[i] < 65:
            age[i] = 2
        else:
            age[i] = 3
    # Encode the categorical columns as integers.
    belt = [0 if b == 'Not in use' else 1 for b in belt]
    sex = [0 if s == 'Male' else 1 for s in sex]
    airbag = [1 if a == 'Activated' else 0 for a in airbag]
    # Bin the mass ratio m1/m2 into five classes; the class seen from
    # vehicle 2 is the mirror image (4 - class).
    mass_ratio = veh_m[0] / veh_m[1]
    if mass_ratio < 1 / 2:
        mass_r_12 = 0
    elif mass_ratio < 1 / 1.3:
        mass_r_12 = 1
    elif mass_ratio < 1.3:
        mass_r_12 = 2
    elif mass_ratio < 2:
        mass_r_12 = 3
    else:
        mass_r_12 = 4
    mass_r = [mass_r_12, 4 - mass_r_12]
    occ_param = (age, belt, sex, airbag, mass_r)
    return veh_param, occ_param
def resize_pic(image, angle, l_, w_):
    '''Resize and rotate the vehicle.png.

    The image height is rescaled so its pixel aspect ratio matches the
    vehicle's width/length ratio (relative to the source picture's fixed
    3370/8651 aspect constant), then the image is rotated by *angle* degrees
    about its center on an enlarged white canvas so no corner is clipped.
    '''
    # Stretch the height to match the vehicle's aspect ratio.
    new_height = int(image.shape[0] / (3370 / 8651) * (w_ / l_))
    image = cv2.resize(image, (image.shape[1], new_height))
    # Center of the resized image.
    h, w = image.shape[:2]
    center = (w // 2, h // 2)
    # Rotation matrix about the center.
    rot = cv2.getRotationMatrix2D(center, angle, 1.0)
    cos = np.abs(rot[0, 0])
    sin = np.abs(rot[0, 1])
    # Bounding box of the rotated image.
    bound_w = int((h * sin) + (w * cos))
    bound_h = int((h * cos) + (w * sin))
    # Translate so the rotated image stays centered on the new canvas.
    rot[0, 2] += (bound_w / 2) - center[0]
    rot[1, 2] += (bound_h / 2) - center[1]
    # Rotate onto a white background and return the result.
    return cv2.warpAffine(image, rot, (bound_w, bound_h), borderValue=(255, 255, 255))
def plot_env(ax, V1_x_seq, V1_y_seq, V1_angle, V2_x_seq, V2_y_seq, V2_angle, veh_l, veh_w, img_list):
    '''Visualize the simulation environment.

    Draws both vehicles as semi-transparent rotated rectangles at their most
    recent positions and their past trajectories as dashed lines.  *img_list*
    is kept for interface compatibility; image-based vehicle rendering is
    currently disabled.
    '''
    plt.cla()
    plt.xticks(fontsize=22)
    plt.yticks(fontsize=22)
    plt.xlim((-10, 40))
    plt.ylim((-10, 20))
    # Relabel the axes so they read 0..50 / 0..30 instead of the data range.
    plt.xticks(np.arange(-10, 40.1, 5), range(0, 50 + 1, 5), family='Times New Roman', fontsize=16)
    plt.yticks(np.arange(-10, 20.1, 5), range(0, 30 + 1, 5), family='Times New Roman', fontsize=16)
    plt.subplots_adjust(left=0.1, bottom=0.1, top=0.94, right=0.94, wspace=0.25, hspace=0.25)
    from matplotlib import patches
    # Draw each vehicle as a rectangle anchored at its rear-left corner
    # (Rectangle rotates about that anchor, so shift from the center first).
    for xs, ys, heading, length, width, color in (
            (V1_x_seq, V1_y_seq, V1_angle, veh_l[0], veh_w[0], 'red'),
            (V2_x_seq, V2_y_seq, V2_angle, veh_l[1], veh_w[1], 'blue')):
        corner_x = xs[-1] - (length / 2) * np.cos(heading) + (width / 2) * np.sin(heading)
        corner_y = ys[-1] - (length / 2) * np.sin(heading) - (width / 2) * np.cos(heading)
        body = patches.Rectangle((corner_x, corner_y), length, width,
                                 angle=np.rad2deg(heading), linewidth=0, fill=True,
                                 zorder=2, color=color, alpha=0.5)
        ax.add_patch(body)
    # Plot the vehicles' trajectories.
    plt.plot(V1_x_seq, V1_y_seq, color='red', linestyle='--', linewidth=1.3, alpha=0.5)
    plt.plot(V2_x_seq, V2_y_seq, color='blue', linestyle='--', linewidth=1.3, alpha=0.5)
def main():
''' Make human injury-based safety decisions using the injury risk mitigation (IRM) algorithm. '''
parser = argparse.ArgumentParser()
parser.add_argument('--case_num', type=int, default=1, help='Simulation case number (1-5)')
parser.add_argument('--t_act', type=int, default=500,
help='Activation time of IRM algorithm (100ms~1000ms before the collision)')
parser.add_argument('--Level', type=str, default='S1', help='Level of the IRM algorithm: EB, S1, S2, S3')
parser.add_argument('--Ego_V', type=int, default=1, help='Choose one vehicle as the ego vehicle: 1 or 2')
parser.add_argument('--profile_inf', type=str, default='para\Record_Information_example.xlsx',
help='File: information of the reconstructed accidents')
parser.add_argument('--no_visualize', action='store_false', help='simulation visualization')
parser.add_argument('--save_gif', action='store_true', help='save simulation visualization')
opt = parser.parse_args()
Deci_set = ['straight_cons', 'straight_dec-all', 'straight_dec-half', 'straight_acc-half', 'straight_acc-all',
'left-all_dec-all', 'right-all_dec-all', 'left-all_acc-all', 'right-all_acc-all',
'left-half_dec-all', 'left-half_dec-half', 'left-all_dec-half', 'left-half_cons',
'left-all_cons', 'left-half_acc-half', 'left-all_acc-half', 'left-half_acc-all',
'right-half_dec-all', 'right-half_dec-half', 'right-all_dec-half', 'right-half_cons',
'right-all_cons', 'right-half_acc-half', 'right-all_acc-half', 'right-half_acc-all',
'Record_trajectory']
# Load the occupant injury prediction model.
model_InjPre = RNN(in_dim=16, hid_dim=32, n_layers=2, flag_LSTM=True, bidirectional=True, dropout=0.5)
model_InjPre.load_state_dict(torch.load('para\\DL_InjuryPrediction.pkl'))
model_InjPre.eval()
# Load the vehicle image for visualization.
img_1 = mpimg.imread('para\image\\red.png')
img_2 = mpimg.imread('para\image\\blue.png')
img_list = [img_1, img_2]
# Load the parameters of vehicles and occupants.
veh_param, occ_param = load_para(opt.profile_inf, opt.case_num)
(veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
(age, belt, female, airbag, mass_r) = occ_param
# Translate the activation time of IRM algorithm.
t_act = int(100 - opt.t_act/10)
# Define the random seed.
random_seed = [41, 24, 11, ][opt.case_num - 1] + t_act
np.random.seed(random_seed)
# Define the two vehicles in the imminent collision scenario.
if opt.Level == 'S3':
Veh_1 = Vehicle_S3(opt.case_num, 0, 1, mass_ratio=mass_r[0], age=age[0], belt=belt[0], female=female[0],
airbag=airbag[0], r_seed=random_seed)
Veh_2 = Vehicle_S3(opt.case_num, 0, 2, mass_ratio=mass_r[1], age=age[1], belt=belt[1], female=female[1],
airbag=airbag[1], r_seed=random_seed)
else:
Veh_1 = Vehicle_S12(opt.case_num, 0, 1, mass_ratio=mass_r[0], age=age[0], belt=belt[0], female=female[0],
airbag=airbag[0], r_seed=random_seed)
Veh_2 = Vehicle_S12(opt.case_num, 0, 2, mass_ratio=mass_r[1], age=age[1], belt=belt[1], female=female[1],
airbag=airbag[1], r_seed=random_seed)
# Predefine some parameters.
flag_EB, flag_S3 = True, True
image_list = []
INJ = - np.ones(2)
INJ_ = - np.ones((2, 6))
V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq, V1_a_seq, V1_omega_r_seq, V1_wheel_anlge_seq = [], [], [], [], [], [], [], []
V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq, V2_a_seq, V2_omega_r_seq, V2_wheel_anlge_seq = [], [], [], [], [], [], [], []
t_1 = 0
t_2 = 0
if opt.no_visualize:
fig, ax = plt.subplots(figsize=(10, 6))
plt.ion()
plt.axis('equal')
# Simulate the imminent collision scenario with IRM algorithm for 0-2 seconds.
# Update the time steps in real-time domain.
# The minimum time interval is 10 ms in the simulation.
for i in range(len(Veh_1.x)):
# print(i)
# Record the vehicle states at time step i.
V1_x_seq.append(Veh_1.x[t_1])
V1_y_seq.append(Veh_1.y[t_1])
V1_theta_seq.append(Veh_1.theta[t_1])
V1_v_long_seq.append(Veh_1.v_long[t_1])
V1_v_lat_seq.append(Veh_1.v_lat[t_1])
V1_a_seq.append(Veh_1.v_long_dot[t_1])
V1_omega_r_seq.append(Veh_1.omega_r[t_1])
V1_wheel_anlge_seq.append(Veh_1.wheel_anlge[t_1])
V2_x_seq.append(Veh_2.x[t_2])
V2_y_seq.append(Veh_2.y[t_2])
V2_theta_seq.append(Veh_2.theta[t_2])
V2_v_long_seq.append(Veh_2.v_long[t_2])
V2_v_lat_seq.append(Veh_2.v_lat[t_2])
V2_a_seq.append(Veh_2.v_long_dot[t_2])
V2_omega_r_seq.append(Veh_2.omega_r[t_2])
V2_wheel_anlge_seq.append(Veh_2.wheel_anlge[t_2])
# Make safety decisions based on the IRM algorithm under the different levels.
if opt.Level == 'EB' and flag_EB:
if i >= t_act and (i - t_act) % 10 == 0:
if opt.Ego_V == 1:
# Perceive Vehicle_2's states.
v2_state = percept(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq, V2_a_seq,
V2_omega_r_seq, V2_wheel_anlge_seq, V1_x_seq, V1_y_seq, r_seed=random_seed)
# Decide whether to activate emergency braking (EB).
t_1, flag_EB = deci_EB(i, Veh_1, t_1, v2_state,
(V1_x_seq[-1], V1_y_seq[-1], V1_theta_seq[-1], V1_v_long_seq[-1]))
elif opt.Ego_V == 2:
# Perceive Vehicle_2's states.
v1_state = percept(i, V1_x_seq, V1_y_seq, V1_theta_seq, V1_v_long_seq, V1_v_lat_seq, V1_a_seq,
V1_omega_r_seq, V1_wheel_anlge_seq, V2_x_seq, V2_y_seq, r_seed=random_seed)
# Decide whether to activate emergency braking (EB).
t_2, flag_EB = deci_EB(i, Veh_2, t_2, v1_state,
(V2_x_seq[-1], V2_y_seq[-1], V2_theta_seq[-1], V2_v_long_seq[-1]))
elif opt.Level == 'S1':
# The ego vehicle updates decisions with the frequency of 10 Hz.
if i >= t_act and (i - t_act) % 10 == 0:
if opt.Ego_V == 1:
# Perceive Vehicle_2's states.
v2_state = percept(i, V2_x_seq, V2_y_seq, V2_theta_seq, V2_v_long_seq, V2_v_lat_seq, | |
"""Utils to check the samplers and compatibility with scikit-learn
"""
# Adapted from imbalanced-learn
# Adapated from scikit-learn
# Authors: <NAME> <<EMAIL>>
# License: MIT
import sys
import traceback
import warnings
from collections import Counter
from functools import partial
import pytest
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.datasets import (
fetch_openml,
make_classification,
make_multilabel_classification,
) # noqa
from sklearn.cluster import KMeans
from sklearn.exceptions import SkipTestWarning
from sklearn.preprocessing import label_binarize
from sklearn.utils.estimator_checks import _maybe_mark_xfail
from sklearn.utils.estimator_checks import _get_check_estimator_ids
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_raises_regex
from sklearn.utils.multiclass import type_of_target
from imbalanced_ensemble.datasets import make_imbalance
from imbalanced_ensemble.sampler.over_sampling.base import BaseOverSampler
from imbalanced_ensemble.sampler.under_sampling.base import BaseCleaningSampler, BaseUnderSampler
def _set_checking_parameters(estimator):
params = estimator.get_params()
name = estimator.__class__.__name__
if "n_estimators" in params:
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if name == "ClusterCentroids":
estimator.set_params(
voting="soft",
estimator=KMeans(random_state=0, algorithm="full", n_init=1),
)
if name == "KMeansSMOTE":
estimator.set_params(kmeans_estimator=12)
def _yield_sampler_checks(sampler):
    """Yield the check functions applicable to *sampler*, based on its tags."""
    tags = sampler._get_tags()
    # Checks every sampler must pass.
    yield check_target_type
    yield check_samplers_one_label
    yield check_samplers_fit
    yield check_samplers_fit_resample
    yield check_samplers_sampling_strategy_fit_resample
    # Input-format checks, gated on the sampler's declared X_types tag.
    if "sparse" in tags["X_types"]:
        yield check_samplers_sparse
    if "dataframe" in tags["X_types"]:
        yield check_samplers_pandas
    if "string" in tags["X_types"]:
        yield check_samplers_string
    if tags["allow_nan"]:
        yield check_samplers_nan
    yield check_samplers_list
    yield check_samplers_multiclass_ova
    yield check_samplers_preserve_dtype
    # we don't filter samplers based on their tag here because we want to make
    # sure that the fitted attribute does not exist if the tag is not
    # stipulated
    yield check_samplers_sample_indices
    yield check_samplers_2d_target
def _yield_classifier_checks(classifier):
    """Yield the check functions applicable to classifier-like estimators."""
    checks = (
        check_classifier_on_multilabel_or_multioutput_targets,
        check_classifiers_with_encoded_labels,
    )
    yield from checks
def _yield_all_checks(estimator):
    """Yield every check applicable to *estimator* (sampler and/or classifier)."""
    name = estimator.__class__.__name__
    tags = estimator._get_tags()
    if tags["_skip_test"]:
        # Estimator explicitly opted out of the common checks.
        warnings.warn(
            f"Explicit SKIP via _skip_test tag for estimator {name}.",
            SkipTestWarning,
        )
        return
    # Sampler-style API.
    if hasattr(estimator, "fit_resample"):
        yield from _yield_sampler_checks(estimator)
    # Classifier-style API.
    if hasattr(estimator, "predict"):
        yield from _yield_classifier_checks(estimator)
def parametrize_with_checks(estimators):
    """Pytest specific decorator for parametrizing estimator checks.

    The `id` of each check is set to be a pprint version of the estimator
    and the name of the check with its keyword arguments.
    This allows to use `pytest -k` to specify which tests to run::

        pytest test_check_estimators.py -k check_estimators_fit_returns_self

    Parameters
    ----------
    estimators : list of estimators instances
        Estimators to generated checks for.

    Returns
    -------
    decorator : `pytest.mark.parametrize`

    Examples
    --------
    >>> from sklearn.utils.estimator_checks import parametrize_with_checks
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> @parametrize_with_checks([LogisticRegression(),
    ...                           DecisionTreeRegressor()])
    ... def test_sklearn_compatible_estimator(estimator, check):
    ...     check(estimator)
    """
    def checks_generator():
        # Pair every estimator with each of its applicable checks, binding
        # the estimator's class name as the check's first argument.
        for estimator in estimators:
            name = type(estimator).__name__
            for check in _yield_all_checks(estimator):
                check = partial(check, name)
                # Honor any xfail_checks declared in the estimator's tags.
                yield _maybe_mark_xfail(estimator, check, pytest)
    return pytest.mark.parametrize(
        "estimator, check", checks_generator(), ids=_get_check_estimator_ids
    )
def check_target_type(name, estimator_orig):
    """Unsupported target types must be rejected with an informative error."""
    estimator = clone(estimator_orig)
    X = np.random.random((20, 2))
    rng = np.random.RandomState(42)
    # Continuous targets cannot be resampled (we cannot raise a warning here,
    # so an error is expected); multilabel/multioutput targets are likewise
    # unsupported.
    bad_targets = (
        (np.linspace(0, 1, 20), "Unknown label type: 'continuous'"),
        (rng.randint(2, size=(20, 3)), "Multilabel and multioutput targets are not supported."),
    )
    for y, msg in bad_targets:
        assert_raises_regex(
            ValueError,
            msg,
            estimator.fit_resample,
            X,
            y,
        )
def check_samplers_one_label(name, sampler_orig):
    """Fitting on a single-class target must raise a ValueError mentioning 'class'."""
    sampler = clone(sampler_orig)
    error_string_fit = "Sampler can't balance when only one class is present."
    X = np.random.random((20, 2))
    y = np.zeros(20)  # only one class present
    try:
        sampler.fit_resample(X, y)
    except ValueError as e:
        if "class" not in repr(e):
            # Wrong ValueError: report it and re-raise.
            print(error_string_fit, sampler.__class__.__name__, e)
            traceback.print_exc(file=sys.stdout)
            raise e
        else:
            # Expected failure mode: the check passes.
            return
    except Exception as exc:
        # Any other exception type is a failure of the sampler itself.
        print(error_string_fit, traceback, exc)
        traceback.print_exc(file=sys.stdout)
        raise exc
    # No exception at all: the sampler silently accepted a one-class target.
    raise AssertionError(error_string_fit)
def check_samplers_fit(name, sampler_orig):
    """After fit_resample the sampler must expose ``sampling_strategy_``."""
    sampler = clone(sampler_orig)
    np.random.seed(42)  # make this check reproducible
    data = np.random.random((30, 2))
    labels = np.array([1] * 20 + [0] * 10)
    sampler.fit_resample(data, labels)
    msg = "No fitted attribute sampling_strategy_"
    assert hasattr(sampler, "sampling_strategy_"), msg
def check_samplers_fit_resample(name, sampler_orig):
    """Resampled class counts must match the sampler family's contract.

    Over-samplers raise every class to at least the majority count,
    under-samplers reduce every class to the minority count (except
    InstanceHardnessThreshold, which only approximates the target), and
    cleaning samplers strictly reduce every non-minority class.
    """
    sampler = clone(sampler_orig)
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    target_stats = Counter(y)
    X_res, y_res = sampler.fit_resample(X, y)
    # Compute the resampled counts once instead of re-counting y_res in every
    # branch (the original also left this assignment unused in one branch).
    target_stats_res = Counter(y_res)
    if isinstance(sampler, BaseOverSampler):
        n_samples = max(target_stats.values())
        assert all(value >= n_samples for value in target_stats_res.values())
    elif isinstance(sampler, BaseUnderSampler):
        n_samples = min(target_stats.values())
        if name == "InstanceHardnessThreshold":
            # IHT does not enforce the number of samples but provide a number
            # of samples the closest to the desired target.
            assert all(
                target_stats_res[k] <= target_stats[k] for k in target_stats.keys()
            )
        else:
            assert all(value == n_samples for value in target_stats_res.values())
    elif isinstance(sampler, BaseCleaningSampler):
        class_minority = min(target_stats, key=target_stats.get)
        assert all(
            target_stats[class_sample] > target_stats_res[class_sample]
            for class_sample in target_stats.keys()
            if class_sample != class_minority
        )
def check_samplers_sampling_strategy_fit_resample(name, sampler_orig):
    """A class excluded from ``sampling_strategy`` must keep its count."""
    sampler = clone(sampler_orig)
    # in this test we will force all samplers to not change the class 1
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    expected_stat = Counter(y)[1]
    # Choose a strategy that only targets classes 0 and 2.
    if isinstance(sampler, BaseOverSampler):
        strategy = {2: 498, 0: 498}
    elif isinstance(sampler, BaseUnderSampler):
        strategy = {2: 201, 0: 201}
    elif isinstance(sampler, BaseCleaningSampler):
        strategy = [2, 0]
    else:
        return
    sampler.set_params(sampling_strategy=strategy)
    _, y_res = sampler.fit_resample(X, y)
    assert Counter(y_res)[1] == expected_stat
def check_samplers_sparse(name, sampler_orig):
    """Resampling a sparse matrix must stay sparse and match the dense result."""
    sampler = clone(sampler_orig)
    # check that sparse matrices can be passed through the sampler leading to
    # the same results than dense
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    X_sparse = sparse.csr_matrix(X)
    X_res_sparse, y_res_sparse = sampler.fit_resample(X_sparse, y)
    sampler = clone(sampler)
    X_res, y_res = sampler.fit_resample(X, y)
    assert sparse.issparse(X_res_sparse)
    # Use `.toarray()` rather than the `.A` shorthand: `.A` is deprecated and
    # removed for sparse arrays in recent SciPy releases.
    assert_allclose(X_res_sparse.toarray(), X_res, rtol=1e-5)
    assert_allclose(y_res_sparse, y_res)
def check_samplers_pandas(name, sampler_orig):
    """Samplers must accept pandas containers and return matching types."""
    pd = pytest.importorskip("pandas")
    sampler = clone(sampler_orig)
    # Check that the samplers handle pandas dataframe and pandas series
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    frame_X = pd.DataFrame(X, columns=[str(i) for i in range(X.shape[1])])
    frame_y = pd.DataFrame(y)
    series_y = pd.Series(y, name="class")
    res_frame_X, res_series_y = sampler.fit_resample(frame_X, series_y)
    res_frame_X, res_frame_y = sampler.fit_resample(frame_X, frame_y)
    X_res, y_res = sampler.fit_resample(X, y)
    # The input container type must be preserved on output.
    assert isinstance(res_frame_X, pd.DataFrame)
    assert isinstance(res_frame_y, pd.DataFrame)
    assert isinstance(res_series_y, pd.Series)
    # Column names and the series name are carried through.
    assert frame_X.columns.to_list() == res_frame_X.columns.to_list()
    assert frame_y.columns.to_list() == res_frame_y.columns.to_list()
    assert series_y.name == res_series_y.name
    # Values must agree with the plain ndarray code path.
    assert_allclose(res_frame_X.to_numpy(), X_res)
    assert_allclose(res_frame_y.to_numpy().ravel(), y_res)
    assert_allclose(res_series_y.to_numpy(), y_res)
def check_samplers_list(name, sampler_orig):
    """Samplers must accept plain Python lists and return lists."""
    sampler = clone(sampler_orig)
    # Check that samplers can handle simple lists.
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    X_res, y_res = sampler.fit_resample(X, y)
    X_res_list, y_res_list = sampler.fit_resample(X.tolist(), y.tolist())
    assert isinstance(X_res_list, list)
    assert isinstance(y_res_list, list)
    # List input must produce the same values as ndarray input.
    assert_allclose(X_res, X_res_list)
    assert_allclose(y_res, y_res_list)
def check_samplers_multiclass_ova(name, sampler_orig):
    """Multiclass targets must resample consistently with their OvA encoding."""
    sampler = clone(sampler_orig)
    # Check that multiclass target lead to the same results than OVA encoding
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    # `classes` is keyword-only in scikit-learn >= 0.24; passing it
    # positionally raises a TypeError.
    y_ova = label_binarize(y, classes=np.unique(y))
    X_res, y_res = sampler.fit_resample(X, y)
    X_res_ova, y_res_ova = sampler.fit_resample(X, y_ova)
    assert_allclose(X_res, X_res_ova)
    # The binarized target type must survive resampling, and decoding it must
    # reproduce the plain multiclass result.
    assert type_of_target(y_res_ova) == type_of_target(y_ova)
    assert_allclose(y_res, y_res_ova.argmax(axis=1))
def check_samplers_2d_target(name, sampler_orig):
    """A column-vector target of shape (n_samples, 1) must be accepted."""
    sampler = clone(sampler_orig)
    X, y = make_classification(
        n_samples=100,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    # Reshape the target to 2d; fit_resample must not choke on it.
    sampler.fit_resample(X, y.reshape(-1, 1))
def check_samplers_preserve_dtype(name, sampler_orig):
    """fit_resample must keep non-default input dtypes."""
    sampler = clone(sampler_orig)
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    # Cast away from the default float64/int64 dtypes before resampling.
    X_res, y_res = sampler.fit_resample(X.astype(np.float32), y.astype(np.int32))
    assert X_res.dtype == np.float32, "X dtype is not preserved"
    assert y_res.dtype == np.int32, "y dtype is not preserved"
def check_samplers_sample_indices(name, sampler_orig):
    """``sample_indices_`` must exist iff the sample_indices tag is set."""
    sampler = clone(sampler_orig)
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    sampler.fit_resample(X, y)
    expose_indices = sampler._get_tags().get("sample_indices", None)
    if expose_indices:
        assert hasattr(sampler, "sample_indices_") is expose_indices
    else:
        # No tag (or falsy tag): the attribute must not exist at all.
        assert not hasattr(sampler, "sample_indices_")
def check_samplers_string(name, sampler_orig):
    """Samplers tagged for string data must handle object-dtype categories."""
    rng = np.random.RandomState(0)
    sampler = clone(sampler_orig)
    categories = np.array(["A", "B", "C"], dtype=object)
    n_samples = 30
    # One string feature drawn from the three categories.
    X = categories[rng.randint(low=0, high=3, size=n_samples).reshape(-1, 1)]
    y = rng.permutation([0] * 10 + [1] * 20)
    X_res, y_res = sampler.fit_resample(X, y)
    assert X_res.dtype == object
    assert X_res.shape[0] == y_res.shape[0]
    assert_array_equal(np.unique(X_res.ravel()), categories)
def check_samplers_nan(name, sampler_orig):
    """Samplers tagged allow_nan must carry NaN values through resampling."""
    rng = np.random.RandomState(0)
    sampler = clone(sampler_orig)
    categories = np.array([0, 1, np.nan], dtype=np.float64)
    n_samples = 100
    # One numeric feature whose values include NaN.
    X = categories[rng.randint(low=0, high=3, size=n_samples).reshape(-1, 1)]
    y = rng.permutation([0] * 40 + [1] * 60)
    X_res, y_res = sampler.fit_resample(X, y)
    assert X_res.dtype == np.float64
    assert X_res.shape[0] == y_res.shape[0]
    assert np.any(np.isnan(X_res.ravel()))
def check_classifier_on_multilabel_or_multioutput_targets(name, estimator_orig):
    """Classifiers must reject multilabel / multioutput targets at fit time."""
    estimator = clone(estimator_orig)
    X, y = make_multilabel_classification(n_samples=30)
    with pytest.raises(
        ValueError, match="Multilabel and multioutput targets are not supported."
    ):
        estimator.fit(X, y)
def check_classifiers_with_encoded_labels(name, classifier_orig):
# Non-regression test for #709
# https://github.com/scikit-learn-contrib/imbalanced-learn/issues/709
pytest.importorskip("pandas")
classifier = clone(classifier_orig)
df, y = fetch_openml("iris", version=1, as_frame=True, return_X_y=True)
df, y = make_imbalance(
df,
y,
sampling_strategy={
"Iris-setosa": 30,
"Iris-versicolor": 20,
"Iris-virginica": 50,
},
)
classifier.set_params(sampling_strategy={"Iris-setosa": 20, | |
<reponame>ttlg59/Stino
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Package Docs."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import glob
import codecs
from base_utils import file
from base_utils import default_st_dirs
from base_utils import c_file
from base_utils import serial_port
from base_utils import plain_params_file
from . import const
from . import selected
# Plugin name, used to locate this plugin's Sublime Text menu directory.
plugin_name = const.PLUGIN_NAME
# Directory names that are never treated as sketches when scanning folders.
none_sketches = ['libraries', 'library', 'examples', 'example', 'hardware',
                 'extras', 'utility', 'config']
def write_menu(menu_type, menu_text):
    """Write *menu_text* to <plugin menu path>/<menu_type>/Main.sublime-menu."""
    menu_dir = os.path.join(
        default_st_dirs.get_plugin_menu_path(plugin_name), menu_type)
    # Make sure the target directory exists before writing.
    file.check_dir(menu_dir)
    menu_file_path = os.path.join(menu_dir, 'Main.sublime-menu')
    with codecs.open(menu_file_path, 'w', 'utf-8') as f:
        f.write(menu_text)
def get_sketch_paths_text(level, paths):
    """Build sublime-menu JSON text for the given sketch directory paths.

    Each directory becomes one menu entry indented *level* tab stops:
    a directory holding only source files gets an "open sketch" command;
    a directory holding only sub-directories gets a submenu; one with both
    gets an "Open" command plus a submenu.  Entries are emitted with a
    leading ',\\n' so the result is appended after an existing menu item.
    Mutually recursive with ``get_sketch_menu_text``.
    """
    text = ''
    for path in paths:
        path = path.replace('\\', '/')
        name = os.path.basename(path)
        next_paths = glob.glob(path + '/*')
        next_paths = [p.replace('\\', '/') for p in next_paths]
        next_file_paths = [p for p in next_paths if os.path.isfile(p)]
        next_dir_paths = [p for p in next_paths if os.path.isdir(p)]
        next_dir_paths = [p for p in next_dir_paths
                          if os.path.basename(p) not in none_sketches]
        # A directory counts as a sketch if it directly contains a file with
        # an Arduino/C-family source extension.
        has_sketch = False
        for file_path in next_file_paths:
            ext = os.path.splitext(file_path)[-1]
            if ext in c_file.INOC_EXTS:
                has_sketch = True
                break
        if not (has_sketch or next_dir_paths):
            # Neither sources nor usable sub-directories: skip entirely.
            continue
        elif not next_dir_paths:
            # Leaf sketch: a single command entry that opens it.
            text += ',\n'
            text += '\t' * level + '{\n'
            text += '\t' * (level + 1)
            text += '"caption": "%s",\n' % name
            text += '\t' * (level + 1)
            text += '"id": "stino_sketch_%s",\n' % name
            text += '\t' * (level + 1)
            text += '"command": "stino_open_sketch",\n'
            text += '\t' * (level + 1)
            text += '"args": {"sketch_path": "%s"}\n' % path
            text += '\t' * level
            text += '}'
        elif not has_sketch:
            # Pure folder: a submenu of its sub-directories.
            text += ',\n'
            text += '\t' * level + '{\n'
            text += '\t' * (level + 1)
            text += '"caption": "%s",\n' % name
            text += '\t' * (level + 1)
            text += '"id": "stino_sketch_%s",\n' % name
            text += '\t' * (level + 1)
            text += '"children":\n'
            text += '\t' * (level + 1)
            text += '[\n'
            text += '\t' * (level + 2)
            text += '{"caption": "-"}'
            text += get_sketch_menu_text(level + 2, next_dir_paths)
            text += '\t' * (level + 1)
            text += ']\n'
            text += '\t' * level
            text += '}'
        else:
            # Sketch that also contains sub-directories: an "Open" command
            # followed by a submenu of the children.
            text += ',\n'
            text += '\t' * level + '{\n'
            text += '\t' * (level + 1)
            text += '"caption": "%s",\n' % name
            text += '\t' * (level + 1)
            text += '"id": "stino_sketch_%s",\n' % name
            text += '\t' * (level + 1)
            text += '"children":\n'
            text += '\t' * (level + 1)
            text += '[\n'
            text += '\t' * (level + 1) + '{\n'
            text += '\t' * (level + 2)
            text += '"caption": "Open",\n'
            text += '\t' * (level + 2)
            text += '"id": "stino_open_%s",\n' % name
            text += '\t' * (level + 2)
            text += '"command": "stino_open_sketch",\n'
            text += '\t' * (level + 2)
            text += '"args": {"sketch_path": "%s"}\n' % path
            text += '\t' * (level + 1)
            text += '},'
            text += '\t' * (level + 2)
            text += '{"caption": "-"}'
            text += get_sketch_menu_text(level + 2, next_dir_paths)
            text += '\t' * (level + 1)
            text += ']\n'
            text += '\t' * level
            text += '}'
    return text
def get_sketch_menu_text(level, paths):
    """Build Sublime Text menu JSON text for the sketches under ``paths``.

    ``level`` is the current nesting depth in tabs of the generated JSON.
    With fewer than 36 sketches they are listed flat via
    get_sketch_paths_text(); otherwise they are grouped into submenus
    keyed by their leading letter(s).  Each generated entry starts with
    ',\\n' so the text can be appended after an existing JSON item.
    """
    text = ''
    sketch_paths = []
    # Normalise separators and drop directories that are not sketches.
    for path in paths:
        path = path.replace('\\', '/')
        name = os.path.basename(path)
        if name.lower() not in none_sketches:
            sketch_paths.append(path)
    if len(sketch_paths) < 36:
        # Few enough entries for a single flat list.
        text += get_sketch_paths_text(level, sketch_paths)
    else:
        # Too many entries: bucket them by their first `index + 1`
        # characters, deepening `index` until at least two buckets exist.
        index = 0
        index_letters = []
        while len(index_letters) < 2:
            index_letters = []
            letter_path_info = {}
            for path in sketch_paths:
                name = os.path.basename(path)
                if index < len(name):
                    index_letter = name[index].upper()
                else:
                    # Name is shorter than the current index depth.
                    index_letter = '->'
                if index_letter not in index_letters:
                    index_letters.append(index_letter)
                    letter_path_info[index_letter] = []
                letter_path_info[index_letter].append(path)
            index += 1
        # Emit one submenu per bucket, in first-seen order.
        for index_letter in index_letters:
            sketch_paths = letter_path_info[index_letter]
            text += ',\n'
            text += '\t' * level + '{\n'
            text += '\t' * (level + 1)
            text += '"caption": "%s",\n' % index_letter
            text += '\t' * (level + 1)
            text += '"id": "stino_sketch_cat_%s_%s",\n' % (level, index_letter)
            text += '\t' * (level + 1)
            text += '"children":\n'
            text += '\t' * (level + 1)
            text += '[\n'
            text += '\t' * (level + 2)
            # No trailing '\n': the first child entry begins with ',\n'.
            text += '{"caption": "-"}'
            text += get_sketch_paths_text(level + 2, sketch_paths)
            text += '\t' * (level + 1)
            text += ']\n'
            text += '\t' * level
            text += '}'
    return text
def update_sketchbook_menu(arduino_info):
    """Regenerate the Arduino 'Open Sketch' menu from the sketchbook dir.

    Scans the sketchbook directory for sketch folders and writes the
    hand-built menu JSON (tab-indented by nesting depth for readability
    of the generated file) via write_menu().
    """
    sketchbook_path = arduino_info.get('sketchbook_path')
    sketch_paths = glob.glob(sketchbook_path + '/*')
    # Only directories can be sketches.
    sketch_paths = [p for p in sketch_paths if os.path.isdir(p)]
    text = '\t' * 0 + '[\n'
    text += '\t' * 1 + '{\n'
    text += '\t' * 2 + '"caption": "Arduino",\n'
    text += '\t' * 2 + '"mnemonic": "A",\n'
    text += '\t' * 2 + '"id": "arduino",\n'
    text += '\t' * 2 + '"children":\n'
    text += '\t' * 2 + '[\n'
    text += '\t' * 3 + '{\n'
    text += '\t' * 4 + '"caption": "Open Sketch",\n'
    text += '\t' * 4 + '"id": "stino_sketchbook",\n'
    text += '\t' * 4 + '"children":\n'
    text += '\t' * 4 + '[\n'
    # Fixed commands at the top of the submenu.
    text += '\t' * 5 + '{\n'
    text += '\t' * 6 + '"caption": "Refresh",\n'
    text += '\t' * 6 + '"id": "stino_refresh_sketchbook",\n'
    text += '\t' * 6 + '"command": "stino_refresh_sketchbook"\n'
    text += '\t' * 5 + '},\n'
    text += '\t' * 5 + '{\n'
    text += '\t' * 6 + '"caption": "Change Location...",\n'
    text += '\t' * 6 + '"id": "stino_change_sketchbook_location",\n'
    text += '\t' * 6 + '"command": "stino_change_sketchbook_location"\n'
    text += '\t' * 5 + '},\n'
    text += '\t' * 5 + '{\n'
    text += '\t' * 6 + '"caption": "In New Window",\n'
    text += '\t' * 6 + '"id": "stino_open_in_new_win",\n'
    text += '\t' * 6 + '"command": "stino_open_in_new_win",\n'
    text += '\t' * 6 + '"checkbox": true\n'
    text += '\t' * 5 + '},\n'
    text += '\t' * 5 + '{\n'
    text += '\t' * 6 + '"caption": "All Source Files",\n'
    text += '\t' * 6 + '"id": "stino_open_all_src",\n'
    text += '\t' * 6 + '"command": "stino_open_all_src",\n'
    text += '\t' * 6 + '"checkbox": true\n'
    text += '\t' * 5 + '},\n'
    # NOTE(review): no '\n' after the separator below, unlike the other
    # lines -- looks accidental but only affects generated-file layout;
    # confirm before changing.
    text += '\t' * 5 + '{"caption": "-"},'
    text += '\t' * 5 + '{\n'
    text += '\t' * 6 + '"caption": "New Sketch...",\n'
    text += '\t' * 6 + '"id": "stino_new_sketch",\n'
    text += '\t' * 6 + '"command": "stino_new_sketch"\n'
    text += '\t' * 5 + '},\n'
    # No trailing '\n' here: the first sketch entry begins with ',\n'.
    text += '\t' * 5 + '{"caption": "-"}'
    text += get_sketch_menu_text(5, sketch_paths)
    text += '\n' + '\t' * 4 + ']\n'
    text += '\t' * 3 + '}\n'
    text += '\t' * 2 + ']\n'
    text += '\t' * 1 + '}\n'
    text += '\t' * 0 + ']\n'
    write_menu('sketchbook', text)
def get_example_paths_text(level, paths):
    """Build menu JSON entries for the example directories in ``paths``.

    A directory containing ``<name>.ino`` becomes a leaf entry that opens
    the example; otherwise it becomes a submenu listing its
    sub-directories via get_example_menu_text().  Each entry starts with
    ',\\n' so the text can be appended after an existing JSON item.
    """
    text = ''
    for path in paths:
        path = path.replace('\\', '/')
        name = get_lib_name_in_path(path)
        file_path = os.path.join(path, name + '.ino')
        text += ',\n'
        text += '\t' * level + '{\n'
        text += '\t' * (level + 1)
        text += '"caption": "%s",\n' % name
        text += '\t' * (level + 1)
        text += '"id": "stino_example_%s",\n' % name
        text += '\t' * (level + 1)
        if os.path.isfile(file_path):
            # Leaf example: a sketch file exists, so open it directly.
            text += '"command": "stino_open_example",\n'
            text += '\t' * (level + 1)
            text += '"args": {"example_path": "%s"}\n' % path
        else:
            # Category directory: recurse into its sub-directories.
            next_paths = glob.glob(path + '/*')
            next_paths = [p for p in next_paths if os.path.isdir(p)]
            text += '"children":\n'
            text += '\t' * (level + 1)
            text += '[\n'
            text += '\t' * (level + 2)
            # No trailing '\n': the first child entry begins with ',\n'.
            text += '{"caption": "-"}'
            text += get_example_menu_text(level + 2, next_paths)
            text += '\t' * (level + 1)
            text += ']\n'
        text += '\t' * level
        text += '}'
    return text
def get_example_menu_text(level, paths):
"""."""
text = ''
if len(paths) < 21:
text += get_example_paths_text(level, paths)
else:
index = 0
index_letters = []
while len(index_letters) < 2:
index_letters = []
letter_path_info = {}
for path in paths:
name = os.path.basename(path)
if index < len(name):
index_letter = name[index].upper()
else:
index_letter = '->'
if index_letter not in index_letters:
index_letters.append(index_letter)
letter_path_info[index_letter] = []
letter_path_info[index_letter].append(path)
index += 1
for index_letter in index_letters:
paths = letter_path_info[index_letter]
text += ',\n'
text += '\t' | |
from decimal import Decimal
import json
from apps.fund.models import DonationStatuses, Donation
from apps.projects.models import ProjectPlan, ProjectCampaign
from django.test import TestCase, RequestFactory
from django.contrib.contenttypes.models import ContentType
from rest_framework import status
from bluebottle.bluebottle_utils.tests import UserTestsMixin, generate_random_slug
from apps.organizations.tests import OrganizationTestsMixin
from apps.wallposts.models import TextWallPost
from ..models import Project,ProjectPhases, ProjectPitch
class ProjectTestsMixin(OrganizationTestsMixin, UserTestsMixin):
    """ Mixin base class for tests using projects. """

    def create_project(self, organization=None, owner=None, title=None, phase='pitch', slug=None, money_asked=0):
        """
        Create a 'default' project with some standard values so it can be
        saved to the database, but allow for overriding.

        The returned object is saved to the database.
        """
        if not owner:
            # No owner supplied: make a fresh user with a random username.
            owner = self.create_user()

        if not slug:
            # Keep drawing random slugs until an unused one turns up.
            while True:
                slug = generate_random_slug()
                if not Project.objects.filter(slug=slug).exists():
                    break

        if not title:
            # Titles have to be unique as well.
            while True:
                title = generate_random_slug()
                if not Project.objects.filter(title=title).exists():
                    break

        project = Project(owner=owner, title=title, slug=slug, phase=phase)
        project.save()

        # Give the project's pitch the same title and a fresh status
        # (the pitch itself is presumably created by a post-save signal
        # elsewhere -- confirm against the Project model).
        project.projectpitch.title = title
        project.projectpitch.status = ProjectPitch.PitchStatuses.new
        project.projectpitch.save()

        if money_asked:
            # Promote the project to a running campaign with an approved plan.
            project.projectplan = ProjectPlan(title=project.title, project=project)
            project.projectplan.status = 'approved'
            # add an organization so we can create pay-outs
            project.projectplan.organization = self.create_organization()
            project.projectplan.save()

            project.projectcampaign = ProjectCampaign(status='running', project=project, money_asked=money_asked)
            project.projectcampaign.save()
            project.projectcampaign.update_money_donated()

            project.phase = ProjectPhases.campaign
            project.save()

        return project
class ProjectWallPostTestsMixin(ProjectTestsMixin):
    """ Mixin base class for tests using wallposts. """

    def create_project_text_wallpost(self, text='Some smart comment.', project=None, author=None):
        """Create and save a text wallpost attached to ``project``."""
        project = project or self.create_project()
        author = author or self.create_user()

        wallpost = TextWallPost(
            content_type=ContentType.objects.get_for_model(Project),
            object_id=project.id,
            author=author)
        wallpost.text = text
        wallpost.save()
        return wallpost
# Shared Django RequestFactory used by the integration tests below.
factory = RequestFactory()
class ProjectApiIntegrationTest(ProjectTestsMixin, TestCase):
    """
    Integration tests for the Project API.
    """

    def setUp(self):
        """
        Create 26 Project instances.
        """
        for char in 'abcdefghijklmnopqrstuvwxyz':
            project = self.create_project(title=char * 3, slug=char * 3)
            project.projectplan = ProjectPlan(title=project.title)
            if ord(char) % 2 == 1:
                # Put half of the projects in the campaign phase.
                project.projectplan.status = 'approved'
                project.projectplan.save()
                project.phase = ProjectPhases.campaign
            else:
                # The other half stays in the plan phase.
                project.projectplan.save()
                project.phase = ProjectPhases.plan
            project.save()

        self.projects_url = '/api/projects/projects/'

    def test_project_list_view(self):
        """
        Tests for Project List view. These basic tests are here because Project is the
        first API to use DRF2. Not all APIs need thorough integration testing like this.
        """
        # Basic test of DRF2: paginated list of all 26 projects.
        response = self.client.get(self.projects_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 26)
        self.assertEqual(len(response.data['results']), 10)
        self.assertNotEqual(response.data['next'], None)
        self.assertEqual(response.data['previous'], None)

    def test_project_list_view_query_filters(self):
        """
        Tests for Project List view with filters. These basic tests are here because Project is the
        first API to use DRF2. Not all APIs need thorough integration testing like this.
        """
        # Tests that the phase filter works.
        response = self.client.get(self.projects_url + '?phase=plan')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 13)
        self.assertEqual(len(response.data['results']), 10)

        # Test that every supported ordering is accepted.
        for ordering in ('newest', 'title', 'deadline', 'needed', 'popularity'):
            response = self.client.get(self.projects_url + '?ordering=' + ordering)
            self.assertEqual(response.status_code, 200)

        # Test that combination of arguments works
        response = self.client.get(self.projects_url + '?ordering=deadline&phase=campaign&country=101')
        self.assertEqual(response.status_code, 200)

    def test_project_detail_view(self):
        """ Tests retrieving a project detail from the API. """
        # Fetch the list, then follow the first item's detail endpoint.
        list_response = self.client.get(self.projects_url)
        self.assertEqual(list_response.status_code, status.HTTP_200_OK)

        first_project = list_response.data['results'][0]
        detail_response = self.client.get(self.projects_url + str(first_project['id']))
        self.assertEqual(detail_response.status_code, status.HTTP_200_OK)
class ProjectManageApiIntegrationTest(ProjectTestsMixin, TestCase):
    """
    Integration tests for the Project API.
    """

    def setUp(self):
        # Two users: one to own a pitch, one to probe permission checks.
        self.some_user = self.create_user()
        self.another_user = self.create_user()
        self.manage_projects_url = '/api/projects/manage/'
        self.manage_pitches_url = '/api/projects/pitches/manage/'

    def test_pitch_create(self):
        """
        Tests for Project Pitch Create
        """
        # Check that a new user doesn't have any projects to manage
        self.client.login(username=self.some_user.email, password='password')
        response = self.client.get(self.manage_projects_url)
        self.assertEquals(response.data['count'], 0)

        # Let's throw a pitch (create a project really)
        response = self.client.post(self.manage_projects_url, {'title': 'This is my smart idea'})
        self.assertEquals(response.data['title'], 'This is my smart idea')

        # Check that it's there, in pitch phase, has got a pitch but no plan yet.
        response = self.client.get(self.manage_projects_url)
        self.assertEquals(response.data['count'], 1)
        self.assertEquals(response.data['results'][0]['phase'], ProjectPhases.pitch)
        self.assertEquals(response.data['results'][0]['plan'], None)

        # Get the pitch
        pitch_id = response.data['results'][0]['pitch']
        response = self.client.get(self.manage_pitches_url + str(pitch_id))
        self.assertEquals(response.status_code, status.HTTP_200_OK, response)
        self.assertEquals(response.data['title'], 'This is my smart idea')

        # Let's check that another user can't get this pitch
        self.client.logout()
        self.client.login(username=self.another_user.email, password='password')
        response = self.client.get(self.manage_pitches_url + str(pitch_id))
        self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN, response)

        # Let's create a pitch for this other user
        response = self.client.post(self.manage_projects_url, {'title': 'My idea is way smarter!'})
        # The manage API identifies projects by slug, exposed as 'id'.
        project_slug = response.data['id']
        project_url = self.manage_projects_url + project_slug
        self.assertEquals(response.data['title'], 'My idea is way smarter!')
        pitch_id = response.data['pitch']
        pitch_url = self.manage_pitches_url + str(pitch_id)

        # Add some values to this pitch
        pitch_data = {'title': 'My idea is quite smart!', 'latitude': '52.987245', 'longitude': '-5.8754',
                      'pitch': 'Lorem ipsum, bla bla ', 'description': 'Some more text'}
        response = self.client.put(pitch_url, json.dumps(pitch_data), 'application/json')
        self.assertEquals(response.status_code, status.HTTP_200_OK, response)

        # Let's try to be smart and create another pitch. This should fail. You can have only have one running project.
        response = self.client.post(self.manage_projects_url, {'title': 'I am such a smart ass...'})
        self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN, response)

        # Back to the previous pitch. Try to cheat and put it to status approved.
        pitch_data['status'] = 'approved'
        response = self.client.put(pitch_url, json.dumps(pitch_data), 'application/json')
        self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response)

        # Ok, let's try to submit it. We have to submit all previous data again too.
        pitch_data['status'] = 'submitted'
        response = self.client.put(pitch_url, json.dumps(pitch_data), 'application/json')
        self.assertEquals(response.status_code, status.HTTP_200_OK, response)
        self.assertEquals(response.data['status'], 'submitted')

        # Changing this pitch specs for this project should fail now.
        pitch_data['title'] = 'Changed title'
        response = self.client.put(pitch_url, json.dumps(pitch_data), 'application/json')
        self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN, response)

        # Set the project to plan phase from the backend
        project = Project.objects.get(slug=project_slug)
        project.phase = ProjectPhases.plan
        project.save()

        # Let's look at the project again. It should have a project plan and be in plan phase.
        response = self.client.get(project_url)
        self.assertEquals(response.status_code, status.HTTP_200_OK, response)
        self.assertEquals(response.data['phase'], ProjectPhases.plan)
        plan_id = response.data['plan']
        self.assertIsNotNone(plan_id)
class ProjectWallPostApiIntegrationTest(ProjectTestsMixin, UserTestsMixin, TestCase):
"""
Integration tests for the Project Media WallPost API.
"""
    def setUp(self):
        """Create two projects, two users, photo fixtures and the wallpost API urls."""
        self.some_project = self.create_project(slug='someproject')
        self.another_project = self.create_project(slug='anotherproject')

        self.some_user = self.create_user()
        self.another_user = self.create_user()

        # Image fixtures used by the photo-upload tests.
        self.some_photo = 'apps/projects/test_images/loading.gif'
        self.another_photo = 'apps/projects/test_images/upload.png'

        self.project_media_wallposts_url = '/api/projects/wallposts/media/'
        self.project_media_wallpost_photos_url = '/api/projects/wallposts/media/photos/'
        self.project_text_wallposts_url = '/api/projects/wallposts/text/'
        self.project_wallposts_url = '/api/projects/wallposts/'
    def test_project_media_wallpost_crud(self):
        """
        Tests for creating, retrieving, updating and deleting a Project Media WallPost.
        """
        # Authenticate as the project owner; only the owner may post media wallposts.
        self.client.login(username=self.some_project.owner.email, password='password')

        # Create a Project Media WallPost by Project Owner
        # Note: This test will fail when we require at least a video and/or a text but that's what we want.
        wallpost_title = 'This is my super project!'
        response = self.client.post(self.project_media_wallposts_url, {'title': wallpost_title, 'project': self.some_project.slug})
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        self.assertEqual(response.data['title'], wallpost_title)

        # Retrieve the created Project Media WallPost.
        project_wallpost_detail_url = "{0}{1}".format(self.project_media_wallposts_url, str(response.data['id']))
        response = self.client.get(project_wallpost_detail_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(response.data['title'], wallpost_title)

        # Update the created Project Media WallPost by author.
        new_wallpost_title = 'This is my super-duper project!'
        response = self.client.put(project_wallpost_detail_url, json.dumps({'title': new_wallpost_title, 'project': self.some_project.slug}), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(response.data['title'], new_wallpost_title)

        # Delete Project Media WallPost by author
        response = self.client.delete(project_wallpost_detail_url)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response)

        # Check that creating a WallPost with project slug that doesn't exist reports an error.
        response = self.client.post(self.project_media_wallposts_url, {'title': wallpost_title, 'project': 'allyourbasearebelongtous'})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

        # Create Project Media WallPost and retrieve by another user
        response = self.client.post(self.project_media_wallposts_url, {'title': wallpost_title, 'project': self.some_project.slug})
        project_wallpost_detail_url = "{0}{1}".format(self.project_media_wallposts_url, str(response.data['id']))
        self.client.logout()
        self.client.login(username=self.some_user.email, password='password')
        response = self.client.get(project_wallpost_detail_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(response.data['title'], wallpost_title)

        # Write Project Media WallPost by someone else then Project Owner should fail
        new_wallpost_title = 'This is not my project...'
        response = self.client.post(self.project_media_wallposts_url, {'title': new_wallpost_title, 'project': self.some_project.slug})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

        # Write Project Media WallPost by Project Owner to another Project should fail
        self.client.logout()
        self.client.login(username=self.some_project.owner.email, password='password')
        new_wallpost_title = 'This is not my project, although I do have a project'
        response = self.client.post(self.project_media_wallposts_url, {'title': new_wallpost_title, 'project': self.another_project.slug})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

        # Update Project Media WallPost by someone else than Project Owner should fail
        second_wallpost_title = "My project rocks!"
        response = self.client.post(self.project_media_wallposts_url, {'title': second_wallpost_title, 'project': self.some_project.slug})
        self.client.logout()
        self.client.login(username=self.some_user.email, password='password')
        response = self.client.put(project_wallpost_detail_url, {'title': new_wallpost_title, 'project': self.some_project.slug})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)

        # Deleting a Project Media WallPost by non-author user should fail.
        response = self.client.delete(project_wallpost_detail_url)  # some_user is still logged in.
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response)

        # Retrieve a list of the two Project Media WallPosts that we've just added should work
        # (newest first, so the second post comes back before the first).
        response = self.client.get(self.project_wallposts_url, {'project': self.some_project.slug})
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(len(response.data['results']), 2)
        self.assertEqual(response.data['results'][0]['title'], second_wallpost_title)
        self.assertEqual(response.data['results'][1]['title'], wallpost_title)
        self.client.logout()
def test_project_media_wallpost_photo(self):
"""
Test connecting photos to wallposts
"""
self.client.login(username=self.some_project.owner.email, password='password')
# Typically the photos are uploaded before the | |
#!/usr/bin/env python3
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Dictionary-based Logging Database Generator
This takes the built Zephyr ELF binary and produces a JSON database
file for dictionary-based logging. This database is used together
with the parser to decode binary log messages.
"""
import argparse
import logging
import os
import re
import string
import struct
import sys
import dictionary_parser.log_database
from dictionary_parser.log_database import LogDatabase
from dictionary_parser.utils import extract_one_string_in_section
from dictionary_parser.utils import find_string_in_mappings
import elftools
from elftools.elf.constants import SH_FLAGS
from elftools.elf.elffile import ELFFile
from elftools.elf.descriptions import describe_ei_data
from elftools.elf.sections import SymbolTableSection
from elftools.dwarf.descriptions import (
describe_DWARF_expr
)
from elftools.dwarf.locationlists import (
LocationExpr, LocationParser
)
LOGGER_FORMAT = "%(name)s: %(levelname)s: %(message)s"
logger = logging.getLogger(os.path.basename(sys.argv[0]))

# Sections that contain static strings
STATIC_STRING_SECTIONS = [
    'rodata',
    '.rodata',
    'pinned.rodata',
]

# Regular expression to match DWARF location expressions (DW_OP_addr)
DT_LOCATION_REGEX = re.compile(r"\(DW_OP_addr: ([0-9a-f]+)")

# Format string for pointers (default for 32-bit pointers)
PTR_FMT = '0x%08x'

# Potential string encodings. Add as needed.
STR_ENCODINGS = [
    'ascii',
    'iso-8859-1',
]

# List of acceptable escape characters (as bytes)
ACCEPTABLE_ESCAPE_CHARS = [
    b'\r',
    b'\n',
]
def parse_args():
    """Parse and return the command line arguments."""
    parser = argparse.ArgumentParser()

    parser.add_argument("elffile", help="Zephyr ELF binary")
    parser.add_argument("--build", help="Build ID")
    parser.add_argument("--build-header",
                        help="Header file containing BUILD_VERSION define")
    parser.add_argument("--debug", action="store_true",
                        help="Print extra debugging information")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print more information")

    # Exactly one output format must be chosen.
    output_group = parser.add_mutually_exclusive_group(required=True)
    output_group.add_argument("--json",
                              help="Output Dictionary Logging Database file in JSON")
    output_group.add_argument("--syst",
                              help="Output MIPI Sys-T Collateral XML file")

    return parser.parse_args()
def extract_elf_code_data_sections(elf):
    """Return a dict (name -> info) of all allocated PROGBITS sections."""
    sections = {}

    for sect in elf.iter_sections():
        # Keep only sections that are both allocated and PROGBITS, since
        # only those actually carry code/data bytes in the file
        # (BSS, for example, is allocated but NOBITS).
        is_alloc = (sect['sh_flags'] & SH_FLAGS.SHF_ALLOC) == SH_FLAGS.SHF_ALLOC
        if not is_alloc or sect['sh_type'] != 'SHT_PROGBITS':
            continue

        sections[sect.name] = {
            'name': sect.name,
            'size': sect['sh_size'],
            'start': sect['sh_addr'],
            'end': sect['sh_addr'] + sect['sh_size'] - 1,
            'data': sect.data(),
        }

    return sections
def find_elf_sections(elf, sh_name):
    """Return an info dict for the first section named ``sh_name``, or None."""
    for section in elf.iter_sections():
        if section.name != sh_name:
            continue
        return {
            'name': section.name,
            'size': section['sh_size'],
            'start': section['sh_addr'],
            'end': section['sh_addr'] + section['sh_size'] - 1,
            'data': section.data(),
        }

    return None
def get_kconfig_symbols(elf):
    """Return a mapping of CONFIG_* symbol names to their values.

    Reads the first symbol table in the ELF file; raises LookupError
    when the file has no symbol table at all.
    """
    for section in elf.iter_sections():
        if not isinstance(section, SymbolTableSection):
            continue
        return {
            sym.name: sym.entry.st_value
            for sym in section.iter_symbols()
            if sym.name.startswith("CONFIG_")
        }

    raise LookupError("Could not find symbol table")
def find_log_const_symbols(elf):
    """Extract all "log_const_*" symbols from ELF file.

    Returns a list of symbol objects whose names start with
    "log_const_", taken from every non-empty symbol table.
    """
    symbol_tables = [s for s in elf.iter_sections()
                     if isinstance(s, elftools.elf.sections.SymbolTableSection)]

    ret_list = []

    for section in symbol_tables:
        # The redundant per-iteration isinstance() re-check has been
        # removed: symbol_tables is already filtered above.
        if section['sh_entsize'] == 0:
            # Empty symbol table: nothing to iterate.
            continue

        for symbol in section.iter_symbols():
            if symbol.name.startswith("log_const_"):
                ret_list.append(symbol)

    return ret_list
def parse_log_const_symbols(database, log_const_section, log_const_symbols, string_mappings):
    """Find the log instances and map source IDs to names.

    Each "log_const_*" symbol points into log_const_section at a struct
    of {pointer-to-name-string, one-byte level}.  The name string is
    resolved through string_mappings and each instance is registered in
    the database with a source ID derived from its array index.
    """
    # Build a struct format matching one entry in the target's byte
    # order: pointer first, then the level byte.
    if database.is_tgt_little_endian():
        formatter = "<"
    else:
        formatter = ">"

    if database.is_tgt_64bit():
        # 64-bit pointer to string
        formatter += "Q"
    else:
        # 32-bit pointer to string
        formatter += "L"

    # log instance level
    formatter += "B"

    datum_size = struct.calcsize(formatter)

    # Get the address of first log instance
    # (symbols are not guaranteed to be in address order, so take the min).
    first_offset = log_const_symbols[0].entry['st_value']
    for sym in log_const_symbols:
        if sym.entry['st_value'] < first_offset:
            first_offset = sym.entry['st_value']

    # Convert from absolute address to an offset within the section.
    first_offset -= log_const_section['start']

    # find all log_const_*
    for sym in log_const_symbols:
        # Find data offset in log_const_section for this symbol
        offset = sym.entry['st_value'] - log_const_section['start']

        idx_s = offset
        idx_e = offset + datum_size
        datum = log_const_section['data'][idx_s:idx_e]

        if len(datum) != datum_size:
            # Not enough data to unpack
            continue

        str_ptr, level = struct.unpack(formatter, datum)

        # Offset to rodata section for string
        instance_name = find_string_in_mappings(string_mappings, str_ptr)
        if instance_name is None:
            instance_name = "unknown"

        logger.info("Found Log Instance: %s, level: %d", instance_name, level)

        # source ID is simply the element index in the log instance array.
        # NOTE(review): float division then int() -- assumes offsets are
        # exact multiples of st_size; integer '//' would be the safer
        # equivalent. Confirm before changing.
        source_id = int((offset - first_offset) / sym.entry['st_size'])

        database.add_log_instance(source_id, instance_name, level, sym.entry['st_value'])
def extract_elf_information(elf, database):
    """Extract target endianness from the ELF header and store it in database.

    Exits the program when the byte order cannot be determined, since
    the log format cannot be decoded without it.
    """
    e_ident = elf.header['e_ident']
    elf_data = describe_ei_data(e_ident['EI_DATA'])

    # NOTE(review): this compares human-readable strings from pyelftools'
    # private _DESCR_EI_DATA table; matching e_ident['EI_DATA'] against
    # 'ELFDATA2LSB'/'ELFDATA2MSB' directly would avoid the non-public API.
    if elf_data == elftools.elf.descriptions._DESCR_EI_DATA['ELFDATA2LSB']:
        database.set_tgt_endianness(LogDatabase.LITTLE_ENDIAN)
    elif elf_data == elftools.elf.descriptions._DESCR_EI_DATA['ELFDATA2MSB']:
        database.set_tgt_endianness(LogDatabase.BIG_ENDIAN)
    else:
        logger.error("Cannot determine endianness from ELF file, exiting...")
        sys.exit(1)
def process_kconfigs(elf, database):
    """Process kconfigs to extract information and store it in database."""
    kconfigs = get_kconfig_symbols(elf)

    # Target word size: 64-bit when CONFIG_64BIT is set, else 32-bit.
    database.set_tgt_bits(64 if "CONFIG_64BIT" in kconfigs else 32)

    # Architecture: the first ARCHS entry whose marker kconfig is present.
    arch_name = next(
        (name for name, arch in dictionary_parser.log_database.ARCHS.items()
         if arch['kconfig'] in kconfigs),
        None)
    if arch_name is not None:
        database.set_arch(arch_name)

    # Put some kconfigs into the database:
    # whether timestamps are 32 or 64-bit wide.
    if "CONFIG_LOG_TIMESTAMP_64BIT" in kconfigs:
        database.add_kconfig("CONFIG_LOG_TIMESTAMP_64BIT",
                             kconfigs['CONFIG_LOG_TIMESTAMP_64BIT'])
def extract_logging_subsys_information(elf, database, string_mappings):
    """
    Extract logging subsys related information and store in database.

    For example, this extracts the list of log instances to establish
    mapping from source ID to name.
    """
    # Prefer "log_const_sections"; ESP32 puts the "log_const_*" data in
    # "log_static_section" instead, so fall back to that.
    section_log_const = (find_elf_sections(elf, "log_const_sections")
                         or find_elf_sections(elf, "log_static_section"))

    if section_log_const is None:
        logger.error("Cannot find section 'log_const_sections' in ELF file, exiting...")
        sys.exit(1)

    # Find all "log_const_*" symbols and parse them
    log_const_symbols = find_log_const_symbols(elf)
    parse_log_const_symbols(database, section_log_const,
                            log_const_symbols, string_mappings)
def is_die_attr_ref(attr):
    """
    Returns True if the DIE attribute's form is a DWARF reference form.
    """
    return attr.form in {'DW_FORM_ref1', 'DW_FORM_ref2',
                         'DW_FORM_ref4', 'DW_FORM_ref8',
                         'DW_FORM_ref'}
def find_die_var_base_type(compile_unit, die, is_const):
    """
    Finds the base type of a DIE and returns the name.

    If DW_AT_type is a reference, it will recursively go through
    the references to find the base type. ``is_const`` is carried along
    and set to True once a const-qualifier DIE is crossed. Returns
    (None, None) if no base type is found.
    """
    # DIE is of base type. So extract the name.
    if die.tag == 'DW_TAG_base_type':
        return die.attributes['DW_AT_name'].value.decode('ascii'), is_const

    # Not a type, cannot continue
    if not 'DW_AT_type' in die.attributes:
        return None, None

    # Crossing a const qualifier on the way to the base type.
    if die.tag == 'DW_TAG_const_type':
        is_const = True

    # DIE is probably a reference to another.
    # If so, check if the reference is a base type.
    type_attr = die.attributes['DW_AT_type']
    if is_die_attr_ref(type_attr):
        # Reference values are CU-relative; rebase before dereferencing.
        ref_addr = compile_unit.cu_offset + type_attr.raw_value
        ref_die = compile_unit.get_DIE_from_refaddr(ref_addr)
        return find_die_var_base_type(compile_unit, ref_die, is_const)

    # Not a base type, and not reference
    return None, None
def is_die_var_const_char(compile_unit, die):
    """
    Returns True if DIE of type variable is const char.
    """
    var_type, is_const = find_die_var_base_type(compile_unit, die, False)
    return var_type is not None and var_type.endswith('char') and bool(is_const)
def extract_string_variables(elf):
    """
    Find all string variables (char) in all Compilation Units and
    Debug information Entry (DIE) in ELF file.

    Returns a list of dicts with keys 'name', 'addr' and 'die' for every
    const-char variable whose DWARF location resolves to a non-zero
    absolute address.
    """
    dwarf_info = elf.get_dwarf_info()
    loc_lists = dwarf_info.location_lists()
    loc_parser = LocationParser(loc_lists)

    strings = []

    # Loop through all Compilation Units and
    # Debug information Entry (DIE) to extract all string variables
    for compile_unit in dwarf_info.iter_CUs():
        for die in compile_unit.iter_DIEs():
            # Only care about variables with location information
            # and of type "char"
            if die.tag == 'DW_TAG_variable':
                if ('DW_AT_type' in die.attributes
                    and 'DW_AT_location' in die.attributes
                    and is_die_var_const_char(compile_unit, die)
                ):
                    # Extract location information, which is
                    # its address in memory.
                    loc_attr = die.attributes['DW_AT_location']
                    if loc_parser.attribute_has_location(loc_attr, die.cu['version']):
                        loc = loc_parser.parse_from_attribute(loc_attr, die.cu['version'])
                        if isinstance(loc, LocationExpr):
                            try:
                                # Render the expression and pull the
                                # DW_OP_addr operand out of the text.
                                addr = describe_DWARF_expr(loc.loc_expr,
                                                           dwarf_info.structs)

                                matcher = DT_LOCATION_REGEX.match(addr)
                                if matcher:
                                    addr = int(matcher.group(1), 16)
                                    # Address 0 means not a real variable.
                                    if addr > 0:
                                        strings.append({
                                            'name': die.attributes['DW_AT_name'].value,
                                            'addr': addr,
                                            'die': die
                                        })
                            except KeyError:
                                # Expression uses an opcode the describer
                                # does not know; skip this variable.
                                pass

    return strings
def try_decode_string(str_maybe):
    """Try to decode ``str_maybe`` and return it if it is printable.

    Attempts each encoding in STR_ENCODINGS in order and returns the
    first decoding in which every character is printable (or an allowed
    escape character); returns None when no encoding produces a
    printable string.
    """
    # ACCEPTABLE_ESCAPE_CHARS holds bytes; decode them once so the
    # per-character check below compares str to str.  (The previous code
    # tested a one-character str against bytes objects, which never
    # matches in Python 3 and made the allowance a no-op -- harmless so
    # far only because '\r' and '\n' are also in string.printable.)
    acceptable_escapes = {c.decode('ascii') for c in ACCEPTABLE_ESCAPE_CHARS}

    for encoding in STR_ENCODINGS:
        try:
            decoded_str = str_maybe.decode(encoding)

            # Check if string is printable according to Python
            # since the parser (written in Python) will need to
            # print the string.
            printable = True
            for one_char in decoded_str:
                if (one_char not in string.printable
                        and one_char not in acceptable_escapes):
                    printable = False
                    break

            if printable:
                return decoded_str
        except UnicodeDecodeError:
            pass

    return None
def extract_strings_in_one_section(section, str_mappings):
"""Extract NULL-terminated strings in one ELF section"""
bindata = section['data']
if len(bindata) < 2:
# Can't have a NULL-terminated string with fewer than 2 bytes.
return str_mappings
idx = 0
# If first byte is not NULL, it may be a string.
if bindata[0] == 0:
start = None
else:
start = 0
while idx < len(bindata):
if start is None:
if bindata[idx] == 0:
# Skip NULL bytes to find next string
idx += 1
else:
# Beginning of possible string
start = idx
idx += 1
else:
if bindata[idx] != 0:
# Skipping till next NULL byte for possible string
idx += 1
else:
# End of possible string
end = idx
if start != end:
str_maybe = bindata[start:end]
decoded_str = try_decode_string(str_maybe)
# Only store readable string
if decoded_str is not None:
addr = section['start'] + start
| |
from __future__ import annotations
import collections
import os
import pathlib
import procrunner
import pytest
from cctbx import uctbx
from dxtbx.model import ExperimentList
from dxtbx.serialize import load
from dials.array_family import flex
def unit_cells_are_similar(
    uc1, uc2, relative_length_tolerance=0.01, absolute_angle_tolerance=1
):
    """Return True when two unit cells match within the given tolerances."""
    # see also uctbx.cpp unit_cell::is_similar_to()
    p1 = uc1.parameters()
    p2 = uc2.parameters()

    # Cell lengths a, b, c are compared as a ratio.
    for len1, len2 in zip(p1[:3], p2[:3]):
        if abs(min(len1, len2) / max(len1, len2) - 1) > relative_length_tolerance:
            return False

    # Cell angles alpha, beta, gamma: accept either the angle itself or
    # its 180-degree complement within the absolute tolerance.
    for ang1, ang2 in zip(p1[3:6], p2[3:6]):
        if (abs(ang1 - ang2) > absolute_angle_tolerance
                and abs(ang1 - (180 - ang2)) > absolute_angle_tolerance):
            return False

    return True
# Result bundle for indexing runs: the indexed reflection table, the
# resulting experiments, and the refinement RMSDs.
_indexing_result = collections.namedtuple(
    "indexing", ["indexed_reflections", "experiments", "rmsds"]
)
def run_indexing(
    reflections,
    experiment,
    working_directory,
    extra_args,
    expected_unit_cell,
    expected_rmsds,
    expected_hall_symbol,
    n_expected_lattices=1,
    relative_length_tolerance=0.005,
    absolute_angle_tolerance=0.5,
):
    """Run dials.index as a subprocess and assert on the indexing solution.

    Args:
        reflections: path, or list of paths, to strong-spot reflection files.
        experiment: path, or list of paths, to imported experiment files.
        working_directory: directory in which dials.index is executed.
        extra_args: extra command-line arguments passed to dials.index.
        expected_unit_cell: uctbx.unit_cell every output crystal must match.
        expected_rmsds: per-axis upper bounds on the positional RMSDs.
        expected_hall_symbol: Hall symbol required of every crystal.
        n_expected_lattices: number of crystal models expected in the output.
        relative_length_tolerance: cell-length tolerance for the comparison.
        absolute_angle_tolerance: cell-angle tolerance (degrees).

    Returns:
        _indexing_result namedtuple (indexed_reflections, experiments, rmsds);
        rmsds are those of the last experiment checked.
    """
    commands = ["dials.index"]
    # Both inputs may be a single path or a list of paths (multi-sequence).
    if isinstance(reflections, list):
        commands.extend(reflections)
    else:
        commands.append(reflections)
    if isinstance(experiment, list):
        commands.extend(experiment)
    else:
        commands.append(experiment)
    commands.extend(extra_args)
    result = procrunner.run(commands, working_directory=working_directory)
    assert not result.returncode and not result.stderr
    out_expts = working_directory / "indexed.expt"
    out_refls = working_directory / "indexed.refl"
    assert out_expts.is_file()
    assert out_refls.is_file()
    experiments_list = load.experiment_list(out_expts, check_format=False)
    assert len(experiments_list.crystals()) == n_expected_lattices
    indexed_reflections = flex.reflection_table.from_file(out_refls)
    indexed_reflections.assert_experiment_identifiers_are_consistent(experiments_list)
    rmsds = None
    for i, experiment in enumerate(experiments_list):
        # Unit cell and space group of every lattice must match expectations.
        assert unit_cells_are_similar(
            experiment.crystal.get_unit_cell(),
            expected_unit_cell,
            relative_length_tolerance=relative_length_tolerance,
            absolute_angle_tolerance=absolute_angle_tolerance,
        ), (
            experiment.crystal.get_unit_cell().parameters(),
            expected_unit_cell.parameters(),
        )
        sg = experiment.crystal.get_space_group()
        assert sg.type().hall_symbol() == expected_hall_symbol, (
            sg.type().hall_symbol(),
            expected_hall_symbol,
        )
        # Only the reflections assigned to this experiment.
        reflections = indexed_reflections.select(indexed_reflections["id"] == i)
        mi = reflections["miller_index"]
        # Every reflection with this id must carry a non-null Miller index.
        assert (mi != (0, 0, 0)).count(False) == 0
        reflections = reflections.select(mi != (0, 0, 0))
        reflections = reflections.select(
            reflections.get_flags(reflections.flags.used_in_refinement)
        )
        assert len(reflections) > 0
        obs_x, obs_y, obs_z = reflections["xyzobs.mm.value"].parts()
        calc_x, calc_y, calc_z = reflections["xyzcal.mm"].parts()
        # Root-mean-square deviations between observed and predicted positions.
        rmsd_x = flex.mean(flex.pow2(obs_x - calc_x)) ** 0.5
        rmsd_y = flex.mean(flex.pow2(obs_y - calc_y)) ** 0.5
        rmsd_z = flex.mean(flex.pow2(obs_z - calc_z)) ** 0.5
        rmsds = (rmsd_x, rmsd_y, rmsd_z)
        for actual, expected in zip(rmsds, expected_rmsds):
            assert actual <= expected, f"{rmsds} {expected_rmsds}"
        # Identifier bookkeeping must survive a round-trip through a
        # single-experiment ExperimentList.
        assert experiment.identifier != ""
        expt = ExperimentList()
        expt.append(experiment)
        reflections.assert_experiment_identifiers_are_consistent(expt)
    return _indexing_result(indexed_reflections, experiments_list, rmsds)
def test_index_i04_weak_data_fft3d(dials_regression, tmp_path):
    """Index thaumatin (i04 weak data) with the default fft3d method."""
    data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
    run_indexing(
        os.path.join(data_dir, "full.pickle"),
        os.path.join(data_dir, "experiments_import.json"),
        tmp_path,
        [
            "bin_size_fraction=0.25",
            "image_range=1,20",
            "image_range=250,270",
            "image_range=520,540",
        ],
        uctbx.unit_cell((57.7, 57.7, 149.8, 90, 90, 90)),
        (0.05, 0.04, 0.0005),
        " P 1",
    )
def test_index_trypsin_four_lattice_P212121(dials_regression, tmp_path):
    """Index the synthetic 4-lattice trypsin data with real-space grid search."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "trypsin"
    args = [
        "indexing.method=real_space_grid_search",
        "reflections_per_degree=10",
        "n_macro_cycles=5",
        "known_symmetry.unit_cell=54.3,58.3,66.5,90,90,90",
        "known_symmetry.space_group=P212121",
        "image_range=0,10",
        "beam.fix=all",
        "detector.fix=all",
        "max_cell=70",
    ]
    # NOTE(review): the dataset contains 4 lattices but only a single lattice
    # is expected (no max_lattices argument is passed) -- confirm intent.
    run_indexing(
        data_dir / "P1_X6_1_2_3_4.pickle",
        data_dir / "experiments_P1_X6_1_2_3_4.json",
        tmp_path,
        args,
        uctbx.unit_cell((54.3, 58.3, 66.5, 90, 90, 90)),
        (0.28, 0.30, 0.006),
        " P 2ac 2ab",
        n_expected_lattices=1,
        relative_length_tolerance=0.02,
        absolute_angle_tolerance=1,
    )
def test_index_i04_weak_data_fft1d(dials_regression, tmp_path):
    """Index thaumatin (i04 weak data) using the fft1d method."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "i04_weak_data"
    run_indexing(
        data_dir / "full.pickle",
        data_dir / "experiments_import.json",
        tmp_path,
        [
            "n_macro_cycles=2",
            "indexing.method=fft1d",
            "bin_size_fraction=0.25",
            "image_range=1,20",
            "image_range=250,270",
            "image_range=520,540",
        ],
        uctbx.unit_cell((57.7, 57.7, 149.9, 90, 90, 90)),
        (0.06, 0.05, 0.0005),
        " P 1",
    )
def test_index_trypsin_index_assignment_local(dials_regression, tmp_path):
    """Index the synthetic 3-lattice trypsin data with local index assignment."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "trypsin"
    run_indexing(
        data_dir / "P1_X6_1_2_3.pickle",
        data_dir / "experiments_P1_X6_1_2_3.json",
        tmp_path,
        [
            "indexing.method=real_space_grid_search",
            "d_min_start=3",
            "n_macro_cycles=3",
            "known_symmetry.unit_cell=54.3,58.3,66.5,90,90,90",
            "known_symmetry.space_group=P212121",
            "image_range=0,10",
            "beam.fix=all",
            "detector.fix=all",
            "max_lattices=3",
            "index_assignment.method=local",
            "nearest_neighbours=50",
        ],
        uctbx.unit_cell((54.3, 58.3, 66.5, 90, 90, 90)),
        (0.33, 0.40, 0.0024),
        " P 2ac 2ab",
        n_expected_lattices=3,
        relative_length_tolerance=0.02,
        absolute_angle_tolerance=1,
    )
def test_index_peak_search_clean(dials_regression, tmp_path):
    """Index a single i04_weak_data image using the 'clean' peak search."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "i04_weak_data"
    run_indexing(
        data_dir / "first_image.pickle",
        data_dir / "experiments_import.json",
        tmp_path,
        [
            "indexing.method=fft3d",
            "known_symmetry.space_group=P4",
            "known_symmetry.unit_cell=57.8,57.8,150,90,90,90",
            "peak_search=clean",
            "min_samples=15",
            "n_macro_cycles=4",
            "reciprocal_space_grid.d_min=4",
        ],
        uctbx.unit_cell((57.8, 57.8, 150, 90, 90, 90)),
        (0.06, 0.07, 0.003),
        " P 4",
    )
@pytest.mark.parametrize("specify_unit_cell", [False, True])
def test_index_imosflm_tutorial(dials_regression, tmp_path, specify_unit_cell):
# test on spots derived from imosflm tutorial data:
# http://www.ccp4.ac.uk/courses/BCA2005/tutorials/dataproc-tutorial.html
data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "imosflm_hg_mar"
pickle_path = data_dir / "strong.pickle"
sequence_path = data_dir / "experiments.json"
unit_cell = uctbx.unit_cell((58.373, 58.373, 155.939, 90, 90, 120))
hall_symbol = '-R 3 2"'
extra_args = [
"bin_size_fraction=0.25",
'known_symmetry.space_group="Hall: %s"' % hall_symbol.replace('"', '\\"'),
]
if specify_unit_cell:
extra_args.append(
'known_symmetry.unit_cell="%s %s %s %s %s %s"' % unit_cell.parameters()
)
expected_unit_cell = unit_cell
expected_hall_symbol = hall_symbol
expected_rmsds = (0.08, 0.11, 0.004)
run_indexing(
pickle_path,
sequence_path,
tmp_path,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.fixture(scope="session")
def insulin_spotfinding(dials_data, tmp_path_factory):
"""Return experiment and reflection files for 2 images of the insulin dataset"""
data_dir = dials_data("insulin", pathlib=True)
tmp_path = tmp_path_factory.mktemp("insulin")
command = ["dials.import"]
for i, image_path in enumerate(("insulin_1_001.img", "insulin_1_045.img")):
command.append(data_dir / image_path)
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
experiment = tmp_path / "imported.expt"
assert experiment.is_file()
command = ["dials.find_spots", "nproc=1", experiment]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
reflections = tmp_path / "strong.refl"
assert reflections.is_file()
return experiment, reflections
@pytest.mark.parametrize("method", ["fft3d", "fft1d", "real_space_grid_search"])
def test_index_insulin_multi_sequence(insulin_spotfinding, tmp_path, method):
experiment, reflections = insulin_spotfinding
expected_unit_cell = uctbx.unit_cell(
(78.163, 78.163, 78.163, 90.000, 90.000, 90.000)
)
expected_hall_symbol = " I 2 2 3"
expected_rmsds = (0.05, 0.06, 0.01)
extra_args = [
'known_symmetry.unit_cell="%s %s %s %s %s %s"'
% expected_unit_cell.parameters(),
f'known_symmetry.space_group="Hall: {expected_hall_symbol}"',
f"indexing.method={method}",
"treat_single_image_as_still=False",
]
run_indexing(
reflections,
experiment,
tmp_path,
extra_args,
expected_unit_cell,
expected_rmsds,
expected_hall_symbol,
)
@pytest.fixture(scope="session")
def insulin_spotfinding_stills(dials_data, tmp_path_factory):
"""Return experiment and reflection files for 1 image of the insulin
dataset treated as still image"""
data_dir = dials_data("insulin", pathlib=True)
tmp_path = tmp_path_factory.mktemp("insulin")
command = [
"dials.import",
"convert_sequences_to_stills=True",
data_dir / "insulin_1_001.img",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
experiment = tmp_path / "imported.expt"
assert experiment.is_file()
command = ["dials.find_spots", "nproc=1", experiment]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
reflections = tmp_path / "strong.refl"
assert reflections.is_file()
return experiment, reflections
@pytest.mark.xfel
@pytest.mark.parametrize("method", ["fft3d", "fft1d", "real_space_grid_search"])
def test_index_insulin_force_stills(insulin_spotfinding_stills, tmp_path, method):
    """Index a single insulin image forced through the stills indexer."""
    experiment, reflections = insulin_spotfinding_stills
    expected_unit_cell = uctbx.unit_cell(
        (78.163, 78.163, 78.163, 90.000, 90.000, 90.000)
    )
    expected_hall_symbol = " I 2 2 3"
    run_indexing(
        reflections,
        experiment,
        tmp_path,
        [
            "stills.indexer=stills",
            'known_symmetry.unit_cell="%s %s %s %s %s %s"'
            % expected_unit_cell.parameters(),
            f'known_symmetry.space_group="Hall: {expected_hall_symbol}"',
            f"indexing.method={method}",
        ],
        expected_unit_cell,
        (0.05, 0.06, 0.01),
        expected_hall_symbol,
    )
def test_multiple_experiments(dials_regression, tmp_path):
    """Index 4 lysozyme still shots in a single dials.index job.

    The first image does not index; the last three do.
    """
    data_dir = (
        pathlib.Path(dials_regression) / "indexing_test_data" / "i24_lysozyme_stills"
    )
    run_indexing(
        data_dir / "strong.pickle",
        data_dir / "imported_experiments.json",
        tmp_path,
        [
            "stills.indexer=sequences",
            "joint_indexing=False",
            "outlier.algorithm=sauter_poon",
        ],
        uctbx.unit_cell((38.06, 78.78, 78.91, 90, 90, 90)),
        (0.1, 0.07, 0.0),
        " P 1",
        n_expected_lattices=3,
        relative_length_tolerance=0.01,
    )
def test_index_4rotation(dials_regression, tmp_path):
    """Index a four-sweep rotation dataset in space group R3."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "4rotation"
    result = run_indexing(
        data_dir / "strong.pickle",
        data_dir / "experiments.json",
        tmp_path,
        [
            "max_refine=10",
            "reflections_per_degree=50",
            "known_symmetry.space_group=R3",
            "n_macro_cycles=3",
        ],
        uctbx.unit_cell((48.397, 48.397, 284.767, 90, 90, 120)),
        (0.06, 0.08, 0.22),
        " R 3",
    )
    assert len(result.indexed_reflections) > 276800, len(result.indexed_reflections)
def test_index_small_molecule_multi_sequence_4(dials_regression, tmp_path):
    """Small-molecule multi-sequence indexing: 4 sequences with different
    values of goniometer.fixed_rotation()."""
    data_dir = pathlib.Path(dials_regression) / "indexing_test_data" / "multi_sweep"
    sweep_dirs = [data_dir / f"SWEEP{i + 1}" / "index" for i in range(4)]
    pickle_paths = [sorted(sweep.glob("*_strong.pickle"))[0] for sweep in sweep_dirs]
    sequence_paths = [sweep / "experiments.json" for sweep in sweep_dirs]
    result = run_indexing(
        pickle_paths,
        sequence_paths,
        tmp_path,
        ["known_symmetry.space_group=I4", "filter_ice=False"],
        uctbx.unit_cell((7.310, 7.310, 6.820, 90.000, 90.000, 90.000)),
        (0.10, 0.7, 0.5),
        " I 4",
    )
    assert len(result.indexed_reflections) > 1250, len(result.indexed_reflections)
def test_index_small_molecule_multi_sequence_3(dials_regression, tmp_path):
    """Small-molecule multi-sequence indexing: 3 sequences with different
    values of goniometer setting rotation (i.e. phi scans)."""
    data_dir = pathlib.Path(dials_regression) / "dials-191"
    # BUGFIX: removed a leftover debug print(data_dir) that cluttered the
    # pytest output.
    pickle_paths = [
        sorted(data_dir.glob(f"*_SWEEP{i + 1}_strong.pickle"))[0] for i in range(3)
    ]
    sequence_paths = [
        sorted(data_dir.glob(f"*_SWEEP{i + 1}_experiments.json"))[0] for i in range(3)
    ]
    extra_args = ["filter_ice=False"]
    expected_unit_cell = uctbx.unit_cell(
        (9.440, 15.313, 17.126, 90.073, 90.106, 79.248)
    )
    expected_rmsds = (0.32, 0.34, 0.005)
    expected_hall_symbol = " P 1"
    result = run_indexing(
        pickle_paths,
        sequence_paths,
        tmp_path,
        extra_args,
        expected_unit_cell,
        expected_rmsds,
        expected_hall_symbol,
    )
    assert len(result.indexed_reflections) > 12000, len(result.indexed_reflections)
    # expect at least indexed 2000 reflections per
#!/usr/bin/env python3
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from psycopg2.extras import NamedTupleCursor as ds_cur
import requests
import pymongo
import re
import csv
from string import hexdigits
from urllib.parse import unquote
from html.parser import HTMLParser
from datetime import date as d
from datetime import datetime as dt
from datetime import timedelta as td
import subprocess
from os import getenv
### subversion check
from sys import version_info as version
# Abort early on interpreters that cannot parse the f-strings used below.
if version < (3, 6):
    print("Python version 3.6 or greater required.")
    # BUGFIX: quit() is provided by the site module (absent under `python -S`)
    # and exits with status 0; raise SystemExit(1) to signal failure reliably.
    raise SystemExit(1)
# Database credential comes from the environment; None if unset.
upc_DATABASE_KEY = getenv('upc_DATABASE_KEY')
# Minimum age before a data source is re-downloaded.
update_interval = td(days=30)
## Setup database if it's empty
connection = None
try:
    db_conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
except psycopg2.OperationalError:
    # BUGFIX: was a bare `except:`, which also swallows KeyboardInterrupt
    # and programming errors. Connection failure means the upc_data
    # database does not exist yet: connect to the default database,
    # create upc_data, then load the schema from upc_dataset.sql.
    print('DB connection failed.')
    db_conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='postgres')
    # CREATE DATABASE cannot run inside a transaction block.
    db_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    with db_conn.cursor() as db_cur:
        db_cur.execute('CREATE DATABASE upc_data')
    db_conn.close()
    db_conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
    with db_conn.cursor() as db_cur:
        with open('upc_dataset.sql', 'r') as sqlfile:
            db_cur.execute(sqlfile.read())
        # DEBUG
        # print(db_cur.query.decode('utf-8'))
    db_conn.commit()
    db_conn.close()
### get dataset_source_meta table
ds_meta = None
# BUGFIX: this connection used a "<PASSWORD>" redaction placeholder, which is
# not valid Python; use the same credential as every other connection here.
db_conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
with db_conn.cursor(cursor_factory=ds_cur) as db_cur:
    db_cur.execute('SELECT * FROM dataset_source_meta')
    # NamedTupleCursor rows converted to plain dicts, one per data source.
    ds_meta = [row._asdict() for row in db_cur]
db_conn.close()
#print(f"Dataset Source Metadata:\n{ds_meta}")
def validate_upc(code):
    """Normalize a scanned code to a 14-digit GTIN string.

    Accepts GTIN-14, EAN-13 or UPC-A (12-digit) codes matched at the end of
    the input, left-pads shorter forms with zeros and returns the 14-digit
    result. Returns None for empty, non-numeric, unmatched, or too-short
    codes (fewer than 11 significant digits).
    """
    # Raw strings: '\d' in a non-raw literal is a DeprecationWarning and
    # will eventually be a SyntaxError.
    p_GTIN = re.compile(r'\d{14}$')
    p_EAN = re.compile(r'\d{13}$')
    p_UPC = re.compile(r'\d{12}$')
    if code is None:
        return None
    if not code:
        return None
    text = str(code)
    # Longest format first; each .search() runs once (the original scanned
    # the string twice per pattern).
    match = p_GTIN.search(text) or p_EAN.search(text) or p_UPC.search(text)
    if match is None:
        return None
    u_match = match.group()
    # Reject codes whose significant (non-zero-padded) part is too short to
    # be a real UPC/EAN. int() also guards against non-numeric surrounding
    # characters, which previously raised an uncaught ValueError.
    try:
        if len(str(int(text))) < 11:
            return None
    except ValueError:
        return None
    # Zero-pad EAN-13 / UPC-A up to the 14-digit GTIN width.
    if p_EAN.match(u_match):
        u_match = "0" + u_match
    elif p_UPC.match(u_match):
        u_match = "00" + u_match
    return u_match
## GET INFO ABOUT UHTT DATA
# Pull the cached UHTT release metadata out of the dataset_source_meta rows
# loaded above, then query the release feed for newer versions.
uhtt_current_release = None
uhtt_current_date = None
uhtt_last_check_date = None
uhtt_refresh_check_url = None
for i in ds_meta:
    if i['source_name'] == 'uhtt':
        uhtt_current_release = i['current_version_release_name']
        uhtt_current_version_url = i['current_version_url']
        uhtt_current_date = i['current_version_date']
        uhtt_last_check_date = i['last_update_check']
        uhtt_refresh_check_url = i['refresh_check_url']
try:
    # Presumably a GitHub-style releases JSON feed (list, newest first) --
    # the code below reads u_r[0]['published_at'] / ['tag_name'] / ['assets'].
    u_r = requests.get(uhtt_refresh_check_url).json()
except requests.exceptions.RequestException as e:
    # NOTE(review): on failure u_r is never assigned, so the release-date
    # comparison further down raises NameError -- confirm intended behavior.
    print("UHTT update check failed.", e)
def uhtt_store_update_check():
    """Persist the UHTT release metadata gathered by this run and delete the
    downloaded CSV so the next run starts from a clean slate."""
    conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
    conn.autocommit = True
    row = (uhtt_current_date, uhtt_current_release, uhtt_current_version_url, d.today(), 'uhtt')
    with conn.cursor() as cur:
        cur.execute("""
            UPDATE dataset_source_meta
            SET current_version_date = %s,
                current_version_release_name = %s,
                current_version_url = %s,
                last_update_check = %s
            WHERE
                source_name = %s;
            """,
            row
        )
    conn.close()
    subprocess.run(["rm", "-f", "uhtt_barcode_ref_all.csv"])
def off_store_update_check():
    """Persist the OpenFoodFacts version date, dump hash and check date."""
    conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
    conn.autocommit = True
    row = (off_current_version_date, off_update_hash, d.today(), 'off')
    with conn.cursor() as cur:
        cur.execute("""
            UPDATE dataset_source_meta
            SET current_version_date = %s,
                current_version_hash = %s,
                last_update_check = %s
            WHERE
                source_name = %s;
            """,
            row
        )
    conn.close()
def usda_store_update_check():
    """Persist the USDA version date, download URL and check date."""
    conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
    conn.autocommit = True
    row = (usda_latest_date, usda_latest_url, d.today(), 'usda')
    with conn.cursor() as cur:
        cur.execute("""
            UPDATE dataset_source_meta
            SET current_version_date = %s,
                current_version_url = %s,
                last_update_check = %s
            WHERE
                source_name = %s;
            """,
            row
        )
    conn.close()
def upsert_uhtt_entry(entry):
    """Insert or update one UHTT product row in product_info.

    The check_unique_composite constraint deduplicates rows; an existing row
    is only overwritten when the incoming publication date is newer.
    """
    db_fields = ['source', 'source_item_id', 'upc', 'name', 'db_entry_date', 'source_item_publication_date']
    # BUGFIX: this connection used a "<PASSWORD>" redaction placeholder,
    # which is not valid Python; use the shared credential.
    db_conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
    db_conn.autocommit = True
    with db_conn.cursor() as db_cur:
        db_cur.execute(f"""
            INSERT INTO
                product_info ({', '.join(db_fields)})
            VALUES
                (%s, %s, %s, %s, %s, %s)
            ON CONFLICT ON CONSTRAINT
                check_unique_composite
            DO
                UPDATE SET
                    source_item_id = EXCLUDED.source_item_id,
                    name = EXCLUDED.name,
                    db_entry_date = EXCLUDED.db_entry_date,
                    source_item_publication_date = EXCLUDED.source_item_publication_date
                WHERE
                    EXCLUDED.source_item_publication_date > product_info.source_item_publication_date;
            """,
            (entry['source'], entry['source_item_id'], entry['upc'], entry['name'], entry['db_entry_date'], entry['source_item_publication_date'])
        )
        # print(db_cur.query.decode('utf-8'))
    db_conn.close()
u_update_required = False
# A newer UHTT release exists if we have no cached date or the feed's newest
# entry was published after it.
if not uhtt_current_date or d.fromisoformat(u_r[0]['published_at'].split('T')[0]) > uhtt_current_date:
    uhtt_current_date = d.fromisoformat(u_r[0]['published_at'].split('T')[0])
    uhtt_current_release = u_r[0]['tag_name']
    uhtt_last_check_date = d.today()
    # Pick the .7z archive from the release assets as the download URL.
    for i in u_r[0]['assets']:
        if i['browser_download_url'].endswith('.7z'):
            uhtt_current_version_url = i['browser_download_url']
    u_update_required = True
if not u_update_required:
    print("UHTT-sourced entries are up to date.")
    uhtt_store_update_check()
else:
    sp_u_start = dt.now()
    # Shell helper downloads and unpacks uhtt_barcode_ref_all.csv.
    u_sp = subprocess.run("./get_UHTT_update.sh")
    if u_sp.returncode == 0:
        print("UHTT Data Update Acquired.")
    else:
        print(f"UHTT Data Update Failed (exit code {u_sp.returncode}).")
    print(f"Elapsed time: {dt.now() - sp_u_start}")
    ### Process Acquired Source
    u_row_count = 0
    count = 0
    kill_count = 0
    nonfood_count = 0
    # First pass: row count, for progress reporting only.
    with open('uhtt_barcode_ref_all.csv', 'r') as u_file:
        u_row_count = sum(1 for lin in u_file)
    with open('uhtt_barcode_ref_all.csv', 'r') as u_file:
        u_start_time = dt.now()
        u_dict = csv.DictReader(u_file, delimiter='\t', quoting=csv.QUOTE_NONE)
        # chz = 250
        for row in u_dict:
            # chz -= 1
            # if chz < 1:
            # break
            count += 1
            entry = {}
            entry['upc'] = validate_upc(row['UPCEAN'])
            entry['name'] = row['Name']
            # Keep only rows with a category containing the Russian phrase
            # for "food products", and with a valid UPC and name.
            if "CategoryName" in row.keys():
                if row['CategoryName']:
                    if "Продукты питания" in row['CategoryName']:
                        if entry['upc'] and entry['name']:
                            entry['source'] = 'uhtt'
                            entry['source_item_id'] = row['ID']
                            entry['db_entry_date'] = d.today()
                            entry['source_item_publication_date'] = uhtt_current_date
                            upsert_uhtt_entry(entry)
                        else:
                            kill_count += 1
                    else:
                        nonfood_count += 1
                else:
                    kill_count += 1
            else:
                kill_count += 1
            # Progress report every 1000 rows.
            if not count % 1000:
                current_time = dt.now()
                duration = td(seconds=(current_time - u_start_time).seconds)
                print(f"Processed {count} out of {u_row_count} rows, rejecting {kill_count} sparse entries and {nonfood_count} non-food entries in {duration}.")
        print(f"UHTT upsert complete. Total Time Elapsed: {dt.now() - u_start_time}")
        uhtt_store_update_check()
## GET INFO ABOUT OPENFOODFACTS DATA
def is_hexadecimal(string):
    """Return True if every character of *string* is a hex digit (0-9a-fA-F).

    Vacuously True for the empty string.
    """
    # PERF fix: the original rebuilt set(hexdigits) for every character;
    # build the lookup set once before the scan.
    hex_chars = set(hexdigits)
    return all(char in hex_chars for char in string)
# Cached OpenFoodFacts dump metadata from dataset_source_meta.
off_current_hash = None
off_update_hash = None
off_current_version_url = None
off_update_hash_url = None
off_last_check_date = None
off_update_required = False
for i in ds_meta:
    if i['source_name'] == 'off':
        off_current_hash = i["current_version_hash"]
        off_update_hash_url = i["refresh_check_url"]
        off_current_version_url = i["current_version_url"]
        off_last_check_date = i['last_update_check']
        # NOTE(review): off_current_version_date is only bound inside this
        # loop; if dataset_source_meta has no 'off' row, the reference below
        # raises NameError -- confirm the row is guaranteed to exist.
        off_current_version_date = i['current_version_date']
# print(f"Current Version Date: {off_current_version_date}")
# print(f"Last Check Date: {off_last_check_date}")
# print(f"Current Version Age: {(d.today() - off_current_version_date).days}, {type((d.today() - off_current_version_date).days)}")
# print(f"Minimum Version Age: {update_interval.days}, {type(update_interval.days)}")
# print(f"Updated in last 30 days: {(d.today() - off_current_version_date) > (update_interval).days}")
# An update is required when we have never checked, never downloaded, or the
# current dump is older than update_interval (30 days).
if not off_last_check_date:
    # print('OFF update_required')
    off_update_required = True
if off_current_version_date:
    update_age = d.today() - off_current_version_date
    if update_age.days > update_interval.days:
        off_update_required = True
        print("OFF update required")
else:
    off_update_required = True
    print("OFF update required")
# Fetch the published SHA-256 of the latest OFF dump and, if it differs from
# the hash we already imported, download the new dump.
if off_update_required:
    try:
        r = requests.get(off_update_hash_url)
        # Checksum file format: "<sha256>  <filename>".
        off_update_hash = r.text.split(" ")[0]
        if len(off_update_hash) != 64 or not is_hexadecimal(off_update_hash):
            print("Retrieved OFF update checksum is not a SHA-256 hash.")
            off_update_hash = None
        else:
            # BUGFIX: the success message previously printed unconditionally,
            # even when the checksum had just been rejected above.
            print("OFF update checksum retrieval succeeded.")
    except requests.exceptions.RequestException as e:
        print("OFF update checksum retrieval failed.", e)
        off_update_hash = None
if off_update_hash and off_update_required:
    if not off_current_hash or off_current_hash != off_update_hash:
        sp_off_start = dt.now()
        # Shell helper downloads the dump and imports it into local MongoDB.
        off_sp = subprocess.run("./get_OFF_update.sh")
        if off_sp.returncode == 0:
            print("OpenFoodFacts Data Update Acquired.")
        else:
            print(f"OpenFoodFacts Data Update Failed (exit code {off_sp.returncode}).")
        print(f"Elapsed time: {dt.now() - sp_off_start}")
elif not off_update_required:
    off_update_hash = off_current_hash
    print("OpenFoodFacts-sourced entries are up to date.")
### Upsert OpenFoodFacts entries
def upsert_off_entry(entry):
    """Insert or update one OpenFoodFacts product row in product_info.

    Relies on the module-level db_fields list (assigned further down, before
    this function is first called) for the column order; `entry` must supply
    a value for every name in db_fields. An existing conflicting row is only
    overwritten when the incoming publication date is newer.
    """
    db_conn = psycopg2.connect(user='barcodeserver', host='10.8.0.55', password=upc_DATABASE_KEY, dbname='upc_data')
    db_conn.autocommit = True
    with db_conn.cursor() as db_cur:
        db_cur.execute(f"""
            INSERT INTO
                product_info ({', '.join(db_fields)})
            VALUES
                (%s, %s, %s, %s, %s, %s, %s, %s, %s)
            ON CONFLICT ON CONSTRAINT
                check_unique_composite
            DO
                UPDATE SET
                    source_item_id = EXCLUDED.source_item_id,
                    name = EXCLUDED.name,
                    category = EXCLUDED.category,
                    db_entry_date = EXCLUDED.db_entry_date,
                    source_item_submission_date = EXCLUDED.source_item_submission_date,
                    source_item_publication_date = EXCLUDED.source_item_publication_date,
                    serving_size_fulltext = EXCLUDED.serving_size_fulltext
                WHERE
                    EXCLUDED.source_item_publication_date > product_info.source_item_publication_date;
            """,
            (entry['source'], entry['source_item_id'], entry['upc'], entry['name'], entry['category'], entry['db_entry_date'], entry['source_item_submission_date'], entry['source_item_publication_date'], entry['serving_size_fulltext'])
        )
        # print(db_cur.query.decode('utf-8'))
    db_conn.close()
## Use PyMongo to access retrieved OpenFoodFacts data
# The OFF dump is imported into local MongoDB database 'off_temp' --
# presumably by get_OFF_update.sh; confirm against that script.
m_client = pymongo.MongoClient()
m_db = m_client['off_temp']
off_collection = m_db['product_info']
m_dataset = off_collection.find({})
# Estimated count is cheap and only used for progress reporting below.
row_count = off_collection.estimated_document_count()
count = 0
kill_count = 0
# Mongo document keys of interest.
m_fields = ['_id', 'code', 'product_name', 'categories_tags', 'created_t', 'created_datetime', 'last_modified_t', 'last_modified_datetime', 'serving_size']
# product_info column order used by upsert_off_entry().
db_fields = ['source', 'source_item_id', 'upc', 'name', 'category', 'db_entry_date', 'source_item_submission_date', 'source_item_publication_date', 'serving_size_fulltext']
# Fallback mapping from db column -> Mongo key, consulted only for columns
# not already filled during per-row processing ('source' and the date
# columns are always pre-set, so their mapping values are never used).
db_mapping = {'source':'off', 'source_item_id':'_id', 'upc':'code', 'name':'product_name', 'category':'categories_tags', 'db_entry_date':None, 'source_item_submission_date':None, 'source_item_publication_date':None, 'serving_size_fulltext':'serving_size'}
### process and upsert OpenFoodfacts data
# Walk every Mongo document, reshape it into a product_info row, reject
# incomplete rows (kill_flag), and upsert the rest. Fields not explicitly
# set on `entry` are filled from m_entry via db_mapping at the end.
if off_update_required == True:
    start_time = dt.now()
    # chz = 250
    for m_d in m_dataset:
        count += 1
        m_entry = {}
        entry = {}
        kill_flag = False
        entry['source'] = 'off'
        entry['db_entry_date'] = d.strftime(d.today(), '%Y-%m-%d')
        if not count % 1000:
            current_time = dt.now()
            print(f"Processed {count} out of {row_count} rows, rejecting {kill_count} rows, {current_time - start_time} elapsed.")
        # Copy only the keys of interest out of the Mongo document.
        for i in m_fields:
            if i in m_d.keys():
                m_entry[i] = m_d[i]
        # A row without a usable product name is rejected.
        if 'product_name' not in m_entry.keys():
            # print("Product name failure")
            kill_flag = True
        elif not m_entry['product_name']:
            # print("Product name absent")
            kill_flag = True
        # A row whose barcode fails validate_upc() is rejected.
        if 'code' in m_entry.keys():
            if validate_upc(m_entry['code']) == None:
                # print("UPC failure")
                kill_flag = True
            else:
                entry['upc'] = validate_upc(m_entry['code'])
        # Category: explicitly None when absent or stored as null; when a
        # real value is present, entry['category'] is intentionally left
        # unset so the db_mapping fallback below copies it -- presumably;
        # confirm the else pairs with the outer if as transcribed here.
        if "categories_tags" in m_entry.keys():
            if m_entry['categories_tags'] == None:
                entry['category'] = m_entry['categories_tags']
        else:
            entry['category'] = None
        # Serving size: same pattern as category.
        if "serving_size" in m_entry.keys():
            if m_entry['serving_size'] == None:
                entry['serving_size_fulltext'] = m_entry["serving_size"]
        else:
            entry['serving_size_fulltext'] = None
        # Submission date: prefer the epoch timestamp; fall back to the ISO
        # string; reject the row when only an empty ISO value exists.
        if 'created_t' in m_entry.keys():
            if m_entry['created_t']:
                entry['source_item_submission_date'] = d.fromtimestamp(m_entry['created_t'])
                if 'created_datetime' in m_entry.keys():
                    m_entry.pop('created_datetime', None)
            else:
                m_entry.pop('created_t', None)
        if 'created_datetime' in m_entry.keys():
            if m_entry['created_datetime']:
                entry['source_item_submission_date'] = d.fromisoformat(m_entry['created_datetime'])
            else:
                m_entry.pop('created_datetime', None)
                kill_flag = True
                # print('Submission date failure')
        # Publication date: same two-source preference as submission date.
        if 'last_modified_t' in m_entry.keys():
            if m_entry['last_modified_t']:
                entry['source_item_publication_date'] = d.fromtimestamp(m_entry['last_modified_t'])
                if 'last_modified_datetime' in m_entry.keys():
                    m_entry.pop('last_modified_datetime', None)
            else:
                m_entry.pop('last_modified_t', None)
        if 'last_modified_datetime' in m_entry.keys():
            if m_entry['last_modified_datetime']:
                entry['source_item_publication_date'] = d.fromisoformat(m_entry['last_modified_datetime'])
            else:
                m_entry.pop('last_modified_datetime', None)
                # print('Last Modified date failure')
                kill_flag = True
        if kill_flag:
            kill_count += 1
        else:
            # chz -= 1
            # if chz < 1:
            # break
            # Fill any column not set above from the raw Mongo value.
            # NOTE(review): if a surviving row has neither date field,
            # db_mapping yields None and m_entry[None] raises KeyError --
            # confirm OFF documents always carry created_t/last_modified_t.
            for db_field in db_fields:
                if db_field not in entry.keys():
                    entry[db_field] = m_entry[db_mapping[db_field]]
            # print(entry)
            upsert_off_entry(entry)
    off_current_version_date = d.today()
    off_store_update_check()
else:
    off_store_update_check()
# GET INFO ABOUT USDA DATA:
## Check datasource meta table for USDA data attributes
usda_current_version_date = None
usda_dataset_index_url = None
usda_dataset_index_raw = None
usda_update_required = False
for i in ds_meta:
if i['source_name'] == | |
"""Gets the wrapped entity."""
return self.__entity
    def GetNext(self):
      """Wrap and return the next entity.

      The entity is retrieved from the iterator given at construction time,
      so successive SortOrderEntity instances walk one result stream while
      sharing the same sort orderings.
      """
      return MultiQuery.SortOrderEntity(self.__entity_iterator,
                                        self.__orderings)
    def CmpProperties(self, that):
      """Compare two entities and return their relative order.

      Compares self to that based on the current sort orderings and the
      key orders between them. Returns negative, 0, or positive depending on
      whether self is less, equal to, or greater than that. This
      comparison returns as if all values were to be placed in ascending order
      (highest value last). Only uses the sort orderings to compare (ignores
      keys).

      Args:
        that: SortOrderEntity

      Returns:
        Negative if self < that
        Zero if self == that
        Positive if self > that
      """
      if not self.__entity:
        # Exhausted streams wrap None; cmp() sorts them relative to
        # non-empty ones (Python 2 cmp semantics).
        return cmp(self.__entity, that.__entity)
      # Walk the orderings in priority order; the first property that
      # differs decides the result.
      for (identifier, order) in self.__orderings:
        value1 = self.__GetValueForId(self, identifier, order)
        value2 = self.__GetValueForId(that, identifier, order)
        result = cmp(value1, value2)
        if order == Query.DESCENDING:
          result = -result
        if result:
          return result
      return 0
    def __GetValueForId(self, sort_order_entity, identifier, sort_order):
      """Return the sort value of one property of a wrapped entity.

      Multi-valued (list) properties collapse to a single representative:
      the min for descending order, the max for ascending order. That
      choice is memoized per (entity key, property) so repeated heap
      comparisons do not rescan the list.
      """
      value = _GetPropertyValue(sort_order_entity.__entity, identifier)
      if isinstance(value, list):
        entity_key = sort_order_entity.__entity.key()
        if (entity_key, identifier) in self.__min_max_value_cache:
          value = self.__min_max_value_cache[(entity_key, identifier)]
        elif sort_order == Query.DESCENDING:
          value = min(value)
        else:
          value = max(value)
        self.__min_max_value_cache[(entity_key, identifier)] = value
      return value
    def __cmp__(self, that):
      """Compare self to that w.r.t. values defined in the sort order.

      Compare an entity with another, using sort-order first, then the key
      order to break ties. This can be used in a heap to have faster min-value
      lookup. (Python 2 heapq relies on __cmp__; this does not translate to
      Python 3, which requires __lt__.)

      Args:
        that: other entity to compare to

      Returns:
        negative: if self is less than that in sort order
        zero: if self is equal to that in sort order
        positive: if self is greater than that in sort order
      """
      property_compare = self.CmpProperties(that)
      if property_compare:
        return property_compare
      else:
        # Properties tie: fall back to entity key order for a total order.
        return cmp(self.__entity.key(), that.__entity.key())
  def Run(self, **kwargs):
    """Return an iterable output with all results in order.

    Merge sort the results. First create a list of iterators, then walk
    though them and yield results in order.

    Args:
      kwargs: keyword arguments; an optional 'rpc' (datastore.RPC) is cloned
        for each subquery so they do not share RPC state.
    """
    rpc = GetRpcFromKwargs(kwargs)
    results = []
    count = 1
    log_level = logging.DEBUG - 1
    # Start every bound subquery, each with its own RPC clone.
    for bound_query in self.__bound_queries:
      logging.log(log_level, 'Running query #%i' % count)
      if rpc:
        rpc_clone = rpc.clone()
      else:
        rpc_clone = None
      results.append(bound_query.Run(rpc=rpc_clone))
      count += 1
    def IterateResults(results):
      """Iterator function to return all results in sorted order.

      Iterate over the array of results, yielding the next element, in
      sorted order. This function is destructive (results will be empty
      when the operation is complete).

      Args:
        results: list of result iterators to merge and iterate through

      Yields:
        The next result in sorted order.
      """
      # Seed a min-heap with the first entity from each stream; ordering
      # comes from SortOrderEntity.__cmp__ (Python 2 heapq semantics).
      result_heap = []
      for result in results:
        heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)
        if heap_value.GetEntity():
          heapq.heappush(result_heap, heap_value)
      # Subqueries can return the same entity; track yielded keys to
      # de-duplicate.
      used_keys = set()
      while result_heap:
        top_result = heapq.heappop(result_heap)
        results_to_push = []
        if top_result.GetEntity().key() not in used_keys:
          yield top_result.GetEntity()
        else:
          pass
        used_keys.add(top_result.GetEntity().key())
        results_to_push = []
        # Drain entities equal to top_result (duplicates of the same key)
        # from the heap, advancing each of their streams; the first
        # non-equal entity is pushed back unchanged.
        while result_heap:
          next = heapq.heappop(result_heap)
          if cmp(top_result, next):
            # Not a duplicate: re-queue it as-is for a later pass.
            results_to_push.append(next)
            break
          else:
            # Duplicate of top_result: advance that stream instead.
            results_to_push.append(next.GetNext())
        results_to_push.append(top_result.GetNext())
        # Return the drained/advanced wrappers to the heap, dropping
        # exhausted streams.
        for popped_result in results_to_push:
          if popped_result.GetEntity():
            heapq.heappush(result_heap, popped_result)
    return IterateResults(results)
def Count(self, limit=None, **kwargs):
    """Return the number of matched entities for this query.
    Will return the de-duplicated count of results. Will call the more
    efficient Get() function if a limit is given.
    Args:
      limit: maximum number of entries to count (for any result > limit, return
      limit).
      rpc: datastore.RPC to use for this request.
    Returns:
      count of the number of entries returned.
    """
    rpc = GetRpcFromKwargs(kwargs)
    # With a limit, Get() is cheaper than exhausting the merged iterator.
    if limit is not None:
        return len(self.Get(limit, rpc=rpc))
    return sum(1 for _ in self.Run(rpc=rpc))
def GetCompiledCursor(self):
    """Always fails: MultiQuery results cannot be resumed via a cursor."""
    message = ('No cursor available for a MultiQuery (queries '
               'using "IN" or "!=" operators)')
    raise AssertionError(message)
def GetCompiledQuery(self):
    """Always fails: a MultiQuery has no single compiled representation."""
    message = ('No compilation available for a MultiQuery (queries '
               'using "IN" or "!=" operators)')
    raise AssertionError(message)
def __setitem__(self, query_filter, value):
    """Add a new filter by setting it on all subqueries.
    If any of the setting operations raise an exception, the ones
    that succeeded are undone and the exception is propagated
    upward.
    Args:
      query_filter: a string of the form "property operand".
      value: the value that the given property is compared against.
    """
    # Record each subquery's previous value (None when the filter was not
    # present) so a mid-loop failure can be rolled back.
    saved_items = []
    for index, query in enumerate(self.__bound_queries):
        saved_items.append(query.get(query_filter, None))
        try:
            query[query_filter] = value
        except:
            # Restore the subqueries that were updated before the failure,
            # then re-raise the original exception unchanged (bare except
            # is deliberate here: any failure triggers the rollback).
            for q, old_value in itertools.izip(self.__bound_queries[:index],
                                               saved_items):
                if old_value is not None:
                    q[query_filter] = old_value
                else:
                    # The filter did not exist before; remove it again.
                    del q[query_filter]
            raise
def __delitem__(self, query_filter):
    """Delete a filter by deleting it from all subqueries.
    If a KeyError is raised during the attempt, it is ignored, unless
    every subquery raised a KeyError. If any other exception is
    raised, any deletes will be rolled back.
    Args:
      query_filter: the filter to delete.
    Raises:
      KeyError: No subquery had an entry containing query_filter.
    """
    subquery_count = len(self.__bound_queries)
    keyerror_count = 0
    # Previous values are recorded so an unexpected failure can restore
    # the filters already deleted.
    saved_items = []
    for index, query in enumerate(self.__bound_queries):
        try:
            saved_items.append(query.get(query_filter, None))
            del query[query_filter]
        except KeyError:
            # Missing on this subquery only; tolerated unless every
            # subquery is missing the filter (checked after the loop).
            keyerror_count += 1
        except:
            # Unexpected failure: restore filters deleted so far (entries
            # whose saved value is None never held the filter) and
            # propagate the original exception.
            for q, old_value in itertools.izip(self.__bound_queries[:index],
                                               saved_items):
                if old_value is not None:
                    q[query_filter] = old_value
            raise
    if keyerror_count == subquery_count:
        raise KeyError(query_filter)
def __iter__(self):
    """Iterate over the underlying bound subqueries."""
    bound_queries = self.__bound_queries
    return iter(bound_queries)
class Iterator(object):
"""An iterator over the results of a datastore query.
Iterators are used to access the results of a Query. An iterator is
obtained by building a Query, then calling Run() on it.
Iterator implements Python's iterator protocol, so results can be accessed
with the for and in statements:
> it = Query('Person').Run()
> for person in it:
> print 'Hi, %s!' % person['name']
"""
def __init__(self, query_result_pb, batch_size=None, rpc=None,
             query_request_pb=None):
    """Constructor.
    kwargs gets stored and passed on to Next calls made by this iterator.

    Args:
      query_result_pb: the initial QueryResult protobuf from running the query.
      batch_size: number of results to request per subsequent Next call.
      rpc: datastore.RPC to reuse for follow-up Next requests.
      query_request_pb: the originating Query request protobuf; when given,
        offset and compiled-cursor bookkeeping are initialized from it.
    """
    # Datastore-assigned cursor handle used to fetch subsequent batches.
    self.__cursor = query_result_pb.cursor()
    # True when the query returns bare keys instead of full entities.
    self.__keys_only = query_result_pb.keys_only()
    self.__batch_size = batch_size
    self.__rpc = rpc
    self.__skipped_results = 0
    # Compiled-cursor bookkeeping: results consumed since the previous
    # compiled cursor position, plus the cursors themselves.
    self.__results_since_prev = 0
    self.__prev_compiled_cursor = None
    self.__next_compiled_cursor = None
    if query_request_pb:
        # Portion of the requested offset still to be skipped client-side.
        self.__remaining_offset = query_request_pb.offset()
    else:
        self.__remaining_offset = 0
    if query_request_pb and query_result_pb.has_compiled_cursor():
        # Track compiled cursors starting from the request's cursor when it
        # had one, otherwise from a fresh empty cursor.
        if query_request_pb.has_compiled_cursor():
            self.__next_compiled_cursor = query_request_pb.compiled_cursor()
        else:
            self.__next_compiled_cursor = datastore_pb.CompiledCursor()
        self.__buffer = self._ProcessQueryResult(query_result_pb)
        self.__results_since_prev = query_request_pb.offset()
    else:
        self.__buffer = self._ProcessQueryResult(query_result_pb)
def _Get(self, count):
    """Gets the next count result(s) of the query.
    Not intended to be used by application developers. Use the python
    iterator protocol instead.
    This method uses _Next to returns the next entities or keys from the list of
    matching results. If the query specified a sort order, results are returned
    in that order. Otherwise, the order is undefined.
    The argument, count, specifies the number of results to return. However, the
    length of the returned list may be smaller than count. This is the case only
    if count is greater than the number of remaining results.
    The results are always returned as a list. If there are no results left,
    an empty list is returned.
    Args:
      # the number of results to return; must be >= 1
      count: int or long
    Returns:
      # a list of entities or keys
      [Entity or Key, ...]
    """
    # Keep issuing _Next() calls until either the request is satisfied or
    # the backend reports that no more results remain.
    fetched = self._Next(count)
    while self.__more_results and len(fetched) < count:
        fetched += self._Next(count - len(fetched))
    return fetched
def _Next(self, count=None):
"""Returns the next batch of results.
Not intended to be used by application developers. Use the python
iterator protocol instead.
Values are returned in the order they are recieved from the datastore.
If there are values in the internal buffer they are returned, otherwise a
single RPC is run in an attempt to fulfill the request.
The optional argument, count, specifies the number of results to return.
However, the length of the returned list may be smaller than count. This is
the case if:
- the local buffer has results and count is greater than the number of
results in the buffer.
- count is greater than the number of remaining results
- the size of the remaining results exceeds the RPC buffer limit
Use _Get to ensure all possible entities are retrieved.
When count is None, if there are items in the local buffer, they are
all returned, otherwise the datastore backend is allowed to decide how many
entities to send.
The internal buffer is also used by the next() method so it is best not to
mix _Next() and next().
The results are | |
IiII - O0 % Ii1I
if 27 - 27: OoO0O00 + Oo0Ooo
if 92 - 92: I1IiiI % iII111i
if 31 - 31: OoooooooOO - oO0o / I1Ii111
if 62 - 62: i11iIiiIii - I11i
if 81 - 81: I11i
if 92 - 92: OOooOOo - Oo0Ooo - OoooooooOO / IiII - i1IIi
if 81 - 81: i1IIi / I1Ii111 % i11iIiiIii . iIii1I11I1II1 * OoOoOO00 + OoooooooOO
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
if 31 - 31: i1IIi % II111iiii
else :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
if 13 - 13: iIii1I11I1II1 - II111iiii % O0 . Ii1I % OoO0O00
if 2 - 2: OoooooooOO - Ii1I % oO0o / I1IiiI / o0oOOo0O0Ooo
return ( packet )
if 3 - 3: II111iiii / OOooOOo
if 48 - 48: ooOoO0o . I1ii11iIi11i
def send_packet(self, lisp_raw_socket, dest):
    """Send this encapsulated packet to 'dest' over a raw IP socket,
    fragmenting first when necessary.  NOTE: Python 2 only (legacy
    'except ..., e' syntax below)."""
    # Log the flow once when flow logging is enabled and the destination
    # differs from the inner header's destination.
    if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
    if 49 - 49: i1IIi - OoOoOO00 . Oo0Ooo + iIii1I11I1II1 - ooOoO0o / Oo0Ooo
    # Raw sendto() wants the plain address string without a LISP instance-id.
    dest = dest.print_address_no_iid()
    # fragment() yields the list of wire fragments plus a second value that
    # is forwarded to print_packet() below -- presumably a log label; TODO
    # confirm against fragment()'s definition.
    O0OO, iIi11ii1 = self.fragment()
    if 49 - 49: oO0o . OoOoOO00
    for OOOO in O0OO:
        # Only rewrite self.packet and print per-fragment detail when the
        # packet was actually fragmented.
        if (len(O0OO) != 1):
            self.packet = OOOO
            self.print_packet(iIi11ii1, True)
            if 73 - 73: Ii1I / I1IiiI / OoooooooOO + I1IiiI
        if 57 - 57: OOooOOo . Ii1I % o0oOOo0O0Ooo
        # Best-effort send: a socket failure is logged and the remaining
        # fragments are still attempted.
        try: lisp_raw_socket.sendto(OOOO, (dest, 0))
        except socket.error, ooo0OO:
            lprint("socket.sendto() failed: {}".format(ooo0OO))
        if 32 - 32: I11i / IiII - O0 * iIii1I11I1II1
    if 70 - 70: OoooooooOO % OoooooooOO % OoO0O00
    if 98 - 98: OoO0O00
    if 18 - 18: I11i + Oo0Ooo - OoO0O00 / I1Ii111 / OOooOOo
def send_l2_packet(self, l2_socket, mac_header):
    """Write this packet out the layer-2 tap device, prepending the
    supplied MAC header.  The packet is dropped (with a log line) when
    either the socket or the MAC header is unavailable."""
    if (l2_socket == None):
        lprint("No layer-2 socket, drop IPv6 packet")
        return
    if (mac_header == None):
        lprint("Could not build MAC header, drop IPv6 packet")
        return

    frame = mac_header + self.packet
    l2_socket.write(frame)
    return
def bridge_l2_packet(self, eid, db):
    """Bridge this packet out the interface where the dynamic-EID was
    learned.  Silently drops the packet when the EID, interface, or
    bridge socket cannot be resolved.  NOTE: Python 2 only (legacy
    'except ..., e' syntax below)."""
    # Look up the dynamic-EID entry; unknown EIDs are dropped silently.
    try: OoiiI11111II = db.dynamic_eids[eid.print_address_no_iid()]
    except: return
    # Map the learned interface name to our interface object.
    try: I111IIiIII = lisp_myinterfaces[OoiiI11111II.interface]
    except: return
    try:
        # NOTE(review): this local 'socket' shadows the socket module for
        # the rest of the function, so the 'except socket.error' clause
        # below resolves '.error' on the socket *object*, not the module
        # -- looks like a latent bug; confirm before relying on it.
        socket = I111IIiIII.get_bridge_socket()
        if (socket == None): return
    except: return
    if 48 - 48: iII111i % i11iIiiIii . OoooooooOO * IiII % OoO0O00 . iII111i
    # Best-effort send; failures are logged and the packet is dropped.
    try: socket.send(self.packet)
    except socket.error, ooo0OO:
        lprint("bridge_l2_packet(): socket.send() failed: {}".format(ooo0OO))
    if 6 - 6: O0 . ooOoO0o - oO0o / i11iIiiIii
    if 84 - 84: I11i / I1ii11iIi11i * o0oOOo0O0Ooo * OoO0O00 * OOooOOo * O0
    if 83 - 83: O0 % II111iiii + o0oOOo0O0Ooo / OoooooooOO
def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :
self . packet_error = ""
i1II1IiiIi = self . packet
Ooi1IIii1i = len ( i1II1IiiIi )
O0oOo0o0O0o = o0iii1i = True
if 30 - 30: OoOoOO00 / I1IiiI - OoO0O00 - iII111i - i11iIiiIii
if 84 - 84: i1IIi - I1IiiI % iII111i
if 80 - 80: o0oOOo0O0Ooo % iII111i
if 80 - 80: Ii1I
iioOO = 0
o0OOoOO = self . lisp_header . get_instance_id ( )
if ( is_lisp_packet ) :
I1OO = struct . unpack ( "B" , i1II1IiiIi [ 0 : 1 ] ) [ 0 ]
self . outer_version = I1OO >> 4
if ( self . outer_version == 4 ) :
if 34 - 34: I1Ii111 . OoOoOO00 / i11iIiiIii / iII111i
if 46 - 46: Oo0Ooo + II111iiii * I1IiiI + OOooOOo
if 31 - 31: Ii1I * o0oOOo0O0Ooo * Ii1I + OoO0O00 * o0oOOo0O0Ooo . I1Ii111
if 89 - 89: OoooooooOO * Ii1I * I1IiiI . ooOoO0o * Ii1I / iII111i
if 46 - 46: i11iIiiIii
Iiiii = struct . unpack ( "H" , i1II1IiiIi [ 10 : 12 ] ) [ 0 ]
i1II1IiiIi = lisp_ip_checksum ( i1II1IiiIi )
oOOoo0 = struct . unpack ( "H" , i1II1IiiIi [ 10 : 12 ] ) [ 0 ]
if ( oOOoo0 != 0 ) :
if ( Iiiii != 0 or lisp_is_macos ( ) == False ) :
self . packet_error = "checksum-error"
if ( stats ) :
stats [ self . packet_error ] . increment ( Ooi1IIii1i )
if 25 - 25: Oo0Ooo * I1IiiI + OOooOOo + I1Ii111 % OOooOOo
if 84 - 84: O0 % Ii1I . Ii1I . iII111i * I11i
lprint ( "IPv4 header checksum failed for outer header" )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
if 43 - 43: OoOoOO00 . I1ii11iIi11i % i1IIi
if 61 - 61: I1IiiI + oO0o % I1Ii111 % iIii1I11I1II1 - OoooooooOO
if 22 - 22: OOooOOo + II111iiii + Oo0Ooo
oOo00Oo0o00oo = LISP_AFI_IPV4
I11iiIi1i1 = 12
self . outer_tos = struct . unpack ( "B" , i1II1IiiIi [ 1 : 2 ] ) [ 0 ]
self . outer_ttl = struct . unpack ( "B" , i1II1IiiIi [ 8 : 9 ] ) [ 0 ]
iioOO = 20
elif ( self . outer_version == 6 ) :
oOo00Oo0o00oo = LISP_AFI_IPV6
I11iiIi1i1 = 8
oO0O0oo = struct . unpack ( "H" , i1II1IiiIi [ 0 : 2 ] ) [ 0 ]
self . outer_tos = ( socket . ntohs ( oO0O0oo ) >> 4 ) & 0xff
self . outer_ttl = struct . unpack ( "B" , i1II1IiiIi [ 7 : 8 ] ) [ 0 ]
iioOO = 40
else :
self . packet_error = "outer-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( Ooi1IIii1i )
lprint ( "Cannot decode outer header" )
return ( None )
if 64 - 64: OoOoOO00 % OoOoOO00 + o0oOOo0O0Ooo + Oo0Ooo
if 79 - 79: Oo0Ooo - OoooooooOO % I1Ii111 + OoooooooOO - I11i % OoOoOO00
self . outer_source . afi = oOo00Oo0o00oo
self . outer_dest . | |
1 0.5 0.3 0.0 0.1
RtCylinder_10_0 Y(SI) 1 0.3 0.0 -0.2 0.1
RtCylinder_30_0 Z(AP) 3 0.7 0.5 0.3 0.1
RtCylinder_30_0 Y(SI) 3 1.1 0.3 -0.5 0.4
Cone_10_0 Z(AP) 1 0.3 0.1 -0.1 0.2
Cone_10_0 Y(SI) 1 1.0 0.3 0.2 0.2
Cone_30_0 Z(AP) 3 -0.0 -0.8 -1.4 0.4
Cone_30_0 Y(SI) 3 0.9 -1.3 -1.5 0.5
RtCone_10_0 Z(AP) 1 0.0 -1.1 -1.4 0.3
RtCone_10_0 Y(SI) 1 0.0 -1.0 -1.4 0.5
RtCone_30_0 Z(AP) 3 -0.1 -2.5 -3.9 1.3
RtCone_30_0 Y(SI) 3 0.5 -2.5 -4.4 1.8
Average (N = 5) Y(SI) 1 0.3 -0.1 -0.4 0.2
Average (N = 5) Z(AP) 1 0.3 -0.1 -0.4 0.2
Average (N = 5) Y(SI) 3 0.6 -0.9 -1.7 0.7
Average (N = 5) Z(AP) 3 0.2 -0.8 -1.4 0.5
"""
ref_data = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/analytical_data.xlsx'
struc_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/STRUCTURES'
dose_grid_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/DVH-Analysis-Data-Etc/DOSE GRIDS'
st = 2
snames = ['Sphere_10_0', 'Sphere_30_0',
'Cylinder_10_0', 'Cylinder_30_0',
'RtCylinder_10_0', 'RtCylinder_30_0',
'Cone_10_0', 'Cone_30_0',
'RtCone_10_0', 'RtCone_30_0']
structure_path = [os.path.join(struc_dir, f + '.dcm') for f in snames]
structure_dict = dict(zip(snames, structure_path))
dose_files = [os.path.join(dose_grid_dir, f) for f in [
'Linear_AntPost_1mm_Aligned.dcm',
'Linear_AntPost_2mm_Aligned.dcm',
'Linear_AntPost_3mm_Aligned.dcm',
'Linear_SupInf_1mm_Aligned.dcm',
'Linear_SupInf_2mm_Aligned.dcm',
'Linear_SupInf_3mm_Aligned.dcm']]
# dose dict
dose_files_dict = {
'Z(AP)': {'1': dose_files[0], '2': dose_files[1], '3': dose_files[2]},
'Y(SI)': {'1': dose_files[3], '2': dose_files[4], '3': dose_files[5]}}
test_files = {}
for s_name in structure_dict:
grad_files = {}
for grad in dose_files_dict:
tick = str(int(int(re.findall(r'\d+', s_name)[0]) / 10))
grad_files[grad] = dose_files_dict[grad][tick]
test_files[s_name] = grad_files
result = OrderedDict()
for sname in snames:
struc_path = structure_dict[sname]
# set structure's object
struc = ScoringDicomParser(filename=struc_path)
structures = struc.GetStructures()
structure = structures[st]
# set end cap by 1/2 slice thickness
calculation_options['end_cap'] = structure['thickness'] / 2.0
# set up sampled structure
struc_teste = Structure(structure, calculation_options)
str_result = {}
test_data = test_files[sname]
for k in test_data:
# get dose
dose_file = test_data[k]
dicom_dose = ScoringDicomParser(filename=dose_file)
dhist, chist = struc_teste.calculate_dvh(dicom_dose)
dvh_data = struc_teste.get_dvh_data()
str_result[k] = dvh_data
result[sname] = str_result
dest = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testdata/test3_ref_dvh.obj'
# save(an_data, dest)
an_data = load(dest)
teste = []
curve_compare = []
for s in result:
for g in result[s]:
adata = an_data[s][g]
calc_data = result[s][g]
cmp = CurveCompare(adata['dose_axis'], adata['data'], calc_data['dose_axis'], calc_data['data'])
curve_stats = cmp.stats_paper
curve_stats['Resolution (mm)'] = str(int(int(re.findall(r'\d+', s)[0]) / 10))
curve_stats['Gradient'] = g
curve_compare.append(cmp)
tmp = pd.DataFrame(curve_stats, index=[s])
teste.append(tmp)
df_final = pd.concat(teste)
mask0 = np.logical_and(df_final['Resolution (mm)'] == '1', df_final['Gradient'] == 'Y(SI)')
mask1 = np.logical_and(df_final['Resolution (mm)'] == '1', df_final['Gradient'] == 'Z(AP)')
mask2 = np.logical_and(df_final['Resolution (mm)'] == '3', df_final['Gradient'] == 'Y(SI)')
mask3 = np.logical_and(df_final['Resolution (mm)'] == '3', df_final['Gradient'] == 'Z(AP)')
# Row 0
r0 = pd.DataFrame(['Y(SI)'], index=['Average (N = 5)'], columns=['Gradient'])
r0['Resolution (mm)'] = '1'
ri = pd.DataFrame(df_final[mask0].mean().round(1)).T
ri.index = ['Average (N = 5)']
r0 = r0.join(ri)
# Row 1
r1 = pd.DataFrame(['Z(AP)'], index=['Average (N = 5)'], columns=['Gradient'])
r1['Resolution (mm)'] = '1'
ri = pd.DataFrame(df_final[mask1].mean().round(1)).T
ri.index = ['Average (N = 5)']
r1 = r1.join(ri)
# Row 2
r2 = pd.DataFrame(['Y(SI)'], index=['Average (N = 5)'], columns=['Gradient'])
r2['Resolution (mm)'] = '3'
ri = pd.DataFrame(df_final[mask2].mean().round(1)).T
ri.index = ['Average (N = 5)']
r2 = r2.join(ri)
# Row 3
r3 = pd.DataFrame(['Z(AP)'], index=['Average (N = 5)'], columns=['Gradient'])
r3['Resolution (mm)'] = '3'
ri = pd.DataFrame(df_final[mask3].mean().round(1)).T
ri.index = ['Average (N = 5)']
r3 = r3.join(ri)
result_df = pd.concat([df_final, r0, r1, r2, r3])
print(result_df)
dest = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/core/validation_paper'
result_df.to_excel(os.path.join(dest, 'test_3_table.xls'))
#
# result_df.to_excel('test_3_table.xls')
if plot_curves:
# for c in curve_compare:
# c.plot_results()
for grad in ['Z(AP)', 'Y(SI)']:
for s_key in result:
adata = an_data[s_key][grad]
calc_data = result[s_key][grad]
fig, ax = plt.subplots()
ax.plot(adata['dose_axis'], adata['data'], label='Analytical')
ax.plot(calc_data['dose_axis'], calc_data['data'], label='Software')
ax.legend(loc='best')
ax.set_xlabel('Dose (cGy)')
ax.set_ylabel('volume (cc)')
title = s_key + ' Dose Gradient ' + grad + '.png'
ax.set_title(title)
fig.savefig(os.path.join(dest, title), format='png', dpi=100)
plt.show()
return curve_compare, result_df
def test_eval_competition_data():
    """Run the competition evaluator end-to-end and write a comparison of
    Plan IQ scores versus the Python-computed scores to an Excel file."""
    # TODO EVAL FILE ERRORS
    root_path = r'I:\Plan_competition_data\Final Reports'
    rs_file = r'C:\Users\vgalves\Dropbox\Plan_Competition_Project\Competition Package\DICOM Sets\RS.172.16.58.352.71.4.584747638204.208628.20160204185543.dcm'
    evaluator = EvalCompetition(root_path, rs_file, constrains, scores)
    evaluator.set_data()
    raw_scores = evaluator.calc_scores()
    comp_data = evaluator.comp_data
    # calc_scores() yields (name, score) tuples on success and bare file
    # paths on failure; keep only the successful pairs.
    ok_scores = [item for item in raw_scores if isinstance(item, tuple)]
    by_name = comp_data.set_index(0)
    by_name = by_name.groupby(by_name.index).first()
    df = pd.DataFrame(ok_scores).set_index(0)
    plan_iq = by_name.ix[df.index]['plan_iq_scores']
    comp = pd.concat([plan_iq, df], axis=1)
    comp['delta'] = comp[1] - comp['plan_iq_scores']
    comp = comp.rename(columns={1: 'py_score'})
    comp.to_excel('Plan_IQ_versus_Python_BODY_DMAX.xls')
def get_competition_data(root_path):
    """Walk root_path and collect competition DICOM files plus report scores.

    Recursively finds DICOM files, keeps RT-DOSE and RT-PLAN files, labels
    each with the participant name (first token of the parent folder name,
    up to the first '-'), and attaches the Plan IQ score parsed from each
    PDF report's file name.  Each report score is duplicated so it lines up
    with the participant's dose+plan file pair.

    Args:
        root_path: directory tree to scan.

    Returns:
        pandas DataFrame with columns: 'index' (file path), 0 (participant
        name), 1 (rt type), 'plan_iq_scores'.
    """
    files = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files if
             name.endswith(('.dcm', '.DCM'))]
    report_files = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files if
                    name.endswith(('.pdf', '.PDF'))]
    filtered_files = OrderedDict()
    for f in files:
        try:
            obj = ScoringDicomParser(filename=f)
            rt_type = obj.GetSOPClassUID()
            # Keep only dose and plan files; both branches of the original
            # did exactly the same labeling, so they are merged here.
            if rt_type in ('rtdose', 'rtplan'):
                tmp = f.split(os.path.sep)[-2].split()
                name = tmp[0].split('-')[0]
                filtered_files[f] = [name, rt_type]
        except Exception:
            # Narrowed from a bare except: unparseable files are logged and
            # skipped without swallowing KeyboardInterrupt/SystemExit.
            logger.exception('Error in file %s' % f)
    data = pd.DataFrame(filtered_files).T
    plan_iq_scores = []
    for f in report_files:
        p, r = os.path.split(f)
        # BUG FIX: raw string -- '\d' in a plain string literal is an
        # invalid escape sequence (DeprecationWarning, error in newer
        # Pythons).
        s = re.findall(r'\d+\.\d+', r)
        # One score per report, duplicated for the rtdose + rtplan rows.
        plan_iq_scores.append(s * 2)
    plan_iq_scores = np.ravel(plan_iq_scores).astype(float)
    data['plan_iq_scores'] = plan_iq_scores
    return data.reset_index()
def batch_call_dvh(root_path, rs_file, clean_files=False):
    """Compute and plot cumulative DVHs for every RT-DOSE file under root_path.

    For each participant dose file that has no cached '.dvh' result yet,
    calculates the DVHs against the shared structure set, caches the result
    next to the dose file, and saves a PNG plot in the same folder.

    Args:
        root_path: directory tree containing participant DICOM files.
        rs_file: path to the shared RT-STRUCT file.
        clean_files: when True, delete all cached '.dvh' files first so
            everything is recomputed.
    """
    # manager = plt.get_current_fig_manager()
    # manager.window.showMaximized()
    data = get_competition_data(root_path)
    if clean_files:
        # Remove cached DVH results to force a full recomputation.
        dvh_files = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files if
                     name.endswith('.dvh')]
        for dv in dvh_files:
            os.remove(dv)
    # Column layout comes from get_competition_data(): 'index' is the file
    # path, column 0 the participant name, column 1 the rt type.
    mask = data[1] == 'rtdose'
    rd_files = data['index'][mask].values
    names = data[0][mask].values
    rtss = ScoringDicomParser(filename=rs_file)
    structures = rtss.GetStructures()
    i = 0
    for f, n in zip(rd_files, names):
        p = os.path.splitext(f)
        out_file = p[0] + '.dvh'
        dest, df = os.path.split(f)
        # Skip dose files that already have a cached DVH result.
        if not os.path.exists(out_file):
            print('Iteration: %i' % i)
            print('processing file: %s' % f)
            calcdvhs = calc_dvhs(n, rs_file, f, out_file=out_file)
            i += 1
            print('processing file done %s' % f)
            fig, ax = plt.subplots()
            fig.set_figheight(12)
            fig.set_figwidth(20)
            for key, structure in structures.items():
                sname = structure['name']
                # Volume normalized to percent of the structure's total
                # volume (first bin of the cumulative histogram).
                ax.plot(calcdvhs[sname]['data'] / calcdvhs[sname]['data'][0] * 100,
                        label=sname, linewidth=2.0, color=np.array(structure['color'], dtype=float) / 255)
            ax.legend(loc=7, borderaxespad=-5)
            ax.set_ylabel('Vol (%)')
            ax.set_xlabel('Dose (cGy)')
            ax.set_title(n + ':' + df)
            fig_name = os.path.join(dest, n + '_RD_calc_DVH.png')
            fig.savefig(fig_name, format='png', dpi=100)
            plt.close('all')
class EvalCompetition(object):
    """Evaluates all competition plans found under a root directory.

    Locates each participant's RT-DOSE/RT-PLAN files, computes (or loads)
    cached DVH files, and scores every plan against the shared structure
    set, constraints, and score table.
    """

    def __init__(self, root_path, rs_file, constrains, scores):
        self.root_path = root_path
        self.rs_file = rs_file
        self.constrains = constrains
        self.scores = scores
        self.dvh_files = []
        self.results = []
        # DataFrame of participant files (layout per get_competition_data()).
        self.comp_data = get_competition_data(root_path)

    def save_reports(self):
        # Not implemented yet.
        pass

    def save_dvh_all(self, clean_files=False, end_cap=False, dicom_dvh=False):
        """Compute and cache a '.dvh' file for every RT-DOSE file.

        Args:
            clean_files: delete existing '.dvh' caches first.
            end_cap: forwarded to calc_dvhs_upsampled().
            dicom_dvh: when True, extract the DVH embedded in the DICOM
                dose file instead of recalculating it.
        """
        # TODO implement saving TPS information, constrain and scoring report on dvh file encapsulated on Participant Class
        data = get_competition_data(self.root_path)
        self.comp_data = data
        if clean_files:
            # Remove cached DVH results to force recomputation.
            dvh_files = [os.path.join(root, name) for root, dirs, files in os.walk(self.root_path) for name in files if
                         name.endswith('.dvh')]
            for dv in dvh_files:
                os.remove(dv)
        mask = data[1] == 'rtdose'
        rd_files = data['index'][mask].values
        names = data[0][mask].values
        rtss = ScoringDicomParser(filename=self.rs_file)
        structures = rtss.GetStructures()
        i = 0
        for f, n in zip(rd_files, names):
            p = os.path.splitext(f)
            out_file = p[0] + '.dvh'
            dest, df = os.path.split(f)
            # Skip dose files that already have a cached DVH.
            if not os.path.exists(out_file):
                print('Iteration: %i' % i)
                print('processing file: %s' % f)
                if dicom_dvh:
                    try:
                        calcdvhs = save_dicom_dvhs(n, self.rs_file, f, out_file=out_file)
                    except Exception:
                        # No embedded DVH: log the TPS info to aid debugging.
                        rt_dose = ScoringDicomParser(filename=f)
                        k = rt_dose.get_tps_data()
                        txt = 'No DVH in file %s \n TPS info:' % f
                        txt += ', '.join("{!s}={!r}".format(key, val) for (key, val) in k.items())
                        logger.debug(txt)
                else:
                    calcdvhs = calc_dvhs_upsampled(n, self.rs_file, f, self.scores.keys(), out_file=out_file,
                                                   end_cap=end_cap)
                i += 1
                print('processing file done %s' % f)

    def set_data(self):
        """Refresh comp_data and the list of cached '.dvh' files."""
        self.comp_data = get_competition_data(self.root_path)
        self.dvh_files = [os.path.join(root, name) for root, dirs, files in os.walk(self.root_path) for name in files if
                          name.endswith('.dvh')]

    def calc_scores(self):
        """Score every cached DVH file in parallel.

        Returns:
            List of (name, score) tuples, or the dose-file path for plans
            whose DVH data could not be used (see get_score()).
        """
        res = Parallel(n_jobs=-1, verbose=11)(
            delayed(self.get_score)(dvh_file) for dvh_file in self.dvh_files)
        self.results = res
        return res

    def get_score(self, dvh_file):
        """Score one participant plan from its cached DVH file.

        Falls back to the DVH embedded in the DICOM dose file when the
        cached DVH fails to score.

        Returns:
            (name, total_score) on success, or the dose-file path when no
            usable DVH data exists.
        """
        rd_file, rp_file, name = self.get_dicom_data(self.comp_data, dvh_file)
        try:
            obj = Scoring(rd_file, self.rs_file, rp_file, self.constrains, self.scores)
            obj.set_dvh_data(dvh_file)
            print('Score:', name, obj.get_total_score())
            return name, obj.get_total_score()
        except Exception:
            logger.exception('Error in file: %s' % dvh_file)
            try:
                obj = Scoring(rd_file, self.rs_file, rp_file, self.constrains, self.scores)
                obj.set_dicom_dvh_data()
                print('Score:', name, obj.get_total_score())
                return name, obj.get_total_score()
            except Exception:
                logger.exception('No DVH data in file %s' % rd_file)
                return rd_file

    @staticmethod
    def get_dicom_data(data, dvh_file):
        """Look up the rtdose/rtplan file paths for the participant that
        owns dvh_file.

        Returns:
            (rd_file, rp_file, name), or None when the lookup fails (the
            failure is logged).
        """
        # BUG FIX: the original wrapped this body in
        # "if __name__ == '__main__':", which silently turned the method
        # into a no-op (always returning None) whenever the module was
        # imported rather than run as a script.
        try:
            dvh = load(dvh_file)
            name = dvh['participant']
            p_files = data[data[0] == name].set_index(1)
            rd_file = p_files.ix['rtdose']['index']
            rp_file = p_files.ix['rtplan']['index']
            return rd_file, rp_file, name
        except Exception:
            logger.exception('error on file %s' % dvh_file)
# TODO wrap DICOM-RT data to eval scores
def read_planiq_dvh(f):
"""
Reads plan IQ exported txt DVH data
:param f: path to file txt
:return: Pandas Dataframe with DVH data in cGy and vol cc
"""
with open(f, 'r') as io:
txt = io.readlines()
struc_header = [t.strip() for t in txt[0].split('\t') if | |
from neuron import h
import numpy as np
from .cell import Cell
#from .. import synapses
from ..util import nstomho
from ..util import Params
from .. import data
__all__ = ['TStellate', 'TStellateRothman', 'TStellateNav11']
class TStellate(Cell):
    """Factory class for T-stellate (planar multipolar) cell models.

    create() dispatches to the concrete model implementation; make_psd()
    builds the postsynaptic density when a presynaptic terminal connects
    onto this cell.
    """
    type = 'tstellate'

    @classmethod
    def create(cls, model='RM03', **kwds):
        """Instantiate a T-stellate cell of the requested model flavor.

        Parameters
        ----------
        model : str (default: 'RM03')
            'RM03' : original Rothman-Manis 2003, 22C, point cell, extendable.
            'Nav11': Xie-Manis, 2013, 37C, point cell, extendable.
        **kwds :
            Forwarded to the concrete model constructor.

        Raises
        ------
        ValueError
            If the model name is not recognized.
        """
        if model == 'RM03':
            return TStellateRothman(**kwds)
        elif model == 'Nav11':
            return TStellateNav11(**kwds)
        else:
            # BUG FIX: was `raise ValueError('...%s...', type)`, which never
            # formatted the message and referenced the *builtin* `type`
            # instead of the offending `model` argument.
            raise ValueError('TStellate type %s is unknown' % model)

    def make_psd(self, terminal, psd_type, **kwds):
        """
        Connect a presynaptic terminal to one post section at the specified location, with the fraction
        of the "standard" conductance determined by gbar.
        The default condition is to try to pass the default unit test (loc=0.5)
        Scaling is corrected by initial release probability now.

        Parameters
        ----------
        terminal : Presynaptic terminal (NEURON object)
        psd_type : either simple or multisite PSD for bushy cell
        kwds: dict of options.
            Available options:
            postsize : expect a list consisting of [sectionno, location (float)]
            AMPAScale : float to scale the ampa currents

        Raises
        ------
        TypeError for an unsupported presynaptic cell type,
        ValueError for an unknown psd_type.
        """
        if 'postsite' in kwds:  # use a defined location instead of the default (soma(0.5)
            postsite = kwds['postsite']
            loc = postsite[1]  # where on the section?
            uname = 'sections[%d]' % postsite[0]  # make a name to look up the neuron section object
            post_sec = self.hr.get_section(uname)  # Tell us where to put the synapse.
        else:
            loc = 0.5
            post_sec = self.soma
        if psd_type == 'simple':
            return self.make_exp2_psd(post_sec, terminal, loc=loc)
        elif psd_type == 'multisite':
            if terminal.cell.type == 'sgc':
                # Max conductances for the glu mechanisms are calibrated by
                # running `synapses/tests/test_psd.py`. The test should fail
                # if these values are incorrect
                self.AMPAR_gmax = data.get('sgc_synapse', species=self.species,
                                           post_type=self.type, field='AMPAR_gmax')*1e3
                self.NMDAR_gmax = data.get('sgc_synapse', species=self.species,
                                           post_type=self.type, field='NMDAR_gmax')*1e3
                self.Pr = data.get('sgc_synapse', species=self.species,
                                   post_type=self.type, field='Pr')
                # adjust gmax to correct for initial Pr
                self.AMPAR_gmax = self.AMPAR_gmax/self.Pr
                self.NMDAR_gmax = self.NMDAR_gmax/self.Pr
                # old values:
                # AMPA_gmax = 0.22479596944138733*1e3 # factor of 1e3 scales to pS (.mod mechanisms) from nS.
                # NMDA_gmax = 0.12281291946623739*1e3
                if 'AMPAScale' in kwds:
                    self.AMPAR_gmax = self.AMPAR_gmax * kwds['AMPAScale']  # allow scaling of AMPA conductances
                if 'NMDAScale' in kwds:
                    self.NMDAR_gmax = self.NMDAR_gmax*kwds['NMDAScale']
                return self.make_glu_psd(post_sec, terminal, self.AMPAR_gmax, self.NMDAR_gmax, loc=loc)
            elif terminal.cell.type == 'dstellate':
                return self.make_gly_psd(post_sec, terminal, type='glyfast', loc=loc)
            elif terminal.cell.type == 'tuberculoventral':
                return self.make_gly_psd(post_sec, terminal, type='glyfast', loc=loc)
            else:
                raise TypeError("Cannot make PSD for %s => %s" %
                                (terminal.cell.type, self.type))
        else:
            raise ValueError("Unsupported psd type %s" % psd_type)
class TStellateRothman(TStellate):
"""
VCN T-stellate base model.
Rothman and Manis, 2003abc (Type I-c, Type I-t)
"""
def __init__(self, morphology=None, decorator=None, nach=None,
             ttx=False,
             species='guineapig', modelType=None, debug=False):
    """
    Initialize a planar stellate (T-stellate) cell, using the default parameters for guinea pig from
    R&M2003, as a type I cell.
    Modifications to the cell can be made by calling methods below. These include:
    Converting to a type IA model (add transient K current) (species: guineapig-TypeIA).
    Changing "species" to mouse or cat (scales conductances)

    Parameters
    ----------
    morphology : string (default: None)
        a file name to read the cell morphology from. If a valid file is found, a cell is constructed
        as a cable model from the hoc file.
        If None (default), the only a point model is made, exactly according to RM03.
    decorator : Python function (default: None)
        decorator is a function that "decorates" the morphology with ion channels according
        to a set of rules.
        If None, a default set of channels aer inserted into the first soma section, and the
        rest of the structure is "bare".
    nach : string (default: None)
        nach selects the type of sodium channel that will be used in the model. A channel mechanism
        by that name must exist. The default is 'nacn', from R&M2003.
    ttx : Boolean (default: False)
        If ttx is True, then the sodium channel conductance is set to 0 everywhere in the cell.
        Currently, this is not implemented.
    species: string (default 'guineapig')
        species defines the channel density that will be inserted for different models. Note that
        if a decorator function is specified, this argument is ignored.
    modelType: string (default: None)
        modelType specifies the type of the model that will be used (e.g., "I-c", "I-t").
        modelType is passed to the decorator, or to species_scaling to adjust point models.
    debug: boolean (default: False)
        debug is a boolean flag. When set, there will be multiple printouts of progress and parameters.

    Returns
    -------
    Nothing
    """
    super(TStellateRothman, self).__init__()
    self.i_test_range = {'pulse': (-0.15, 0.15, 0.01)}
    # CONSISTENCY FIX: this method mixed Python 2 print statements with
    # print() calls; all output now uses print(), which is valid in both
    # Python 2 and 3.  `== None` comparisons replaced with `is None`.
    if modelType is None:
        modelType = 'I-c'
    # Default sodium channel is 'nacn' from R&M2003 for both species.
    if nach is None and species == 'guineapig':
        nach = 'nacn'
    if nach is None and species == 'mouse':
        nach = 'nacn'
        # Mouse cells are tested over a wider current range.
        # NOTE(review): nesting of this assignment under the mouse branch
        # is reconstructed (indentation was ambiguous in the source) --
        # confirm against upstream cnmodel.
        self.i_test_range = {'pulse': (-1.0, 1.0, 0.05)}
    self.status = {'soma': True, 'axon': False, 'dendrites': False, 'pumps': False,
                   'na': nach, 'species': species, 'modelType': modelType, 'ttx': ttx, 'name': 'TStellate',
                   'morphology': morphology, 'decorator': decorator, 'temperature': None}
    self.vrange = [-70., -55.]
    if morphology is None:
        # Instantiate a basic soma-only ("point") model.
        print("<< TStellate model: Creating point cell, type={:s} >>".format(modelType))
        soma = h.Section(name="TStellate_Soma_%x" % id(self))  # one compartment of about 29000 um2
        soma.nseg = 1
        self.add_section(soma, 'soma')
    else:
        # Instantiate a structured model with the morphology as specified
        # by the morphology file.
        print("<< TStellate: Creating cell with morphology = %s>>" % morphology)
        self.set_morphology(morphology_file=morphology)
    # Decorate the morphology with ion channels.
    if decorator is None:  # basic model, only on the soma
        self.mechanisms = ['kht', 'ka', 'ihvcn', 'leak', nach]
        for mech in self.mechanisms:
            self.soma.insert(mech)
        self.soma.ek = self.e_k
        self.soma.ena = self.e_na
        self.soma().ihvcn.eh = self.e_h
        self.soma().leak.erev = self.e_leak
        self.species_scaling(silent=True, species=species, modelType=modelType)  # set the default cell parameters
    else:  # decorate according to a defined set of rules on all cell compartments
        self.decorate()
    self.save_all_mechs()  # save all mechanisms inserted, location and gbar values...
    self.get_mechs(self.soma)
    if debug:
        print("<< T-stellate: JSR Stellate Type 1 cell model created >>")
def get_cellpars(self, dataset, species='guineapig', celltype='I-c'):
    """Assemble the soma parameters for this cell type from the data tables.

    Returns a Params object holding the soma capacitance, sodium channel
    type, and the per-mechanism conductance/reversal fields.
    """
    pars = Params(
        cap=data.get(dataset, species=species, cell_type=celltype,
                     field='soma_Cap'),
        natype=data.get(dataset, species=species, cell_type=celltype,
                        field='soma_na_type'),
    )
    fieldnames = ('soma_na_gbar', 'soma_kht_gbar', 'soma_ka_gbar',
                  'soma_ih_gbar', 'soma_ih_eh',
                  'soma_leak_gbar', 'soma_leak_erev',
                  'soma_e_k', 'soma_e_na')
    for fieldname in fieldnames:
        pars.additem(fieldname, data.get(dataset, species=species,
                                         cell_type=celltype,
                                         field=fieldname))
    return pars
def species_scaling(self, species='guineapig', modelType='I-c', silent=True):
"""
Adjust all of the conductances and the cell size according to the species requested.
Used ONLY for point models.
This scaling routine also sets the temperature for the model to a default value. Some models
can be run at multiple temperatures, and so a default from one of the temperatures is used.
The calling cell.set_temperature(newtemp) will change the conductances and reinitialize
the cell to the new temperature settings.
Parameters
----------
species : string (default: 'guineapig')
name of the species to use for scaling the conductances in the base point model
Must be one of mouse, cat, guineapig
modelType: string (default: 'I-c')
definition of model type from RM03 models, type I-c or type I-t
silent : boolean (default: True)
run silently (True) or verbosely (False)
"""
soma = self.soma
if modelType == 'I-c':
celltype = 'tstellate'
elif modelType == 'I-t':
celltype = 'tstellate-t'
else:
raise ValueError('model type not recognized')
if species == 'mouse': # and modelType == 'I-c':
# use conductance levels from Cao et al., J. Neurophys., 2007.
# model description in Xie and Manis 2013. Note that
# conductances were not scaled for temperature (rates were)
# so here we reset the default Q10's for conductance (g) to 1.0
print ' Setting Conductances for mouse I-c Tstellate cell, Xie and Manis, 2013'
self.c_m = 0.9 # default in units of F/cm^2
dataset = 'XM13_channels'
self.vrange = [-75., -55.]
self.set_soma_size_from_Cm(25.0)
self._valid_temperatures = (34.,)
if self.status['temperature'] is None:
self.set_temperature(34.)
pars = self.get_cellpars(dataset, species=species, celltype=celltype)
# pars.show()
self.set_soma_size_from_Cm(pars.cap)
self.status['na'] = pars.natype
self.adjust_na_chans(soma, gbar=pars.soma_na_gbar, sf=1.0)
soma().kht.gbar = nstomho(pars.soma_kht_gbar, self.somaarea)
soma().ka.gbar = nstomho(pars.soma_ka_gbar, self.somaarea)
soma().ihvcn.gbar = nstomho(pars.soma_ih_gbar, self.somaarea)
soma().ihvcn.eh = pars.soma_ih_eh # <NAME>, 2006
soma().leak.gbar = nstomho(pars.soma_leak_gbar, self.somaarea)
soma().leak.erev = pars.soma_leak_erev
self.e_k = pars.soma_e_k
self.e_na = pars.soma_e_na
soma.ena = | |
#!/usr/bin/env python
"""Client actions related to searching files and directories."""
import functools
import stat
import logging
from grr.client import actions
from grr.client import vfs
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
class Find(actions.IteratedAction):
    """Recurses through a directory returning files which match conditions.

    This is an iterated action: each call resumes where the previous
    iteration stopped, using the client-side state dict maintained by
    ListDirectory.
    """
    in_rdfvalue = rdf_client.FindSpec
    out_rdfvalues = [rdf_client.FindSpec]

    # If this is true we cross filesystem boundaries.
    # This defaults to true so you can see mountpoints with ListDirectory.
    # It defaults to false in the actual find flow, and the find proto.
    cross_devs = True

    # The filesystem we are limiting ourselves to, if cross_devs is false.
    filesystem_id = None

    def ListDirectory(self, pathspec, state, depth=0):
        """A Recursive generator of files.

        Args:
          pathspec: Directory to list.
          state: Dict mapping a collapsed path to the index of the next child
            to process, so an interrupted listing can be resumed later.
          depth: Current recursion depth; recursion stops once it reaches
            self.request.max_depth.

        Yields:
          Stat entries for files found, descending into subdirectories first.
        """
        # Limit recursion depth
        if depth >= self.request.max_depth:
            return

        try:
            fd = vfs.VFSOpen(pathspec, progress_callback=self.Progress)
            files = fd.ListFiles()
        except (IOError, OSError) as e:
            if depth == 0:
                # We failed to open the directory the server asked for because dir
                # doesn't exist or some other reason. So we set status and return
                # back to the caller ending the Iterator.
                self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e)
            else:
                # Can't open the directory we're searching, ignore the directory.
                logging.info("Find failed to ListDirectory for %s. Err: %s", pathspec,
                             e)
            return

        # If we are not supposed to cross devices, and don't know yet
        # which device we are on, we need to find out.
        if not self.request.cross_devs and self.filesystem_id is None:
            dir_stat = fd.Stat()
            self.filesystem_id = dir_stat.st_dev

        # Recover the start point for this directory from the state dict so we can
        # resume.
        start = state.get(pathspec.CollapsePath(), 0)

        for i, file_stat in enumerate(files):
            # Skip the files we already did before
            if i < start:
                continue

            if stat.S_ISDIR(file_stat.st_mode):
                # Do not traverse directories in a different filesystem.
                if self.request.cross_devs or self.filesystem_id == file_stat.st_dev:
                    for child_stat in self.ListDirectory(file_stat.pathspec, state,
                                                         depth + 1):
                        yield child_stat

            # Record progress after descending so a resumed run re-enters the
            # child directory only via its own state entry, not this index.
            state[pathspec.CollapsePath()] = i + 1
            yield file_stat

        # Now remove this from the state dict to prevent it from getting too large
        try:
            del state[pathspec.CollapsePath()]
        except KeyError:
            pass

    def TestFileContent(self, file_stat):
        """Checks the file for the presence of the regular expression.

        Reads at most self.request.max_data bytes in ~1 MB chunks, carrying a
        100-byte tail between chunks so matches spanning a chunk boundary are
        not missed. I/O errors are treated as "no match".
        """
        # Content regex check
        try:
            data = ""

            with vfs.VFSOpen(file_stat.pathspec,
                             progress_callback=self.Progress) as fd:
                # Only read this much data from the file.
                while fd.Tell() < self.request.max_data:
                    data_read = fd.read(1024000)
                    if not data_read:
                        break
                    data += data_read

                    # Got it.
                    if self.request.data_regex.Search(data):
                        return True

                    # Keep a bit of context from the last buffer to ensure we dont miss a
                    # match broken by buffer. We do not expect regex's to match something
                    # larger than about 100 chars.
                    data = data[-100:]
        except (IOError, KeyError):
            pass

        return False

    def BuildChecks(self, request):
        """Parses request and returns a list of filter callables.

        Each callable will be called with the StatEntry and returns True if the
        entry should be suppressed.

        Args:
          request: A FindSpec that describes the search.

        Returns:
          a list of callables which return True if the file is to be suppressed.
        """
        result = []
        if request.HasField("start_time") or request.HasField("end_time"):

            # Suppress entries whose mtime falls outside [start_time, end_time].
            def FilterTimestamp(file_stat, request=request):
                return file_stat.HasField("st_mtime") and (
                    file_stat.st_mtime < request.start_time or
                    file_stat.st_mtime > request.end_time)

            result.append(FilterTimestamp)

        if request.HasField("min_file_size") or request.HasField("max_file_size"):

            def FilterSize(file_stat, request=request):
                return file_stat.HasField("st_size") and (
                    file_stat.st_size < request.min_file_size or
                    file_stat.st_size > request.max_file_size)

            result.append(FilterSize)

        if request.HasField("perm_mode"):

            def FilterPerms(file_stat, request=request):
                return (file_stat.st_mode & request.perm_mask) != request.perm_mode

            result.append(FilterPerms)

        if request.HasField("uid"):

            def FilterUID(file_stat, request=request):
                return file_stat.st_uid != request.uid

            result.append(FilterUID)

        if request.HasField("gid"):

            def FilterGID(file_stat, request=request):
                return file_stat.st_gid != request.gid

            result.append(FilterGID)

        if request.HasField("path_regex"):
            # Bind the compiled regex now (avoids late-binding surprises).
            regex = request.path_regex

            def FilterPath(file_stat, regex=regex):
                """Suppress any filename not matching the regular expression."""
                return not regex.Search(file_stat.pathspec.Basename())

            result.append(FilterPath)

        if request.HasField("data_regex"):

            def FilterData(file_stat, **_):
                """Suppress files that do not match the content."""
                return not self.TestFileContent(file_stat)

            result.append(FilterData)

        return result

    def Iterate(self, request, client_state):
        """Restores its way through the directory using an Iterator.

        Args:
          request: The FindSpec driving this search, including the iterator
            that limits how many entries are examined per call.
          client_state: Persistent dict ListDirectory uses to resume.
        """
        self.request = request
        filters = self.BuildChecks(request)
        limit = request.iterator.number

        # TODO(user): What is a reasonable measure of work here?
        for count, f in enumerate(self.ListDirectory(request.pathspec,
                                                     client_state)):
            self.Progress()

            # Ignore this file if any of the checks fail.
            if not any((check(f) for check in filters)):
                self.SendReply(rdf_client.FindSpec(hit=f))

            # We only check a limited number of files in each iteration. This might
            # result in returning an empty response - but the iterator is not yet
            # complete. Flows must check the state of the iterator explicitly.
            if count >= limit - 1:
                logging.debug("Processed %s entries, quitting", count)
                return

        # End this iterator
        request.iterator.state = rdf_client.Iterator.State.FINISHED
class Grep(actions.ActionPlugin):
"""Search a file for a pattern."""
in_rdfvalue = rdf_client.GrepSpec
out_rdfvalues = [rdf_client.BufferReference]
def FindRegex(self, regex, data):
    """Yield (start, end) offset pairs for every regex hit in data."""
    return ((hit.start(), hit.end()) for hit in regex.FindIter(data))
def FindLiteral(self, pattern, data):
    """Yield (start, end) offset pairs for every occurrence of pattern in data."""
    # The pattern arrives XOR-obscured; decode it in place only while scanning
    # so the plaintext is not visible in memory outside this window.
    utils.XorByteArray(pattern, self.xor_in_key)
    pos = 0
    while True:
        # We assume here that data.find does not make a copy of pattern.
        pos = data.find(pattern, pos)
        if pos < 0:
            break
        yield (pos, pos + len(pattern))
        # Advance one byte so overlapping occurrences are also reported.
        pos += 1
    # Re-obscure the pattern in place before finishing.
    utils.XorByteArray(pattern, self.xor_in_key)
# Size of each data chunk read from the target (10 MB).
BUFF_SIZE = 1024 * 1024 * 10
# Bytes of context kept around each chunk (preamble/postscript) so hits
# spanning a chunk boundary, and their surrounding snippet, are not lost.
ENVELOPE_SIZE = 1000
# Upper bound on reported hits — presumably enforced later in Run;
# TODO(review): confirm where this limit is applied.
HIT_LIMIT = 10000
def Run(self, args):
"""Search the file for the pattern.
This implements the grep algorithm used to scan files. It reads
the data in chunks of BUFF_SIZE (10 MB currently) and can use
different functions to search for matching patterns. In every
step, a buffer that is a bit bigger than the block size is used in
order to return all the requested results. Specifically, a
preamble is used in order to not miss any patterns that start in
one block of data and end in the next and also a postscript buffer
is kept such that the algorithm can return bytes trailing the
pattern even if the pattern is at the end of one block.
One block:
-----------------------------
| Pre | Data | Post |
-----------------------------
Searching the pattern is done here:
<------------------->
The following block is constructed like this:
-----------------------------
| Pre | Data | Post |
-----------------------------
|
-----------------------------
| Pre | Data | Post |
-----------------------------
The preamble is filled from Data so every hit that happens to fall
entirely into the preamble has to be discarded since it has
already been discovered in the step before.
Grepping for memory
If this action is used to grep the memory of a client machine
using one of the GRR memory acquisition drivers, we have to be
very careful not to have any hits in the GRR process memory space
itself. Therefore, if the input is a literal, it is XOR encoded
and only visible in memory when the pattern is matched. This is
done using bytearrays which guarantees in place updates and no
leaking patterns. Also the returned data is encoded using a
different XOR 'key'.
This should guarantee that there are no hits when the pattern is
not present in memory. However, since the data will be copied to
the preamble and the postscript, a single pattern might in some
cases produce multiple hits.
Args:
args: A protobuf describing the grep request.
Raises:
RuntimeError: No search pattern has been given in the request.
"""
fd = vfs.VFSOpen(args.target, progress_callback=self.Progress)
fd.Seek(args.start_offset)
base_offset = args.start_offset
self.xor_in_key = args.xor_in_key
self.xor_out_key = args.xor_out_key
if args.regex:
find_func = functools.partial(self.FindRegex, args.regex)
elif args.literal:
find_func = functools.partial(self.FindLiteral,
bytearray(utils.SmartStr(args.literal)))
else:
raise RuntimeError("Grep needs a regex or a literal.")
preamble_size = 0
postscript_size = 0
hits = 0
data = ""
while fd.Tell() < args.start_offset + args.length:
# Base size to read is at most the buffer size.
to_read = min(args.length, self.BUFF_SIZE,
args.start_offset + args.length - fd.Tell())
# Read some more data for the snippet.
to_read += self.ENVELOPE_SIZE - postscript_size
read_data = fd.Read(to_read)
data = data[-postscript_size - self.ENVELOPE_SIZE:] + read_data
postscript_size = max(0, self.ENVELOPE_SIZE - (to_read - len(read_data)))
data_size = len(data) - preamble_size - postscript_size
if data_size == 0 and postscript_size == 0:
break
for (start, end) in find_func(data):
# Ignore hits in the preamble.
if end <= preamble_size:
continue
# Ignore hits in the postscript.
if end > preamble_size | |
except Exception as e:
msg = (_('Failed to create share %(share_name)s: %(e)s') %
{'share_name': share_name, 'e': six.text_type(e)})
LOG.exception(msg)
raise exception.ShareBackendException(msg)
try:
result = self._client.getfshare(
protocol, share_name,
fpg=fpg, vfs=vfs, fstore=fstore)
LOG.debug("getfshare result=%s", result)
except Exception as e:
msg = (_('Failed to get fshare %(share_name)s after creating it: '
'%(e)s') % {'share_name': share_name,
'e': six.text_type(e)})
LOG.exception(msg)
raise exception.ShareBackendException(msg)
if result['total'] != 1:
msg = (_('Failed to get fshare %(share_name)s after creating it. '
'Expected to get 1 fshare. Got %(total)s.') %
{'share_name': share_name, 'total': result['total']})
LOG.error(msg)
raise exception.ShareBackendException(msg)
if protocol == 'nfs':
return result['members'][0]['sharePath']
else:
return result['members'][0]['shareName']
def create_share_from_snapshot(self, share_id, share_proto, extra_specs,
                               orig_project_id, orig_share_id,
                               snapshot_id, fpg, vfs,
                               comment=OPEN_STACK_MANILA):
    """Create a read-only share that exposes an existing snapshot."""
    protocol = self.ensure_supported_protocol(share_proto)
    snapshot_tag = self.ensure_prefix(snapshot_id)
    orig_share_name = self.ensure_prefix(orig_share_id)

    snapshot = self._find_fsnap(orig_project_id, orig_share_name, protocol,
                                snapshot_tag, fpg, vfs)
    if not snapshot:
        msg = (_('Failed to create share from snapshot for '
                 'FPG/VFS/tag %(fpg)s/%(vfs)s/%(tag)s. '
                 'Snapshot not found.') %
               {'fpg': fpg,
                'vfs': vfs,
                'tag': snapshot_tag})
        LOG.error(msg)
        raise exception.ShareBackendException(msg)

    fstore = snapshot['fstoreName']
    if fstore == orig_share_name:
        # No subdir for original share created with fstore_per_share
        sharedir = '.snapshot/%s' % snapshot['snapName']
    else:
        sharedir = '.snapshot/%s/%s' % (snapshot['snapName'],
                                        orig_share_name)

    return self.create_share(
        orig_project_id,
        self.ensure_prefix(share_id),
        protocol,
        extra_specs,
        fpg,
        vfs,
        fstore=fstore,
        sharedir=sharedir,
        readonly=True,
        comment=comment,
    )
def delete_share(self, project_id, share_id, share_proto, fpg, vfs):
    """Remove a share, plus its fstore when the fstore was per-share."""
    protocol = self.ensure_supported_protocol(share_proto)
    share_name = self.ensure_prefix(share_id)
    fstore = self._find_fstore(project_id, share_name, protocol, fpg, vfs,
                               allow_cross_protocol=True)
    if not fstore:
        # Share does not exist.
        return

    try:
        self._client.removefshare(protocol, vfs, share_name,
                                  fpg=fpg, fstore=fstore)
    except Exception as e:
        msg = (_('Failed to remove share %(share_name)s: %(e)s') %
               {'share_name': share_name, 'e': six.text_type(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(message=msg)

    if fstore != share_name:
        # Shared fstore (per-project) — leave it in place.
        return

    # The fstore was dedicated to this share; clean it up as well.
    try:
        self._client.removefstore(vfs, fstore, fpg=fpg)
    except Exception as e:
        msg = (_('Failed to remove fstore %(fstore)s: %(e)s') %
               {'fstore': fstore, 'e': six.text_type(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(message=msg)
def get_vfs_name(self, fpg):
    """Return the name of the VFS on the given FPG (raises if not found)."""
    vfs_info = self.get_vfs(fpg)
    return vfs_info['vfsname']
def get_vfs(self, fpg, vfs=None):
    """Get the VFS or raise an exception."""
    try:
        result = self._client.getvfs(fpg=fpg, vfs=vfs)
    except Exception as e:
        msg = (_('Exception during getvfs %(vfs)s: %(e)s') %
               {'vfs': vfs, 'e': six.text_type(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg)

    if result['total'] == 1:
        return result['members'][0]

    # Anything other than exactly one match is an error; prefer the
    # backend's own error message when it supplied one.
    error_msg = result.get('message')
    if error_msg:
        message = (_('Error while validating FPG/VFS '
                     '(%(fpg)s/%(vfs)s): %(msg)s') %
                   {'fpg': fpg, 'vfs': vfs, 'msg': error_msg})
    else:
        message = (_('Error while validating FPG/VFS '
                     '(%(fpg)s/%(vfs)s): Expected 1, '
                     'got %(total)s.') %
                   {'fpg': fpg, 'vfs': vfs,
                    'total': result['total']})
    LOG.error(message)
    raise exception.ShareBackendException(message)
def create_snapshot(self, orig_project_id, orig_share_id, orig_share_proto,
                    snapshot_id, fpg, vfs):
    """Creates a snapshot of a share."""
    fshare = self._find_fshare(orig_project_id, orig_share_id,
                               orig_share_proto, fpg, vfs)
    if not fshare:
        msg = (_('Failed to create snapshot for FPG/VFS/fshare '
                 '%(fpg)s/%(vfs)s/%(fshare)s: Failed to find fshare.') %
               {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id})
        LOG.error(msg)
        raise exception.ShareBackendException(msg)

    # Refuse to snapshot a share that is itself a read-only view of a
    # snapshot (its shareDir lives under '.snapshot').
    sharedir = fshare.get('shareDir')
    if sharedir and sharedir.startswith('.snapshot'):
        msg = (_('Failed to create snapshot for FPG/VFS/fshare '
                 '%(fpg)s/%(vfs)s/%(fshare)s: Share is a read-only '
                 'share of an existing snapshot.') %
               {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id})
        LOG.error(msg)
        raise exception.ShareBackendException(msg)

    fstore = fshare.get('fstoreName')
    snapshot_tag = self.ensure_prefix(snapshot_id)
    try:
        result = self._client.createfsnap(
            vfs, fstore, snapshot_tag, fpg=fpg)
        LOG.debug("createfsnap result=%s", result)
    except Exception as e:
        msg = (_('Failed to create snapshot for FPG/VFS/fstore '
                 '%(fpg)s/%(vfs)s/%(fstore)s: %(e)s') %
               {'fpg': fpg, 'vfs': vfs, 'fstore': fstore,
                'e': six.text_type(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg)
def delete_snapshot(self, orig_project_id, orig_share_id, orig_proto,
                    snapshot_id, fpg, vfs):
    """Deletes a snapshot of a share.

    Refuses to delete while any share (NFS or SMB) still exposes the
    snapshot's directory tree. After a successful removal, a snapshot-space
    reclaim is started on a best-effort basis.
    """
    snapshot_tag = self.ensure_prefix(snapshot_id)
    snapshot = self._find_fsnap(orig_project_id, orig_share_id, orig_proto,
                                snapshot_tag, fpg, vfs)
    if not snapshot:
        # Already gone — delete is idempotent.
        return

    fstore = snapshot.get('fstoreName')

    # Check both protocols for shares that still reference the snapshot.
    for protocol in ('nfs', 'smb'):
        try:
            shares = self._client.getfshare(protocol,
                                            fpg=fpg,
                                            vfs=vfs,
                                            fstore=fstore)
        except Exception as e:
            msg = (_('Unexpected exception while getting share list. '
                     'Cannot delete snapshot without checking for '
                     'dependent shares first: %s') % six.text_type(e))
            LOG.exception(msg)
            raise exception.ShareBackendException(msg)

        for share in shares['members']:
            if protocol == 'nfs':
                # NFS sharePath is absolute — presumably
                # /<fpg>/<vfs>/<fstore>/..., putting '.snapshot' at index 3
                # after stripping the leading '/'; TODO(review): confirm.
                path = share['sharePath'][1:].split('/')
                dot_snapshot_index = 3
            else:
                # SMB shareDir is relative to the fstore, so '.snapshot'
                # (when present) is the first component.
                if share['shareDir']:
                    path = share['shareDir'].split('/')
                else:
                    path = None
                dot_snapshot_index = 0

            snapshot_index = dot_snapshot_index + 1
            if path and len(path) > snapshot_index:
                if (path[dot_snapshot_index] == '.snapshot' and
                        path[snapshot_index].endswith(snapshot_tag)):
                    msg = (_('Cannot delete snapshot because it has a '
                             'dependent share.'))
                    raise exception.Invalid(msg)

    snapname = snapshot['snapName']
    try:
        result = self._client.removefsnap(
            vfs, fstore, snapname=snapname, fpg=fpg)
        LOG.debug("removefsnap result=%s", result)
    except Exception as e:
        msg = (_('Failed to delete snapshot for FPG/VFS/fstore/snapshot '
                 '%(fpg)s/%(vfs)s/%(fstore)s/%(snapname)s: %(e)s') %
               {
                   'fpg': fpg,
                   'vfs': vfs,
                   'fstore': fstore,
                   'snapname': snapname,
                   'e': six.text_type(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg)

    # Try to reclaim the space
    try:
        self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed')
    except Exception as e:
        # Remove already happened so only log this.
        msg = (_('Unexpected exception calling startfsnapclean for FPG '
                 '%(fpg)s: %(e)s') % {'fpg': fpg, 'e': six.text_type(e)})
        LOG.exception(msg)
@staticmethod
def validate_access_type(protocol, access_type):
    """Validate access_type for the given protocol; return the protocol."""
    valid_types = ('ip', 'user')
    if access_type not in valid_types:
        msg = (_("Invalid access type. Expected 'ip' or 'user'. "
                 "Actual '%s'.") % access_type)
        LOG.error(msg)
        raise exception.InvalidInput(msg)

    # NFS only supports IP-based access on HP 3PAR.
    if protocol == 'nfs' and access_type != 'ip':
        msg = (_("Invalid NFS access type. HP 3PAR NFS supports 'ip'. "
                 "Actual '%s'.") % access_type)
        LOG.error(msg)
        raise exception.HP3ParInvalid(msg)

    return protocol
def _change_access(self, plus_or_minus, project_id, share_id, share_proto,
                   access_type, access_to, fpg, vfs):
    """Allow or deny access to a share.

    Plus_or_minus character indicates add to allow list (+) or remove from
    allow list (-).

    :param plus_or_minus: '+' (allow) or '-' (deny) prefix for the client call.
    :param project_id: Project owning the share.
    :param share_id: Share whose access list is being changed.
    :param share_proto: Share protocol; normalized below.
    :param access_type: 'ip' or 'user' (validated below).
    :param access_to: The IP or user being granted/revoked.
    :param fpg: File provisioning group.
    :param vfs: Virtual file server.
    :raises ShareBackendException: if the backend call fails.
    """
    protocol = self.ensure_supported_protocol(share_proto)
    self.validate_access_type(protocol, access_type)
    share_name = self.ensure_prefix(share_id)
    # Cross-protocol lookup: the fstore may have been created under the
    # share's sibling protocol.
    fstore = self._find_fstore(project_id, share_id, protocol, fpg, vfs,
                               allow_cross_protocol=True)
    try:
        if protocol == 'nfs':
            # NFS access is always IP-based.
            result = self._client.setfshare(
                protocol, vfs, share_name, fpg=fpg, fstore=fstore,
                clientip='%s%s' % (plus_or_minus, access_to))

        elif protocol == 'smb':
            if access_type == 'ip':
                result = self._client.setfshare(
                    protocol, vfs, share_name, fpg=fpg, fstore=fstore,
                    allowip='%s%s' % (plus_or_minus, access_to))
            else:
                # User-based access: grant/revoke full control.
                access_str = 'fullcontrol'
                perm = '%s%s:%s' % (plus_or_minus, access_to, access_str)
                result = self._client.setfshare(protocol, vfs, share_name,
                                                fpg=fpg, fstore=fstore,
                                                allowperm=perm)

        else:
            msg = (_("Unexpected error: After ensure_supported_protocol "
                     "only 'nfs' or 'smb' strings are allowed, but found: "
                     "%s.") % protocol)
            raise exception.HP3ParUnexpectedError(msg)

        LOG.debug("setfshare result=%s", result)

    except Exception as e:
        # NOTE(review): the HP3ParUnexpectedError raised just above is also
        # caught here and re-raised as ShareBackendException.
        msg = (_('Failed to change (%(change)s) access to FPG/share '
                 '%(fpg)s/%(share)s to %(type)s %(to)s): %(e)s') %
               {'change': plus_or_minus, 'fpg': fpg, 'share': share_name,
                'type': access_type, 'to': access_to,
                'e': six.text_type(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg)
def _find_fstore(self, project_id, share_id, share_proto, fpg, vfs,
                 allow_cross_protocol=False):
    """Return the fstore name backing a share, or None when not found."""
    share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs)

    if not share and allow_cross_protocol:
        # Retry under the share's sibling protocol (nfs <-> smb).
        share = self._find_fshare(project_id, share_id,
                                  self.other_protocol(share_proto),
                                  fpg, vfs)

    if share:
        return share.get('fstoreName')
    return None
def _find_fshare(self, project_id, share_id, share_proto, fpg, vfs):
    """Search progressively wider scopes for a share.

    Returns the single matching fshare dict, or None when no scope yields
    exactly one match.
    """
    protocol = self.ensure_supported_protocol(share_proto)
    share_name = self.ensure_prefix(share_id)
    project_fstore = self.ensure_prefix(project_id, share_proto)

    # Most specific scope first, widening until exactly one hit.
    scopes = (
        {'fpg': fpg, 'vfs': vfs, 'fstore': project_fstore},
        {'fpg': fpg, 'vfs': vfs, 'fstore': share_name},
        {'fpg': fpg},
        {},
    )

    try:
        for scope in scopes:
            found = self._client.getfshare(protocol, share_name,
                                           **scope).get('members', [])
            if len(found) == 1:
                return found[0]
    except Exception as e:
        msg = (_('Unexpected exception while getting share list: %s') %
               six.text_type(e))
        raise exception.ShareBackendException(msg)
def _find_fsnap(self, project_id, share_id, orig_proto, snapshot_tag,
                fpg, vfs):
    """Search progressively wider scopes for a snapshot.

    Returns the single matching snapshot dict, or None when no scope
    yields exactly one match.
    """
    share_name = self.ensure_prefix(share_id)
    osf_project_id = self.ensure_prefix(project_id, orig_proto)
    pattern = '*_%s' % self.ensure_prefix(snapshot_tag)

    scopes = (
        {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': osf_project_id},
        {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': share_name},
        {'pat': True, 'fpg': fpg},
        {'pat': True},
    )

    try:
        for scope in scopes:
            found = self._client.getfsnap(pattern,
                                          **scope).get('members', [])
            if len(found) == 1:
                return found[0]
    except Exception as e:
        msg = (_('Unexpected exception while getting snapshots: %s') %
               six.text_type(e))
        raise exception.ShareBackendException(msg)
def allow_access(self, project_id, share_id, share_proto, access_type,
                 access_to, fpg, vfs):
    """Grant access to a share (delegates to _change_access with ALLOW)."""
    self._change_access(ALLOW, project_id, share_id, share_proto,
                        access_type, access_to, fpg, vfs)
def deny_access(self, project_id, share_id, share_proto, access_type,
                access_to, fpg, vfs):
    """Deny access to a share (delegates to _change_access with DENY)."""
    self._change_access(DENY, project_id, share_id, share_proto,
                        access_type, access_to, fpg, vfs)
def fsip_exists(self, fsip):
    """Try to get FSIP. Return True if it exists.

    :param fsip: Dict with at least 'fspool' (FPG) and 'vfs' keys. The FSIP
        is considered to exist when some member returned by the backend
        contains every key/value pair of this dict.
    :returns: True if a matching FSIP is found, else False.
    :raises ShareBackendException: if the backend query fails.
    """
    vfs = fsip['vfs']
    fpg = fsip['fspool']
    try:
        result = self._client.getfsip(vfs, fpg=fpg)
        LOG.debug("getfsip result: %s", result)
    except Exception:
        # Fix: log once via LOG.exception (which includes the traceback);
        # the previous code also called LOG.exception(e) first, emitting
        # the same traceback twice.
        msg = (_('Failed to get FSIPs for FPG/VFS %(fspool)s/%(vfs)s.') %
               fsip)
        LOG.exception(msg)
        raise exception.ShareBackendException(msg=msg)

    for member in result['members']:
        if all(item in member.items() for item in fsip.items()):
            return True

    return False
def create_fsip(self, ip, subnet, vlantag, fpg, vfs):
vlantag_str = six.text_type(vlantag) if vlantag else '0'
# Try to create it. It's OK if it already exists.
try:
result = self._client.createfsip(ip,
subnet,
vfs,
fpg=fpg,
vlantag=vlantag_str)
LOG.debug("createfsip result: %s", result)
except Exception as e:
LOG.exception(e)
msg = (_('Failed to create FSIP for | |
import requests
import time
import datetime
import hmac
import hashlib
import krakenex
import csv
from gemini.client import Client
from poloniex import Poloniex
try:
from urllib import urlencode
from urlparse import urljoin
except ImportError:
from urllib.parse import urlencode
from urllib.parse import urljoin
# API Keys
# PLEASE ADD YOUR KEYS HERE
# SECURITY NOTE(review): credentials are hard-coded in source; prefer
# loading them from environment variables or a config file kept out of
# version control.
bittrex_api_key = {"key": "0000000000", "secret": "f0000000000000"}
gemini = Client('00000000000','0000000000000')

# Mining Deposit Address
# CHANGE TO YOUR ADDRESS
eth_mining_address = '0x2e699bb880bd665bf2339336c921d0c6fa369b15'

# Cold storage addresses
# PLEASE CHANGE TO YOUR ADDRESSES
btc_wallet = ['<KEY>', '<KEY>', '<KEY>', '<KEY>', '<KEY>', '<KEY>','<KEY>','<KEY>', '<KEY>','<KEY>', '<KEY>', '<KEY>', '1MtA3DgenJX22SgpfXf6MhyBLFRudBpLt7']
cols_eth_stor = ['0xa4C2F38ab69cCB5Ac14e27a65D64e18ddfd73C6A']

# Fiat deposited via Chase (USD), summed from two deposits.
depo_chase = 250+249.5

# Global tracking variables, filled in as exchanges are queried.
tracker = {}
balances = {}
market_values = {}
#****Taken from https://github.com/ericsomdahl/python-bittrex
class bittrex(object):
    """Minimal Bittrex v1.1 REST API client.

    Public endpoints need no credentials. Market and account endpoints are
    authenticated by embedding the API key and a millisecond nonce in the
    query string and signing the complete request URL with HMAC-SHA512
    (sent in the 'apisign' header).
    """
    BASE_URL = 'https://bittrex.com/api/v1.1/%s/'

    # Methods routed to the 'market' API group (require authentication).
    MARKET_SET = {'getopenorders', 'cancel', 'sellmarket', 'selllimit', 'buymarket', 'buylimit'}

    # Methods routed to the 'account' API group (require authentication).
    ACCOUNT_SET = {'getbalances', 'getbalance', 'getdepositaddress', 'withdraw', 'getorderhistory'}

    def __init__(self,key,secret):
        # API credentials used to sign non-public requests.
        self.api_key = key
        self.api_secret = secret

    def api_query(self, method, options=None):
        """
        Queries Bittrex with given method and options

        :param method: Query method for getting info
        :type method: str
        :param options: Extra options for query
        :type options: dict
        :return: JSON response from Bittrex
        :rtype : dict
        """
        if not options:
            options = {}
        # Millisecond timestamp doubles as the anti-replay nonce.
        nonce = str(int(time.time() * 1000))
        # Route the call to the correct API group (public/market/account).
        method_set = 'public'
        if method in bittrex.MARKET_SET:
            method_set = 'market'
        elif method in bittrex.ACCOUNT_SET:
            method_set = 'account'

        request_url = (bittrex.BASE_URL % method_set) + method + '?'

        if method_set != 'public':
            request_url += 'apikey=' + self.api_key + "&nonce=" + nonce + '&'

        request_url += urlencode(options)

        # Sign the exact URL bytes; the signature must match what the server
        # recomputes, so the URL must not be altered after this point.
        return requests.get(
            request_url,
            headers={"apisign": hmac.new(self.api_secret.encode(), request_url.encode(), hashlib.sha512).hexdigest()}
        ).json()

    def get_markets(self):
        """
        Used to get the open and available trading markets
        at Bittrex along with other meta data.

        :return: Available market info in JSON
        :rtype : dict
        """
        return self.api_query('getmarkets')

    def get_currencies(self):
        """
        Used to get all supported currencies at Bittrex
        along with other meta data.

        :return: Supported currencies info in JSON
        :rtype : dict
        """
        return self.api_query('getcurrencies')

    def get_ticker(self, market):
        """
        Used to get the current tick values for a market.

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :return: Current values for given market in JSON
        :rtype : dict
        """
        return self.api_query('getticker', {'market': market})

    def get_market_summaries(self):
        """
        Used to get the last 24 hour summary of all active exchanges

        :return: Summaries of active exchanges in JSON
        :rtype : dict
        """
        return self.api_query('getmarketsummaries')

    def get_orderbook(self, market, depth_type, depth=20):
        """
        Used to retrieve the orderbook for a given market

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :param depth_type: buy, sell or both to identify the type of orderbook to return.
            Use constants BUY_ORDERBOOK, SELL_ORDERBOOK, BOTH_ORDERBOOK
        :type depth_type: str
        :param depth: how deep of an order book to retrieve. Max is 100, default is 20
        :type depth: int
        :return: Orderbook of market in JSON
        :rtype : dict
        """
        return self.api_query('getorderbook', {'market': market, 'type': depth_type, 'depth': depth})

    def get_market_summary(self, market):
        """
        Used to get a market summary for a given market

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :return: Market summary in JSON
        :rtype : dict
        """
        return self.api_query('getmarketsummary', {'market': market})

    def get_market_history(self, market, count):
        """
        Used to retrieve the latest trades that have occurred for a
        specific market.

        /market/getmarkethistory

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :param count: Number between 1-100 for the number of entries to return (default = 20)
        :type count: int
        :return: Market history in JSON
        :rtype : dict
        """
        return self.api_query('getmarkethistory', {'market': market, 'count': count})

    def buy_market(self, market, quantity):
        """
        Used to place a buy order in a specific market. Use buymarket to
        place market orders. Make sure you have the proper permissions
        set on your API keys for this call to work

        /market/buymarket

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :param quantity: The amount to purchase
        :type quantity: float
        :return: Order result in JSON
        :rtype : dict
        """
        return self.api_query('buymarket', {'market': market, 'quantity': quantity})

    def buy_limit(self, market, quantity, rate):
        """
        Used to place a buy order in a specific market. Use buylimit to place
        limit orders Make sure you have the proper permissions set on your
        API keys for this call to work

        /market/buylimit

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :param quantity: The amount to purchase
        :type quantity: float
        :param rate: The rate at which to place the order
        :type rate: float
        :return: Order result in JSON
        :rtype : dict
        """
        return self.api_query('buylimit', {'market': market, 'quantity': quantity, 'rate': rate})

    def sell_market(self, market, quantity):
        """
        Used to place a sell order in a specific market. Use sellmarket to place
        market orders. Make sure you have the proper permissions set on your
        API keys for this call to work

        /market/sellmarket

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :param quantity: The amount to sell
        :type quantity: float
        :return: Order result in JSON
        :rtype : dict
        """
        return self.api_query('sellmarket', {'market': market, 'quantity': quantity})

    def sell_limit(self, market, quantity, rate):
        """
        Used to place a sell order in a specific market. Use selllimit to place
        limit orders Make sure you have the proper permissions set on your
        API keys for this call to work

        /market/selllimit

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :param quantity: The amount to sell
        :type quantity: float
        :param rate: The rate at which to place the order
        :type rate: float
        :return: Order result in JSON
        :rtype : dict
        """
        return self.api_query('selllimit', {'market': market, 'quantity': quantity, 'rate': rate})

    def cancel(self, uuid):
        """
        Used to cancel a buy or sell order

        /market/cancel

        :param uuid: uuid of buy or sell order
        :type uuid: str
        :return: Cancellation result in JSON
        :rtype : dict
        """
        return self.api_query('cancel', {'uuid': uuid})

    def get_open_orders(self, market):
        """
        Get all orders that you currently have opened. A specific market can be requested

        /market/getopenorders

        :param market: String literal for the market (ie. BTC-LTC)
        :type market: str
        :return: Open orders info in JSON
        :rtype : dict
        """
        return self.api_query('getopenorders', {'market': market})

    def get_balances(self):
        """
        Used to retrieve all balances from your account

        /account/getbalances

        :return: Balances info in JSON
        :rtype : dict
        """
        return self.api_query('getbalances', {})

    def get_balance(self, currency):
        """
        Used to retrieve the balance from your account for a specific currency

        /account/getbalance

        :param currency: String literal for the currency (ex: LTC)
        :type currency: str
        :return: Balance info in JSON
        :rtype : dict
        """
        return self.api_query('getbalance', {'currency': currency})

    def get_deposit_address(self, currency):
        """
        Used to generate or retrieve an address for a specific currency

        /account/getdepositaddress

        :param currency: String literal for the currency (ie. BTC)
        :type currency: str
        :return: Address info in JSON
        :rtype : dict
        """
        return self.api_query('getdepositaddress', {'currency': currency})

    def withdraw(self, currency, quantity, address):
        """
        Used to withdraw funds from your account

        /account/withdraw

        :param currency: String literal for the currency (ie. BTC)
        :type currency: str
        :param quantity: The quantity of coins to withdraw
        :type quantity: float
        :param address: The address where to send the funds.
        :type address: str
        :return: Withdrawal result in JSON
        :rtype : dict
        """
        return self.api_query('withdraw', {'currency': currency, 'quantity': quantity, 'address': address})

    def get_order_history(self, market, count):
        """
        Used to retrieve order trade history of account

        /account/getorderhistory

        :param market: optional a string literal for the market (ie. BTC-LTC). If omitted, will return for all markets
        :type market: str
        :param count: optional the number of records to return
        :type count: int
        :return: order history in JSON
        :rtype : dict
        """
        return self.api_query('getorderhistory', {'market':market, 'count': count})
#Take from https://github.com/ericsomdahl/python-bittrex****
class nano(object):
def __init__(self, eth_address):
    """Store the miner's Ethereum address used to build Nanopool API URLs."""
    # eth_address: the mining payout address (string), interpolated into
    # every API endpoint by url_creator.
    self.eth_address = eth_address
def url_creator(self, type):
"""
creates the unique url for each API function
:param type: the type of API request
:type type: str
:return: completed url with mining address and API function
:rtype : str
"""
url = 'https://api.nanopool.org/v1/eth/' + type + '/' + nano.eth_address
return (url)
def balance(self):
"""
sends a request to Nanopool for ETH balance of the miner
:return: ETH balance
:rtype : str
"""
url = self.url_creator('balance')
resp = requests.get(url)
if resp.status_code != 200:
print('Oops, something | |
<gh_stars>0
from __future__ import print_function
import boto3
from decimal import Decimal
import json
import urllib
import os
from datetime import datetime
from contextlib import closing
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
print('Loading function')

# Shared Rekognition client, created once at module load (Lambda cold start)
# and reused across invocations.
rekognition = boto3.client('rekognition')
# DynamoDB table name injected via the Lambda environment.
ddb_table_name = os.environ['CEREBRO_TABLE']

# --- utils ----
def get_list_of_tagged_faces():
    """Scan the cerebro_media table for all tagged profile faces.

    Strategy: find every 'file_metadata' record whose external_image_id
    begins with 'profile', following DynamoDB scan pagination via
    LastEvaluatedKey/ExclusiveStartKey.

    Returns:
        A list of items (dicts with external_image_id, profile, FaceId and
        ImageId), or '' when nothing was found (legacy sentinel preserved
        for existing callers).
    """
    print("In get_list_of_tagged_faces ...")

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('cerebro_media')

    # Build the scan arguments once; only ExclusiveStartKey varies between
    # pages (the previous code duplicated the whole scan call).
    scan_kwargs = {
        'FilterExpression': (
            Attr('external_image_id').begins_with('profile') &
            Attr('rec_type').begins_with('file_metadata')),
        'ProjectionExpression': "#extid, #profile, #faceid, #imageid",
        # Expression Attribute Names for Projection Expression only.
        'ExpressionAttributeNames': {
            "#extid": "external_image_id",
            "#profile": "profile",
            "#faceid": "FaceId",
            "#imageid": "ImageId"
        },
    }

    exclusive_start_key = None
    result_set = []
    while True:
        print("Start Key : %s" % exclusive_start_key)
        if exclusive_start_key:
            scan_kwargs['ExclusiveStartKey'] = exclusive_start_key
        response = table.scan(**scan_kwargs)

        print("Response, Itemcount")
        print(response, len(response['Items']))
        result_set += response['Items']

        if "LastEvaluatedKey" in response:
            # More pages remain; continue from where this page stopped.
            exclusive_start_key = response["LastEvaluatedKey"]
            print("New Start Key : %s" % exclusive_start_key)
        else:
            print("No more items to iterate over ...")
            break

    print("Out of loop of getting the list of profiled faces now ...")
    print(result_set)
    print(len(result_set))
    if len(result_set) < 1:
        print("ERROR: No user content located. Exiting !")
        return ''

    return result_set
# --------------- Helper Functions to call Rekognition APIs ------------------
def detect_faces(bucket, key):
    """Run Rekognition face detection on an S3 object; return the raw response."""
    return rekognition.detect_faces(Image={"S3Object": {"Bucket": bucket, "Name": key}})
def detect_labels(bucket, key, profile_name=""):
    """Detect scene/object labels for an S3 image with Rekognition and
    persist an 'image_labels' record to DynamoDB.

    :param bucket: S3 bucket holding the image
    :param key: S3 object key (stored as external_image_id)
    :param profile_name: optional profile name attached to the record
    :return: dict with the confident label names and the image basename
    """
    response = rekognition.detect_labels(Image={"S3Object": {"Bucket": bucket, "Name": key}})
    label_entry = {}
    # Keep only labels Rekognition reported with more than 50% confidence.
    label_entry["Labels"] = getFaceAttribute(response, attribute="Labels", is_array_type=True, value_key="Name", confidence_level=50)
    label_entry["ExternalImageId"] = os.path.basename(key)
    print(label_entry)
    table = boto3.resource('dynamodb').Table(ddb_table_name)
    current_time = datetime.utcnow()
    current_time_str = current_time.strftime("%Y-%m-%d %H:%M:%S.%f")
    # Portable UTC epoch seconds. The original used strftime("%s"), a
    # glibc-only extension that also interprets the naive datetime in the
    # *local* timezone; identical result on Lambda (TZ=UTC), correct everywhere.
    current_time_epoch = int((current_time - datetime(1970, 1, 1)).total_seconds())
    print(current_time_epoch)
    (dt, micro) = current_time_str.split('.')
    # Epoch seconds joined with the microsecond fraction, e.g. "1590000000.123456".
    dt = "%s.%s" % (current_time_epoch, micro)
    print(dt, micro)
    current_time_epoch = Decimal(dt)
    print(current_time_epoch)
    ddb_item = {}
    ddb_item['external_image_id'] = key
    ddb_item['epoch'] = current_time_epoch
    ddb_item['current_time'] = current_time_str
    ddb_item['Labels'] = label_entry["Labels"]
    ddb_item["rec_type"] = "image_labels"
    if profile_name:
        ddb_item['profile'] = profile_name
    print(ddb_item)
    table.put_item(Item=ddb_item)
    return label_entry
def generate_caption(tagged_face_list, album="Unknown"):
    """Build a caption naming the recognized people in an image.

    Faces whose name begins with "unknown" (any case) are skipped.
    Returns "No Faces provided" when the list is empty or None.
    """
    print(tagged_face_list)
    # confirm if the tagged faces was sent properly
    if not tagged_face_list:
        print("No Faces Provided")
        return "No Faces provided"
    known = [name for name in tagged_face_list if not name.lower().startswith("unknown")]
    faces = ", ".join(known)
    caption_text = "Found %s ." % (faces)
    return caption_text
def generate_audio(audioKey, textMessage, pollyVoice=None):
    """Synthesize textMessage to MP3 with Amazon Polly and upload it to S3.

    :param audioKey: S3 key of the source image; its second path component
        is used as the audio file name
    :param textMessage: text to synthesize
    :param pollyVoice: Polly voice id; defaults to the POLLY_VOICE env var
    :return: S3 key of the uploaded MP3
    """
    if pollyVoice is None:
        # BUG FIX: resolved at call time. The original read os.environ in the
        # default argument, which is evaluated at import and raised KeyError
        # whenever POLLY_VOICE was not set, even if a voice was passed in.
        pollyVoice = os.environ["POLLY_VOICE"]
    print("In generate_welcome_audio ...")
    print(textMessage, pollyVoice, audioKey)
    imageid = audioKey.split('/')[1]
    print("Imageid: %s" % imageid)
    rest = textMessage
    # Because a single invocation of the polly synthesize_speech api can
    # transform text with about 1,500 characters, we are dividing the
    # text into blocks of approximately 1,000 characters, split at a
    # sentence or word boundary.
    textBlocks = []
    while (len(rest) > 1100):
        end = rest.find(".", 1000)
        if (end == -1):
            end = rest.find(" ", 1000)
        if (end == -1):
            # BUG FIX: no boundary found past position 1000 - take the rest
            # whole. (The original left end == -1, so rest[0:-1] silently
            # dropped the final character.)
            end = len(rest)
        textBlock = rest[0:end]
        rest = rest[end:]
        textBlocks.append(textBlock)
    textBlocks.append(rest)
    print(textBlocks)
    # For each block, invoke Polly, which will transform text into audio.
    polly = boto3.client('polly')
    isNewFile = True
    for textBlock in textBlocks:
        print("Polly is processing: %s" % textBlock)
        response = polly.synthesize_speech(
            OutputFormat='mp3',
            Text = textBlock,
            VoiceId = pollyVoice
        )
        print(response)
        # First block creates the temp file ("wb"); later blocks append ("ab")
        # so multiple streams are combined into a single file.
        if isNewFile:
            write_mode = "wb"
            isNewFile = False
        else:
            write_mode = "ab"
        if "AudioStream" in response:
            with closing(response["AudioStream"]) as stream:
                output = os.path.join("/tmp/", imageid)
                with open(output, write_mode) as file:
                    file.write(stream.read())
    print("Finished the audio generating now ...")
    audio_s3key = os.environ['AUDIO_CONTENT_DIR'] + "/" + imageid + ".mp3"
    s3 = boto3.client('s3')
    s3.upload_file('/tmp/' + imageid,
                   os.environ['BUCKET_NAME'],
                   audio_s3key)
    print("The s3 audio file should be ready now !")
    return audio_s3key
def insert_file_metadata(key, s3_metadata):
    """Write a 'file_metadata' record for an uploaded image to DynamoDB.

    For profile uploads (s3_metadata carries 'profile') the record just
    stores the declared owner; otherwise faces in the image are matched
    against stored profiles and a caption plus caption audio are generated.

    :param key: S3 object key of the uploaded image (used as external_image_id)
    :param s3_metadata: dict of S3 user metadata; recognized keys are
        exif_data, file_datetime, file_datetime_epoch and profile
    :return: DynamoDB put_item response
    """
    table = boto3.resource('dynamodb').Table(ddb_table_name)
    current_time = datetime.utcnow()
    current_time_str = current_time.strftime("%Y-%m-%d %H:%M:%S.%f")
    # NOTE(review): "%s" is a non-portable glibc strftime extension and
    # interprets the naive datetime in the local zone - works on Lambda
    # because TZ is UTC there; confirm before running elsewhere.
    current_time_epoch = current_time.strftime("%s")
    print(current_time_epoch)
    (dt, micro) = current_time_str.split('.')
    # Epoch seconds joined with the microsecond fraction, e.g. "1590000000.123456".
    dt = "%s.%s" % (current_time_epoch, micro)
    print(dt,micro)
    current_time_epoch = Decimal(dt)
    print(current_time_epoch)
    ddb_item = {}
    ddb_item['external_image_id'] = key
    ddb_item['epoch'] = current_time_epoch
    ddb_item['current_time'] = current_time_str
    ddb_item["rec_type"] = "file_metadata"
    # Copy through any S3 user metadata attached at upload time.
    if "exif_data" in s3_metadata:
        ddb_item["exif_data"] = s3_metadata["exif_data"]
    if "file_datetime" in s3_metadata:
        ddb_item["original_datetime"] = s3_metadata["file_datetime"]
    if "file_datetime_epoch" in s3_metadata:
        ddb_item["original_datetime_epoch"] = s3_metadata["file_datetime_epoch"]
    if "profile" in s3_metadata:
        # Profile upload: the uploader declared whose face this is.
        ddb_item["profile"] = s3_metadata["profile"]
        #
        ddb_item["profile_owner"] = s3_metadata["profile"]
    else:
        # Regular upload: match faces in the image against stored profiles.
        profile_list = matchImageToProfile(key)
        print("Retrieved list of profiles as: ")
        print(profile_list)
        # Flattened '&'-separated form kept alongside the list for querying.
        profile_list_str = '&'.join(profile for profile in profile_list)
        print("Profiles merged is: %s" % profile_list_str)
        ddb_item["profile_matched"] = profile_list
        ddb_item["profile_matched_string"] = profile_list_str
        # also generate the caption text
        ddb_item["caption_text"] = generate_caption(tagged_face_list=profile_list)
        # next up - caption audio file
        ddb_item["caption_audio"] = generate_audio(audioKey=key, textMessage=ddb_item["caption_text"])
    print(ddb_item)
    response = table.put_item(Item=ddb_item)
    return response
def index_faces(bucket, key, profile_name=""):
    """Index faces from an S3 image into the Rekognition collection and
    persist one 'face_attributes' record per detected face in DynamoDB.

    :param bucket: S3 bucket holding the image
    :param key: S3 object key (stored as external_image_id in the table)
    :param profile_name: optional profile name when the image is a profile shot
    :return: raw index_faces response from Rekognition
    """
    # Note: Collection has to be created upfront. Use CreateCollection API to create a collection.
    collectionId = os.environ["REKO_COLLECTION"]
    response = rekognition.index_faces(
        Image={"S3Object": {"Bucket": bucket, "Name": key}},
        CollectionId=collectionId,
        ExternalImageId=os.path.basename(key),
        DetectionAttributes=['ALL','DEFAULT']
    )
    print("Response from indexingfaces ...")
    print(response)
    sortkey = 2
    for face in response["FaceRecords"]:
        face_entry = {}
        print('--------------------------')
        # Drop geometry fields that are never stored, to keep the item small.
        del face["FaceDetail"]["Landmarks"]
        del face["FaceDetail"]["BoundingBox"]
        del face["FaceDetail"]["Pose"]
        del face["Face"]["BoundingBox"]
        face_entry["Eyeglasses"] = getFaceAttribute(face["FaceDetail"], "Eyeglasses")
        face_entry["Sunglasses"] = getFaceAttribute(face["FaceDetail"], "Sunglasses")
        face_entry["Gender"] = getFaceAttribute(face["FaceDetail"], "Gender")
        eyesOpen = getFaceAttribute(face["FaceDetail"], "EyesOpen")
        print(eyesOpen)
        # getFaceAttribute returns None below its confidence threshold; only
        # store the attribute when a definite boolean came back.
        if isinstance(eyesOpen,(bool)):
            face_entry["EyesOpen"] = eyesOpen
        smileValue = getFaceAttribute(face["FaceDetail"], "Smile")
        print(smileValue)
        if isinstance(smileValue,(bool)):
            face_entry["Smile"] = smileValue
        face_entry["MouthOpen"] = getFaceAttribute(face["FaceDetail"], "MouthOpen")
        face_entry["Mustache"] = getFaceAttribute(face["FaceDetail"], "Mustache")
        face_entry["Beard"] = getFaceAttribute(face["FaceDetail"], "Beard")
        face_entry["Emotions"] = getFaceAttribute(face["FaceDetail"], "Emotions", value_key="Type", is_array_type=True)
        face_entry["AgeRange_High"] = face["FaceDetail"]["AgeRange"]["High"]
        face_entry["AgeRange_Low"] = face["FaceDetail"]["AgeRange"]["Low"]
        face_entry["Sharpness"] = face["FaceDetail"]["Quality"]["Sharpness"]
        face_entry["Brightness"] = face["FaceDetail"]["Quality"]["Brightness"]
        face_entry["FaceId"] = face["Face"]["FaceId"]
        face_entry["ExternalImageId"] = face["Face"]["ExternalImageId"]
        face_entry["Confidence"] = face["Face"]["Confidence"]
        face_entry["ImageId"] = face["Face"]["ImageId"]
        table = boto3.resource('dynamodb').Table(ddb_table_name)
        current_time = datetime.utcnow()
        current_time_str = current_time.strftime("%Y-%m-%d %H:%M:%S.%f")
        # NOTE(review): "%s" is a glibc-only strftime extension that uses the
        # local timezone for a naive datetime - fine on Lambda where TZ=UTC;
        # confirm before running elsewhere.
        current_time_epoch = current_time.strftime("%s")
        print(current_time_epoch)
        (dt, micro) = current_time_str.split('.')
        dt = "%s.%s" % (current_time_epoch, micro)
        print(dt,micro)
        current_time_epoch = Decimal(dt)
        print(current_time_epoch)
        face_entry['epoch'] = current_time_epoch
        face_entry['current_time'] = current_time_str
        # The full S3 key (not the Rekognition basename) is the table's id.
        face_entry['external_image_id'] = key
        del face_entry['ExternalImageId']
        # Convert float metrics to Decimal before writing to DynamoDB.
        face_entry['Confidence'] = Decimal(face_entry['Confidence'])
        face_entry['Brightness'] = Decimal(face_entry['Brightness'])
        face_entry['Sharpness'] = Decimal(face_entry['Sharpness'])
        if profile_name:
            face_entry['profile'] = profile_name
            # the above is for backward compat. .
            # really maps to the profile name for the picture submitted as the profile
            face_entry['profile_owner'] = profile_name
        face_entry["rec_type"] = "face_attributes"
        print(face_entry)
        table.put_item(Item=face_entry)
        # NOTE(review): sortkey is incremented but never stored or read -
        # looks like a leftover; verify before removing.
        sortkey += 1
    return response
def matchImageToProfile(imageKey=None):
    """Compare an uploaded image against every stored profile image and
    return the names of the matching profiles.

    :param imageKey: S3 key of the uploaded image to identify
    :return: list of matched profile names; ["Unknown"] when nothing
        matches; ["Invalid Image ID"] when no key was supplied
    """
    print("In matchImageToProfile ...")
    if not imageKey:
        return ["Invalid Image ID"]
    # first get a list of all available profile images
    print("Getting the profile images now ...")
    profile_images = get_list_of_tagged_faces()
    print("The list of profile images are ...")
    print(profile_images)
    print("imageKey: %s" % imageKey)
    profile_list = []
    # next for each profile image
    ## run a compare with the latter as the source and the imageKey as the target
    for image in profile_images:
        print("Profile Image to be compared against: ")
        print(image)
        # NOTE(review): bucket is hard-coded here while other code paths use
        # os.environ['BUCKET_NAME'] - confirm both refer to the same bucket.
        response = rekognition.compare_faces(
            SourceImage={
                'S3Object': {
                    'Bucket': 'project-cerebro',
                    'Name': image['external_image_id']
                }
            },
            TargetImage={
                'S3Object': {
                    'Bucket': 'project-cerebro',
                    'Name': imageKey
                }
            },
            # Only matches with >= 75% similarity are returned.
            SimilarityThreshold=75.0
        )
        matched_faces = response['FaceMatches']
        print("The Compare Faces Response matches ...")
        print(matched_faces)
        if matched_faces:
            print("Located a match. Match: %s" % image['profile'])
            profile_list.append(image['profile'])
    print("List of Profiles: ", profile_list)
    if profile_list:
        return profile_list
    return ["Unknown"]
def getFaceAttribute(face_detail, attribute, value_key="Value", confidence_key="Confidence", is_array_type=False, confidence_level=80):
    """Extract a (sufficiently confident) attribute from a Rekognition detail dict.

    Scalar mode returns ``face_detail[attribute][value_key]`` when its
    confidence exceeds ``confidence_level``. Array mode returns the list of
    ``value_key`` values from the confident entries. Returns None when the
    attribute is missing, empty, or (scalar mode) below the threshold.
    """
    if not attribute or attribute not in face_detail:
        return None
    data = face_detail[attribute]
    if is_array_type:
        print(data)
        confident_values = []
        for entry in data:
            print(entry)
            if entry[confidence_key] > confidence_level:
                confident_values.append(entry[value_key])
        print(confident_values)
        return confident_values
    if data[confidence_key] > confidence_level:
        return data[value_key]
    return None
# --------------- Main handler ------------------
def lambda_handler(event, context):
'''Demonstrates S3 trigger that uses
Rekognition APIs to detect faces, labels and index faces in S3 Object.
'''
print("Received event: " + json.dumps(event, indent=2))
key = event['Records'][0]['s3']['object']['key']
filenames = os.path.splitext(key)
file_extension = filenames[1][1:]
print (file_extension)
valid_extensions = ['JPG','JPEG','PNG']
if file_extension.upper() in valid_extensions:
print("This is a valid Image file!")
# now check for the
if (key.startswith('production') or key.startswith('staging') or key.startswith('profiles') or key.startswith('dev')):
print('Triggered by a staging file upload. Good to go!')
else:
print('Triggered by a non-staging file upload. Exiting!')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.