content
stringlengths
7
1.05M
# -*- coding: utf-8 -*- # Copyright (c) 2019, Silvio Peroni <essepuntato@gmail.com> # # Permission to use, copy, modify, and/or distribute this software for any purpose # with or without fee is hereby granted, provided that the above copyright notice # and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, # OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, # DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS # SOFTWARE. # Test case for the function def test_caesar_cypher(msg, left_shift, shift_quantity, expected): result = caesar_cypher(msg, left_shift, shift_quantity) if expected == result: return True else: return False # Code of the function def caesar_cypher(msg, left_shift, shift_quantity): result = list() alphabet = "abcdefghijklmnopqrstuvwxyz" if left_shift: shift_quantity = -shift_quantity cypher = alphabet[shift_quantity:] + alphabet[:shift_quantity] for c in msg.lower(): if c in alphabet: result.append(cypher[alphabet.index(c)]) else: result.append(c) return "".join(result) # Tests print(test_caesar_cypher("message to encrypt", True, 3, "jbppxdb ql bkzovmq")) print(test_caesar_cypher("message to encrypt", False, 5, "rjxxflj yt jshwduy"))
# Team-name generator: greet the user (prompts are in Polish), ask for the
# city they were born in and their pet's name, then print the suggested
# team name built from both answers.
print("Witamy w generatorze nazw zespołów")
city = input("Podaj miasto, w którym się urodziłeś\n")
pet = input("Podaj imię swojego pupila\n")
team_name = " ".join((city, pet))
print("Nazwa twojego zespołu powinna brzmieć: " + team_name)
## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. # # Contractor Name: Raytheon Company # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 # # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## #------*-python-*------------------------------------------------------------- # Config file for the GFE (Graphical Forecast Editor). # # SOFTWARE HISTORY # # Date Ticket# Engineer Description # ------------- -------- --------- -------------------------------------------- # Nov 20, 2013 2488 randerso Changed to use DejaVu fonts # May 28, 2014 2841 randerso Added separate configurable limits for text # formatter and product script tasks # Feb 04, 2015 17039 ryu Removed HighlightFramingCodes setting. # Feb 09, 2016 5283 nabowle Remove NGM support. # Jun 23, 2017 6138 dgilling Remove obsolete winter weather phensigs. # Jan 23, 2018 7153 randerso Cleaned up spelling errors in comments # Dec 06, 2017 DCS20267 psantos Add NWPS rip current guidance ## ## # This is a base file that is not intended to be overridden. # # This file can be imported to override configuration settings. Please see the # Configuration Guides->GFE Configuration section of the GFE Online Help for # guidance on creating a new configuration file. 
## GFESUITE_HOME = "/awips2/GFESuite" GFESUITE_PRDDIR = "/tmp/products" yes = True no = False #------------------------------------------------------------------------ # Hiding the configuration file #------------------------------------------------------------------------ # The gfe configuration file can be hidden in the Start Up Dialog by # using the HideConfigFile keyword and setting it to 1, or by commenting # out the following line. # HideConfigFile = 1 #------------------------------------------------------------------------ # Mutable Parameter and Viewable Database Configurations #------------------------------------------------------------------------ # mutableModel indicates the one database that can be modified. Format # is "type_model_time". If time isn't important (for a singleton db), # then the format is "type_model". If there isn't a type, then the # format is "_model". mutableModel = "_Fcst" # dbTypes is a list of database types which the gfe should "see". dbTypes = ['', 'D2D', 'V'] # The GFE supports filtering of the displayed data by site ID. # If a config entry of the form SITEID_mask is set (to a named edit area), # then the gfe will use the area as a mask in displaying data in the # spatial editor. The user also can set masks for individual weather # elements by adding config entries of the form SITEID_PARMNAME_mask. # Simplified formats also available are PARMNAME_mask and just mask. # The software first looks for a specific site/parmName entry, then # for the site entry, then parmName, then just mask. If you want all of # the weather elements clipped except one, then specify an empty name ("") # of the edit area associated with that weather element. 
#BOU_Wind_mask = "BOU" #BOU_mask = "CWA" #Wind_mask = "CWA" #mask = "CWA" #------------------------------------------------------------------------ # Initial GFE Startup Weather Elements, Samples, and Edit Action List # Configurations #------------------------------------------------------------------------ #------------------------------------------------------------------------ # Ordering the Weather Element Groups # Defines the initial set of parameters to be loaded when starting # the GFE. The name of the Group is specified. # This is also the name of the default group of Edit Actions. DefaultGroup = "Public" # To provide an order for the weather elements, list an order # preference in the list variable 'WEList'. Any elements not listed in # 'WEList', will be listed at the bottom of the weather element group menu # in alphabetical order. # WEList = ["FireWx","Gweight","Public","Temps"] # Defines the initial set of sample sets that are displayed when # starting the GFE. #DefaultSamples = ['DefaultSamples'] # Defines the Smart Tools to be displayed on the Spatial Editor button-3 pop up menu. # All smart tools (screened by active element) will appear if # AllEditActionsOnPopUp = yes # Alternatively, you can set AllEditActionsOnPopUp = no and specify a list of smart tools (screened by active element) to appear. AllEditActionsOnPopUp = yes PopUpEditActions = ["Assign_Value","AdjustValue_Down","AdjustValue_Up","Smooth"] # Defines the Smart Tools to be displayed on the Grid Manager button-3 pop up menu. #GridManagerEditActions = ['nameOfTool1', 'nameOfTool2', 'nameOfTool3'] # Define keyboard shortcuts. # You are allowed up to 200 shortcuts. # IMPORTANT: You should test your shortcuts on your system as many # keys are already bound by the system. For example, F10 is bound by some Tk # widgets to bring up menus. 
# Each shortcut is defined by a list with entries: # Shortcut key # State of ShortCut key # None # Ctrl (control key) # Alt (alt key) # Shift (shift key) # key states can be combined (e.g. Ctrl+Alt) # Action type: # EditTool # SmartTool # Procedure # Toggle # Name of the action. # # The possible EditTool actions are: # Sample # Pencil # Contour # MoveCopy # DrawEditArea # # The possible Toggle actions are: # ISC # TEGM (Temporal Editor/Grid Manager) # HorizVert (Horizontal/Vertical Display) # # Examples: # #ShortCut1 = ["F1", "None", "SmartTool","Assign_Value"] # F1 #ShortCut2 = ["NUMPAD_SUBTRACT", "None", "SmartTool","AdjustValue_Down"] # Keypad - #ShortCut3 = ["NUMPAD_ADD", "None", "SmartTool","AdjustValue_Up"] # Keypad + #ShortCut4 = ["F2", "None", "SmartTool","Smooth"] #ShortCut5 = ["F3", "None", "Procedure","ISC_Discrepancies"] #ShortCut6 = ["F5", "None", "EditTool", "Sample"] #ShortCut7 = ["F6", "None", "EditTool", "Contour"] #ShortCut8 = ["F7", "None", "EditTool", "Pencil"] #ShortCut9 = ["F8", "None", "EditTool", "MoveCopy"] #ShortCut10 = ["F9", "None", "EditTool", "DrawEditArea"] # #ShortCut11 = ["F5", "Alt", "EditTool", "Sample"] #ShortCut12 = ["F6", "Ctrl", "EditTool", "Contour"] #ShortCut13 = ["F7", "Shift", "EditTool", "Pencil"] # Defines the initial set of edit area groups to appear in the edit # area and query dialog. If not specified, the default is Misc. EditAreaGroups = ['Misc'] #------------------------------------------------------------------------ # Misc. Configuration #------------------------------------------------------------------------ # This list of Weather element names will be used to sort the GridManager. # Elements in this list will occur first. All others will be sorted # by name. 
GridManagerSortOrder = ['T', 'Td', 'RH', 'MaxT', 'MinT', 'MaxRH', 'MinRH', 'WindChill', 'HeatIndex', 'Wind', 'WindGust', 'FreeWind', 'TransWind', 'Sky', 'Wx', 'LAL', 'PoP', 'CWR', 'QPF', 'SnowAmt', 'StormTotalSnow', 'SnowLevel', 'MaxTAloft', 'WetBulb', 'Hazards', 'FzLevel', 'Haines', 'MixHgt'] # This algorithm determines the sorting order of weather elements in the # Grid Manager, Samples, and Spatial Editor Legends. It contains of up to # 5 characters in the order of sort importance. The characters are: # 'm' for mutable, 'N' for parm name, 'M' for model name, 't' for model time, # and 'o' for model optional type. For example, "mNMto" will result in # the mutables first, then parm name, then model name, then model time, then # optional type. This means that all of the weather elements with the same # name from different models will be grouped together (except for the mutable). #GridManagerSortAlgorithm = "mNMto" # Auto Save Interval # The Auto Save Interval entry defines the interval in minutes that is # used to automatically save modified weather elements. AutoSaveInterval = 0 # This is the list of entries that appear on the Publish Dialog. The # entries are the names of the user-defined selection time ranges. The # order of entries on the dialog will match this list. PublishTimes = ['Today', 'Tonight', 'Tomorrow', 'Tomorrow Night', 'Day 3', 'Day 4', 'Day 5', 'Day 6', 'Day 7', 'Hour 0-240'] #Preselect a weather group to be loaded in the Publish Dialog #PublishDialogInitialWEGroup = "Public" # Interpolation Dialog defaults. By default, the dialog is shown # with a minimum interval and duration. This can be changed. If the # duration is specified, then the interval must also be specified. # The units are hours and must range between 1 and 24. #InterpolateDefaultInterval = 1 #InterpolateDefaultDuration = 1 # Create from Scratch Dialog defaults. By default, the dialog is shown # with a minimum interval and duration. This can be changed. 
If the # duration is specified, then the interval must also be specified. # The units are hours and must range between 1 and 24. #CreateScratchDefaultInterval = 1 #CreateScratchDefaultDuration = 1 # Defines the product file purge in hours #ProductPurgeHours = 6 #------------------------------------------------------------------------ # Map Background Configuration #------------------------------------------------------------------------ # Defines the initial loaded set of map backgrounds. The name of each # background should match the name (without ".xml") of a map file in the # CAVE/Bundles/maps directory under the Localization perspective. MapBackgrounds_default = ['States','CWA'] # Specific Colors for a map background # The user may specify a specific color to be used for a map background, # rather than getting a random color assigned. # Format is mapName_graphicColor = color. #States_graphicColor = 'green' # Specific Graphic Line Widths for a map # Default line widths can be set for each map background based on # map name. Zero is the default value, which represents thin lines. # The larger the number, the wider the line. The format is mapName_lineWidth. # Do not include a decimal point after these entries. #States_lineWidth = 1 # Specific Line Pattern definitions for a map # Default line patterns can be set up for each map background. The # possible strings are "SOLID", "DASHED", "DOTTED", "DASHED_DOTTED". The # values must be enclosed within quotes. The format is mapName_linePattern. #States_linePattern = "SOLID" # Specific Font Offsets for a map background. # The font offset (called magnification on the GFE menus) allows the # default font size to be increased or decreased on a per map basis. # Numbers can range from -2 through +2. Format is mapName_fontOffset. # Do not include a decimal point after these entries. 
#States_fontOffset = 0 #------------------------------------------------------------------------ # Graphics Hardware Configurations #------------------------------------------------------------------------ # # general default X resources can be set here. # # Fonts. These are the various fonts that the GFE uses. They can be # changed to increase/decrease the size of the text on the GFE. The # fonts are in ascending sizes. A better way to override the fonts # is to use the config items under UI Configuration. # A valid font data representation is a string of the form fontname-style-height # where fontname is the name of a font, # style is a font style (one of "regular", "bold", "italic", or "bold italic") # height is an integer representing the font height. # Example: Times New Roman-bold-36. TextFont0 = "DejaVu Sans Mono-regular-9" TextFont1 = "DejaVu Sans Mono-regular-9" TextFont2 = "DejaVu Sans Mono-bold-12" TextFont3 = "DejaVu Sans Mono-bold-14" TextFont4 = "DejaVu Sans Mono-bold-20" # The color which will be used as the background for all of the display # panes. bgColor = "black" #------------------------------------------------------------------------ # System Time Range Configuration #------------------------------------------------------------------------ # These parameters indicate the span of the Grid Manager and Temporal # Editor in relation to the current time. Units are in hours. If grids # are present, the displayable time range may be expanded by the software. SystemTimeRange_beforeCurrentTime = 48 SystemTimeRange_afterCurrentTime = 168 #------------------------------------------------------------------------ # UI Configuration #------------------------------------------------------------------------ # Defines the color and pattern used in the Grid Manager to indicate # a time selection. 
Selected_color = 'LightSkyBlue' Selected_fillPattern = 'TRANS_25PC_45DEG' # Defines the color and pattern of the time scale lines in the Grid # Manager and Temporal Editor TimeScaleLines_color = 'Blue' TimeScaleLines_pattern = 'DOTTED' # Defines the color, width, and pattern used for the editor time line # that runs through the Grid Manager and Temporal Editor EditorTimeLine_color = 'Yellow' EditorTimeLine_width = 2 EditorTimeLine_pattern = 'DOTTED' # Defines the color used by the Grid Manager to indicate the #current system time CurrentSystemTime_color = 'Green' # Defines the colors used in the Grid Manager to indicate that a # time period is locked by you, or by another person. LockedByMe_color = 'forestgreen' LockedByMe_pattern = 'WHOLE' LockedByOther_color = 'tomato2' LockedByOther_pattern = 'WHOLE' # Defines the visible, invisible, and active colors used in the Grid # Manager to indicate when a grid block is either visible, invisible, # and/or active. Defines the color used to indicate which grids # may be modified during an edit action.(Preview_color) TimeBlockVisible_color = 'White' TimeBlockActive_color = 'Yellow' TimeBlockInvisible_color = 'Gray50' TimeBlockPreview_color = 'Cyan' # Defines the color used to indicate the Edit Area on the spatial editor. ReferenceSet_color = 'Gray80' # Defines the border width used to indicate the Edit Area on the spatial editor ReferenceSet_width = 0 # Defines the initial horizontal size of the grid manager when first # starting the GFE in pixels. Do not place a decimal point after the number. TimeScale_horizSize = 350 # Initial Legend Mode. Can be GRIDS for all weather elements (default), # MUTABLE for just the Fcst weather elements, # ACTIVE for just the active weather element, MAPS for just the maps, # or SETIME for just the spatial editor time. LegendMode = 'GRIDS' # Initial Grid Manager Mode. Can be "Normal", "History", "Saved", # "Modified", "Published", or "Sent". Default is "Normal". 
InitialGMDisplayMode = 'Normal' # Defines the number of Edit Area Quick Set Buttons. Do not place a # decimal point after the buttons #QuickSetButtons = 4 # Sets the maximum number of menu items before the menu will cascade # with a 'More >'. Do not place a decimal point after the number. MaxMenuItemsBeforeCascade = 30 # Defines the percent that the office domain will be expanded for the # spatial editor full-screen view. The user can specify the expansion # for each of the four directions. If not specified, the default is 10%. OfficeDomain_expandLeft = 10 OfficeDomain_expandRight = 10 OfficeDomain_expandTop = 10 OfficeDomain_expandBottom = 10 # Initial location of Edit Action Dialog # These are absolute screen coordinates (not relative to GFE window) # To put Edit Action Dialog in lower left corner, set Ycoord to 600 #EditActionDialog_Xcoord = 99 #EditActionDialog_Ycoord = 74 # Initial layout up of Grid Manager/Temporal Editor: # Values: "OnTop" (default) # "OnLeft" #GM_TE_Layout = "OnTop" # Default setting for temporal editor weather elements. Choices are # ALL for all weather elements are displayed in the temporal # editor, ALL_NOISC is for all weather elements except ISC (intersite coord) # elements, MUTABLE for just the mutable weather elements (e.g., Fcst) # displayed in the temporal editor, VISIBLE (default) for all visible # elements in the grid manager and ACTIVE for just the single # active weather element to be displayed in the temporal editor. TemporalEditorWEMode = "VISIBLE" # Extra categories for the formatter launcher. # Products beginning with this name will get their own # cascade. #FormatterLauncherDialog_Categories = [] # Default setting for the Wx/Discrete Show Description option. Setting it # to True will enable the descriptions, setting it to False will disable the # descriptions. #WxDiscrete_Description = True # Default setting for the font and colors for the Product Output Dialog. 
#ProductOutputDialog_font = TextFont2 #ProductOutputDialog_fgColor = "#000000" #ProductOutputDialog_bgColor = "#d0d0d0" #ProductOutputDialog_wrapMode = 1 #default, if not listed in wrapPils, nonWrap ProductOutputDialog_wrapPils = [] ProductOutputDialog_nonWrapPils = ['AFM','PFM','FWF','SFT','WCN','FWS','TCV','HLS'] #ProductOutputDialog_wrapSize = 66 #ProductOutputDialog_lockColor = "blue" #ProductOutputDialog_frameColor = "red" # The initial size of the Call to action dialog (in pixels) #ProductOutputDialog_CTAWidth = 575 #ProductOutputDialog_CTAHeight = 300 # Default directory to use for the ProductOutputDialog editor when # {prddir} is not set in the product definition. #ProductEditorDirectory = '/tmp' #------------------------------------------------------------------------ # Process Monitor Options #------------------------------------------------------------------------ # # The maximum number of pending product scripts to queue. #ProcessMonitorMaxPendingScripts = 10 # The maximum number of finished product scripts to keep around (so you can # see their output). #ProcessMonitorMaxOldScripts = 5 # The maximum number of product scripts to run at one time (user can still # start more via the ProcessMonitorDialog). #ProcessMonitorMaxScripts = 1 # The maximum number of pending text formatters to queue. #ProcessMonitorMaxPendingFormatters = 10 # The maximum number of finished text formatters to keep around (so you can # see their output). #ProcessMonitorMaxOldFormatters = 5 # The maximum number of text formatters to run at one time (user can still # start more via the ProcessMonitorDialog). #ProcessMonitorMaxFormatters = 1 #------------------------------------------------------------------------ # Sample and Legend Colors, Sample Shadows #------------------------------------------------------------------------ # This section provides some control over the sample colors and # the image legend color. 
Normally these values are set to "white", # but might need to be changed if the background color for the drawing # panes color is changed. The sample shadow may also be turned on # or off. # Alternative sample color. This is used primarily for ifpIMAGE when # you want a specific color for the sample, rather than the default which # is the graphic color. Format is parmname_Sample_color = "color". # Note that this applies only if the data is displayed as a graphic. # T_Sample_color = "#ff0672" # Alternative legend color. This is used primarily for ifpIMAGE when # you want a specific color for the legend, rather than the default which # is the graphic color. Format is parmname_Legend_color = "color". # Note that this applies only if the data is displayed as a graphic. # T_Legend_color = "#ff0672" # Sample LatLon and + Color. This affects the color of the '+' drawing, # plus the color of the latitude/longitude samples on the spatial editor. # SampleLLPlus_color = "white" # Image Legend color. This affects the color of the legend when a weather # element is displayed as an image. This also affects the sample color # for weather element displayed as an image. #ImageLegend_color = "white" # Sample Shadows. The samples can have a shadow character written in # black offset from the sample text. This improves contrast when the # main sample color is light and the background color (e.g., image) is # also fairly light. Acceptable entries are yes and no. #ShowSampleShadows = yes # Sample Shadow Color. The color of the shadows defaults to black. # You can override this with any valid color. #SampleShadow_color = "#000000" # SampleLabelXOffset and SampleLabelYOffset are the number of pixels you # wish to move sample labels relative to their "normal" position. #SampleLabelXOffset = 0 #SampleLabelYOffset = 0 # Limiting Samples to Specific Weather Elements # Controls the weather elements that will be displayed as samples. 
# This feature is normally only used in conjunction with the creation # of PNG imagery. If not specified, then all visible weather elements # will have a sample value. #SampleParms = ['T', 'Wind'] # Using descriptive names instead of the pretty Wx strings for samples. # This set of parallel lists translate a pretty Wx string into a # more descriptive name for the sample labels. #AltWxSampleLabels_prettyWx = ['Sct RW-', 'Sct SW-'] #AltWxSampleLabels_label = ['Rain Showers', 'Snow Showers'] # ISC Update Time. The samples can show the ISC Update Time if in ISC mode. # Acceptable entries are yes and no. ShowISCUpdateTime = yes # ISC Site Id. The samples can show the ISC Site Id if in ISC mode. # Acceptable entries are yes and no. ShowISCSiteID = yes # Enable ISC Markers. ISC Markers are only shown # if ISC mode or an ISC grid is displayed. Acceptable entries are yes and no. ShowISCMarkers = yes # ISC Update Time for Marker. The sample markers can show the ISC # Update Time if in ISC mode. # Acceptable entries are yes and no. ShowISCUpdateTimeMarker = yes # ISC Site Id Marker. The sample markers can show the ISC Site Id # if in ISC mode. # Acceptable entries are yes and no. ShowISCSiteIDMarker = yes # ISC Official Symbol Marker. The sample markers can show the "P" symbol # for the # official database data or not. Acceptable entries are yes and no. ShowISCOfficialSymbolMarker = yes # ISC Official Symbol. The samples can show the "P" symbol for the # official database data or not. Acceptable entries are yes and no. ShowISCOfficialSymbol = yes # Spatial Editor Color Bar Label/Tick Colors # Controls the tick, foreground text colors for the labels, # and the foreground/background text colors for the pickup value. There # is a special set of colors for the Wx/Discrete (WEATHER/DISCRETE) values. 
#SEColorBar_tickColor = "white" #SEColorBar_fgTextColor = "white" #SEColorBar_fgPickUpColor = "white" #SEColorBar_bgPickUpColor = "black" #SEColorBar_fgWxPickUpColor = "white" #SEColorBar_bgWxPickUpColor = "purple" # Configure additional labels on the SE Color Bar for WEATHER. # The format is an array of strings which represent the ugly weather # string. #Wx_AdditionalColorBarLabels = [ \ # "<NoCov>:<NoWx>:<NoInten>:<NoVis>:<NoAttr>" ] #------------------------------------------------------------------------ # GFE Font Sizes #------------------------------------------------------------------------ # This section provides the user the capability of changing the font # sizes in various components of the GFE. The font numbers can range # from 0 through 4 with 0 being the smallest. # These font entries define the fonts used by various components of # the GFE. #ColorBarScale_font = 1 #ColorBarWxLabel_font = 2 #ColorBarPickUp_font = 3 #SESample_font = 2 #SEMarker_font = 3 #SELegend_font = 3 #TEDataSelector_font = 1 #TESample_font = 1 #TimeBlockLabel_font = 3 #TimeBlockSource_font = 1 #TimeScale_font = 2 #SetValueContLabel_font = 2 #SetValuePickUp_font = 3 # Defines the default labeling size on the Bounded Area display for # weather, the contour tool depiction font, the map background font, # and the contour labeling font. These fonts can also be modified on # a per-parm basis using the fontOffset capability defined in the # parameter configuration. #BoundedArea_font = 2 #Cline_font = 2 #Contour_font = 2 #------------------------------------------------------------------------ # Grid Manager Saved, Published, Sent configurations #------------------------------------------------------------------------ # Defines the colors and times used to color the Grid Manager when # in the last saved, last modified, last published, or last sent display mode. # parallel list of minutes and colors. 
If the last save, modified, # published, sendISC time is less than the given time (in minutes), # then that color is used to display the box. The default if the # last xxx time is greater than the final "minutes" in the list, is Gray75. Modified_minutes = [60, 180, 360, 720, 1440, 2880 ] Modified_colors = ["#0bc71e", "#60c7b8", "#417fc7", "#e17c10", "#ebdf00", "#e11a00"] Saved_minutes = [60, 180, 360, 720, 1440, 2880 ] Saved_colors = ["#0bc71e", "#60c7b8", "#417fc7", "#e17c10", "#ebdf00", "#e11a00"] Published_minutes = [60, 180, 360, 720, 1440, 2880 ] Published_colors = ["#0bc71e", "#60c7b8", "#417fc7", "#e17c10", "#ebdf00", "#e11a00"] Sent_minutes = [60, 120, 180, 240, 300, 360] Sent_colors = ["#0bc71e", "#60c7b8", "#417fc7", "#e17c10", "#ebdf00", "#e11a00"] #------------------------------------------------------------------------ # Grid Data History configuration #------------------------------------------------------------------------ # Defines the characters, colors, and patterns that will appear # in the Grid Manager grid blocks # to indicate the source, origin, and modification states. # # If the grid has been modified by me or someone else, the text in the # grid block and grid pattern is modified to that specified below: HistoryUserModText_Me = "m" #Text for modified by me HistoryUserModText_Other = "o" #Text for modified by other HistoryUserModPattern_Me = "TRANS_25PC_45DEG" #Pattern for mod by me HistoryUserModPattern_Other = "TRANS_25PC_135DEG" #Pattern for mod by other # The text in the grid block and the grid color will represent the origin: # Note that the user can also override the populated in the next section. HistoryOriginText_Populated = "P" HistoryOriginText_Calculated = "C" HistoryOriginText_Scratch = "S" HistoryOriginText_Interpolated = "I" HistoryOriginText_Other = "?" 
HistoryOriginColor_Populated = "wheat" HistoryOriginColor_Calculated = "red" HistoryOriginColor_Scratch = "magenta" HistoryOriginColor_Interpolated = "blue" HistoryOriginColor_Other = "gray75" # This next section applies to the text and the color of the grid blocks # that have an origin of Populated. The model determines the text and color. # The format of the color entry is HistoryModelColor_modelname. The format # of the text entry is: HistoryModelText_modelname. If a model is not # listed here, then the HistoryOriginText_Populated and # HistoryOriginColor_Populated is used. HistoryModelColor_gfsLR = '#30df10' HistoryModelColor_RAP40 = '#00ffff' HistoryModelColor_MAVMOS = '#e6c8a1' HistoryModelColor_GFSMOS = '#e6d8a1' HistoryModelColor_METMOS = '#e6b8a1' HistoryModelColor_MEXMOS = '#e6a8a1' HistoryModelColor_NAM80 = '#ffff52' HistoryModelColor_NAM95 = '#ffff52' HistoryModelColor_NAM40 = '#ff99ff' HistoryModelColor_NAM12 = '#ffcaa0' HistoryModelColor_GFS80 = 'pink' HistoryModelColor_GFS40 = 'pink' HistoryModelColor_GFS190 = 'pink' HistoryModelColor_GWW = '#a0a0ff' HistoryModelColor_HPCStn = '#d0d0a0' HistoryModelColor_HPCGrid = '#d0d0b0' HistoryModelColor_ISC = '#b43aee' HistoryModelColor_LAPS = '#b06b72' HistoryModelColor_HPCQPF = '#3dc9ff' HistoryModelColor_HPCGuide = '#3dc9ff' HistoryModelColor_RFCQPF = '#3bffb7' HistoryModelColor_Restore = '#e0a0ff' HistoryModelColor_DGEX = 'orange' HistoryModelColor_MOSGuide = '#e608ff' HistoryModelColor_OPCTAFBE = '#a0a0cc' HistoryModelColor_OPCTAFBSW = '#a0a0cc' HistoryModelColor_OPCTAFBNW = '#a0a0cc' HistoryModelColor_RTMA = '#a0522d' HistoryModelColor_NamDNG5 = '#808000' HistoryModelText_GFS80 = 'GFS' HistoryModelText_GFS40 = 'GFS' HistoryModelText_GFS190 = 'GFS' HistoryModelText_RAP40 = 'RUC' HistoryModelText_GFSMOS = 'GFSMOS' HistoryModelText_MEXMOS = 'MEXMOS' HistoryModelText_MAVMOS = 'MAVMOS' HistoryModelText_METMOS = 'METMOS' HistoryModelText_NAM80 = 'N80' HistoryModelText_NAM95 = 'N95' HistoryModelText_NAM40 = 'N40' 
HistoryModelText_NAM20 = 'N20' HistoryModelText_NAM12 = 'N12' HistoryModelText_gfsLR = 'gfsLR' HistoryModelText_HPCStn = 'HPCs' HistoryModelText_HPCGrid = 'HPCg' HistoryModelText_GWW = 'GWW' HistoryModelText_ISC = 'ISC' HistoryModelText_LAPS = 'LAPS' HistoryModelText_HPCQPF = 'HPCQPF' HistoryModelText_HPCGuide = 'HPCGuide' HistoryModelText_RFCQPF = 'RFCQPF' HistoryModelText_Restore = 'Restore' HistoryModelText_DGEX = 'DGEX' HistoryModelText_MOSGuide = 'GMOS' HistoryModelText_OPCTAFBE = 'OPC' HistoryModelText_OPCTAFBSW = 'OPC' HistoryModelText_OPCTAFBNW = 'OPC' HistoryModelText_RTMA = 'RTMA' HistoryModelText_NamDNG5 = 'Nd5' #------------------------------------------------------------------------ # Algorithm Configuration #------------------------------------------------------------------------ # Smart tools can access time-weighted averages of multiple grids. Since # weather is discrete, time weighted average for weather is based on # all values of weather at that grid point as long as they occupy at least # the given percentage of all grids. Do not place a decimal point after # the number. SignificantWeatherTimeWeightAverage_percent = 40 # The default width of the pencil tool can be specified in grid cells # on a per weather element basis. The format is parmName_pencilWidth. # If not specified, the value defaults to 4. #T_pencilWidth = 4 # Pencil Tool influence sizes are specified here PencilToolInfluence_list = [1, 2, 4, 8, 12, 16] # Smooth algorithm default value SmoothSize = 3 # Smooth Size Choices SmoothSizeList = [3, 5, 7, 9] # User can control the interpolation algorithm for each weather element. # The format of the string is parmName_interpolateAlgorithm. The available # options, which must be quoted, are "CUBIC_ADVECT", "LINEAR_ADVECT", # "CUBIC_NOADVECT", and "LINEAR_NOADVECT". By default, most elements use # CUBIC_NOADVECT, except for Wx, PoP, Sky, and QPF which use CUBIC_ADVECT. # Wind and Wx cannot be changed. 
# T_interpolateAlgorithm = "CUBIC_NOADVECT" #------------------------------------------------------------------------ # Menu and Dialog Configuration #------------------------------------------------------------------------ # Entries allow the specification of the zoom factor (click 1) over the # Pickup Value Dialog. There is only one zoom step. If not specified, # the default is set to a zoom factor of 4. You can also specify specific # zoom factors based on the parameter name. # SetValue_zoom is the generic zoom value. parmName_SetValue_zoom is # the parameter-specific zoom value. Do not place a decimal point # after the numbers. SetValue_zoom = 4 QPF_SetValue_zoom = 10 # The maximum value on the Set Delta Dialog may be set on a # per weather element basis. Format is weName_MaxDeltaDialogValue. # The floating-point entry requires a decimal point in the value. # The default is 20% of the weather element data range. #Sky_MaxDeltaDialogValue = 30.0 # The default value of the Interpolate Dialog mode may be set to # either "Gaps" or "Edited", which refer to "By Gaps" and "Based on # Edited Data" on the dialog. The default if not specified is "By Gaps". #InterpolateDialogMode = "Gaps" #------------------------------------------------------------------------ # Weather Element Configuration #------------------------------------------------------------------------ # generic colors for graphics ----------------------------------- # These colors will be the colors assigned to the graphics, unless # specific colors are assigned to each parameter. Generic_colors = ['#00ff00', '#ff8e59', '#00ffff', '#e6c8a1', '#ffff52', '#ff99ff', '#aeb370', '#ff4000', '#e6c8a1'] # Specific Graphic Colors for a parameter # The user may specify a specific color to be used for a parameter, rather # than getting a random color assigned. This color will be assigned, if # available. Format is parmName_graphicColor = color. The color does # not need to be in the Generic_colors list. 
#T_graphicColor = 'green' Wx_graphicColor = '#ffffff' # Specific Graphic Line Widths for a parameter # Default line widths can be set for each weather element, which will # be used to draw their graphics on the spatial editor. 0 is the default # value, which represents thin lines. The larger the number, the wider # the line. The format is parmName_lineWidth. Do not include a decimal # point after these entries. #T_lineWidth = 1 # Specific Line Pattern definitions for a parameter. # Default line patterns can be set up for each weather element. The # possible strings are "SOLID", "DASHED", "DOTTED", "DASH_DOTTED". The # values must be enclosed within quotes. The format is parmName_linePattern. #T_linePattern = "SOLID" # Specific Font Offsets for a parameter. # The font offset (called magnification on the GFE menus) allows the # default font size to be increased or decreased on a per-parameter # basis. Note that for wind arrows/barbs, the fontOffset controls the # size of the wind arrows/barbs. Numbers can range from -2 through +2. # Format is parmName_fontOffset. Do not include a decimal point # after these entries. #T_fontOffset = 0 # Specific Density definitions for a parameter. # The density controls the packing of wind barbs and arrows for the vector # spatial editor displays, and the packing of contour intervals for the # scalar spatial editor displays. The default is zero. Densities and # contour values are related to each other. Typical values can range # from -2 through +2. You can use values outside of this range if # desired. Format is parmName_density. Do not include a # decimal point after these entries. #T_density = 0 # temporal editor sizes ----------------------------------------- # the initial size of temporal editor panes may be defined on a # per parameter basis. If not specified, the default is 150 pixels. # Format is: parmName_temporalDataPaneSize = size # Do not place a decimal point after the numbers. 
# Wx_temporalDataPaneSize = 200 # contour values ----------------------------------------------- # contour values may be defined on a per-parameter basis. If not # defined, then contour values are automatically computed. # Format is wxElementName_contourValues = [c1, c2, c3, c4, ... ] # Be sure to include decimal points in each entry. # This overrides any entries that may exist in contour interval. QPF_contourValues = [0.01, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.40, 0.50, 0.60, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0] Topography_contourValues = [5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 125.0, 150.0, 175.0, 200.0, 250.0, 300.0, 350.0, 400.0, 450.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 1250.0, 1500.0, 1750.0, 2000.0, 2500.0, 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0, 6500.0, 7000.0, 7500.0, 8000.0, 8500.0, 9000.0, 9500.0, 10000.0, 11000.0, 12000.0, 13000.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0] # contour intervals ----------------------------------------------- # contour intervals may be defined on a per-parameter basis. If not # defined, then contour values are automatically computed. # Format is wxElementName_contourInterval = value. # Be sure to include decimal points in the entry. # Note, you can also specify wxElementName_contourValues, which # will override the entry for contour interval. Sky_contourInterval = 10.0 PoP_contourInterval = 10.0 MinT_contourInterval = 5.0 MaxT_contourInterval = 5.0 T_contourInterval = 5.0 Td_contourInterval = 5.0 # delta values # Delta values define the default delta (adjust up, adjust down) value # for the adjust operations. The user can redefine this at any time # through the GUI. If not specified, the delta value defaults to # the precision value. For example, a precision of 0 indicates a delta of 1. # and a precision of 1 indicates a delta of 0.1. 
# Format is parmName_deltaValue = value. # Be sure to include a decimal point. #parmName_deltaValue = 10.0 FzLevel_deltaValue = 100.0 SnowLevel_deltaValue = 100.0 # fuzz values # fuzz values define the value considered to be the same during a # homogeneous area select using the GridPoint Tool. For example, if the # fuzz is 2.0 degrees for Temperature and you click on 40 degrees, then # all points between 38 and 42 will be selected as long as they are # contiguous to the click point. If not specified, the fuzz is set # to 1/100 of the parm range. Format is parmName_fuzzValue = value. # Be sure to include a decimal point. #parmName_fuzzValue = 10.0 # visual types # This section defines the spatial and temporal editor visualization # types for the scalar, vector, and weather parameters. There are two # modes, graphic and image. For example, the weather parameter may be # viewed as a bounded area and an image. Available types: # Spatial Editor Options: # Scalar: Image, Contour # Vector: Image, WindBarb, WindArrow # Weather: Image, BoundedArea # Discrete: Image, BoundedArea # Temporal Editor Options: # Scalar: TEColorBar, TEColorRangeBar, TimeBar, RangeBar # Vector: TEColorBar, TEColorRangeBar, TimeBar, RangeBar # Weather: TEColorBar, TEColorRangeBar # Discrete: TEColorBar, TEColorRangeBar # # format is: parmName_editorImageType = [ types ] or # parmName_editorGraphicType = [ types ] where 'editor' is replaced with # spatial or temporal. # For example, to make wind appear as wind arrows on the spatial editor # in graphic mode: Wind_spatialGraphicType = ["WindArrow"]. 
# Spatial editor visualization choices for discrete/weather/swell elements.
Wx_spatialImageType = [ "Image", "BoundedArea" ]
Headlines_spatialImageType = [ "Image", "BoundedArea" ]
Swell_spatialImageType = [ "Image", "WindArrow" ]
Swell2_spatialImageType = [ "Image", "WindArrow" ]
Swell_spatialGraphicType = [ "WindArrow" ]
Swell2_spatialGraphicType = [ "WindArrow" ]

# Bounded Area Visual attributes
# The user may turn on/off the boundary, and the text labels, for
# the bounded area visual. Allowable values are yes and no (or True and False).
# By default, then are both enabled.
#BoundedArea_Labels = yes
#BoundedArea_Boundary = yes

# Wind Barb and Arrow Default Sizes.
# The user may specify the default wind barb and arrow default sizes,
# for the GFE, or by the weather element name. The default size is 60
# pixels. The entry format for a particular weather element definition
# of arrow or barb size is parmName_windArrowDefaultSize and
# parmName_windBarbDefaultSize.
WindArrowDefaultSize = 60
WindBarbDefaultSize = 60
#Wind_windArrowDefaultSize = 60
#Wind_windBarbDefaultSize = 60

# Wind Arrow Scaling
# The user may specify the default scaling for the wind arrow. If not
# specified, then the wind arrows will grow linearly with an increase
# in magnitude. To emphasize the lower ranges, the user may set the
# wind arrow logarithmic scaling. The lower the number,
# the steeper the log curve will appear. Refer to on-line documentation
# for example values. Include decimal points with the numbers.
# Note that the factor needs to be greater than 0. The format of the
# entry is parmName_arrowScaling.
Wind_arrowScaling = 0.03
Swell_arrowScaling = 0.001
Swell2_arrowScaling = 0.001

# Wind Sample Format
# The user may specify the default sample format for vector weather elements.
# If not specified, then the format is "ddff". The user may specify a format
# for all vector elements, or can specify the format for a particular weather
# element. The four possible formats are "ddff", "8pt", "16pt", and "d/f".
# The configuration entry for the default sample format for all vector # elements is WindFormat = "type". The entry format to define the format for # a specific entry is parmName_windFormat = "type". WindFormat = "ddff" #Swell_windFormat = "8pt" # Default Values (for create from scratch) # The default values for SCALAR, VECTOR, WEATHER, and DISCRETE may be # specified on a per-weather element basis. By default, SCALAR has the # weather element's minimum value, VECTOR has a magnitude and direction of 0, # WEATHER has <NoWx>, and DISCRETE has the first defined discrete key # (always <None> for Hazards grids, user-defined DISCRETE grids may vary). # Format of the entry is parmName_defaultValue, or parmName_level_defaultValue # for non-surface based SCALAR, WEATHER, or DISCRETE elements. For VECTOR, # the format is slightly different: parmName_magDefaultValue has the # magnitude, and parmName_dirDefaultValue has the direction in degrees. # A decimal point is required for SCALAR and VECTOR, strings are required for # WEATHER and DISCRETE. # #T_defaultValue = 32.0 #Wx_defaultValue = "<NoCov>:<NoWx>:<NoInten>:<Novis>:" #Wind_dirDefaultValue = 90.0 #------------------------------------------------------------------------ # Weather/Discrete Common Value Definitions #------------------------------------------------------------------------ # the following describes common types that appear on the temporal # editor popup menu and the spatial editor color bar popup menu. # For WEATHER, the format is the "ugly" string of the Weather Key. For # DISCRETE, the format is the key string of the Discrete Key. # Prefixing an string with other strings that end with a vertical # bar (|) will make these strings in a cascade, # such as "Winter|Wide:S:--:<NoVis>:<NoAttr>", # which will put the widespread snow under a Winter cascade. The format # of this entry is parmName_commonValues, and applies to Weather and # Discrete only. 
# Common weather values shown on the temporal editor / color bar popup menus.
Wx_commonValues = [ \
    "<NoCov>:<NoWx>:<NoInten>:<NoVis>:<NoAttr>",
    "Wide:R:-:<NoVis>:<NoAttr>",
    "Wide:S:--:<NoVis>:<NoAttr>",
    "Wide:R:-:<NoVis>:<NoAttr>^Wide:S:-:<NoVis>:<NoAttr>",
    "Sct:RW:-:<NoVis>:<NoAttr>",
    "Sct:SW:-:<NoVis>:<NoAttr>",
    "Sct:T:<NoInten>:<NoVis>:<NoAttr>^Sct:RW:-:<NoVis>:<NoAttr>",
    "Patchy:F:<NoInten>:<NoVis>:<NoAttr>"]

# Common hazard phensigs, presented as Watches/Warnings/Advisories/Statements
# cascades on the Hazards popup menus.
Hazards_commonValues = [ \
    "Watches|Fire Weather|FW.A",
    "Watches|Hydrology|FF.A",
    "Watches|Hydrology|FA.A",
    "Watches|Coastal Flooding|CF.A",
    "Watches|Coastal Flooding|LS.A",
    "Watches|Marine|GL.A",
    "Watches|Marine|HF.A",
    "Watches|Marine|SE.A",
    "Watches|Marine|SR.A",
    "Watches|Marine|UP.A",
    "Watches|Non-Precipitation|EH.A",
    "Watches|Non-Precipitation|FZ.A",
    "Watches|Non-Precipitation|HW.A",
    "Watches|Non-Precipitation|HZ.A",
    "Watches|Non-Precipitation|EC.A",
    "Watches|Winter Storm|WC.A",
    "Watches|Winter Storm|WS.A",
    "Warnings|Fire Weather|FW.W",
    "Warnings|Coastal Flooding|CF.W",
    "Warnings|Coastal Flooding|LS.W",
    "Warnings|Coastal Flooding|SU.W",
    "Warnings|Marine|MH.W",
    "Warnings|Marine|HF.W",
    "Warnings|Marine|GL.W",
    "Warnings|Marine|UP.W",
    "Warnings|Marine|SR.W",
    "Warnings|Marine|SE.W",
    "Warnings|Non-Precipitation|AF.W",
    "Warnings|Non-Precipitation|DU.W",
    "Warnings|Non-Precipitation|EH.W",
    "Warnings|Non-Precipitation|FZ.W",
    "Warnings|Non-Precipitation|HW.W",
    "Warnings|Non-Precipitation|HZ.W",
    "Warnings|Non-Precipitation|EC.W",
    "Warnings|Winter Storm|BZ.W",
    "Warnings|Winter Storm|IS.W",
    "Warnings|Winter Storm|LE.W",
    "Warnings|Winter Storm|WC.W",
    "Warnings|Winter Storm|WS.W",
    "Advisories|Marine|UP.Y",
    "Advisories|Marine|LO.Y",
    "Advisories|Marine|SC.Y",
    "Advisories|Marine|SW.Y",
    "Advisories|Marine|BW.Y",
    "Advisories|Marine|RB.Y",
    "Advisories|Marine|SI.Y",
    "Advisories|Marine|MF.Y",
    "Advisories|Marine|MS.Y",
    "Advisories|Marine|MH.Y",
    "Advisories|Coastal Flooding|CF.Y",
    "Advisories|Coastal Flooding|LS.Y",
    "Advisories|Coastal Flooding|SU.Y",
    "Advisories|Non-Precipitation|AS.O",
    "Advisories|Non-Precipitation|AS.Y",
    "Advisories|Non-Precipitation|AQ.Y",
    "Advisories|Non-Precipitation|DU.Y",
    "Advisories|Non-Precipitation|FG.Y",
    "Advisories|Non-Precipitation|SM.Y",
    "Advisories|Non-Precipitation|ZF.Y",
    "Advisories|Non-Precipitation|FR.Y",
    "Advisories|Non-Precipitation|HT.Y",
    "Advisories|Non-Precipitation|LW.Y",
    "Advisories|Non-Precipitation|AF.Y",
    "Advisories|Non-Precipitation|WI.Y",
    "Advisories|Winter Weather|WC.Y",
    "Advisories|Winter Weather|WW.Y",
    "Statements|Coastal Flooding|CF.S",
    "Statements|Coastal Flooding|LS.S",
    "Statements|Coastal Flooding|RP.S",
    "Statements|Marine|MA.S",
    ]

#------------------------------------------------------------------------
# Weather Dialog Default Values
#------------------------------------------------------------------------
# the following describes the intensity and coverage/probability defaults
# that appear in the Set Value dialog for Weather data. The format is
# the weather type (e.g., RW), followed by the keyword. The actual value
# is a string surrounded in quotes.

# Define the weather dialog default coverage/probabilities
R_defaultCoverage = "Wide"
RW_defaultCoverage = "Sct"
S_defaultCoverage = "Wide"
SW_defaultCoverage = "Sct"
T_defaultCoverage = "Sct"

# Define the weather dialog default intensities
R_defaultIntensity = "-"
RW_defaultIntensity = "-"
S_defaultIntensity = "-"
SW_defaultIntensity = "-"
L_defaultIntensity = "-"
ZR_defaultIntensity = "-"
ZL_defaultIntensity = "-"
IP_defaultIntensity = "-"

#------------------------------------------------------------------------
# Default Discrete Color Table Algorithm Configuration
#------------------------------------------------------------------------
# DiscreteOverlapPatterns are used for overlapping (non-exclusive)
# Discrete weather elements when two or more values are overlapping.
# Each entry denotes the fill pattern to use when it is overlapping
# another pattern.
# The available types are: WHOLE, WIDE, SCATTERED,
# WIDE_SCATTERED, ISOLATED, TRANS_25PC_45DEG, SELECTED_AREA, OCNL,
# LKLY, TRANS_25PC_135DEG, DUALCURVE, CURVE, VERTICAL, CROSS, HORIZONTAL,
# BIGCROSS. DiscreteOverlapPatterns are used for all discrete weather
# elements, unless a parmName_level_DiscreteOverlapPatterns is found.
#------------------------------------------------------------------------
DiscreteOverlapPatterns = ['TRANS_25PC_45DEG', 'TRANS_25PC_135DEG', 'CROSS']
#pName_level_DiscreteOverlapPatterns = ['pat1', 'pat2', 'pat3']

# DiscreteComplexColor is used when there aren't enough fill patterns
# defined for overlap. This color is used when a very complex overlapping
# situation occurs. DiscreteComplexColor applies to all discrete
# weather elements, unless a parmName_level_DiscreteComplexColor
# value is found. Default is "White".
#DiscreteComplexColor = 'White'
#pName_level_DiscreteComplexColor = 'color'

# DiscreteComplexPattern is used when there aren't enough fill patterns
# defined for overlap. This pattern is used when a very complex overlapping
# situation occurs. DiscreteComplexPattern applies to all discrete
# weather elements, unless a parmName_level_DiscreteComplexPattern
# value is found. Default is "SCATTERED".
#DiscreteComplexPattern = 'SCATTERED'
#pName_level_DiscreteComplexPattern = 'pattern'

#------------------------------------------------------------------------
# Default (non-weather) Color Table Algorithm Configuration
#------------------------------------------------------------------------
# The default color table is used for all parameters unless overridden in
# this configuration file. The left wavelength defines the left side
# value for the color in nanometers. 380 is roughly purple. The right
# wavelength defines the right side value for the color in nanometers.
# 650 is red. The number of colors indicate the number of color bins
# that will be used when the default color table is displayed.
# Use decimal points after the wavelengths, but not the numColors.
DefaultColorTable_leftWavelength = 380.0
DefaultColorTable_rightWavelength = 650.0
DefaultColorTable_numColors = 150

# color table default entries -----------------------------
# Entries can be made to define a default color table for a particular
# parameter. If a default color table is not defined for a parameter, then
# the spectrum defined in DefaultColorTable* will be used for the parameter.
# Entries are of the form parmName_defaultColorTable="colortablename".
# For example, if you want MaxT to always have a "Low-Enhanced" color table,
# then the entry would be as shown below.
# MaxT_defaultColorTable = "Low-Enhanced"
# You can determine the possible color tables that are on the system by
# displaying any scalar image and selecting "Change Color Table To".
RipProb_defaultColorTable="GFE/RipProb"
ErosionProb_defaultColorTable="GFE/RunupProbs"
OverwashProb_defaultColorTable="GFE/RunupProbs"
T_defaultColorTable="GFE/Mid Range Enhanced"
Td_defaultColorTable="GFE/Mid Range Enhanced"
MaxT_defaultColorTable="GFE/Mid Range Enhanced"
MinT_defaultColorTable="GFE/Mid Range Enhanced"
Sky_defaultColorTable="GFE/Cloud"
Wind_defaultColorTable="GFE/Low Range Enhanced"
Wind20ft_defaultColorTable="GFE/Low Range Enhanced"
PoP_defaultColorTable="GFE/ndfdPoP12"
QPF_defaultColorTable="GFE/Gridded Data"
Ttrend_defaultColorTable = "GFE/Discrepancy"
RHtrend_defaultColorTable = "GFE/Discrepancy"
Wetflag_defaultColorTable = "GFE/YesNo"
DeltaMinT_defaultColorTable = "GFE/Discrepancy"
DeltaMaxT_defaultColorTable = "GFE/Discrepancy"
DeltaWind_defaultColorTable = "GFE/Discrepancy"
DeltaSky_defaultColorTable = "GFE/Discrepancy"
DeltaPoP_defaultColorTable = "GFE/Discrepancy"

# Default Satellite weather element color tables
visibleEast_defaultColorTable = "Sat/VIS/ZA (Vis Default)"
ir11East_defaultColorTable = "Sat/IR/CIRA (IR Default)"
ir13East_defaultColorTable = "Sat/IR/CIRA (IR Default)"
ir39East_defaultColorTable = "Sat/IR/CIRA (IR Default)"
waterVaporEast_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
visibleCentral_defaultColorTable = "Sat/VIS/ZA (Vis Default)"
ir11Central_defaultColorTable = "Sat/IR/CIRA (IR Default)"
ir13Central_defaultColorTable = "Sat/IR/CIRA (IR Default)"
ir39Central_defaultColorTable = "Sat/IR/CIRA (IR Default)"
waterVaporCentral_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
visibleWest_defaultColorTable = "Sat/VIS/ZA (Vis Default)"
ir11West_defaultColorTable = "Sat/IR/CIRA (IR Default)"
ir13West_defaultColorTable = "Sat/IR/CIRA (IR Default)"
ir39West_defaultColorTable = "Sat/IR/CIRA (IR Default)"
waterVaporWest_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
VisibleE_defaultColorTable = "Sat/VIS/ZA (Vis Default)"
IR11E_defaultColorTable = "Sat/IR/CIRA (IR Default)"
IR13E_defaultColorTable = "Sat/IR/CIRA (IR Default)"
IR39E_defaultColorTable = "Sat/IR/CIRA (IR Default)"
WaterVaporE_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
FogE_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
VisibleC_defaultColorTable = "Sat/VIS/ZA (Vis Default)"
IR11C_defaultColorTable = "Sat/IR/CIRA (IR Default)"
IR13C_defaultColorTable = "Sat/IR/CIRA (IR Default)"
IR39C_defaultColorTable = "Sat/IR/CIRA (IR Default)"
WaterVaporC_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
FogC_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
VisibleW_defaultColorTable = "Sat/VIS/ZA (Vis Default)"
IR11W_defaultColorTable = "Sat/IR/CIRA (IR Default)"
IR13W_defaultColorTable = "Sat/IR/CIRA (IR Default)"
IR39W_defaultColorTable = "Sat/IR/CIRA (IR Default)"
WaterVaporW_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
FogW_defaultColorTable = "Sat/WV/Gray Scale Water Vapor"
Hazards_defaultColorTable = "GFE/Hazards"

# Start HTI entries
ProposedSS_defaultColorTable="GFE/w"
ProposedSSnc_defaultColorTable="GFE/w"
CollabDiffSS_defaultColorTable="GFE/diffSS"
InundationMax_defaultColorTable="GFE/Inundation"
InundationMax_maxColorTableValue = 30.0
InundationMax_minColorTableValue = 0.0
InundationMaxnc_defaultColorTable="GFE/Inundation"
InundationMaxnc_maxColorTableValue = 30.0
InundationMaxnc_minColorTableValue = 0.0
InundationTiming_defaultColorTable="GFE/Inundation"
InundationTiming_maxColorTableValue = 30.0
InundationTiming_minColorTableValue = 0.0
InundationTimingnc_defaultColorTable="GFE/Inundation"
InundationTimingnc_maxColorTableValue = 30.0
InundationTimingnc_minColorTableValue = 0.0
SurgeHtPlusTideMLLW_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideMLLW_maxColorTableValue = 30.0
SurgeHtPlusTideMLLW_minColorTableValue = 0.0
SurgeHtPlusTideMLLWnc_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideMLLWnc_maxColorTableValue = 30.0
SurgeHtPlusTideMLLWnc_minColorTableValue = 0.0
SurgeHtPlusTideMHHW_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideMHHW_maxColorTableValue = 30.0
SurgeHtPlusTideMHHW_minColorTableValue = 0.0
SurgeHtPlusTideMHHWnc_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideMHHWnc_maxColorTableValue = 30.0
SurgeHtPlusTideMHHWnc_minColorTableValue = 0.0
SurgeHtPlusTideNAVD_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideNAVD_maxColorTableValue = 30.0
SurgeHtPlusTideNAVD_minColorTableValue = 0.0
SurgeHtPlusTideNAVDnc_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideNAVDnc_maxColorTableValue = 30.0
SurgeHtPlusTideNAVDnc_minColorTableValue = 0.0
SurgeHtPlusTideMSL_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideMSL_maxColorTableValue = 30.0
SurgeHtPlusTideMSL_minColorTableValue = 0.0
SurgeHtPlusTideMSLnc_defaultColorTable="GFE/Inundation"
SurgeHtPlusTideMSLnc_maxColorTableValue = 30.0
SurgeHtPlusTideMSLnc_minColorTableValue = 0.0
prob34_defaultColorTable="GFE/TPCprob"
prob64_defaultColorTable="GFE/TPCprob"
pwsD34_defaultColorTable="GFE/TPCprob"
pwsD64_defaultColorTable="GFE/TPCprob"
pwsN34_defaultColorTable="GFE/TPCprob"
pwsN64_defaultColorTable="GFE/TPCprob"
pws34int_defaultColorTable="GFE/TPCprob"
pws64int_defaultColorTable="GFE/TPCprob"
FloodingRainThreat_defaultColorTable = "GFE/gHLS_new"
StormSurgeThreat_defaultColorTable = "GFE/gHLS_new"
TornadoThreat_defaultColorTable = "GFE/gHLS_new"
WindThreat_defaultColorTable = "GFE/gHLS_new"
# End HTI entries

# TopDownWx
MaxTAloft_defaultColorTable="WarmNoseTemp"
WetBulb_defaultColorTable="WetBulbTemp"

# Logarithmic Color Table Assignments
# By default, all color tables are linear. Certain parameters may lend
# themselves to a logarithmic color table. To enable a logarithmic
# color table for a parameter, an entry in the form of
# parmName_LogFactor=factor is required. The closer the value is to zero,
# the steeper the log curve will appear. Refer to on-line documentation
# for example values. Include decimal points with the numbers.
# Note that the factor needs to be greater than 0
QPF_LogFactor = 0.03
SnowAmt_LogFactor = 0.6

# Default Max/Min Ranges for Color Tables
# By default, all colors tables (except for WEATHER) are spread out over
# the range of the minimum to maximum weather element possible value, as
# defined by serverConfig.py. The initial range of the color table can
# be specified through these entries. The form of the two entries are:
# parmName_maxColorTableValue and parmName_minColorTableValue. These
# values are floats and MUST have a decimal point in them.
#T_maxColorTableValue = 120.0
#T_minColorTableValue = -30.0
WetBulb_maxColorTableValue = 50.0
WetBulb_minColorTableValue = 20.0

# Fit To Data Color Tables
# Automatic Fit To Data color tables can be set up for the initial set
# of data in a weather element. The form of the entry is:
# parmName_fitToDataColorTable. The fit to data overrides any
# specified max/min color table values. There are several algorithms
# available: "None", "All Grids", "Single Grid", "All Grids over Area",
# and "Single Grid over Area". The Single Grid options are not
# available for the GFE and only apply to the ifpIMAGE program.
# Note that the ifpIMAGE program can specify an edit area to use for # the "All Grids over Area" and "Single Grid over Area" algorithms. # See Png_fitToDataArea. For the GFE, the active edit area is used in # the fit to data scheme. #T_fitToDataColorTable = "None" # Configure the desired labels on the SE Color Bar on a per-parameter basis. # The format is parmName + "_ColorBarLabels". For example, the color bar # would be labeled at 10, 20, 40 & 60 for temperature with the following # entry. Note that the values need to be entered as floats for all parameters. # This is only used for SCALAR or VECTOR parameters. # For WEATHER or DISCRETE parameters, use parmName_additionalColorBarLabels. #T_ColorBarLabels = [10.00, 20.00, 40.00, 60.00] #------------------------------------------------------------------------ # Weather Color Algorithm Configuration #------------------------------------------------------------------------ # Color Tables for Weather are handled differently than scalar and # vector data. Coverages are denoted by fill patterns. Composite # types by colors. Complex weather of more than two coverages will # result in a solid fill pattern and can't be configured. # The WeatherCoverage_names and WeatherCoverage_fillPatterns indicate # the fill pattern used for a particular weather coverage or probability. # These are parallel lists. For example, if "Iso" coverage is in the 1st # entry of the list and ISOLATED appears in the first entry of the # fill patterns, the for Iso coverage, the fill pattern ISOLATED will # be used. WeatherCoverage_names = ["Iso", "Sct", "Num", "Wide", "Ocnl", "SChc", "Chc", "Lkly", "Def", "Patchy", "<NoCov>", "Areas", "Frq", "Brf", "Pds", "Inter"] WeatherCoverage_fillPatterns = ["WIDE_SCATTERED", "SCATTERED", "LKLY", "WIDE", "OCNL", "WIDE_SCATTERED", "SCATTERED", "LKLY", "WIDE", "CURVE", "WHOLE", "DUALCURVE", "OCNL", "OCNL", "OCNL", "OCNL"] # The weather type entries are generic entries without intensities. 
# Combinations are permitted. The WeatherType_names and WeatherType_colors # are parallel lists of names and colors. The default weather color table # algorithm looks at the weather type or combination of types, as listed # in the _names, and matches the list with the specified color. For # example, if T appears in the names as the first entry and brown2 appears # in the colors for the first entry, then for weather type T, the color # shown will be brown2. WeatherType_names = ["<NoWx>", "T", "R", "RW", "L", "ZR", "ZL", "S", "SW", "IP", "F", "H", "BS", "K", "BD", "SA", "LC", "FR", "AT", "TRW"] WeatherType_colors = ["Gray40", "red3", "ForestGreen", "ForestGreen", "CadetBlue1", "darkorange1", "goldenrod1", "Grey65", "Grey65", "plum1", "khaki4", "Gray75", "snow", "grey30", "Brown", "blue1", "coral1", "pale turquoise", "DeepPink", "red3"] # The weather type entries are specific entries that contain intensities. # Combinations are permitted. The WeatherTypeInten_names and # WeatherTypeInten_colors are parallel lists of names and colors. The # algorithm looks first at this list to find a specific type/intensity # match. If not found, then the algorithm looks in the WeatherType_names # and WeatherType_colors list for a match. If not found, then a generic # color is assigned. # The weather type with intensity entries are specific entries WeatherTypeInten_names = ["T+", "Rm", "R+", "RWm", "RW+"] WeatherTypeInten_colors = ["red1", "green", "green", "green", "green"] # Colors to use for weather which was not defined using any of the methods # found above. The colors in this list will be used before a "random" color # is chosen. 
WeatherGeneric_colors = ["Coral", "CadetBlue2", "Aquamarine", "DarkKhaki", "DodgerBlue", "IndianRed1", "PaleGreen", "MistyRose", "chartreuse3", "PapayaWhip"] #------------------------------------------------------------------------ # Preference Defaults #------------------------------------------------------------------------ # Default setting for changing the active grid to an image-type display. # This occurs when "Edit Grid" from the Grid Manager or setting a parameter # active from the legend. ImageOnActiveSE = yes # Default visibility setting for showing the time scale lines in the # Grid Manager and Temporal Editor TimeScaleLines = yes # Default visibility setting for showing the editor time lines in the # Grid Manager and Temporal Editor. The editor time line is always on # for the Time Scale. EditorTimeLines = yes # Default visibility setting for showing the split boundary or time # constraints in the Grid Manager and Temporal Editor for mutable parameters. SplitBoundaryDisplay = yes # Default setting for combining like parameters (same units) in the # temporal editor when loading parameters. TemporalEditorOverlay = yes # Default setting for temporal editor edits. Choices are absolute mode # or relative mode which is defined by "yes" or "no". TemporalEditorAbsoluteEditMode = no # Initial statistics mode for temporal editor range statistics dialog. # Choices are "ABSOLUTE", # "MODERATED", or "STANDARD_DEVIATION". TemporalEditorStatisticsMode = "ABSOLUTE" # Initial minimum and maximum values for scales on temporal editor range # statistics dialog in moderated and standard deviation operation modes # (dialog is not shown in absolute mode). Do NOT include a decimal point # for moderated mode values, you MUST include a decimal point for standard # deviation values. 
TemporalEditorStatisticsModeModeratedMin = 15
TemporalEditorStatisticsModeModeratedMax = 15
TemporalEditorStatisticsModeStandardDeviationMin = 1.0
TemporalEditorStatisticsModeStandardDeviationMax = 1.0

# Default setting for editing components of vector parameters. Choices
# are MAG, DIR, or BOTH.
WindEditMode = "BOTH"

# Default setting for automatic combining of existing weather/discrete and new
# weather/discrete when editing. For example, if the setting is yes
# and existing weather is Rain, then setting the value to Snow will result in
# a Rain/Snow mix.
WeatherDiscreteCombineMode = no

# Default setting for Missing Data Mode. Possible values are:
#   Stop: Stop execution of a smart tool if there is missing data.
#   Skip: Skip grids for which there is missing data.
#         A User Alert message will report which grids were skipped.
#   Create: Create grids to supply the missing data.
#           A User Alert message will report which grids were created.
MissingDataMode = "Stop"

# Default setting for showing the dialog box when the user attempts to
# edit grids when a selection time range is active. Editing grids when
# a selection time range is active may cause multiple grids to be
# edited.
ShowTimeRangeWarning = yes

# Default setting for showing the dialog box when the user attempts to
# edit grids without an edit area being set. The behavior is to edit
# the entire domain.
ShowEmptyEditAreaWarning = yes

# Specifies the default contour to grid algorithm. Can be set to
# "Contour Analyzer", "Internal SIRS Server"
ContourServer = "Contour Analyzer"

# The Contour Analyzer algorithm can run over a subsampled grid
# to improve performance. This is usually ok since the contour tool
# is mostly used where there is not much detail due to topography.
# The value of ContourSubSample is used to divide the x and y dimensions
# of the original grid to get the dimensions of the subsampled grid.
# So, setting ContourSubSample to 4 would cause the Contour Analyzer to # reduce a 400x400 grid to a 100x100 grid for contouring purposes. # This can greatly speed up the algorithm. Setting ContourSubSample to # 1 will cause no reduction. # The default value is 4. If ContourSubSample is set to a value less than # or equal to 0 then it will go back to 4. If it is set to a value large # enough to make the subsampled grid have an x or y dimension less than 5 # then it will be reduced so that the minimum dimension for x or y will be # 5. ContourSubSample = 4 # Specifies whether the selection time range will track the spatial # editor time when time stepping using the toolbar buttons or keyboard. SelectGridsWhenStepping = no # Default Time Scale Periods that are shown on the time scale. These # are names of the selection time ranges (SELECTTR). TimeScalePeriods = ['Today', 'Tonight', 'Tomorrow', 'Tomorrow Night', 'Day 3', 'Day 4', 'Day 5', 'Day 6', 'Day 7'] # Contour Tool drawing color. Defaults to "White" #ContourToolDrawing_color = "White" # Move/Copy, Pencil, SelectPoints drawing color. Defaults to "White" #Drawing_color = "White" #------------------------------------------------------------------------ # PNG Graphic Product Generation (ifpIMAGE program) #------------------------------------------------------------------------ # Defines what kind of files ifpIMAGE will produce. The default is # png. But you may also choose from the following list. Note that # these are case sensitive and only png, svg, and gif have been really # tested. [ 'png', 'pnm', 'gif', 'svg', 'ai', 'ps', # 'cgm', 'fig', 'pcl', 'hpgl', 'regis', # 'tek', 'meta' ] # #Png_fileType = 'ps' # Legends display mode - 0 for UTC, 1 for local time # Do not include a decimal point after the number. #Png_localTime = 1 # legend displays time in local or UTC (default to UTC) # You can set the height and width (in pixels) for the Png images. 
# It is only necessary to set one of these, as the other will # be calculated using the aspect ratio of your office domain. # Do not include decimal points after the numbers. # Both default to 400 #Png_height = 400 #Png_width = 400 # Name of the weather element which will be displayed as # an image in the png. If nothing is specified here, then all weather # elements will be displayed as a graphic. Topo may also be added # using the string "Topo" #Png_image = 'T' # Indicates that a snapshot time should be displayed instead of the valid time # of the grid. Png_snapshotTime = 0 # ifpIMAGE only # Default format of the snapshot time if the Png_snapshotTime = 1 #Png_legendFormat_Zulu_snapshot = "%b%d%H%MZ" # Default format of the snapshot itme if the Png_snapshotTime = 1 and # Png_localTime = 1 #Png_legendFormat_LT_snapshot = "%d %b %I:%M %p %Z" # Indicate if the Png image displayed should be smoothed (1 = smoothing # enabled, 0 = smoothing disabled). Note that smoothing will only apply # to scalar and vector images. Png_smoothImage = 0 # ifpIMAGE only # Alternate way of specifying the weather elements to be displayed. # If this entry is specified, then the DefaultGroup is ignored (for # ifpIMAGE). Format is a list of weather elements in a pseudo weather # element bundle formats, which consist # of "parmName_level:optType_modelName seq", where the # seq is normally -1 for singleton databases, 0 for model databases for # the most recent version, 1 for the prev. version of a model database. # If you wish, you may add Topo to this list. For it just use # the string "Topo" (none of the other nonsense is needed). #Png_parms = ['FzLevel_SFC:_Fcst -1', 'Sky_SFC:_Fcst -1', 'QPF_SFC:_Fcst -1'] # Ability to turn on/off legends for the graphic generation. Applies # only to graphic product generation and not GFE. Defaults to on # if not specified. Do not include a decimal point after the number. 
#Png_legend = 1 #1 for visible, 0 for invisible # Legend weather element name mode - SHORT for weather element name, # LONG for weather element descriptive name, ALT for alternate, # OFF for no name #Png_descriptiveWeName = 'SHORT' # Alternate weather element name. Png_descriptiveWeName must be set to ALT. # These entries define the weather element name to be displayed based # on the weather element name (e.g., T). The string # format is Png_wxelem_AltName. For example, Png_MaxT_AltName = "Highs" will # display "Highs" for the wx element name rather than MaxT or # Maximum Temperature. If not defined and ALT is set, then the weather # element name will be the 'SHORT' name. #Png_MaxT_AltName = "Highs" # Legend format for Pngs. See strftime(3) for time string formats # or ifpIMAGE documentation. If the duration, start time, or ending # time is not desired, then the entry should be set to "". There are # separate entries for Zulu and LocalTime. The duration formats # can use the %H (hours) %M (minutes) formats. Png_legendFormat_Zulu_dur = "" # ifpIMAGE only Png_legendFormat_Zulu_start = "%b %d %H%MZ to " # ifpIMAGE only Png_legendFormat_Zulu_end = "%b %d %H%MZ" # ifpIMAGE only Png_legendFormat_LT_dur = "" # ifpIMAGE only Png_legendFormat_LT_start = "%b %d %I:%M %p %Z to " # ifpIMAGE only Png_legendFormat_LT_end = "%b %d %I:%M %p %Z" # ifpIMAGE only # Png filename prefix # Specifies the prefix to be applied to all generated png imagery #Png_filenamePrefix = 'desiredPrefix' # Png filename format # Specifies the format to be used for the date/time string in the # generated png imagery. See strftime(3) for time string formats # or the ifpIMAGE documentation. Default is yyyymmdd_hhmm #Png_baseTimeFormat = '%Y%m%d_%H%M' #By default, png images are generated for each and every possible change #in the generated grids. 
 For example, if you are generating a grid for T
#and WaveHeight, and the T is a one hour grid and the WaveHeight a 6 hour
#grid, that starts at the same time (e.g., 12z), two different images will
#be generated. The first will have T and WaveHeight together and will be
#time stamped to 12z; the second will just have WaveHeight and will be time
#stamped to 13z. This is identical behavior to running the GFE with
#multiple visible weather elements.

#You can override this behavior for the creation of the Png imagery by
#specifying an interval for which to generate imagery. The interval is
#specified in hours. Setting the value to 6 will generate grids at 00z,
#06z, 12z and 18z, assuming there is data available to generate the imagery.
#The configuration line to set the generation to every 6 hours is:

#Png_interval = 6

#Png imagery intervals can be offset by the amount set in the
#Png_intervalOffset option. If the Png_intervalOffset is 1 and Png_interval = 6,
#(specified in hours) grids will be generated at 01z, 07z, 13z, etc.,
#assuming there is data available to generate the imagery. The default
#Png_intervalOffset is 0.

#Png_intervalOffset = 0

# If using fit to data for ifpIMAGE, and the option "All Grids over Area",
# or "Single Grid over Area" is enabled, then the ifpIMAGE program needs to
# know the name of the edit area.

#Png_fitToDataArea = "BOU"

# Add a "logo bar" to the bottom of each image.  If this flag is set to 1,
# then a bar containing the NOAA and NWS logos will be inserted at the bottom
# of the image.

#Png_logo = 0

# If Png_logo is enabled, then this can be set to a string you would
# like to have in the "logo bar".  The string will be centered in the bar.

#Png_logoString = ""

# If an alternate legend language is desired, then enter that here.
# Acceptable values are those defined in the locale command (part of Unix).
# Checked values are "spanish" and "french".

#Png_legendLanguage = "spanish"

# If set to 1, then the colorbar will not be rendered for images.
#Png_omitColorBar = 0 # Disables Automatic Zooming feature when ifpIMAGE clipping is enabled. # Default is that ifpIMAGE will automatically zoom. Set to yes or 1 to # disable automatic zooming. #Png_wholeDomain = 0 # Enables the creation of the PNG *.info files. Set to yes or 1 to enable # the creation. Set to no or 0 to disable the creation. #Png_infoFiles = 1 # Enables the special masking for ifpIMAGE to use the ISC grid data history # information. This is used when creating imagery with ISC data. Areas # not containing current ISC data will be blanked out. 0 for off, 1 for on. # This entry overrides the other masking. #Png_historyMask = 0 #------------------------------------------------------------------------ # INTERSITE COORDINATION #------------------------------------------------------------------------ # Moved to serverConfig/localConfig for OB8.3 #------------------------------------------------------------------------ # ZONE COMBINER CONFIGURATION #------------------------------------------------------------------------ # Specifies the height and width of the zone combiner. It can be resized # larger, but not smaller in the GFE. Defaults are 400 pixels #ZoneCombiner_height = 400 #ZoneCombiner_width = 400 # Specifies the zone combiner colors for the background, # and the no zone color, which is used when a zone is not included # in any combination. #ZoneCombiner_backgroundColor = 'gray40' #ZoneCombiner_noZoneColor = 'black' # If set true, then these options will be set when the zone combiner # starts for each product. #ZoneCombiner_LabelZones = False #ZoneCombiner_LabelGroups = True #------------------------------------------------------------------------ # PRODUCT GENERATION SCRIPTS #------------------------------------------------------------------------ # Product Generation Scripts appear under the product generation menu # on the GFE. 
# Product Generation Scripts appear under the product generation menu
# on the GFE.
#
# NOTE: each entry must be a separate, comma-terminated string.  Python
# silently concatenates adjacent string literals, so a missing comma
# merges two menu entries into one broken entry.  (A comma was missing
# after the "Official Grids to LDAD" entry, which swallowed the
# "Png Images" entry -- fixed below.)
Scripts = [
    "Ascii Grids...: " +
    "ifpAG -h {host} -r {port} -o {prddir}/AG/{ztime}.ag " +
    "-d {productDB} ",

    "Make and Send HTI:" +
    "xterm -e ssh px2f /awips2/GFESuite/hti/bin/make_hti.sh {site}",

    "Official Grids to LDAD: " +
    "ifpAG -h {host} -r {port} -o - -d {productDB} | gzip -9 > " +
    " /data/fxa/LDAD/ifp/Official/.incoming; " +
    "mv /data/fxa/LDAD/ifp/Official/.incoming /data/fxa/LDAD/ifp/Official/{ztime} &",  # BUGFIX: comma was missing here

    "Png Images...:" +
    "ifpIMAGE " +
    "-h {host} -c {entry:ConfigFile:imageTest1} -o {prddir}/IMAGE",

    "Send Grids to NDFD..:" +
    "sendGridsToNDFD.sh {site} &",

    "Send Point and Click Grids to Consolidated Web Farm..:" +
    "/awips2/GFESuite/bin/rsyncGridsToCWF_client.sh {site} &",
    ]

##  Note:  Please define TextProducts through
##  the DefineTextProducts dialog (Product Generation Menu)
##  within the GFE.

# Ordering Product Generation
# NOTE: 'ProductList' is not supported in AWIPS 2.
# Products will be listed in the order they appear in the list of Scripts above.

#------------------------------------------------------------------------
# Product Generation Script Notes
#
# Each script entry is a text string of the form:
#    "<Entry Name>: " +
#    "<command line script> "
#
# where:
#    <Entry Name> will appear in the Product Generation menu
#    <command line script> is the command line that will be submitted when
#        the script is chosen.
#
# The following variables can be used in scripts and the GFE will fill
# in the appropriate information before executing the script:
#
#    {host}       -- server hostname
#    {port}       -- server port
#    {site}       -- site identifier
#    {productDB}  -- product database -- this is the
#                    Official Database if it exists.
#                    Otherwise, it's the Mutable (Fcst) database.
# {SEstart}    -- Start of Spatial Editor time:
#                 format of all times: YYYYMMDD_HHMM
# {SEend}      -- Spatial Editor time plus one second
# {SelectedStart} -- Start of Selected Time range
# {SelectedEnd}   -- End of Selected Time range
# {time}       -- Current local time in format: YYYYMMDD_HHMM
# {ztime}      -- Current Zulu time in format: YYYYMMDD_HHMM
# {module:<module name>} -- The correct path of the module will
#                 be substituted in the command line.
#                 The module must have a .py extension.
# {home}       -- Substitutes the home GFESuite directory
#                 at runtime (may differ from local to server)
# {prddir}     -- Substitutes the product directory
#                 at runtime (may differ from local to server)
#
# Note that the directory {} values should be used, rather than hard-coding
# them, if you want to be able to run a process locally as well as remotely.
#
# If the following variables are used in a script,
# a dialog will appear for the user to make selections from a simple GUI
# before the script is executed:
#    {parmsMutable}  (Those listed in Forecast database)
#    {refsets}
#    {maps}
#    {databases}
#    {output file}
#    {output directory}
#    {startTime}
#    {endTime}
##
# Named Variable
# To have the user prompted for a named variable, use the following
# in your script:
#    {entry: <name of variable>: <default value>}
# For example, to have the user prompted for "width" use:
#    {entry: width: 350}
# in your script.
#
# Radio Button list of values
# To have the user prompted for a list of radiobutton variables, use
#    {entryButtons: <name of variable>: <list of values separated by commas>}
# E.g.
#    {entryButtons: ReportType: GeneralImages,CustomizedImages}
#
# Check Button list of values
# To have the user prompted for a list of checkbutton variables, use
#    {entryChecks: <name of variable>: <list of values separated by commas>}
# E.g.
# {entryChecks: EditAreas: Area1,Area2,Area3} # Edit Areas and Groups # If the name of the entryButtons or entryChecks is "EditAreas", # the system will accept edit area OR edit area group names. # The system will check for groups and will automatically expand # them to the appropriate areas # {entryChecks: EditAreas: Group1,Group2,Area3,Area4} # {entryButtons: EditAreas: Group1,Group2} # Scripts with Multiple Command Lines # To string multiple command lines together, use the following format for # your command line script: # "csh -c (<command line 1>; <command line 2>; <command line 3>)" #------------------------------------------------------------------------
class EvenIterator(object):
    """Iterator over the items of a sequence at even indices (0, 2, 4, ...)."""

    def __init__(self, collection):
        # Slice out every second item starting at index 0, then hold a
        # plain iterator over that slice.
        self.iter = iter(collection[0::2])

    def __iter__(self):
        # An iterator is its own iterable.
        return self

    def __next__(self):
        # Delegate to the underlying slice iterator; StopIteration
        # propagates naturally once the slice is exhausted.
        return next(self.iter)


if __name__ == "__main__":
    numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    for value in EvenIterator(numbers):
        print(value)
#
# PySNMP MIB module CISCO-UNIFIED-COMPUTING-FSM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-UNIFIED-COMPUTING-FSM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:59:37 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): this module is machine-generated by pysmi from the ASN.1
# MIB source named above.  Do not hand-edit; regenerate from the MIB.
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
TimeIntervalSec, CiscoNetworkAddress, CiscoInetAddressMask, Unsigned64, CiscoAlarmSeverity = mibBuilder.importSymbols("CISCO-TC", "TimeIntervalSec", "CiscoNetworkAddress", "CiscoInetAddressMask", "Unsigned64", "CiscoAlarmSeverity")
CucsManagedObjectDn, ciscoUnifiedComputingMIBObjects, CucsManagedObjectId = mibBuilder.importSymbols("CISCO-UNIFIED-COMPUTING-MIB", "CucsManagedObjectDn", "ciscoUnifiedComputingMIBObjects", "CucsManagedObjectId")
CucsFsmFsmStageStatus, = mibBuilder.importSymbols("CISCO-UNIFIED-COMPUTING-TC-MIB", "CucsFsmFsmStageStatus")
InetAddressIPv4, InetAddressIPv6 = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressIPv4", "InetAddressIPv6")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, IpAddress, Bits, Counter64, Unsigned32, Gauge32, iso, MibIdentifier, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Integer32, ObjectIdentity, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "IpAddress", "Bits", "Counter64", "Unsigned32", "Gauge32", "iso", "MibIdentifier", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Integer32", "ObjectIdentity", "TimeTicks")
MacAddress, RowPointer, TruthValue, TimeInterval, DisplayString, TimeStamp, TextualConvention, DateAndTime = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "RowPointer", "TruthValue", "TimeInterval", "DisplayString", "TimeStamp", "TextualConvention", "DateAndTime")
cucsFsmObjects = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63))
if mibBuilder.loadTexts: cucsFsmObjects.setLastUpdated('201601180000Z')
if mibBuilder.loadTexts: cucsFsmObjects.setOrganization('Cisco Systems Inc.')
# FSM status table: one row per managed-object FSM instance.
cucsFsmStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1), )
if mibBuilder.loadTexts: cucsFsmStatusTable.setStatus('current')
cucsFsmStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1), ).setIndexNames((0, "CISCO-UNIFIED-COMPUTING-FSM-MIB", "cucsFsmStatusInstanceId"))
if mibBuilder.loadTexts: cucsFsmStatusEntry.setStatus('current')
cucsFsmStatusInstanceId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 1), CucsManagedObjectId())
if mibBuilder.loadTexts: cucsFsmStatusInstanceId.setStatus('current')
cucsFsmStatusDn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 2), CucsManagedObjectDn()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusDn.setStatus('current')
cucsFsmStatusRn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 3), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusRn.setStatus('current')
cucsFsmStatusConvertedEpRef = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 4), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusConvertedEpRef.setStatus('current')
cucsFsmStatusDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 5), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusDescr.setStatus('current')
cucsFsmStatusName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 6), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusName.setStatus('current')
cucsFsmStatusObjectClassName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 7), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusObjectClassName.setStatus('current')
cucsFsmStatusRemoteEpRef = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 8), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusRemoteEpRef.setStatus('current')
cucsFsmStatusState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 719, 1, 63, 1, 1, 9), CucsFsmFsmStageStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cucsFsmStatusState.setStatus('current')
mibBuilder.exportSymbols("CISCO-UNIFIED-COMPUTING-FSM-MIB", cucsFsmStatusState=cucsFsmStatusState, cucsFsmStatusObjectClassName=cucsFsmStatusObjectClassName, cucsFsmObjects=cucsFsmObjects, cucsFsmStatusInstanceId=cucsFsmStatusInstanceId, cucsFsmStatusName=cucsFsmStatusName, cucsFsmStatusRn=cucsFsmStatusRn, cucsFsmStatusTable=cucsFsmStatusTable, cucsFsmStatusRemoteEpRef=cucsFsmStatusRemoteEpRef, cucsFsmStatusDn=cucsFsmStatusDn, cucsFsmStatusConvertedEpRef=cucsFsmStatusConvertedEpRef, cucsFsmStatusDescr=cucsFsmStatusDescr, PYSNMP_MODULE_ID=cucsFsmObjects, cucsFsmStatusEntry=cucsFsmStatusEntry)
# Print the first `numero` odd numbers (1, 3, 5, ...), one per line.
numero = int(input("Digite o valor de n:"))

for posicao in range(numero):
    # The k-th odd number (0-based) is 2*k + 1.
    print(2 * posicao + 1)
# Toy spam detector: the most frequent word across the sample mails is
# treated as the "spam word"; a mail is flagged if it contains that word.
data = [
    {'text': 'oh hi duuuude how r uy??check this 1xbet'},
    {'text': 'Dear Harry Potter, i am Frodo Baggins i represent 1xbet company.Best bet service'},
    {'text': 'wooooh yoow harry look at my jackpot 100000000$ at 1xbet service'},
    {'text': 'Harry , today i saw the man who looks like Hawkeye from Avengers on 100% and he dont use 1xbet service'},
]

final_mail = 'Hello Harry, my name is Maksim, Im still waiting for the letter from Hogwarts'

spam_word = ''   # most frequent word seen in the sample mails
q_spam = 0       # occurrence count of spam_word
database = []    # every word (lowercased) from every sample mail

for mail in data:
    # BUGFIX: the word list was previously bound to the name `str`,
    # shadowing the builtin; renamed to `words`.
    words = mail['text'].lower().split()
    database.extend(words)
    print(database)

# Pick the word with the strictly highest count; on ties the word seen
# first wins (the strict `>` keeps the earlier candidate).
for word in database:
    quantity = database.count(word)
    if quantity > q_spam:
        q_spam = quantity
        spam_word = word

# Substring check, so the spam word is found regardless of punctuation.
if spam_word in final_mail.lower():
    print('mail is not ok')
else:
    print('mail is ok')
# Fare calculator: R$ 0.50/km up to 200 km, R$ 0.45/km for longer trips.
dist = int(input("Qual é a distância da sua viagem? "))
print(f'Você está prestes a iniciar uma viagem de {dist:.2f} Km.')

# Trips over 200 km get the discounted per-km rate.
preco = dist * 0.5 if dist <= 200 else dist * 0.45

print(f'E o preço da sua passagem será de R$ {preco:.2f}.')
"""dots: dotfiles made easy """ __version__ = "0.0.1a0"
#
# PySNMP MIB module BAY-STACK-LLDP-EXT-DOT3-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BAY-STACK-LLDP-EXT-DOT3-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:35:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated by pysmi from the ASN.1 MIB source named
# above.  Do not hand-edit (the typos inside the description strings come
# from the source MIB); regenerate from the MIB instead.
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
lldpXdot3LocPowerEntry, lldpXdot3RemPowerEntry = mibBuilder.importSymbols("LLDP-EXT-DOT3-MIB", "lldpXdot3LocPowerEntry", "lldpXdot3RemPowerEntry")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Gauge32, Counter32, IpAddress, MibIdentifier, iso, NotificationType, Integer32, Bits, ModuleIdentity, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Gauge32", "Counter32", "IpAddress", "MibIdentifier", "iso", "NotificationType", "Integer32", "Bits", "ModuleIdentity", "Unsigned32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
bayStackMibs, = mibBuilder.importSymbols("SYNOPTICS-ROOT-MIB", "bayStackMibs")
bayStackLldpXDot3Mib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 5, 47))
bayStackLldpXDot3Mib.setRevisions(('2014-10-22 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: bayStackLldpXDot3Mib.setRevisionsDescriptions(('Ver 1: Initial version.',))
if mibBuilder.loadTexts: bayStackLldpXDot3Mib.setLastUpdated('201410220000Z')
if mibBuilder.loadTexts: bayStackLldpXDot3Mib.setOrganization('Avaya Inc.')
if mibBuilder.loadTexts: bayStackLldpXDot3Mib.setContactInfo('avaya.com')
if mibBuilder.loadTexts: bayStackLldpXDot3Mib.setDescription('This MIB module is an extension to the standard LLDP-EXT-DOT3 MIB.')
bsLldpXDot3Notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 47, 0))
bsLldpXDot3Objects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 47, 1))
bsLldpXdot3Config = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 1))
bsLldpXdot3LocalData = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2))
bsLldpXdot3RemoteData = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3))
# Local (PSE) PoE data; augments the standard lldpXdot3LocPowerEntry.
bsLldpXdot3LocPowerTable = MibTable((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2, 1), )
if mibBuilder.loadTexts: bsLldpXdot3LocPowerTable.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerTable.setDescription('This table contains one row per port of PSE PoE information on the local system known to this agent.')
bsLldpXdot3LocPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2, 1, 1), )
lldpXdot3LocPowerEntry.registerAugmentions(("BAY-STACK-LLDP-EXT-DOT3-MIB", "bsLldpXdot3LocPowerEntry"))
bsLldpXdot3LocPowerEntry.setIndexNames(*lldpXdot3LocPowerEntry.getIndexNames())
if mibBuilder.loadTexts: bsLldpXdot3LocPowerEntry.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerEntry.setDescription('Information about a particular port PoE information.')
bsLldpXdot3LocPowerType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("type2pse", 1), ("type2pd", 2), ("type1pse", 3), ("type1pd", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3LocPowerType.setReference('802.3at, Section 30.12.2')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerType.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerType.setDescription('A GET attribute that returns whether the local system is a PSE or a PD and whether it is Type 1 or Type 2.')
bsLldpXdot3LocPowerSource = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("primaryPs", 2), ("backupPs", 3), ("reserved", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3LocPowerSource.setReference('802.3at, Section 30.12.2')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerSource.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerSource.setDescription('A GET attribute indicating the PSE Power Sources of the local system. A PSE indicates whether it is being powered by a primary power source; a backup power source; or unknown. A value primaryPs(2) indicates that the device advertises its power source as primary. A value backupPs(3) indicates that the device advertises its power source as backup.')
bsLldpXdot3LocPowerPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("critical", 2), ("high", 3), ("low", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3LocPowerPriority.setReference('802.3at, Section 30.12.2')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerPriority.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3LocPowerPriority.setDescription('Reflects the PD power priority that is being advertised on this PSE port. If both locally configure priority and ldpXMedRemXPoEPDPowerPriority are available on this port, it is a matter of local policy which one takes precedence. This object reflects the active value on this port. If the priority is not configured or known by the PD, the value unknown(1) will be returned. A value critical(2) indicates that the device advertises its power Priority as critical, as per RFC 3621. A value high(3) indicates that the device advertises its power Priority as high, as per RFC 3621. A value low(4) indicates that the device advertises its power Priority as low, as per RFC 3621.')
bsLldpXdot3LocPDRequestedPowerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setUnits('tenth of watt').setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3LocPDRequestedPowerValue.setReference('802.3at, Section 30.12.2')
if mibBuilder.loadTexts: bsLldpXdot3LocPDRequestedPowerValue.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3LocPDRequestedPowerValue.setDescription('A GET attribute that returns the PD requested power value. For a PSE, it is the power value that the PSE mirrors back to the remote system. This is the PD requested power value that was used by the PSE to compute the power it has currently allocated to the remote system. It is expressed in units of 0.1 watts.')
bsLldpXdot3LocPSEAllocatedPowerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setUnits('tenth of watt').setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3LocPSEAllocatedPowerValue.setReference('802.3at, Section 30.12.2')
if mibBuilder.loadTexts: bsLldpXdot3LocPSEAllocatedPowerValue.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3LocPSEAllocatedPowerValue.setDescription('A GET attribute that returns the PSE allocated power value. For a PSE, it is the power value that the PSE has currently allocated to the remote system. The PSE allocated power value is the maximum input average power that the PSE wants the PD to ever draw under this allocation if it is accepted. It is expressed in units of 0.1 watts.')
# Remote-system PoE data; augments the standard lldpXdot3RemPowerEntry.
bsLldpXdot3RemPowerTable = MibTable((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3, 1), )
if mibBuilder.loadTexts: bsLldpXdot3RemPowerTable.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerTable.setDescription('This table contains information about the PoE device type as advertised by the remote system.')
bsLldpXdot3RemPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3, 1, 1), )
lldpXdot3RemPowerEntry.registerAugmentions(("BAY-STACK-LLDP-EXT-DOT3-MIB", "bsLldpXdot3RemPowerEntry"))
bsLldpXdot3RemPowerEntry.setIndexNames(*lldpXdot3RemPowerEntry.getIndexNames())
if mibBuilder.loadTexts: bsLldpXdot3RemPowerEntry.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerEntry.setDescription('Information about a particular port component.')
bsLldpXdot3RemPowerType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("type2pse", 1), ("type2pd", 2), ("type1pse", 3), ("type1pd", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3RemPowerType.setReference('802.3at, Section 30.12.3')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerType.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerType.setDescription('A GET attribute that returns whether the remote system is a PSE or a PD and whether it is Type 1 or Type 2.')
bsLldpXdot3RemPowerSource = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("pse", 2), ("reserved", 3), ("pseAndLocal", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3RemPowerSource.setReference('802.3at, Section 30.12.3')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerSource.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerSource.setDescription('A GET attribute that returns the power sources of the remote system. When the remote system is a PD, it indicates whether it is being powered by: a PSE and locall; locally only; by a PSE only; or unknown.')
bsLldpXdot3RemPowerPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("critical", 2), ("high", 3), ("low", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3RemPowerPriority.setReference('802.3at, Section 30.12.3')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerPriority.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3RemPowerPriority.setDescription('A GET operation returns the priority of the PD system received from the remote system. For a PD, this is the priority that the remote system has assigned to the PD.')
bsLldpXdot3RemPDRequestedPowerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setUnits('tenth of watt').setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3RemPDRequestedPowerValue.setReference('802.3at, Section 30.12.3')
if mibBuilder.loadTexts: bsLldpXdot3RemPDRequestedPowerValue.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3RemPDRequestedPowerValue.setDescription('A GET attribute that for a PSE returs the the PD requested power value received from the remote system. It is expressed in units of 0.1 watts.')
bsLldpXdot3RemPSEAllocatedPowerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 5, 47, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setUnits('tenth of watt').setMaxAccess("readonly")
if mibBuilder.loadTexts: bsLldpXdot3RemPSEAllocatedPowerValue.setReference('802.3at, Section 30.12.3')
if mibBuilder.loadTexts: bsLldpXdot3RemPSEAllocatedPowerValue.setStatus('current')
if mibBuilder.loadTexts: bsLldpXdot3RemPSEAllocatedPowerValue.setDescription('A GET attribute that for a PSE returns the PSE allocated power value that was used by the remote system to compute the power value that it has currently requested from the PSE. It is expressed in units of 0.1 watts.')
mibBuilder.exportSymbols("BAY-STACK-LLDP-EXT-DOT3-MIB", bsLldpXdot3RemoteData=bsLldpXdot3RemoteData, bsLldpXdot3LocPowerSource=bsLldpXdot3LocPowerSource, bsLldpXdot3RemPowerSource=bsLldpXdot3RemPowerSource, bayStackLldpXDot3Mib=bayStackLldpXDot3Mib, bsLldpXDot3Objects=bsLldpXDot3Objects, bsLldpXdot3RemPowerType=bsLldpXdot3RemPowerType, bsLldpXdot3RemPowerPriority=bsLldpXdot3RemPowerPriority, bsLldpXdot3RemPSEAllocatedPowerValue=bsLldpXdot3RemPSEAllocatedPowerValue, PYSNMP_MODULE_ID=bayStackLldpXDot3Mib, bsLldpXdot3LocPowerEntry=bsLldpXdot3LocPowerEntry, bsLldpXdot3LocPDRequestedPowerValue=bsLldpXdot3LocPDRequestedPowerValue, bsLldpXdot3LocalData=bsLldpXdot3LocalData, bsLldpXdot3LocPowerType=bsLldpXdot3LocPowerType, bsLldpXdot3Config=bsLldpXdot3Config, bsLldpXdot3RemPowerEntry=bsLldpXdot3RemPowerEntry, bsLldpXDot3Notifications=bsLldpXDot3Notifications, bsLldpXdot3RemPowerTable=bsLldpXdot3RemPowerTable, bsLldpXdot3LocPowerPriority=bsLldpXdot3LocPowerPriority, bsLldpXdot3LocPowerTable=bsLldpXdot3LocPowerTable, bsLldpXdot3RemPDRequestedPowerValue=bsLldpXdot3RemPDRequestedPowerValue, bsLldpXdot3LocPSEAllocatedPowerValue=bsLldpXdot3LocPSEAllocatedPowerValue)
class proxy(ref):
    """Callable weak reference that forwards calls to its referent.

    Behaves like ``ref`` (presumably ``weakref.ref`` imported at module
    level -- TODO confirm) except that calling the proxy invokes the
    referent instead of merely returning it.
    """

    def __call__(self, *args, **kwargs):
        # Dereference first: ref.__call__ returns None once the referent
        # has been garbage-collected.
        func = ref.__call__(self)
        if func is None:
            raise weakref.ReferenceError('referent object is dead')
        else:
            return func(*args, **kwargs)

    def __eq__(self, other):
        # Equal only when `other` is exactly the same proxy type and both
        # currently dereference to equal referents (dead refs compare as
        # None == None, i.e. equal).
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3, unlike plain weakref.ref objects.
        if type(other) != type(self):
            return False
        return ref.__call__(self) == ref.__call__(other)
# Generated file, do not modify by hand

"""Definitions to be used in rbe_repo attr of an rbe_autoconf rule
"""

# NOTE(review): auto-generated placeholder -- every spec below is empty
# until the generator populates it; manual edits will be overwritten.

# All known toolchain config specs (none generated yet).
_TOOLCHAIN_CONFIG_SPECS = []

# Bazel version -> compatible config spec names.
_BAZEL_TO_CONFIG_SPEC_NAMES = {}

# Most recent container; empty string means none published yet.
LATEST = ""

# Container image -> config spec names it supports.
CONTAINER_TO_CONFIG_SPEC_NAMES = {}

# Config spec used when the caller does not request one explicitly.
_DEFAULT_TOOLCHAIN_CONFIG_SPEC = ""

# Aggregate consumed by rbe_autoconf's rbe_repo attribute.
TOOLCHAIN_CONFIG_AUTOGEN_SPEC = struct(
    bazel_to_config_spec_names_map = _BAZEL_TO_CONFIG_SPEC_NAMES,
    container_to_config_spec_names_map = CONTAINER_TO_CONFIG_SPEC_NAMES,
    default_toolchain_config_spec = _DEFAULT_TOOLCHAIN_CONFIG_SPEC,
    latest_container = LATEST,
    toolchain_config_specs = _TOOLCHAIN_CONFIG_SPECS,
)
# Price-list printer: `produtos` is a flat tuple alternating
# name, price, name, price, ...
produtos = ('Pão Francês', 11.58, 'Mortadela Perdigão', 6.34, 'Leite Jussara', 3.65,
            'Banana', 4.56, 'Ovo', 8.75, 'Iorgute', 4.89)

# ANSI escape sequences for terminal colors.
nada = '\033[m'         # reset attributes
titulo = '\033[01;34m'  # bold blue (used for the header/footer rules)
txt = '\033[34m'        # blue (not referenced below)

# Number of (name, price) pairs in the flat tuple.
tamanho = float(len(produtos)) / 2

print(f'\n{titulo :-<20} LISTA DE PREÇOS {nada :->15}')
for count in range(0, int(tamanho)):
    # First pair: name and price sit at indices 0 and 1 (printed with an
    # extra blank line and leading space).
    if count == 0:
        print(f' \n {produtos[count]:.<30} R$:{produtos[count+1]}\n')
    # Remaining pairs: name at count*2, its price immediately after.
    elif count != 0:
        print(f' {produtos[count * 2]:.<30} R$:{produtos[(count * 2)+ 1]:}\n')
print(f'{titulo :-<50}{nada}')
usuario = "Esteban" contrasena = "12345bla" print("inserte un nombre de usuario") n_usuario = input() print("inserte un su contraseña") n_contrasena = input() if usuario == n_usuario and contrasena == n_contrasena: print(usuario + "has iniciado sesion") else: print("usuario o contraseña incorrectas")
""" None """ class Solution: def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int: counter = {} start = 0 m_len = 0 for i, c in enumerate(s): if c not in counter: counter[c] = 1 else: counter[c] += 1 while len(counter.keys()) > k: counter[s[start]] -= 1 if counter[s[start]] == 0: del counter[s[start]] start += 1 m_len = max(m_len, i - start + 1) return m_len
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ @Desc : This module defines all codes, constants maintained globally \ and used across marvin and its test features.The main purpose \ is to maintain readability, maintain one common place for \ all codes used or reused across test features. It enhances \ maintainability and readability. Users just import statement \ to receive all the codes mentioned here. EX: Here, we define \ a code viz., ENABLED with value "Enabled",then using \ this code in a sample feature say test_a.py as below. \ from codes import * if obj.getvalue() == ENABLED @DateAdded: 20th October 2013 """ RUNNING = "Running" RECURRING = "RECURRING" ENABLED = "Enabled" NETWORK_OFFERING = "network_offering" ROOT = "ROOT" INVALID_INPUT = "INVALID INPUT" EMPTY_LIST = "EMPTY_LIST" FAIL = 0 PASS = 1 MATCH_NOT_FOUND = "ELEMENT NOT FOUND IN THE INPUT" SUCCESS = "SUCCESS" EXCEPTION_OCCURRED = "Exception Occurred" NO = "no" YES = "yes" FAILED = "FAILED" UNKNOWN_ERROR = "Unknown Error" EXCEPTION = "EXCEPTION" BASIC_ZONE = "basic" ISOLATED_NETWORK = "ISOLATED" SHARED_NETWORK = "SHARED" VPC_NETWORK = "VPC"
# Module-level sentinel: set True when the operator enters 'q' to quit.
loop_flag = False


def consult_check(consult, doctor, upper, lower):
    """Return True when a consult item would normally be expected.

    True for the three doctors who always bill a consult, or for
    Dr C Vickers when a pathology code is present in either scope field.
    NOTE(review): the `consult` parameter is accepted but never used --
    confirm whether it was meant to feed into the check.
    """
    doc_test = doctor in {'Dr A Wettstein', 'Dr S Ghaly', 'Dr S Vivekanandarajah'}
    # Pathology performed during the upper or lower scope?
    path = upper in {'pb', 'pp'} or lower in {'cb', 'cp', 'sb', 'sp'}
    cv_test = (doctor == 'Dr C Vickers') and path
    return doc_test or cv_test


def get_consult(doctor, upper, lower, loop_flag):
    """Prompt for a consult item number and return (consult, loop_flag).

    Accepts '110', '116', or '0' (mapped to 'none'); 'q' sets loop_flag
    True and aborts.  If no consult was entered but consult_check says one
    is expected, asks the operator to confirm and re-prompts.
    NOTE(review): the `loop_flag` parameter shadows the module-level name;
    the caller passes the global in and receives the updated value back.
    """
    while True:
        consult = input('Consult: ')
        if consult == '0':
            consult = 'none'
        if consult == 'q':
            loop_flag = True
            break
        if consult in {'110', '116', 'none'}:
            break
        print('TRY AGAIN!')
    # Double-check a missing/declined consult against the expected rules.
    if consult_check(consult, doctor, upper, lower) and loop_flag is False:
        print('Confirm with {} that he/she'
              ' does not want a consult'.format(doctor))
        while True:
            consult = input('Consult either 0,110,116: ')
            if consult == '0':
                consult = 'none'
            if consult in {'110', '116', 'none'}:
                break
    return consult, loop_flag


if __name__ == '__main__':
    print(get_consult('Dr A Wettstein', '0', 'co', loop_flag))
def insertion_sort(array):
    """Sort ``array`` in place in ascending order and return it.

    Classic insertion sort: grow a sorted prefix, inserting each new
    element by shifting the larger prefix elements one slot right and
    writing the element exactly once into its final position.  (The
    previous version re-wrote the moving value on every comparison,
    doing twice the necessary writes for the same result.)

    O(n^2) worst case, O(n) on already-sorted input; stable.
    """
    for index in range(1, len(array)):
        value = array[index]
        i = index - 1
        # Shift every prefix element greater than `value` one slot right.
        while i >= 0 and array[i] > value:
            array[i + 1] = array[i]
            i -= 1
        # Place the element once, just after the last shifted slot.
        array[i + 1] = value
    return array


if __name__ == '__main__':
    array = [0, 2, 1, 3, 6, 4, 5, 7, 9, 8]
    insertion_sort(array)
#!/usr/bin/env python3
# Navigation puzzle: drive a ship (part 1) or a ship-plus-waypoint
# (part 2) through a list of (instruction, value) steps, then report
# the Manhattan distance from the origin.

RIGHT = 'R'
LEFT = 'L'
FORWARD = 'F'
NORTH = 'N'
EAST = 'E'
SOUTH = 'S'
WEST = 'W'

# Unit movement vector (dx, dy) for each compass direction;
# +y is north and +x is east.
DIRECTIONS = {
    'N': (0, 1),
    'E': (1, 0),
    'S': (0, -1),
    'W': (-1, 0)
}

# Ship heading in degrees clockwise from north -> compass letter.
COMPASS = {
    0: 'N',
    90: 'E',
    180: 'S',
    270: 'W'
}


def parse(step):
    # 'F10' -> ('F', 10)
    return (step[0], int(step[1:]))


def load(file):
    # Read one (instruction, value) step per line of `file`.
    with open(file) as f:
        step = [parse(line.strip()) for line in f.readlines()]
    return step


def rotate(rotation, instruction, value):
    # Return the new ship heading (0..359 degrees) after turning
    # left/right by `value` degrees; only multiples of 90 are legal.
    if instruction != LEFT and instruction != RIGHT:
        raise Exception(f'Unknown rotation instruction: {instruction}')
    if value % 90 != 0:
        raise Exception(f'Invalid rotation ({value}). Rotations must be increments of 90.')
    direction = 0
    if instruction == LEFT:
        direction = -1
    else:
        direction = 1
    rotation += direction * value
    rotation %= 360
    return rotation


def rotate_wp(x, y, instruction, value):
    # Rotate the waypoint (x, y) about the ship.  A left turn is
    # rewritten as the complementary right turn so only the clockwise
    # cases need handling.
    if instruction != LEFT and instruction != RIGHT:
        raise Exception(f'Unknown rotation instruction: {instruction}')
    if instruction == LEFT:
        instruction = RIGHT
        value = -value
    value %= 360
    if value == 0:
        return x, y
    elif value == 90:
        # 90 degrees clockwise: (x, y) -> (y, -x)
        return y, -x
    elif value == 180:
        return -x, -y
    elif value == 270:
        return -y, x
    raise Exception(f'Invalid rotation ({value}). Rotations must be increments of 90.')


def part1(route):
    '''
    >>> part1(load('test1.txt'))
    25
    '''
    x = 0
    y = 0
    ship_rotation = 90  # start facing east
    for step in route:
        instruction = step[0]
        value = step[1]
        if instruction == RIGHT or instruction == LEFT:
            ship_rotation = rotate(ship_rotation, instruction, value)
        else:
            # F moves along the current heading; N/E/S/W move the ship
            # directly in that compass direction.
            direction = DIRECTIONS[COMPASS[ship_rotation]]
            if instruction != FORWARD:
                direction = DIRECTIONS[instruction]
            x += direction[0] * value
            y += direction[1] * value
    return abs(x) + abs(y)


def part2(route):
    '''
    >>> part2(load('test1.txt'))
    286
    '''
    x = 0
    y = 0
    # Waypoint position relative to the ship.
    wp_x = 10
    wp_y = 1
    for step in route:
        instruction = step[0]
        value = step[1]
        if instruction == FORWARD:
            # F moves the ship toward the waypoint `value` times.
            x += wp_x * value
            y += wp_y * value
        elif instruction == RIGHT or instruction == LEFT:
            (wp_x, wp_y) = rotate_wp(wp_x, wp_y, instruction, value)
        else:
            # N/E/S/W move the waypoint, not the ship.
            direction = DIRECTIONS[instruction]
            wp_x += direction[0] * value
            wp_y += direction[1] * value
    return abs(x) + abs(y)


def main():
    # Solve both parts against the real input; the asserts pin the
    # author's accepted answers as a regression check.
    route = load('input.txt')

    value = part1(route)
    print(f'Part 1: {value}')
    assert value == 2057

    value = part2(route)
    print(f'Part 2: {value}')
    assert value == 71504


if __name__ == '__main__':
    main()
def main():
    """Interactive Caesar-style encoder/decoder.

    Prompts for a mode ('E' encrypt / 'D' decrypt) and a message, then
    prints the message with every character's code point shifted by
    +85 (encrypt) or -85 (decrypt), modulo 128.

    Rewritten to pass values explicitly instead of the original
    module-level globals (``decis``, ``y``, ``list_length``); prompts
    and output are unchanged.
    """
    _SHIFT = 85  # additive key applied to each 7-bit character code

    def welcome():
        # Banner shown once at start-up.
        print("==================================================================================")
        print("Hi this is a Cryptograph encoder which u can use to Encrypt or Decrypt messages")
        print("==================================================================================")

    def get_msg_and_choice():
        # Ask for the mode and the message; returns (choice, message).
        decis = input("Please enter 'E' if you want to Encrypt a message or 'D' if you want to Decode the message: ").upper()
        print("==================================================================================")
        msg = input("Please Enter the message you want to encrypt or Decrypt: ")
        print("==================================================================================")
        return decis, msg

    def transform(msg, shift):
        # Shift every character's code point by `shift` modulo 128,
        # printing the result character-by-character on one line
        # (same output as the original code_it/decode_it pair).
        for ch in msg:
            print(chr((ord(ch) + shift) % 128), end="")

    welcome()
    decis, msg = get_msg_and_choice()
    if decis == 'D':
        print("This is the Decrypted Message Copy this")
        print("↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓")
        transform(msg, -_SHIFT)
    if decis == 'E':
        print("This is the Encrypted Message Copy this")
        print("↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓")
        transform(msg, _SHIFT)
    # Any other choice falls through silently, as before.


main()  # run at import/execution time, matching the original module
#!/usr/bin/python
# coding=utf-8


class RequestTmBase:
    """Abstract base for request-timing recorders.

    Every method is a required hook: each default implementation raises
    ``NotImplementedError`` so concrete subclasses must override it.
    """

    def addRecode(self, ssp, url, tmSpan, state, concurrency, countPer10s, size):
        """Record one request sample.  Must be overridden."""
        raise NotImplementedError

    def startRecode(self):
        """Begin recording.  Must be overridden."""
        raise NotImplementedError

    def startServer(self):
        """Start the backing server/collector.  Must be overridden."""
        raise NotImplementedError

    def endRecode(self):
        """Stop recording.  Must be overridden."""
        raise NotImplementedError
""" __version__.py ~~~~~~~~~~~~~~ Information about the current version of firemelon package. """ __title__ = "firemelon" __description__ = "firemelon — Simple Password Generator" __version__ = "1.1.4" __author__ = "evtn" __author_email__ = "g@evtn.ru" __license__ = "MIT License" __url__ = "https://github.com/evtn/firemelon"
""" Exceptions for the pystrike module. """ class ConnectionException(Exception): """ Raised when the client is unable to communicate with the indicated host. This exception could indicate that the client is unable to contact the indicated host, or it could indicate that the client was unable to send an HTTP request to the indicated host, or that the client was unable to receive an HTTP response from the indicated host. """ pass class ClientRequestException(Exception): """ Raised when the server returns a 4xx response. The library code shall include the content of the error message from Strike, if available. """ pass class ServerErrorException(Exception): """ Raised when the server returns a 5xx response. The library code shall include the content of the error message from Strike, if available. """ pass class UnexpectedResponseException(Exception): """ Raised when the server returns a response that the library does not understand. """ pass class ChargeNotFoundException(ClientRequestException): """ Raised when the server returns a 404 response. """ pass
class Solution:
    def sumEvenAfterQueries(self, A, queries):
        """For each (value, index) query, add `value` to A[index] and
        record the running sum of the even elements of A.

        The answers are written back over `queries` in place (same
        list-reuse trick as before) and that list is returned.
        """
        # Seed the running total with the evens already present.
        running_even = sum(x for x in A if x % 2 == 0)

        for pos, (delta, target) in enumerate(queries):
            before = A[target]
            after = before + delta
            # Account for the new value if it is even...
            if after % 2 == 0:
                running_even += after
            # ...and retire the old value if it was even.
            if before % 2 == 0:
                running_even -= before
            A[target] = after
            queries[pos] = running_even

        return queries
""" Objective In this challenge, we're going to use loops to help us do some simple math. Check out the Tutorial tab to learn more. Task Given an integer, N, print its first 10 multiples. Each multiple N * i (where 1 <= i <= 10) should be printed on a new line in the form: N x i = result. """ N = int(input().strip()) for i in range(1, 11): print('{0} x {1} = {2}'.format(N, i, N * i))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date   : 2022/2/21


class OperationLog:
    """Placeholder for an append-only write operation log.

    No behaviour yet: the class only reserves the name and documents
    the intent (append-only writes).
    """
""" Module: 'ucryptolib' on LEGO EV3 v1.0.0 """ # MCU: sysname=ev3, nodename=ev3, release=('v1.0.0',), version=('0.0.0',), machine=ev3 # Stubber: 1.3.2 class aes: '' def decrypt(): pass def encrypt(): pass
# -*- coding: utf-8 -*-
"""ciscosparkapi exception classes."""


__author__ = "Chris Lunsford"
__author_email__ = "chrlunsf@cisco.com"
__copyright__ = "Copyright (c) 2016 Cisco Systems, Inc."
__license__ = "MIT"


# Human-readable descriptions of the HTTP status codes returned by the
# Cisco Spark cloud APIs, used to build error messages below.
SPARK_RESPONSE_CODES = {
    200: "OK",
    204: "Member deleted.",
    400: "The request was invalid or cannot be otherwise served. An "
         "accompanying error message will explain further.",
    401: "Authentication credentials were missing or incorrect.",
    403: "The request is understood, but it has been refused or access is not "
         "allowed.",
    404: "The URI requested is invalid or the resource requested, such as a "
         "user, does not exist. Also returned when the requested format is "
         "not supported by the requested method.",
    409: "The request could not be processed because it conflicts with some "
         "established rule of the system. For example, a person may not be "
         "added to a room more than once.",
    500: "Something went wrong on the server.",
    503: "Server is overloaded with requests. Try again later."
}


class ciscosparkapiException(Exception):
    """Base class for all ciscosparkapi package exceptions."""

    def __init__(self, *args, **kwargs):
        super(ciscosparkapiException, self).__init__(*args, **kwargs)


class SparkApiError(ciscosparkapiException):
    """Errors returned by requests to the Cisco Spark cloud APIs.

    Attributes:
        response_code: integer HTTP status code returned by Spark.
        request: the request that triggered the error, if available.
        response: the raw response, if available.
        response_text: Spark's standard description of the code
            (set only when the code is a known Spark response code).
    """

    def __init__(self, response_code, request=None, response=None):
        # Validate explicitly rather than with `assert`, which is
        # silently stripped when Python runs with -O.
        if not isinstance(response_code, int):
            raise TypeError("response_code must be an int, not "
                            + type(response_code).__name__)
        self.response_code = response_code
        self.request = request
        self.response = response
        response_text = SPARK_RESPONSE_CODES.get(response_code)
        if response_text:
            # Known code: keep Spark's description for callers and
            # include it in the exception message.
            self.response_text = response_text
            error_message = "Response Code [{!s}] - {}".format(response_code,
                                                               response_text)
        else:
            error_message = "Response Code [{!s}] - " \
                            "Unknown Response Code".format(response_code)
        super(SparkApiError, self).__init__(error_message)
def get_layers(width, height, data):
    """Split the raw digit string into consecutive width*height layers."""
    layers = []
    layer_area = width*height
    data = [pixel for pixel in str(data)]
    # Drop the trailing newline read from the input file.
    # NOTE(review): remove() strips only the FIRST '\n' and raises
    # ValueError if none is present — assumes exactly one trailing
    # newline in the input; verify against the input file.
    data.remove('\n')
    while len(data) > 0:
        layers.append(data[:layer_area])
        data = data[layer_area:]
    return layers

WIDTH = 25
HEIGHT = 6

# Reads the puzzle input at import time.
layers = get_layers(WIDTH, HEIGHT, open('input').read())

# Part 1: on the layer with the fewest '0' digits, multiply the counts
# of '1' and '2' digits.
fewest_zero_layer = min(layers, key=lambda layer: layer.count('0'))
print('Part 1 solution: %i' % (fewest_zero_layer.count('1') * fewest_zero_layer.count('2')))

BLACK = '0'
WHITE = '1'
TRANS = '2'
# Glyph used to render each pixel colour; TRANS maps to None because a
# transparent pixel should never survive the flattening below.
COLORS = {
    WHITE: u'\u2588',
    BLACK: ' ',
    TRANS: None,
}

# Flatten the layer stack: for each pixel position, take the first
# non-transparent value looking from the top layer down.
imagedata = []
for index, pixel in enumerate(layers[0]):
    depth = 0
    while pixel == TRANS:
        depth += 1
        pixel = layers[depth][index]
    imagedata.append(pixel)

def get_image(width, height, imagedata):
    """Render the flattened pixels as a width x height block of glyphs."""
    output = ''
    for i, pixel in enumerate(imagedata):
        # Start a new row every `width` pixels.
        if i > 0 and i % width == 0:
            output += '\n'
        output += COLORS[pixel]
    return output

print(get_image(WIDTH, HEIGHT, imagedata))
# ============================================================================= # Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/ # FileName: 10494.py # Description: UVa Online Judge - 10494 # ============================================================================= while True: try: line = input() except EOFError: break if "/" in line: a, b = list(map(int, line.split("/"))) print(a // b) else: a, b = list(map(int, line.split("%"))) print(a % b)
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution:
    """Count downward tree paths whose values add up to a target.

    Strategy: collect every node with an in-order walk, then from each
    node count the downward paths summing to the target.
    """

    def __init__(self):
        self.tree_node = []   # every node collected by traversal()
        self.res = 0          # accumulated number of matching paths

    def pathSum(self, root, sum):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        self.traversal(root)
        for start in self.tree_node:
            self.res += self._pathSum(start, sum)
        return self.res

    def traversal(self, root):
        # In-order walk appending every node to self.tree_node.
        if not root:
            return
        self.traversal(root.left)
        self.tree_node.append(root)
        self.traversal(root.right)

    def _pathSum(self, root, sum):
        # Number of downward paths starting at `root` adding to `sum`.
        if not root:
            return 0
        if root.val == sum:
            # A hit; it may still be extended by zero-sum continuations.
            return 1 + self._pathSum(root.left, 0) + self._pathSum(root.right, 0)
        remaining = sum - root.val
        return self._pathSum(root.left, remaining) + self._pathSum(root.right, remaining)
# Sentinel larger than any 32-bit signed int; the initial
# "minimum seen so far" for the right-to-left sweep.
MAX_VAL = 2**31+1


def main():
    """Read a count then a list of ints from stdin and print how many
    elements are >= everything to their left and < everything to their
    right (duplicates resolved through the candidate set)."""
    _ = int(input())  # element count; the split line defines the data
    values = [int(tok) for tok in input().split()]

    # Left-to-right sweep: every running maximum is a pivot candidate.
    candidates = set()
    best = values[0]
    for v in values:
        if v >= best:
            candidates.add(v)
            best = v

    # Right-to-left sweep: confirm candidates that are also a strict
    # running minimum seen from the right.
    count = 0
    lowest = MAX_VAL
    for v in reversed(values):
        if v < lowest:
            if v in candidates:
                count += 1
            lowest = v

    print(count)


if __name__ == '__main__':
    main()
""" Data structures """ class SymmetryData: def __init__(self): self.number_of_species = -1 self.structure = None self.space_group = -1 self.point_group = -1
# Declarative spec of a tiny TensorFlow graph (roughly: add(matmul(x, w), y)
# with a randomly initialised weight matrix).  Each key names a node; each
# value records the op type ('__type__'), the nodes it depends on
# ('__pre__'), and the op's keyword arguments.  All values are strings —
# presumably evaluated / code-generated by a consumer elsewhere; verify
# against whatever reads this table.
one = {
    'r': {
        '__type__': 'tf.truncated_normal',
        '__pre__': [],
        'dtype': 'tf.float32',
        'shape': [2, 2],
        'name': '\'r\''
    },
    'w': {
        '__type__': 'tf.Variable',
        '__pre__': ['r'],
        'dtype': 'tf.float32',
        'initial_value': 'r',
        'name': '\'w\''
    },
    'x': {
        '__type__': 'tf.placeholder',
        '__pre__': [],
        'dtype': 'tf.float32',
        'shape': [1, 2],
        'name': '\'x\''
    },
    'dot': {
        '__type__': 'tf.matmul',
        '__pre__': ['w', 'x'],
        'a': 'x',
        'b': 'w',
        'name': '\'dot\''
    },
    'y': {
        '__type__': 'tf.placeholder',
        '__pre__': [],
        'dtype': 'tf.float32',
        'shape': [1, 2],
        'name': '\'y\''
    },
    'add': {
        '__type__': 'tf.add',
        '__pre__': ['dot', 'y'],
        'x': 'dot',
        'y': 'y',
        'name': '\'add\''
    }
}
"""Constants for pypresseportal.""" MEDIA_TYPES = ("image", "document", "audio", "video") PUBLIC_SERVICE_MEDIA_TYPES = ("image", "document") RESSORTS = ("wirtschaft", "politik", "sport", "kultur", "vermischtes", "finanzen") SECTORS = ( "arbeit", "auto", "banken", "bauwesen", "bildung", "celebrities", "chemie", "computer", "energie", "fernsehen", "fussball", "gesundheit", "handel", "immobilien", "kinder", "lebensmittel", "lifestyle", "logistik", "maschinenbau", "medien", "motorsport", "presseschau", "ratgeber", "recht", "soziales", "telekommunikation", "touristik", "umwelt", "unterhaltung", "versicherungen", "wissenschaft", ) INVESTOR_RELATIONS_NEWS_TYPES = ( "all", "adhoc", "vote", "nvr", "dd", "news", "tip", "report", "wpueg", "info", "ers", ) PUBLIC_SERVICE_REGIONS = ( "hh", "sh", "he", "sl", "bw", "ni", "bb", "nrw", "st", "by", "sn", "rp", "hb", "mv", "th", ) TOPICS = ( "auto-verkehr", "bau-immobilien", "fashion-beauty", "finanzen", "gesundheit-medizin", "handel", "medien-kultur", "netzwelt", "panorama", "people", "politik", "presseschau", "soziales", "sport", "tourismus-urlaub", "umwelt", "wirtschaft", "wissen-bildung", ) KEYWORDS = ( "agrar", "alternativeenergie", "arbeit", "armut", "arzneimittel", "atomenergie", "aussenpolitik", "auto", "bahn", "banken", "bau", "behinderte", "bekleidung", "bildung", "boerse", "buecher", "bundesliga", "bundesregierung", "bundeswehr", "chemie", "computer", "ecommerce", "energie", "erdbeben", "familie", "fernsehen", "film", "finanzen", "fluechtlinge", "formel1", "freizeit", "fussball", "gas", "gesellschaft", "gesundheit", "getraenke", "gewerkschaften", "globalisierung", "golf", "handball", "handel", "historisches", "hunger", "immobilien", "industrie", "innenpolitik", "internet", "jugendkriminalitaet", "jugendlicher", "justiz", "katastrophe", "kinder", "kleidung", "klimaveraenderung", "konflikte", "konjunktur", "konsumgueter", "kosmetik", "krankenhaus", "krankenversicherung", "krieg", "kriminalitaet", "kultur", "leichtathletik", 
"celebrities", "lifestyle", "luftverkehr", "luxusgueter", "maschinenbau", "medien", "medizin", "menschenrechte", "mode", "motorsport", "musik", "nahrungsmittel", "naturschutz", "oel", "olympia", "papier", "partei", "personalien", "pharmaindustrie", "politik", "presseschau", "radsport", "ratgeber", "religion", "rente", "schiffbau", "schifffahrt", "schule", "senior", "soziales", "sport", "steuern", "strom", "tabak", "telekommunikation", "tennis", "textil", "tier", "tourismus", "transport", "umwelt", "unterhaltung", "verbraucher", "verkehr", "verlag", "vermischtes", "verpackung", "versandhandel", "versicherung", "wahlen", "weltmeisterschaft", "werbung", "wirtschaft", "wissenschaft", )
# -*- coding: utf-8 -*- tag2struct = {u"#": "R_HEADER" ,u"£": "RDR_HEADER" ,u"µ": "RDR_DRAFT" }
# -*- coding:utf-8 -*- #保存文件地址 FILE_DIR = '/home/scrapy/wyzw/file'
# automatically generated by the FlatBuffers compiler, do not modify # namespace: zkinterface class Message(object): NONE = 0 Circuit = 1 R1CSConstraints = 2 Witness = 3
""" A set of PyQt distutils extensions for build qt ui files in a pythonic way: - build_ui: build qt ui/qrc files """ __version__ = '0.7.3'
# Desafio Python 27: read a full name from stdin and print it
# normalised (trimmed, upper-cased), plus the first and last names.
# Runs interactively at import time.
print('='*10, 'Desafio Python 27','='*50)
nome = str(input('Digite seu nome completo: '))
nome = nome.strip()   # drop surrounding whitespace
nome = nome.upper()   # normalise to upper case
print('O seu nome completo é {}.'.format(nome))
nome = nome.split()   # now a list of name parts
print('O seu primeiro nome é {}.'.format(nome[0]))
print('O seu ultimo nome é {}.'.format(nome[-1]))
print('='*10, 'Desafio Python 27','='*50)
class Dawg (object):
    """Word graph supporting insertion, lookup and wildcard search.

    Despite the name, suffixes are never merged, so the structure is a
    plain trie: ``graph`` maps a node id to its list of
    ``(letter, child_id)`` edges and ``accepts`` maps accepting node
    ids to the word they spell.

    ``digraphs`` lists two-character sequences treated as one letter
    when tokenizing (e.g. "ch", "ll").
    """

    def __init__(self, digraphs=None):
        # The original default was a mutable `digraphs=[]`, shared
        # between every instance built with the default; normalise
        # None to a fresh list instead.
        self.root = self.index = 0
        self.digraphs = digraphs if digraphs is not None else []
        self.graph = {self.root: []}
        self.accepts = {}

    def tokenize(self, word):
        """Split ``word`` into letters, keeping digraphs as one token."""
        return self._rtokenize(word, [])

    def insert(self, word):
        """Add ``word`` to the graph."""
        self._rinsert(self.root, self.tokenize(word), word)

    def words(self):
        """Return every word inserted so far (a dict-values view)."""
        return self.accepts.values()

    def node(self, word):
        """Return the node id reached by ``word``, or None if absent."""
        return self._rnode(self.root, self.tokenize(word))

    def pivot_search(self, substring):
        """Return the letters that can replace the single '.' wildcard
        in ``substring`` so that the result is an inserted word."""
        results = []
        tokens = self.tokenize(substring)
        pivot = tokens.index('.')  # position of the wildcard token
        matches = self._rmatch_string(self.root, self.tokenize(substring), [])
        for m in matches:
            # Report the letter each matching word has at the pivot.
            results += [self.tokenize(m)[pivot]]
        return results

    def _rtokenize(self, word, tokens):
        # Consume one digraph or one character per step.
        # NOTE(review): assumes every digraph is exactly two characters
        # (the recursion skips word[2:]) — verify if longer digraphs
        # are ever configured.
        if len(word) == 0:
            return tokens
        digraph = [dg for dg in self.digraphs if word.startswith(dg)]
        if len(digraph) > 0:
            return self._rtokenize(word[2:], tokens + [digraph[0]])
        else:
            return self._rtokenize(word[1:], tokens + [word[0]])

    def _rinsert(self, node, word, orig):
        # Thread the token list `word` through the graph from `node`,
        # creating edges as needed; `orig` is the original string kept
        # for the accepting node.  The IndexError on word[0] signals
        # the end of the word (deliberate EAFP control flow).
        try:
            # Unzip the edge values (letters) and edge indices (targets).
            if len(self.graph[node]) > 0:
                letters, targets = map(lambda x: list(x), zip(*self.graph[node]))
            else:
                letters = targets = []
            if word[0] in letters:
                # The edge already exists: follow it.
                self._rinsert(targets[letters.index(word[0])], word[1:], orig)
            else:
                # Create a fresh node and an edge to it for this letter.
                self.index += 1
                self.graph[node].append((word[0], self.index))
                self.graph[self.index] = []
                self._rinsert(self.index, word[1:], orig)
        except IndexError:
            # Whole word consumed: mark this node as accepting.
            if node not in self.accepts:
                self.accepts[node] = orig

    def _rmatch_string(self, node, tokens, results):
        # Accumulate accepted words matching `tokens`, where the token
        # '.' matches any letter.
        if len(tokens) == 0:
            if node in self.accepts:
                results += [self.accepts[node]]
            return results
        letter = tokens[0]
        for e in self.graph[node]:
            if letter == e[0] or '.' == letter:
                results = self._rmatch_string(e[1], tokens[1:], results)
        return results

    def _rnode(self, node, word):
        # Walk the token list from `node`; None when a letter is absent.
        if len(word) == 0:
            return node
        for n in self.graph[node]:
            if n[0] == word[0]:
                return self._rnode(n[1], word[1:])
        return None
#!/usr/bin/env python3
#
#Copyright 2022 Kurt R. Brorsen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

def hf_energy(eri_ee_full, eri_ep_full, mo_fock_1e, mo_fock_1p, e_nocc, p_nocc):
    """Hartree-Fock total energy for a coupled electron-proton system.

    Accumulates, over the occupied electronic orbitals: twice the
    one-electron Fock diagonal, the Coulomb-minus-exchange two-electron
    terms, and minus the electron-proton terms (which would otherwise
    be double counted by using both the ee and pp Fock matrices); then
    adds the occupied proton Fock diagonal.  The accumulation order is
    identical to the previous implementation.
    """
    energy = 0.0
    for i in range(e_nocc):
        energy += 2 * mo_fock_1e[i, i]
        # Coulomb (2J) minus exchange (K) over occupied electron pairs.
        for j in range(e_nocc):
            energy += 2 * eri_ee_full[i, i, j, j] - eri_ee_full[i, j, i, j]
        # Remove the electron-proton double counting.
        for j in range(p_nocc):
            energy -= 2 * eri_ep_full[i, i, j, j]
    for i in range(p_nocc):
        energy += mo_fock_1p[i, i]
    return energy
''' The MIT License (MIT) Copyright (c) 2016 WavyCloud Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' def batch_get_named_query(NamedQueryIds=None): """ Returns the details of a single named query or a list of up to 50 queries, which you provide as an array of query ID strings. Requires you to have access to the workgroup in which the queries were saved. Use ListNamedQueriesInput to get the list of named query IDs in the specified workgroup. If information could not be retrieved for a submitted query ID, information about the query ID submitted is listed under UnprocessedNamedQueryId . Named queries differ from executed queries. Use BatchGetQueryExecutionInput to get details about each unique query execution, and ListQueryExecutionsInput to get a list of query execution IDs. 
See also: AWS API Documentation Exceptions :example: response = client.batch_get_named_query( NamedQueryIds=[ 'string', ] ) :type NamedQueryIds: list :param NamedQueryIds: [REQUIRED]\nAn array of query IDs.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax{ 'NamedQueries': [ { 'Name': 'string', 'Description': 'string', 'Database': 'string', 'QueryString': 'string', 'NamedQueryId': 'string', 'WorkGroup': 'string' }, ], 'UnprocessedNamedQueryIds': [ { 'NamedQueryId': 'string', 'ErrorCode': 'string', 'ErrorMessage': 'string' }, ] } Response Structure (dict) -- NamedQueries (list) --Information about the named query IDs submitted. (dict) --A query, where QueryString is the list of SQL query statements that comprise the query. Name (string) --The query name. Description (string) --The query description. Database (string) --The database to which the query belongs. QueryString (string) --The SQL query statements that comprise the query. NamedQueryId (string) --The unique identifier of the query. WorkGroup (string) --The name of the workgroup that contains the named query. UnprocessedNamedQueryIds (list) --Information about provided query IDs. (dict) --Information about a named query ID that could not be processed. NamedQueryId (string) --The unique identifier of the named query. ErrorCode (string) --The error code returned when the processing request for the named query failed, if applicable. ErrorMessage (string) --The error message returned when the processing request for the named query failed, if applicable. 
Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'NamedQueries': [ { 'Name': 'string', 'Description': 'string', 'Database': 'string', 'QueryString': 'string', 'NamedQueryId': 'string', 'WorkGroup': 'string' }, ], 'UnprocessedNamedQueryIds': [ { 'NamedQueryId': 'string', 'ErrorCode': 'string', 'ErrorMessage': 'string' }, ] } :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException """ pass def batch_get_query_execution(QueryExecutionIds=None): """ Returns the details of a single query execution or a list of up to 50 query executions, which you provide as an array of query execution ID strings. Requires you to have access to the workgroup in which the queries ran. To get a list of query execution IDs, use ListQueryExecutionsInput$WorkGroup . Query executions differ from named (saved) queries. Use BatchGetNamedQueryInput to get details about named queries. See also: AWS API Documentation Exceptions :example: response = client.batch_get_query_execution( QueryExecutionIds=[ 'string', ] ) :type QueryExecutionIds: list :param QueryExecutionIds: [REQUIRED]\nAn array of query execution IDs.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax{ 'QueryExecutions': [ { 'QueryExecutionId': 'string', 'Query': 'string', 'StatementType': 'DDL'|'DML'|'UTILITY', 'ResultConfiguration': { 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, 'QueryExecutionContext': { 'Database': 'string' }, 'Status': { 'State': 'QUEUED'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED', 'StateChangeReason': 'string', 'SubmissionDateTime': datetime(2015, 1, 1), 'CompletionDateTime': datetime(2015, 1, 1) }, 'Statistics': { 'EngineExecutionTimeInMillis': 123, 'DataScannedInBytes': 123, 'DataManifestLocation': 'string', 'TotalExecutionTimeInMillis': 123, 'QueryQueueTimeInMillis': 123, 'QueryPlanningTimeInMillis': 
123, 'ServiceProcessingTimeInMillis': 123 }, 'WorkGroup': 'string' }, ], 'UnprocessedQueryExecutionIds': [ { 'QueryExecutionId': 'string', 'ErrorCode': 'string', 'ErrorMessage': 'string' }, ] } Response Structure (dict) -- QueryExecutions (list) --Information about a query execution. (dict) --Information about a single instance of a query execution. QueryExecutionId (string) --The unique identifier for each query execution. Query (string) --The SQL query statements which the query execution ran. StatementType (string) --The type of query statement that was run. DDL indicates DDL query statements. DML indicates DML (Data Manipulation Language) query statements, such as CREATE TABLE AS SELECT . UTILITY indicates query statements other than DDL and DML, such as SHOW CREATE TABLE , or DESCRIBE <table> . ResultConfiguration (dict) --The location in Amazon S3 where query results were stored and the encryption option, if any, used for query results. These are known as "client-side settings". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. OutputLocation (string) --The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/ . To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration . If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results . If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration . EncryptionConfiguration (dict) --If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS ) and key information. 
This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings . EncryptionOption (string) --Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3 ), server-side encryption with KMS-managed keys (SSE-KMS ), or client-side encryption with KMS-managed keys (CSE-KMS) is used. If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup\'s setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup. KmsKey (string) --For SSE-KMS and CSE-KMS , this is the KMS key ARN or ID. QueryExecutionContext (dict) --The database in which the query execution occurred. Database (string) --The name of the database. Status (dict) --The completion date, current state, submission time, and state change reason (if applicable) for the query execution. State (string) --The state of query execution. QUEUED indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. RUNNING indicates that the query is in execution phase. SUCCEEDED indicates that the query completed without errors. FAILED indicates that the query experienced an error and did not complete processing. CANCELLED indicates that a user input interrupted query execution. StateChangeReason (string) --Further detail about the status of the query. SubmissionDateTime (datetime) --The date and time that the query was submitted. CompletionDateTime (datetime) --The date and time that the query completed. 
Statistics (dict) --Query execution statistics, such as the amount of data scanned, the amount of time that the query took to process, and the type of statement that was run. EngineExecutionTimeInMillis (integer) --The number of milliseconds that the query took to execute. DataScannedInBytes (integer) --The number of bytes in the data that was queried. DataManifestLocation (string) --The location and file name of a data manifest file. The manifest file is saved to the Athena query results location in Amazon S3. The manifest file tracks files that the query wrote to Amazon S3. If the query fails, the manifest file also tracks files that the query intended to write. The manifest is useful for identifying orphaned files resulting from a failed query. For more information, see Working with Query Results, Output Files, and Query History in the Amazon Athena User Guide . TotalExecutionTimeInMillis (integer) --The number of milliseconds that Athena took to run the query. QueryQueueTimeInMillis (integer) --The number of milliseconds that the query was in your query queue waiting for resources. Note that if transient errors occur, Athena might automatically add the query back to the queue. QueryPlanningTimeInMillis (integer) --The number of milliseconds that Athena took to plan the query processing flow. This includes the time spent retrieving table partitions from the data source. Note that because the query engine performs the query planning, query planning time is a subset of engine processing time. ServiceProcessingTimeInMillis (integer) --The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query. WorkGroup (string) --The name of the workgroup in which the query ran. UnprocessedQueryExecutionIds (list) --Information about the query executions that failed to run. (dict) --Describes a query execution that failed to process. 
QueryExecutionId (string) --The unique identifier of the query execution. ErrorCode (string) --The error code returned when the query execution failed to process, if applicable. ErrorMessage (string) --The error message returned when the query execution failed to process, if applicable. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'QueryExecutions': [ { 'QueryExecutionId': 'string', 'Query': 'string', 'StatementType': 'DDL'|'DML'|'UTILITY', 'ResultConfiguration': { 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, 'QueryExecutionContext': { 'Database': 'string' }, 'Status': { 'State': 'QUEUED'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED', 'StateChangeReason': 'string', 'SubmissionDateTime': datetime(2015, 1, 1), 'CompletionDateTime': datetime(2015, 1, 1) }, 'Statistics': { 'EngineExecutionTimeInMillis': 123, 'DataScannedInBytes': 123, 'DataManifestLocation': 'string', 'TotalExecutionTimeInMillis': 123, 'QueryQueueTimeInMillis': 123, 'QueryPlanningTimeInMillis': 123, 'ServiceProcessingTimeInMillis': 123 }, 'WorkGroup': 'string' }, ], 'UnprocessedQueryExecutionIds': [ { 'QueryExecutionId': 'string', 'ErrorCode': 'string', 'ErrorMessage': 'string' }, ] } :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException """ pass def can_paginate(operation_name=None): """ Check if an operation can be paginated. :type operation_name: string :param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo'). 
""" pass def create_named_query(Name=None, Description=None, Database=None, QueryString=None, ClientRequestToken=None, WorkGroup=None): """ Creates a named query in the specified workgroup. Requires that you have access to the workgroup. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide . See also: AWS API Documentation Exceptions :example: response = client.create_named_query( Name='string', Description='string', Database='string', QueryString='string', ClientRequestToken='string', WorkGroup='string' ) :type Name: string :param Name: [REQUIRED]\nThe query name.\n :type Description: string :param Description: The query description. :type Database: string :param Database: [REQUIRED]\nThe database to which the query belongs.\n :type QueryString: string :param QueryString: [REQUIRED]\nThe contents of the query with all query statements.\n :type ClientRequestToken: string :param ClientRequestToken: A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another CreateNamedQuery request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString , an error is returned.\n\nWarning\nThis token is listed as not required because AWS SDKs (for example the AWS SDK for Java) auto-generate the token for users. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the action will fail.\n\nThis field is autopopulated if not provided.\n :type WorkGroup: string :param WorkGroup: The name of the workgroup in which the named query is being created. :rtype: dict ReturnsResponse Syntax { 'NamedQueryId': 'string' } Response Structure (dict) -- NamedQueryId (string) -- The unique ID of the query. 
Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'NamedQueryId': 'string' } :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException """ pass def create_work_group(Name=None, Configuration=None, Description=None, Tags=None): """ Creates a workgroup with the specified name. See also: AWS API Documentation Exceptions :example: response = client.create_work_group( Name='string', Configuration={ 'ResultConfiguration': { 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, 'EnforceWorkGroupConfiguration': True|False, 'PublishCloudWatchMetricsEnabled': True|False, 'BytesScannedCutoffPerQuery': 123, 'RequesterPaysEnabled': True|False }, Description='string', Tags=[ { 'Key': 'string', 'Value': 'string' }, ] ) :type Name: string :param Name: [REQUIRED]\nThe workgroup name.\n :type Configuration: dict :param Configuration: The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for encrypting query results, whether the Amazon CloudWatch Metrics are enabled for the workgroup, the limit for the amount of bytes scanned (cutoff) per query, if it is specified, and whether workgroup\'s settings (specified with EnforceWorkGroupConfiguration) in the WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration .\n\nResultConfiguration (dict) --The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation . 
If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results .\n\nOutputLocation (string) --The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/ . To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration . If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results . If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration .\n\nEncryptionConfiguration (dict) --If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings .\n\nEncryptionOption (string) -- [REQUIRED]Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3 ), server-side encryption with KMS-managed keys (SSE-KMS ), or client-side encryption with KMS-managed keys (CSE-KMS) is used.\nIf a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup\'s setting for encryption is used. 
It specifies whether query results must be encrypted, for all queries that run in this workgroup.\n\nKmsKey (string) --For SSE-KMS and CSE-KMS , this is the KMS key ARN or ID.\n\n\n\n\n\nEnforceWorkGroupConfiguration (boolean) --If set to 'true', the settings for the workgroup override client-side settings. If set to 'false', client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings .\n\nPublishCloudWatchMetricsEnabled (boolean) --Indicates that the Amazon CloudWatch metrics are enabled for the workgroup.\n\nBytesScannedCutoffPerQuery (integer) --The upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan.\n\nRequesterPaysEnabled (boolean) --If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide .\n\n\n :type Description: string :param Description: The workgroup description. :type Tags: list :param Tags: One or more tags, separated by commas, that you want to attach to the workgroup as you create it.\n\n(dict) --A tag that you can add to a resource. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize workgroups in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. The maximum tag key length is 128 Unicode characters in UTF-8. The maximum tag value length is 256 Unicode characters in UTF-8. 
You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource.\n\nKey (string) --A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique per resource.\n\nValue (string) --A tag value. The tag value length is from 0 to 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag values are case-sensitive.\n\n\n\n\n :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: {} :returns: (dict) -- """ pass def delete_named_query(NamedQueryId=None): """ Deletes the named query if you have access to the workgroup in which the query was saved. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide . See also: AWS API Documentation Exceptions :example: response = client.delete_named_query( NamedQueryId='string' ) :type NamedQueryId: string :param NamedQueryId: [REQUIRED]\nThe unique ID of the query to delete.\nThis field is autopopulated if not provided.\n :rtype: dict ReturnsResponse Syntax{} Response Structure (dict) -- Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: {} :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException """ pass def delete_work_group(WorkGroup=None, RecursiveDeleteOption=None): """ Deletes the workgroup with the specified name. The primary workgroup cannot be deleted. 
See also: AWS API Documentation Exceptions :example: response = client.delete_work_group( WorkGroup='string', RecursiveDeleteOption=True|False ) :type WorkGroup: string :param WorkGroup: [REQUIRED]\nThe unique name of the workgroup to delete.\n :type RecursiveDeleteOption: boolean :param RecursiveDeleteOption: The option to delete the workgroup and its contents even if the workgroup contains any named queries. :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: {} :returns: (dict) -- """ pass def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None): """ Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to\nClientMethod. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds) :type HttpMethod: string :param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model. """ pass def get_named_query(NamedQueryId=None): """ Returns information about a single query. Requires that you have access to the workgroup in which the query was saved. See also: AWS API Documentation Exceptions :example: response = client.get_named_query( NamedQueryId='string' ) :type NamedQueryId: string :param NamedQueryId: [REQUIRED]\nThe unique ID of the query. Use ListNamedQueries to get query IDs.\n :rtype: dict ReturnsResponse Syntax{ 'NamedQuery': { 'Name': 'string', 'Description': 'string', 'Database': 'string', 'QueryString': 'string', 'NamedQueryId': 'string', 'WorkGroup': 'string' } } Response Structure (dict) -- NamedQuery (dict) --Information about the query. Name (string) --The query name. 
Description (string) --The query description. Database (string) --The database to which the query belongs. QueryString (string) --The SQL query statements that comprise the query. NamedQueryId (string) --The unique identifier of the query. WorkGroup (string) --The name of the workgroup that contains the named query. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'NamedQuery': { 'Name': 'string', 'Description': 'string', 'Database': 'string', 'QueryString': 'string', 'NamedQueryId': 'string', 'WorkGroup': 'string' } } """ pass def get_paginator(operation_name=None): """ Create a paginator for an operation. :type operation_name: string :param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo'). :rtype: L{botocore.paginate.Paginator} ReturnsA paginator object. """ pass def get_query_execution(QueryExecutionId=None): """ Returns information about a single execution of a query if you have access to the workgroup in which the query ran. Each time a query executes, information about the query execution is saved with a unique ID. 
See also: AWS API Documentation Exceptions :example: response = client.get_query_execution( QueryExecutionId='string' ) :type QueryExecutionId: string :param QueryExecutionId: [REQUIRED]\nThe unique ID of the query execution.\n :rtype: dict ReturnsResponse Syntax{ 'QueryExecution': { 'QueryExecutionId': 'string', 'Query': 'string', 'StatementType': 'DDL'|'DML'|'UTILITY', 'ResultConfiguration': { 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, 'QueryExecutionContext': { 'Database': 'string' }, 'Status': { 'State': 'QUEUED'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED', 'StateChangeReason': 'string', 'SubmissionDateTime': datetime(2015, 1, 1), 'CompletionDateTime': datetime(2015, 1, 1) }, 'Statistics': { 'EngineExecutionTimeInMillis': 123, 'DataScannedInBytes': 123, 'DataManifestLocation': 'string', 'TotalExecutionTimeInMillis': 123, 'QueryQueueTimeInMillis': 123, 'QueryPlanningTimeInMillis': 123, 'ServiceProcessingTimeInMillis': 123 }, 'WorkGroup': 'string' } } Response Structure (dict) -- QueryExecution (dict) --Information about the query execution. QueryExecutionId (string) --The unique identifier for each query execution. Query (string) --The SQL query statements which the query execution ran. StatementType (string) --The type of query statement that was run. DDL indicates DDL query statements. DML indicates DML (Data Manipulation Language) query statements, such as CREATE TABLE AS SELECT . UTILITY indicates query statements other than DDL and DML, such as SHOW CREATE TABLE , or DESCRIBE <table> . ResultConfiguration (dict) --The location in Amazon S3 where query results were stored and the encryption option, if any, used for query results. These are known as "client-side settings". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. 
OutputLocation (string) --The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/ . To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration . If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results . If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration . EncryptionConfiguration (dict) --If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings . EncryptionOption (string) --Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3 ), server-side encryption with KMS-managed keys (SSE-KMS ), or client-side encryption with KMS-managed keys (CSE-KMS) is used. If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup\'s setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup. KmsKey (string) --For SSE-KMS and CSE-KMS , this is the KMS key ARN or ID. QueryExecutionContext (dict) --The database in which the query execution occurred. Database (string) --The name of the database. Status (dict) --The completion date, current state, submission time, and state change reason (if applicable) for the query execution. 
State (string) --The state of query execution. QUEUED indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. RUNNING indicates that the query is in execution phase. SUCCEEDED indicates that the query completed without errors. FAILED indicates that the query experienced an error and did not complete processing. CANCELLED indicates that a user input interrupted query execution. StateChangeReason (string) --Further detail about the status of the query. SubmissionDateTime (datetime) --The date and time that the query was submitted. CompletionDateTime (datetime) --The date and time that the query completed. Statistics (dict) --Query execution statistics, such as the amount of data scanned, the amount of time that the query took to process, and the type of statement that was run. EngineExecutionTimeInMillis (integer) --The number of milliseconds that the query took to execute. DataScannedInBytes (integer) --The number of bytes in the data that was queried. DataManifestLocation (string) --The location and file name of a data manifest file. The manifest file is saved to the Athena query results location in Amazon S3. The manifest file tracks files that the query wrote to Amazon S3. If the query fails, the manifest file also tracks files that the query intended to write. The manifest is useful for identifying orphaned files resulting from a failed query. For more information, see Working with Query Results, Output Files, and Query History in the Amazon Athena User Guide . TotalExecutionTimeInMillis (integer) --The number of milliseconds that Athena took to run the query. QueryQueueTimeInMillis (integer) --The number of milliseconds that the query was in your query queue waiting for resources. Note that if transient errors occur, Athena might automatically add the query back to the queue. 
QueryPlanningTimeInMillis (integer) --The number of milliseconds that Athena took to plan the query processing flow. This includes the time spent retrieving table partitions from the data source. Note that because the query engine performs the query planning, query planning time is a subset of engine processing time. ServiceProcessingTimeInMillis (integer) --The number of milliseconds that Athena took to finalize and publish the query results after the query engine finished running the query. WorkGroup (string) --The name of the workgroup in which the query ran. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'QueryExecution': { 'QueryExecutionId': 'string', 'Query': 'string', 'StatementType': 'DDL'|'DML'|'UTILITY', 'ResultConfiguration': { 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, 'QueryExecutionContext': { 'Database': 'string' }, 'Status': { 'State': 'QUEUED'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED', 'StateChangeReason': 'string', 'SubmissionDateTime': datetime(2015, 1, 1), 'CompletionDateTime': datetime(2015, 1, 1) }, 'Statistics': { 'EngineExecutionTimeInMillis': 123, 'DataScannedInBytes': 123, 'DataManifestLocation': 'string', 'TotalExecutionTimeInMillis': 123, 'QueryQueueTimeInMillis': 123, 'QueryPlanningTimeInMillis': 123, 'ServiceProcessingTimeInMillis': 123 }, 'WorkGroup': 'string' } } """ pass def get_query_results(QueryExecutionId=None, NextToken=None, MaxResults=None): """ Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide . This request does not execute the query but returns results. Use StartQueryExecution to run a query. 
To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location. See also: AWS API Documentation Exceptions :example: response = client.get_query_results( QueryExecutionId='string', NextToken='string', MaxResults=123 ) :type QueryExecutionId: string :param QueryExecutionId: [REQUIRED]\nThe unique ID of the query execution.\n :type NextToken: string :param NextToken: The token that specifies where to start pagination if a previous request was truncated. :type MaxResults: integer :param MaxResults: The maximum number of results (rows) to return in this request. :rtype: dict ReturnsResponse Syntax { 'UpdateCount': 123, 'ResultSet': { 'Rows': [ { 'Data': [ { 'VarCharValue': 'string' }, ] }, ], 'ResultSetMetadata': { 'ColumnInfo': [ { 'CatalogName': 'string', 'SchemaName': 'string', 'TableName': 'string', 'Name': 'string', 'Label': 'string', 'Type': 'string', 'Precision': 123, 'Scale': 123, 'Nullable': 'NOT_NULL'|'NULLABLE'|'UNKNOWN', 'CaseSensitive': True|False }, ] } }, 'NextToken': 'string' } Response Structure (dict) -- UpdateCount (integer) -- The number of rows inserted with a CREATE TABLE AS SELECT statement. ResultSet (dict) -- The results of the query execution. Rows (list) -- The rows in the table. (dict) -- The rows that comprise a query result table. Data (list) -- The data that populates a row in a query result table. (dict) -- A piece of data (a field in the table). VarCharValue (string) -- The value of the datum. ResultSetMetadata (dict) -- The metadata that describes the column structure and data types of a table of query results. ColumnInfo (list) -- Information about the columns returned in a query result metadata. (dict) -- Information about the columns in a query execution result. CatalogName (string) -- The catalog to which the query results belong. 
SchemaName (string) -- The schema name (database name) to which the query results belong. TableName (string) -- The table name for the query results. Name (string) -- The name of the column. Label (string) -- A column label. Type (string) -- The data type of the column. Precision (integer) -- For DECIMAL data types, specifies the total number of digits, up to 38. For performance reasons, we recommend up to 18 digits. Scale (integer) -- For DECIMAL data types, specifies the total number of digits in the fractional part of the value. Defaults to 0. Nullable (string) -- Indicates the column\'s nullable status. CaseSensitive (boolean) -- Indicates whether values in the column are case-sensitive. NextToken (string) -- A token to be used by the next request if this request is truncated. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'UpdateCount': 123, 'ResultSet': { 'Rows': [ { 'Data': [ { 'VarCharValue': 'string' }, ] }, ], 'ResultSetMetadata': { 'ColumnInfo': [ { 'CatalogName': 'string', 'SchemaName': 'string', 'TableName': 'string', 'Name': 'string', 'Label': 'string', 'Type': 'string', 'Precision': 123, 'Scale': 123, 'Nullable': 'NOT_NULL'|'NULLABLE'|'UNKNOWN', 'CaseSensitive': True|False }, ] } }, 'NextToken': 'string' } :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException """ pass def get_waiter(waiter_name=None): """ Returns an object that can wait for some condition. :type waiter_name: str :param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters. :rtype: botocore.waiter.Waiter """ pass def get_work_group(WorkGroup=None): """ Returns information about the workgroup with the specified name. 
See also: AWS API Documentation Exceptions :example: response = client.get_work_group( WorkGroup='string' ) :type WorkGroup: string :param WorkGroup: [REQUIRED]\nThe name of the workgroup.\n :rtype: dict ReturnsResponse Syntax{ 'WorkGroup': { 'Name': 'string', 'State': 'ENABLED'|'DISABLED', 'Configuration': { 'ResultConfiguration': { 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, 'EnforceWorkGroupConfiguration': True|False, 'PublishCloudWatchMetricsEnabled': True|False, 'BytesScannedCutoffPerQuery': 123, 'RequesterPaysEnabled': True|False }, 'Description': 'string', 'CreationTime': datetime(2015, 1, 1) } } Response Structure (dict) -- WorkGroup (dict) --Information about the workgroup. Name (string) --The workgroup name. State (string) --The state of the workgroup: ENABLED or DISABLED. Configuration (dict) --The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption configuration, if any, used for query results; whether the Amazon CloudWatch Metrics are enabled for the workgroup; whether workgroup settings override client-side settings; and the data usage limits for the amount of data scanned per query or per workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration . ResultConfiguration (dict) --The configuration for the workgroup, which includes the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation . If none of them is set, Athena issues an error that no output location is provided. 
For more information, see Query Results . OutputLocation (string) --The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/ . To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration . If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results . If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration . EncryptionConfiguration (dict) --If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings . EncryptionOption (string) --Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3 ), server-side encryption with KMS-managed keys (SSE-KMS ), or client-side encryption with KMS-managed keys (CSE-KMS) is used. If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup\'s setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup. KmsKey (string) --For SSE-KMS and CSE-KMS , this is the KMS key ARN or ID. EnforceWorkGroupConfiguration (boolean) --If set to "true", the settings for the workgroup override client-side settings. If set to "false", client-side settings are used. 
For more information, see Workgroup Settings Override Client-Side Settings . PublishCloudWatchMetricsEnabled (boolean) --Indicates that the Amazon CloudWatch metrics are enabled for the workgroup. BytesScannedCutoffPerQuery (integer) --The upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. RequesterPaysEnabled (boolean) --If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide . Description (string) --The workgroup description. CreationTime (datetime) --The date and time the workgroup was created. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'WorkGroup': { 'Name': 'string', 'State': 'ENABLED'|'DISABLED', 'Configuration': { 'ResultConfiguration': { 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, 'EnforceWorkGroupConfiguration': True|False, 'PublishCloudWatchMetricsEnabled': True|False, 'BytesScannedCutoffPerQuery': 123, 'RequesterPaysEnabled': True|False }, 'Description': 'string', 'CreationTime': datetime(2015, 1, 1) } } """ pass def list_named_queries(NextToken=None, MaxResults=None, WorkGroup=None): """ Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide . 
See also: AWS API Documentation Exceptions :example: response = client.list_named_queries( NextToken='string', MaxResults=123, WorkGroup='string' ) :type NextToken: string :param NextToken: The token that specifies where to start pagination if a previous request was truncated. :type MaxResults: integer :param MaxResults: The maximum number of queries to return in this request. :type WorkGroup: string :param WorkGroup: The name of the workgroup from which the named queries are returned. If a workgroup is not specified, the saved queries for the primary workgroup are returned. :rtype: dict ReturnsResponse Syntax { 'NamedQueryIds': [ 'string', ], 'NextToken': 'string' } Response Structure (dict) -- NamedQueryIds (list) -- The list of unique query IDs. (string) -- NextToken (string) -- A token to be used by the next request if this request is truncated. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'NamedQueryIds': [ 'string', ], 'NextToken': 'string' } :returns: (string) -- """ pass def list_query_executions(NextToken=None, MaxResults=None, WorkGroup=None): """ Provides a list of available query execution IDs for the queries in the specified workgroup. If a workgroup is not specified, returns a list of query execution IDs for the primary workgroup. Requires you to have access to the workgroup in which the queries ran. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide . See also: AWS API Documentation Exceptions :example: response = client.list_query_executions( NextToken='string', MaxResults=123, WorkGroup='string' ) :type NextToken: string :param NextToken: The token that specifies where to start pagination if a previous request was truncated. :type MaxResults: integer :param MaxResults: The maximum number of query executions to return in this request. 
:type WorkGroup: string :param WorkGroup: The name of the workgroup from which queries are returned. If a workgroup is not specified, a list of available query execution IDs for the queries in the primary workgroup is returned. :rtype: dict ReturnsResponse Syntax { 'QueryExecutionIds': [ 'string', ], 'NextToken': 'string' } Response Structure (dict) -- QueryExecutionIds (list) -- The unique IDs of each query execution as an array of strings. (string) -- NextToken (string) -- A token to be used by the next request if this request is truncated. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'QueryExecutionIds': [ 'string', ], 'NextToken': 'string' } :returns: (string) -- """ pass def list_tags_for_resource(ResourceARN=None, NextToken=None, MaxResults=None): """ Lists the tags associated with this workgroup. See also: AWS API Documentation Exceptions :example: response = client.list_tags_for_resource( ResourceARN='string', NextToken='string', MaxResults=123 ) :type ResourceARN: string :param ResourceARN: [REQUIRED]\nLists the tags for the workgroup resource with the specified ARN.\n :type NextToken: string :param NextToken: The token for the next set of results, or null if there are no additional results for this request, where the request lists the tags for the workgroup resource with the specified ARN. :type MaxResults: integer :param MaxResults: The maximum number of results to be returned per request that lists the tags for the workgroup resource. :rtype: dict ReturnsResponse Syntax { 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ], 'NextToken': 'string' } Response Structure (dict) -- Tags (list) -- The list of tags associated with this workgroup. (dict) -- A tag that you can add to a resource. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. 
Tags enable you to categorize workgroups in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. The maximum tag key length is 128 Unicode characters in UTF-8. The maximum tag value length is 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. Key (string) -- A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique per resource. Value (string) -- A tag value. The tag value length is from 0 to 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag values are case-sensitive. NextToken (string) -- A token to be used by the next request if this request is truncated. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException Athena.Client.exceptions.ResourceNotFoundException :return: { 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ], 'NextToken': 'string' } :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException Athena.Client.exceptions.ResourceNotFoundException """ pass def list_work_groups(NextToken=None, MaxResults=None): """ Lists available workgroups for the account. See also: AWS API Documentation Exceptions :example: response = client.list_work_groups( NextToken='string', MaxResults=123 ) :type NextToken: string :param NextToken: A token to be used by the next request if this request is truncated. :type MaxResults: integer :param MaxResults: The maximum number of workgroups to return in this request. 
:rtype: dict ReturnsResponse Syntax { 'WorkGroups': [ { 'Name': 'string', 'State': 'ENABLED'|'DISABLED', 'Description': 'string', 'CreationTime': datetime(2015, 1, 1) }, ], 'NextToken': 'string' } Response Structure (dict) -- WorkGroups (list) -- The list of workgroups, including their names, descriptions, creation times, and states. (dict) -- The summary information for the workgroup, which includes its name, state, description, and the date and time it was created. Name (string) -- The name of the workgroup. State (string) -- The state of the workgroup. Description (string) -- The workgroup description. CreationTime (datetime) -- The workgroup creation date and time. NextToken (string) -- A token to be used by the next request if this request is truncated. Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: { 'WorkGroups': [ { 'Name': 'string', 'State': 'ENABLED'|'DISABLED', 'Description': 'string', 'CreationTime': datetime(2015, 1, 1) }, ], 'NextToken': 'string' } :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException """ pass def start_query_execution(QueryString=None, ClientRequestToken=None, QueryExecutionContext=None, ResultConfiguration=None, WorkGroup=None): """ Runs the SQL query statements contained in the Query . Requires you to have access to the workgroup in which the query ran. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide . 
See also: AWS API Documentation Exceptions :example: response = client.start_query_execution( QueryString='string', ClientRequestToken='string', QueryExecutionContext={ 'Database': 'string' }, ResultConfiguration={ 'OutputLocation': 'string', 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' } }, WorkGroup='string' ) :type QueryString: string :param QueryString: [REQUIRED]\nThe SQL query statements to be executed.\n :type ClientRequestToken: string :param ClientRequestToken: A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another StartQueryExecution request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString , an error is returned.\n\nWarning\nThis token is listed as not required because AWS SDKs (for example the AWS SDK for Java) auto-generate the token for users. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the action will fail.\n\nThis field is autopopulated if not provided.\n :type QueryExecutionContext: dict :param QueryExecutionContext: The database within which the query executes.\n\nDatabase (string) --The name of the database.\n\n\n :type ResultConfiguration: dict :param ResultConfiguration: Specifies information about where and how to save the results of the query execution. If the query runs in a workgroup, then workgroup\'s settings may override query settings. This affects the query results location. The workgroup settings override is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration .\n\nOutputLocation (string) --The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/ . 
To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration . If none of them is set, Athena issues an error that no output location is provided. For more information, see Query Results . If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration .\n\nEncryptionConfiguration (dict) --If query results are encrypted in Amazon S3, indicates the encryption option used (for example, SSE-KMS or CSE-KMS ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings .\n\nEncryptionOption (string) -- [REQUIRED]Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3 ), server-side encryption with KMS-managed keys (SSE-KMS ), or client-side encryption with KMS-managed keys (CSE-KMS) is used.\nIf a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup\'s setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup.\n\nKmsKey (string) --For SSE-KMS and CSE-KMS , this is the KMS key ARN or ID.\n\n\n\n\n :type WorkGroup: string :param WorkGroup: The name of the workgroup in which the query is being started. :rtype: dict ReturnsResponse Syntax { 'QueryExecutionId': 'string' } Response Structure (dict) -- QueryExecutionId (string) -- The unique ID of the query that ran as a result of this request. 
Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException Athena.Client.exceptions.TooManyRequestsException :return: { 'QueryExecutionId': 'string' } :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException Athena.Client.exceptions.TooManyRequestsException """ pass def stop_query_execution(QueryExecutionId=None): """ Stops a query execution. Requires you to have access to the workgroup in which the query ran. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide . See also: AWS API Documentation Exceptions :example: response = client.stop_query_execution( QueryExecutionId='string' ) :type QueryExecutionId: string :param QueryExecutionId: [REQUIRED]\nThe unique ID of the query execution to stop.\nThis field is autopopulated if not provided.\n :rtype: dict ReturnsResponse Syntax{} Response Structure (dict) -- Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: {} :returns: Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException """ pass def tag_resource(ResourceARN=None, Tags=None): """ Adds one or more tags to the resource, such as a workgroup. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize resources (workgroups) in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. For best practices, see AWS Tagging Strategies . The key length is from 1 (minimum) to 128 (maximum) Unicode characters in UTF-8. The tag value length is from 0 (minimum) to 256 (maximum) Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . 
_ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one, separate them by commas. See also: AWS API Documentation Exceptions :example: response = client.tag_resource( ResourceARN='string', Tags=[ { 'Key': 'string', 'Value': 'string' }, ] ) :type ResourceARN: string :param ResourceARN: [REQUIRED]\nRequests that one or more tags are added to the resource (such as a workgroup) for the specified ARN.\n :type Tags: list :param Tags: [REQUIRED]\nOne or more tags, separated by commas, to be added to the resource, such as a workgroup.\n\n(dict) --A tag that you can add to a resource. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize workgroups in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. The maximum tag key length is 128 Unicode characters in UTF-8. The maximum tag value length is 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource.\n\nKey (string) --A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique per resource.\n\nValue (string) --A tag value. The tag value length is from 0 to 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. 
Tag values are case-sensitive.\n\n\n\n\n :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException Athena.Client.exceptions.ResourceNotFoundException :return: {} :returns: (dict) -- """ pass def untag_resource(ResourceARN=None, TagKeys=None): """ Removes one or more tags from the workgroup resource. Takes as an input a list of TagKey Strings separated by commas, and removes their tags at the same time. See also: AWS API Documentation Exceptions :example: response = client.untag_resource( ResourceARN='string', TagKeys=[ 'string', ] ) :type ResourceARN: string :param ResourceARN: [REQUIRED]\nRemoves one or more tags from the workgroup resource for the specified ARN.\n :type TagKeys: list :param TagKeys: [REQUIRED]\nRemoves the tags associated with one or more tag keys from the workgroup resource.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException Athena.Client.exceptions.ResourceNotFoundException :return: {} :returns: (dict) -- """ pass def update_work_group(WorkGroup=None, Description=None, ConfigurationUpdates=None, State=None): """ Updates the workgroup with the specified name. The workgroup\'s name cannot be changed. 
See also: AWS API Documentation Exceptions :example: response = client.update_work_group( WorkGroup='string', Description='string', ConfigurationUpdates={ 'EnforceWorkGroupConfiguration': True|False, 'ResultConfigurationUpdates': { 'OutputLocation': 'string', 'RemoveOutputLocation': True|False, 'EncryptionConfiguration': { 'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS', 'KmsKey': 'string' }, 'RemoveEncryptionConfiguration': True|False }, 'PublishCloudWatchMetricsEnabled': True|False, 'BytesScannedCutoffPerQuery': 123, 'RemoveBytesScannedCutoffPerQuery': True|False, 'RequesterPaysEnabled': True|False }, State='ENABLED'|'DISABLED' ) :type WorkGroup: string :param WorkGroup: [REQUIRED]\nThe specified workgroup that will be updated.\n :type Description: string :param Description: The workgroup description. :type ConfigurationUpdates: dict :param ConfigurationUpdates: The workgroup configuration that will be updated for the given workgroup.\n\nEnforceWorkGroupConfiguration (boolean) --If set to 'true', the settings for the workgroup override client-side settings. If set to 'false' client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings .\n\nResultConfigurationUpdates (dict) --The result configuration information about the queries in this workgroup that will be updated. Includes the updated results location and an updated option for encrypting query results.\n\nOutputLocation (string) --The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/ . For more information, see Query Results If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The 'workgroup settings override' is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. 
See WorkGroupConfiguration$EnforceWorkGroupConfiguration .\n\nRemoveOutputLocation (boolean) --If set to 'true', indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to 'false' or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup\'s ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings .\n\nEncryptionConfiguration (dict) --The encryption configuration for the query results.\n\nEncryptionOption (string) -- [REQUIRED]Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE-S3 ), server-side encryption with KMS-managed keys (SSE-KMS ), or client-side encryption with KMS-managed keys (CSE-KMS) is used.\nIf a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup\'s setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup.\n\nKmsKey (string) --For SSE-KMS and CSE-KMS , this is the KMS key ARN or ID.\n\n\n\nRemoveEncryptionConfiguration (boolean) --If set to 'true', indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to 'false' or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup\'s ResultConfiguration will be updated with the new value. 
For more information, see Workgroup Settings Override Client-Side Settings .\n\n\n\nPublishCloudWatchMetricsEnabled (boolean) --Indicates whether this workgroup enables publishing metrics to Amazon CloudWatch.\n\nBytesScannedCutoffPerQuery (integer) --The upper limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan.\n\nRemoveBytesScannedCutoffPerQuery (boolean) --Indicates that the data usage control limit per query is removed. WorkGroupConfiguration$BytesScannedCutoffPerQuery\n\nRequesterPaysEnabled (boolean) --If set to true , allows members assigned to a workgroup to specify Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide .\n\n\n :type State: string :param State: The workgroup state that will be updated for the given workgroup. :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions Athena.Client.exceptions.InternalServerException Athena.Client.exceptions.InvalidRequestException :return: {} :returns: (dict) -- """ pass
# -*- coding: utf-8 -*-


class Node:
    """A quad-tree node (LeetCode 558 'Quad Tree Intersection' style).

    Leaves carry a boolean ``val``; internal nodes are built with val=None
    here and hold four child quadrants.
    """

    def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
        self.val = val
        self.isLeaf = isLeaf
        self.topLeft = topLeft
        self.topRight = topRight
        self.bottomLeft = bottomLeft
        self.bottomRight = bottomRight

    def __eq__(self, other):
        # Structural (deep) equality: value, leaf flag, and all four
        # children must match recursively.  Used by the assert below.
        return (
            other is not None
            and self.val == other.val
            and self.isLeaf == other.isLeaf
            and self.topLeft == other.topLeft
            and self.topRight == other.topRight
            and self.bottomLeft == other.bottomLeft
            and self.bottomRight == other.bottomRight
        )


class Solution:
    def intersect(self, quadTree1, quadTree2):
        """Combine two quad trees; a true leaf dominates the result.

        If either tree is a leaf: a truthy leaf is returned as-is, a falsy
        leaf defers entirely to the other tree (i.e. a logical-OR style
        combination).  Otherwise combine quadrant by quadrant and collapse
        four identical leaf children back into a single leaf.
        """
        if quadTree1.isLeaf:
            return quadTree1 if quadTree1.val else quadTree2
        elif quadTree2.isLeaf:
            return quadTree2 if quadTree2.val else quadTree1

        topLeft = self.intersect(quadTree1.topLeft, quadTree2.topLeft)
        topRight = self.intersect(quadTree1.topRight, quadTree2.topRight)
        bottomLeft = self.intersect(quadTree1.bottomLeft, quadTree2.bottomLeft)
        bottomRight = self.intersect(quadTree1.bottomRight, quadTree2.bottomRight)
        if topLeft.isLeaf and topRight.isLeaf and bottomLeft.isLeaf and bottomRight.isLeaf:
            if topLeft.val == topRight.val == bottomLeft.val == bottomRight.val:
                # All four children agree: collapse into one leaf.
                return Node(topLeft.val, True, None, None, None, None)
        return Node(None, False, topLeft, topRight, bottomLeft, bottomRight)


if __name__ == '__main__':
    # Build two sample trees (t0_*, t1_*), combine them, and check the
    # result against the expected tree t2_0 via Node.__eq__.
    solution = Solution()
    t0_4 = Node(False, True, None, None, None, None)
    t0_3 = Node(False, True, None, None, None, None)
    t0_2 = Node(True, True, None, None, None, None)
    t0_1 = Node(True, True, None, None, None, None)
    t0_0 = Node(None, False, t0_1, t0_2, t0_3, t0_4)

    t1_8 = Node(True, True, None, None, None, None)
    t1_7 = Node(True, True, None, None, None, None)
    t1_6 = Node(False, True, None, None, None, None)
    t1_5 = Node(False, True, None, None, None, None)
    t1_4 = Node(False, True, None, None, None, None)
    t1_3 = Node(True, True, None, None, None, None)
    t1_2 = Node(None, False, t1_5, t1_6, t1_7, t1_8)
    t1_1 = Node(True, True, None, None, None, None)
    t1_0 = Node(None, False, t1_1, t1_2, t1_3, t1_4)

    t2_4 = Node(False, True, None, None, None, None)
    t2_3 = Node(True, True, None, None, None, None)
    t2_2 = Node(True, True, None, None, None, None)
    t2_1 = Node(True, True, None, None, None, None)
    t2_0 = Node(None, False, t2_1, t2_2, t2_3, t2_4)

    assert t2_0 == solution.intersect(t0_0, t1_0)
'''
Partition a singly-linked list around a pivot value x.
https://leetcode-cn.com/problems/partition-list-lcci/

Every node with a value below x ends up before every node with a value
greater than or equal to x.  Node values are swapped in place, so the list
structure (the chain of .next pointers) is never modified.
'''


# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def partition(self, head: ListNode, x: int) -> ListNode:
        """Rearrange values so all < x precede all >= x; return head.

        Two-pointer value swap: `scan` walks the whole list, `write` marks
        the slot where the next small value belongs.
        """
        write = head
        scan = head
        while scan is not None:
            if scan.val < x:
                # Place the small value at the write position, then
                # advance the write boundary.
                write.val, scan.val = scan.val, write.val
                write = write.next
            scan = scan.next
        return head
print("{:_^20}".format("School average")) FirstGrade = float(input("First Grade: ")) SecondGrade = float(input("Second Grade: ")) Media = (FirstGrade + SecondGrade) / 2 print("Media: {:.1f}".format(Media)) print("{:_^20}".format("School average 2")) FirstGrade = float(input("First Grade: ")) SecondGrade = float(input("Second Grade: ")) print("Media: {:.1f}".format((FirstGrade + SecondGrade) / 2)) if(Media >= 6.0): { print("Student approved") } else: { print("Student failed") }
""" A system log management tool with time-series causal analysis """
class BaseException(Exception):
    """Base application exception carrying an HTTP-style code and message.

    NOTE(review): this class shadows Python's builtin ``BaseException``
    within this module; renaming (e.g. to ``AppBaseException``) is
    recommended, but callers may already import this name, so it is left
    unchanged here.
    """

    # Class-level defaults; overwritten per instance in __init__.
    code = 0
    message = ''

    def __init__(self, code, message):
        self.code = code
        self.message = message


class ServerException(BaseException):
    """Server-side failure, reported with HTTP status code 500."""

    def __init__(self, message):
        super().__init__(500, message)


class ClientException(BaseException):
    """Client-side error, reported with HTTP status code 400."""

    def __init__(self, message):
        super().__init__(400, message)
# Copyright (c) 2020 jya
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Color classes."""

# color mode identifiers
MODE_GRAYSCALE = 'grayscale'
MODE_RGB = 'rgb'

# preset grayscale colors
BLACK = 0x00
DARK_GRAY = 0x9d
GRAY = 0xc9
WHITE = 0xfe
TRANSPARENT = 0xff

# preset RGB colors
RGB_BLACK = 0x000000
RGB_DARK_GRAY = 0x9d9d9d
RGB_GRAY = 0xc9c9c9
RGB_WHITE = 0xfefefe
RGB_TRANSPARENT = 0xffffff


def get_rgb(value):
    """Split a 24-bit packed RGB integer into an (r, g, b) tuple."""
    return ((value >> 16) & 0xff, (value >> 8) & 0xff, value & 0xff)


class ColorPalette:
    """A four-color palette (black, darkgray, gray, white) plus a
    mode-appropriate transparent value."""

    def __init__(self, mode=MODE_GRAYSCALE, colors=(BLACK, DARK_GRAY, GRAY, WHITE)):
        if mode not in (MODE_GRAYSCALE, MODE_RGB):
            raise ValueError('mode must be MODE_GRAYSCALE or MODE_RGB')
        if len(colors) != 4:
            raise ValueError('colors must have 4 color values (black, darkgray, gray, white)')
        self.mode = mode
        # Palette entries, darkest to lightest.
        self.black, self.darkgray, self.gray, self.white = colors
        # The transparent sentinel depends on the color mode.
        self.transparent = TRANSPARENT if mode == MODE_GRAYSCALE else RGB_TRANSPARENT


DEFAULT_COLORPALETTE = ColorPalette(MODE_GRAYSCALE, (BLACK, DARK_GRAY, GRAY, WHITE))
DEFAULT_RGB_COLORPALETTE = ColorPalette(MODE_RGB, (RGB_BLACK, RGB_DARK_GRAY, RGB_GRAY, RGB_WHITE))
# Local working directory where cloned repositories are kept (not tracked).
UNTRACKED_PATH = "repositorios"
# LaTeX engine used to build the documents.
COMPILER = "pdflatex"

# Git URL and local checkout folder for the SAE AeroDesign page-counter tool.
SAE_COUNTER_GITHUB = "https://github.com/comissao-aerodesign/PyAeroCounter.git"
SAE_COUNTER_PATH = "PyAeroCounter"

# One entry per Overleaf project to sync; replace the <placeholders>.
#   name: display name, path: local folder, main: entry-point .tex file,
#   url: the project's Overleaf git remote.
PROJECTS_OVERLEAF = [
    {
        'name': "<Nome do projeto>",
        'path': "<Pasta_do_projeto>",
        'main': "<Arquivo tex>",
        'url': "https://git.overleaf.com/<SUA_URL_OVERLEAF_GIT>"
    },
]

# Overleaf git credentials.
# NOTE(review): storing a plain-text password in source is unsafe -- prefer
# an environment variable or a git credential helper.
OVERLEAF_USER = "<SEU_USUARIO>"
OVERLEAF_PASSWORD = "<SUA_SENHA>"
def test(function, arguments_result_list, one_argument_function=False): """ testing function for every (arguments,expected_result) pair in arguments_result_list checks if function(arguments) == result """ for args, expected_result in arguments_result_list: if one_argument_function: args = [args] call_result = function(*args) if call_result != expected_result: exception_message = f"function {function.__name__} returned {call_result} instaed of {expected_result} for aguments:{args}" raise AssertionError(exception_message)
def get_entity_bios(seq, id2label):
    """Gets entities from a BIOS-tagged sequence.

    note: BIOS scheme (B-, I-, O, S- prefixes; S- marks a one-token entity).

    Args:
        seq (list): sequence of labels; non-string items are mapped to label
            strings via ``id2label``.
        id2label (dict): id -> label string mapping.

    Returns:
        list: list of [chunk_type, chunk_start, chunk_end].

    Example:
        >>> get_entity_bios(['B-PER', 'I-PER', 'O', 'S-LOC'], {})
        [['PER', 0, 1], ['LOC', 3, 3]]
    """
    chunks = []
    chunk = [-1, -1, -1]  # [type, start, end]; -1 means "not started"
    for indx, tag in enumerate(seq):
        if not isinstance(tag, str):
            tag = id2label[tag]
        if tag.startswith("S-"):
            # Single-token entity: flush any open chunk, then emit this one.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
            chunk[1] = indx
            chunk[2] = indx
            chunk[0] = tag.split('-')[1]
            chunks.append(chunk)
            # FIX: this reset was ``chunk = (-1, -1, -1)`` -- a tuple.  The
            # else branch below happened to rebind it to a list in the same
            # iteration, so it never crashed, but it was a latent TypeError
            # trap and inconsistent with every other reset.
            chunk = [-1, -1, -1]
        if tag.startswith("B-"):
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
            chunk[1] = indx
            chunk[0] = tag.split('-')[1]
            # NOTE(review): a B- tag at the very end of seq is dropped (its
            # end stays -1).  In strict BIOS a one-token entity must be S-,
            # so this may be intended -- confirm against the tagging data.
        elif tag.startswith('I-') and chunk[1] != -1:
            _type = tag.split('-')[1]
            if _type == chunk[0]:
                chunk[2] = indx
            if indx == len(seq) - 1:
                chunks.append(chunk)
        else:
            # O tag (or stray I-): close any open chunk.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
    return chunks


def get_entity_bio(seq, id2label):
    """Gets entities from a BIO-tagged sequence.

    note: BIO scheme (B-, I-, O prefixes).

    Args:
        seq (list): sequence of labels; non-string items are mapped to label
            strings via ``id2label``.
        id2label (dict): id -> label string mapping.

    Returns:
        list: list of [chunk_type, chunk_start, chunk_end].

    Example:
        >>> get_entity_bio(['B-PER', 'I-PER', 'O', 'B-LOC'], {})
        [['PER', 0, 1], ['LOC', 3, 3]]
    """
    chunks = []
    chunk = [-1, -1, -1]  # [type, start, end]
    for indx, tag in enumerate(seq):
        if not isinstance(tag, str):
            tag = id2label[tag]
        if tag.startswith("B-"):
            # New entity: flush the previous chunk, start a fresh one.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
            chunk[1] = indx
            chunk[0] = tag.split('-')[1]
            chunk[2] = indx
            if indx == len(seq) - 1:
                chunks.append(chunk)
        elif tag.startswith('I-') and chunk[1] != -1:
            _type = tag.split('-')[1]
            if _type == chunk[0]:
                chunk[2] = indx
            if indx == len(seq) - 1:
                chunks.append(chunk)
        else:
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [-1, -1, -1]
    return chunks


def get_entities(seq, id2label, markup='bios'):
    """Dispatch entity extraction by tagging scheme.

    :param seq: sequence of labels (strings or ids).
    :param id2label: id -> label string mapping.
    :param markup: 'bio' or 'bios'.
    :return: list of [chunk_type, chunk_start, chunk_end].
    """
    assert markup in ['bio', 'bios']
    if markup == 'bio':
        return get_entity_bio(seq, id2label)
    else:
        return get_entity_bios(seq, id2label)
class AppValidationError(Exception):
    """Raised when application validation fails.

    Attributes:
        response: the response object (if any) associated with the failure.
    """

    def __init__(self, msg, response=None):
        super().__init__(msg)
        self.response = response
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------


class LabmanError(Exception):
    """Root of the labman exception hierarchy."""
    pass


class LabmanUnknownIdError(LabmanError):
    """Raised when an object with the given id is absent from the DB.

    Parameters
    ----------
    obj_name : str
        The kind of object looked up.
    obj_id : str
        The id that could not be found.
    """

    def __init__(self, obj_name, obj_id):
        super().__init__()
        self.args = (f"{obj_name} with ID '{obj_id}' does not exist", )


class LabmanDuplicateError(LabmanError):
    """Raised when creating an object would duplicate an existing one.

    Parameters
    ----------
    obj_name : str
        The kind of object being created.
    attributes : list of (str, str)
        The attribute (name, value) pairs that clash.
    """

    def __init__(self, obj_name, attributes):
        super().__init__()
        clashing = ', '.join(f"{key} = {val}" for key, val in attributes)
        self.args = (f"{obj_name} with {clashing} already exists", )


class LabmanLoginError(LabmanError):
    """Raised on a failed login attempt."""

    def __init__(self):
        super().__init__()
        self.args = ("Incorrect user id or password", )


class LabmanLoginDisabledError(LabmanError):
    """Raised when the user may not log into this portal."""

    def __init__(self):
        super().__init__()
        self.args = ("Login credentials disabled for this portal", )
# import random # class InvalidGreetingException(Exception): # """ # Raised when a string does not imply greeting # """ # pass # def main(): # """ # Doc string.Girish # """ # greeting_messages = ["Hello", "Welcome", "Hey", "Hi"] # choices = greeting_messages + ["Bye", "Thanks", "GoodBye"] # greeting_choice = random.choice(choices) # print("Selected choice: {0!s}".format(greeting_choice)) # if (greeting_choice not in greeting_messages): # raise InvalidGreetingException( # "Not a valid word for greeting", greeting_choice) # if __name__ == "__main__": # try: # main() # except InvalidGreetingException as ex: # for arg in ex.args: # print(arg) # class Phone: # def __init__(self, name, color): # self._name = name # self._color = color # # name not changeable once set # @property # def name(self): # return self._name # @property # def color(self): # """ # Color of the phone # """ # return self._color # # property returns an object # # which is the same as getter method # # # @color.setter # def color(self, color): # self._color = color # phone = Phone("Motorola onepower", "black") # print("Name: {0!s}, Color: {1!s}".format(phone.name, phone.color)) # phone.color = "red" # print("Name: {0!s}, Color: {1!s}".format(phone.name, phone.color)) # class Sample: # def print_message(self, message): # print("Inside: print_message, message:{}".format(message)) # def fake_print_message(message): # print("Inside: fake_print_message, message:{}".format(message)) # sample = Sample() # sample.print_message("Hello") # print(sample.__dict__) # # think of this like replacing the __dict__ of the object with key "print_message", updating its value to point to a different function object. # sample.print_message = fake_print_message # sample.print_message("Hello") # print(sample.__dict__) class CallableObject: def __call__(self, message): print("message: {}".format(message)) obj = CallableObject() obj("This is a callable object behaving like a function") CallableObject()("Hello")
# Program to sort a list of elements using comb sort.
def comb_sort(nums):
    """Sort *nums* in place with comb sort (shrink factor 1.3); return it.

    The gap starts at len(nums) and shrinks by 1.3 each pass; once it
    reaches 1 the algorithm degenerates into bubble-sort passes, repeated
    until a full pass makes no swap.
    """
    shrink_fact = 1.3
    gaps = len(nums)
    swapped = True
    while gaps > 1 or swapped:
        # FIX: clamp the gap at 1.  The original let it shrink to 0, so
        # only a single gap-1 (bubble) pass ever ran -- the gap-0 pass
        # compares each element with itself and always reports "no swaps",
        # terminating the loop even if the list is still unsorted.
        gaps = max(1, int(float(gaps) / shrink_fact))
        swapped = False
        i = 0
        while gaps + i < len(nums):
            if nums[i] > nums[i + gaps]:
                nums[i], nums[i + gaps] = nums[i + gaps], nums[i]
                swapped = True
            i += 1
    return nums


if __name__ == '__main__':
    # Guarded so the module can be imported (and tested) without prompting.
    num1 = input('Input comma separated numbers:\n').strip()
    nums = [int(item) for item in num1.split(',')]
    print(comb_sort(nums))
#!/usr/bin/env python # I need to figure out how I want to deal with these classes class FacebookEndpoint: pass
def is_phone_number(text):
    """Return True if *text* looks like a Brazilian mobile phone number.

    Accepts common formatting such as ``+55 (21) 91111-1111``: the
    characters ``+ ( ) space -`` are stripped, an optional leading 55
    country code is removed, and the remainder must be exactly 11 digits
    (2-digit area code + 9-digit number).

    :param text: the candidate phone number string.
    :return: bool
    """
    # Normally, brazilians use the format +00 (00) 00000-0000; the
    # brazilian country code is +55 at the beginning.
    for character in '+() -':
        text = text.replace(character, '')
    # FIX: the original indexed text[0]/text[1] unconditionally, raising
    # IndexError for inputs shorter than 2 after cleaning, and stripped a
    # leading "55" even from an 11-digit local number -- wrongly rejecting
    # numbers whose area code is 55.  Only strip the country code when the
    # cleaned number is 13 digits long.
    if len(text) == 13 and text.startswith('55'):
        text = text[2:]
    # Robustness: require actual digits, not just the right length.
    return len(text) == 11 and text.isdigit()


print(is_phone_number('2191111-1111'))
print(is_phone_number('248888-9999'))
# NOTE(review): fragment -- assumes ``sns`` (presumably seaborn) and the
# ``monthly_victim_counts`` data object are defined elsewhere in the file;
# confirm their definitions before reuse.
# Draws a line plot of the monthly counts; height=3 with aspect=4 yields a
# short, wide figure, and the "colorblind" palette keeps series
# distinguishable for color-blind readers.
sns.relplot(
    data=monthly_victim_counts,
    kind="line",
    palette="colorblind",
    height=3,
    aspect=4,
)
""" Dictionary: 1. Normal variable holds 1 value; dictionary holds collection of key-value pairs; all keys must be distinct but values may be repeated 2. {} - curly bracket 3. Unordered 4. Mutable 5. uses Hashing internally 6. Functions: 1. dict[] : returns value at specified index 2. len() : returns length of dictionary min() : returns min value in dictionary max() : returns max value in dictionary sum() : returns sum of values in dictionary 3. dict.reverse() : 'dict' object has no attribute 'reverse' 4. dict.sort() : 'dict' object has no attribute 'sort' 5. in : operator returns bool stating if specified value present in dictionary or not 6. dict[key] = value : add value with specified key 7. dict[key] : get value from dict with specified key dict.get(key) returns None if key dosen't exists 11. dict.pop(key) : dict.pop() dict.popitem() pop() will remove last value 12. del dict[key] : delete """ dict = {10:"abc", 20:"xyz", 30:"pqr"} print(dict) print(type(dict)) print(dict[10]) print(dict, len(dict), min(dict), max(dict), sum(dict)) dict[40] = "def" print(dict) print(dict[30], dict.get(30)) print(dict.get(50), dict.get(60, "Not Available")) #dict.reverse() #dict.sort() print(20 in dict, 80 in dict) dict.popitem() print(dict) dict.pop(10) print(dict) del dict[30] print(dict)
#coding=utf-8
# Username availability check (book exercise P79, 2017-04-14).

current_users = ['jer', 'cas', 'ety', 'a', 'b']
new_users = ['JER', 'Cas', 'x', 'y', 'z']

# Existing usernames lowercased once, so membership is case-insensitive no
# matter how either list is capitalized.  This replaces the original chain of
# three near-identical branches, whose first condition (upper-cased candidate
# against an all-lowercase user list) could never match.
_taken = {user.lower() for user in current_users}


def is_username_taken(name):
    """Return True when *name* matches an existing username, ignoring case."""
    return name.lower() in _taken


for new_user in new_users:
    if is_username_taken(new_user):
        print('Sorry!The username ' + new_user.title() + ' has been used,You must change your input!')
    else:
        print('Register Success!')
# https://www.codechef.com/problems/FLOW015 for T in range(int(input())): n,days,c=int(input()),['sunday','monday','tuesday','wednesday','thursday','friday','saturday'],1 if(n>2001): for z in range(2002,n+1): if((z-1)%4==0 and ((z-1)%400==0 or (z-1)%100!=0)): c+=2 else: c+=1 else: for z in range(2000,n-1,-1): if(z%4==0 and (z%400==0 or z%100!=0)): c-=2 else: c-=1 print(days[c%7])
# A Spotify-style playlist modelled as a plain dictionary.
playlist = {
    'name': 'hip-hop',
    'author': 'John',
    'songs': [
        {'title': 'Walk it Talk it', 'artist': 'Migos'},
        {'title': 'New Freezer', 'artist': 'Rich Kid'},
        {'title': 'New Freezer', 'artist': 'Chris Brown'},
    ],
}

# Print every song title in playlist order.
for track in playlist['songs']:
    print(track['title'])
load("@bazel_gazelle//:deps.bzl", _go_repository = "go_repository") load("@io_bazel_rules_go//go:def.bzl", _go_binary = "go_binary", _go_library = "go_library", _go_test = "go_test") def go_repository(name, **kwargs): """Macro wrapping the Gazelle go_repository rule. This conditionally defines the repository if it hasn't already been. """ if name in native.existing_rules(): return _go_repository(name = name, **kwargs) # Go importpath prefix shared by all Kythe libraries go_prefix = "kythe.io/" def _infer_importpath(name): basename = native.package_name().split("/")[-1] importpath = go_prefix + native.package_name() if basename == name: return importpath return importpath + "/" + name def go_binary(name, importpath = None, **kwargs): """This macro wraps the go_binary rule provided by the Bazel Go rules to automatically infer the binary's importpath. It is otherwise equivalent in function to a go_binary. """ if importpath == None: importpath = _infer_importpath(name) _go_binary( name = name, importpath = importpath, out = name, **kwargs ) def go_library(name, importpath = None, **kwargs): """This macro wraps the go_library rule provided by the Bazel Go rules to automatically infer the library's importpath. It is otherwise equivalent in function to a go_library. """ if importpath == None: importpath = _infer_importpath(name) _go_library( name = name, importpath = importpath, **kwargs ) def go_test(name, library = None, **kwargs): """This macro wraps the go_test rule provided by the Bazel Go rules to silence a deprecation warning for use of the "library" attribute. It is otherwise equivalent in function to a go_test. """ # For internal tests (defined in the same package), we need to embed # the library under test, but this is not needed for external tests. embed = [library] if library else [] _go_test( name = name, embed = embed, **kwargs )
# -*- coding: utf-8 -*- """ File Name: best-line-lcci.py Author : jynnezhang Date: 2020/11/12 11:45 下午 Description: https://leetcode-cn.com/problems/best-line-lcci/ """ class Solution: def bestLine(self, points): ret = {} for i in range(len(points)): for j in range(i + 1, len(points)): if points[i][0] - points[j][0] == 0: k = points[i][0] b = 0 elif points[i][1] == points[j][1]: k = 0 b = points[i][1] else: k = (points[i][1] - points[j][1]) / (points[i][0] - points[j][0]) b = points[i][1] - (points[i][1] - points[j][1]) / (points[i][0] - points[j][0])*points[i][0] key = str(k) + '-' + str(b) if key not in ret.keys(): ret[key] = [] if i not in ret[key]: ret[key].append(i) if j not in ret[key]: ret[key].append(j) max_num, max_num_k = 0, 0 for key, value in ret.items(): value.sort() if len(value) > max_num: max_num = len(value) max_num_k = key if len(value) == max_num: if value[0] < ret[max_num_k][0]: max_num = len(value) max_num_k = key elif value[0] == ret[max_num_k][0] and value[1] < ret[max_num_k][1]: max_num = len(value) max_num_k = key return [ret[max_num_k][0], ret[max_num_k][1]] if __name__ == '__main__': print(Solution().bestLine( [[-20398, -20631], [3544, -25103], [-12602, -17934], [-21077, -20589], [-42421, -34121], [-13836, -57776], [-23894, -15740], [-35969, 44416], [20924, 7570], [8073, -21024], [-13406, -30413], [-48433, -11240], [6794, -16545], [-8554, 37203], [4236, -7587], [-28748, -10765]]))
# Numbers can be combined using mathematical operators x = 1 + 1 y = 2 * 3 # Variables holding numbers can be used any way numbers can be used z = y / x # We can prove that these computations worked out the same # using comparison operators, specifically == to test for equality: print('===comparing===') print('2 == x', 2 == x) print('6 == y', 6 == y) print('3 == z', 3 == z) # Note that z is a float. Division in Python 3+ always returns a float. # We can coerce the result to an int using the "integer division" operator # which always rounds down: int_div = y // x # We can also use the "modulo" operator to compute the remainder: remainder = y % x print() # Just for a blank line in the output # Two values can only be equal if they have the same type print("1 == '1'", 1 == '1') # Other common comparisons include <, <=, >, >= print('1 < 2', 1 < 2) # True print('10 >= 10', 10 >= 10) # True print('10 > 10', 10 > 10) # False print() # For a blank line in the output # Strings are compared pseudo-alphabetically for greater than / less than print('"albert" < "bill"', "albert" < "bill") # True # HOWEVER, in python ALL capital letters come before ANY lowercase letters print('"B" < "a"', "B" < "a") # True # FYI: There are additional rules for other characters like $, %, ., and so on # that we're ignoring for now. # Strings can also be combined with math operators, but they mean different # things when operating on strings x = "hello " + "world." # Concatenation, x is "hello world." y = "a" * 4 # Duplication, y = "aaaa" print() print(x) print(y) # Finally, we can combine the assignment operator and these math operations # using the following shorthands: x = 4 x += 3 # x = x + 3 x -= 1 # x = x - 1 x *= 2 # x = x * 2 x /= 4 # x = x / 4 # Micro-Exercise: predict the value of x. Then write a comparison statement # involving x that evaluates to False. Print the result of that comparison.
class Solution:
    def kthFactor(self, n: int, k: int) -> int:
        """Return the k-th smallest divisor of n, or -1 when n has fewer
        than k divisors."""
        found = 0
        candidate = 1
        while candidate <= n:
            if n % candidate == 0:
                found += 1
                if found == k:
                    return candidate
            candidate += 1
        return -1
# Odoo module manifest for "Junari Odoo Website Utils".
# Odoo evaluates this file as a single dict literal; it is never imported
# as a regular Python module.
{
    'name': 'Junari Odoo Website Utils',
    'version': '1.0',
    'summary': 'Re-usable widgets for odoo website modules',
    'author': 'Junari Ltd',
    'category': 'CRM',
    'website': 'https://www.junari.com',
    'images': [],
    # Modules that must be installed before this one.
    'depends': [
        'website'
    ],
    # Data files loaded on install/update.
    'data': [
        'views/assets.xml'
    ],
    'js': [],
    'qweb': [],
    'css': [],
    'demo': [],
    'test': [],
    'application': False,
    'installable': True,
    'auto_install': False,
}
# Multiplication-table printer: keeps asking for a number and prints its
# times table (1-10); any negative number ends the program.
while True:
    value = int(input('Quer ver a tabuada de qual valor? '))
    if value < 0:
        break
    print('-' * 30)
    for factor in range(1, 11):
        print(f'{value} x {factor} = {value * factor}')
    print('-' * 30)
print('PROGRAMA TABUADA ENCERRADO. Volte sempre!')
class Humidifier:
    """Class that represents a humidifier object in the Venta API."""

    def __init__(self, request):
        """Initialize with *request*, a callable that performs the API call
        and returns a response object exposing ``.json()``."""
        self.state = {}
        self.request = request
        self.update()

    # Note: each property name maps the name in the returned data
    @property
    def mac(self) -> int:
        """Return the Mac of the humidifier."""
        return self.state["Header"]["MacAdress"]

    @property
    def temperature(self) -> int:
        """Return current temperature."""
        return self.state["Measure"]["Temperature"]

    @property
    def humidity(self) -> int:
        """Return current humidity."""
        return self.state["Measure"]["Humidity"]

    @property
    def dust(self) -> int:
        """Return current dust."""
        return self.state["Measure"]["Dust"]

    @property
    def target_humidity(self) -> int:
        """Return target_humidity."""
        return self.state["Action"]["TargetHum"]

    @property
    def fan_speed(self) -> int:
        """Return the fan speed."""
        return self.state["Action"]["FanSpeed"]

    @property
    def is_on(self) -> bool:
        """Return if the humidifier is running."""
        return self.state["Action"]["Power"]

    @property
    def is_sleep_mode(self) -> bool:
        """Return if the humidifier is in Sleep mode."""
        return self.state["Action"]["SleepMode"]

    @property
    def is_auto_mode(self) -> bool:
        """Return if the humidifier is in Auto mode."""
        return self.state["Action"]["Automatic"]

    def set_humidity(self, humidity: int):
        """Request a new target humidity and cache the device's reply."""
        res = self.request(json={"Action": {"TargetHum": humidity}})
        self.state = res.json()

    def change_mode(self, mode: str, speed: int = 0):
        """Switch the device to *mode*; *speed* is only used for "manual".

        Valid modes: "off", "on", "sleep", "automatic", "manual".

        Raises:
            ValueError: for an unknown mode.  (BUG FIX: the original left
            ``action`` unbound in that case and crashed with
            UnboundLocalError; it also contained a leftover debug print.)
        """
        actions = {
            "off": {"Action": {"Power": False}},
            "on": {"Action": {"Power": True}},
            "sleep": {"Action": {"Power": True, "SleepMode": True, "Automatic": False}},
            "automatic": {"Action": {"Power": True, "SleepMode": False, "Automatic": True}},
            "manual": {
                "Action": {
                    "Power": True,
                    "SleepMode": False,
                    "Automatic": False,
                    "FanSpeed": speed,
                }
            },
        }
        try:
            action = actions[mode]
        except KeyError:
            raise ValueError(f"unknown humidifier mode: {mode!r}") from None
        res = self.request(json=action)
        self.state = res.json()

    def update(self):
        """Update the humidifier data."""
        res = self.request()
        self.state = res.json()
""" Given an integer, write a function to determine if it is a power of two. Example 1: Input: 1 Output: true Explanation: 20 = 1 Example 2: Input: 16 Output: true Explanation: 24 = 16 Example 3: Input: 218 Output: false Solution: 1. While loop and Divide by 2 2. Bit Manipulation a power of two in binary representation is one 1-bit, followed by some zeros The problem will be solved in O(1) time with the help of bitwise operators. The idea is to discuss such bitwise (按位操作) tricks as * How to get / isolate the rightmost 1-bit : x & (-x). keep the rightmost 1-bit and to set all the other bits to 0. * How to turn off (= set to 0) the rightmost 1-bit : x & (x - 1). These tricks are often used as something obvious in more complex bit-manipulation solutions, like for N Queens problem, and it's important to recognize them to understand what is going on. """ # While loop + Divide by 2 # Time: O(logN) # Space: O(1) class Solution(object): def isPowerOfTwo(self, n): """ :type n: int :rtype: bool """ while n > 1 and n % 2 == 0: n //= 2 return n == 1 # Bitwise Operators : Get the Rightmost 1-bit # x & (-x) keep the rightmost 1-bit and to set all the other bits to 0. # Time: O(1), > 82% # Space: O(1) class Solution(object): def isPowerOfTwo(self, n): """ :type n: int :rtype: bool """ if n == 0: return False return (n & -n) == n # Bitwise Operators : Turn off the Rightmost 1-bit # x & (x-1) set the rightmost 1-bit to zero. # Time: O(1), > 99% # Space: O(1) class Solution(object): def isPowerOfTwo(self, n): """ :type n: int :rtype: bool """ if n == 0: return False return (n & n-1) == 0
# -*- coding: utf-8 -*-
"""Duck-typing demo: anything with a quark() method can be 'quarked'."""


class Duck(object):
    def quark(self):
        """Make the duck sound."""
        print('Quaaaaaark!')


class Person(object):
    def quark(self):
        """People can quark too - duck typing only cares about the method."""
        print('Hello!')


def quarking(duck):
    """Call quark() on *duck*, silently ignoring objects that lack it."""
    try:
        duck.quark()
    except AttributeError:
        pass


if __name__ == '__main__':
    quarking(Duck())
    quarking(Person())
# -*- coding: utf-8 -*- """ smallparts.constants - common constants """ # # Single character constants # AMPERSAND = '&' ASTERISK = '*' AT = '@' BLANK = SPACE = SP = ' ' BRACE_OPEN = '{' BRACE_CLOSE = '}' COLON = ':' COMMA = ',' CARRIAGE_RETURN = CR = '\r' DASH = '-' DOT = '.' DOUBLE_QUOTE = '"' EMPTY = '' EQUALS = '=' HASH = POUND = '#' LINEFEED = LF = NEWLINE = NL = '\n' PERCENT = '%' PIPE = '|' PLUS_SIGN = '+' QUESTION_MARK = '?' SEMICOLON = ';' SINGLE_QUOTE = "'" SLASH = '/' TILDE = '~' UNDERSCORE = '_' # # Compound constants # COLON_BLANK = COLON + BLANK COMMA_BLANK = COMMA + BLANK CRLF = CR + LF # # Numeric constants # ZERO = 0 ONE = 1 FIRST_INDEX = ZERO SECOND_INDEX = ONE LAST_INDEX = -1 # # Encodings # ASCII = 'ascii' CP1252 = 'cp1252' UTF_8 = 'utf-8' # # Functional constants # MODE_APPEND = 'a+' MODE_APPEND_BINARY = 'a+b' MODE_READ = 'r' MODE_READ_BINARY = 'rb' MODE_WRITE = 'w' MODE_WRITE_BINARY = 'wb' YES = 'yes' NO = 'no' XML_1_0 = '1.0' XML_1_1 = '1.1' # # Return codes # RC_ERROR = 1 RC_OK = 0 # vim: fileencoding=utf-8 ts=4 sts=4 sw=4 autoindent expandtab syntax=python:
# Benchmark timings in milliseconds; each row is
# (phase name, Go, TinyGo, GopherJS).
DATA = [
    ("Load JS/WebAssembly", 2, 2, 2),
    ("Load /tmp/lines.txt", 225, 222, 218),
    ("From JS new Fzf() until ready to ....", 7825, 8548, 1579),
    ("Calling fzf-lib's fzf.New()", 1255, 3121, 963),
    ("return from fzfNew() function", 358, 7, 0),
    ("search() until library has result", 4235, 1394, 12132),
    ("Returning search result to JS callback", 1908, 1378, 416),
]


def create_plot(ax):
    """Draw a stacked bar chart of the DATA timings onto the axes *ax*."""
    labels = ["Go", "TinyGo", "GopherJS"]
    running_totals = [0, 0, 0]
    for name, *timings in DATA:
        # Each phase is stacked on top of the phases drawn before it.
        ax.bar(labels, timings, label=name, bottom=running_totals)
        running_totals = [total + t for total, t in zip(running_totals, timings)]
    ax.set_ylabel("Time (ms)")
    ax.set_ylim([0, 20000])
    ax.legend(ncol=2)
# Number-to-words quiz: read a number from 0 to 20 and print its name.
# BUG FIX: corrected the misspelled data values 'catorez' -> 'catorze' and
# 'dezenovo,' -> 'dezenove' (the stray comma was part of the string).
extenso = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco',
           'seis', 'sete', 'oito', 'nove', 'dez',
           'onze', 'doze', 'treze', 'catorze', 'quinze',
           'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')

while True:
    # Re-prompt until the number is inside the supported range.
    while True:
        n = int(input('Digite um número entre 0 e 20: '))
        if 0 <= n <= 20:
            break
        print('Tente novamente...', end='')
    print(f'Você digitou o número {extenso[n]}')
    # Re-prompt until a clear yes/no answer is given.
    # BUG FIX: `[:1]` instead of `[0]` avoids an IndexError when the user
    # just presses Enter (empty input re-prompts instead of crashing).
    while True:
        continua = str(input('Quer Continuar? [S/N] ')).strip().upper()[:1]
        if continua in ('S', 'N'):
            break
    if continua == 'N':
        break
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.

This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''

# Billing server endpoint (a Heroku staging app, per the URL).
graphlab_server = 'http://pws-billing-stage.herokuapp.com'
# Deployment mode label for this configuration.
mode = 'QA'
# Mixpanel analytics token.
# NOTE(review): presumably a project-wide token rather than a per-user id,
# despite the name - confirm.
mixpanel_user = '97b6ae8fe096844c2efb9f6c57165d41'
# CloudFront endpoint that receives metrics events.
metrics_url = 'http://d343i1j50yi7ez.cloudfront.net/i'
class BaseValidator:
    """Base for simple dict validators; subclasses declare REQUIRED_KEYS."""

    REQUIRED_KEYS = ()

    def validate(self, data):
        """Return True when every required key is present in *data*."""
        return all(key in data for key in self.REQUIRED_KEYS)


class UserValidator(BaseValidator):
    REQUIRED_KEYS = ('username', 'password')


class PotionValidator(BaseValidator):
    # BUG FIX: ('name') is just a parenthesized string, so validate() was
    # checking for the individual characters 'n', 'a', 'm', 'e' as keys.
    # The trailing comma makes it a one-element tuple checking the key "name".
    REQUIRED_KEYS = ('name',)
def poisson(i, j, u_tp, v_tp, dx, dy, dt, P, c, SOR): cm = ( u_tp[i, j] - u_tp[i-1, j] + v_tp[i, j] - v_tp[i, j-1] ) # conservation of mass pressure = (P[i+1, j] + P[i-1, j] + P[i, j+1] + P[i, j-1]) P[i, j] = SOR * (c[i, j])*(pressure - (dx/dt)*(cm)) + (1.0 - SOR) * P[i, j] return P def streamlines(i, j, dx, dy, dt, phi, vorticity, SOR): PHI = (phi[i+1, j] + phi[i-1, j] + phi[i, j+1] + phi[i, j-1]) phi[i, j] = ( (1.0/4.0) * SOR * (PHI + (dx*dy) * vorticity[i, j]) + (1.0 - SOR)*phi[i, j] ) return phi
AWS_REGION = 'us-east-1' #Lambda function name for querying lambda_func_name = "cloudtrailTrackerQueries" #Lambda function name for automatic event uploads lambda_func_name_trigger = "cloudtrailTrackerUploads" #Stage name for API Gateway stage_name = "cloudtrailtrackerStage" #DynamoDB Table name table_name = "cloudtrailtrackerdb" #Preconfigured S3 bucket by CloudTrail bucket_name = "cursocloudaws-trail" #API name API_name = "cloudtrailTrackerAPI" #eventNames that we DO NOT want to store - Filter filterEventNames = ["get", "describe", "list", "info", "decrypt", "checkmfa", "head"] ### Account IDs and permisiions #aws_acct_id = "111111111111" ### Roles #A role is needed with access to S3 / apig / lamba permissions # arn_role = 'arn:aws:iam::111111111111:role/your-iam-role' #Index name for DynamoDB Table - Do not modify if is not necessary index = 'userIdentity_userName-eventTime-index'
# Sentinel meaning "setup time has not been measured/assigned yet".
SETUP_TIME = -1.0
# File extension for intermediate result files.
INTERMED_FILE_FILEEXT = '.txt'
# Token used to join parts of generated file names.
# NOTE(review): the identifier keeps the historical misspelling of
# "separator"; renaming it would break importers.
SEPERATOR = '_'
# Suffixes appended to generated setup / runtime script file names.
SETUP_SCRIPT_POSTFIX = "_setup.sh"
RUNTIME_SCRIPT_POSTFIX = "_runtime.cmd"
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def zigzagLevelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:
        """Breadth-first traversal whose direction alternates per level:
        left-to-right on the first level, right-to-left on the next, etc.
        """
        if not root:
            return []
        output = []
        frontier = [root]
        left_to_right = True
        while frontier:
            values = [node.val for node in frontier]
            output.append(values if left_to_right else values[::-1])
            left_to_right = not left_to_right
            # Children are collected left-to-right; reversal above supplies
            # the zigzag, so the frontier itself always stays in tree order.
            frontier = [child
                        for node in frontier
                        for child in (node.left, node.right)
                        if child]
        return output
#title           :matchmaker_nance.py
#description     :This will ask a series of questions and determine match %.
#author          :Chris Nance
#date            :2022-03-13
#version         :1.0
#usage           :python matchmaker_nance.py
#notes           :Edit the questionAndAnswerDict Dictionary below.
#python_version  :3.10
#==============================================================================

# Please add, remove or modify any questions in the dictionary. The first
# number in each array is the correct score for the question while the second
# number is the weight (or how important) the question is to you.
# The script will automatically adjust the total points available and output
# based on your modifications to this dictionary.
# NOTE(review): despite the name, this is a tuple of lists rather than a dict;
# renaming would touch every reference, so the name is kept.
questionAndAnswerDict = (
    # ['QUESTION', SCORE, WEIGHT]
    ['iPhone is better than Android', 4, 1],
    ['I prefer the outdoors rather than being inside', 4, 3],
    ['Computer Science is one of the coolest fields of study', 5, 3],
    ['Data science is really fun', 1, 2],
    ['I like all four seasons rather than it just being hot all year', 2, 2],
)
#==============================================================================

## VARIABLES
user_score = 0           # weighted deviation while asking; flipped into a score below
user_score_nw = 0        # non-weighted deviation accumulator
user_answer = 0          # per-question answer placeholder
total_available = 0      # weighted points available
total_available_nw = 0   # non-weighted points available

## HEADER
print('''
    --------------------------------------
    --            MATCHMAKER 1.0        --
    --------------------------------------
    This program will ask '''+str(len(questionAndAnswerDict))+''' questions.\nYou will respond to each question with a\nnumber 1 through 5. The number 1 means you\ndisagree while the number 5 means you highly\nagree.At the end you wil be given your final\nscore and match maker percentage.
''')

## MAIN PROGRAM
for question_num in range(0,len(questionAndAnswerDict)): # Ask all questions in the dictionary; in order.
    # Multi-assignment of question, answer, weight and a placeholder for the user's answer.
    question, answer, weight, user_answer = questionAndAnswerDict[question_num][0], questionAndAnswerDict[question_num][1], questionAndAnswerDict[question_num][2], 0
    print('\n')
    print("Question:", question)
    # Re-prompt until a valid integer 1-5 is entered.
    while user_answer not in [1,2,3,4,5]:
        try:
            user_answer = int(input('Your Answer (From 1 to 5): '))
            if user_answer in [1,2,3,4,5]:
                break # Break on condition being met so error print does not happen.
        except ValueError as error:
            print("[ERROR]: Sorry, you MUST enter an INTEGER from 1 to 5:", error)
        except Exception as error:
            print("[ERROR]: There was an unknown error. Please try entering in an integer from 1 to 5:", error)
        print("[ERROR]: You need to enter an INTEGER from 1 to 5...")
    user_score += abs(answer - user_answer) * weight # Calculate the running total of points the user has accumulated. Abs prevents negatives.
    user_score_nw += abs(answer - user_answer)
    total_available += answer*weight # Calculate the total points available based on the dictionary of questions. Adding/Removing/Editing questions require no code change.
    total_available_nw += answer

user_score = total_available - user_score # Obtain true user score by subtracting their score from the total score available.
# NOTE(review): user_score_nw is NOT flipped the same way - after this point
# it still holds the raw deviation, which downstream output must account for.
## SCORE OUTPUT/REMARKS
# BUG FIX (non-weighted readout): user_score_nw still holds the accumulated
# deviation here (only the weighted score is flipped upstream), so the
# printable non-weighted score is total_available_nw - user_score_nw.
print(f"\n\nMatch Percent: {user_score/total_available*100:.2f}%.\nYou scored",
      str(user_score), "weighted points out of the possible", str(total_available),
      "available.\nYou scored", str(total_available_nw - user_score_nw),
      "non-weighted points out of the possible", str(total_available_nw), "available.")

# Remarks by match percentage.
# BUG FIX: the original final branch was `elif user_score > total_available*0.9`,
# so a score of exactly 90% matched no branch and printed no remark; the
# `else` closes that gap.
if user_score < total_available*0.5:      # <50% match
    print("Maybe we would be better off never talking again...")
elif user_score < total_available*0.7:    # <70% match
    print("I'm thinking we're just friends...")
elif user_score < total_available*0.9:    # <90% match
    print("This could really work out...")
else:                                     # >=90% match
    print("Perfect!")

print('''
    --------------------------------------
    --            MATCHMAKER 1.0        --
    --------------------------------------
''')
input('Thank you for using Match Maker 1.0\nPress Enter to close this window...')
# Find the value of the leftmost node on the deepest level of a binary tree.


class Node:
    """Plain binary-tree node."""

    def __init__(self, val):
        self.left = None
        self.right = None
        self.val = val


def findBottomLeftValue(root):
    """Breadth-first scan: when the frontier empties, `deepest_level` holds
    the last (deepest) level, and because children are enqueued left before
    right its first entry is the bottom-left node."""
    deepest_level = []
    frontier = [root]
    while frontier:
        deepest_level = frontier
        next_frontier = []
        for node in deepest_level:
            if node.left is not None:
                next_frontier.append(node.left)
            if node.right is not None:
                next_frontier.append(node.right)
        frontier = next_frontier
    return deepest_level[0].val


# Build the demo tree and show its bottom-left value.
root = Node(5)
root.left = Node(3)
root.right = Node(8)
root.right.left = Node(10)
root.right.left.right = Node(11)
print(findBottomLeftValue(root))
class DSSConstants(object):
    """SharePoint-related string constants: OData MIME types, auth scheme
    names, JSON field names, credential error messages and the mapping of
    schema types to SharePoint column types."""

    # MIME types for SharePoint's OData REST API.
    APPLICATION_JSON = "application/json;odata=verbose"
    APPLICATION_JSON_NOMETADATA = "application/json;odata=nometadata"
    # Supported authentication schemes.
    AUTH_LOGIN = "login"
    AUTH_OAUTH = "oauth"
    AUTH_SITE_APP = "site-app-permissions"
    CHILDREN = 'children'
    # strftime/strptime format for timestamps (ISO-8601 with microseconds).
    DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
    DIRECTORY = 'directory'
    EXISTS = 'exists'
    # Type used when a value's type has no entry in TYPES.
    FALLBACK_TYPE = "string"
    FULL_PATH = 'fullPath'
    IS_DIRECTORY = 'isDirectory'
    # Default request headers for JSON round-trips.
    JSON_HEADERS = {
        "Content-Type": APPLICATION_JSON,
        "Accept": APPLICATION_JSON
    }
    LAST_MODIFIED = 'lastModified'
    # Error message per missing field for username/password login.
    LOGIN_DETAILS = {
        "sharepoint_tenant": "The tenant name is missing",
        "sharepoint_site": "The site name is missing",
        "sharepoint_username": "The account's username is missing",
        "sharepoint_password": "The account's password is missing"
    }
    # Error message per missing field for OAuth access.
    OAUTH_DETAILS = {
        "sharepoint_tenant": "The tenant name is missing",
        "sharepoint_site": "The site name is missing",
        "sharepoint_oauth": "The access token is missing"
    }
    PATH = 'path'
    # Error message per missing field for site-app-permissions access.
    SITE_APP_DETAILS = {
        "sharepoint_tenant": "The tenant name is missing",
        "sharepoint_site": "The site name is missing",
        "tenant_id": "The tenant ID is missing. See documentation on how to obtain this information",
        "client_id": "The client ID is missing",
        "client_secret": "The client secret is missing"
    }
    SIZE = 'size'
    # Schema type -> SharePoint column type.
    TYPES = {
        "string": "Text",
        "map": "Note",
        "array": "Note",
        "object": "Note",
        "double": "Number",
        "float": "Number",
        "int": "Integer",
        "bigint": "Integer",
        "smallint": "Integer",
        "tinyint": "Integer",
        "date": "DateTime"
    }
# Read an integer and report whether it is positive, negative or zero.
value = int(input("Informe um número inteiro: "))
if value > 0:
    print(f"{value} é um valor positivo!")
elif value < 0:
    print(f"{value} é um valor negativo!")
else:
    print(f"{value} é um valor nulo!")
# This is an example script Import("projenv") # access to project construction environment print(projenv) # Dump construction environments (for debug purpose) print(projenv.Dump()) # append extra flags to only project build environment projenv.Append(CPPDEFINES=[ "PROJECT_EXTRA_MACRO_1_NAME", ("PROJECT_EXTRA_MACRO_2_NAME", "PROJECT_EXTRA_MACRO_2_VALUE") ])
class Inventory:
    """Read-only snapshot of a single stock holding."""

    def __init__(self, unRealize, nowPrice, stockno, stockname, amount, price):
        # All fields live in one private dict; the order below is also the
        # key order returned by toDict().
        self.__fields = {
            "stockno": stockno,
            "stockname": stockname,
            "amount": amount,
            "price": price,
            "unRealize": unRealize,
            "nowPrice": nowPrice,
        }

    @property
    def stockno(self):
        return self.__fields["stockno"]

    @property
    def stockname(self):
        return self.__fields["stockname"]

    @property
    def amount(self):
        return self.__fields["amount"]

    @property
    def price(self):
        return self.__fields["price"]

    @property
    def UnRealize(self):
        # Capitalisation kept as-is for API compatibility.
        return self.__fields["unRealize"]

    @property
    def nowPrice(self):
        return self.__fields["nowPrice"]

    def __str__(self):
        return """
        {
            stockno:%s,
            stockname:%s,
            amount:%s,
            price:%s,
            unRealize:%s,
            nowPrice:%s,
        }
        """ % (self.stockno, self.stockname, self.amount,
               self.price, self.UnRealize, self.nowPrice)

    def toDict(self):
        """Return the holding as a plain dict."""
        return dict(self.__fields)
def electionsWinners(votes, k):
    """Return how many candidates can still win the election.

    A candidate wins by getting strictly more votes than everyone else after
    receiving the k remaining votes.  When nobody can overtake the current
    leader (k == 0 effectively), the answer is 1 if the leader is unique,
    otherwise 0 (a tie means no outright winner).

    BUG FIXES: the original called votes.remove(max(votes)), mutating the
    caller's list, and crashed with ValueError (max of empty list) when
    votes had a single element.  This version has no side effects.
    """
    top = max(votes)
    # Candidates who would strictly beat the current leader with k extra votes.
    contenders = sum(1 for v in votes if v + k > top)
    if contenders == 0:
        # Nobody overtakes: only a unique current leader wins outright.
        return 1 if votes.count(top) == 1 else 0
    return contenders
'''
1. Write a Python program to calculate the length of a string.

2. Write a Python program to count the number of characters (character
frequency) in a string.
Sample String : 'google.com'
Expected Result : {'o': 3, 'g': 2, '.': 1, 'e': 1, 'l': 1, 'm': 1, 'c': 1}

3. Write a Python program to get a string made of the first 2 and the last 2
chars from a given string. If the string length is less than 2, return the
empty string instead.
Sample String : 'codedladies'
Expected Result : 'coes'
Sample String : 'co'
Expected Result : 'coco'
Sample String : ' c'
Expected Result : Empty String

4. Write a Python program to get a string from a given string where all
occurrences of its first char have been changed to '$', except the first
char itself.
Sample String : 'restart'
Expected Result : 'resta$t'

5. Write a Python program to get a single string from two given strings,
separated by a space, and swap the first two characters of each string.
Sample String : 'abc', 'xyz'
Expected Result : 'xyc abz'

6. Write a Python program to add 'ing' at the end of a given string (length
should be at least 3). If the given string already ends with 'ing' then add
'ly' instead. If the string length of the given string is less than 3, leave
it unchanged.
Sample String : 'abc'
Expected Result : 'abcing'
Sample String : 'string'
Expected Result : 'stringly'

7. Write a Python program to find the first appearance of the substrings
'not' and 'poor' in a given string; if 'poor' follows 'not', replace the
whole 'not'...'poor' substring with 'good'. Return the resulting string.
Sample String : 'The lyrics is not that poor!' / 'The lyrics is poor!'
Expected Result : 'The lyrics is good!' / 'The lyrics is poor!'

8. Write a Python function that takes a list of words and returns the length
of the longest one.

9. Write a Python program to remove the nth index character from a nonempty
string.

10. Write a Python program to change a given string to a new string where the
first and last chars have been exchanged.
'''
# Reads integers until the user answers "no", then reports how many were
# entered, their mean, and the largest and smallest values.
# BUG FIX: the original tested `continuar in 'Ss'`, and since '' is a
# substring of every string, pressing Enter on the continue prompt kept the
# loop running; the answer is now normalized and compared with ==.
# (A large commented-out earlier version of this script was also removed.)
soma = cont = 0
maior = menor = 0
continuar = 'S'
while continuar == 'S':
    numero = int(input('Digite um número: '))
    cont += 1
    soma += numero
    if cont == 1:
        # First value initializes both extremes.
        maior = menor = numero
    else:
        if numero > maior:
            maior = numero
        elif numero < menor:
            menor = numero
    # `[:1]` avoids an IndexError on empty input (unlike `[0]`).
    continuar = str(input('Deseja continuar [S/N]? ')).strip().upper()[:1]
media = f'{(soma / cont):.2f}'
print(f'Foram digitados {cont} valores, e a média entre eles é {media},\no maior valor foi {maior} e o menor valor foi {menor}')
class Point(object):
    """A 2-D point with x and y coordinates."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def ToString(self):
        """Return the point formatted as "(x, y)".

        BUG FIX: the original returned the literal text '("+x+", "+y+")' -
        string concatenation written inside the quotes - instead of
        interpolating the coordinates.
        """
        return '({}, {})'.format(self.x, self.y)
def _repo_path(repository_ctx): return repository_ctx.path(".") def _get_depot_tools(repository_ctx): # Clone Chrome Depot Tools. git = repository_ctx.which("git") git_command = [git, "clone", "https://chromium.googlesource.com/chromium/tools/depot_tools.git"] repository_ctx.report_progress("Cloning depot_tools into %s with command %s..." % (_repo_path(repository_ctx), git_command)) repository_ctx.execute(git_command, quiet = False, working_directory = "%s" % _repo_path(repository_ctx)) repository_ctx.report_progress("Cloning depot_tools is complete") return repository_ctx.path("depot_tools") def _gclient_metrics_opt_out(repository_ctx, depot_tools): repository_ctx.report_progress("Opting out of gclient metrics...") gclient = depot_tools.get_child("gclient") gclient_metrics_opt_out_command = [gclient, "metrics", "--opt-out"] repository_ctx.execute(gclient_metrics_opt_out_command, quiet = False) def _fetch_v8_source(repository_ctx, depot_tools): # Fetch V8 source code. v8_destination_path = repository_ctx.path("v8") fetch = depot_tools.get_child("fetch") fetch_v8_command = [fetch, "v8"] repository_ctx.report_progress("Fetching v8 codebase into %s..." 
% v8_destination_path) repository_ctx.execute(fetch_v8_command, quiet = False, working_directory = "%s" % _repo_path(repository_ctx)) repository_ctx.report_progress("Fetching v8 codebase is complete") return v8_destination_path def _checkout_v8_branch(repository_ctx, v8_path): # Checkout V8 branch with the specified name git = repository_ctx.which("git") git_fetch_command = [git, "fetch", "origin", "refs/heads/%s" % repository_ctx.attr.branch] repository_ctx.execute(git_fetch_command, working_directory = "%s" % v8_path) git_checkout_command = [git, "checkout", "-b", "refs/heads/%s" % repository_ctx.attr.branch] repository_ctx.execute(git_checkout_command, working_directory = "%s" % v8_path) def _gclient_sync(repository_ctx, depot_tools, v8_path): repository_ctx.report_progress("Executing gclient sync...") gclient = depot_tools.get_child("gclient") gclient_sync_command = [gclient, "sync"] repository_ctx.execute(gclient_sync_command, quiet = False, working_directory = "%s" % v8_path) def _clear_bazel_build_files(repository_ctx, v8_path): # Remove all BUILD.bazel files throughout the V8 source tree. # We're not yet using Bazel to build V8, as Bazel support in V8 is still # very early and needs more work. 
bash = repository_ctx.which("bash") rm = repository_ctx.which("rm") find = repository_ctx.which("find") grep = repository_ctx.which("grep") xargs = repository_ctx.which("xargs") remove_build_bazel_command = [bash, "-c", "%s %s | %s BUILD.bazel | %s %s -f " % (find, v8_path, grep, xargs, rm)] repository_ctx.execute(remove_build_bazel_command, quiet = False, working_directory = "%s" % repository_ctx.path(".")) def _v8_sources_impl(repository_ctx): depot_tools = _get_depot_tools(repository_ctx) _gclient_metrics_opt_out(repository_ctx, depot_tools = depot_tools) v8_destination_path = _fetch_v8_source(repository_ctx, depot_tools = depot_tools) _checkout_v8_branch(repository_ctx, v8_path = v8_destination_path) _gclient_sync(repository_ctx, depot_tools = depot_tools, v8_path = v8_destination_path) _clear_bazel_build_files(repository_ctx, v8_path = v8_destination_path) # Write the build shell script. build_script = """ set -e OUTPUT_DIR=$(readlink -f "$1") SOURCE_ROOT=$(readlink -f $(dirname "$0"))/v8 DEPOT_TOOLS=$(readlink -f $(dirname "$0"))/depot_tools # Configure and build V8 as a static library. # Use local Linux toolchain, as opposed to the toolchain packaged with V8, # to avoid dependencies on standard libraries packaged with V8. 
export CC=gcc
export CXX=g++
export CXXFLAGS="-Wno-unknown-warning-option -Wno-implicit-int-float-conversion -Wno-builtin-assume-aligned-alignment -Wno-final-dtor-non-final-class"
export AR=${AR:-ar}
export NM=${NM:-nm}
export LD=${LD:-ld}
cd ${SOURCE_ROOT}
echo SOURCE_ROOT=${SOURCE_ROOT}
./buildtools/linux64/gn gen out/x64.static --args="custom_toolchain=\\"//build/toolchain/linux/unbundle:default\\" use_sysroot=false linux_use_bundled_binutils=false use_lld=false strip_debug_info=false symbol_level=0 use_allocator_shim=false is_cfi=false use_gold=false v8_static_library=true v8_enable_gdbjit=false v8_monolithic=true clang_use_chrome_plugins=false v8_enable_shared_ro_heap=false v8_use_external_startup_data=false is_debug=false v8_symbol_level=1 v8_enable_handle_zapping=false use_glib=false v8_use_external_startup_data=false v8_enable_i18n_support=false v8_enable_webassembly=false is_clang=false use_custom_libcxx=false"
${DEPOT_TOOLS}/ninja -C out/x64.static v8_monolith
# Copy V8 static library to the output location
cp out/x64.static/obj/libv8_monolith.a "${OUTPUT_DIR}/libv8_monolith.a"
# Copy V8 public headers to the output location.
cd include
find -L . -type f | grep -i \\.h$ | xargs -I {} cp --parents {} ${OUTPUT_DIR}
"""
    # Materialize the script into the external repository so the genrule in
    # BUILD.bazel can invoke it as //:build_v8.
    repository_ctx.file(repository_ctx.path("build_v8.sh"), build_script)

    # Write the Bazel BUILD file.
v8_build_file = """ filegroup( name = "depot_tools", srcs = glob(["depot_tools/**/*"]), ) V8_HEADERS = [ "cppgc/allocation.h", "cppgc/common.h", "cppgc/cross-thread-persistent.h", "cppgc/custom-space.h", "cppgc/default-platform.h", "cppgc/ephemeron-pair.h", "cppgc/garbage-collected.h", "cppgc/heap-consistency.h", "cppgc/heap.h", "cppgc/internal/api-constants.h", "cppgc/internal/atomic-entry-flag.h", "cppgc/internal/caged-heap-local-data.h", "cppgc/internal/compiler-specific.h", "cppgc/internal/finalizer-trait.h", "cppgc/internal/gc-info.h", "cppgc/internal/logging.h", "cppgc/internal/name-trait.h", "cppgc/internal/persistent-node.h", "cppgc/internal/pointer-policies.h", "cppgc/internal/prefinalizer-handler.h", "cppgc/internal/write-barrier.h", "cppgc/liveness-broker.h", "cppgc/macros.h", "cppgc/member.h", "cppgc/name-provider.h", "cppgc/object-size-trait.h", "cppgc/persistent.h", "cppgc/platform.h", "cppgc/prefinalizer.h", "cppgc/source-location.h", "cppgc/trace-trait.h", "cppgc/type-traits.h", "cppgc/visitor.h", "libplatform/libplatform-export.h", "libplatform/libplatform.h", "libplatform/v8-tracing.h", "v8config.h", "v8-cppgc.h", "v8-fast-api-calls.h", "v8.h", "v8-inspector.h", "v8-inspector-protocol.h", "v8-internal.h", "v8-metrics.h", "v8-platform.h", "v8-profiler.h", "v8-unwinder-state.h", "v8-util.h", "v8-value-serializer-version.h", "v8-version.h", "v8-version-string.h", "v8-wasm-trap-handler-posix.h", "v8-wasm-trap-handler-win.h", ] filegroup( name = "build_v8", srcs = ["build_v8.sh"], ) genrule( name = "compile_v8", srcs = glob(["v8/**"], exclude = ["v8/tools/swarming_client/**"]) + [":depot_tools"], outs = [ "libv8_monolith.a", ] + V8_HEADERS, cmd = "$(location //:build_v8) $(@D)", tools = [":build_v8"], ) filegroup( name = "libv8", srcs = ["libv8_monolith.a"], data = [":compile_v8"], ) filegroup( name = "v8_headers", srcs = V8_HEADERS, ) cc_import( name = "v8", hdrs = [":v8_headers"], static_library = ":libv8", visibility = ["//visibility:public"], ) """ 
    # Install the generated BUILD file at the external repository root.
    repository_ctx.file(repository_ctx.path("BUILD.bazel"), v8_build_file)

# Repository rule entry point. `branch` selects which V8 branch to fetch and
# build; `local = True` marks the rule as dependent on the local system (it
# shells out to locally installed tools such as git and bash).
v8_sources = repository_rule(
    implementation = _v8_sources_impl,
    local = True,
    attrs = {"branch": attr.string(mandatory = True)},
)
## HOMESCAN SHARED FUNCTIONS
## (c) Copyright Si Dunford, Aug 2019
## Version 1.1.0

# Map an MQTT CONNACK return code to a human-readable error message.
def mqtt_errstr( rc ):
    messages = {
        0: "Success",
        1: "Incorrect protocol version",
        2: "Invalid client identifier",
        3: "Server unavailable",
        4: "Bad username or password",
        5: "Not authorised",
    }
    return messages.get(rc, "Unknown")

# Render *data* as a single fixed-width text row described by *definition*.
# definition maps column name -> width spec: an int, "N", "N:L" (left align)
# or "N:R" (right align). Missing columns render as empty fields.
def row( data, definition, padding=' ', gap=' ' ):
    try:
        cells = []
        for col, spec in definition.items():
            text = str(data[col]) if col in data else ''
            right = False
            if type(spec) == int:
                width = int(spec)
            elif spec.endswith( ":R" ):
                right = True
                width = int( spec[:-2] )
            elif spec.endswith( ":L" ):
                width = int( spec[:-2] )
            else:
                # Default to Align Left
                width = int(spec)
            if right:
                cells.append( text.rjust(width, padding) + gap )
            else:
                cells.append( text.ljust(width, padding) + gap )
        return ''.join(cells)
    except Exception as e:
        print( "EXCEPTION" )
        print( str(e) )
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)

# IP Helper functions

# Dotted-quad string -> 32-bit integer (network byte order).
def IP2Integer( ip_str ):
    packed = socket.inet_aton( ip_str )
    return struct.unpack( "!L", packed )[0]

# 32-bit integer -> dotted-quad string.
def Integer2IP( ip_int ):
    packed = struct.pack( '!L', ip_int )
    return socket.inet_ntoa( packed )

# Count set bits across all four octets, e.g. "255.255.255.0" -> 24.
def Mask2CIDR( netmask_str ):
    bits = 0
    for octet in netmask_str.split('.'):
        bits += bin(int(octet)).count('1')
    return bits
""" XVM (c) www.modxvm.com 2013-2017 """ # PUBLIC def getAvgStat(key): return _data.get(key, {}) # PRIVATE _data = {}
# File: Average_hight_of_pupils.py
# Description: Reading information from the file and finding the average values
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 Valentyn N Sichkar
# github.com/sichkar-valentyn
#
# Reference to:
# [1] Valentyn N Sichkar. Reading information from the file and finding the average values // GitHub platform [Electronic resource]. URL: https://github.com/sichkar-valentyn/Average_hight_of_pupils (date of access: XX.XX.XXXX)

# Task: compute the average height of pupils for each class (grades 1..11).

# Accumulate *value* under *key* in dict *d* as a [running_sum, count] pair.
def u_d(d, key, value):
    if key not in d:
        d[key] = [value, 1]
    else:
        d[key][0] += value
        d[key][1] += 1
    return d

# Read the dataset and fold each line into the accumulator dict.
a = {}
string = ''
with open('dataset_3380_5.txt') as inf:
    for line in inf:
        # split() yields whole whitespace-separated fields (not characters);
        # field 0 is the class number, field 2 the pupil's height.
        string = line.split()
        u_d(a, int(string[0]), int(string[2]))

# Print the average height per class, or '-' for classes with no pupils.
for i in range(1, 12):
    if i in a:
        total, count = a[i]
        print(i, total / count)
    else:
        print(i, '-')
print(a)
class Solution:
    def checkIfPangram(self, sentence: str) -> bool:
        """Return True if *sentence* contains every letter a-z at least once.

        Improvement over the original: the original looped over all 26
        letters with no early exit; set inclusion performs one pass over the
        input and stops as soon as the answer is known.

        Note: like the original, only lowercase letters are counted —
        uppercase input is not normalized.
        """
        return set("abcdefghijklmnopqrstuvwxyz") <= set(sentence)


solution = Solution()
print(solution.checkIfPangram(sentence = "thequickbrownfoxjumpsoverthelazydog"))
class BankOfAJ:
    """Toy ATM account model.

    Class attribute `loggedinCounter` counts how many accounts have been
    instantiated (used as a stand-in for customers logged in).
    """

    loggedinCounter = 0

    def __init__(self, theatmpin, theaccountbalance, thename):
        self.atmpin = theatmpin
        self.accountbalance = theaccountbalance
        self.name = thename
        # Every construction counts as one login.
        BankOfAJ.loggedinCounter = BankOfAJ.loggedinCounter + 1

    def CollectMoney(self, ammounttowithdraw):
        # NOTE(review): this only validates and prints; the balance is never
        # actually debited — confirm whether that is intended.
        if(ammounttowithdraw > self.accountbalance):
            print("Insufficient Funds oporrrr!")
        else:
            print("Alaye collect your cashh...may you get out.")

    def ChangePin(self, newPin):
        # Replace the stored PIN with the caller-supplied value.
        self.atmpin = newPin
        print("You don change your pin may you no forget am oo")

    @classmethod
    def NoofCustomersLoggedin(cls):
        # Bug fix: the original omitted the `cls` parameter, so invoking this
        # @classmethod raised TypeError (it received one implicit argument).
        print(" A total of" + str(cls.loggedinCounter) + "don come collect money.")


customer1 = BankOfAJ(2890, 10000000000000000, "AJ")
customer1.NoofCustomersLoggedin()
class Solution:
    def clumsy(self, N: int) -> int:
        """Clumsy factorial: N * (N-1) // (N-2) + (N-3) - ... with the
        operator sequence *, //, +, - applied cyclically.

        Uses the closed form: for N >= 5 the result depends only on N % 4
        (0 -> N+1, 1 -> N+2, 2 -> N+2, 3 -> N-1); N in 1..4 are handled as
        explicit base cases.
        """
        base_cases = {1: 1, 2: 2, 3: 6, 4: 7}
        if N in base_cases:
            return base_cases[N]
        remainder = N % 4
        if remainder == 0:
            return N + 1
        if remainder in (1, 2):
            return N + 2
        return N - 1
# Author : Nilesh D
# December 3 - The Decimation
#
# Read a comma-separated line from stdin, then repeatedly discard the back
# half of the list until the remaining prefix is in sorted (lexicographic)
# order, and print what survives.
values = input()
l = values.split(',')
while sorted(l) != l:
    l = l[:len(l) // 2]
print(l)
# Documentation snippets for the Dataloop SDK.
# NOTE(review): `dataset`, `dl`, and the ellipse parameters (x, y, rx, ry,
# angle, label) are not defined in this file — these functions assume the
# surrounding snippet runner provides them; they are not runnable standalone.

def section1():
    # Snippet: create and upload an ellipse annotation on an item.
    # Get item from the platform
    item = dataset.items.get(filepath='/your-image-file-path.jpg')
    # Create a builder instance
    builder = item.annotations.builder()
    # Create ellipse annotation with label - With params for an ellipse; x and y for the center, rx, and ry for the radius and rotation angle:
    builder.add(annotations_definition=dl.Ellipse(x=x, y=y, rx=rx, ry=ry, angle=angle, label=label))
    # Upload the ellipse to the item
    item.annotations.upload(builder)


def section2():
    # Snippet: set (or clear) an item's free-text description.
    # Get item from the platform
    item = dataset.items.get(filepath='/your-image-file-path.jpg')
    # Add description (update if already exists)- if text is empty it will remove the description from the item
    item.set_description(text="this is item description")