signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def run(self):
    """Listen for multicast pings until stopped.

    Joins the multicast group on both the default interface and localhost,
    binds the listening socket, then loops reading datagrams and passing
    them to the registered callback until ``self._live`` is cleared.
    """
    self._live = True
    self._sock.settimeout(self.LISTEN_TIMEOUT_S)
    # Passing in INADDR_ANY means the kernel will choose the default
    # interface. The localhost address is used to receive messages sent in
    # "local_only" mode and the default address is used to receive all
    # other messages.
    for interface_ip in (socket.INADDR_ANY, LOCALHOST_ADDRESS):
        self._sock.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_ADD_MEMBERSHIP,
            # IP_ADD_MEMBERSHIP takes the 8-byte group address followed by
            # the IP assigned to the interface on which to listen.
            struct.pack('!4sL', socket.inet_aton(self.address), interface_ip))
    if sys.platform == 'darwin':
        # Allow multiple listeners to bind (macOS requires SO_REUSEPORT).
        self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    else:
        # Allow multiple listeners to bind.
        self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._sock.bind((self.address, self.port))
    while self._live:
        try:
            data, address = self._sock.recvfrom(MAX_MESSAGE_BYTES)
            data = data.decode('utf-8')
            log_line = 'Received multicast message from %s: %s' % (address, data)
            response = self._callback(data)
            if response is not None:
                log_line += ', responding with %s bytes' % len(response)
                # Send replies out-of-band instead of with the same
                # multicast socket so that multiple processes on the same
                # host can listen for requests and reply (if they all try
                # to use the multicast socket to reply, they conflict and
                # this sendto fails).
                response = response.encode('utf-8')
                # Fix: close the ephemeral reply socket; the original
                # leaked one file descriptor per response sent.
                reply_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                try:
                    reply_sock.sendto(response, address)
                finally:
                    reply_sock.close()
            _LOG.debug(log_line)
        except socket.timeout:
            # Expected on an idle network; loop so _live is re-checked.
            pass
        except socket.error:
            _LOG.debug('Error receiving multicast message', exc_info=True)
def configure_truth(self, **kwargs):  # pragma: no cover
    """Configure the arguments passed to the ``axvline`` and ``axhline``
    methods when plotting truth values.

    If you do not call this explicitly, the :func:`plot` method will invoke
    this method automatically. Recommended to set the parameters
    ``linestyle``, ``color`` and/or ``alpha`` if you want some basic
    control. Default is to use an opaque black dashed line.

    Parameters
    ----------
    kwargs : dict
        The keyword arguments to unwrap when calling ``axvline`` and
        ``axhline``.

    Returns
    -------
    ChainConsumer
        Itself, to allow chaining calls.
    """
    # Only install the dashed-line default when the caller supplied no
    # line style at all (neither the short nor the long keyword).
    no_linestyle_given = all(kwargs.get(key) is None for key in ("ls", "linestyle"))
    if no_linestyle_given:
        kwargs["ls"] = "--"
        kwargs["dashes"] = (3, 3)
    if kwargs.get("color") is None:
        kwargs["color"] = "#000000"
    self.config_truth = kwargs
    self._configured_truth = True
    return self
def direct_to_template(request, template, extra_context=None, **kwargs):
    """Replacement for Django's ``direct_to_template`` that uses
    ``TemplateResponse`` via ``yacms.utils.views.render``.
    """
    # Note: intentionally aliases (and therefore mutates) the caller's
    # extra_context dict, matching the historical behaviour.
    context = extra_context or {}
    context["params"] = kwargs
    # Resolve lazy values: any callable in the context is replaced by the
    # result of calling it before rendering.
    for key in list(context):
        value = context[key]
        if callable(value):
            context[key] = value()
    return TemplateResponse(request, template, context)
def insert_one(self, validate=True):
    """Insert this document.

    The `validate` argument translates to the inverse of the
    `bypass_document_validation` PyMongo option.

    https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_one
    """
    options = {'bypass_document_validation': not validate}
    # 'source' is popped before forwarding the options to PyMongo; with
    # only the bypass flag present this pop always yields None.
    collection = self.get_collection(options.pop('source', None))
    return collection.insert_one(self, **options)
def xml_marshal_bucket_constraint(region):
    """Marshal a bucket location constraint based on *region*.

    :param region: Region name of a given bucket.
    :return: Marshalled XML data as bytes.
    """
    root = s3_xml.Element('CreateBucketConfiguration', {'xmlns': _S3_NAMESPACE})
    s3_xml.SubElement(root, 'LocationConstraint').text = region
    buffer = io.BytesIO()
    # No XML declaration: S3 accepts the bare configuration element.
    s3_xml.ElementTree(root).write(buffer, encoding=None, xml_declaration=False)
    return buffer.getvalue()
def get_target_transcript(self, min_intron=1):
    """Get the mapping onto the target strand.

    Merges adjacent alignment ranges whose gap is shorter than
    ``min_intron`` into single exons, then builds a Transcript.

    :param min_intron: minimum gap (in bases) treated as an intron;
        must be >= 1
    :returns: Transcript mapped to target
    :rtype: Transcript
    """
    if min_intron < 1:
        # NOTE(review): exits the whole process on bad input instead of
        # raising — confirm callers depend on this before changing.
        sys.stderr.write("ERROR minimum intron should be 1 base or longer\n")
        sys.exit()
    # tx = Transcript()
    # Seed with a copy of the first target range so merging never mutates
    # the stored alignment ranges.
    rngs = [self.alignment_ranges[0][0].copy()]
    # rngs[0].set_direction(None)
    for i in range(len(self.alignment_ranges) - 1):
        # Gap between the end of the last merged range and the start of
        # the next aligned range.
        dist = self.alignment_ranges[i + 1][0].start - rngs[-1].end - 1
        # print 'dist ' + str(dist)
        if dist >= min_intron:
            # Gap is a real intron: start a new exon range.
            rngs.append(self.alignment_ranges[i + 1][0].copy())
            # rngs[-1].set_direction(None)
        else:
            # Gap too small: extend the current exon over it.
            rngs[-1].end = self.alignment_ranges[i + 1][0].end
    # Both name and gene_name come from the query-side chromosome label.
    tx = Transcript(rngs, options=Transcript.Options(
        direction=self.strand,
        name=self.alignment_ranges[0][1].chr,
        gene_name=self.alignment_ranges[0][1].chr))
    # tx.set_exons_and_junctions_from_ranges(rngs)
    # tx.set_range()
    # tx.set_strand(self.get_strand())
    # tx.set_transcript_name(self.get_alignment_ranges()[0][1].chr)
    # tx.set_gene_name(self.get_alignment_ranges()[0][1].chr)
    return tx
def render_string(self, source: str, **vars) -> str:
    """Render the template contained in the given string.

    The current context will be available to the template as the ``ctx``
    variable.

    :param source: content of the template to render
    :param vars: extra variables made available to the template
    :return: the rendered results
    """
    # Expose the current context unless the caller supplied its own.
    if 'ctx' not in vars:
        vars['ctx'] = self._ctx
    return self._renderer.render_string(source, **vars)
def get_top_artists(self, period=PERIOD_OVERALL, limit=None):
    """Returns the top artists played by a user.

    * period: The period of time. Possible values:
      o PERIOD_OVERALL
      o PERIOD_7DAYS
      o PERIOD_1MONTH
      o PERIOD_3MONTHS
      o PERIOD_6MONTHS
      o PERIOD_12MONTHS
    """
    params = self._get_params()
    params["period"] = period
    if limit:
        params["limit"] = limit
    response = self._request(self.ws_prefix + ".getTopArtists", True, params)
    return _extract_top_artists(response, self.network)
def update_flags(self, idlist, flags):
    """A thin back-compat wrapper around ``build_update(flags=X)``."""
    update = self.build_update(flags=flags)
    return self.update_bugs(idlist, update)
def get_time_graded(self):
    """Gets the time the gradeable object was graded.

    return: (osid.calendaring.DateTime) - the timestamp of the grading
        entry
    raise: IllegalState - ``is_graded()`` is ``false`` or
        ``is_derived()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Only directly-graded objects carry a grading timestamp.
    if not self.is_graded() or self.is_derived():
        raise errors.IllegalState()
    stamp = self._my_map['timeGraded']
    return DateTime(
        year=stamp.year,
        month=stamp.month,
        day=stamp.day,
        hour=stamp.hour,
        minute=stamp.minute,
        second=stamp.second,
        microsecond=stamp.microsecond)
def log_handler(self, handler):
    """Setter for the log handler function.

    Args:
        self (JLink): the ``JLink`` instance
        handler: callable invoked with each DLL log message; falls back
            to a no-op when falsy

    Returns:
        ``None``
    """
    # NOTE(review): formatted with the whole body inside the guard —
    # the handler can only be (re)installed before the connection is
    # opened; confirm against the original indentation.
    if not self.opened():
        handler = handler or util.noop
        # Wrap the Python callable in the ctypes prototype the DLL expects
        # and keep a reference on self so it is not garbage collected.
        self._log_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)
        self._dll.JLINKARM_EnableLog(self._log_handler)
def restore_state(self, state):
    """Restore the current state of this emulated object.

    Args:
        state (dict): A previously dumped state produced by dump_state.

    Raises:
        ArgumentError: if the dumped state's name/version do not match
            this class's STATE_NAME / STATE_VERSION.
    """
    super(ReferenceController, self).restore_state(state)
    # Refuse to restore a dump produced by a different class or an
    # incompatible dump-format version.
    state_name = state.get('state_name')
    state_version = state.get('state_version')
    if state_name != self.STATE_NAME or state_version != self.STATE_VERSION:
        raise ArgumentError("Invalid emulated device state name or version",
                            found=(state_name, state_version),
                            expected=(self.STATE_NAME, self.STATE_VERSION))
    # Missing keys fall back to "unversioned" defaults.
    self.app_info = state.get('app_info', (0, "0.0"))
    self.os_info = state.get('os_info', (0, "0.0"))
    # Notify all subsystems of our intent to restore in case they need to prepare
    self.sensor_log.prepare_for_restore()
    # Restore all of the subsystems
    self.remote_bridge.restore(state.get('remote_bridge', {}))
    self.tile_manager.restore(state.get('tile_manager', {}))
    self.config_database.restore(state.get('config_database', {}))
    self.sensor_log.restore(state.get('sensor_log', {}))
def parse(cls, version_string, partial=False, coerce=False):
    """Parse a version string into a Version() object.

    Args:
        version_string (str), the version string to parse
        partial (bool), whether to accept incomplete input
        coerce (bool), whether to try to map the passed in string into a
            valid Version.

    Returns:
        tuple: (major, minor, patch, prerelease, build)

    Raises:
        ValueError: on empty input, unmatched syntax, or leading zeroes.
    """
    if not version_string:
        raise ValueError('Invalid empty version string: %r' % version_string)
    # Partial mode uses a laxer regex that allows missing components.
    if partial:
        version_re = cls.partial_version_re
    else:
        version_re = cls.version_re
    match = version_re.match(version_string)
    if not match:
        raise ValueError('Invalid version string: %r' % version_string)
    major, minor, patch, prerelease, build = match.groups()
    # Leading zeroes are forbidden in numeric components by semver.
    if _has_leading_zero(major):
        raise ValueError("Invalid leading zero in major: %r" % version_string)
    if _has_leading_zero(minor):
        raise ValueError("Invalid leading zero in minor: %r" % version_string)
    if _has_leading_zero(patch):
        raise ValueError("Invalid leading zero in patch: %r" % version_string)
    major = int(major)
    # In partial mode minor/patch may be absent; _coerce keeps them None.
    minor = cls._coerce(minor, partial)
    patch = cls._coerce(patch, partial)
    if prerelease is None:
        if partial and (build is None):
            # No build info, strip here
            return (major, minor, patch, None, None)
        else:
            prerelease = ()
    elif prerelease == '':
        prerelease = ()
    else:
        prerelease = tuple(prerelease.split('.'))
        cls._validate_identifiers(prerelease, allow_leading_zeroes=False)
    if build is None:
        if partial:
            # Partial versions distinguish "no build" (None) from an
            # explicitly empty build (()).
            build = None
        else:
            build = ()
    elif build == '':
        build = ()
    else:
        build = tuple(build.split('.'))
        # Build identifiers may carry leading zeroes per the spec.
        cls._validate_identifiers(build, allow_leading_zeroes=True)
    return (major, minor, patch, prerelease, build)
def fix_config(self, options):
    """Fixes the options, if necessary. I.e., it adds all required
    elements to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(ForLoop, self).fix_config(options)
    # Each loop parameter as (name, default, help text); iterating a table
    # replaces the three copy-pasted per-option blocks of the original.
    defaults = (
        ("min", 1, "The minimum for the loop (included, int)."),
        ("max", 10, "The maximum for the loop (included, int)."),
        ("step", 1, "The step size (int)."),
    )
    for opt, default, help_text in defaults:
        if opt not in options:
            options[opt] = default
        if opt not in self.help:
            self.help[opt] = help_text
    return options
def write_and_convert(self, text):
    '''Write the given text to our wrapped stream, stripping any ANSI
    sequences from the text, and optionally converting them into win32
    calls.'''
    pos = 0
    # Emit the plain text between ANSI escapes verbatim and hand each
    # escape sequence to the converter.
    for match in self.ANSI_RE.finditer(text):
        begin, finish = match.span()
        self.write_plain_text(text, pos, begin)
        self.convert_ansi(*match.groups())
        pos = finish
    # Trailing plain text after the last escape (or the whole string).
    self.write_plain_text(text, pos, len(text))
def parse_actor_and_date(line):
    """Parse out the actor (author or committer) info from a line like::

        author Tom Preston-Werner <tom@mojombo.com> 1191999972 -0700

    :return: [Actor, int_seconds_since_epoch, int_timezone_offset]
    """
    actor, epoch, offset = '', 0, 0
    match = _re_actor_epoch.search(line)
    if match:
        actor, epoch, offset = match.groups()
    else:
        # No timestamp present: fall back to an actor-only line, or treat
        # the raw line as the actor name.
        match = _re_only_actor.search(line)
        actor = match.group(1) if match else line or ''
    return (Actor._from_string(actor), int(epoch), utctz_to_altz(offset))
def detect(self, color_im, depth_im, cfg, camera_intr, T_camera_world,
           vis_foreground=False, vis_segmentation=False, segmask=None):
    """Detects all relevant objects in an rgbd image pair using foreground
    masking.

    Parameters
    ----------
    color_im : :obj:`ColorImage`
        color image for detection
    depth_im : :obj:`DepthImage`
        depth image for detection (corresponds to color image)
    cfg : :obj:`YamlConfig`
        parameters of detection function
    camera_intr : :obj:`CameraIntrinsics`
        intrinsics of the camera
    T_camera_world : :obj:`autolab_core.RigidTransform`
        registration of the camera to world frame
    vis_foreground : bool
        show the foreground mask stages with matplotlib
    vis_segmentation : bool
        NOTE(review): currently unused in this body — confirm
    segmask : :obj:`BinaryImage`
        optional segmask of invalid pixels

    Returns
    -------
    :obj:`list` of :obj:`RgbdDetection`
        all detections in the image
    """
    # read params
    min_pt_box = np.array(cfg['min_pt'])
    max_pt_box = np.array(cfg['max_pt'])
    min_contour_area = cfg['min_contour_area']
    max_contour_area = cfg['max_contour_area']
    min_box_area = cfg['min_box_area']
    max_box_area = cfg['max_box_area']
    box_padding_px = cfg['box_padding_px']
    crop_height = cfg['image_height']
    crop_width = cfg['image_width']
    # NOTE(review): depth_grad_thresh is read but never used below.
    depth_grad_thresh = cfg['depth_grad_thresh']
    point_cloud_mask_only = cfg['point_cloud_mask_only']
    w = cfg['filter_dim']
    half_crop_height = float(crop_height) / 2
    half_crop_width = float(crop_width) / 2
    half_crop_dims = np.array([half_crop_height, half_crop_width])
    # Largest observed valid depth; used to fill holes in thumbnails.
    fill_depth = np.max(depth_im.data[depth_im.data > 0])
    kinect2_denoising = False
    if 'kinect2_denoising' in cfg.keys() and cfg['kinect2_denoising']:
        kinect2_denoising = True
        depth_offset = cfg['kinect2_noise_offset']
        # NOTE(review): max_depth is read but never used below.
        max_depth = cfg['kinect2_noise_max_depth']
    # Workspace crop volume in world coordinates.
    box = Box(min_pt_box, max_pt_box, 'world')
    # project into 3D
    point_cloud_cam = camera_intr.deproject(depth_im)
    point_cloud_world = T_camera_world * point_cloud_cam
    # Keep only points inside the workspace box, then re-render a depth
    # image from the surviving points.
    seg_point_cloud_world, _ = point_cloud_world.box_mask(box)
    seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world
    depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)
    # mask image using background detection
    # NOTE(review): bgmodel is computed but never used below.
    bgmodel = color_im.background_model()
    binary_im = depth_im_seg.to_binary()
    if segmask is not None:
        binary_im = binary_im.mask_binary(segmask.inverse())
    # filter the image: dilate with a circular structuring element of
    # diameter w to close small holes in the foreground mask.
    y, x = np.ogrid[-w / 2 + 1:w / 2 + 1, -w / 2 + 1:w / 2 + 1]
    mask = x * x + y * y <= w / 2 * w / 2
    filter_struct = np.zeros([w, w]).astype(np.uint8)
    filter_struct[mask] = 1
    binary_im_filtered_data = snm.binary_dilation(binary_im.data,
                                                  structure=filter_struct)
    binary_im_filtered = BinaryImage(binary_im_filtered_data.astype(np.uint8),
                                     frame=binary_im.frame,
                                     threshold=0)
    # find all contours
    contours = binary_im_filtered.find_contours(min_area=min_contour_area,
                                                max_area=max_contour_area)
    if vis_foreground:
        # Debug visualization of the mask pipeline stages.
        plt.figure()
        plt.subplot(1, 3, 1)
        plt.imshow(color_im.data)
        plt.imshow(segmask.data, cmap=plt.cm.gray)
        plt.axis('off')
        plt.subplot(1, 3, 2)
        plt.imshow(binary_im.data, cmap=plt.cm.gray)
        plt.axis('off')
        plt.subplot(1, 3, 3)
        plt.imshow(binary_im_filtered.data, cmap=plt.cm.gray)
        plt.axis('off')
        plt.show()
    # switch to just return the mean of nonzero_px
    if point_cloud_mask_only == 1:
        # Single detection centered on the centroid of the mask.
        center_px = np.mean(binary_im_filtered.nonzero_pixels(), axis=0)
        ci = center_px[0]
        cj = center_px[1]
        binary_thumbnail = binary_im_filtered.crop(crop_height, crop_width, ci, cj)
        color_thumbnail = color_im.crop(crop_height, crop_width, ci, cj)
        depth_thumbnail = depth_im.crop(crop_height, crop_width, ci, cj)
        thumbnail_intr = camera_intr
        if camera_intr is not None:
            thumbnail_intr = camera_intr.crop(crop_height, crop_width, ci, cj)
        query_box = Box(center_px - half_crop_dims, center_px + half_crop_dims)
        return [RgbdDetection(color_thumbnail,
                              depth_thumbnail,
                              query_box,
                              binary_thumbnail=binary_thumbnail,
                              contour=None,
                              camera_intr=thumbnail_intr)]
    # convert contours to detections
    detections = []
    for i, contour in enumerate(contours):
        orig_box = contour.bounding_box
        logging.debug('Orig box %d area: %.3f' % (i, orig_box.area))
        if orig_box.area > min_box_area and orig_box.area < max_box_area:
            # convert orig bounding box to query bounding box
            min_pt = orig_box.center - half_crop_dims
            max_pt = orig_box.center + half_crop_dims
            query_box = Box(min_pt, max_pt, frame=orig_box.frame)
            # segment color to get refined detection
            contour_mask = binary_im_filtered.contour_mask(contour)
            binary_thumbnail = contour_mask.crop(query_box.height,
                                                 query_box.width,
                                                 query_box.ci,
                                                 query_box.cj)
        else:
            # otherwise take original bounding box, padded outward
            query_box = Box(contour.bounding_box.min_pt - box_padding_px,
                            contour.bounding_box.max_pt + box_padding_px,
                            frame=contour.bounding_box.frame)
            binary_thumbnail = binary_im_filtered.crop(query_box.height,
                                                       query_box.width,
                                                       query_box.ci,
                                                       query_box.cj)
        # crop to get thumbnails
        color_thumbnail = color_im.crop(query_box.height, query_box.width,
                                        query_box.ci, query_box.cj)
        depth_thumbnail = depth_im.crop(query_box.height, query_box.width,
                                        query_box.ci, query_box.cj)
        thumbnail_intr = camera_intr
        if camera_intr is not None:
            thumbnail_intr = camera_intr.crop(query_box.height, query_box.width,
                                              query_box.ci, query_box.cj)
        # fix depth thumbnail: fill holes with the max observed depth
        depth_thumbnail = depth_thumbnail.replace_zeros(fill_depth)
        if kinect2_denoising:
            # Offset masked depth values to compensate for sensor noise.
            depth_data = depth_thumbnail.data
            # NOTE(review): min_depth is computed but never used below.
            min_depth = np.min(depth_data)
            binary_mask_data = binary_thumbnail.data
            depth_mask_data = depth_thumbnail.mask_binary(binary_thumbnail).data
            depth_mask_data += depth_offset
            depth_data[binary_mask_data > 0] = depth_mask_data[binary_mask_data > 0]
            depth_thumbnail = DepthImage(depth_data, depth_thumbnail.frame)
        # append to detections
        detections.append(RgbdDetection(color_thumbnail,
                                        depth_thumbnail,
                                        query_box,
                                        binary_thumbnail=binary_thumbnail,
                                        contour=contour,
                                        camera_intr=thumbnail_intr))
    return detections
def translate_connect_args(self, names=None, **kw):
    r"""Translate url attributes into a dictionary of connection arguments.

    Returns attributes of this url (`host`, `database`, `username`,
    `password`, `port`) as a plain dictionary. The attribute names are
    used as the keys by default. Unset or false attributes are omitted
    from the final dictionary.

    :param \**kw: Optional, alternate key names for url attributes.

    :param names: Deprecated. Same purpose as the keyword-based alternate
        names, but correlates the name to the original positionally.
    """
    # Fix: the original used a mutable default (names=[]) and consumed the
    # caller's list with pop(0); work on a private copy instead.
    names = list(names) if names else []
    translated = {}
    attribute_names = ["host", "database", "username", "password", "port"]
    for sname in attribute_names:
        if names:
            # Positional (deprecated) alternate name takes precedence.
            name = names.pop(0)
        elif sname in kw:
            name = kw[sname]
        else:
            name = sname
        # Skip attributes that are unset/false, and names explicitly
        # mapped to None (which means "omit this attribute").
        if name is not None and getattr(self, sname, False):
            translated[name] = getattr(self, sname)
    return translated
def get(self, action, version=None):
    """Get the method class handling the given action and version.

    Falls back to the unversioned (``None``) handler when no handler is
    registered for the requested version.
    """
    handlers = self._by_action[action]
    try:
        return handlers[version]
    except KeyError:
        return handlers[None]
def drag_and_drop(self, droppable):
    """Drag this element and drop it onto another element.

    Currently works only on the Chrome driver.
    """
    # Make sure the source element is in view before starting the gesture.
    self.scroll_to()
    chain = ActionChains(self.parent.driver)
    chain.drag_and_drop(self._element, droppable._element).perform()
def add_arguments(parser):
    """Register CLI arguments on the given parser.

    Args:
        parser (argparse.ArgumentParser)
    """
    examples = (
        'Examples:\n'
        'python -m etk regex_extractor pattern /tmp/date.txt\n'
        'cat /tmp/date.txt | python -m etk regex_extractor pattern'
    )
    parser.description = examples
    # Both positionals are optional and default to stdin so the tool can
    # be used in a shell pipeline.
    parser.add_argument('pattern', nargs='?', type=str, default=sys.stdin)
    parser.add_argument('input_file', nargs='?',
                        type=argparse.FileType('r'), default=sys.stdin)
def signed_add(a, b):
    """Return wirevector for result of signed addition.

    :param a: a wirevector to serve as first input to addition
    :param b: a wirevector to serve as second input to addition

    Given a length n and length m wirevector the result of the signed
    addition is length max(n, m) + 1. The inputs are twos complement
    sign extended to the same length before adding.
    """
    a, b = match_bitwidth(as_wires(a), as_wires(b), signed=True)
    result_len = len(a) + 1
    # Sign extend both operands to the result width, add, then truncate
    # the carry-out so the result is exactly result_len bits.
    total = a.sign_extended(result_len) + b.sign_extended(result_len)
    return total[0:result_len]
def get_clients(self):
    """Return a merged dict of the public and private channel clients.

    Private entries override public ones on key collision, matching the
    original ``dict(public.items() + private.items())`` semantics.

    Fix: ``dict.items() + dict.items()`` raises TypeError on Python 3
    (items views do not support ``+``); merge via copy-and-update instead.
    """
    merged = dict(self.channels['public'])
    merged.update(self.channels['private'])
    return merged
def parse_aws_include_transform(data):
    """If the input data is an AWS::Include data, then parse and return
    the location of the included file.

    AWS::Include transform data usually has the following format::

        "Fn::Transform": {
            "Name": "AWS::Include",
            "Parameters": {
                "Location": "s3://MyAmazonS3BucketName/swagger.yaml"
            }
        }

    Parameters
    ----------
    data : dict
        Dictionary data to parse

    Returns
    -------
    str
        Location of the included file, if available. None, otherwise.
    """
    # Guard clauses: nothing to do without a Fn::Transform entry.
    if not data:
        return None
    if _FN_TRANSFORM not in data:
        return None
    transform_data = data[_FN_TRANSFORM]
    if transform_data.get("Name") != "AWS::Include":
        return None
    location = transform_data.get("Parameters", {}).get("Location")
    LOG.debug("Successfully parsed location from AWS::Include transform: %s",
              location)
    return location
def _read_mplain(self, lines):
    """Read text fragments from a multilevel format text file.

    Builds a three-level tree (paragraph -> sentence -> word) where each
    blank-line-separated run of non-empty lines forms one paragraph.

    :param list lines: the lines of the subtitles text file
    """
    self.log(u"Parsing fragments from subtitles text format")
    word_separator = self._mplain_word_separator()
    self.log([u"Word separator is: '%s'", word_separator])
    lines = [line.strip() for line in lines]
    # NOTE(review): pairs is never used in this body.
    pairs = []
    i = 1
    current = 0
    tree = Tree()
    while current < len(lines):
        line_text = lines[current]
        if len(line_text) > 0:
            # Collect the run of consecutive non-empty lines: one
            # paragraph, one sentence per line.
            sentences = [line_text]
            following = current + 1
            while (following < len(lines)) and (len(lines[following]) > 0):
                sentences.append(lines[following])
                following += 1
            # here sentences holds the sentences for this paragraph
            # create paragraph node
            paragraph_identifier = u"p%06d" % i
            paragraph_lines = [u" ".join(sentences)]
            paragraph_fragment = TextFragment(identifier=paragraph_identifier,
                                              lines=paragraph_lines,
                                              filtered_lines=paragraph_lines)
            paragraph_node = Tree(value=paragraph_fragment)
            tree.add_child(paragraph_node)
            self.log([u"Paragraph %s", paragraph_identifier])
            # create sentences nodes
            j = 1
            for s in sentences:
                # Sentence identifiers nest under the paragraph id.
                sentence_identifier = paragraph_identifier + u"s%06d" % j
                sentence_lines = [s]
                sentence_fragment = TextFragment(identifier=sentence_identifier,
                                                 lines=sentence_lines,
                                                 filtered_lines=sentence_lines)
                sentence_node = Tree(value=sentence_fragment)
                paragraph_node.add_child(sentence_node)
                j += 1
                self.log([u" Sentence %s", sentence_identifier])
                # create words nodes
                k = 1
                for w in [w for w in s.split(word_separator) if len(w) > 0]:
                    # Word identifiers nest under the sentence id.
                    word_identifier = sentence_identifier + u"w%06d" % k
                    word_lines = [w]
                    word_fragment = TextFragment(identifier=word_identifier,
                                                 lines=word_lines,
                                                 filtered_lines=word_lines)
                    word_node = Tree(value=word_fragment)
                    sentence_node.add_child(word_node)
                    k += 1
                    self.log([u" Word %s", word_identifier])
            # keep iterating: jump past the paragraph we just consumed
            current = following
            i += 1
        current += 1
    self.log(u"Storing tree")
    self.fragments_tree = tree
def trace_walker(module):
    """Defines a generator used to walk into modules.

    :param module: Module to walk.
    :type module: ModuleType
    :return: yields (class, member) pairs; the class slot is None for
        module-level functions, and the member slot is None for the
        class-announcement pair.
    :rtype: object or object
    """
    # Module-level functions first.
    for _, function in inspect.getmembers(module, inspect.isfunction):
        yield None, function
    # Then each class with its methods, functions and property accessors.
    for _, cls in inspect.getmembers(module, inspect.isclass):
        yield cls, None
        for _, method in inspect.getmembers(cls, inspect.ismethod):
            yield cls, method
        for _, function in inspect.getmembers(cls, inspect.isfunction):
            yield cls, function
        for _, accessor in inspect.getmembers(
                cls, lambda member: type(member) is property):
            yield cls, accessor.fget
            yield cls, accessor.fset
            yield cls, accessor.fdel
def create_background(bg_type, fafile, outfile, genome="hg18", width=200,
                      nr_times=10, custom_background=None):
    """Create background of a specific type.

    Parameters
    ----------
    bg_type : str
        Name of background type.
    fafile : str
        Name of input FASTA file.
    outfile : str
        Name of output FASTA file.
    genome : str, optional
        Genome name.
    width : int, optional
        Size of regions.
    nr_times : int, optional
        Generate this many times as many background sequences as compared
        to the input file.
    custom_background : str, optional
        Path to a custom background FASTA file (for ``bg_type="custom"``).

    Returns
    -------
    nr_seqs : int
        Number of sequences created.
    """
    width = int(width)
    config = MotifConfig()
    fg = Fasta(fafile)
    if bg_type in ["genomic", "gc"]:
        if not genome:
            logger.error("Need a genome to create background")
            sys.exit(1)
    if bg_type == "random":
        f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
        logger.debug("Random background: %s", outfile)
    elif bg_type == "genomic":
        logger.debug("Creating genomic background")
        f = RandomGenomicFasta(genome, width, nr_times * len(fg))
    elif bg_type == "gc":
        logger.debug("Creating GC matched background")
        f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
        logger.debug("GC matched background: %s", outfile)
    elif bg_type == "promoter":
        fname = Genome(genome).filename
        gene_file = fname.replace(".fa", ".annotation.bed.gz")
        if not gene_file:
            gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
        if not os.path.exists(gene_file):
            # Fix: the genome name was never substituted into the first
            # message in the original code ("... genome {}" had no format).
            print("Could not find a gene file for genome {}".format(genome))
            print("Did you use the --annotation flag for genomepy?")
            print("Alternatively make sure there is a file called {}.bed in {}".format(
                genome, config.get_gene_dir()))
            raise ValueError()
        logger.info(
            "Creating random promoter background (%s, using genes in %s)",
            genome, gene_file)
        f = PromoterFasta(gene_file, genome, width, nr_times * len(fg))
        logger.debug("Random promoter background: %s", outfile)
    elif bg_type == "custom":
        bg_file = custom_background
        if not bg_file:
            raise IOError("Background file not specified!")
        if not os.path.exists(bg_file):
            # Fix: IOError does not %-format its arguments; build the
            # message explicitly so the filename appears in the error.
            raise IOError("Custom background file %s does not exist!" % bg_file)
        else:
            logger.info("Copying custom background file %s to %s.",
                        bg_file, outfile)
            f = Fasta(bg_file)
            # Renamed from builtin-shadowing 'l' for readability (E741).
            median_length = np.median([len(seq) for seq in f.seqs])
            if median_length < (width * 0.95) or median_length > (width * 1.05):
                # Fix: logger.warn is deprecated; use logger.warning.
                logger.warning(
                    "The custom background file %s contains sequences with a "
                    "median length of %s, while GimmeMotifs predicts motifs in sequences "
                    "of length %s. This will influence the statistics! It is recommended "
                    "to use background sequences of the same length.",
                    bg_file, median_length, width)
    f.writefasta(outfile)
    return len(f)
def __try_read_record(self):
    """Try reading a record.

    Returns:
        (data, record_type) tuple.

    Raises:
        EOFError: when end of file was reached.
        InvalidRecordError: when valid record could not be read.
    """
    # Records never span blocks; if the remainder of the current block is
    # smaller than a header, it is padding.
    block_remaining = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE
    if block_remaining < _HEADER_LENGTH:
        return ('', _RECORD_TYPE_NONE)
    header = self.__reader.read(_HEADER_LENGTH)
    if len(header) != _HEADER_LENGTH:
        raise EOFError('Read %s bytes instead of %s' %
                       (len(header), _HEADER_LENGTH))
    (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header)
    crc = _unmask_crc(masked_crc)
    if length + _HEADER_LENGTH > block_remaining:
        # A record can't be bigger than one block.
        raise errors.InvalidRecordError('Length is too big')
    data = self.__reader.read(length)
    if len(data) != length:
        raise EOFError('Not enough data read. Expected: %s but got %s' %
                       (length, len(data)))
    if record_type == _RECORD_TYPE_NONE:
        return ('', record_type)
    # The stored CRC covers the record type byte followed by the payload.
    actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
    actual_crc = crc32c.crc_update(actual_crc, data)
    actual_crc = crc32c.crc_finalize(actual_crc)
    if actual_crc != crc:
        raise errors.InvalidRecordError('Data crc does not match')
    return (data, record_type)
def traverse_one(self, attribute, source, target, visitor):
    """Traverse one source/target node pair, recursing into relationship
    attributes, and finally visit the pair.

    :param attribute: the relationship attribute that led here, or None
        at the traversal root
    :param source: source data proxy
    :type source: instance of `DataTraversalProxy` or None
    :param target: target data proxy
    :type target: instance of `DataTraversalProxy` or None
    :param visitor: visitor whose ``visit`` is invoked for this pair
    """
    if __debug__:
        self.__log_traverse_one(self.__trv_path, attribute, source, target)
    # Whichever side is present drives the traversal decisions.
    prx = source or target
    # ADD (source only), REMOVE (target only) or UPDATE (both).
    rel_op = RELATION_OPERATIONS.check(source, target)
    if prx.do_traverse() \
       and (rel_op == prx.relation_operation or attribute is None):
        for attr in prx.get_relationship_attributes():
            # Check cascade settings.
            if not bool(attr.cascade & rel_op):
                continue
            if not source is None:
                try:
                    attr_source = source.get_attribute_proxy(attr)
                except AttributeError:
                    # If the source does not have the attribute set, we
                    # do nothing (as opposed to when the value is None).
                    continue
            else:
                attr_source = None
            if not target is None:
                attr_target = target.get_attribute_proxy(attr)
            else:
                attr_target = None
            attr_rel_op = RELATION_OPERATIONS.check(attr_source, attr_target)
            # Pick the parent node for the path: new attributes added
            # during an ADD hang off the source, everything else off the
            # target.
            if attr_rel_op == RELATION_OPERATIONS.ADD:
                if rel_op == RELATION_OPERATIONS.ADD:
                    parent = source
                else:
                    parent = target
            elif attr_rel_op == RELATION_OPERATIONS.REMOVE:
                parent = target
            else:  # UPDATE
                parent = target
            card = get_attribute_cardinality(attr)
            if card == CARDINALITY_CONSTANTS.ONE:
                if attr_source is None and attr_target is None:
                    # If both source and target have None values, there is
                    # nothing to do.
                    continue
                # Wrap the single-valued attribute in one-element lists so
                # traverse_many can treat both cardinalities uniformly.
                if attr_rel_op == RELATION_OPERATIONS.ADD:
                    # if not attr_source.get_id() is None:
                    #     # We only ADD new items.
                    #     continue
                    src_items = [attr_source]
                    tgt_items = None
                elif attr_rel_op == RELATION_OPERATIONS.REMOVE:
                    src_items = None
                    tgt_items = [attr_target]
                else:  # UPDATE
                    src_items = [attr_source]
                    tgt_items = [attr_target]
                    src_id = attr_source.get_id()
                    tgt_id = attr_target.get_id()
                    if src_id != tgt_id:
                        if not src_id is None:
                            # If the source ID is None, this is a replace
                            # operation (ADD source, REMOVE target).
                            src_target = attr_target.get_matching(src_id)
                            if not src_target is None:
                                tgt_items.append(src_target)
            else:
                # Collection-valued attribute: pass the proxies through.
                src_items = attr_source
                tgt_items = attr_target
            self.__trv_path.push(parent, (source, target), attr, rel_op)
            self.traverse_many(attr, src_items, tgt_items, visitor)
            self.__trv_path.pop()
            # path.pop()
    visitor.visit(self.__trv_path, attribute, source, target)
def validate(self, obj):
    """Base validation method. Will inspect class attributes to
    determine just what should be present."""
    if 'tags' in obj:
        if not isinstance(obj['tags'], list):
            raise aomi_excep.Validation('tags must be a list')
    # Required fields are only enforced for resources marked present.
    if self.present:
        check_obj(self.required_fields, self.name(), obj)
def _postprocess_output(self, output):
    '''Apply the final modifications before the output is returned.'''
    if self.vowel_style == CIRCUMFLEX_STYLE:
        # Replace long vowels (macrons) with circumflex characters.
        try:
            output = output.translate(vowels_to_circumflexes)
        except TypeError:
            # Python 2 raises TypeError here when the string contains no
            # macron characters to begin with; leave the text unchanged.
            pass
    # Output the desired case.
    return output.upper() if self.uppercase else output
def _FindPartition(self, key):
    """Finds the partition from the byte array representation of the partition key."""
    # Hash the key, then locate the owning partition with a
    # lower-bound search over the partition list.
    key_hash = self.hash_generator.ComputeHash(key)
    return self._LowerBoundSearch(self.partitions, key_hash)
def get_post(self, slug):
    """Return a single post looked up by *slug*, serving from cache when possible."""
    cache_key = self.get_cache_key(post_slug=slug)
    content = cache.get(cache_key)
    if content:
        return content
    post = Post.objects.get(slug=slug)
    content = self._format(post)
    # A falsy post is cached for only one second so it is retried quickly.
    duration = conf.GOSCALE_CACHE_DURATION if post else 1
    cache.set(cache_key, content, duration)
    return content
def _filter(self, data):
    """Apply a filter to reduce noisy data.

    Each incoming sample is appended to its per-channel history queue,
    and the median of that queue is returned for the channel.
    """
    filtered = []
    for history, sample in zip(self._raw_data_queues, data):
        history.append(sample)
        filtered.append(numpy.median(history))
    return filtered
def csetLog_maintenance(self, please_stop=None):
    '''
    Handles deleting old csetLog entries and timestamping revisions
    once they pass the length for permanent storage for deletion later.

    Runs as a long-lived worker loop: waits on ``self.maintenance_signal``,
    normalizes timestamps on csetLog rows, deletes stale annotation data,
    and schedules overflowing csetLog entries for deletion.

    :param please_stop: signal used to terminate the loop
    :return:
    '''
    while not please_stop:
        try:
            # Wait until something signals the maintenance cycle
            # to begin (or end).
            (self.maintenance_signal | please_stop).wait()
            if please_stop:
                break
            if self.disable_maintenance:
                continue

            # Reset signal so we don't request maintenance infinitely.
            with self.maintenance_signal.lock:
                self.maintenance_signal._go = False

            with self.working_locker:
                all_data = None
                with self.conn.transaction() as t:
                    # Rows ordered by revnum (ascending).
                    all_data = sorted(
                        t.get("SELECT revnum, revision, timestamp FROM csetLog"),
                        key=lambda x: int(x[0])
                    )

                # Restore maximum permanents (if overflowing): the newest
                # MINIMUM_PERMANENT_CSETS entries are marked permanent
                # (timestamp -1); everything else gets a real timestamp.
                new_data = []
                modified = False
                for count, (revnum, revision, timestamp) in enumerate(all_data[::-1]):
                    if count < MINIMUM_PERMANENT_CSETS:
                        if timestamp != -1:
                            modified = True
                            new_data.append((revnum, revision, -1))
                        else:
                            new_data.append((revnum, revision, timestamp))
                    elif type(timestamp) != int or timestamp == -1:
                        modified = True
                        new_data.append((revnum, revision, int(time.time())))
                    else:
                        new_data.append((revnum, revision, timestamp))

                # Delete annotations at revisions with timestamps
                # that are too old. The csetLog entries will have
                # their timestamps reset here.
                new_data1 = []
                annrevs_to_del = []
                current_time = time.time()
                for count, (revnum, revision, timestamp) in enumerate(new_data[::-1]):
                    new_timestamp = timestamp
                    if timestamp != -1:
                        if current_time >= timestamp + TIME_TO_KEEP_ANNOTATIONS.seconds:
                            modified = True
                            new_timestamp = current_time
                            annrevs_to_del.append(revision)
                    new_data1.append((revnum, revision, new_timestamp))

                if len(annrevs_to_del) > 0:
                    # Delete any latestFileMod and annotation entries
                    # that are too old.
                    Log.note(
                        "Deleting annotations and latestFileMod for revisions for being "
                        "older than {{oldest}}: {{revisions}}",
                        oldest=TIME_TO_KEEP_ANNOTATIONS,
                        revisions=annrevs_to_del
                    )
                    with self.conn.transaction() as t:
                        t.execute(
                            "DELETE FROM latestFileMod WHERE revision IN " +
                            quote_set(annrevs_to_del)
                        )
                        t.execute(
                            "DELETE FROM annotations WHERE revision IN " +
                            quote_set(annrevs_to_del)
                        )

                # Delete any overflowing entries beyond the
                # non-permanent cap.
                new_data2 = new_data1
                reved_all_data = all_data[::-1]
                deleted_data = reved_all_data[MAXIMUM_NONPERMANENT_CSETS:]
                delete_overflowing_revstart = None
                if len(deleted_data) > 0:
                    _, delete_overflowing_revstart, _ = deleted_data[0]
                    new_data2 = set(all_data) - set(deleted_data)

                    # Update old frontiers if requested, otherwise
                    # they will all get deleted by the csetLog_deleter
                    # worker.
                    if UPDATE_VERY_OLD_FRONTIERS:
                        _, max_revision, _ = all_data[-1]
                        for _, revision, _ in deleted_data:
                            with self.conn.transaction() as t:
                                old_files = t.get(
                                    "SELECT file FROM latestFileMod WHERE revision=?",
                                    (revision,)
                                )
                            if old_files is None or len(old_files) <= 0:
                                continue
                            self.tuid_service.get_tuids_from_files(
                                old_files,
                                max_revision,
                                going_forward=True,
                            )
                            # Poll until the frontier entries for this
                            # revision disappear (or we are stopped).
                            still_exist = True
                            while still_exist and not please_stop:
                                Till(seconds=TUID_EXISTENCE_WAIT_TIME).wait()
                                with self.conn.transaction() as t:
                                    old_files = t.get(
                                        "SELECT file FROM latestFileMod WHERE revision=?",
                                        (revision,)
                                    )
                                if old_files is None or len(old_files) <= 0:
                                    still_exist = False

                # Update table and schedule a deletion
                if modified:
                    with self.conn.transaction() as t:
                        t.execute(
                            "INSERT OR REPLACE INTO csetLog (revnum, revision, timestamp) VALUES " +
                            sql_list(quote_set(cset_entry) for cset_entry in new_data2)
                        )
                if not deleted_data:
                    continue

                Log.note(
                    "Scheduling {{num_csets}} for deletion",
                    num_csets=len(deleted_data)
                )
                self.deletions_todo.add(delete_overflowing_revstart)
        except Exception as e:
            Log.warning(
                "Unexpected error occured while maintaining csetLog, continuing to try: ",
                cause=e
            )
    return
def _dequeue_batch(self) -> Optional[Batch]:
    """Return a single batch from queue or ``None`` signaling epoch end.

    :raise ChildProcessError: if the enqueueing thread ended unexpectedly
    """
    # A missing thread means the wrapper was never entered as a
    # context manager; fail loudly with a descriptive message.
    if self._enqueueing_thread is None:
        raise ValueError(
            'StreamWrapper `{}` with buffer of size `{}` was used outside with-resource environment.'
            .format(self._name, self._buffer_size))

    # Previous epoch's producer finished and its output is fully
    # consumed: start a new producer thread for the next epoch.
    if not self._enqueueing_thread.is_alive() and self._queue.empty():
        self._start_thread()

    while True:
        try:
            # Short timeout so we periodically re-check producer health.
            batch = self._queue.get(timeout=2)
            self._queue.task_done()
            break
        except Empty:
            if not self._enqueueing_thread.is_alive():
                try:
                    # the enqueueing thread may just finished properly so lets check the queue eagerly
                    batch = self._queue.get_nowait()
                    self._queue.task_done()
                    break
                except Empty:
                    # so we failed to retrieve a batch and the enqueueing thread is dead
                    # there is no hope, something must went wrong
                    raise ChildProcessError('Enqueueing thread ended unexpectedly.')
    return batch
def create_operator(operator, auth, url, headers=HEADERS):
    """Create a new operator via the HPE IMC RESTful API.

    Takes a dictionary *operator* with the keys ``fullName``,
    ``sessionTimeout``, ``password``, ``operatorGroupId``, ``name``,
    ``desc``, ``defaultAcl`` and ``authType``, converts it to JSON and
    issues an HTTP POST request to the IMC RS interface.

    :param operator: dictionary with the required operator key-value pairs.
    :param auth: requests auth object (usually ``auth.creds`` from
        :class:`pyhpeimc.auth.IMCAuth`).
    :param url: base url of the IMC RS interface (usually ``auth.url``).
    :param headers: JSON-formatted headers; default set in the module.
    :return: HTTP status code — 201 when created, 409 when the operator
        already exists; an error string if the request itself failed.

    >>> import json
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.operator import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_operator = create_operator(operator, auth.creds, auth.url)
    >>> assert new_operator == 201
    """
    create_operator_url = '/imcrs/plat/operator'
    f_url = url + create_operator_url
    payload = json.dumps(operator, indent=4)
    # The POST itself must live inside the try block: previously it sat
    # outside, so the RequestException handler below was unreachable and
    # network failures escaped to the caller.
    try:
        r = requests.post(f_url, data=payload, auth=auth, headers=headers)
        if r.status_code == 409:
            # Operator already exists.
            return r.status_code
        elif r.status_code == 201:
            return r.status_code
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + ' create_operator: An Error has occured'
def rebuild(self):
    """
    Rebuilds the grid lines based on the current settings and scene
    width.  This method is triggered automatically, and shouldn't need
    to be manually called.
    """
    rect = self.sceneRect()
    x = rect.left()
    y = rect.top()
    w = rect.width()
    h = rect.height()

    # calculate background gridlines
    cx = x + (w / 2)
    cy = y + (h / 2)
    self._centerLines = [QLine(cx, rect.top(), cx, rect.bottom()),
                         QLine(rect.left(), cy, rect.right(), cy)]

    # horizontal lines fan out above/below the horizontal center line
    h_minor, h_major = self._buildGridLines(
        self.cellHeight(), h / 2,
        lambda delta: QLine(x, cy + delta, x + w, cy + delta))

    # vertical lines fan out left/right of the vertical center line
    v_minor, v_major = self._buildGridLines(
        self.cellWidth(), w / 2,
        lambda delta: QLine(cx + delta, y, cx + delta, y + h))

    # set the line cache (horizontal first, matching the build order)
    self._majorLines = h_major + v_major
    self._minorLines = h_minor + v_minor

    # unmark the scene as being dirty
    self.setDirty(False)

def _buildGridLines(self, step, limit, make_line):
    """
    Collect the minor/major grid lines along one axis.

    :param step: spacing between consecutive lines (cell size).
    :param limit: keep generating lines while the offset is below this.
    :param make_line: callable mapping a signed offset from the center
        line to a ``QLine``.
    :return: ``(minor_lines, major_lines)`` tuple of lists.
    """
    minor_lines = []
    major_lines = []
    delta = step
    count = 1
    while delta < limit:
        pos_line = make_line(delta)
        neg_line = make_line(-delta)

        # every 10th line will be a major line
        if count == 10:
            major_lines.append(pos_line)
            major_lines.append(neg_line)
            count = 1
        else:
            minor_lines.append(pos_line)
            minor_lines.append(neg_line)

        # advance to the next grid position
        delta += step
        count += 1
    return minor_lines, major_lines
def valid_index(index, shape) -> tuple:
    """Get a valid index for a broadcastable shape.

    Parameters
    ----------
    index : tuple
        Given index.
    shape : tuple of int
        Shape.

    Returns
    -------
    tuple
        Valid index.
    """
    # Pad the index with full slices up to the rank of ``shape``.
    padded = list(index) + [slice(None)] * (len(shape) - len(index))

    # Walk the trailing axes first: broadcast axes (size 1) collapse
    # slices to ``slice(None)`` and scalar indices to 0.
    result = []
    for idx, size in zip(reversed(padded), reversed(shape)):
        if size == 1:
            result.append(slice(None) if isinstance(idx, slice) else 0)
        else:
            result.append(idx)
    result.reverse()
    return tuple(result)
def round(self, ndigits=0):
    """Rounds the amount using the current ``Decimal`` rounding algorithm."""
    # Treat an explicit ``None`` the same as the default of 0 digits.
    digits = 0 if ndigits is None else ndigits
    exponent = Decimal('1e' + str(-digits))
    rounded_amount = self.amount.quantize(exponent)
    return self.__class__(amount=rounded_amount, currency=self.currency)
def get_rates_from_response_headers(headers):
    """Return a namedtuple with values for short- and long usage and limit
    rates found in provided HTTP response headers.

    :param headers: HTTP response headers
    :type headers: dict
    :return: namedtuple with request rates or None if no rate-limit headers
        present in response.
    :rtype: Optional[RequestRate]
    """
    try:
        raw_usage = headers['X-RateLimit-Usage']
        raw_limit = headers['X-RateLimit-Limit']
    except KeyError:
        # No rate-limit headers at all.
        return None
    usage = [int(part) for part in raw_usage.split(',')]
    limit = [int(part) for part in raw_limit.split(',')]
    return RequestRate(
        short_usage=usage[0],
        long_usage=usage[1],
        short_limit=limit[0],
        long_limit=limit[1],
    )
def _get_timethresh_heuristics(self):
    """Reasonably decent heuristics for how much time to wait before
    updating progress."""
    # (minimum length exclusive, seconds between updates), widest first.
    thresholds = (
        (1E5, 2.5),
        (1E4, 2.0),
        (1E3, 1.0),
    )
    for min_length, seconds in thresholds:
        if self.length > min_length:
            return seconds
    return 0.5
def prepare_mac_header(token, uri, key, http_method,
                       nonce=None,
                       headers=None,
                       body=None,
                       ext='',
                       hash_algorithm='hmac-sha-1',
                       issue_time=None,
                       draft=0):
    """Add an `MAC Access Authentication`_ signature to headers.

    Unlike OAuth 1, this HMAC signature does not require inclusion of the
    request payload/body, neither does it use a combination of
    client_secret and token_secret but rather a mac_key provided together
    with the access token.

    Currently two algorithms are supported, "hmac-sha-1" and
    "hmac-sha-256", `extension algorithms`_ are not supported.

    Example MAC Authorization header, linebreaks added for clarity

    Authorization: MAC id="h480djs93hd8",
                       nonce="1336363200:dj83hs9s",
                       mac="bhCQXTVyfj5cmA9uKkPFx1zeOXM="

    .. _`MAC Access Authentication`: https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01
    .. _`extension algorithms`: https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-7.1

    :param token:
    :param uri: Request URI.
    :param key: MAC given provided by token endpoint.
    :param http_method: HTTP Request method.
    :param nonce:
    :param headers: Request headers as a dictionary.
    :param body:
    :param ext:
    :param hash_algorithm: HMAC algorithm provided by token endpoint.
    :param issue_time: Time when the MAC credentials were issued (datetime).
    :param draft: MAC authentication specification version.
    :return: headers dictionary with the authorization field added.
    """
    http_method = http_method.upper()
    host, port = utils.host_from_uri(uri)

    # Select the HMAC digest constructor.
    if hash_algorithm.lower() == 'hmac-sha-1':
        h = hashlib.sha1
    elif hash_algorithm.lower() == 'hmac-sha-256':
        h = hashlib.sha256
    else:
        raise ValueError('unknown hash algorithm')

    # Draft 0 uses an age-prefixed nonce; later drafts use a separate
    # timestamp plus a plain nonce.
    if draft == 0:
        nonce = nonce or '{0}:{1}'.format(utils.generate_age(issue_time),
                                          common.generate_nonce())
    else:
        ts = common.generate_timestamp()
        nonce = common.generate_nonce()

    sch, net, path, par, query, fra = urlparse(uri)

    if query:
        request_uri = path + '?' + query
    else:
        request_uri = path

    # Hash the body/payload (draft 0 only).
    if body is not None and draft == 0:
        body = body.encode('utf-8')
        bodyhash = b2a_base64(h(body).digest())[:-1].decode('utf-8')
    else:
        bodyhash = ''

    # Create the normalized base string; element order is fixed by the
    # specification and differs between draft versions.
    base = []
    if draft == 0:
        base.append(nonce)
    else:
        base.append(ts)
        base.append(nonce)
    base.append(http_method.upper())
    base.append(request_uri)
    base.append(host)
    base.append(port)
    if draft == 0:
        base.append(bodyhash)
    base.append(ext or '')
    base_string = '\n'.join(base) + '\n'

    # hmac struggles with unicode strings - http://bugs.python.org/issue5285
    if isinstance(key, unicode_type):
        key = key.encode('utf-8')
    sign = hmac.new(key, base_string.encode('utf-8'), h)
    sign = b2a_base64(sign.digest())[:-1].decode('utf-8')

    # Assemble the Authorization header fields.
    header = []
    header.append('MAC id="%s"' % token)
    if draft != 0:
        header.append('ts="%s"' % ts)
    header.append('nonce="%s"' % nonce)
    if bodyhash:
        header.append('bodyhash="%s"' % bodyhash)
    if ext:
        header.append('ext="%s"' % ext)
    header.append('mac="%s"' % sign)

    headers = headers or {}
    headers['Authorization'] = ', '.join(header)
    return headers
def check_ffprobe(cls):
    """
    Check whether ``ffprobe`` can be called.

    Return ``True`` on failure and ``False`` on success.

    :rtype: bool
    """
    try:
        from aeneas.ffprobewrapper import FFPROBEWrapper
        file_path = gf.absolute_path(u"tools/res/audio.mp3", __file__)
        prober = FFPROBEWrapper()
        prober.read_properties(file_path)
        gf.print_success(u"ffprobe OK")
        return False
    except Exception:
        # Narrowed from a bare ``except:`` — any probing failure (missing
        # executable, import error, unreadable file) means ffprobe is not
        # usable, but KeyboardInterrupt/SystemExit must not be swallowed.
        pass
    gf.print_error(u"ffprobe ERROR")
    gf.print_info(u"  Please make sure you have ffprobe installed correctly")
    gf.print_info(u"  (usually it is provided by the ffmpeg installer)")
    gf.print_info(u"  and that its path is in your PATH environment variable")
    return True
def _next_move_direction(self):
    """pick a move at random from the list of moves"""
    nmoves = len(self.moves)
    # NOTE(review): randint(1, nmoves + 1) draws from [1, nmoves].  If
    # ``self.moves`` is a 0-indexed sequence of length ``nmoves``, the
    # lookup below can index one past the end and index 0 is never
    # chosen -- confirm ``self.moves`` is laid out for 1-based access.
    move = np.random.randint(1, nmoves + 1)
    # Re-draw while the candidate undoes the previous move; presumably
    # (move + 3) % nmoves encodes the opposite direction -- TODO confirm
    # this matches the move encoding.
    while self.prev_move == (move + 3) % nmoves:
        move = np.random.randint(1, nmoves + 1)
    self.prev_move = move
    return np.array(self.moves[move])
def _model_for_CLASS(self, name, definition):
    """
    Model a Swagger definition that is like a Python class.

    :param unicode name: The name of the definition from the specification.
    :param pyrsistent.PMap definition: A Swagger definition to categorize.
        This will be a value like the one found at
        ``spec["definitions"][name]``.
    """
    model = _ClassModel.from_swagger(self.pclass_for_definition, name, definition)
    return model
def proxy_for(widget):
    """Create a proxy for a Widget

    :param widget: A gtk.Widget to proxy

    This will raise a KeyError if there is no proxy type registered for
    the widget type.
    """
    factory = widget_proxies.get(widget.__class__)
    if factory is None:
        # No proxy registered for this widget class.
        raise KeyError('There is no proxy type registered for %r' % widget)
    return factory(widget)
def auth_aliases(d):
    """Interpret user/password aliases."""
    alias_map = ((USER_KEY, "readonly_user"),
                 (PASS_KEY, "readonly_password"))
    for alias, real in alias_map:
        if alias in d:
            # Move the value from the alias key to the canonical key.
            d[real] = d.pop(alias)
def write(self, message, flush=True):
    """Write *message* on the default stream.

    Examples:
        >>> stream.write('message')
        'message'

    :param message: str-like content to send on the stream
    :param flush: when True (default), flush the stream after writing
    :returns: None
    """
    stream = self.stream
    stream.write(message)
    if flush:
        stream.flush()
def cc(self, args=None, ret_val=None, sp_delta=None, func_ty=None):
    """
    Return a SimCC (calling convention) parametrized for this project and,
    optionally, a given function.

    :param args: A list of argument storage locations, as SimFunctionArguments.
    :param ret_val: The return value storage location, as a SimFunctionArgument.
    :param sp_delta: Does this even matter??
    :param func_ty: The prototype for the given function, as a SimType or a
        C-style function declaration that can be parsed into a
        SimTypeFunction instance.

    Relevant subclasses of SimFunctionArgument are SimRegArg and SimStackArg,
    and shortcuts to them can be found on this ``cc`` object.  For stack
    arguments, offsets are relative to the stack pointer on function entry.
    """
    cc_cls = self._default_cc
    return cc_cls(
        arch=self.project.arch,
        args=args,
        ret_val=ret_val,
        sp_delta=sp_delta,
        func_ty=func_ty,
    )
def str2midi(note_string):
    """Given a note string name (e.g. "Bb4"), returns its MIDI pitch number."""
    if note_string == "?":
        return nan
    data = note_string.strip().lower()
    name2delta = {"c": -9, "d": -7, "e": -5, "f": -4, "g": -2, "a": 0, "b": 2}
    accident2delta = {"b": -1, "#": 1, "x": 2}

    # Accidentals immediately follow the note letter.
    accidents = []
    for ch in data[1:]:
        if ch not in accident2delta:
            break
        accidents.append(ch)
    accident_sum = sum(accident2delta[ch] for ch in accidents)

    # Whatever remains after the accidentals is the octave number.
    octave_delta = int(data[len(accidents) + 1:]) - 4

    pitch = MIDI_A4 + name2delta[data[0]] + accident_sum + 12 * octave_delta
    return pitch
def known_dists():
    '''Return an iterator over all Distributions exporting udata.* entrypoints'''
    def exports_entrypoints(dist):
        # A distribution qualifies when any of its entry-point groups is
        # one of the recognised udata entrypoint groups.
        return any(group in ENTRYPOINTS for group in dist.get_entry_map().keys())

    return (dist for dist in pkg_resources.working_set if exports_entrypoints(dist))
def _adjust_probability_vec_best(population, fitnesses, probability_vec, adjust_rate):
    """Shift probabilities towards the best solution."""
    # Pair each solution with its fitness and take the fittest (ties
    # broken by comparing the solutions themselves, as tuples compare
    # element-wise).
    _, best_solution = max(zip(fitnesses, population))
    return _adjust(probability_vec, best_solution, adjust_rate)
def run_xenon(workflow, *, machine, worker_config, n_processes, deref=False,
              verbose=False):
    """Run the workflow using a number of online Xenon workers.

    :param workflow: |Workflow| or |PromisedObject| to evaluate.
    :param machine: The |Machine| instance.
    :param worker_config: Configuration of the pilot job
    :param n_processes: Number of pilot jobs to start.
    :param deref: Set this to True to pass the result through one more
        encoding and decoding step with object dereferencing turned on.
    :param verbose: Enable verbose scheduler output.
    :returns: the result of evaluating the workflow
    """
    # Spin up one named pilot-job worker per requested process.
    dynamic_pool = DynamicPool(machine)
    for i in range(n_processes):
        cfg = copy(worker_config)  # each worker gets its own config copy
        cfg.name = 'xenon-{0:02}'.format(i)
        dynamic_pool.add_xenon_worker(cfg)

    job_keeper = JobKeeper()
    S = Scheduler(job_keeper=job_keeper, verbose=verbose)

    result = S.run(dynamic_pool, get_workflow(workflow))

    # All workers are released once the workflow has been evaluated.
    dynamic_pool.close_all()

    if deref:
        # Re-encode/decode the result with dereferencing enabled.
        return worker_config.registry().dereference(result, host='scheduler')
    else:
        return result
def _calc_font_size(self, win_wd):
    """
    Heuristic to calculate an appropriate font size based on the width
    of the viewer window.

    Parameters
    ----------
    win_wd : int
        The width of the viewer window.

    Returns
    -------
    font_size : int
        Approximately appropriate font size in points
    """
    # (minimum window width, font size), from widest to narrowest.
    size_table = (
        (1600, 24),
        (1000, 18),
        (800, 16),
        (600, 14),
        (500, 12),
        (400, 11),
        (300, 10),
        (250, 8),
        (200, 6),
    )
    for min_width, size in size_table:
        if win_wd >= min_width:
            return size
    return 4
def cached_request(self, request):
    """
    Return a cached response if it exists in the cache, otherwise
    return False.
    """
    cache_url = self.cache_url(request.url)
    logger.debug('Looking up "%s" in the cache', cache_url)
    cc = self.parse_cache_control(request.headers)

    # Bail out if the request insists on fresh data
    if "no-cache" in cc:
        logger.debug('Request header has "no-cache", cache bypassed')
        return False

    if "max-age" in cc and cc["max-age"] == 0:
        logger.debug('Request header has "max_age" as 0, cache bypassed')
        return False

    # Request allows serving from the cache, let's see if we find something
    cache_data = self.cache.get(cache_url)
    if cache_data is None:
        logger.debug("No cache entry available")
        return False

    # Check whether it can be deserialized
    resp = self.serializer.loads(request, cache_data)
    if not resp:
        logger.warning("Cache entry deserialization failed, entry ignored")
        return False

    # If we have a cached 301, return it immediately. We don't
    # need to test our response for other headers b/c it is
    # intrinsically "cacheable" as it is Permanent.
    # See:
    #   https://tools.ietf.org/html/rfc7231#section-6.4.2
    # Client can try to refresh the value by repeating the request
    # with cache busting headers as usual (ie no-cache).
    if resp.status == 301:
        msg = (
            'Returning cached "301 Moved Permanently" response '
            "(ignoring date and etag information)"
        )
        logger.debug(msg)
        return resp

    headers = CaseInsensitiveDict(resp.headers)
    if not headers or "date" not in headers:
        if "etag" not in headers:
            # Without date or etag, the cached response can never be used
            # and should be deleted.
            logger.debug("Purging cached response: no date or etag")
            self.cache.delete(cache_url)
        logger.debug("Ignoring cached response: no date")
        return False

    now = time.time()
    date = calendar.timegm(parsedate_tz(headers["date"]))
    current_age = max(0, now - date)
    logger.debug("Current age based on date: %i", current_age)

    # TODO: There is an assumption that the result will be a
    #       urllib3 response object. This may not be best since we
    #       could probably avoid instantiating or constructing the
    #       response until we know we need it.
    resp_cc = self.parse_cache_control(headers)

    # determine freshness
    freshness_lifetime = 0

    # Check the max-age pragma in the cache control header
    if "max-age" in resp_cc:
        freshness_lifetime = resp_cc["max-age"]
        logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)

    # If there isn't a max-age, check for an expires header
    elif "expires" in headers:
        expires = parsedate_tz(headers["expires"])
        if expires is not None:
            expire_time = calendar.timegm(expires) - date
            freshness_lifetime = max(0, expire_time)
            logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)

    # Determine if we are setting freshness limit in the
    # request. Note, this overrides what was in the response.
    if "max-age" in cc:
        freshness_lifetime = cc["max-age"]
        logger.debug(
            "Freshness lifetime from request max-age: %i", freshness_lifetime
        )

    if "min-fresh" in cc:
        min_fresh = cc["min-fresh"]
        # adjust our current age by our min fresh
        current_age += min_fresh
        logger.debug("Adjusted current age from min-fresh: %i", current_age)

    # Return entry if it is fresh enough
    if freshness_lifetime > current_age:
        logger.debug('The response is "fresh", returning cached response')
        logger.debug("%i > %i", freshness_lifetime, current_age)
        return resp

    # we're not fresh. If we don't have an Etag, clear it out
    if "etag" not in headers:
        logger.debug('The cached response is "stale" with no etag, purging')
        self.cache.delete(cache_url)

    # return the original handler
    return False
def construct_multi_parameter_validators(parameters, context):
    """
    Given an iterable of parameters, returns a dictionary of validator
    functions for each parameter.  Note that this expects the parameters
    to be unique in their name value, and throws an error if this is not
    the case.
    """
    validators = ValidationDict()
    for parameter in parameters:
        name = parameter['name']
        if name in validators:
            # Parameter names must be unique within the collection.
            raise ValueError("Duplicate parameter name {0}".format(name))
        field_validators = construct_parameter_validators(parameter, context=context)
        validators.add_validator(
            name,
            generate_object_validator(field_validators=field_validators),
        )
    return validators
def _set_key_table(self, v, load=False):
    """
    Setter method for key_table, mapped from YANG variable
    /interface/fortygigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/md5_authentication/key_table (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_key_table is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_key_table() directly.
    """
    # Coerce union-typed values to their underlying type before wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG container type; a failure
        # here means the supplied value is not container-compatible.
        t = YANGDynClass(v, base=key_table.key_table, is_container='container', presence=False, yang_name="key-table", rest_name="key-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' MD5 authentication key ID table ', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'key-id'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """key_table must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=key_table.key_table, is_container='container', presence=False, yang_name="key-table", rest_name="key-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' MD5 authentication key ID table ', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'key-id'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })
    self.__key_table = t
    # Notify the parent object (if it supports it) that a child changed.
    if hasattr(self, '_set'):
        self._set()
def convert_type_list_elements(list_object=None, element_type=str):
    """Recursively convert all elements and all elements of all sublists of
    a list to a specified type and return the new list.

    :param list_object: the (possibly nested) list whose elements to convert.
    :param element_type: callable applied to every non-list element
        (defaults to ``str``).
    :return: a new list with every non-list element converted.
    """
    # Previously only ``element_type is str`` was handled and any other
    # type silently returned None; now the requested type is applied
    # uniformly, preserving the original behavior for ``str``.
    return [
        element_type(element)
        if not isinstance(element, list)
        else convert_type_list_elements(list_object=element,
                                        element_type=element_type)
        for element in list_object
    ]
def clear():
    """Clears the console."""
    # Windows uses ``cls``; every other platform uses ``clear``.
    command = "cls" if sys.platform.startswith("win") else "clear"
    call(command, shell=True)
def atq(tag=None):
    '''
    List all queued and running jobs or only those with
    an optional 'tag'.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atq
        salt '*' at.atq [tag]
        salt '*' at.atq [job number]
    '''
    jobs = []

    # Shim to produce output similar to what __virtual__() should do
    # but __salt__ isn't available in __virtual__()
    # Tested on CentOS 5.8
    if __grains__['os_family'] == 'RedHat':
        output = _cmd('at', '-l')
    else:
        output = _cmd('atq')

    if output is None:
        return '\'at.atq\' is not available.'

    # No jobs so return
    if output == '':
        return {'jobs': jobs}

    # Jobs created with at.at() will use the following
    # comment to denote a tagged job.
    job_kw_regex = re.compile(r'^### SALT: (\w+)')

    # Split each job into a dictionary and handle
    # pulling out tags or only listing jobs with a certain
    # tag
    for line in output.splitlines():
        job_tag = ''

        # Redhat/CentOS
        if __grains__['os_family'] == 'RedHat':
            job, spec = line.split('\t')
            specs = spec.split()
        elif __grains__['os'] == 'OpenBSD':
            # Skip the header line, then parse the fixed-column output.
            if line.startswith(' Rank'):
                continue
            else:
                tmp = line.split()
                timestr = ' '.join(tmp[1:5])
                job = tmp[6]
                specs = datetime.datetime(*(time.strptime(timestr, '%b %d, %Y '
                                                          '%H:%M')[0:5])).isoformat().split('T')
                specs.append(tmp[7])
                specs.append(tmp[5])
        elif __grains__['os'] == 'FreeBSD':
            # Skip the header line, then parse the fixed-column output.
            if line.startswith('Date'):
                continue
            else:
                tmp = line.split()
                timestr = ' '.join(tmp[1:6])
                job = tmp[8]
                specs = datetime.datetime(*(time.strptime(timestr,
                                                          '%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T')
                specs.append(tmp[7])
                specs.append(tmp[6])
        else:
            # Generic atq output: job number, tab, free-form date spec.
            job, spec = line.split('\t')
            tmp = spec.split()
            timestr = ' '.join(tmp[0:5])
            specs = datetime.datetime(*(time.strptime(timestr)[0:5])).isoformat().split('T')
            specs.append(tmp[5])
            specs.append(tmp[6])

        # Search for any tags
        atc_out = _cmd('at', '-c', job)
        for line in atc_out.splitlines():
            tmp = job_kw_regex.match(line)
            if tmp:
                job_tag = tmp.groups()[0]

        if __grains__['os'] in BSD:
            job = six.text_type(job)
        else:
            job = int(job)

        # If a tag is supplied, only list jobs with that tag
        if tag:
            # TODO: Looks like there is a difference between salt and salt-call
            # If I don't wrap job in an int(), it fails on salt but works on
            # salt-call. With the int(), it fails with salt-call but not salt.
            if tag == job_tag or tag == job:
                jobs.append({'job': job,
                             'date': specs[0],
                             'time': specs[1],
                             'queue': specs[2],
                             'user': specs[3],
                             'tag': job_tag})
        else:
            jobs.append({'job': job,
                         'date': specs[0],
                         'time': specs[1],
                         'queue': specs[2],
                         'user': specs[3],
                         'tag': job_tag})
    return {'jobs': jobs}
def validate_implementation_for_auto_decode_and_soupify(func):
    """Validate that :func:`auto_decode_and_soupify` is applicable to this function.

    The decorated function must accept all three keyword parameters
    ``response``, ``html`` and ``soup``. If any of them is missing, a
    ``NotImplementedError`` is raised.

    :param func: the function to validate.
    :raises NotImplementedError: if ``func`` does not declare one of the
        required keyword parameters.
    """
    # ``inspect.getargspec`` was deprecated since Python 3.0 and removed in
    # Python 3.11; ``inspect.signature`` also correctly reports keyword-only
    # parameters, which ``getargspec`` could not handle.
    parameters = inspect.signature(func).parameters
    for arg in ["response", "html", "soup"]:
        if arg not in parameters:
            raise NotImplementedError(
                ("{func} method has to take the keyword syntax input: "
                 "{arg}").format(func=func, arg=arg)
            )
def generateNodeDocuments(self):
    '''Creates all of the reStructuredText documents for types parsed by Doxygen.

    This covers every leaf-like document (``class``, ``struct``, ``enum``,
    ``typedef``, ``union``, ``variable``, and ``define``) as well as the
    parent-like namespace, file, and directory pages.  Even nodes that were
    reparented under a class or struct during parsing still receive their own
    document here, because ``self.all_nodes`` contains every node regardless
    of where it ended up in the hierarchy.
    '''
    # Filenames and link names must exist for every node before any document
    # is written, since documents cross-reference each other.
    for node in self.all_nodes:
        self.initializeNodeFilenameAndLink(node)

    self.adjustFunctionTitles()

    # Leaf-like kinds each get a standalone document.
    leaf_nodes = (n for n in self.all_nodes if n.kind in utils.LEAF_LIKE_KINDS)
    for leaf in leaf_nodes:
        self.generateSingleNodeRST(leaf)

    # Parent-like documents (namespaces, files, directories) come last.
    self.generateNamespaceNodeDocuments()
    self.generateFileNodeDocuments()
    self.generateDirectoryNodeDocuments()
def RandomUniformInt(shape, minval, maxval, seed):
    """Random uniform int op.

    :param shape: output shape (int or tuple of ints).
    :param minval: inclusive lower bound of the sampled integers.
    :param maxval: exclusive upper bound of the sampled integers.
    :param seed: RNG seed, or ``None`` for nondeterministic output.
    :return: 1-tuple containing the sampled integer array (ops return a tuple
        of outputs, hence the trailing comma).
    """
    # Bug fix: the original used ``if seed:``, which silently ignored the
    # perfectly valid seed value 0. Only skip seeding when seed is None.
    if seed is not None:
        np.random.seed(seed)
    return np.random.randint(minval, maxval, size=shape),
def _set_bd_add(self, v, load=False):
    """
    Setter method for bd_add, mapped from YANG variable
    /routing_system/evpn_config/evpn/evpn_instance/bridge_domain/bd_add
    (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_bd_add is considered as a private method. Backends looking to
    populate this variable should do so via calling
    thisObj._set_bd_add() directly.

    NOTE(review): the ``load`` parameter is not referenced in this body;
    presumably part of the generated pyangbind setter contract — confirm.
    """
    # Values that carry their own union/typedef wrapper expose ``_utype``;
    # unwrap them so they are re-validated against this leaf's declared type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the incoming value in the generated container type; this
        # validates it and wires up parent/path bookkeeping.
        t = YANGDynClass(v, base=bd_add.bd_add, is_container='container', presence=False, yang_name="bd-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove bridge domains from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Generated error payload: describes the expected type so callers can
        # report a useful validation failure.
        raise ValueError({
            'error-string': """bd_add must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=bd_add.bd_add, is_container='container', presence=False, yang_name="bd-add", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Add/Remove bridge domains from EVPN Instance', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
        })
    self.__bd_add = t
    # Notify the parent object (if it supports change notification).
    if hasattr(self, '_set'):
        self._set()
def evaluate(self, pipeline_key, purpose=None, attempt=0):
    """Evaluates the given Pipeline and enqueues sub-stages for execution.

    Args:
      pipeline_key: The db.Key or stringified key of the _PipelineRecord to run.
      purpose: Why evaluate was called ('start', 'finalize', or 'abort').
      attempt: The attempt number that should be tried.
    """
    # Reset per-thread After/InOrder bookkeeping so state from a previous
    # evaluation on this thread cannot leak into this one.
    After._thread_init()
    InOrder._thread_init()
    InOrder._local._activated = False

    if not isinstance(pipeline_key, db.Key):
        pipeline_key = db.Key(pipeline_key)
    pipeline_record = db.get(pipeline_key)
    if pipeline_record is None:
        logging.error(
            'Pipeline ID "%s" does not exist.', pipeline_key.name())
        return
    if pipeline_record.status not in (
            _PipelineRecord.WAITING, _PipelineRecord.RUN):
        # If we're attempting to abort an already aborted pipeline,
        # we silently advance.
        if (pipeline_record.status == _PipelineRecord.ABORTED and
                purpose == _BarrierRecord.ABORT):
            return
        logging.error(
            'Pipeline ID "%s" in bad state for purpose "%s": "%s"',
            pipeline_key.name(), purpose or _BarrierRecord.START,
            pipeline_record.status)
        return

    params = pipeline_record.params
    root_pipeline_key = \
        _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record)
    default_slot_key = db.Key(params['output_slots']['default'])

    default_slot_record, root_pipeline_record = db.get([
        default_slot_key, root_pipeline_key])
    if default_slot_record is None:
        logging.error(
            'Pipeline ID "%s" default slot "%s" does not exist.',
            pipeline_key.name(), default_slot_key)
        return
    if root_pipeline_record is None:
        logging.error(
            'Pipeline ID "%s" root pipeline ID "%s" is missing.',
            pipeline_key.name(), root_pipeline_key.name())
        return

    # Always finalize if we're aborting so pipelines have a chance to cleanup
    # before they terminate. Pipelines must access 'was_aborted' to find
    # out how their finalization should work.
    abort_signal = (
        purpose == _BarrierRecord.ABORT or
        root_pipeline_record.abort_requested == True)
    finalize_signal = (
        (default_slot_record.status == _SlotRecord.FILLED and
         purpose == _BarrierRecord.FINALIZE) or abort_signal)

    try:
        pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
    except ImportError, e:
        # This means something is wrong with the deployed code. Rely on the
        # taskqueue system to do retries.
        retry_message = '%s: %s' % (e.__class__.__name__, str(e))
        logging.exception(
            'Could not locate %s#%s. %s',
            pipeline_record.class_path, pipeline_key.name(), retry_message)
        raise

    try:
        pipeline_func = pipeline_func_class.from_id(
            pipeline_key.name(),
            resolve_outputs=finalize_signal,
            _pipeline_record=pipeline_record)
    except SlotNotFilledError, e:
        logging.exception(
            'Could not resolve arguments for %s#%s. Most likely this means there '
            'is a bug in the Pipeline runtime or some intermediate data has been '
            'deleted from the Datastore. Giving up.',
            pipeline_record.class_path, pipeline_key.name())
        self.transition_aborted(pipeline_key)
        return
    except Exception, e:
        retry_message = '%s: %s' % (e.__class__.__name__, str(e))
        logging.exception(
            'Instantiating %s#%s raised exception. %s',
            pipeline_record.class_path, pipeline_key.name(), retry_message)
        self.transition_retry(pipeline_key, retry_message)
        if pipeline_record.params['task_retry']:
            raise
        else:
            return
    else:
        pipeline_generator = mr_util.is_generator_function(
            pipeline_func_class.run)
        caller_output = pipeline_func.outputs

    # A mid-flight async pipeline can veto abortion via try_cancel(); in that
    # case we leave it running and let a later abort attempt catch it.
    if (abort_signal and pipeline_func.async and
            pipeline_record.status == _PipelineRecord.RUN
            and not pipeline_func.try_cancel()):
        logging.warning(
            'Could not cancel and abort mid-flight async pipeline: %r#%s',
            pipeline_func, pipeline_key.name())
        return

    if finalize_signal:
        try:
            pipeline_func._finalized_internal(
                self, pipeline_key, root_pipeline_key,
                caller_output, abort_signal)
        except Exception, e:
            # This means something is wrong with the deployed finalization code.
            # Rely on the taskqueue system to do retries.
            retry_message = '%s: %s' % (e.__class__.__name__, str(e))
            logging.exception(
                'Finalizing %r#%s raised exception. %s',
                pipeline_func, pipeline_key.name(), retry_message)
            raise
        else:
            if not abort_signal:
                self.transition_complete(pipeline_key)
                return

    if abort_signal:
        logging.debug(
            'Marking as aborted %s#%s', pipeline_func, pipeline_key.name())
        self.transition_aborted(pipeline_key)
        return

    # Stale or out-of-range attempts indicate a duplicate/late task; ignore.
    if pipeline_record.current_attempt != attempt:
        logging.error(
            'Received evaluation task for pipeline ID "%s" attempt %d but '
            'current pending attempt is %d', pipeline_key.name(), attempt,
            pipeline_record.current_attempt)
        return

    if pipeline_record.current_attempt >= pipeline_record.max_attempts:
        logging.error(
            'Received evaluation task for pipeline ID "%s" on attempt %d '
            'but that exceeds max attempts %d', pipeline_key.name(), attempt,
            pipeline_record.max_attempts)
        return

    if pipeline_record.next_retry_time is not None:
        retry_time = pipeline_record.next_retry_time - _RETRY_WIGGLE_TIMEDELTA
        if self._gettime() <= retry_time:
            # Task arrived before the scheduled retry time; raising here lets
            # the task queue redeliver it later.
            detail_message = (
                'Received evaluation task for pipeline ID "%s" on attempt %d, '
                'which will not be ready until: %s' % (pipeline_key.name(),
                pipeline_record.current_attempt,
                pipeline_record.next_retry_time))
            logging.warning(detail_message)
            raise UnexpectedPipelineError(detail_message)

    if pipeline_record.status == _PipelineRecord.RUN and pipeline_generator:
        if (default_slot_record.status == _SlotRecord.WAITING and
                not pipeline_record.fanned_out):
            # This properly handles the yield-less generator case when the
            # RUN state transition worked properly but outputting to the default
            # slot failed.
            self.fill_slot(pipeline_key, caller_output.default, None)
        return

    if (pipeline_record.status == _PipelineRecord.WAITING and
            pipeline_func.async):
        self.transition_run(pipeline_key)

    try:
        result = pipeline_func._run_internal(
            self, pipeline_key, root_pipeline_key, caller_output)
    except Exception, e:
        if self.handle_run_exception(pipeline_key, pipeline_func, e):
            raise
        else:
            return

    if pipeline_func.async:
        return

    if not pipeline_generator:
        # Catch any exceptions that are thrown when the pipeline's return
        # value is being serialized. This ensures that serialization errors
        # will cause normal abort/retry behavior.
        try:
            self.fill_slot(pipeline_key, caller_output.default, result)
        except Exception, e:
            retry_message = 'Bad return value. %s: %s' % (
                e.__class__.__name__, str(e))
            logging.exception(
                'Generator %r#%s caused exception while serializing return '
                'value %r. %s', pipeline_func, pipeline_key.name(), result,
                retry_message)
            self.transition_retry(pipeline_key, retry_message)
            if pipeline_func.task_retry:
                raise
            else:
                return

        expected_outputs = set(caller_output._output_dict.iterkeys())
        found_outputs = self.session_filled_output_names
        if expected_outputs != found_outputs:
            exception = SlotNotFilledError(
                'Outputs %r for pipeline ID "%s" were never filled by "%s".' % (
                expected_outputs - found_outputs,
                pipeline_key.name(), pipeline_func._class_path))
            if self.handle_run_exception(pipeline_key, pipeline_func, exception):
                raise exception
        return

    # From here on the pipeline is a generator: drive it, collecting every
    # child Pipeline it yields.
    pipeline_iter = result
    next_value = None
    last_sub_stage = None
    sub_stage = None
    sub_stage_dict = {}
    sub_stage_ordering = []

    while True:
        try:
            yielded = pipeline_iter.send(next_value)
        except StopIteration:
            break
        except Exception, e:
            if self.handle_run_exception(pipeline_key, pipeline_func, e):
                raise
            else:
                return

        if isinstance(yielded, Pipeline):
            if yielded in sub_stage_dict:
                raise UnexpectedPipelineError(
                    'Already yielded pipeline object %r with pipeline ID %s' %
                    (yielded, yielded.pipeline_id))

            last_sub_stage = yielded
            next_value = PipelineFuture(yielded.output_names)
            next_value._after_all_pipelines.update(
                After._local._after_all_futures)
            next_value._after_all_pipelines.update(
                InOrder._local._in_order_futures)
            sub_stage_dict[yielded] = next_value
            sub_stage_ordering.append(yielded)
            InOrder._add_future(next_value)

            # To aid local testing, the task_retry flag (which instructs the
            # evaluator to raise all exceptions back up to the task queue) is
            # inherited by all children from the root down.
            yielded.task_retry = pipeline_func.task_retry
        else:
            raise UnexpectedPipelineError(
                'Yielded a disallowed value: %r' % yielded)

    if last_sub_stage:
        # Final yielded stage inherits outputs from calling pipeline that were not
        # already filled during the generator's execution.
        inherited_outputs = params['output_slots']
        for slot_name in self.session_filled_output_names:
            del inherited_outputs[slot_name]
        sub_stage_dict[last_sub_stage]._inherit_outputs(
            pipeline_record.class_path, inherited_outputs)
    else:
        # Here the generator has yielded nothing, and thus acts as a synchronous
        # function. We can skip the rest of the generator steps completely and
        # fill the default output slot to cause finalizing.
        expected_outputs = set(caller_output._output_dict.iterkeys())
        expected_outputs.remove('default')
        found_outputs = self.session_filled_output_names
        if expected_outputs != found_outputs:
            exception = SlotNotFilledError(
                'Outputs %r for pipeline ID "%s" were never filled by "%s".' % (
                expected_outputs - found_outputs,
                pipeline_key.name(), pipeline_func._class_path))
            if self.handle_run_exception(pipeline_key, pipeline_func, exception):
                raise exception
        else:
            self.fill_slot(pipeline_key, caller_output.default, None)
            self.transition_run(pipeline_key)
        return

    # Allocate any SlotRecords that do not yet exist.
    entities_to_put = []
    for future in sub_stage_dict.itervalues():
        for slot in future._output_dict.itervalues():
            if not slot._exists:
                entities_to_put.append(
                    _SlotRecord(key=slot.key, root_pipeline=root_pipeline_key))

    # Allocate PipelineRecords and BarrierRecords for generator-run Pipelines.
    pipelines_to_run = set()
    all_children_keys = []
    all_output_slots = set()
    for sub_stage in sub_stage_ordering:
        future = sub_stage_dict[sub_stage]

        # Catch any exceptions that are thrown when the pipeline's parameters
        # are being serialized. This ensures that serialization errors will
        # cause normal retry/abort behavior.
        try:
            dependent_slots, output_slots, params_text, params_blob = \
                _generate_args(sub_stage, future, self.queue_name, self.base_path)
        except Exception, e:
            retry_message = 'Bad child arguments. %s: %s' % (
                e.__class__.__name__, str(e))
            logging.exception(
                'Generator %r#%s caused exception while serializing args for '
                'child pipeline %r. %s', pipeline_func, pipeline_key.name(),
                sub_stage, retry_message)
            self.transition_retry(pipeline_key, retry_message)
            if pipeline_func.task_retry:
                raise
            else:
                return

        child_pipeline_key = db.Key.from_path(
            _PipelineRecord.kind(), uuid.uuid4().hex)
        all_output_slots.update(output_slots)
        all_children_keys.append(child_pipeline_key)

        child_pipeline = _PipelineRecord(
            key=child_pipeline_key,
            root_pipeline=root_pipeline_key,
            # Bug in DB means we need to use the storage name here,
            # not the local property name.
            params=params_text,
            params_blob=params_blob,
            class_path=sub_stage._class_path,
            max_attempts=sub_stage.max_attempts)
        entities_to_put.append(child_pipeline)

        if not dependent_slots:
            # This child pipeline will run immediately.
            pipelines_to_run.add(child_pipeline_key)
            child_pipeline.start_time = self._gettime()
        else:
            entities_to_put.extend(_PipelineContext._create_barrier_entities(
                root_pipeline_key,
                child_pipeline_key,
                _BarrierRecord.START,
                dependent_slots))

        entities_to_put.extend(_PipelineContext._create_barrier_entities(
            root_pipeline_key,
            child_pipeline_key,
            _BarrierRecord.FINALIZE,
            output_slots))

    # This generator pipeline's finalization barrier must include all of the
    # outputs of any child pipelines that it runs. This ensures the finalized
    # calls will not happen until all child pipelines have completed.
    #
    # The transition_run() call below will update the FINALIZE _BarrierRecord
    # for this generator pipeline to include all of these child outputs in
    # its list of blocking_slots. That update is done transactionally to
    # make sure the _BarrierRecord only lists the slots that matter.
    #
    # However, the notify_barriers() method doesn't find _BarrierRecords
    # through the blocking_slots field. It finds them through _BarrierIndexes
    # entities. Thus, before we update the FINALIZE _BarrierRecord in
    # transition_run(), we need to write _BarrierIndexes for all child outputs.
    barrier_entities = _PipelineContext._create_barrier_entities(
        root_pipeline_key,
        pipeline_key,
        _BarrierRecord.FINALIZE,
        all_output_slots)
    # Ignore the first element which is the _BarrierRecord. That entity must
    # have already been created and put in the datastore for the parent
    # pipeline before this code generated child pipelines.
    barrier_indexes = barrier_entities[1:]
    entities_to_put.extend(barrier_indexes)

    db.put(entities_to_put)

    self.transition_run(
        pipeline_key,
        blocking_slot_keys=all_output_slots,
        fanned_out_pipelines=all_children_keys,
        pipelines_to_run=pipelines_to_run)
def prepare_editable_requirement(
        self,
        req,  # type: InstallRequirement
        require_hashes,  # type: bool
        use_user_site,  # type: bool
        finder  # type: PackageFinder
):
    # type: (...) -> DistAbstraction
    """Prepare an editable requirement.

    Ensures the requirement has a source directory, refreshes the editable
    checkout, builds its dist abstraction, and optionally archives it to
    the download directory.
    """
    assert req.editable, "cannot prepare a non-editable req as editable"
    logger.info('Obtaining %s', req)

    with indent_log():
        # Editables point at a source tree, not a single artifact, so there
        # is nothing deterministic to hash against.
        if require_hashes:
            raise InstallationError(
                'The editable requirement %s cannot be installed when '
                'requiring hashes, because there is no single file to '
                'hash.' % req
            )
        req.ensure_has_source_dir(self.src_dir)
        # Only update the checkout in place when we are not saving downloads.
        req.update_editable(not self._download_should_save)

        abstract_dist = make_abstract_dist(req)
        # Track the requirement while preparing so recursive builds of the
        # same requirement are detected.
        with self.req_tracker.track(req):
            abstract_dist.prep_for_dist(finder, self.build_isolation)

        if self._download_should_save:
            req.archive(self.download_dir)
        req.check_if_exists(use_user_site)

    return abstract_dist
def SetGaugeCallback(self, metric_name, callback, fields=None):
    """See base class."""
    gauge = self._gauge_metrics[metric_name]
    gauge.SetCallback(callback, fields)
def check_column_id(
    problems: List,
    table: str,
    df: DataFrame,
    column: str,
    *,
    column_required: bool = True,
) -> List:
    """A specialization of :func:`check_column` for ID-like columns.

    Record the indices of ``df`` where the given ``column`` contains an
    invalid string or a duplicated entry, appending one
    ``[type_, message, table, indices]`` item to ``problems`` per kind of
    failure found. If ``column_required`` is ``False``, NaN entries are
    ignored (and a missing column is tolerated).

    Parameters
    ----------
    problems : list
        Accumulated problem records (see :func:`check_column`).
    table : string
        Name of a GTFS table.
    df : DataFrame
        The GTFS table corresponding to ``table``.
    column : string
        A column of ``df``.
    column_required : boolean
        ``True`` if and only if ``column`` is required by the GTFS.

    Returns
    -------
    list
        The extended ``problems`` list.
    """
    frame = df.copy()
    if not column_required:
        # Optional column: tolerate absence, and skip NaN rows entirely.
        if column not in frame.columns:
            frame[column] = np.nan
        frame = frame.dropna(subset=[column])

    invalid = ~frame[column].map(valid_str)
    problems = check_table(
        problems,
        table,
        frame,
        invalid,
        f"Invalid {column}; maybe has extra space characters",
    )

    duplicated = frame[column].duplicated()
    problems = check_table(problems, table, frame, duplicated, f"Repeated {column}")

    return problems
def get_rprof(step, var):
    """Extract or compute and rescale a requested radial profile.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): radial profile name, a key of :data:`stagpy.phyvars.RPROF`
            or :data:`stagpy.phyvars.RPROF_EXTRA`.

    Returns:
        tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Varr`:
            ``(rprof, rad, meta)`` where ``rprof`` is the requested profile,
            ``rad`` is the radial position at which it is evaluated (``None``
            when it is the native StagYY output position), and ``meta`` holds
            the variable's metadata.
    """
    if var in step.rprof.columns:
        # Native profile straight from the StagYY output.
        profile = step.rprof[var]
        radius = None
        meta = (phyvars.RPROF[var] if var in phyvars.RPROF
                else phyvars.Varr(var, None, '1'))
    elif var in phyvars.RPROF_EXTRA:
        # Derived profile: compute it and build metadata from its descriptor.
        extra = phyvars.RPROF_EXTRA[var]
        profile, radius = extra.description(step)
        meta = phyvars.Varr(misc.baredoc(extra.description),
                            extra.kind, extra.dim)
    else:
        raise UnknownRprofVarError(var)

    profile, _ = step.sdat.scale(profile, meta.dim)
    if radius is not None:
        radius, _ = step.sdat.scale(radius, 'm')
    return profile, radius, meta
def interpolate2dStructuredFastIDW(grid, mask, kernel=15, power=2, minnvals=5):
    '''Faster implementation of interpolate2dStructuredIDW.

    Replace all values in [grid] indicated by [mask] with the inverse
    distance weighted interpolation of all values within px +- kernel.

    [power]   -> distance weighting factor: 1/distance**[power]
    [minnvals] -> minimum number of neighbour values to find until
                  interpolation stops
    '''
    # Precompute neighbour offsets and their distances once for the kernel.
    offsets, distances = growPositions(kernel)
    # Inverse-distance weights; the 0.5 factor matches the original
    # weighting convention of this module.
    inv_dist_weights = 1 / distances ** (0.5 * power)
    return _calc(grid, mask, offsets, inv_dist_weights, minnvals - 1)
def sample_u(self, q):
    r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

    For a univariate distribution, this is simply evaluating the inverse
    CDF. To facilitate efficient sampling, this function returns a *vector*
    of PPF values, one value for each variable. Basically, the idea is that,
    given a vector :math:`q` of `num_params` values each of which is
    distributed uniformly on :math:`[0, 1]`, this function will return
    corresponding samples for each variable.

    Parameters
    ----------
    q : array of float
        Values between 0 and 1 to evaluate inverse CDF at. Must have one
        entry per univariate prior.

    Raises
    ------
    ValueError
        If ``q`` has the wrong length, is not one-dimensional, or contains
        values outside :math:`[0, 1]`.
    """
    # Local import: the module previously used the top-level ``scipy``
    # aliases (``scipy.atleast_1d``/``scipy.asarray``), which are deprecated
    # since SciPy 1.8 and removed in recent SciPy releases. NumPy is the
    # canonical home of these functions.
    import numpy as np

    q = np.atleast_1d(q)
    if len(q) != len(self.univariate_priors):
        raise ValueError("length of q must equal the number of parameters!")
    if q.ndim != 1:
        raise ValueError("q must be one-dimensional!")
    if (q < 0).any() or (q > 1).any():
        raise ValueError("q must be within [0, 1]!")

    # One PPF evaluation per (quantile, prior) pair.
    return np.asarray(
        [p.ppf(v) for v, p in zip(q, self.univariate_priors)])
def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context):
    """Synchronous RPC: alter a table partition, passing along an
    environment context.

    Parameters:
     - db_name
     - tbl_name
     - new_part
     - environment_context
    """
    # Thrift-generated call pattern: send the request, then block until the
    # server's reply (or exception) is received.
    self.send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
    self.recv_alter_partition_with_environment_context()
def option_parser():
    """Option Parser to give various options."""
    usage = '''
$ ./crawler -d5 <url>
Here in this case it goes till depth of 5 and url is target URL to start crawling.
'''
    version = "2.0.0"

    cli = optparse.OptionParser(usage=usage, version=version)
    cli.add_option("-l", "--links",
                   action="store_true", default=False, dest="links",
                   help="links for target url")
    cli.add_option("-d", "--depth",
                   action="store", type="int", default=30, dest="depth",
                   help="Maximum depth traverse")

    opts, args = cli.parse_args()
    # A target URL is mandatory; bail out with usage info otherwise.
    if not args:
        cli.print_help()
        raise SystemExit(1)
    return opts, args
def convert_audio(self, file_path, new_mimetype, overwrite=False):
    '''a method to convert an audio file into a different codec

    :param file_path: string with path to file on localhost
    :param new_mimetype: string with mimetype for new file
    :param overwrite: [optional] boolean to overwrite existing file
    :return: string with path to new file on localhost
    :raises ValueError: if an input is invalid, the mimetype is
        unsupported, or the file does not exist

    SEE: https://github.com/Ch00k/ffmpy
    '''
    title = '%s.convert_audio' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'file_path': file_path,
        'new_mimetype': new_mimetype
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # map the requested mimetype to a file extension
    new_extension = ''
    mimetype_list = []
    mime_arg = '%s(new_mimetype=%s)' % (title, new_mimetype)
    for key, value in self.fields.schema['audio_extensions'].items():
        mimetype_list.append(value['mimetype'])
        if value['mimetype'] == new_mimetype:
            new_extension = value['extension']
            break
    if not new_extension:
        raise ValueError('%s must be one of %s mimetypes' % (mime_arg, mimetype_list))

    # import dependencies (deferred so the error paths above stay cheap)
    from os import path
    import ffmpy

    # validate existence of file
    file_arg = '%s(file_path=%s)' % (title, file_path)
    if not path.exists(file_path):
        # BUG FIX: the original passed (template, arg) as two positional
        # arguments to ValueError instead of %-formatting the message.
        raise ValueError('%s is not a valid path' % file_arg)

    # construct output path with the new extension
    file_name, _ = path.splitext(file_path)
    output_path = file_name + new_extension

    # construct ffmpeg invocation
    ffmpeg_kwargs = {
        'inputs': {file_path: None},
        'outputs': {output_path: None},
        'global_options': ['-v error']
    }
    if overwrite:
        ffmpeg_kwargs['global_options'].append('-y')
    ffmpeg_client = ffmpy.FFmpeg(**ffmpeg_kwargs)

    # run conversion
    ffmpeg_client.run()

    return output_path
def target_gene_indices(gene_names, target_genes):
    """Resolve target genes to column indices.

    :param gene_names: list of gene names.
    :param target_genes: either an int (the top n), the string 'ALL'
        (case-insensitive), a list of gene names (subset of gene_names),
        or a list of integer indices.
    :return: the (column) indices of the target genes in the
        expression matrix.
    :raises ValueError: if target_genes mixes types or cannot be
        interpreted.
    """
    if isinstance(target_genes, str) and target_genes.upper() == 'ALL':
        return list(range(len(gene_names)))
    elif isinstance(target_genes, int):
        top_n = target_genes
        assert top_n > 0
        # Clamp to the number of available genes.
        return list(range(min(top_n, len(gene_names))))
    elif isinstance(target_genes, list):
        if not target_genes:
            # Empty selection: nothing to resolve. (The original had a
            # second, redundant empty-list check before the elif chain;
            # this single branch covers it.)
            return []
        elif all(isinstance(target_gene, str) for target_gene in target_genes):
            # Translate names to positions, preserving gene_names order and
            # silently skipping names not present.
            return [index for index, gene in enumerate(gene_names)
                    if gene in target_genes]
        elif all(isinstance(target_gene, int) for target_gene in target_genes):
            # Already indices; pass through untouched.
            return target_genes
        else:
            raise ValueError("Mixed types in target genes.")
    else:
        raise ValueError("Unable to interpret target_genes.")
async def _grab_connection(self, url):
    '''The connection pool handler. Returns a connection to the caller.

    The url is broken down to its top-level location (scheme + host) and
    that key is used to look for an already-pooled connection. If one is
    checked out it is returned immediately; otherwise a fresh connection to
    that location is created (which may yield to the event loop).

    Args:
        url (str): the request url whose scheme/host determine which pooled
            connection (if any) can be reused.
    '''
    scheme, host, _, _, _, _ = urlparse(url)
    host_loc = urlunparse((scheme, host, '', '', '', ''))

    conn = self._checkout_connection(host_loc)
    if conn is not None:
        return conn
    # Nothing pooled for this location: establish a new connection.
    return await self._make_connection(host_loc)
def _sqla_postgresql(self, uri, version=None, isolation_level="READ COMMITTED"):
    '''Configure a PostgreSQL SQLAlchemy backend.

    expected uri form:
        postgresql+psycopg2://%s:%s@%s:%s/%s' % (
            username, password, host, port, db)
    '''
    # Guard against an explicit None being passed in.
    level = isolation_level or "READ COMMITTED"
    kwargs = {"isolation_level": level}

    # FIXME: versions of postgresql < 9.2 don't have pg.JSON!
    # check and use JSONTypedLite instead
    # Override the default dict and list column types for postgres.
    self.type_map.update({
        list: pg.ARRAY,
        tuple: pg.ARRAY,
        set: pg.ARRAY,
        dict: JSONDict,
        datetime: UTCEpoch,
    })

    # 999 is sqlite's default batch size; postgres handles more at once.
    current_batch = self.config['batch_size']
    self.config['batch_size'] = 5000 if current_batch == 999 else current_batch

    self._lock_required = False

    # Default schema name is 'public' for postgres.
    schema = self.config['db_schema']
    self.config['db_schema'] = schema or 'public'

    return uri, kwargs
def _flush ( self ) : """Flushes all registered consumer streams ."""
for consumer in self . consumers : if not getattr ( consumer , "closed" , False ) : consumer . flush ( )
def file_list(load):
    '''
    Return a list of all files on the file server in a specified environment
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    if load['saltenv'] not in envs():
        return []
    mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
    prefix = load.get('prefix', '').strip('/')
    # Strip the mountpoint off the front of the prefix so the remainder maps
    # directly onto paths under the minion cache.
    if mountpoint and prefix.startswith(mountpoint + os.path.sep):
        prefix = prefix[len(mountpoint + os.path.sep):]
    minions_cache_dir = os.path.join(__opts__['cachedir'], 'minions')
    minion_dirs = os.listdir(minions_cache_dir)

    # If the prefix is not an empty string, then get the minion id from it. The
    # minion ID will be the part before the first slash, so if there is no
    # slash, this is an invalid path.
    if prefix:
        tgt_minion, _, prefix = prefix.partition('/')
        if not prefix:
            # No minion ID in path
            return []
        # Reassign minion_dirs so we don't unnecessarily walk every minion's
        # pushed files
        if tgt_minion not in minion_dirs:
            log.warning(
                'No files found in minionfs cache for minion ID \'%s\'',
                tgt_minion
            )
            return []
        minion_dirs = [tgt_minion]

    ret = []
    for minion in minion_dirs:
        # Skip minions that are not whitelisted/exposed to the fileserver.
        if not _is_exposed(minion):
            continue
        minion_files_dir = os.path.join(minions_cache_dir, minion, 'files')
        if not os.path.isdir(minion_files_dir):
            log.debug(
                'minionfs: could not find files directory under %s!',
                os.path.join(minions_cache_dir, minion)
            )
            continue
        walk_dir = os.path.join(minion_files_dir, prefix)
        # Do not follow links for security reasons
        for root, _, files in salt.utils.path.os_walk(
                walk_dir, followlinks=False):
            for fname in files:
                # Ignore links for security reasons
                if os.path.islink(os.path.join(root, fname)):
                    continue
                relpath = os.path.relpath(
                    os.path.join(root, fname), minion_files_dir
                )
                # Reject anything that escapes the minion's files dir.
                if relpath.startswith('../'):
                    continue
                rel_fn = os.path.join(mountpoint, minion, relpath)
                if not salt.fileserver.is_file_ignored(__opts__, rel_fn):
                    ret.append(rel_fn)
    return ret
def fancy_error_template_middleware(app):
    """WSGI middleware that catches unhandled errors and renders the error page.

    Wraps ``app``; any exception raised by it is converted into a 500
    response whose body is the rendered error page with the traceback.
    """
    def application(environ, start_response):
        try:
            return app(environ, start_response)
        except Exception as exc:
            # Capture the current traceback as text for the error page.
            buf = StringIO()
            traceback.print_exc(file=buf)
            buf.seek(0)
            error_body = render_error_page(500, exc, traceback=buf.read())
            response = Response(status=500,
                                body=error_body,
                                content_type="text/html")
            return response(environ, start_response)

    return application
def main ( input_filename , format ) : """Calculate the fingerprint hashses of the referenced audio file and save to disk as a pickle file"""
# open the file & convert to wav song_data = AudioSegment . from_file ( input_filename , format = format ) song_data = song_data . set_channels ( 1 ) # convert to mono wav_tmp = song_data . export ( format = "wav" ) # write to a tmp file buffer wav_tmp . seek ( 0 ) rate , wav_data = wavfile . read ( wav_tmp ) rows_per_second = ( 1 + ( rate - WIDTH ) ) // FRAME_STRIDE # Calculate a coarser window for matching window_size = ( rows_per_second // TIME_STRIDE , ( WIDTH // 2 ) // FREQ_STRIDE ) peaks = resound . get_peaks ( np . array ( wav_data ) , window_size = window_size ) # half width ( nyquist freq ) & half size ( window is + / - around the middle ) f_width = WIDTH // ( 2 * FREQ_STRIDE ) * 2 t_gap = 1 * rows_per_second t_width = 2 * rows_per_second fingerprints = resound . hashes ( peaks , f_width = f_width , t_gap = t_gap , t_width = t_width ) # hash , offset pairs return fingerprints
def cl_picard(self, command, options, memscale=None):
    """Prepare a Picard commandline.

    Renders ``options`` (key, value) pairs as KEY=VALUE arguments,
    always appending VALIDATION_STRINGENCY=SILENT, and prefixes them
    with the base Picard invocation for ``command``.
    """
    rendered = ["%s=%s" % (key, val) for key, val in options]
    rendered.append("VALIDATION_STRINGENCY=SILENT")
    base_cmd = self._get_picard_cmd(command, memscale=memscale)
    return base_cmd + rendered
def __type2python(cls, value):
    """Convert a javascript literal (string form) to its python value.

    :param value: Value to transform.
    :type value: None, bool, int, float, string
    :return: None, bool, int, float, dict, or the original string when no
        conversion applies. Non-string inputs are returned unchanged.
    """
    if isinstance(value, string_types):
        # Fix: the original used ``value is 'null'`` -- an identity check
        # on a string literal, which is unreliable; use equality.
        if value == 'null':
            return None
        elif value in ('true', 'false'):
            return value == 'true'
        elif value.replace('.', '', 1).isdigit():
            # Fix: replaced eval() with explicit conversion; eval is unsafe
            # and also rejects leading-zero integers like '05' on Python 3.
            return float(value) if '.' in value else int(value)
        # Fix: original had ``endswidth`` (typo), which raised
        # AttributeError for any '{'-prefixed value.
        elif value.startswith('{') and value.endswith('}'):
            try:
                return json.loads(value)
            except ValueError:
                return value
    return value
def name(self):
    """Cluster name used in requests.

    .. note::

        This property will not change if ``_instance`` and ``cluster_id``
        do not, but the return value is not cached.

    The cluster name is of the form
    ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"``

    :rtype: str
    :returns: The cluster name.
    """
    client = self._instance._client
    return client.instance_admin_client.cluster_path(
        client.project,
        self._instance.instance_id,
        self.cluster_id,
    )
def action_run_command ( self ) : """Run a shortcut , if exists"""
name = self . args [ '<name>' ] # get entry from DB self . db_query ( ''' SELECT path,command FROM shortcuts WHERE name=? ''' , ( name , ) ) row = self . db_fetch_one ( ) if row is None : print_err ( 'Shortcut "%s" does not exist.' % name ) return path = row [ 0 ] cmd = row [ 1 ] # show message msg = colored ( 'JumpRun shortcut' , 'white' , attrs = [ 'bold' ] ) + '\n' + self . shortcut_str ( path , cmd ) + '\n' print ( msg ) # cd to the folder & run the command os . chdir ( path ) try : subprocess . call ( cmd , shell = True ) except KeyboardInterrupt : print ( '' ) # newline return
def _move_files_to_compute ( compute , project_id , directory , files_path ) : """Move the files to a remote compute"""
location = os . path . join ( directory , files_path ) if os . path . exists ( location ) : for ( dirpath , dirnames , filenames ) in os . walk ( location ) : for filename in filenames : path = os . path . join ( dirpath , filename ) dst = os . path . relpath ( path , directory ) yield from _upload_file ( compute , project_id , path , dst ) shutil . rmtree ( os . path . join ( directory , files_path ) )
def get_remote_etag(storage, prefixed_path):
    """Get etag of path from S3 using boto or boto3.

    Tries the boto 2 API first (``bucket.get_key``), then the boto3 API
    (``bucket.Object``). Returns ``None`` when neither yields an etag.
    """
    normalized_path = safe_join(storage.location,
                                prefixed_path).replace('\\', '/')
    try:
        # boto 2: get_key returns None for a missing key, which makes the
        # .etag access raise AttributeError too -- both land here.
        return storage.bucket.get_key(normalized_path).etag
    except AttributeError:
        pass
    try:
        # boto3: a missing key or API failure raises (e.g. ClientError).
        # Fix: was a bare ``except:`` -- catch Exception instead so
        # SystemExit/KeyboardInterrupt still propagate.
        return storage.bucket.Object(normalized_path).e_tag
    except Exception:
        pass
    return None
def dump_br_version(project):
    """Dump an enhanced version json including

    - The version from package.json
    - The current branch (if it can be found)
    - The current sha

    :param project: key into ``br_version_prefixes`` for the output keys
    :return: JSON string with ``<prefix>_version/_sha/_branch`` entries
    """
    normalized = get_version(project)
    # Fix: subprocess.check_output returns bytes on Python 3; decode so
    # the values are JSON-serializable strings (json.dumps raises
    # TypeError on bytes).
    sha = subprocess.check_output(
        ['git', 'rev-parse', 'HEAD'], cwd=HERE).strip().decode('utf-8')
    branch = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
        cwd=HERE).strip().decode('utf-8')
    pref = br_version_prefixes[project]
    return json.dumps({
        pref + '_version': normalized,
        pref + '_sha': sha,
        pref + '_branch': branch,
    })
def run(name,
        cmd,
        container_type=None,
        exec_driver=None,
        output=None,
        no_start=False,
        stdin=None,
        python_shell=True,
        output_loglevel='debug',
        ignore_retcode=False,
        path=None,
        use_vt=False,
        keep_env=None):
    '''
    Common logic for running shell commands in containers

    name
        Name of the container
    cmd
        Command line to run inside the container
    container_type
        Container runtime (e.g. ``docker``); used to look up the
        ``<type>.pid`` function for the nsenter driver
    exec_driver
        One of ``lxc-attach``, ``nsenter`` or ``docker-exec``; selects how
        the command is injected into the container
    output
        One of ``stdout``, ``stderr``, ``retcode`` or ``all``; if omitted,
        plain combined output is returned, otherwise a dict
    keep_env
        ``True`` to keep the host environment, or a list / comma-separated
        string of variable names to forward into the container
    path
        path to the container parent (for LXC only)
        default: /var/lib/lxc (system default)

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.run mycontainer 'ps aux' container_type=docker exec_driver=nsenter output=stdout
    '''
    # NOTE(review): ``no_start`` is accepted but unused in this body --
    # confirm whether callers rely on it.
    # Map the requested output mode to the cmd module function producing it.
    valid_output = ('stdout', 'stderr', 'retcode', 'all')
    if output is None:
        cmd_func = 'cmd.run'
    elif output not in valid_output:
        raise SaltInvocationError(
            '\'output\' param must be one of the following: {0}'
            .format(', '.join(valid_output)))
    else:
        cmd_func = 'cmd.run_all'

    # Normalize keep_env into ``to_keep``: the list of environment variable
    # names to forward into the container.
    if keep_env is None or isinstance(keep_env, bool):
        to_keep = []
    elif not isinstance(keep_env, (list, tuple)):
        try:
            to_keep = keep_env.split(',')
        except AttributeError:
            log.warning('Invalid keep_env value, ignoring')
            to_keep = []
    else:
        to_keep = keep_env

    # Build the host-side command line that injects ``cmd`` into the
    # container, according to the selected exec driver.
    if exec_driver == 'lxc-attach':
        full_cmd = 'lxc-attach '
        if path:
            full_cmd += '-P {0} '.format(pipes.quote(path))
        if keep_env is not True:
            full_cmd += '--clear-env '
            if 'PATH' not in to_keep:
                # --clear-env results in a very restrictive PATH
                # (/bin:/usr/bin), use a good fallback.
                full_cmd += '--set-var {0} '.format(PATH)
        full_cmd += ' '.join(
            ['--set-var {0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ])
        full_cmd += ' -n {0} -- {1}'.format(pipes.quote(name), cmd)
    elif exec_driver == 'nsenter':
        # Enter the container's namespaces via the PID of its init process.
        pid = __salt__['{0}.pid'.format(container_type)](name)
        full_cmd = (
            'nsenter --target {0} --mount --uts --ipc --net --pid -- '
            .format(pid))
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join(
            ['{0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ])
        full_cmd += ' {0}'.format(cmd)
    elif exec_driver == 'docker-exec':
        # We're using docker exec on the CLI as opposed to via docker-py,
        # since the Docker API doesn't return stdout and stderr separately.
        full_cmd = 'docker exec '
        if stdin:
            full_cmd += '-i '
        full_cmd += '{0} '.format(name)
        if keep_env is not True:
            full_cmd += 'env -i '
            if 'PATH' not in to_keep:
                full_cmd += '{0} '.format(PATH)
        full_cmd += ' '.join(
            ['{0}={1}'.format(x, pipes.quote(os.environ[x]))
             for x in to_keep
             if x in os.environ])
        full_cmd += ' {0}'.format(cmd)

    if not use_vt:
        ret = __salt__[cmd_func](full_cmd,
                                 stdin=stdin,
                                 python_shell=python_shell,
                                 output_loglevel=output_loglevel,
                                 ignore_retcode=ignore_retcode)
    else:
        # Run under a pseudo-terminal and collect output incrementally.
        stdout, stderr = '', ''
        proc = salt.utils.vt.Terminal(
            full_cmd,
            shell=python_shell,
            log_stdin_level='quiet' if output_loglevel == 'quiet' else 'info',
            log_stdout_level=output_loglevel,
            log_stderr_level=output_loglevel,
            log_stdout=True,
            log_stderr=True,
            stream_stdout=False,
            stream_stderr=False)
        # Consume output
        try:
            while proc.has_unread_data:
                try:
                    cstdout, cstderr = proc.recv()
                    if cstdout:
                        stdout += cstdout
                    if cstderr:
                        if output is None:
                            # Combined-output mode: fold stderr into stdout.
                            stdout += cstderr
                        else:
                            stderr += cstderr
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    break
            # NOTE(review): 'pid' is hard-coded to 2 here -- presumably a
            # placeholder since the VT does not expose the child's PID;
            # confirm against callers of this function.
            ret = stdout if output is None \
                else {'retcode': proc.exitstatus,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        except salt.utils.vt.TerminalException:
            trace = traceback.format_exc()
            log.error(trace)
            ret = stdout if output is None \
                else {'retcode': 127,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        finally:
            proc.terminate()
    return ret
def reset(self):
    """Stops the timer and resets its values to 0.

    Zeroes the elapsed and delta durations, restamps the start time to
    now, and refreshes the display.
    """
    zero = datetime.timedelta()
    self._elapsed = zero
    self._delta = zero
    self._starttime = datetime.datetime.now()
    self.refresh()
def load(self, txt_fst_filename):
    """Load the transducer from a file in the text format of OpenFST.

    The format is specified as follows:
        arc format: src dest ilabel olabel [weight]
        final state format: state [weight]
    lines may occur in any order except initial state must be first line.

    (Fix: the original docstring said "Save"; this method reads the file.)

    Args:
        txt_fst_filename (string): The name of the file
    Returns:
        None
    """
    import codecs
    with open(txt_fst_filename, 'r') as txt_fst:
        for line in txt_fst:
            fields = line.strip().split()
            if not fields:
                # Robustness fix: skip blank lines instead of crashing.
                continue
            if len(fields) == 1:
                # Single field: a final-state declaration.
                self[int(fields[0])].final = True
            else:
                # Fix: str.decode('hex') does not exist on Python 3;
                # codecs.decode(..., 'hex_codec') works on both 2 and 3.
                self.add_arc(int(fields[0]), int(fields[1]),
                             codecs.decode(fields[2], 'hex_codec'))
def pauli_basis(nq=1):
    """Returns a TomographyBasis for the Pauli basis on ``nq`` qubits.

    :param int nq: Number of qubits on which the returned basis is defined.
    """
    # Single-qubit Pauli basis: reorder the Gell-Mann basis for d=2 into
    # the conventional (identity, X, Y, Z) order.
    single_qubit = TomographyBasis(
        gell_mann_basis(2).data[[0, 2, 3, 1]],
        [2],
        [u'𝟙', r'\sigma_x', r'\sigma_y', r'\sigma_z'],
    )
    basis = tensor_product_basis(*([single_qubit] * nq))
    basis._name = 'pauli_basis'
    return basis
def _soln2str ( self , soln , fancy = False ) : """Convert a Sudoku solution point to a string ."""
chars = list ( ) for r in range ( 1 , 10 ) : for c in range ( 1 , 10 ) : if fancy and c in ( 4 , 7 ) : chars . append ( "|" ) chars . append ( self . _get_val ( soln , r , c ) ) if fancy and r != 9 : chars . append ( "\n" ) if r in ( 3 , 6 ) : chars . append ( "---+---+---\n" ) return "" . join ( chars )
def mkdir(self, astr_dirSpec):
    """Given an <astr_dirSpec> in form '/a/b/c/d/.../f', create that path
    in the internal stree, creating all intermediate nodes as necessary.

    The current working directory is restored afterwards.

    :param astr_dirSpec: path specification to create
    :return:
    """
    # Root specs are no-ops.
    if astr_dirSpec == '/' or astr_dirSpec == "//":
        return
    str_savedPath = self.cwd()
    l_components = astr_dirSpec.split('/')
    if l_components[0] == '':
        # Absolute spec: jump to the root and drop the empty leading part.
        self.cd('/')
        l_components = l_components[1:]
    for str_node in l_components:
        self.mkcd(str_node)
    # Restore the caller's working directory.
    self.cd(str_savedPath)
def get_workspace_config(namespace, workspace, cnamespace, config):
    """Get method configuration in workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        cnamespace (str): Config namespace
        config (str): Config name

    Swagger:
        https://api.firecloud.org/#!/Method_Configurations/getWorkspaceMethodConfig
    """
    uri = "workspaces/%s/%s/method_configs/%s/%s" % (
        namespace, workspace, cnamespace, config)
    return __get(uri)
def remove_response_property(xml_root):
    """Removes response properties if they exist.

    For a ``testsuites`` root, drops every ``<property>`` under
    ``<properties>`` whose name contains ``polarion-response-``. For a
    ``testcases``/``requirements`` root, drops the whole
    ``<response-properties>`` element.

    Raises:
        Dump2PolarionException: when the root tag is none of the above.
    """
    if xml_root.tag == "testsuites":
        properties = xml_root.find("properties")
        # Robustness fix: find() returns None when the element is missing;
        # the original then crashed iterating over None.
        if properties is None:
            return
        # Collect first, remove second: don't mutate while iterating.
        resp_properties = [
            prop for prop in properties
            if "polarion-response-" in prop.get("name", "")
        ]
        for resp_property in resp_properties:
            properties.remove(resp_property)
    elif xml_root.tag in ("testcases", "requirements"):
        resp_properties = xml_root.find("response-properties")
        if resp_properties is not None:
            xml_root.remove(resp_properties)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
def _write_cache ( self , lines , append = False ) : """Write virtualenv metadata to cache ."""
mode = 'at' if append else 'wt' with open ( self . filepath , mode , encoding = 'utf8' ) as fh : fh . writelines ( line + '\n' for line in lines )
def get_position(self, row, col):
    """Get char position in all the text from row and column.

    Counts every full line before ``row`` (plus one per newline) and
    adds ``col`` within the target row.
    """
    self.log.debug('%s %s', row, col)
    preceding = self.editor.getlines()[:row - 1]
    offset = col + sum(len(text) + 1 for text in preceding)
    self.log.debug(offset)
    return offset
def max_substring(words, position=0, _last_letter=''):
    """Finds max substring shared by all strings starting at position

    Args:
        words (list): list of unicode of all words to compare
        position (int): starting position in each word to begin analyzing
            for substring
        _last_letter (unicode): prefix prepended to the result; kept for
            backward compatibility with the old recursive implementation

    Returns:
        unicode: max str common to all words

    Examples:
        .. code-block:: Python

            >>> max_substring(['aaaa', 'aaab', 'aaac'])
            'aaa'
            >>> max_substring(['abbb', 'bbbb', 'cbbb'], position=1)
            'bbb'
            >>> max_substring(['abc', 'bcd', 'cde'])
            ''
    """
    # Robustness fix: an empty word list used to raise IndexError.
    if not words:
        return _last_letter
    common = []
    # Iterative rewrite: the original recursed once per matching character,
    # hitting the recursion limit (and quadratic string building) on long
    # common prefixes.
    while True:
        try:
            letters = [word[position] for word in words]
        except IndexError:
            # Ran past the end of the shortest word.
            break
        if any(letter != letters[0] for letter in letters):
            break
        common.append(letters[0])
        position += 1
    return _last_letter + ''.join(common)