idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
23,400 | def _make_token_async ( scopes , service_account_id ) : rpc = app_identity . create_rpc ( ) app_identity . make_get_access_token_call ( rpc , scopes , service_account_id ) token , expires_at = yield rpc raise ndb . Return ( ( token , expires_at ) ) | Get a fresh authentication token . |
23,401 | def _make_sync_method ( name ) : def sync_wrapper ( self , * args , ** kwds ) : method = getattr ( self , name ) future = method ( * args , ** kwds ) return future . get_result ( ) return sync_wrapper | Helper to synthesize a synchronous method from an async method name . |
23,402 | def add_sync_methods ( cls ) : for name in cls . __dict__ . keys ( ) : if name . endswith ( '_async' ) : sync_name = name [ : - 6 ] if not hasattr ( cls , sync_name ) : setattr ( cls , sync_name , _make_sync_method ( name ) ) return cls | Class decorator to add synchronous methods corresponding to async methods . |
23,403 | def do_request_async ( self , url , method = 'GET' , headers = None , payload = None , deadline = None , callback = None ) : retry_wrapper = api_utils . _RetryWrapper ( self . retry_params , retriable_exceptions = api_utils . _RETRIABLE_EXCEPTIONS , should_retry = api_utils . _should_retry ) resp = yield retry_wrapper . run ( self . urlfetch_async , url = url , method = method , headers = headers , payload = payload , deadline = deadline , callback = callback , follow_redirects = False ) raise ndb . Return ( ( resp . status_code , resp . headers , resp . content ) ) | Issue one HTTP request . |
23,404 | def get_metadata ( headers ) : return dict ( ( k , v ) for k , v in headers . iteritems ( ) if any ( k . lower ( ) . startswith ( valid ) for valid in _GCS_METADATA ) ) | Get user defined options from HTTP response headers . |
23,405 | def _process_path_prefix ( path_prefix ) : _validate_path ( path_prefix ) if not _GCS_PATH_PREFIX_REGEX . match ( path_prefix ) : raise ValueError ( 'Path prefix should have format /bucket, /bucket/, ' 'or /bucket/prefix but got %s.' % path_prefix ) bucket_name_end = path_prefix . find ( '/' , 1 ) bucket = path_prefix prefix = None if bucket_name_end != - 1 : bucket = path_prefix [ : bucket_name_end ] prefix = path_prefix [ bucket_name_end + 1 : ] or None return bucket , prefix | Validate and process a Google Cloud Storage path prefix . |
23,406 | def _validate_path ( path ) : if not path : raise ValueError ( 'Path is empty' ) if not isinstance ( path , basestring ) : raise TypeError ( 'Path should be a string but is %s (%s).' % ( path . __class__ , path ) ) | Basic validation of Google Storage paths . |
23,407 | def validate_options ( options ) : if not options : return for k , v in options . iteritems ( ) : if not isinstance ( k , str ) : raise TypeError ( 'option %r should be a str.' % k ) if not any ( k . lower ( ) . startswith ( valid ) for valid in _GCS_OPTIONS ) : raise ValueError ( 'option %s is not supported.' % k ) if not isinstance ( v , basestring ) : raise TypeError ( 'value %r for option %s should be of type basestring.' % ( v , k ) ) | Validate Google Cloud Storage options . |
23,408 | def dt_str_to_posix ( dt_str ) : parsable , _ = dt_str . split ( '.' ) dt = datetime . datetime . strptime ( parsable , _DT_FORMAT ) return calendar . timegm ( dt . utctimetuple ( ) ) | format str to posix . |
23,409 | def posix_to_dt_str ( posix ) : dt = datetime . datetime . utcfromtimestamp ( posix ) dt_str = dt . strftime ( _DT_FORMAT ) return dt_str + '.000Z' | Reverse of str_to_datetime . |
23,410 | def local_run ( ) : server_software = os . environ . get ( 'SERVER_SOFTWARE' ) if server_software is None : return True if 'remote_api' in server_software : return False if server_software . startswith ( ( 'Development' , 'testutil' ) ) : return True return False | Whether we should hit GCS dev appserver stub . |
23,411 | def memory_usage ( method ) : def wrapper ( * args , ** kwargs ) : logging . info ( 'Memory before method %s is %s.' , method . __name__ , runtime . memory_usage ( ) . current ( ) ) result = method ( * args , ** kwargs ) logging . info ( 'Memory after method %s is %s' , method . __name__ , runtime . memory_usage ( ) . current ( ) ) return result return wrapper | Log memory usage before and after a method . |
23,412 | def _get_storage_api ( retry_params , account_id = None ) : api = _StorageApi ( _StorageApi . full_control_scope , service_account_id = account_id , retry_params = retry_params ) service_account = app_identity . get_service_account_name ( ) if ( common . local_run ( ) and not common . get_access_token ( ) and ( not service_account or service_account . endswith ( '@localhost' ) ) ) : api . api_url = common . local_api_url ( ) if common . get_access_token ( ) : api . token = common . get_access_token ( ) return api | Returns storage_api instance for API methods . |
23,413 | def post_object_async ( self , path , ** kwds ) : return self . do_request_async ( self . api_url + path , 'POST' , ** kwds ) | POST to an object . |
23,414 | def put_object_async ( self , path , ** kwds ) : return self . do_request_async ( self . api_url + path , 'PUT' , ** kwds ) | PUT an object . |
23,415 | def get_object_async ( self , path , ** kwds ) : return self . do_request_async ( self . api_url + path , 'GET' , ** kwds ) | GET an object . |
23,416 | def delete_object_async ( self , path , ** kwds ) : return self . do_request_async ( self . api_url + path , 'DELETE' , ** kwds ) | DELETE an object . |
23,417 | def head_object_async ( self , path , ** kwds ) : return self . do_request_async ( self . api_url + path , 'HEAD' , ** kwds ) | HEAD an object . |
23,418 | def get_bucket_async ( self , path , ** kwds ) : return self . do_request_async ( self . api_url + path , 'GET' , ** kwds ) | GET a bucket . |
23,419 | def compose_object ( self , file_list , destination_file , content_type ) : xml_setting_list = [ '<ComposeRequest>' ] for meta_data in file_list : xml_setting_list . append ( '<Component>' ) for key , val in meta_data . iteritems ( ) : xml_setting_list . append ( '<%s>%s</%s>' % ( key , val , key ) ) xml_setting_list . append ( '</Component>' ) xml_setting_list . append ( '</ComposeRequest>' ) xml = '' . join ( xml_setting_list ) if content_type is not None : headers = { 'Content-Type' : content_type } else : headers = None status , resp_headers , content = self . put_object ( api_utils . _quote_filename ( destination_file ) + '?compose' , payload = xml , headers = headers ) errors . check_status ( status , [ 200 ] , destination_file , resp_headers , body = content ) | COMPOSE multiple objects together . |
23,420 | def readline ( self , size = - 1 ) : self . _check_open ( ) if size == 0 or not self . _remaining ( ) : return '' data_list = [ ] newline_offset = self . _buffer . find_newline ( size ) while newline_offset < 0 : data = self . _buffer . read ( size ) size -= len ( data ) self . _offset += len ( data ) data_list . append ( data ) if size == 0 or not self . _remaining ( ) : return '' . join ( data_list ) self . _buffer . reset ( self . _buffer_future . get_result ( ) ) self . _request_next_buffer ( ) newline_offset = self . _buffer . find_newline ( size ) data = self . _buffer . read_to_offset ( newline_offset + 1 ) self . _offset += len ( data ) data_list . append ( data ) return '' . join ( data_list ) | Read one line delimited by \ n from the file . |
23,421 | def read ( self , size = - 1 ) : self . _check_open ( ) if not self . _remaining ( ) : return '' data_list = [ ] while True : remaining = self . _buffer . remaining ( ) if size >= 0 and size < remaining : data_list . append ( self . _buffer . read ( size ) ) self . _offset += size break else : size -= remaining self . _offset += remaining data_list . append ( self . _buffer . read ( ) ) if self . _buffer_future is None : if size < 0 or size >= self . _remaining ( ) : needs = self . _remaining ( ) else : needs = size data_list . extend ( self . _get_segments ( self . _offset , needs ) ) self . _offset += needs break if self . _buffer_future : self . _buffer . reset ( self . _buffer_future . get_result ( ) ) self . _buffer_future = None if self . _buffer_future is None : self . _request_next_buffer ( ) return '' . join ( data_list ) | Read data from RAW file . |
23,422 | def _request_next_buffer ( self ) : self . _buffer_future = None next_offset = self . _offset + self . _buffer . remaining ( ) if next_offset != self . _file_size : self . _buffer_future = self . _get_segment ( next_offset , self . _buffer_size ) | Request next buffer . |
23,423 | def _get_segments ( self , start , request_size ) : if not request_size : return [ ] end = start + request_size futures = [ ] while request_size > self . _max_request_size : futures . append ( self . _get_segment ( start , self . _max_request_size ) ) request_size -= self . _max_request_size start += self . _max_request_size if start < end : futures . append ( self . _get_segment ( start , end - start ) ) return [ fut . get_result ( ) for fut in futures ] | Get segments of the file from Google Storage as a list . |
23,424 | def _get_segment ( self , start , request_size , check_response = True ) : end = start + request_size - 1 content_range = '%d-%d' % ( start , end ) headers = { 'Range' : 'bytes=' + content_range } status , resp_headers , content = yield self . _api . get_object_async ( self . _path , headers = headers ) def _checker ( ) : errors . check_status ( status , [ 200 , 206 ] , self . _path , headers , resp_headers , body = content ) self . _check_etag ( resp_headers . get ( 'etag' ) ) if check_response : _checker ( ) raise ndb . Return ( content ) raise ndb . Return ( content , _checker ) | Get a segment of the file from Google Storage . |
23,425 | def _check_etag ( self , etag ) : if etag is None : return elif self . _etag is None : self . _etag = etag elif self . _etag != etag : raise ValueError ( 'File on GCS has changed while reading.' ) | Check if etag is the same across requests to GCS . |
23,426 | def seek ( self , offset , whence = os . SEEK_SET ) : self . _check_open ( ) self . _buffer . reset ( ) self . _buffer_future = None if whence == os . SEEK_SET : self . _offset = offset elif whence == os . SEEK_CUR : self . _offset += offset elif whence == os . SEEK_END : self . _offset = self . _file_size + offset else : raise ValueError ( 'Whence mode %s is invalid.' % str ( whence ) ) self . _offset = min ( self . _offset , self . _file_size ) self . _offset = max ( self . _offset , 0 ) if self . _remaining ( ) : self . _request_next_buffer ( ) | Set the file s current offset . |
23,427 | def find_newline ( self , size = - 1 ) : if size < 0 : return self . _buffer . find ( '\n' , self . _offset ) return self . _buffer . find ( '\n' , self . _offset , self . _offset + size ) | Search for newline char in buffer starting from current offset . |
23,428 | def write ( self , data ) : self . _check_open ( ) if not isinstance ( data , str ) : raise TypeError ( 'Expected str but got %s.' % type ( data ) ) if not data : return self . _buffer . append ( data ) self . _buffered += len ( data ) self . _offset += len ( data ) if self . _buffered >= self . _flushsize : self . _flush ( ) | Write some bytes . |
23,429 | def close ( self ) : if not self . closed : self . closed = True self . _flush ( finish = True ) self . _buffer = None | Flush the buffer and finalize the file . |
23,430 | def _flush ( self , finish = False ) : while ( ( finish and self . _buffered >= 0 ) or ( not finish and self . _buffered >= self . _blocksize ) ) : tmp_buffer = [ ] tmp_buffer_len = 0 excess = 0 while self . _buffer : buf = self . _buffer . popleft ( ) size = len ( buf ) self . _buffered -= size tmp_buffer . append ( buf ) tmp_buffer_len += size if tmp_buffer_len >= self . _maxrequestsize : excess = tmp_buffer_len - self . _maxrequestsize break if not finish and ( tmp_buffer_len % self . _blocksize + self . _buffered < self . _blocksize ) : excess = tmp_buffer_len % self . _blocksize break if excess : over = tmp_buffer . pop ( ) size = len ( over ) assert size >= excess tmp_buffer_len -= size head , tail = over [ : - excess ] , over [ - excess : ] self . _buffer . appendleft ( tail ) self . _buffered += len ( tail ) if head : tmp_buffer . append ( head ) tmp_buffer_len += len ( head ) data = '' . join ( tmp_buffer ) file_len = '*' if finish and not self . _buffered : file_len = self . _written + len ( data ) self . _send_data ( data , self . _written , file_len ) self . _written += len ( data ) if file_len != '*' : break | Internal API to flush . |
23,431 | def _send_data ( self , data , start_offset , file_len ) : headers = { } end_offset = start_offset + len ( data ) - 1 if data : headers [ 'content-range' ] = ( 'bytes %d-%d/%s' % ( start_offset , end_offset , file_len ) ) else : headers [ 'content-range' ] = ( 'bytes */%s' % file_len ) status , response_headers , content = self . _api . put_object ( self . _path_with_token , payload = data , headers = headers ) if file_len == '*' : expected = 308 else : expected = 200 errors . check_status ( status , [ expected ] , self . _path , headers , response_headers , content , { 'upload_path' : self . _path_with_token } ) | Send the block to the storage service . |
23,432 | def _get_offset_from_gcs ( self ) : headers = { 'content-range' : 'bytes */*' } status , response_headers , content = self . _api . put_object ( self . _path_with_token , headers = headers ) errors . check_status ( status , [ 308 ] , self . _path , headers , response_headers , content , { 'upload_path' : self . _path_with_token } ) val = response_headers . get ( 'range' ) if val is None : return - 1 _ , offset = val . rsplit ( '-' , 1 ) return int ( offset ) | Get the last offset that has been written to GCS . |
23,433 | def _force_close ( self , file_length = None ) : if file_length is None : file_length = self . _get_offset_from_gcs ( ) + 1 self . _send_data ( '' , 0 , file_length ) | Close this buffer on file_length . |
23,434 | def receive_data ( self , data ) : if data : if self . _receive_buffer_closed : raise RuntimeError ( "received close, then received more data?" ) self . _receive_buffer += data else : self . _receive_buffer_closed = True | Add data to our internal receive buffer . |
23,435 | def next_event ( self ) : if self . their_state is ERROR : raise RemoteProtocolError ( "Can't receive data when peer state is ERROR" ) try : event = self . _extract_next_receive_event ( ) if event not in [ NEED_DATA , PAUSED ] : self . _process_event ( self . their_role , event ) self . _receive_buffer . compress ( ) if event is NEED_DATA : if len ( self . _receive_buffer ) > self . _max_incomplete_event_size : raise RemoteProtocolError ( "Receive buffer too long" , error_status_hint = 431 ) if self . _receive_buffer_closed : raise RemoteProtocolError ( "peer unexpectedly closed connection" ) return event except BaseException as exc : self . _process_error ( self . their_role ) if isinstance ( exc , LocalProtocolError ) : exc . _reraise_as_remote_protocol_error ( ) else : raise | Parse the next event out of our receive buffer update our internal state and return it . |
23,436 | def send ( self , event ) : data_list = self . send_with_data_passthrough ( event ) if data_list is None : return None else : return b"" . join ( data_list ) | Convert a high - level event into bytes that can be sent to the peer while updating our internal state machine . |
23,437 | def adam7_generate ( width , height ) : for xstart , ystart , xstep , ystep in adam7 : if xstart >= width : continue yield ( ( xstart , y , xstep ) for y in range ( ystart , height , ystep ) ) | Generate the coordinates for the reduced scanlines of an Adam7 interlaced image of size width by height pixels . |
23,438 | def write_chunk ( outfile , tag , data = b'' ) : data = bytes ( data ) outfile . write ( struct . pack ( "!I" , len ( data ) ) ) outfile . write ( tag ) outfile . write ( data ) checksum = zlib . crc32 ( tag ) checksum = zlib . crc32 ( data , checksum ) checksum &= 2 ** 32 - 1 outfile . write ( struct . pack ( "!I" , checksum ) ) | Write a PNG chunk to the output file including length and checksum . |
23,439 | def pack_rows ( rows , bitdepth ) : assert bitdepth < 8 assert 8 % bitdepth == 0 spb = int ( 8 / bitdepth ) def make_byte ( block ) : res = 0 for v in block : res = ( res << bitdepth ) + v return res for row in rows : a = bytearray ( row ) n = float ( len ( a ) ) extra = math . ceil ( n / spb ) * spb - n a . extend ( [ 0 ] * int ( extra ) ) blocks = group ( a , spb ) yield bytearray ( make_byte ( block ) for block in blocks ) | Yield packed rows that are a byte array . Each byte is packed with the values from several pixels . |
23,440 | def unpack_rows ( rows ) : for row in rows : fmt = '!%dH' % len ( row ) yield bytearray ( struct . pack ( fmt , * row ) ) | Unpack each row from being 16 - bits per value to being a sequence of bytes . |
23,441 | def is_natural ( x ) : try : is_integer = int ( x ) == x except ( TypeError , ValueError ) : return False return is_integer and x >= 0 | A non - negative integer . |
23,442 | def binary_stdout ( ) : try : stdout = sys . stdout . buffer except AttributeError : stdout = sys . stdout if sys . platform == "win32" : import msvcrt import os msvcrt . setmode ( sys . stdout . fileno ( ) , os . O_BINARY ) return stdout | A sys . stdout that accepts bytes . |
23,443 | def write_packed ( self , outfile , rows ) : self . write_preamble ( outfile ) if self . compression is not None : compressor = zlib . compressobj ( self . compression ) else : compressor = zlib . compressobj ( ) data = bytearray ( ) for i , row in enumerate ( rows ) : data . append ( 0 ) data . extend ( row ) if len ( data ) > self . chunk_limit : compressed = compressor . compress ( bytes ( data ) ) if len ( compressed ) : write_chunk ( outfile , b'IDAT' , compressed ) data = bytearray ( ) compressed = compressor . compress ( bytes ( data ) ) flushed = compressor . flush ( ) if len ( compressed ) or len ( flushed ) : write_chunk ( outfile , b'IDAT' , compressed + flushed ) write_chunk ( outfile , b'IEND' ) return i + 1 | Write PNG file to outfile . rows should be an iterator that yields each packed row ; a packed row being a sequence of packed bytes . |
23,444 | def array_scanlines_interlace ( self , pixels ) : fmt = 'BH' [ self . bitdepth > 8 ] vpr = self . width * self . planes for lines in adam7_generate ( self . width , self . height ) : for x , y , xstep in lines : ppr = int ( math . ceil ( ( self . width - x ) / float ( xstep ) ) ) reduced_row_len = ppr * self . planes if xstep == 1 : offset = y * vpr yield pixels [ offset : offset + vpr ] continue row = array ( fmt ) row . extend ( pixels [ 0 : reduced_row_len ] ) offset = y * vpr + x * self . planes end_offset = ( y + 1 ) * vpr skip = self . planes * xstep for i in range ( self . planes ) : row [ i : : self . planes ] = pixels [ offset + i : end_offset : skip ] yield row | Generator for interlaced scanlines from an array . pixels is the full source image as a single array of values . The generator yields each scanline of the reduced passes in turn each scanline being a sequence of values . |
23,445 | def write ( self , file ) : w = Writer ( ** self . info ) w . write ( file , self . rows ) | Write the image to the open file object . |
23,446 | def _deinterlace ( self , raw ) : vpr = self . width * self . planes vpi = vpr * self . height if self . bitdepth > 8 : a = array ( 'H' , [ 0 ] * vpi ) else : a = bytearray ( [ 0 ] * vpi ) source_offset = 0 for lines in adam7_generate ( self . width , self . height ) : recon = None for x , y , xstep in lines : ppr = int ( math . ceil ( ( self . width - x ) / float ( xstep ) ) ) row_size = int ( math . ceil ( self . psize * ppr ) ) filter_type = raw [ source_offset ] source_offset += 1 scanline = raw [ source_offset : source_offset + row_size ] source_offset += row_size recon = self . undo_filter ( filter_type , scanline , recon ) flat = self . _bytes_to_values ( recon , width = ppr ) if xstep == 1 : assert x == 0 offset = y * vpr a [ offset : offset + vpr ] = flat else : offset = y * vpr + x * self . planes end_offset = ( y + 1 ) * vpr skip = self . planes * xstep for i in range ( self . planes ) : a [ offset + i : end_offset : skip ] = flat [ i : : self . planes ] return a | Read raw pixel data undo filters deinterlace and flatten . Return a single array of values . |
23,447 | def _bytes_to_values ( self , bs , width = None ) : if self . bitdepth == 8 : return bytearray ( bs ) if self . bitdepth == 16 : return array ( 'H' , struct . unpack ( '!%dH' % ( len ( bs ) // 2 ) , bs ) ) assert self . bitdepth < 8 if width is None : width = self . width spb = 8 // self . bitdepth out = bytearray ( ) mask = 2 ** self . bitdepth - 1 shifts = [ self . bitdepth * i for i in reversed ( list ( range ( spb ) ) ) ] for o in bs : out . extend ( [ mask & ( o >> i ) for i in shifts ] ) return out [ : width ] | Convert a packed row of bytes into a row of values . Result will be a freshly allocated object not shared with the argument . |
23,448 | def _iter_straight_packed ( self , byte_blocks ) : rb = self . row_bytes a = bytearray ( ) recon = None for some_bytes in byte_blocks : a . extend ( some_bytes ) while len ( a ) >= rb + 1 : filter_type = a [ 0 ] scanline = a [ 1 : rb + 1 ] del a [ : rb + 1 ] recon = self . undo_filter ( filter_type , scanline , recon ) yield recon if len ( a ) != 0 : raise FormatError ( 'Wrong size for decompressed IDAT chunk.' ) assert len ( a ) == 0 | Iterator that undoes the effect of filtering ; yields each row as a sequence of packed bytes . Assumes input is straightlaced . byte_blocks should be an iterable that yields the raw bytes in blocks of arbitrary size . |
23,449 | def preamble ( self , lenient = False ) : self . validate_signature ( ) while True : if not self . atchunk : self . atchunk = self . _chunk_len_type ( ) if self . atchunk is None : raise FormatError ( 'This PNG file has no IDAT chunks.' ) if self . atchunk [ 1 ] == b'IDAT' : return self . process_chunk ( lenient = lenient ) | Extract the image metadata by reading the initial part of the PNG file up to the start of the IDAT chunk . All the chunks that precede the IDAT chunk are read and either processed for metadata or discarded . |
23,450 | def _dehex ( s ) : import re import binascii s = re . sub ( br'[^a-fA-F\d]' , b'' , s ) return binascii . unhexlify ( s ) | Liberally convert from hex string to binary string . |
23,451 | def s15f16l ( s ) : n = len ( s ) // 4 t = struct . unpack ( '>%dl' % n , s ) return map ( ( 2 ** - 16 ) . __mul__ , t ) | Convert sequence of ICC s15Fixed16 to list of float . |
23,452 | def RDcurv ( s ) : assert s [ 0 : 4 ] == 'curv' count , = struct . unpack ( '>L' , s [ 8 : 12 ] ) if count == 0 : return dict ( gamma = 1 ) table = struct . unpack ( '>%dH' % count , s [ 12 : ] ) if count == 1 : return dict ( gamma = table [ 0 ] * 2 ** - 8 ) return table | Convert ICC curveType . |
23,453 | def RDvcgt ( s ) : assert s [ 0 : 4 ] == 'vcgt' tagtype , = struct . unpack ( '>L' , s [ 8 : 12 ] ) if tagtype != 0 : return s [ 8 : ] if tagtype == 0 : channels , count , size = struct . unpack ( '>3H' , s [ 12 : 18 ] ) if size == 1 : fmt = 'B' elif size == 2 : fmt = 'H' else : return s [ 8 : ] n = len ( s [ 18 : ] ) // size t = struct . unpack ( '>%d%s' % ( n , fmt ) , s [ 18 : ] ) t = group ( t , count ) return size , t return s [ 8 : ] | Convert Apple CMVideoCardGammaType . |
23,454 | def greyInput ( self ) : self . d . update ( dict ( profileclass = 'scnr' , colourspace = 'GRAY' , pcs = 'XYZ ' ) ) return self | Adjust self . d dictionary for greyscale input device . profileclass is scnr colourspace is GRAY pcs is XYZ . |
23,455 | def write ( self , out ) : if not self . rawtagtable : self . rawtagtable = self . rawtagdict . items ( ) tags = tagblock ( self . rawtagtable ) self . writeHeader ( out , 128 + len ( tags ) ) out . write ( tags ) out . flush ( ) return self | Write ICC Profile to the file . |
23,456 | def writeHeader ( self , out , size = 999 ) : def defaultkey ( d , key , value ) : if key in d : return d [ key ] = value z = '\x00' * 4 defaults = dict ( preferredCMM = z , version = '02000000' , profileclass = z , colourspace = z , pcs = 'XYZ ' , created = writeICCdatetime ( ) , acsp = 'acsp' , platform = z , flag = 0 , manufacturer = z , model = 0 , deviceattributes = 0 , intent = 0 , pcsilluminant = encodefuns ( ) [ 'XYZ' ] ( * D50 ( ) ) , creator = z , ) for k , v in defaults . items ( ) : defaultkey ( self . d , k , v ) hl = map ( self . d . __getitem__ , [ 'preferredCMM' , 'version' , 'profileclass' , 'colourspace' , 'pcs' , 'created' , 'acsp' , 'platform' , 'flag' , 'manufacturer' , 'model' , 'deviceattributes' , 'intent' , 'pcsilluminant' , 'creator' ] ) hl [ 1 ] = int ( hl [ 1 ] , 16 ) out . write ( struct . pack ( '>L4sL4s4s4s12s4s4sL4sLQL12s4s' , size , * hl ) ) out . write ( '\x00' * 44 ) return self | Add default values to the instance s d dictionary then write a header out onto the file stream . The size of the profile must be specified using the size argument . |
23,457 | def convert ( f , output = sys . stdout ) : r = f . read ( 11 ) if r == 'compressed\n' : png ( output , * decompress ( f ) ) else : png ( output , * glue ( f , r ) ) | Convert Plan 9 file to PNG format . Works with either uncompressed or compressed files . |
23,458 | def bitdepthof ( pixel ) : maxd = 0 for c in re . findall ( r'[a-z]\d*' , pixel ) : if c [ 0 ] != 'x' : maxd = max ( maxd , int ( c [ 1 : ] ) ) return maxd | Return the bitdepth for a Plan9 pixel format string . |
23,459 | def decompress ( f ) : r = meta ( f . read ( 60 ) ) return r , decomprest ( f , r [ 4 ] ) | Decompress a Plan 9 image file . Assumes f is already cued past the initial compressed \ n string . |
23,460 | def decomprest ( f , rows ) : row = 0 while row < rows : row , o = deblock ( f ) yield o | Iterator that decompresses the rest of a file once the metadata have been consumed . |
23,461 | def prepareList ( self , listFile = False , noSample = False ) : logging . debug ( "Loading resolver file" ) listFileLocation = self . listLocal if not listFile else listFile listLocal = os . path . expanduser ( listFileLocation ) assert os . path . isdir ( os . path . dirname ( listLocal ) ) , "{0} is not a directory!" . format ( os . path . dirname ( listLocal ) ) assert os . access ( os . path . dirname ( listLocal ) , os . W_OK ) , "{0} is not writable!" . format ( os . path . dirname ( listLocal ) ) with open ( listLocal ) as ll : raw = ll . read ( ) serverList = yaml . safe_load ( raw ) if self . country is not None : logging . debug ( "Filtering serverList for country {0}" . format ( self . country ) ) serverList = [ d for d in serverList if d [ 'country' ] == self . country ] if len ( serverList ) == 0 : raise ValueError ( "There are no servers avaliable " "with the country code {0}" . format ( self . country ) ) if self . maxServers == 'ALL' or noSample : self . maxServers = len ( serverList ) elif self . maxServers > len ( serverList ) : logging . warning ( "You asked me to query {0} servers, but I only have " "{1} servers in my serverlist" . format ( self . maxServers , len ( serverList ) ) ) self . maxServers = len ( serverList ) self . serverList = random . sample ( serverList , self . maxServers ) return self . serverList | Load and filter the server list for only the servers we care about |
23,462 | def query ( self , domain , recordType , progress = True ) : assert type ( domain ) == str , "Domain must be a string" recordType = recordType . upper ( ) assert recordType in self . lookupRecordTypes , "Record type is not in valid list of record types {0}" . format ( ', ' . join ( self . lookupRecordTypes ) ) self . domain = domain self . recordType = recordType self . resultsColated = [ ] self . results = [ ] if len ( self . serverList ) == 0 : logging . warning ( "Server list is empty. Attempting " "to populate with prepareList" ) self . prepareList ( ) logging . debug ( "Starting query against {0} servers" . format ( len ( self . serverList ) ) ) workers = [ ] startTime = datetime . utcnow ( ) serverCounter = 0 while len ( self . results ) < len ( self . serverList ) : runningWorkers = len ( [ w for w in workers if w . result is None ] ) for i , w in enumerate ( workers ) : if w . result : self . results . append ( w . result ) workers . pop ( i ) if progress : sys . stdout . write ( "\r\x1b[KStatus: Queried {0} of {1} servers, duration: {2}" . format ( len ( self . results ) , len ( self . serverList ) , ( datetime . utcnow ( ) - startTime ) ) ) sys . stdout . flush ( ) if runningWorkers < self . maxWorkers : logging . debug ( "Starting {0} workers" . format ( self . maxWorkers - runningWorkers ) ) for i in range ( 0 , self . maxWorkers - runningWorkers ) : if serverCounter < len ( self . serverList ) : wt = QueryWorker ( ) wt . server = self . serverList [ serverCounter ] wt . domain = domain wt . recType = recordType wt . daemon = True workers . append ( wt ) wt . start ( ) serverCounter += 1 time . sleep ( 0.1 ) for r in self . results : if r [ 'results' ] in [ rs [ 'results' ] for rs in self . resultsColated ] : cid = [ i for i , rs in enumerate ( self . resultsColated ) if r [ 'results' ] == rs [ 'results' ] ] [ 0 ] self . resultsColated [ cid ] [ 'servers' ] . append ( r [ 'server' ] ) else : self . resultsColated . 
append ( { 'servers' : [ r [ 'server' ] ] , 'results' : r [ 'results' ] , 'success' : r [ 'success' ] } ) if progress : sys . stdout . write ( "\n\n" ) logging . debug ( "There are {0} unique results" . format ( len ( self . resultsColated ) ) ) | Run the query |
23,463 | def outputSimple ( self ) : out = [ ] errors = [ ] successfulResponses = len ( [ True for rsp in self . results if rsp [ 'success' ] ] ) out . append ( "INFO QUERIED {0}" . format ( len ( self . serverList ) ) ) out . append ( "INFO SUCCESS {0}" . format ( successfulResponses ) ) out . append ( "INFO ERROR {0}" . format ( len ( self . serverList ) - successfulResponses ) ) for rsp in self . resultsColated : if rsp [ 'success' ] : out . append ( "RESULT {0} {1}" . format ( len ( rsp [ 'servers' ] ) , "|" . join ( rsp [ 'results' ] ) ) ) else : errors . append ( "ERROR {0} {1}" . format ( len ( rsp [ 'servers' ] ) , "|" . join ( rsp [ 'results' ] ) ) ) out += errors sys . stdout . write ( "\n" . join ( out ) ) sys . stdout . write ( "\n" ) | Simple output mode |
23,464 | def run ( self ) : logging . debug ( "Querying server {0}" . format ( self . server [ 'ip' ] ) ) try : rsvr = dns . resolver . Resolver ( ) rsvr . nameservers = [ self . server [ 'ip' ] ] rsvr . lifetime = 5 rsvr . timeout = 5 qry = rsvr . query ( self . domain , self . recType ) results = sorted ( [ r . to_text ( ) for r in qry ] ) success = True except dns . resolver . NXDOMAIN : success = False results = [ 'NXDOMAIN' ] except dns . resolver . NoNameservers : success = False results = [ 'No Nameservers' ] except dns . resolver . NoAnswer : success = False results = [ 'No Answer' ] except dns . resolver . Timeout : success = False results = [ 'Server Timeout' ] self . result = { 'server' : self . server , 'results' : results , 'success' : success } | Do a single DNS query against a server |
23,465 | def aws_to_unix_id ( aws_key_id ) : uid_bytes = hashlib . sha256 ( aws_key_id . encode ( ) ) . digest ( ) [ - 2 : ] if USING_PYTHON2 : return 2000 + int ( from_bytes ( uid_bytes ) // 2 ) else : return 2000 + ( int . from_bytes ( uid_bytes , byteorder = sys . byteorder ) // 2 ) | Converts a AWS Key ID into a UID |
23,466 | def _gorg ( cls ) : assert isinstance ( cls , GenericMeta ) if hasattr ( cls , '_gorg' ) : return cls . _gorg while cls . __origin__ is not None : cls = cls . __origin__ return cls | This function exists for compatibility with old typing versions . |
23,467 | def _eval_args ( args ) : res = [ ] for arg in args : if not isinstance ( arg , tuple ) : res . append ( arg ) elif is_callable_type ( arg [ 0 ] ) : callable_args = _eval_args ( arg [ 1 : ] ) if len ( arg ) == 2 : res . append ( Callable [ [ ] , callable_args [ 0 ] ] ) elif arg [ 1 ] is Ellipsis : res . append ( Callable [ ... , callable_args [ 1 ] ] ) else : res . append ( Callable [ list ( callable_args [ : - 1 ] ) , callable_args [ - 1 ] ] ) else : res . append ( type ( arg [ 0 ] ) . __getitem__ ( arg [ 0 ] , _eval_args ( arg [ 1 : ] ) ) ) return tuple ( res ) | Internal helper for get_args . |
def on(self, eventName, cb):
    """Subscribe to an igv.js event.

    Registers *cb* as the local handler for *eventName* and forwards the
    subscription command to the browser-side igv instance.
    """
    self.eventHandlers[eventName] = cb
    subscription = {"id": self.igv_id, "command": "on",
                    "eventName": eventName}
    return self._send(subscription)
def log_request(request: str, trim_log_values: bool = False, **kwargs: Any) -> None:
    """Log a request to the request logger at INFO level."""
    return log_(
        request,
        request_logger,
        logging.INFO,
        trim=trim_log_values,
        **kwargs,
    )
def log_response(response: str, trim_log_values: bool = False, **kwargs: Any) -> None:
    """Log a response to the response logger at INFO level."""
    return log_(
        response,
        response_logger,
        logging.INFO,
        trim=trim_log_values,
        **kwargs,
    )
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]:
    """Wraps jsonschema.validate, returning the same object passed in.

    Raises jsonschema's ValidationError when *request* does not conform to
    *schema*; otherwise passes *request* straight through.
    """
    jsonschema_validate(request, schema)
    return request
def call(method: Method, *args: Any, **kwargs: Any) -> Any:
    """Validates arguments and then calls the method.

    validate_args raises (TypeError) when the arguments do not fit the
    method's signature, otherwise returns the method for invocation.
    """
    validated = validate_args(method, *args, **kwargs)
    return validated(*args, **kwargs)
def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response:
    """Call a Request, catching exceptions to ensure we always return a Response.

    On success the handler records a SuccessResponse; on failure
    handle_exceptions substitutes an error response, so handler.response is
    always populated when the context exits.
    """
    with handle_exceptions(request, debug) as handler:
        method = methods.items[request.method]
        result = call(method, *request.args, **request.kwargs)
        handler.response = SuccessResponse(result=result, id=request.id)
    return handler.response
def call_requests(
    requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool
) -> Response:
    """Takes a Request or list of Requests and calls them.

    Any iterable of Requests is treated as a batch and produces a
    BatchResponse; a single Request produces its own Response.
    """
    # Fix: collections.Iterable was removed in Python 3.10.  The typing
    # alias (already imported for the annotation) delegates isinstance
    # checks to collections.abc.Iterable, preserving the old semantics.
    if isinstance(requests, Iterable):
        return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)
    return safe_call(requests, methods, debug=debug)
def dispatch_pure(
    request: str,
    methods: Methods,
    *,
    context: Any,
    convert_camel_case: bool,
    debug: bool,
) -> Response:
    """Pure version of dispatch - no logging, no optional parameters.

    Deserializes and schema-validates the raw request string, then builds
    and calls the Request object(s).  Malformed JSON or JSON-RPC yields an
    error Response instead of raising.
    """
    try:
        deserialized = validate(deserialize(request), schema)
    except JSONDecodeError as exc:
        return InvalidJSONResponse(data=str(exc), debug=debug)
    except ValidationError:
        return InvalidJSONRPCResponse(data=None, debug=debug)
    requests = create_requests(
        deserialized, context=context, convert_camel_case=convert_camel_case
    )
    return call_requests(requests, methods, debug=debug)
def serve(name: str = "", port: int = 5000) -> None:
    """A basic way to serve the methods.

    Starts a blocking HTTP server on (name, port) using RequestHandler;
    does not return until the process is interrupted.
    """
    logging.info(" * Listening on port %s", port)
    server = HTTPServer((name, port), RequestHandler)
    server.serve_forever()
def convert_camel_case_string(name: str) -> str:
    """Convert camel case string to snake case.

    Handles acronym runs ("HTTPResponse" -> "http_response") via a
    two-pass substitution before lowercasing.
    """
    # Pass 1: split "AbcDef"-style boundaries; pass 2: "a1B"-style ones.
    partially_split = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", partially_split).lower()
def convert_camel_case_keys(original_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Converts all keys of a dict from camel case to snake case, recursively.

    Values that are themselves dicts are converted in the same way;
    all other values are carried over untouched.
    """
    converted = {}
    for key, value in original_dict.items():
        # Recurse into nested dicts so deeply nested keys convert too.
        converted[convert_camel_case_string(key)] = (
            convert_camel_case_keys(value) if isinstance(value, dict) else value
        )
    return converted
def get_arguments(
    params: Union[List, Dict, object] = NOPARAMS, context: Any = NOCONTEXT
) -> Tuple[List, Dict]:
    """Get the positional and keyword arguments from a request.

    A list of params becomes positional arguments, a dict becomes keyword
    arguments.  When a *context* object is supplied it is prepended to the
    positionals.
    """
    positionals: List = []
    nameds: Dict = {}
    if params is not NOPARAMS:
        # JSON-RPC params must be either by-position or by-name.
        assert isinstance(params, (list, dict))
        if isinstance(params, list):
            positionals = params
        else:
            nameds = params
    if context is not NOCONTEXT:
        positionals = [context] + positionals
    return (positionals, nameds)
def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method:
    """Check if the request's arguments match a function's signature.

    Signature.bind raises TypeError when the arguments do not fit;
    otherwise the function is returned unchanged for chaining.
    """
    signature(func).bind(*args, **kwargs)
    return func
def add(self, *args: Any, **kwargs: Any) -> Optional[Callable]:
    """Register a function to the list.

    Positional arguments are registered under their ``__name__``; keyword
    arguments under the supplied key.  Returns the first positional
    argument (decorator-friendly) or None when only keywords were given.
    """
    registered = {m.__name__: validate(m) for m in args}
    registered.update({k: validate(v) for k, v in kwargs.items()})
    self.items = {**self.items, **registered}
    return args[0] if args else None
def fix_hyphen_commands(raw_cli_arguments):
    """Update options to match their module names with underscores.

    Mutates and returns *raw_cli_arguments*, renaming each hyphenated
    command key to its underscore form.
    """
    for command in ['gen-sample']:
        raw_cli_arguments[command.replace('-', '_')] = raw_cli_arguments.pop(command)
    return raw_cli_arguments
def main():
    """Provide main CLI entrypoint.

    Configures logging, parses arguments with docopt, and dispatches to
    the command class matching the enabled CLI option.
    """
    if os.environ.get('DEBUG'):
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
        # botocore is noisy at INFO; silence it unless debugging.
        logging.getLogger('botocore').setLevel(logging.ERROR)

    cli_arguments = fix_hyphen_commands(docopt(__doc__, version=version))
    # Only options docopt marked truthy are candidate commands.
    possible_commands = [
        command for command, enabled in cli_arguments.items() if enabled
    ]
    command_class = find_command_class(possible_commands)
    if command_class:
        command_class(cli_arguments).execute()
    else:
        LOGGER.error("class not found for command '%s'", possible_commands)
def calculate_hash_of_files(files, root):
    """Return a hash of all of the given files at the given root.

    Args:
        files: Iterable of file paths relative to *root*.
        root: Directory the paths are resolved against.

    Returns:
        md5 hex digest covering the sorted file names and their contents,
        so both renames and edits change the result.
    """
    file_hash = hashlib.md5()
    for fname in sorted(files):
        fileobj = os.path.join(root, fname)
        # NUL-terminated name first, so renames affect the digest.
        file_hash.update((fname + "\0").encode())
        with open(fileobj, "rb") as filedes:
            # Fix: the file is binary, so the iter() sentinel must be the
            # bytes b"" — the old str sentinel "" never matched and the
            # loop only terminated via a redundant `if not chunk: break`.
            for chunk in iter(lambda: filedes.read(4096), b""):
                file_hash.update(chunk)
        # NUL separator between files.
        file_hash.update("\0".encode())
    return file_hash.hexdigest()
def get_hash_of_files(root_path, directories=None):
    """Generate md5 hash of files.

    Args:
        root_path: Base directory the directory entries are relative to.
        directories: Optional list of dicts, each with a 'path' key
            (relative to root_path) and an optional 'exclusions' list of
            ignore patterns.  Defaults to the root itself.

    Returns:
        Hex digest covering every non-ignored file under the directories.
    """
    if not directories:
        directories = [{'path': './'}]
    files_to_hash = []
    for i in directories:
        # Build a gitignore-style matcher for this directory tree.
        ignorer = get_ignorer(os.path.join(root_path, i['path']),
                              i.get('exclusions'))
        with change_dir(root_path):
            for root, dirs, files in os.walk(i['path'], topdown=True):
                if (root != './') and ignorer.is_ignored(root, True):
                    # Prune ignored dirs in place so os.walk skips them.
                    dirs[:] = []
                    files[:] = []
                else:
                    for filename in files:
                        filepath = os.path.join(root, filename)
                        if not ignorer.is_ignored(filepath):
                            # Strip leading './' for stable hash input.
                            files_to_hash.append(
                                filepath[2:] if filepath.startswith('./')
                                else filepath)
    return calculate_hash_of_files(files_to_hash, root_path)
def get_ignorer(path, additional_exclusions=None):
    """Create ignorer with directory gitignore file.

    Loads patterns from *path*/.gitignore when present and appends any
    *additional_exclusions* supplied by the caller.
    """
    matcher = zgitignore.ZgitIgnore()
    gitignore_path = os.path.join(path, '.gitignore')
    if os.path.isfile(gitignore_path):
        with open(gitignore_path, 'r') as stream:
            matcher.add_patterns(stream.read().splitlines())
    if additional_exclusions is not None:
        matcher.add_patterns(additional_exclusions)
    return matcher
def download_tf_release(version, versions_dir, command_suffix,
                        tf_platform=None, arch=None):
    """Download and install a Terraform release into *versions_dir*.

    Verifies the archive against HashiCorp's published SHA256SUMS,
    extracts it to ``versions_dir/<version>``, and marks the binary
    executable.  Exits the process when the checksum does not match.

    Args:
        version: Terraform version string (e.g. '0.12.1').
        versions_dir: Directory holding per-version install dirs.
        command_suffix: '' or '.exe' appended to the binary name.
        tf_platform: Optional explicit platform token (e.g. 'linux').
        arch: Optional architecture; falls back to TFENV_ARCH or 'amd64'.
    """
    version_dir = os.path.join(versions_dir, version)

    # Architecture: explicit arg > TFENV_ARCH env var > amd64 default.
    if arch is None:
        arch = (os.environ.get('TFENV_ARCH') if os.environ.get('TFENV_ARCH')
                else 'amd64')

    if tf_platform:
        tfver_os = tf_platform + '_' + arch
    else:
        # Derive the platform token HashiCorp uses in release filenames.
        if platform.system().startswith('Darwin'):
            tfver_os = "darwin_%s" % arch
        elif platform.system().startswith('MINGW64') or (
                platform.system().startswith('MSYS_NT') or (
                    platform.system().startswith('CYGWIN_NT'))):
            tfver_os = "windows_%s" % arch
        else:
            tfver_os = "linux_%s" % arch

    download_dir = tempfile.mkdtemp()
    filename = "terraform_%s_%s.zip" % (version, tfver_os)
    shasums_name = "terraform_%s_SHA256SUMS" % version
    tf_url = "https://releases.hashicorp.com/terraform/" + version

    # Fetch both the archive and its checksum manifest.
    for i in [filename, shasums_name]:
        urlretrieve(tf_url + '/' + i, os.path.join(download_dir, i))

    tf_hash = get_hash_for_filename(filename,
                                    os.path.join(download_dir, shasums_name))
    if tf_hash != sha256sum(os.path.join(download_dir, filename)):
        LOGGER.error("Downloaded Terraform %s does not match sha256 %s",
                     filename, tf_hash)
        sys.exit(1)

    tf_zipfile = zipfile.ZipFile(os.path.join(download_dir, filename))
    os.mkdir(version_dir)
    tf_zipfile.extractall(version_dir)
    tf_zipfile.close()
    shutil.rmtree(download_dir)

    # Add execute bits (u+x, g+x, o+x) to the extracted binary.
    os.chmod(
        os.path.join(version_dir, 'terraform' + command_suffix),
        os.stat(os.path.join(version_dir,
                             'terraform' + command_suffix)).st_mode | 0o0111)
def get_available_tf_versions(include_prerelease=False):
    """Return available Terraform versions, newest first.

    Pre-release versions (containing a '-') are filtered out unless
    *include_prerelease* is set.
    """
    index = requests.get('https://releases.hashicorp.com/index.json').text
    tf_releases = json.loads(index)['terraform']
    tf_versions = sorted(tf_releases['versions'].keys(),
                         key=LooseVersion,
                         reverse=True)
    if include_prerelease:
        return tf_versions
    return [i for i in tf_versions if '-' not in i]
def find_min_required(path):
    """Inspect terraform files and find minimum version.

    Scans *.tf files under *path* for a terraform.required_version
    constraint and returns the first concrete version number found.
    Exits with an error when no usable constraint exists.
    """
    found_min_required = ''
    for filename in glob.glob(os.path.join(path, '*.tf')):
        with open(filename, 'r') as stream:
            tf_config = hcl.load(stream)
        required = tf_config.get('terraform', {}).get('required_version')
        if required:
            found_min_required = required
            break

    if found_min_required:
        if re.match(r'^!=.+', found_min_required):
            # A pure negation gives no usable minimum version.
            LOGGER.error('Min required Terraform version is a negation (%s) '
                         '- unable to determine required version',
                         found_min_required)
            sys.exit(1)
        # Extract the bare version number from the constraint string.
        found_min_required = re.search(r'[0-9]*\.[0-9]*(?:\.[0-9]*)?',
                                       found_min_required).group(0)
        LOGGER.debug("Detected minimum terraform version is %s",
                     found_min_required)
        return found_min_required

    LOGGER.error('Terraform version specified as min-required, but unable to '
                 'find a specified version requirement in this module\'s tf '
                 'files')
    sys.exit(1)
def get_version_requested(path):
    """Return string listing requested Terraform version.

    Reads the version pin file from *path*; exits with instructions when
    the file is missing.
    """
    tf_version_path = os.path.join(path, TF_VERSION_FILENAME)
    if not os.path.isfile(tf_version_path):
        LOGGER.error("Terraform install attempted and no %s file present to "
                     "dictate the version. Please create it (e.g. write "
                     "\"0.11.13\" (without quotes) to the file and try again",
                     TF_VERSION_FILENAME)
        sys.exit(1)
    with open(tf_version_path, 'r') as stream:
        return stream.read().rstrip()
def ensure_versions_dir_exists(tfenv_path):
    """Ensure versions directory is available.

    Creates *tfenv_path* and its 'versions' subdirectory when missing.

    Returns:
        Path to the versions directory.
    """
    versions_dir = os.path.join(tfenv_path, 'versions')
    # makedirs(exist_ok=True) creates both levels and removes the
    # isdir-then-mkdir TOCTOU race the previous implementation had when
    # two processes initialized tfenv concurrently.
    os.makedirs(versions_dir, exist_ok=True)
    return versions_dir
def install(self, version_requested=None):
    """Ensure terraform is available.

    Resolves the requested version (explicit argument, version file,
    'min-required', or 'latest[:regex]'), downloads it when not already
    cached under the tfenv versions directory, and returns the path to
    the terraform binary.
    """
    command_suffix = '.exe' if platform.system() == 'Windows' else ''
    versions_dir = ensure_versions_dir_exists(self.tfenv_dir)

    if not version_requested:
        version_requested = get_version_requested(self.path)

    if re.match(r'^min-required$', version_requested):
        LOGGER.debug('tfenv: detecting minimal required version')
        version_requested = find_min_required(self.path)

    # Translate the request into a regex over available version strings.
    if re.match(r'^latest:.*$', version_requested):
        regex = re.search(r'latest:(.*)', version_requested).group(1)
        include_prerelease_versions = False
    elif re.match(r'^latest$', version_requested):
        regex = r'^[0-9]+\.[0-9]+\.[0-9]+$'
        include_prerelease_versions = False
    else:
        regex = "^%s$" % version_requested
        include_prerelease_versions = True

    # Fast path: exact requested version already installed.
    if os.path.isdir(os.path.join(versions_dir, version_requested)):
        LOGGER.info("Terraform version %s already installed; using "
                    "it...", version_requested)
        return os.path.join(versions_dir, version_requested,
                            'terraform') + command_suffix

    try:
        # Newest available version matching the regex.
        version = next(i
                       for i in get_available_tf_versions(
                           include_prerelease_versions)
                       if re.match(regex, i))
    except StopIteration:
        LOGGER.error("Unable to find a Terraform version matching regex: %s",
                     regex)
        sys.exit(1)

    # The resolved version (e.g. from 'latest') may already be cached.
    if os.path.isdir(os.path.join(versions_dir, version)):
        LOGGER.info("Terraform version %s already installed; using it...",
                    version)
        return os.path.join(versions_dir, version,
                            'terraform') + command_suffix

    LOGGER.info("Downloading and using Terraform version %s ...", version)
    download_tf_release(version, versions_dir, command_suffix)
    LOGGER.info("Downloaded Terraform %s successfully", version)
    return os.path.join(versions_dir, version, 'terraform') + command_suffix
def delete_param(context, provider, **kwargs):
    """Delete SSM parameter.

    Deployment hook; requires a 'parameter_name' kwarg.  An already
    missing parameter is treated as success (idempotent delete).
    """
    parameter_name = kwargs.get('parameter_name')
    if not parameter_name:
        raise ValueError('Must specify `parameter_name` for delete_param '
                         'hook.')
    ssm_client = get_session(provider.region).client('ssm')
    try:
        ssm_client.delete_parameter(Name=parameter_name)
    except ssm_client.exceptions.ParameterNotFound:
        LOGGER.info("%s parameter appears to have already been deleted...",
                    parameter_name)
    return True
def save_existing_iam_env_vars(self):
    """Backup IAM environment variables for later restoration.

    Copies each AWS credential variable present in env_vars to an
    'OLD_'-prefixed key so restore_existing_iam_env_vars can revert.
    """
    for var in ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
                'AWS_SESSION_TOKEN'):
        if var in self.env_vars:
            self.env_vars['OLD_' + var] = self.env_vars[var]
def restore_existing_iam_env_vars(self):
    """Restore backed up IAM environment variables.

    Puts back any 'OLD_'-prefixed backup; credential variables without a
    backup are removed entirely.
    """
    for var in ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
                'AWS_SESSION_TOKEN'):
        backup_key = 'OLD_' + var
        if backup_key in self.env_vars:
            self.env_vars[var] = self.env_vars[backup_key]
        elif var in self.env_vars:
            self.env_vars.pop(var)
def hello(event, context):
    """Return Serverless Hello World.

    Lambda handler: echoes the incoming event back inside a 200 response
    with a JSON-encoded body.
    """
    payload = {
        "message": "Go Serverless v1.0! Your function executed successfully!",
        "input": event,
    }
    return {"statusCode": 200, "body": json.dumps(payload)}
def get_env(path, ignore_git_branch=False):
    """Determine environment name.

    Resolution order: DEPLOY_ENVIRONMENT env var, current git branch name
    (unless *ignore_git_branch*), then the directory name.
    """
    if 'DEPLOY_ENVIRONMENT' in os.environ:
        return os.environ['DEPLOY_ENVIRONMENT']

    if ignore_git_branch:
        LOGGER.info('Skipping environment lookup from current git branch '
                    '("ignore_git_branch" is set to true in the runway '
                    'config)')
    else:
        # Imported lazily so git is only required when actually consulted.
        from git import Repo as GitRepo
        from git.exc import InvalidGitRepositoryError
        try:
            branch_name = GitRepo(
                path, search_parent_directories=True).active_branch.name
            LOGGER.info('Deriving environment name from git branch %s...',
                        branch_name)
            return get_env_from_branch(branch_name)
        except InvalidGitRepositoryError:
            # Not a git checkout; fall through to directory-based lookup.
            pass

    LOGGER.info('Deriving environment name from directory %s...', path)
    return get_env_from_directory(os.path.basename(path))
def get_env_dirs(self):
    """Return list of directories in env_root, excluding '.git'."""
    _root, subdirs, _files = next(os.walk(self.env_root))
    return [d for d in subdirs if d != '.git']
def get_yaml_files_at_env_root(self):
    """Return list of yaml files (*.yaml then *.yml) in env_root."""
    matches = []
    for pattern in ('*.yaml', '*.yml'):
        matches.extend(glob.glob(os.path.join(self.env_root, pattern)))
    return matches
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.