idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
24,100 | def real ( self ) : result = self . __raw [ '1' ] . copy ( ) result [ 'c' ] = self . __raw [ '1' ] [ 'value' ] result [ 'value' ] = self . __raw [ '200' ] [ 'v2' ] result [ 'date' ] = self . __raw [ '0' ] [ 'time' ] return result | Get realtime data |
24,101 | def importcsv ( self ) : csv_path = os . path . join ( os . path . dirname ( __file__ ) , self . stock_no_files ) with open ( csv_path ) as csv_file : csv_data = csv . reader ( csv_file ) result = { } for i in csv_data : try : result [ i [ 0 ] ] = str ( i [ 1 ] ) . decode ( 'utf-8' ) except ValueError : if i [ 0 ] == 'UPDATE' : self . last_update = str ( i [ 1 ] ) . decode ( 'utf-8' ) else : pass return result | import data from csv |
24,102 | def __loadindcomps ( self ) : csv_path = os . path . join ( os . path . dirname ( __file__ ) , self . stock_no_files ) with open ( csv_path ) as csv_file : csv_data = csv . reader ( csv_file ) result = { } check_words = re . compile ( r'^[\d]{2,}[\w]?' ) for i in csv_data : if check_words . match ( i [ 2 ] ) : try : result [ i [ 2 ] ] . append ( i [ 0 ] . decode ( 'utf-8' ) ) except ( ValueError , KeyError ) : try : result [ i [ 2 ] ] = [ i [ 0 ] . decode ( 'utf-8' ) ] except KeyError : pass return result | import industry comps |
24,103 | def caldata ( self , time ) : if time . date ( ) in self . __ocdate [ 'close' ] : return False elif time . date ( ) in self . __ocdate [ 'open' ] : return True else : if time . weekday ( ) <= 4 : return True else : return False | Market open or not . |
24,104 | def flaky ( max_runs = None , min_passes = None , rerun_filter = None ) : wrapped = None if hasattr ( max_runs , '__call__' ) : wrapped , max_runs = max_runs , None attrib = default_flaky_attributes ( max_runs , min_passes , rerun_filter ) def wrapper ( wrapped_object ) : for name , value in attrib . items ( ) : setattr ( wrapped_object , name , value ) return wrapped_object return wrapper ( wrapped ) if wrapped is not None else wrapper | Decorator used to mark a test as flaky . When used in conjuction with the flaky nosetests plugin will cause the decorated test to be retried until min_passes successes are achieved out of up to max_runs test runs . |
24,105 | def options ( self , parser , env = os . environ ) : super ( FlakyPlugin , self ) . options ( parser , env = env ) self . add_report_option ( parser . add_option ) group = OptionGroup ( parser , "Force flaky" , "Force all tests to be flaky." ) self . add_force_flaky_options ( group . add_option ) parser . add_option_group ( group ) | Base class override . Add options to the nose argument parser . |
24,106 | def _get_stream ( self , multiprocess = False ) : if multiprocess : from flaky . multiprocess_string_io import MultiprocessingStringIO return MultiprocessingStringIO ( ) return self . _stream | Get the stream used to store the flaky report . If this nose run is going to use the multiprocess plugin then use a multiprocess - list backed StringIO proxy ; otherwise use the default stream . |
24,107 | def configure ( self , options , conf ) : super ( FlakyPlugin , self ) . configure ( options , conf ) if not self . enabled : return is_multiprocess = int ( getattr ( options , 'multiprocess_workers' , 0 ) ) > 0 self . _stream = self . _get_stream ( is_multiprocess ) self . _flaky_result = TextTestResult ( self . _stream , [ ] , 0 ) self . _flaky_report = options . flaky_report self . _flaky_success_report = options . flaky_success_report self . _force_flaky = options . force_flaky self . _max_runs = options . max_runs self . _min_passes = options . min_passes | Base class override . |
24,108 | def handleError ( self , test , err ) : want_error = self . _handle_test_error_or_failure ( test , err ) if not want_error and id ( test ) in self . _tests_that_reran : self . _nose_result . addError ( test , err ) return want_error or None | Baseclass override . Called when a test raises an exception . |
24,109 | def handleFailure ( self , test , err ) : want_failure = self . _handle_test_error_or_failure ( test , err ) if not want_failure and id ( test ) in self . _tests_that_reran : self . _nose_result . addFailure ( test , err ) return want_failure or None | Baseclass override . Called when a test fails . |
24,110 | def addSuccess ( self , test ) : will_handle = self . _handle_test_success ( test ) test_id = id ( test ) if will_handle and test_id not in self . _tests_that_reran : self . _tests_that_have_been_reported . add ( test_id ) if not will_handle and test_id in self . _tests_that_reran and test_id not in self . _tests_that_have_been_reported : self . _nose_result . addSuccess ( test ) return will_handle or None | Baseclass override . Called when a test succeeds . |
24,111 | def default_flaky_attributes ( max_runs = None , min_passes = None , rerun_filter = None ) : if max_runs is None : max_runs = 2 if min_passes is None : min_passes = 1 if min_passes <= 0 : raise ValueError ( 'min_passes must be positive' ) if max_runs < min_passes : raise ValueError ( 'min_passes cannot be greater than max_runs!' ) return { FlakyNames . MAX_RUNS : max_runs , FlakyNames . MIN_PASSES : min_passes , FlakyNames . CURRENT_RUNS : 0 , FlakyNames . CURRENT_PASSES : 0 , FlakyNames . RERUN_FILTER : FilterWrapper ( rerun_filter or _true ) , } | Returns the default flaky attributes to set on a flaky test . |
24,112 | def ensure_unicode_string ( obj ) : try : return unicode_type ( obj ) except UnicodeDecodeError : if hasattr ( obj , 'decode' ) : return obj . decode ( 'utf-8' , 'replace' ) return str ( obj ) . decode ( 'utf-8' , 'replace' ) | Return a unicode string representation of the given obj . |
24,113 | def _report_final_failure ( self , err , flaky , name ) : min_passes = flaky [ FlakyNames . MIN_PASSES ] current_passes = flaky [ FlakyNames . CURRENT_PASSES ] message = self . _failure_message . format ( current_passes , min_passes , ) self . _log_test_failure ( name , err , message ) | Report that the test has failed too many times to pass at least min_passes times . |
24,114 | def _log_intermediate_failure ( self , err , flaky , name ) : max_runs = flaky [ FlakyNames . MAX_RUNS ] runs_left = max_runs - flaky [ FlakyNames . CURRENT_RUNS ] message = self . _retry_failure_message . format ( runs_left , max_runs , ) self . _log_test_failure ( name , err , message ) | Report that the test has failed but still has reruns left . Then rerun the test . |
24,115 | def add_report_option ( add_option ) : add_option ( '--no-flaky-report' , action = 'store_false' , dest = 'flaky_report' , default = True , help = "Suppress the report at the end of the " "run detailing flaky test results." , ) add_option ( '--no-success-flaky-report' , action = 'store_false' , dest = 'flaky_success_report' , default = True , help = "Suppress reporting flaky test successes" "in the report at the end of the " "run detailing flaky test results." , ) | Add an option to the test runner to suppress the flaky report . |
24,116 | def add_force_flaky_options ( add_option ) : add_option ( '--force-flaky' , action = "store_true" , dest = "force_flaky" , default = False , help = "If this option is specified, we will treat all tests as " "flaky." ) add_option ( '--max-runs' , action = "store" , dest = "max_runs" , type = int , default = 2 , help = "If --force-flaky is specified, we will run each test at " "most this many times (unless the test has its own flaky " "decorator)." ) add_option ( '--min-passes' , action = "store" , dest = "min_passes" , type = int , default = 1 , help = "If --force-flaky is specified, we will run each test at " "least this many times (unless the test has its own flaky " "decorator)." ) | Add options to the test runner that force all tests to be flaky . |
24,117 | def _add_flaky_report ( self , stream ) : value = self . _stream . getvalue ( ) if not self . _flaky_success_report and not value : return stream . write ( '===Flaky Test Report===\n\n' ) try : stream . write ( value ) except UnicodeEncodeError : stream . write ( value . encode ( 'utf-8' , 'replace' ) ) stream . write ( '\n===End Flaky Test Report===\n' ) | Baseclass override . Write details about flaky tests to the test report . |
24,118 | def _copy_flaky_attributes ( cls , test , test_class ) : test_callable = cls . _get_test_callable ( test ) if test_callable is None : return for attr , value in cls . _get_flaky_attributes ( test_class ) . items ( ) : already_set = hasattr ( test , attr ) if already_set : continue attr_on_callable = getattr ( test_callable , attr , None ) if attr_on_callable is not None : cls . _set_flaky_attribute ( test , attr , attr_on_callable ) elif value is not None : cls . _set_flaky_attribute ( test , attr , value ) | Copy flaky attributes from the test callable or class to the test . |
24,119 | def _increment_flaky_attribute ( cls , test_item , flaky_attribute ) : cls . _set_flaky_attribute ( test_item , flaky_attribute , cls . _get_flaky_attribute ( test_item , flaky_attribute ) + 1 ) | Increments the value of an attribute on a flaky test . |
24,120 | def _has_flaky_attributes ( cls , test ) : current_runs = cls . _get_flaky_attribute ( test , FlakyNames . CURRENT_RUNS ) return current_runs is not None | Returns True if the test callable in question is marked as flaky . |
24,121 | def _get_flaky_attributes ( cls , test_item ) : return { attr : cls . _get_flaky_attribute ( test_item , attr , ) for attr in FlakyNames ( ) } | Get all the flaky related attributes from the test . |
24,122 | def connection_ok ( ) : try : urlopen ( Dataset . base_url , timeout = 1 ) return True except HTTPError : return True except URLError : return False | Check web connection . Returns True if web connection is OK False otherwise . |
24,123 | def _download_datasets ( ) : def filepath ( * args ) : return abspath ( join ( dirname ( __file__ ) , '..' , 'vega_datasets' , * args ) ) dataset_listing = { } for name in DATASETS_TO_DOWNLOAD : data = Dataset ( name ) url = data . url filename = filepath ( '_data' , data . filename ) print ( "retrieving data {0} -> {1}" . format ( url , filename ) ) urlretrieve ( url , filename ) dataset_listing [ name ] = '_data/{0}' . format ( data . filename ) with open ( filepath ( 'local_datasets.json' ) , 'w' ) as f : json . dump ( dataset_listing , f , indent = 2 , sort_keys = True ) | Utility to download datasets into package source |
24,124 | def init ( cls , name ) : clsdict = { subcls . name : subcls for subcls in cls . __subclasses__ ( ) if hasattr ( subcls , 'name' ) } return clsdict . get ( name , cls ) ( name ) | Return an instance of this class or an appropriate subclass |
24,125 | def _infodict ( cls , name ) : info = cls . _dataset_info . get ( name , None ) if info is None : raise ValueError ( 'No such dataset {0} exists, ' 'use list_datasets() to get a list ' 'of available datasets.' . format ( name ) ) return info | load the info dictionary for the given name |
24,126 | def raw ( self , use_local = True ) : if use_local and self . is_local : return pkgutil . get_data ( 'vega_datasets' , self . pkg_filename ) else : return urlopen ( self . url ) . read ( ) | Load the raw dataset from remote URL or local file |
24,127 | def run ( self ) : chromedriver_dir = os . path . join ( os . path . abspath ( os . path . dirname ( __file__ ) ) , 'chromedriver_binary' ) chromedriver_filename = find_binary_in_path ( get_chromedriver_filename ( ) ) if chromedriver_filename : print ( "\nChromedriver already installed at {}...\n" . format ( chromedriver_filename ) ) new_filename = os . path . join ( chromedriver_dir , get_chromedriver_filename ( ) ) self . copy_file ( chromedriver_filename , new_filename ) else : chromedriver_bin = get_chromedriver_filename ( ) chromedriver_filename = os . path . join ( chromedriver_dir , chromedriver_bin ) if not os . path . isfile ( chromedriver_filename ) : print ( "\nDownloading Chromedriver...\n" ) if not os . path . isdir ( chromedriver_dir ) : os . mkdir ( chromedriver_dir ) url = get_chromedriver_url ( ) try : response = urlopen ( url ) if response . getcode ( ) != 200 : raise URLError ( 'Not Found' ) except URLError : raise RuntimeError ( 'Failed to download chromedriver archive: {}' . format ( url ) ) archive = BytesIO ( response . read ( ) ) with zipfile . ZipFile ( archive ) as zip_file : zip_file . extract ( chromedriver_bin , chromedriver_dir ) else : print ( "\nChromedriver already installed at {}...\n" . format ( chromedriver_filename ) ) if not os . access ( chromedriver_filename , os . X_OK ) : os . chmod ( chromedriver_filename , 0o744 ) build_py . run ( self ) | Downloads unzips and installs chromedriver . If a chromedriver binary is found in PATH it will be copied otherwise downloaded . |
24,128 | def add_chromedriver_to_path ( ) : chromedriver_dir = os . path . abspath ( os . path . dirname ( __file__ ) ) if 'PATH' not in os . environ : os . environ [ 'PATH' ] = chromedriver_dir elif chromedriver_dir not in os . environ [ 'PATH' ] : os . environ [ 'PATH' ] += utils . get_variable_separator ( ) + chromedriver_dir | Appends the directory of the chromedriver binary file to PATH . |
24,129 | async def _remote_close ( self , exc = None ) : if self . state in ( STATE_CLOSING , STATE_CLOSED ) : return log . info ( "close session: %s" , self . id ) self . state = STATE_CLOSING if exc is not None : self . exception = exc self . interrupted = True try : await self . handler ( SockjsMessage ( MSG_CLOSE , exc ) , self ) except Exception : log . exception ( "Exception in close handler." ) | close session from remote . |
24,130 | def send ( self , msg ) : assert isinstance ( msg , str ) , "String is required" if self . _debug : log . info ( "outgoing message: %s, %s" , self . id , str ( msg ) [ : 200 ] ) if self . state != STATE_OPEN : return self . _feed ( FRAME_MESSAGE , msg ) | send message to client . |
24,131 | def send_frame ( self , frm ) : if self . _debug : log . info ( "outgoing message: %s, %s" , self . id , frm [ : 200 ] ) if self . state != STATE_OPEN : return self . _feed ( FRAME_MESSAGE_BLOB , frm ) | send message frame to client . |
24,132 | async def clear ( self ) : for session in list ( self . values ( ) ) : if session . state != STATE_CLOSED : await session . _remote_closed ( ) self . sessions . clear ( ) super ( SessionManager , self ) . clear ( ) | Manually expire all sessions in the pool . |
24,133 | def new ( cls , alias , cert ) : timestamp = int ( time . time ( ) ) * 1000 tke = cls ( timestamp = timestamp , alias = alias . lower ( ) , cert = cert ) return tke | Helper function to create a new TrustedCertEntry . |
24,134 | def new ( cls , alias , certs , key , key_format = 'pkcs8' ) : timestamp = int ( time . time ( ) ) * 1000 cert_chain = [ ] for cert in certs : cert_chain . append ( ( 'X.509' , cert ) ) pke = cls ( timestamp = timestamp , alias = alias . lower ( ) , cert_chain = cert_chain ) if key_format == 'pkcs8' : private_key_info = decoder . decode ( key , asn1Spec = rfc5208 . PrivateKeyInfo ( ) ) [ 0 ] pke . _algorithm_oid = private_key_info [ 'privateKeyAlgorithm' ] [ 'algorithm' ] . asTuple ( ) pke . pkey = private_key_info [ 'privateKey' ] . asOctets ( ) pke . pkey_pkcs8 = key elif key_format == 'rsa_raw' : pke . _algorithm_oid = RSA_ENCRYPTION_OID private_key_info = rfc5208 . PrivateKeyInfo ( ) private_key_info . setComponentByName ( 'version' , 'v1' ) a = AlgorithmIdentifier ( ) a . setComponentByName ( 'algorithm' , pke . _algorithm_oid ) a . setComponentByName ( 'parameters' , '\x05\x00' ) private_key_info . setComponentByName ( 'privateKeyAlgorithm' , a ) private_key_info . setComponentByName ( 'privateKey' , key ) pke . pkey_pkcs8 = encoder . encode ( private_key_info , ifNotEmpty = True ) pke . pkey = key else : raise UnsupportedKeyFormatException ( "Key Format '%s' is not supported" % key_format ) return pke | Helper function to create a new PrivateKeyEntry . |
24,135 | def encrypt ( self , key_password ) : if not self . is_decrypted ( ) : return encrypted_private_key = sun_crypto . jks_pkey_encrypt ( self . pkey_pkcs8 , key_password ) a = AlgorithmIdentifier ( ) a . setComponentByName ( 'algorithm' , sun_crypto . SUN_JKS_ALGO_ID ) a . setComponentByName ( 'parameters' , '\x05\x00' ) epki = rfc5208 . EncryptedPrivateKeyInfo ( ) epki . setComponentByName ( 'encryptionAlgorithm' , a ) epki . setComponentByName ( 'encryptedData' , encrypted_private_key ) self . _encrypted = encoder . encode ( epki ) self . _pkey = None self . _pkey_pkcs8 = None self . _algorithm_oid = None | Encrypts the private key so that it can be saved to a keystore . |
24,136 | def new ( cls , alias , sealed_obj , algorithm , key , key_size ) : timestamp = int ( time . time ( ) ) * 1000 raise NotImplementedError ( "Creating Secret Keys not implemented" ) | Helper function to create a new SecretKeyEntry . |
24,137 | def new ( cls , store_type , store_entries ) : if store_type not in [ 'jks' , 'jceks' ] : raise UnsupportedKeystoreTypeException ( "The Keystore Type '%s' is not supported" % store_type ) entries = { } for entry in store_entries : if not isinstance ( entry , AbstractKeystoreEntry ) : raise UnsupportedKeystoreEntryTypeException ( "Entries must be a KeyStore Entry" ) if store_type != 'jceks' and isinstance ( entry , SecretKeyEntry ) : raise UnsupportedKeystoreEntryTypeException ( 'Secret Key only allowed in JCEKS keystores' ) alias = entry . alias if alias in entries : raise DuplicateAliasException ( "Found duplicate alias '%s'" % alias ) entries [ alias ] = entry return cls ( store_type , entries ) | Helper function to create a new KeyStore . |
24,138 | def saves ( self , store_password ) : if self . store_type == 'jks' : keystore = MAGIC_NUMBER_JKS elif self . store_type == 'jceks' : raise NotImplementedError ( "Saving of JCEKS keystores is not implemented" ) else : raise UnsupportedKeystoreTypeException ( "Only JKS and JCEKS keystores are supported" ) keystore += b4 . pack ( 2 ) keystore += b4 . pack ( len ( self . entries ) ) for alias , item in self . entries . items ( ) : if isinstance ( item , TrustedCertEntry ) : keystore += self . _write_trusted_cert ( alias , item ) elif isinstance ( item , PrivateKeyEntry ) : keystore += self . _write_private_key ( alias , item , store_password ) elif isinstance ( item , SecretKeyEntry ) : if self . store_type != 'jceks' : raise UnsupportedKeystoreEntryTypeException ( 'Secret Key only allowed in JCEKS keystores' ) raise NotImplementedError ( "Saving of Secret Keys not implemented" ) else : raise UnsupportedKeystoreEntryTypeException ( "Unknown entry type in keystore" ) hash_fn = hashlib . sha1 store_password_utf16 = store_password . encode ( 'utf-16be' ) hash = hash_fn ( store_password_utf16 + SIGNATURE_WHITENING + keystore ) . digest ( ) keystore += hash return keystore | Saves the keystore so that it can be read by other applications . |
24,139 | def _java_is_subclass ( cls , obj , class_name ) : clazz = obj . get_class ( ) while clazz : if clazz . name == class_name : return True clazz = clazz . superclass return False | Given a deserialized JavaObject as returned by the javaobj library determine whether it s a subclass of the given class name . |
24,140 | def _adjust ( a , a_offset , b ) : x = ( b [ - 1 ] & 0xFF ) + ( a [ a_offset + len ( b ) - 1 ] & 0xFF ) + 1 a [ a_offset + len ( b ) - 1 ] = ctypes . c_ubyte ( x ) . value x >>= 8 for i in range ( len ( b ) - 2 , - 1 , - 1 ) : x += ( b [ i ] & 0xFF ) + ( a [ a_offset + i ] & 0xFF ) a [ a_offset + i ] = ctypes . c_ubyte ( x ) . value x >>= 8 | a = bytearray a_offset = int b = bytearray |
24,141 | def jks_pkey_encrypt ( key , password_str ) : password_bytes = password_str . encode ( 'utf-16be' ) iv = os . urandom ( 20 ) key = bytearray ( key ) xoring = zip ( key , _jks_keystream ( iv , password_bytes ) ) data = bytearray ( [ d ^ k for d , k in xoring ] ) check = hashlib . sha1 ( bytes ( password_bytes + key ) ) . digest ( ) return bytes ( iv + data + check ) | Encrypts the private key with password protection algorithm used by JKS keystores . |
24,142 | def _jks_keystream ( iv , password ) : cur = iv while 1 : xhash = hashlib . sha1 ( bytes ( password + cur ) ) cur = bytearray ( xhash . digest ( ) ) for byte in cur : yield byte | Helper keystream generator for _jks_pkey_decrypt |
24,143 | def bitstring_to_bytes ( bitstr ) : bitlist = list ( bitstr ) bits_missing = ( 8 - len ( bitlist ) % 8 ) % 8 bitlist = [ 0 ] * bits_missing + bitlist result = bytearray ( ) for i in range ( 0 , len ( bitlist ) , 8 ) : byte = 0 for j in range ( 8 ) : byte = ( byte << 1 ) | bitlist [ i + j ] result . append ( byte ) return bytes ( result ) | Converts a pyasn1 univ . BitString instance to byte sequence of type bytes . The bit string is interpreted big - endian and is left - padded with 0 bits to form a multiple of 8 . |
24,144 | def _read_bks_key ( cls , data , pos , store_type ) : key_type = b1 . unpack_from ( data , pos ) [ 0 ] pos += 1 key_format , pos = BksKeyStore . _read_utf ( data , pos , kind = "key format" ) key_algorithm , pos = BksKeyStore . _read_utf ( data , pos , kind = "key algorithm" ) key_enc , pos = BksKeyStore . _read_data ( data , pos ) entry = BksKeyEntry ( key_type , key_format , key_algorithm , key_enc , store_type = store_type ) return entry , pos | Given a data stream attempt to parse a stored BKS key entry at the given position and return it as a BksKeyEntry . |
24,145 | def calc_columns_rows ( n ) : num_columns = int ( ceil ( sqrt ( n ) ) ) num_rows = int ( ceil ( n / float ( num_columns ) ) ) return ( num_columns , num_rows ) | Calculate the number of columns and rows required to divide an image into n parts . |
24,146 | def get_combined_size ( tiles ) : columns , rows = calc_columns_rows ( len ( tiles ) ) tile_size = tiles [ 0 ] . image . size return ( tile_size [ 0 ] * columns , tile_size [ 1 ] * rows ) | Calculate combined size of tiles . |
24,147 | def validate_image ( image , number_tiles ) : TILE_LIMIT = 99 * 99 try : number_tiles = int ( number_tiles ) except : raise ValueError ( 'number_tiles could not be cast to integer.' ) if number_tiles > TILE_LIMIT or number_tiles < 2 : raise ValueError ( 'Number of tiles must be between 2 and {} (you \ asked for {}).' . format ( TILE_LIMIT , number_tiles ) ) | Basic sanity checks prior to performing a split . |
24,148 | def validate_image_col_row ( image , col , row ) : SPLIT_LIMIT = 99 try : col = int ( col ) row = int ( row ) except : raise ValueError ( 'columns and rows values could not be cast to integer.' ) if col < 2 : raise ValueError ( 'Number of columns must be between 2 and {} (you \ asked for {}).' . format ( SPLIT_LIMIT , col ) ) if row < 2 : raise ValueError ( 'Number of rows must be between 2 and {} (you \ asked for {}).' . format ( SPLIT_LIMIT , row ) ) | Basic checks for columns and rows values |
24,149 | def slice ( filename , number_tiles = None , col = None , row = None , save = True ) : im = Image . open ( filename ) im_w , im_h = im . size columns = 0 rows = 0 if not number_tiles is None : validate_image ( im , number_tiles ) columns , rows = calc_columns_rows ( number_tiles ) extras = ( columns * rows ) - number_tiles else : validate_image_col_row ( im , col , row ) columns = col rows = row extras = ( columns * rows ) - number_tiles tile_w , tile_h = int ( floor ( im_w / columns ) ) , int ( floor ( im_h / rows ) ) tiles = [ ] number = 1 for pos_y in range ( 0 , im_h - rows , tile_h ) : for pos_x in range ( 0 , im_w - columns , tile_w ) : area = ( pos_x , pos_y , pos_x + tile_w , pos_y + tile_h ) image = im . crop ( area ) position = ( int ( floor ( pos_x / tile_w ) ) + 1 , int ( floor ( pos_y / tile_h ) ) + 1 ) coords = ( pos_x , pos_y ) tile = Tile ( image , number , position , coords ) tiles . append ( tile ) number += 1 if save : save_tiles ( tiles , prefix = get_basename ( filename ) , directory = os . path . dirname ( filename ) ) return tuple ( tiles ) | Split an image into a specified number of tiles . |
24,150 | def generate_filename ( self , directory = os . getcwd ( ) , prefix = 'tile' , format = 'png' , path = True ) : filename = prefix + '_{col:02d}_{row:02d}.{ext}' . format ( col = self . column , row = self . row , ext = format . lower ( ) . replace ( 'jpeg' , 'jpg' ) ) if not path : return filename return os . path . join ( directory , filename ) | Construct and return a filename for this tile . |
24,151 | def get_columns_rows ( filenames ) : tiles = [ ] for filename in filenames : row , column = os . path . splitext ( filename ) [ 0 ] [ - 5 : ] . split ( '_' ) tiles . append ( ( int ( row ) , int ( column ) ) ) rows = [ pos [ 0 ] for pos in tiles ] columns = [ pos [ 1 ] for pos in tiles ] num_rows = max ( rows ) num_columns = max ( columns ) return ( num_columns , num_rows ) | Derive number of columns and rows from filenames . |
24,152 | def get_signed_range ( se , expr ) : size = expr . size ( ) umin = umax = smin = smax = None if not sat_zero ( se , expr ) : try : umin = se . min ( expr , extra_constraints = [ claripy . Extract ( size - 1 , size - 1 , expr ) == 0 ] ) umax = se . max ( expr , extra_constraints = [ claripy . Extract ( size - 1 , size - 1 , expr ) == 0 ] ) return ( umin , umax ) except : pass try : smin = - ( 1 << size ) + se . min ( expr , extra_constraints = [ claripy . Extract ( size - 1 , size - 1 , expr ) == 1 ] ) smax = - ( 1 << size ) + se . max ( expr , extra_constraints = [ claripy . Extract ( size - 1 , size - 1 , expr ) == 1 ] ) return ( smin , smax ) except : pass return None else : try : umax = se . max ( expr , extra_constraints = [ claripy . Extract ( size - 1 , size - 1 , expr ) == 0 ] ) smin = 0 try : smin = - ( 1 << size ) + se . min ( expr , extra_constraints = [ claripy . Extract ( size - 1 , size - 1 , expr ) == 1 ] ) except : pass return ( smin , umax ) except : pass return None | Calculate the range of the expression with signed boundaries |
24,153 | def fn_check_full ( fn ) : status = True if not os . path . isfile ( fn ) : status = False else : try : open ( fn ) except IOError : status = False return status | Check for file existence |
24,154 | def fn_getma ( fn , bnum = 1 , return_ds = False ) : ds = fn_getds ( fn ) out = ds_getma ( ds , bnum = bnum ) if return_ds : out = ( out , ds ) return out | Get masked array from input filename |
24,155 | def b_getma ( b ) : b_ndv = get_ndv_b ( b ) bma = np . ma . masked_values ( b . ReadAsArray ( ) , b_ndv ) return bma | Get masked array from input GDAL Band |
24,156 | def get_sub_dim ( src_ds , scale = None , maxdim = 1024 ) : ns = src_ds . RasterXSize nl = src_ds . RasterYSize maxdim = float ( maxdim ) if scale is None : scale_ns = ns / maxdim scale_nl = nl / maxdim scale = max ( scale_ns , scale_nl ) if scale > 1 : ns = int ( round ( ns / scale ) ) nl = int ( round ( nl / scale ) ) return ns , nl , scale | Compute dimensions of subsampled dataset |
24,157 | def ds_getma_sub ( src_ds , bnum = 1 , scale = None , maxdim = 1024. , return_ds = False ) : b = src_ds . GetRasterBand ( bnum ) b_ndv = get_ndv_b ( b ) ns , nl , scale = get_sub_dim ( src_ds , scale , maxdim ) b_array = b . ReadAsArray ( buf_xsize = ns , buf_ysize = nl ) bma = np . ma . masked_values ( b_array , b_ndv ) out = bma if return_ds : dtype = src_ds . GetRasterBand ( 1 ) . DataType src_ds_sub = gdal . GetDriverByName ( 'MEM' ) . Create ( '' , ns , nl , 1 , dtype ) gt = np . array ( src_ds . GetGeoTransform ( ) ) gt [ [ 1 , 5 ] ] = gt [ [ 1 , 5 ] ] * scale src_ds_sub . SetGeoTransform ( list ( gt ) ) src_ds_sub . SetProjection ( src_ds . GetProjection ( ) ) b = src_ds_sub . GetRasterBand ( 1 ) b . WriteArray ( bma ) b . SetNoDataValue ( b_ndv ) out = ( bma , src_ds_sub ) return out | Load a subsampled array rather than full resolution |
24,158 | def writeGTiff ( a , dst_fn , src_ds = None , bnum = 1 , ndv = None , gt = None , proj = None , create = False , sparse = False ) : from pygeotools . lib . malib import checkma a = checkma ( a , fix = False ) if ndv is not None : a . set_fill_value ( ndv ) driver = gtif_drv nbands = 1 np_dt = a . dtype . name if src_ds is not None : if isinstance ( src_ds , str ) : src_ds = fn_getds ( src_ds ) src_dt = gdal . GetDataTypeName ( src_ds . GetRasterBand ( bnum ) . DataType ) src_gt = src_ds . GetGeoTransform ( ) src_proj = src_ds . GetProjection ( ) if gt is None : gt = src_gt if proj is None : proj = src_proj opt = list ( gdal_opt ) if sparse : opt . remove ( 'COMPRESS=LZW' ) opt . append ( 'COMPRESS=PACKBITS' ) if 'float' in np_dt . lower ( ) and 'COMPRESS=LZW' in opt : opt . append ( 'PREDICTOR=3' ) if not create and ( src_ds is not None ) and ( ( a . shape [ 0 ] == src_ds . RasterYSize ) and ( a . shape [ 1 ] == src_ds . RasterXSize ) and ( np_dt . lower ( ) == src_dt . lower ( ) ) ) and ( src_gt == gt ) and ( src_proj == proj ) : dst_ds = driver . CreateCopy ( dst_fn , src_ds , 0 , options = opt ) else : a_dtype = a . dtype gdal_dtype = np2gdal_dtype ( a_dtype ) if a_dtype . name == 'bool' : a . fill_value = False opt . remove ( 'COMPRESS=LZW' ) opt . append ( 'COMPRESS=DEFLATE' ) dst_ds = driver . Create ( dst_fn , a . shape [ 1 ] , a . shape [ 0 ] , nbands , gdal_dtype , options = opt ) if gt is not None : dst_ds . SetGeoTransform ( gt ) if proj is not None : dst_ds . SetProjection ( proj ) dst_ds . GetRasterBand ( bnum ) . WriteArray ( a . filled ( ) ) dst_ds . GetRasterBand ( bnum ) . SetNoDataValue ( float ( a . fill_value ) ) dst_ds = None | Write input array to disk as GeoTiff |
24,159 | def writevrt ( out_csv , srs = 'EPSG:4326' , x = 'field_1' , y = 'field_2' ) : out_vrt = os . path . splitext ( out_csv ) [ 0 ] + '.vrt' out_csv = os . path . split ( out_csv ) [ - 1 ] f = open ( out_vrt , 'w' ) f . write ( '<OGRVRTDataSource>\n' ) f . write ( ' <OGRVRTLayer name="%s">\n' % os . path . splitext ( out_csv ) [ 0 ] ) f . write ( ' <SrcDataSource>%s</SrcDataSource>\n' % out_csv ) f . write ( ' <GeometryType>wkbPoint</GeometryType>\n' ) f . write ( ' <LayerSRS>%s</LayerSRS>\n' % srs ) f . write ( ' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % ( x , y ) ) f . write ( ' </OGRVRTLayer>\n' ) f . write ( '</OGRVRTDataSource>\n' ) f . close ( ) | Write out a vrt to accompany a csv of points |
24,160 | def np2gdal_dtype ( d ) : dt_dict = gdal_array . codes if isinstance ( d , ( np . ndarray , np . generic ) ) : d = d . dtype if isinstance ( d , np . dtype ) : if d . name == 'int8' : gdal_dt = 1 elif d . name == 'bool' : gdal_dt = 1 else : gdal_dt = list ( dt_dict . keys ( ) ) [ list ( dt_dict . values ( ) ) . index ( d ) ] else : print ( "Input must be NumPy array or NumPy dtype" ) gdal_dt = None return gdal_dt | Get GDAL RasterBand datatype that corresponds with NumPy datatype Input should be numpy array or numpy dtype |
24,161 | def gdal2np_dtype ( b ) : dt_dict = gdal_array . codes if isinstance ( b , str ) : b = gdal . Open ( b ) if isinstance ( b , gdal . Dataset ) : b = b . GetRasterBand ( 1 ) if isinstance ( b , gdal . Band ) : b = b . DataType if isinstance ( b , int ) : np_dtype = dt_dict [ b ] else : np_dtype = None print ( "Input must be GDAL Dataset or RasterBand object" ) return np_dtype | Get NumPy datatype that corresponds with GDAL RasterBand datatype Input can be filename GDAL Dataset GDAL RasterBand or GDAL integer dtype |
24,162 | def get_ndv_b ( b ) : b_ndv = b . GetNoDataValue ( ) if b_ndv is None : ns = b . XSize nl = b . YSize ul = float ( b . ReadAsArray ( 0 , 0 , 1 , 1 ) ) lr = float ( b . ReadAsArray ( ns - 1 , nl - 1 , 1 , 1 ) ) if np . isnan ( ul ) or ul == lr : b_ndv = ul else : b_ndv = 0 elif np . isnan ( b_ndv ) : b_dt = gdal . GetDataTypeName ( b . DataType ) if 'Float' in b_dt : b_ndv = np . nan else : b_ndv = 0 return b_ndv | Get NoData value for GDAL band . |
24,163 | def cpu_count ( logical = True ) : if logical : from multiprocessing import cpu_count ncpu = cpu_count ( ) else : import psutil ncpu = psutil . cpu_count ( logical = False ) return ncpu | Return system CPU count |
24,164 | def getfile ( url , outdir = None ) : fn = os . path . split ( url ) [ - 1 ] if outdir is not None : fn = os . path . join ( outdir , fn ) if not os . path . exists ( fn ) : try : from urllib . request import urlretrieve except ImportError : from urllib import urlretrieve print ( "Retrieving: %s" % url ) urlretrieve ( url , fn ) return fn | Function to fetch files using urllib |
def getfile2(url, auth=None, outdir=None):
    """Function to fetch files using requests.

    Streams the response to disk in 1 MB chunks; optional auth object is
    passed through to requests.
    """
    import requests
    print("Retrieving: %s" % url)
    fn = os.path.split(url)[-1]
    if outdir is not None:
        fn = os.path.join(outdir, fn)
    # Stream so large files are never held fully in memory
    if auth is not None:
        resp = requests.get(url, stream=True, auth=auth)
    else:
        resp = requests.get(url, stream=True)
    chunk_size = 1000000
    with open(fn, 'wb') as fd:
        for chunk in resp.iter_content(chunk_size):
            fd.write(chunk)
def get_auth():
    """Get authorization token for https.

    Prompts interactively for MODSCAG credentials and returns an
    HTTPDigestAuth object.
    """
    import getpass
    from requests.auth import HTTPDigestAuth
    # raw_input on Py2, builtin input on Py3
    input_func = input
    try:
        input_func = raw_input
    except NameError:
        pass
    uname = input_func("MODSCAG Username:")
    pw = getpass.getpass("MODSCAG Password:")
    return HTTPDigestAuth(uname, pw)
def readcsv(fn):
    """Wrapper to read arbitrary csv, check for header.

    Reads the field names of the first row; if every field is a digit
    string, the file is assumed headerless. Returns a NumPy array of the
    numeric rows.
    """
    import csv
    with open(fn, 'r') as f:
        reader = csv.DictReader(f)
        hdr = reader.fieldnames
    # BUGFIX: original used np.all(<generator>), which is always truthy
    # (the generator object itself), so the first row was always treated
    # as data and files with a real header crashed np.loadtxt. Builtin
    # all() evaluates the per-field test correctly.
    skiprows = 1
    if all(field.isdigit() for field in hdr):
        hdr = None
        skiprows = 0
    pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None)
    return pts
def absrange_fltr(dem, rangelim):
    """Absolute range filter.

    Filters on |dem| against (min, max) in rangelim, then transfers the
    resulting mask back onto the signed values.
    """
    filtered = range_fltr(np.ma.abs(dem), *rangelim)
    out = np.ma.array(dem, mask=np.ma.getmaskarray(filtered))
    out.set_fill_value(dem.fill_value)
    return out
def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False):
    """Astropy gaussian filter properly handles convolution with NaN.

    Either size (kernel width in pixels) or sigma may be given; the other
    is derived so the kernel spans ~3 sigma. With origmask, the input's
    mask (or, with fill_interior, only its exterior mask) is re-applied
    to the smoothed result.
    """
    import astropy.convolution
    dem = malib.checkma(dem)
    # Force an odd kernel size of at least 3
    if size is not None:
        size = int(np.floor(size / 2) * 2 + 1)
        size = max(size, 3)
    # Kernel half-extent in units of sigma
    truncate = 3.0
    # Derive whichever of size/sigma was not provided
    if size is not None and sigma is None:
        sigma = (size - 1) / (2 * truncate)
    elif size is None and sigma is not None:
        size = int(np.ceil((sigma * (2 * truncate) + 1) / 2) * 2 - 1)
    elif size is None and sigma is None:
        # Defaults: sigma=1, matching size
        sigma = 1
        size = int(np.ceil((sigma * (2 * truncate) + 1) / 2) * 2 - 1)
    size = max(size, 3)
    kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample')
    print("Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)" % (size, sigma, kernel.array.sum()))
    # NaN-aware convolution: masked cells become NaN and are interpolated
    # from valid neighbors
    dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True)
    if origmask:
        print("Applying original mask")
        if fill_interior:
            # Keep holes interior to the valid data region filled
            mask = malib.maskfill(dem)
        else:
            mask = dem.mask
        dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value)
    # Re-mask any remaining NaN and restore input dtype/fill value
    out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)
    out.set_fill_value(dem.fill_value.astype(dem.dtype))
    return out.astype(dem.dtype)
def gauss_fltr_pyramid(dem, size=None, full=False, origmask=False):
    """Pyramidal downsampling approach for gaussian smoothing.

    Avoids the need for large kernels, very fast. Repeatedly smooths with
    a small kernel and downsamples by 2, log2(size) times. With full=True,
    the result is upsampled back to the input dimensions. Needs testing.
    """
    dem = malib.checkma(dem)
    levels = int(np.floor(np.log2(size)))
    # Pad to dimensions evenly divisible by 2**levels.
    # BUGFIX: cast dim/offset to int — the original float-valued values
    # break np.full and slicing under Python 3 / modern NumPy.
    dim = (np.floor(np.array(dem.shape) / float(2 ** levels) + 1) * (2 ** levels)).astype(int)
    dem2 = np.full(dim, dem.fill_value)
    offset = ((dim - np.array(dem.shape)) // 2).astype(int)
    dem2[offset[0]:dem.shape[0] + offset[0], offset[1]:dem.shape[1] + offset[1]] = dem.data
    dem2 = np.ma.masked_equal(dem2, dem.fill_value)
    # Smooth with a small fixed kernel, then decimate by 2, per level
    for n in range(levels):
        print(dem2.shape)
        dem2 = gauss_fltr_astropy(dem2, size=5)
        dem2 = dem2[::2, ::2]
    if full:
        print("Resizing to original input dimensions")
        from scipy.ndimage import zoom
        for n in range(levels):
            print(dem2.shape)
            dem2 = zoom(dem2, 2, order=1, prefilter=False, cval=dem.fill_value)
        print(dem2.shape)
        # Crop the padding back off
        dem2 = dem2[offset[0]:dem.shape[0] + offset[0], offset[1]:dem.shape[1] + offset[1]]
    if origmask:
        print("Applying original mask")
        maskfill = malib.maskfill(dem)
        dem2 = np.ma.array(dem2, mask=maskfill, fill_value=dem.fill_value)
    return dem2
def gauss_fltr_opencv(dem, size=3, sigma=1):
    """OpenCV Gaussian filter.

    Still propagates NaN values.
    """
    import cv2
    dem = malib.checkma(dem)
    # cv2 works on plain arrays; fill masked cells with NaN first
    blurred = cv2.GaussianBlur(dem.filled(np.nan), (size, size), sigma)
    out = np.ma.fix_invalid(blurred)
    out.set_fill_value(dem.fill_value)
    return out
def gaussfill(dem, size=3, newmask=None):
    """Gaussian filter with filling.

    Smooths the input but keeps original values wherever they were valid,
    so only gaps receive filtered values; an optional new mask is applied
    to the result.
    """
    smooth = gauss_fltr_astropy(dem, size=size)
    valid = ~dem.mask
    # Restore original valid pixels; the filter only fills the gaps
    smooth[valid] = dem[valid]
    if newmask is not None:
        smooth = np.ma.array(smooth, mask=newmask)
    return smooth
def median_fltr(dem, fsize=7, origmask=False):
    """Scipy.ndimage median filter.

    dem is a masked array; fsize is the filter window size; origmask
    re-applies the input mask to the filtered result.
    """
    print("Applying median filter with size %s" % fsize)
    # FIX: import from scipy.ndimage directly — the scipy.ndimage.filters
    # namespace was deprecated and removed in recent SciPy releases
    from scipy.ndimage import median_filter
    dem_filt_med = median_filter(dem.filled(np.nan), fsize)
    # Re-mask NaN produced by filtering over nodata regions
    out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value)
    if origmask:
        out = np.ma.array(out, mask=dem.mask, fill_value=dem.fill_value)
    out.set_fill_value(dem.fill_value)
    return out
def median_fltr_opencv(dem, size=3, iterations=1):
    """OpenCV median filter.

    Applies cv2.medianBlur repeatedly to a masked array, re-masking NaN
    after each pass.
    """
    import cv2
    dem = malib.checkma(dem)
    if size > 5:
        # cv2.medianBlur on float32 input only supports apertures <= 5
        print("Need to implement iteration")
    n = 0
    out = dem
    # NOTE(review): loop runs iterations+1 times (n goes 0..iterations
    # inclusive) — looks like an off-by-one; confirm intent before changing
    while n <= iterations:
        dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size)
        out = np.ma.fix_invalid(dem_cv)
        out.set_fill_value(dem.fill_value)
        n += 1
    return out
24,175 | def circular_mask ( size ) : r = size / 2 c = ( r , r ) y , x = np . ogrid [ - c [ 0 ] : size - c [ 0 ] , - c [ 1 ] : size - c [ 1 ] ] mask = ~ ( x * x + y * y <= r * r ) return mask | Create a circular mask for an array Useful when sampling rasters for a laser shot |
def median_fltr_skimage(dem, radius=3, erode=1, origmask=False):
    """Older skimage.filter.median_filter.

    Optionally erodes small isolated islands first, then applies a
    mask-aware median filter. NOTE(review): uses the long-deprecated
    skimage.filter namespace — only works with old scikit-image.
    """
    dem = malib.checkma(dem)
    # skimage median filter requires float input here
    dem = dem.astype(np.float64)
    if erode > 0:
        print("Eroding islands smaller than %s pixels" % (erode * 2))
        dem = malib.mask_islands(dem, iterations=erode)
    print("Applying median filter with radius %s" % radius)
    import skimage.filter
    dem_filt_med = skimage.filter.median_filter(dem, radius, mask=~dem.mask)
    # The filter writes the minimum value into cells it could not fill;
    # mask those back out
    ndv = np.min(dem_filt_med)
    out = np.ma.masked_less_equal(dem_filt_med, ndv)
    out.set_fill_value(dem.fill_value)
    if origmask:
        print("Applying original mask")
        maskfill = malib.maskfill(dem)
        out = np.ma.array(out, mask=maskfill, fill_value=dem.fill_value)
    return out
def dz_fltr(dem_fn, refdem_fn, perc=None, rangelim=(0, 30), smooth=False):
    """Absolute elevation difference range filter using values from a source raster file and a reference raster file.

    Warps both rasters to the grid of the first input, then delegates to
    dz_fltr_ma with the same perc/rangelim/smooth parameters.
    """
    # Readability check on the reference DEM.
    # FIX: use a context manager — the original leaked an open file handle
    try:
        with open(refdem_fn):
            pass
    except IOError:
        sys.exit('Unable to open reference DEM: %s' % refdem_fn)
    from pygeotools.lib import warplib
    # Warp both inputs to a common grid defined by the first
    dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first')
    dem = iolib.ds_getma(dem_ds)
    refdem = iolib.ds_getma(refdem_ds)
    out = dz_fltr_ma(dem, refdem, perc, rangelim, smooth)
    return out
def dz_fltr_ma(dem, refdem, perc=None, rangelim=(0, 30), smooth=False):
    """Absolute elevation difference range filter using values from a source array and a reference array.

    Masks dem pixels whose difference from refdem falls outside the given
    percentile bounds (perc) and/or absolute range (rangelim). Optional
    Gaussian pre-smoothing of both inputs.
    """
    if smooth:
        refdem = gauss_fltr_astropy(refdem)
        dem = gauss_fltr_astropy(dem)
    dz = refdem - dem
    demmask = np.ma.getmaskarray(dem)
    if perc:
        # Mask dz values outside the given percentile bounds
        dz_perc = malib.calcperc(dz, perc)
        print("Applying dz percentile filter (%s%%, %s%%): (%0.1f, %0.1f)" % (perc[0], perc[1], dz_perc[0], dz_perc[1]))
        perc_mask = ((dz < dz_perc[0]) | (dz > dz_perc[1])).filled(False)
        demmask = (demmask | perc_mask)
    if rangelim:
        # Mask |dz| outside the absolute (min, max) limits
        range_mask = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)
        if False:
            # Disabled experiment: tighter range limit below an elevation cutoff
            cutoff = 150
            rangelim = (0, 80)
            low = (refdem < cutoff).data
            range_mask[low] = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)[low]
        demmask = (demmask | range_mask)
    out = np.ma.array(dem, mask=demmask, fill_value=dem.fill_value)
    return out
def erode_edge(dem, iterations=1):
    """Erode pixels near nodata.

    Expands the nodata mask by `iterations` pixels via binary dilation,
    masking pixels adjacent to gaps/edges.
    """
    import scipy.ndimage as ndimage
    print('Eroding pixels near nodata: %i iterations' % iterations)
    mask = np.ma.getmaskarray(dem)
    # FIX: call ndimage.binary_dilation directly — the ndimage.morphology
    # namespace was deprecated and removed in recent SciPy releases
    mask_dilate = ndimage.binary_dilation(mask, iterations=iterations)
    out = np.ma.array(dem, mask=mask_dilate)
    return out
def butter(dt_list, val, lowpass=1.0):
    """This is framework for a butterworth bandpass for 1D data.

    Needs to be cleaned up and generalized.
    NOTE(review): `lowpass` is unused and nothing is returned — all
    results are discarded; confirm intended outputs before use.
    """
    import scipy.signal
    import matplotlib.pyplot as plt
    # Median sample interval (seconds) -> sampling and Nyquist frequencies
    dt_diff = np.diff(dt_list)
    dt_diff = np.array([dt.total_seconds() for dt in dt_diff])
    dt = malib.fast_median(dt_diff)
    fs = 1. / dt
    nyq = fs / 2.
    if False:
        # Disabled: power spectrum diagnostic plots
        sp_f, sp_psd = scipy.signal.periodogram(val, fs, detrend='linear')
        sp_f_days = 1. / sp_f / 86400.
        plt.figure()
        plt.plot(sp_f, sp_psd)
        plt.plot(sp_f_days, sp_psd)
        plt.semilogy(sp_f_days, sp_psd)
        plt.xlabel('Frequency')
        plt.ylabel('Power')
    print("Filtering tidal signal")
    # Band edges as a fraction of Nyquist: 0.1-day and 1.8-day periods
    f_max = (1. / (86400 * 0.1)) / nyq
    f_min = (1. / (86400 * 1.8)) / nyq
    order = 6
    # Highpass isolates the short-period (tidal) component
    b, a = scipy.signal.butter(order, f_min, btype='highpass')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq / np.pi) * w
    w_f_days = 1 / w_f / 86400.
    val_f_tide = scipy.signal.filtfilt(b, a, val)
    # Lowpass denoises the extracted tidal signal
    b, a = scipy.signal.butter(order, f_max, btype='lowpass')
    w, h = scipy.signal.freqz(b, a, worN=2000)
    w_f = (nyq / np.pi) * w
    w_f_days = 1 / w_f / 86400.
    val_f_tide_denoise = scipy.signal.filtfilt(b, a, val_f_tide)
    # Residual after removing the tidal component (currently discarded)
    val_f_notide = val - val_f_tide
def freq_filt(bma):
    """This is a framework for 2D FFT filtering.

    It has not be tested or finished - might be a dead end.
    NOTE(review): nothing is returned — the filtered array is discarded.
    """
    # Fill masked areas with noise so the FFT is well-behaved
    bf = malib.randomfill(bma)
    import scipy.fftpack
    f = scipy.fftpack.fft2(bf)
    ff = scipy.fftpack.fftshift(f)
    ff_dim = np.array(ff.shape)
    # NOTE(review): under Python 3 true division a, b are floats here,
    # which breaks the slicing below — looks like unported Python 2 code
    a, b = ff_dim / 2
    n = ff_dim.max()
    y, x = np.ogrid[-a:n - a, -b:n - b]
    # Annular mask between radii r1 and r2 (pixels in frequency space)
    r1 = 40
    r2 = 60
    ff_mask = np.ma.make_mask(ff)
    radial_mask = (r1 ** 2 <= x ** 2 + y ** 2) & (x ** 2 + y ** 2 < r2 ** 2)
    ff_mask[:] = radial_mask[a - ff_dim[0] / 2:a + ff_dim[0], b - ff_dim[1] / 2:b + 1 + ff_dim[1] / 2]
    fm = ff * ff_mask
    # Back to the spatial domain; restore the original mask
    bf_filt = scipy.fftpack.ifft2(scipy.fftpack.ifftshift(fm))
    bf_filt = np.ma.masked_array(bf_filt, bma.mask)
def stack_smooth(s_orig, size=7, save=False):
    """Run Gaussian smoothing filter on exising stack object.

    Returns a deep copy of the stack with every layer smoothed; derived
    stats/datestack/trend products are recomputed and optionally saved.
    """
    from copy import deepcopy
    from pygeotools.lib import filtlib
    print("Copying original DEMStack")
    s = deepcopy(s_orig)
    # Record the smoothing in the new stack filename
    s.stack_fn = os.path.splitext(s_orig.stack_fn)[0] + '_smooth%ipx.npz' % size
    print("Smoothing all arrays in stack with %i px gaussian filter" % size)
    n_layers = s.ma_stack.shape[0]
    for idx in range(n_layers):
        print('%i of %i' % (idx + 1, n_layers))
        s.ma_stack[idx] = filtlib.gauss_fltr_astropy(s.ma_stack[idx], size=size)
    # Recompute derived products that are now stale
    if s.stats:
        s.compute_stats()
        if save:
            s.write_stats()
    if s.datestack and s.date_list_o.count() > 1:
        s.compute_dt_stats()
        if save:
            s.write_datestack()
    if s.trend:
        s.compute_trend()
        if save:
            s.write_trend()
    if save:
        s.savestack()
    return s
def stack_clip(s_orig, extent, out_stack_fn=None, copy=True, save=False):
    """Create a new stack object with limited extent from an exising stack object.

    extent is (xmin, ymin, xmax, ymax) in map coordinates. Layers left
    empty by the clip are dropped. Refuses to overwrite the source stack
    file on save.
    """
    if copy or save:
        from copy import deepcopy
        print("Copying original DEMStack")
        s = deepcopy(s_orig)
    else:
        s = s_orig
    from pygeotools.lib import geolib
    gt = s.gt
    s_shape = s.ma_stack.shape[1:3]
    # Map extent corners to pixel coordinates
    min_x_px, max_y_px = geolib.mapToPixel(extent[0], extent[1], gt)
    max_x_px, min_y_px = geolib.mapToPixel(extent[2], extent[3], gt)
    # Clamp to array bounds, round to nearest integer pixel
    min_x_px = int(max(0, min_x_px) + 0.5)
    max_x_px = int(min(s_shape[1], max_x_px) + 0.5)
    min_y_px = int(max(0, min_y_px) + 0.5)
    max_y_px = int(min(s_shape[0], max_y_px) + 0.5)
    x_slice = slice(min_x_px, max_x_px)
    y_slice = slice(min_y_px, max_y_px)
    s.ma_stack = s.ma_stack[:, y_slice, x_slice]
    # Update geotransform origin for the new upper-left corner
    out_ul = geolib.pixelToMap(min_x_px - 0.5, min_y_px - 0.5, gt)
    s.gt[0] = out_ul[0]
    s.gt[3] = out_ul[1]
    s.get_extent()
    # Drop layers that have no valid pixels within the clip window
    count_list = np.array([i.count() for i in s.ma_stack])
    idx = count_list > 0
    s_sub = get_stack_subset(s, idx, out_stack_fn=out_stack_fn, copy=False, save=False)
    print("Orig filename:", s_orig.stack_fn)
    print("Orig extent:", s_orig.extent)
    print("Orig dimensions:", s_orig.ma_stack.shape)
    print("Input extent:", extent)
    print("New filename:", s_sub.stack_fn)
    print("New extent:", s_sub.extent)
    print("New dimensions:", s_sub.ma_stack.shape)
    if save:
        # Never silently clobber the original stack file
        if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s_sub.stack_fn):
            print("Original stack would be overwritten!")
            print("Skipping save")
        else:
            s_sub.save = True
            s_sub.savestack()
    return s_sub
def get_stack_subset(s_orig, idx, out_stack_fn=None, copy=True, save=False):
    """Create a new stack object as a subset of an exising stack object.

    idx is a boolean or integer index array applied to all per-layer
    members (fn_list, dates, ma_stack, source, error). Returns None when
    idx selects nothing.
    """
    idx = np.array(idx)
    if np.any(idx):
        if copy or save:
            from copy import deepcopy
            print("Copying original DEMStack")
            s = deepcopy(s_orig)
        else:
            s = s_orig
        print("Original stack: %i" % len(s_orig.fn_list))
        # Apply the selection to every parallel per-layer member
        s.fn_list = (np.array(s.fn_list)[idx]).tolist()
        print("Filtered stack: %i" % len(s.fn_list))
        s.date_list = s.date_list[idx]
        s.date_list_o = s.date_list_o[idx]
        s.ma_stack = s.ma_stack[idx]
        s.source = (np.array(s.source)[idx]).tolist()
        s.error = s.error[idx]
        s.error_dict_list = np.array(s.error_dict_list)[idx]
        if out_stack_fn is None:
            # Regenerate a filename from the subset contents
            s.stack_fn = None
            s.get_stack_fn()
        else:
            s.stack_fn = out_stack_fn
        # Never silently overwrite the source stack on disk
        if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s.stack_fn):
            print("Warning: new stack has identical filename: %s" % s.stack_fn)
            print("As a precaution, new stack will not be saved")
            save = False
        s.save = save
        # Recompute derived products for the subset
        if s.stats:
            s.compute_stats()
            if save:
                s.write_stats()
        if s.datestack and s.date_list_o.count() > 1:
            s.compute_dt_stats()
            if save:
                s.write_datestack()
        if s.trend:
            s.compute_trend()
            if save:
                s.write_trend()
        if save:
            s.savestack()
    else:
        print("No valid entries for input index array")
        s = None
    return s
def stack_merge(s1, s2, out_stack_fn=None, sort=True, save=False):
    """Merge two stack objects.

    Inputs must share array dimensions, extent, and resolution. All
    per-layer members are concatenated and (optionally) sorted by
    filename basename; derived products are recomputed.
    """
    from pygeotools.lib import geolib
    from copy import deepcopy
    # Inputs must be on an identical grid
    if s1.ma_stack.shape[1:3] != s2.ma_stack.shape[1:3]:
        print(s1.ma_stack.shape)
        print(s2.ma_stack.shape)
        sys.exit('Input stacks must have identical array dimensions')
    if not geolib.extent_compare(s1.extent, s2.extent):
        print(s1.extent)
        print(s2.extent)
        sys.exit('Input stacks must have identical extent')
    if not geolib.res_compare(s1.res, s2.res):
        print(s1.res)
        print(s2.res)
        sys.exit('Input stacks must have identical res')
    print("\nCombining fn_list and ma_stack")
    fn_list = np.array(s1.fn_list + s2.fn_list)
    if sort:
        # Sort all parallel members by filename basename
        sort_idx = np.argsort([os.path.split(x)[-1] for x in fn_list])
    else:
        # Ellipsis acts as an identity index (no reordering)
        sort_idx = Ellipsis
    fn_list = fn_list[sort_idx]
    ma_stack = np.ma.vstack((s1.ma_stack, s2.ma_stack))[sort_idx]
    source = np.array(s1.source + s2.source)[sort_idx]
    error = np.ma.concatenate([s1.error, s2.error])[sort_idx]
    error_dict_list = np.concatenate([s1.error_dict_list, s2.error_dict_list])[sort_idx]
    print("Creating copy for new stack")
    s = deepcopy(s1)
    s.fn_list = list(fn_list)
    s.ma_stack = ma_stack
    s.source = list(source)
    s.error = error
    s.error_dict_list = error_dict_list
    if not out_stack_fn:
        s.get_stack_fn()
    else:
        s.stack_fn = out_stack_fn
    s.get_date_list()
    # Recompute derived products where both inputs carried them
    if s1.datestack and s2.datestack:
        s.compute_dt_stats()
        if save and s1.datestack:
            s.write_datestack()
    if s1.stats and s2.stats:
        s.compute_stats()
        if save and s1.stats:
            s.write_stats()
    if s1.trend and s2.trend:
        s.compute_trend()
        if save and s1.trend:
            s.write_trend()
    if save:
        s.savestack()
    return s
def randomfill(a):
    """Fill masked areas with random noise.

    This is needed for any fft-based operations. Noise is drawn from a
    normal distribution matching the valid data's mean and std.
    """
    a = checkma(a)
    # mask is 1 in nodata cells, 0 elsewhere, so noise lands only in gaps
    noise = a.mask * np.random.normal(a.mean(), a.std(), a.shape)
    return a.filled(0) + noise
def nanfill(a, f_a, *args, **kwargs):
    """Fill masked areas with np.nan.

    Runs function f_a on a NaN-filled view of masked array a, then
    restores the mask and fill value on the result.
    """
    a = checkma(a)
    ndv = a.fill_value
    result = f_a(a.filled(np.nan), *args, **kwargs)
    # Re-mask any NaN the wrapped function produced or preserved
    out = np.ma.fix_invalid(result, copy=False)
    out.set_fill_value(ndv)
    return out
def fast_median(a):
    """Fast median operation for masked array using 50th-percentile."""
    a = checkma(a)
    if a.count() == 0:
        # No valid data: propagate masked sentinel
        return np.ma.masked
    return np.percentile(a.compressed(), 50)
def mad(a, axis=None, c=1.4826, return_med=False):
    """Compute normalized median absolute difference.

    Can also return median, as this can be expensive, and often we want
    both med and nmad. c=1.4826 is the standard consistency constant for
    a Gaussian distribution.
    """
    a = checkma(a)
    if a.count() > 0:
        if axis is None:
            med = fast_median(a)
            out = fast_median(np.fabs(a - med)) * c
        else:
            med = np.ma.median(a, axis=axis)
            # Preserve dims so med broadcasts against a
            med = np.expand_dims(med, axis=axis)
            out = np.ma.median(np.ma.fabs(a - med), axis=axis) * c
    else:
        # FIX: med was previously undefined here, so return_med=True on an
        # all-masked input raised NameError
        med = np.ma.masked
        out = np.ma.masked
    if return_med:
        out = (out, med)
    return out
def calcperc(b, perc=(0.1, 99.9)):
    """Calculate values at specified percentiles.

    Returns (low, high); (0, 0) when the input has no valid data.
    """
    b = checkma(b)
    if b.count() == 0:
        return 0, 0
    valid = b.compressed()
    return np.percentile(valid, perc[0]), np.percentile(valid, perc[1])
def calcperc_sym(b, perc=(0.1, 99.9)):
    """Get symmetrical percentile values.

    Useful for determining clim centered on 0 for difference maps.
    """
    # Largest absolute percentile bound defines the symmetric limit
    bound = np.max(np.abs(calcperc(b, perc)))
    return -bound, bound
def iqr(b, perc=(25, 75)):
    """Inter-quartile range.

    Returns (low, high, high - low) for the given percentile pair.
    """
    b = checkma(b)
    low, high = calcperc(b, perc)
    return low, high, high - low
def iv(b, **kwargs):
    """Quick access to imview for interactive sessions.

    Displays masked array b in a new figure and returns the figure.
    """
    import matplotlib.pyplot as plt
    import imview.imviewer as imview
    b = checkma(b)
    fig = plt.figure()
    imview.bma_fig(fig, b, **kwargs)
    plt.show()
    return fig
def norm_shape(shape):
    """Normalize numpy array shapes so they're always expressed as a tuple, even for one-dimensional shapes.

    Parameters
        shape - an int, or a tuple of ints

    Returns
        a shape tuple
    """
    # Scalar case: a bare int becomes a 1-tuple
    try:
        return (int(shape),)
    except TypeError:
        pass
    # Sequence case: coerce to tuple
    try:
        return tuple(shape)
    except TypeError:
        pass
    raise TypeError('shape must be an int, or a tuple of ints')
def localortho(lon, lat):
    """Create srs for local orthographic projection centered at lat, lon."""
    local_srs = osr.SpatialReference()
    # Orthographic projection centered on the given point, WGS84 datum
    proj4_str = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
    local_srs.ImportFromProj4(proj4_str)
    return local_srs
def geom2localortho(geom):
    """Convert existing geom to local orthographic projection.

    The projection is centered on the geometry centroid; the input geom
    is duplicated, so the original is left untouched.
    """
    cx, cy = geom.Centroid().GetPoint_2D()
    # Geographic coordinates of the centroid define the projection center
    lon, lat, z = cT_helper(cx, cy, 0, geom.GetSpatialReference(), wgs_srs)
    local_srs = localortho(lon, lat)
    local_geom = geom_dup(geom)
    geom_transform(local_geom, local_srs)
    return local_geom
def dd2dms(dd):
    """Convert decimal degrees to degrees, minutes, seconds."""
    negative = dd < 0
    dd = abs(dd)
    # Total seconds -> (minutes, seconds) -> (degrees, minutes)
    m, s = divmod(dd * 3600, 60)
    d, m = divmod(m, 60)
    if negative:
        # NOTE(review): sign is carried on degrees only, so it is lost
        # when |dd| < 1 (d == 0) — confirm whether callers care
        d = -d
    return d, m, s
def dms2dd(d, m, s):
    """Convert degrees, minutes, seconds to decimal degrees."""
    # Sign is taken from the degrees component only
    sign = -1 if d < 0 else 1
    return sign * (int(abs(d)) + float(m) / 60 + float(s) / 3600)
def dd2dm(dd):
    """Convert decimal degrees to degrees, decimal minutes.

    Returns (d, m, s) where m carries the seconds folded in as decimal
    minutes (s is returned unchanged for compatibility).
    """
    d, m, s = dd2dms(dd)
    # BUGFIX: 1 minute = 60 seconds, so fold seconds in with s/60 —
    # the original divided by 3600, under-counting by a factor of 60
    m = m + float(s) / 60
    return d, m, s
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.