idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
223,800
def verify_request ( self , uri , http_method = 'GET' , body = None , headers = None , scopes = None ) : request = Request ( uri , http_method , body , headers ) request . token_type = self . find_token_type ( request ) request . scopes = scopes token_type_handler = self . tokens . get ( request . token_type , self . default_token_type_handler ) log . debug ( 'Dispatching token_type %s request to %r.' , request . token_type , token_type_handler ) return token_type_handler . validate_request ( request ) , request
Validate client code etc return body + headers
143
9
223,801
def find_token_type ( self , request ) : estimates = sorted ( ( ( t . estimate_type ( request ) , n ) for n , t in self . tokens . items ( ) ) , reverse = True ) return estimates [ 0 ] [ 1 ] if len ( estimates ) else None
Token type identification .
62
4
223,802
def load_file ( file_path ) : log . debug ( 'read data from {0}' . format ( file_path ) ) if os . path . exists ( file_path ) : with open ( file_path ) as f : json_data = json . load ( f ) tf_state = Tfstate ( json_data ) tf_state . tfstate_file = file_path return tf_state log . debug ( '{0} is not exist' . format ( file_path ) ) return Tfstate ( )
Read the tfstate file and load its contents parses then as JSON and put the result into the object
116
21
223,803
def generate_cmd_string ( self , cmd , * args , * * kwargs ) : cmds = cmd . split ( ) cmds = [ self . terraform_bin_path ] + cmds for option , value in kwargs . items ( ) : if '_' in option : option = option . replace ( '_' , '-' ) if type ( value ) is list : for sub_v in value : cmds += [ '-{k}={v}' . format ( k = option , v = sub_v ) ] continue if type ( value ) is dict : if 'backend-config' in option : for bk , bv in value . items ( ) : cmds += [ '-backend-config={k}={v}' . format ( k = bk , v = bv ) ] continue # since map type sent in string won't work, create temp var file for # variables, and clean it up later else : filename = self . temp_var_files . create ( value ) cmds += [ '-var-file={0}' . format ( filename ) ] continue # simple flag, if value is IsFlagged : cmds += [ '-{k}' . format ( k = option ) ] continue if value is None or value is IsNotFlagged : continue if type ( value ) is bool : value = 'true' if value else 'false' cmds += [ '-{k}={v}' . format ( k = option , v = value ) ] cmds += args return cmds
for any generate_cmd_string doesn t written as public method of terraform
339
17
223,804
def get_nameid_data ( self ) : nameid = None nameid_data = { } encrypted_id_data_nodes = self . __query_assertion ( '/saml:Subject/saml:EncryptedID/xenc:EncryptedData' ) if encrypted_id_data_nodes : encrypted_data = encrypted_id_data_nodes [ 0 ] key = self . __settings . get_sp_key ( ) nameid = OneLogin_Saml2_Utils . decrypt_element ( encrypted_data , key ) else : nameid_nodes = self . __query_assertion ( '/saml:Subject/saml:NameID' ) if nameid_nodes : nameid = nameid_nodes [ 0 ] is_strict = self . __settings . is_strict ( ) want_nameid = self . __settings . get_security_data ( ) . get ( 'wantNameId' , True ) if nameid is None : if is_strict and want_nameid : raise OneLogin_Saml2_ValidationError ( 'NameID not found in the assertion of the Response' , OneLogin_Saml2_ValidationError . NO_NAMEID ) else : if is_strict and want_nameid and not OneLogin_Saml2_Utils . element_text ( nameid ) : raise OneLogin_Saml2_ValidationError ( 'An empty NameID value found' , OneLogin_Saml2_ValidationError . EMPTY_NAMEID ) nameid_data = { 'Value' : OneLogin_Saml2_Utils . element_text ( nameid ) } for attr in [ 'Format' , 'SPNameQualifier' , 'NameQualifier' ] : value = nameid . get ( attr , None ) if value : if is_strict and attr == 'SPNameQualifier' : sp_data = self . __settings . get_sp_data ( ) sp_entity_id = sp_data . get ( 'entityId' , '' ) if sp_entity_id != value : raise OneLogin_Saml2_ValidationError ( 'The SPNameQualifier value mistmatch the SP entityID value.' , OneLogin_Saml2_ValidationError . SP_NAME_QUALIFIER_NAME_MISMATCH ) nameid_data [ attr ] = value return nameid_data
Gets the NameID Data provided by the SAML Response from the IdP
536
16
223,805
def validate_signed_elements ( self , signed_elements ) : if len ( signed_elements ) > 2 : return False response_tag = '{%s}Response' % OneLogin_Saml2_Constants . NS_SAMLP assertion_tag = '{%s}Assertion' % OneLogin_Saml2_Constants . NS_SAML if ( response_tag in signed_elements and signed_elements . count ( response_tag ) > 1 ) or ( assertion_tag in signed_elements and signed_elements . count ( assertion_tag ) > 1 ) or ( response_tag not in signed_elements and assertion_tag not in signed_elements ) : return False # Check that the signed elements found here, are the ones that will be verified # by OneLogin_Saml2_Utils.validate_sign if response_tag in signed_elements : expected_signature_nodes = OneLogin_Saml2_Utils . query ( self . document , OneLogin_Saml2_Utils . RESPONSE_SIGNATURE_XPATH ) if len ( expected_signature_nodes ) != 1 : raise OneLogin_Saml2_ValidationError ( 'Unexpected number of Response signatures found. SAML Response rejected.' , OneLogin_Saml2_ValidationError . WRONG_NUMBER_OF_SIGNATURES_IN_RESPONSE ) if assertion_tag in signed_elements : expected_signature_nodes = self . __query ( OneLogin_Saml2_Utils . ASSERTION_SIGNATURE_XPATH ) if len ( expected_signature_nodes ) != 1 : raise OneLogin_Saml2_ValidationError ( 'Unexpected number of Assertion signatures found. SAML Response rejected.' , OneLogin_Saml2_ValidationError . WRONG_NUMBER_OF_SIGNATURES_IN_ASSERTION ) return True
Verifies that the document has the expected signed nodes .
430
11
223,806
def __decrypt_assertion ( self , dom ) : key = self . __settings . get_sp_key ( ) debug = self . __settings . is_debug_active ( ) if not key : raise OneLogin_Saml2_Error ( 'No private key available to decrypt the assertion, check settings' , OneLogin_Saml2_Error . PRIVATE_KEY_NOT_FOUND ) encrypted_assertion_nodes = OneLogin_Saml2_Utils . query ( dom , '/samlp:Response/saml:EncryptedAssertion' ) if encrypted_assertion_nodes : encrypted_data_nodes = OneLogin_Saml2_Utils . query ( encrypted_assertion_nodes [ 0 ] , '//saml:EncryptedAssertion/xenc:EncryptedData' ) if encrypted_data_nodes : keyinfo = OneLogin_Saml2_Utils . query ( encrypted_assertion_nodes [ 0 ] , '//saml:EncryptedAssertion/xenc:EncryptedData/ds:KeyInfo' ) if not keyinfo : raise OneLogin_Saml2_ValidationError ( 'No KeyInfo present, invalid Assertion' , OneLogin_Saml2_ValidationError . KEYINFO_NOT_FOUND_IN_ENCRYPTED_DATA ) keyinfo = keyinfo [ 0 ] children = keyinfo . getchildren ( ) if not children : raise OneLogin_Saml2_ValidationError ( 'KeyInfo has no children nodes, invalid Assertion' , OneLogin_Saml2_ValidationError . CHILDREN_NODE_NOT_FOUND_IN_KEYINFO ) for child in children : if 'RetrievalMethod' in child . tag : if child . attrib [ 'Type' ] != 'http://www.w3.org/2001/04/xmlenc#EncryptedKey' : raise OneLogin_Saml2_ValidationError ( 'Unsupported Retrieval Method found' , OneLogin_Saml2_ValidationError . UNSUPPORTED_RETRIEVAL_METHOD ) uri = child . attrib [ 'URI' ] if not uri . startswith ( '#' ) : break uri = uri . split ( '#' ) [ 1 ] encrypted_key = OneLogin_Saml2_Utils . query ( encrypted_assertion_nodes [ 0 ] , './xenc:EncryptedKey[@Id=$tagid]' , None , uri ) if encrypted_key : keyinfo . append ( encrypted_key [ 0 ] ) encrypted_data = encrypted_data_nodes [ 0 ] decrypted = OneLogin_Saml2_Utils . decrypt_element ( encrypted_data , key , debug = debug , inplace = True ) dom . replace ( encrypted_assertion_nodes [ 0 ] , decrypted ) return dom
Decrypts the Assertion
644
7
223,807
def get_metadata ( url , validate_cert = True ) : valid = False if validate_cert : response = urllib2 . urlopen ( url ) else : ctx = ssl . create_default_context ( ) ctx . check_hostname = False ctx . verify_mode = ssl . CERT_NONE response = urllib2 . urlopen ( url , context = ctx ) xml = response . read ( ) if xml : try : dom = fromstring ( xml , forbid_dtd = True ) idp_descriptor_nodes = OneLogin_Saml2_Utils . query ( dom , '//md:IDPSSODescriptor' ) if idp_descriptor_nodes : valid = True except Exception : pass if not valid : raise Exception ( 'Not valid IdP XML found from URL: %s' % ( url ) ) return xml
Gets the metadata XML from the provided URL
198
9
223,808
def print_xmlsec_errors ( filename , line , func , error_object , error_subject , reason , msg ) : info = [ ] if error_object != "unknown" : info . append ( "obj=" + error_object ) if error_subject != "unknown" : info . append ( "subject=" + error_subject ) if msg . strip ( ) : info . append ( "msg=" + msg ) if reason != 1 : info . append ( "errno=%d" % reason ) if info : print ( "%s:%d(%s)" % ( filename , line , func ) , " " . join ( info ) )
Auxiliary method . It overrides the default xmlsec debug message .
140
15
223,809
def get_self_host ( request_data ) : if 'http_host' in request_data : current_host = request_data [ 'http_host' ] elif 'server_name' in request_data : current_host = request_data [ 'server_name' ] else : raise Exception ( 'No hostname defined' ) if ':' in current_host : current_host_data = current_host . split ( ':' ) possible_port = current_host_data [ - 1 ] try : possible_port = float ( possible_port ) current_host = current_host_data [ 0 ] except ValueError : current_host = ':' . join ( current_host_data ) return current_host
Returns the current host .
158
5
223,810
def parse_duration ( duration , timestamp = None ) : assert isinstance ( duration , basestring ) assert timestamp is None or isinstance ( timestamp , int ) timedelta = duration_parser ( duration ) if timestamp is None : data = datetime . utcnow ( ) + timedelta else : data = datetime . utcfromtimestamp ( timestamp ) + timedelta return calendar . timegm ( data . utctimetuple ( ) )
Interprets a ISO8601 duration value relative to a given timestamp .
94
15
223,811
def get_status ( dom ) : status = { } status_entry = OneLogin_Saml2_Utils . query ( dom , '/samlp:Response/samlp:Status' ) if len ( status_entry ) != 1 : raise OneLogin_Saml2_ValidationError ( 'Missing Status on response' , OneLogin_Saml2_ValidationError . MISSING_STATUS ) code_entry = OneLogin_Saml2_Utils . query ( dom , '/samlp:Response/samlp:Status/samlp:StatusCode' , status_entry [ 0 ] ) if len ( code_entry ) != 1 : raise OneLogin_Saml2_ValidationError ( 'Missing Status Code on response' , OneLogin_Saml2_ValidationError . MISSING_STATUS_CODE ) code = code_entry [ 0 ] . values ( ) [ 0 ] status [ 'code' ] = code status [ 'msg' ] = '' message_entry = OneLogin_Saml2_Utils . query ( dom , '/samlp:Response/samlp:Status/samlp:StatusMessage' , status_entry [ 0 ] ) if len ( message_entry ) == 0 : subcode_entry = OneLogin_Saml2_Utils . query ( dom , '/samlp:Response/samlp:Status/samlp:StatusCode/samlp:StatusCode' , status_entry [ 0 ] ) if len ( subcode_entry ) == 1 : status [ 'msg' ] = subcode_entry [ 0 ] . values ( ) [ 0 ] elif len ( message_entry ) == 1 : status [ 'msg' ] = OneLogin_Saml2_Utils . element_text ( message_entry [ 0 ] ) return status
Gets Status from a Response .
388
7
223,812
def write_temp_file ( content ) : f_temp = NamedTemporaryFile ( delete = True ) f_temp . file . write ( content ) f_temp . file . flush ( ) return f_temp
Writes some content into a temporary file and returns it .
46
12
223,813
def __add_default_values ( self ) : self . __sp . setdefault ( 'assertionConsumerService' , { } ) self . __sp [ 'assertionConsumerService' ] . setdefault ( 'binding' , OneLogin_Saml2_Constants . BINDING_HTTP_POST ) self . __sp . setdefault ( 'attributeConsumingService' , { } ) self . __sp . setdefault ( 'singleLogoutService' , { } ) self . __sp [ 'singleLogoutService' ] . setdefault ( 'binding' , OneLogin_Saml2_Constants . BINDING_HTTP_REDIRECT ) # Related to nameID self . __sp . setdefault ( 'NameIDFormat' , OneLogin_Saml2_Constants . NAMEID_UNSPECIFIED ) self . __security . setdefault ( 'nameIdEncrypted' , False ) # Metadata format self . __security . setdefault ( 'metadataValidUntil' , None ) # None means use default self . __security . setdefault ( 'metadataCacheDuration' , None ) # None means use default # Sign provided self . __security . setdefault ( 'authnRequestsSigned' , False ) self . __security . setdefault ( 'logoutRequestSigned' , False ) self . __security . setdefault ( 'logoutResponseSigned' , False ) self . __security . setdefault ( 'signMetadata' , False ) # Sign expected self . __security . setdefault ( 'wantMessagesSigned' , False ) self . __security . setdefault ( 'wantAssertionsSigned' , False ) # NameID element expected self . __security . setdefault ( 'wantNameId' , True ) # SAML responses with a InResponseTo attribute not rejected when requestId not passed self . __security . setdefault ( 'rejectUnsolicitedResponsesWithInResponseTo' , False ) # Encrypt expected self . __security . setdefault ( 'wantAssertionsEncrypted' , False ) self . __security . setdefault ( 'wantNameIdEncrypted' , False ) # Signature Algorithm self . __security . setdefault ( 'signatureAlgorithm' , OneLogin_Saml2_Constants . RSA_SHA1 ) # Digest Algorithm self . __security . setdefault ( 'digestAlgorithm' , OneLogin_Saml2_Constants . SHA1 ) # AttributeStatement required by default self . __security . setdefault ( 'wantAttributeStatement' , True ) self . __idp . setdefault ( 'x509cert' , '' ) self . __idp . setdefault ( 'certFingerprint' , '' ) self . __idp . setdefault ( 'certFingerprintAlgorithm' , 'sha1' ) self . __sp . setdefault ( 'x509cert' , '' ) self . __sp . setdefault ( 'privateKey' , '' ) self . __security . setdefault ( 'requestedAuthnContext' , True ) self . __security . setdefault ( 'failOnAuthnContextMismatch' , False )
Add default values if the settings info is not complete
677
10
223,814
def login ( self , return_to = None , force_authn = False , is_passive = False , set_nameid_policy = True , name_id_value_req = None ) : authn_request = OneLogin_Saml2_Authn_Request ( self . __settings , force_authn , is_passive , set_nameid_policy , name_id_value_req ) self . __last_request = authn_request . get_xml ( ) self . __last_request_id = authn_request . get_id ( ) saml_request = authn_request . get_request ( ) parameters = { 'SAMLRequest' : saml_request } if return_to is not None : parameters [ 'RelayState' ] = return_to else : parameters [ 'RelayState' ] = OneLogin_Saml2_Utils . get_self_url_no_query ( self . __request_data ) security = self . __settings . get_security_data ( ) if security . get ( 'authnRequestsSigned' , False ) : parameters [ 'SigAlg' ] = security [ 'signatureAlgorithm' ] parameters [ 'Signature' ] = self . build_request_signature ( saml_request , parameters [ 'RelayState' ] , security [ 'signatureAlgorithm' ] ) return self . redirect_to ( self . get_sso_url ( ) , parameters )
Initiates the SSO process .
326
8
223,815
def logout ( self , return_to = None , name_id = None , session_index = None , nq = None , name_id_format = None ) : slo_url = self . get_slo_url ( ) if slo_url is None : raise OneLogin_Saml2_Error ( 'The IdP does not support Single Log Out' , OneLogin_Saml2_Error . SAML_SINGLE_LOGOUT_NOT_SUPPORTED ) if name_id is None and self . __nameid is not None : name_id = self . __nameid if name_id_format is None and self . __nameid_format is not None : name_id_format = self . __nameid_format logout_request = OneLogin_Saml2_Logout_Request ( self . __settings , name_id = name_id , session_index = session_index , nq = nq , name_id_format = name_id_format ) self . __last_request = logout_request . get_xml ( ) self . __last_request_id = logout_request . id saml_request = logout_request . get_request ( ) parameters = { 'SAMLRequest' : logout_request . get_request ( ) } if return_to is not None : parameters [ 'RelayState' ] = return_to else : parameters [ 'RelayState' ] = OneLogin_Saml2_Utils . get_self_url_no_query ( self . __request_data ) security = self . __settings . get_security_data ( ) if security . get ( 'logoutRequestSigned' , False ) : parameters [ 'SigAlg' ] = security [ 'signatureAlgorithm' ] parameters [ 'Signature' ] = self . build_request_signature ( saml_request , parameters [ 'RelayState' ] , security [ 'signatureAlgorithm' ] ) return self . redirect_to ( slo_url , parameters )
Initiates the SLO process .
451
8
223,816
def get_slo_url ( self ) : url = None idp_data = self . __settings . get_idp_data ( ) if 'singleLogoutService' in idp_data . keys ( ) and 'url' in idp_data [ 'singleLogoutService' ] : url = idp_data [ 'singleLogoutService' ] [ 'url' ] return url
Gets the SLO URL .
87
7
223,817
def add_pyspark_path ( ) : try : spark_home = os . environ [ 'SPARK_HOME' ] sys . path . append ( os . path . join ( spark_home , 'python' ) ) py4j_src_zip = glob ( os . path . join ( spark_home , 'python' , 'lib' , 'py4j-*-src.zip' ) ) if len ( py4j_src_zip ) == 0 : raise ValueError ( 'py4j source archive not found in %s' % os . path . join ( spark_home , 'python' , 'lib' ) ) else : py4j_src_zip = sorted ( py4j_src_zip ) [ : : - 1 ] sys . path . append ( py4j_src_zip [ 0 ] ) except KeyError : logging . error ( """SPARK_HOME was not set. please set it. e.g. SPARK_HOME='/home/...' ./bin/pyspark [program]""" ) exit ( - 1 ) except ValueError as e : logging . error ( str ( e ) ) exit ( - 1 )
Add PySpark to the library path based on the value of SPARK_HOME .
255
18
223,818
def datetime_to_nanos ( dt ) : if isinstance ( dt , pd . Timestamp ) : return dt . value elif isinstance ( dt , str ) : return pd . Timestamp ( dt ) . value elif isinstance ( dt , long ) : return dt elif isinstance ( dt , datetime ) : return long ( dt . strftime ( "%s%f" ) ) * 1000 raise ValueError
Accepts a string Pandas Timestamp or long and returns nanos since the epoch .
102
18
223,819
def uniform ( start , end = None , periods = None , freq = None , sc = None ) : dtmodule = sc . _jvm . com . cloudera . sparkts . __getattr__ ( 'DateTimeIndex$' ) . __getattr__ ( 'MODULE$' ) if freq is None : raise ValueError ( "Missing frequency" ) elif end is None and periods == None : raise ValueError ( "Need an end date or number of periods" ) elif end is not None : return DateTimeIndex ( dtmodule . uniformFromInterval ( datetime_to_nanos ( start ) , datetime_to_nanos ( end ) , freq . _jfreq ) ) else : return DateTimeIndex ( dtmodule . uniform ( datetime_to_nanos ( start ) , periods , freq . _jfreq ) )
Instantiates a uniform DateTimeIndex .
194
9
223,820
def _zdt_to_nanos ( self , zdt ) : instant = zdt . toInstant ( ) return instant . getNano ( ) + instant . getEpochSecond ( ) * 1000000000
Extracts nanoseconds from a ZonedDateTime
45
13
223,821
def datetime_at_loc ( self , loc ) : return pd . Timestamp ( self . _zdt_to_nanos ( self . _jdt_index . dateTimeAtLoc ( loc ) ) )
Returns the timestamp at the given integer location as a Pandas Timestamp .
48
15
223,822
def islice ( self , start , end ) : jdt_index = self . _jdt_index . islice ( start , end ) return DateTimeIndex ( jdt_index = jdt_index )
Returns a new DateTimeIndex containing a subslice of the timestamps in this index as specified by the given integer start and end locations .
48
30
223,823
def fit_model ( y , x , yMaxLag , xMaxLag , includesOriginalX = True , noIntercept = False , sc = None ) : assert sc != None , "Missing SparkContext" jvm = sc . _jvm jmodel = jvm . com . cloudera . sparkts . models . AutoregressionX . fitModel ( _nparray2breezevector ( sc , y . toArray ( ) ) , _nparray2breezematrix ( sc , x . toArray ( ) ) , yMaxLag , xMaxLag , includesOriginalX , noIntercept ) return ARXModel ( jmodel = jmodel , sc = sc )
Fit an autoregressive model with additional exogenous variables . The model predicts a value at time t of a dependent variable Y as a function of previous values of Y and a combination of previous values of exogenous regressors X_i and current values of exogenous regressors X_i . This is a generalization of an AR model which is simply an ARX with no exogenous regressors . The fitting procedure here is the same using least squares . Note that all lags up to the maxlag are included . In the case of the dependent variable the max lag is yMaxLag while for the exogenous variables the max lag is xMaxLag with which each column in the original matrix provided is lagged accordingly .
155
148
223,824
def time_series_rdd_from_pandas_series_rdd ( series_rdd ) : first = series_rdd . first ( ) dt_index = irregular ( first [ 1 ] . index , series_rdd . ctx ) return TimeSeriesRDD ( dt_index , series_rdd . mapValues ( lambda x : x . values ) )
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects .
84
18
223,825
def time_series_rdd_from_observations ( dt_index , df , ts_col , key_col , val_col ) : jvm = df . _sc . _jvm jtsrdd = jvm . com . cloudera . sparkts . api . java . JavaTimeSeriesRDDFactory . timeSeriesRDDFromObservations ( dt_index . _jdt_index , df . _jdf , ts_col , key_col , val_col ) return TimeSeriesRDD ( None , None , jtsrdd , df . _sc )
Instantiates a TimeSeriesRDD from a DataFrame of observations .
132
15
223,826
def map_series ( self , fn , dt_index = None ) : if dt_index == None : dt_index = self . index ( ) return TimeSeriesRDD ( dt_index , self . map ( fn ) )
Returns a TimeSeriesRDD with a transformation applied to all the series in this RDD .
53
19
223,827
def to_instants ( self ) : jrdd = self . _jtsrdd . toInstants ( - 1 ) . map ( self . ctx . _jvm . com . cloudera . sparkts . InstantToBytes ( ) ) return RDD ( jrdd , self . ctx , _InstantDeserializer ( ) )
Returns an RDD of instants each a horizontal slice of this TimeSeriesRDD at a time .
77
21
223,828
def to_instants_dataframe ( self , sql_ctx ) : ssql_ctx = sql_ctx . _ssql_ctx jdf = self . _jtsrdd . toInstantsDataFrame ( ssql_ctx , - 1 ) return DataFrame ( jdf , sql_ctx )
Returns a DataFrame of instants each a horizontal slice of this TimeSeriesRDD at a time .
66
21
223,829
def to_observations_dataframe ( self , sql_ctx , ts_col = 'timestamp' , key_col = 'key' , val_col = 'value' ) : ssql_ctx = sql_ctx . _ssql_ctx jdf = self . _jtsrdd . toObservationsDataFrame ( ssql_ctx , ts_col , key_col , val_col ) return DataFrame ( jdf , sql_ctx )
Returns a DataFrame of observations each containing a timestamp a key and a value .
102
16
223,830
def to_pandas_series_rdd ( self ) : pd_index = self . index ( ) . to_pandas_index ( ) return self . map ( lambda x : ( x [ 0 ] , pd . Series ( x [ 1 ] , pd_index ) ) )
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
66
17
223,831
def to_pandas_dataframe ( self ) : pd_index = self . index ( ) . to_pandas_index ( ) return pd . DataFrame . from_items ( self . collect ( ) ) . set_index ( pd_index )
Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame . Each record in the RDD becomes and column and the DataFrame is indexed with a DatetimeIndex generated from this RDD s index .
60
49
223,832
def _SetHeader ( self , values ) : if self . _values and len ( values ) != len ( self . _values ) : raise ValueError ( 'Header values not equal to existing data width.' ) if not self . _values : for _ in range ( len ( values ) ) : self . _values . append ( None ) self . _keys = list ( values ) self . _BuildIndex ( )
Set the row s header from a list .
87
9
223,833
def _SetValues ( self , values ) : def _ToStr ( value ) : """Convert individul list entries to string.""" if isinstance ( value , ( list , tuple ) ) : result = [ ] for val in value : result . append ( str ( val ) ) return result else : return str ( value ) # Row with identical header can be copied directly. if isinstance ( values , Row ) : if self . _keys != values . header : raise TypeError ( 'Attempt to append row with mismatched header.' ) self . _values = copy . deepcopy ( values . values ) elif isinstance ( values , dict ) : for key in self . _keys : if key not in values : raise TypeError ( 'Dictionary key mismatch with row.' ) for key in self . _keys : self [ key ] = _ToStr ( values [ key ] ) elif isinstance ( values , list ) or isinstance ( values , tuple ) : if len ( values ) != len ( self . _values ) : raise TypeError ( 'Supplied list length != row length' ) for ( index , value ) in enumerate ( values ) : self . _values [ index ] = _ToStr ( value ) else : raise TypeError ( 'Supplied argument must be Row, dict or list, not %s' , type ( values ) )
Set values from supplied dictionary or list .
288
8
223,834
def Filter ( self , function = None ) : flat = lambda x : x if isinstance ( x , str ) else '' . join ( [ flat ( y ) for y in x ] ) if function is None : function = lambda row : bool ( flat ( row . values ) ) new_table = self . __class__ ( ) # pylint: disable=protected-access new_table . _table = [ self . header ] for row in self : if function ( row ) is True : new_table . Append ( row ) return new_table
Construct Textable from the rows of which the function returns true .
118
13
223,835
def _GetTable ( self ) : result = [ ] # Avoid the global lookup cost on each iteration. lstr = str for row in self . _table : result . append ( '%s\n' % self . separator . join ( lstr ( v ) for v in row ) ) return '' . join ( result )
Returns table with column headers and separators .
70
9
223,836
def _SetTable ( self , table ) : if not isinstance ( table , TextTable ) : raise TypeError ( 'Not an instance of TextTable.' ) self . Reset ( ) self . _table = copy . deepcopy ( table . _table ) # pylint: disable=W0212 # Point parent table of each row back ourselves. for row in self : row . table = self
Sets table with column headers and separators .
84
10
223,837
def _TextJustify ( self , text , col_size ) : result = [ ] if '\n' in text : for paragraph in text . split ( '\n' ) : result . extend ( self . _TextJustify ( paragraph , col_size ) ) return result wrapper = textwrap . TextWrapper ( width = col_size - 2 , break_long_words = False , expand_tabs = False ) try : text_list = wrapper . wrap ( text ) except ValueError : raise TableError ( 'Field too small (minimum width: 3)' ) if not text_list : return [ ' ' * col_size ] for current_line in text_list : stripped_len = len ( terminal . StripAnsiText ( current_line ) ) ansi_color_adds = len ( current_line ) - stripped_len # +2 for white space on either side. if stripped_len + 2 > col_size : raise TableError ( 'String contains words that do not fit in column.' ) result . append ( ' %-*s' % ( col_size - 1 + ansi_color_adds , current_line ) ) return result
Formats text within column with white space padding .
253
10
223,838
def index ( self , name = None ) : # pylint: disable=C6409 try : return self . header . index ( name ) except ValueError : raise TableError ( 'Unknown index name %s.' % name )
Returns index number of supplied column name .
49
8
223,839
def _ParseCmdItem ( self , cmd_input , template_file = None ) : # Build FSM machine from the template. fsm = textfsm . TextFSM ( template_file ) if not self . _keys : self . _keys = set ( fsm . GetValuesByAttrib ( 'Key' ) ) # Pass raw data through FSM. table = texttable . TextTable ( ) table . header = fsm . header # Fill TextTable from record entries. for record in fsm . ParseText ( cmd_input ) : table . Append ( record ) return table
Creates Texttable with output of command .
129
9
223,840
def _Completion ( self , match ) : # pylint: disable=C6114 # Strip the outer '[[' & ']]' and replace with ()? regexp pattern. word = str ( match . group ( ) ) [ 2 : - 2 ] return '(' + ( '(' ) . join ( word ) + ')?' * len ( word )
r Replaces double square brackets with variable length completion .
77
11
223,841
def main ( argv = None ) : if argv is None : argv = sys . argv try : opts , args = getopt . getopt ( argv [ 1 : ] , 'h' , [ 'help' ] ) except getopt . error as msg : raise Usage ( msg ) for opt , _ in opts : if opt in ( '-h' , '--help' ) : print ( __doc__ ) print ( help_msg ) return 0 if not args or len ( args ) > 4 : raise Usage ( 'Invalid arguments.' ) # If we have an argument, parse content of file and display as a template. # Template displayed will match input template, minus any comment lines. with open ( args [ 0 ] , 'r' ) as template : fsm = TextFSM ( template ) print ( 'FSM Template:\n%s\n' % fsm ) if len ( args ) > 1 : # Second argument is file with example cli input. # Prints parsed tabular result. with open ( args [ 1 ] , 'r' ) as f : cli_input = f . read ( ) table = fsm . ParseText ( cli_input ) print ( 'FSM Table:' ) result = str ( fsm . header ) + '\n' for line in table : result += str ( line ) + '\n' print ( result , end = '' ) if len ( args ) > 2 : # Compare tabular result with data in third file argument. # Exit value indicates if processed data matched expected result. with open ( args [ 2 ] , 'r' ) as f : ref_table = f . read ( ) if ref_table != result : print ( 'Data mis-match!' ) return 1 else : print ( 'Data match!' )
Validate text parsed with FSM or validate an FSM via command line .
386
16
223,842
def ValidOptions ( cls ) : valid_options = [ ] for obj_name in dir ( cls ) : obj = getattr ( cls , obj_name ) if inspect . isclass ( obj ) and issubclass ( obj , cls . OptionBase ) : valid_options . append ( obj_name ) return valid_options
Returns a list of valid option names .
73
8
223,843
def Header ( self ) : # Call OnGetValue on options. _ = [ option . OnGetValue ( ) for option in self . options ] return self . name
Fetch the header name of this Value .
35
9
223,844
def _AddOption ( self , name ) : # Check for duplicate option declaration if name in [ option . name for option in self . options ] : raise TextFSMTemplateError ( 'Duplicate option "%s"' % name ) # Create the option object try : option = self . _options_cls . GetOption ( name ) ( self ) except AttributeError : raise TextFSMTemplateError ( 'Unknown option "%s"' % name ) self . options . append ( option )
Add an option to this Value .
103
7
223,845
def Reset ( self ) : # Current state is Start state. self . _cur_state = self . states [ 'Start' ] self . _cur_state_name = 'Start' # Clear table of results and current record. self . _result = [ ] self . _ClearAllRecord ( )
Preserves FSM but resets starting state and current record .
64
13
223,846
def _GetHeader ( self ) : header = [ ] for value in self . values : try : header . append ( value . Header ( ) ) except SkipValue : continue return header
Returns header .
38
3
223,847
def _GetValue ( self , name ) : for value in self . values : if value . name == name : return value
Returns the TextFSMValue object natching the requested name .
26
13
223,848
def _AppendRecord ( self ) : # If no Values then don't output. if not self . values : return cur_record = [ ] for value in self . values : try : value . OnSaveRecord ( ) except SkipRecord : self . _ClearRecord ( ) return except SkipValue : continue # Build current record into a list. cur_record . append ( value . value ) # If no Values in template or whole record is empty then don't output. if len ( cur_record ) == ( cur_record . count ( None ) + cur_record . count ( [ ] ) ) : return # Replace any 'None' entries with null string ''. while None in cur_record : cur_record [ cur_record . index ( None ) ] = '' self . _result . append ( cur_record ) self . _ClearRecord ( )
Adds current record to result if well formed .
180
9
223,849
def _Parse ( self , template ) : if not template : raise TextFSMTemplateError ( 'Null template.' ) # Parse header with Variables. self . _ParseFSMVariables ( template ) # Parse States. while self . _ParseFSMState ( template ) : pass # Validate destination states. self . _ValidateFSM ( )
Parses template file for FSM structure .
80
10
223,850
def _ParseFSMVariables ( self , template ) : self . values = [ ] for line in template : self . _line_num += 1 line = line . rstrip ( ) # Blank line signifies end of Value definitions. if not line : return # Skip commented lines. if self . comment_regex . match ( line ) : continue if line . startswith ( 'Value ' ) : try : value = TextFSMValue ( fsm = self , max_name_len = self . MAX_NAME_LEN , options_class = self . _options_cls ) value . Parse ( line ) except TextFSMTemplateError as error : raise TextFSMTemplateError ( '%s Line %s.' % ( error , self . _line_num ) ) if value . name in self . header : raise TextFSMTemplateError ( "Duplicate declarations for Value '%s'. Line: %s." % ( value . name , self . _line_num ) ) try : self . _ValidateOptions ( value ) except TextFSMTemplateError as error : raise TextFSMTemplateError ( '%s Line %s.' % ( error , self . _line_num ) ) self . values . append ( value ) self . value_map [ value . name ] = value . template # The line has text but without the 'Value ' prefix. elif not self . values : raise TextFSMTemplateError ( 'No Value definitions found.' ) else : raise TextFSMTemplateError ( 'Expected blank line after last Value entry. Line: %s.' % ( self . _line_num ) )
Extracts Variables from start of template file .
351
11
223,851
def _ParseFSMState ( self , template ) : if not template : return state_name = '' # Strip off extra white space lines (including comments). for line in template : self . _line_num += 1 line = line . rstrip ( ) # First line is state definition if line and not self . comment_regex . match ( line ) : # Ensure statename has valid syntax and is not a reserved word. if ( not self . state_name_re . match ( line ) or len ( line ) > self . MAX_NAME_LEN or line in TextFSMRule . LINE_OP or line in TextFSMRule . RECORD_OP ) : raise TextFSMTemplateError ( "Invalid state name: '%s'. Line: %s" % ( line , self . _line_num ) ) state_name = line if state_name in self . states : raise TextFSMTemplateError ( "Duplicate state name: '%s'. Line: %s" % ( line , self . _line_num ) ) self . states [ state_name ] = [ ] self . state_list . append ( state_name ) break # Parse each rule in the state. for line in template : self . _line_num += 1 line = line . rstrip ( ) # Finish rules processing on blank line. if not line : break if self . comment_regex . match ( line ) : continue # A rule within a state, starts with whitespace if not ( line . startswith ( ' ^' ) or line . startswith ( '\t^' ) ) : raise TextFSMTemplateError ( "Missing white space or carat ('^') before rule. Line: %s" % self . _line_num ) self . states [ state_name ] . append ( TextFSMRule ( line , self . _line_num , self . value_map ) ) return state_name
Extracts State and associated Rules from body of template file .
416
13
223,852
def _ValidateFSM ( self ) : # Must have 'Start' state. if 'Start' not in self . states : raise TextFSMTemplateError ( "Missing state 'Start'." ) # 'End/EOF' state (if specified) must be empty. if self . states . get ( 'End' ) : raise TextFSMTemplateError ( "Non-Empty 'End' state." ) if self . states . get ( 'EOF' ) : raise TextFSMTemplateError ( "Non-Empty 'EOF' state." ) # Remove 'End' state. if 'End' in self . states : del self . states [ 'End' ] self . state_list . remove ( 'End' ) # Ensure jump states are all valid. for state in self . states : for rule in self . states [ state ] : if rule . line_op == 'Error' : continue if not rule . new_state or rule . new_state in ( 'End' , 'EOF' ) : continue if rule . new_state not in self . states : raise TextFSMTemplateError ( "State '%s' not found, referenced in state '%s'" % ( rule . new_state , state ) ) return True
Checks state names and destinations for validity .
266
9
223,853
def ParseText ( self , text , eof = True ) : lines = [ ] if text : lines = text . splitlines ( ) for line in lines : self . _CheckLine ( line ) if self . _cur_state_name in ( 'End' , 'EOF' ) : break if self . _cur_state_name != 'End' and 'EOF' not in self . states and eof : # Implicit EOF performs Next.Record operation. # Suppressed if Null EOF state is instantiated. self . _AppendRecord ( ) return self . _result
Passes CLI output through FSM and returns list of tuples .
128
14
223,854
def ParseTextToDicts ( self , * args , * * kwargs ) : result_lists = self . ParseText ( * args , * * kwargs ) result_dicts = [ ] for row in result_lists : result_dicts . append ( dict ( zip ( self . header , row ) ) ) return result_dicts
Calls ParseText and turns the result into list of dicts .
77
15
223,855
def _AssignVar ( self , matched , value ) : _value = self . _GetValue ( value ) if _value is not None : _value . AssignVar ( matched . group ( value ) )
Assigns variable into current record from a matched rule .
45
12
223,856
def _Operations ( self , rule , line ) : # First process the Record operators. if rule . record_op == 'Record' : self . _AppendRecord ( ) elif rule . record_op == 'Clear' : # Clear record. self . _ClearRecord ( ) elif rule . record_op == 'Clearall' : # Clear all record entries. self . _ClearAllRecord ( ) # Lastly process line operators. if rule . line_op == 'Error' : if rule . new_state : raise TextFSMError ( 'Error: %s. Rule Line: %s. Input Line: %s.' % ( rule . new_state , rule . line_num , line ) ) raise TextFSMError ( 'State Error raised. Rule Line: %s. Input Line: %s' % ( rule . line_num , line ) ) elif rule . line_op == 'Continue' : # Continue with current line without returning to the start of the state. return False # Back to start of current state with a new line. return True
Operators on the data record .
229
7
223,857
def GetValuesByAttrib ( self , attribute ) : if attribute not in self . _options_cls . ValidOptions ( ) : raise ValueError ( "'%s': Not a valid attribute." % attribute ) result = [ ] for value in self . values : if attribute in value . OptionNames ( ) : result . append ( value . name ) return result
Returns the list of values that have a particular attribute .
76
11
223,858
def _AnsiCmd ( command_list ) : if not isinstance ( command_list , list ) : raise ValueError ( 'Invalid list: %s' % command_list ) # Checks that entries are valid SGR names. # No checking is done for sequences that are correct but 'nonsensical'. for sgr in command_list : if sgr . lower ( ) not in SGR : raise ValueError ( 'Invalid or unsupported SGR name: %s' % sgr ) # Convert to numerical strings. command_str = [ str ( SGR [ x . lower ( ) ] ) for x in command_list ] # Wrap values in Ansi escape sequence (CSI prefix & SGR suffix). return '\033[%sm' % ( ';' . join ( command_str ) )
Takes a list of SGR values and formats them as an ANSI escape sequence .
172
18
223,859
def TerminalSize ( ) : try : with open ( os . ctermid ( ) , 'r' ) as tty_instance : length_width = struct . unpack ( 'hh' , fcntl . ioctl ( tty_instance . fileno ( ) , termios . TIOCGWINSZ , '1234' ) ) except ( IOError , OSError ) : try : length_width = ( int ( os . environ [ 'LINES' ] ) , int ( os . environ [ 'COLUMNS' ] ) ) except ( ValueError , KeyError ) : length_width = ( 24 , 80 ) return length_width
Returns terminal length and width as a tuple .
144
9
223,860
def main ( argv = None ) : if argv is None : argv = sys . argv try : opts , args = getopt . getopt ( argv [ 1 : ] , 'dhs' , [ 'nodelay' , 'help' , 'size' ] ) except getopt . error as msg : raise Usage ( msg ) # Print usage and return, regardless of presence of other args. for opt , _ in opts : if opt in ( '-h' , '--help' ) : print ( __doc__ ) print ( help_msg ) return 0 isdelay = False for opt , _ in opts : # Prints the size of the terminal and returns. # Mutually exclusive to the paging of text and overrides that behaviour. if opt in ( '-s' , '--size' ) : print ( 'Length: %d, Width: %d' % TerminalSize ( ) ) return 0 elif opt in ( '-d' , '--delay' ) : isdelay = True else : raise Usage ( 'Invalid arguments.' ) # Page text supplied in either specified file or stdin. if len ( args ) == 1 : with open ( args [ 0 ] ) as f : fd = f . read ( ) else : fd = sys . stdin . read ( ) Pager ( fd , delay = isdelay ) . Page ( )
Routine to page text or determine window size via command line .
299
13
223,861
def Reset ( self ) : self . _displayed = 0 self . _currentpagelines = 0 self . _lastscroll = 1 self . _lines_to_show = self . _cli_lines
Reset the pager to the top of the text .
44
12
223,862
def SetLines ( self , lines ) : ( self . _cli_lines , self . _cli_cols ) = TerminalSize ( ) if lines : self . _cli_lines = int ( lines )
Set number of screen lines .
45
6
223,863
def Page ( self , text = None , show_percent = None ) : if text is not None : self . _text += text if show_percent is None : show_percent = text is None self . _show_percent = show_percent text = LineWrap ( self . _text ) . splitlines ( ) while True : # Get a list of new lines to display. self . _newlines = text [ self . _displayed : self . _displayed + self . _lines_to_show ] for line in self . _newlines : sys . stdout . write ( line + '\n' ) if self . _delay and self . _lastscroll > 0 : time . sleep ( 0.005 ) self . _displayed += len ( self . _newlines ) self . _currentpagelines += len ( self . _newlines ) if self . _currentpagelines >= self . _lines_to_show : self . _currentpagelines = 0 wish = self . _AskUser ( ) if wish == 'q' : # Quit pager. return False elif wish == 'g' : # Display till the end. self . _Scroll ( len ( text ) - self . _displayed + 1 ) elif wish == '\r' : # Enter, down a line. self . _Scroll ( 1 ) elif wish == '\033[B' : # Down arrow, down a line. self . _Scroll ( 1 ) elif wish == '\033[A' : # Up arrow, up a line. self . _Scroll ( - 1 ) elif wish == 'b' : # Up a page. self . _Scroll ( 0 - self . _cli_lines ) else : # Next page. self . _Scroll ( ) if self . _displayed >= len ( text ) : break return True
Page text .
397
3
223,864
def _Scroll ( self , lines = None ) : if lines is None : lines = self . _cli_lines if lines < 0 : self . _displayed -= self . _cli_lines self . _displayed += lines if self . _displayed < 0 : self . _displayed = 0 self . _lines_to_show = self . _cli_lines else : self . _lines_to_show = lines self . _lastscroll = lines
Set attributes to scroll the buffer correctly .
98
8
223,865
def _AskUser ( self ) : if self . _show_percent : progress = int ( self . _displayed * 100 / ( len ( self . _text . splitlines ( ) ) ) ) progress_text = ' (%d%%)' % progress else : progress_text = '' question = AnsiText ( 'Enter: next line, Space: next page, ' 'b: prev page, q: quit.%s' % progress_text , [ 'green' ] ) sys . stdout . write ( question ) sys . stdout . flush ( ) ch = self . _GetCh ( ) sys . stdout . write ( '\r%s\r' % ( ' ' * len ( question ) ) ) sys . stdout . flush ( ) return ch
Prompt the user for the next action .
166
9
223,866
def _GetCh ( self ) : fd = self . _tty . fileno ( ) old = termios . tcgetattr ( fd ) try : tty . setraw ( fd ) ch = self . _tty . read ( 1 ) # Also support arrow key shortcuts (escape + 2 chars) if ord ( ch ) == 27 : ch += self . _tty . read ( 2 ) finally : termios . tcsetattr ( fd , termios . TCSADRAIN , old ) return ch
Read a single character from the user .
109
8
223,867
def add_deflection ( position , observer , ephemeris , t , include_earth_deflection , count = 3 ) : # Compute light-time to observed object. tlt = length_of ( position ) / C_AUDAY # Cycle through gravitating bodies. jd_tdb = t . tdb ts = t . ts for name in deflectors [ : count ] : try : deflector = ephemeris [ name ] except KeyError : deflector = ephemeris [ name + ' barycenter' ] # Get position of gravitating body wrt ss barycenter at time 't_tdb'. bposition = deflector . at ( ts . tdb ( jd = jd_tdb ) ) . position . au # TODO # Get position of gravitating body wrt observer at time 'jd_tdb'. gpv = bposition - observer # Compute light-time from point on incoming light ray that is closest # to gravitating body. dlt = light_time_difference ( position , gpv ) # Get position of gravitating body wrt ss barycenter at time when # incoming photons were closest to it. tclose = jd_tdb # if dlt > 0.0: # tclose = jd - dlt tclose = where ( dlt > 0.0 , jd_tdb - dlt , tclose ) tclose = where ( tlt < dlt , jd_tdb - tlt , tclose ) # if tlt < dlt: # tclose = jd - tlt bposition = deflector . at ( ts . tdb ( jd = tclose ) ) . position . au # TODO rmass = rmasses [ name ] _add_deflection ( position , observer , bposition , rmass ) # If observer is not at geocenter, add in deflection due to Earth. if include_earth_deflection . any ( ) : deflector = ephemeris [ 'earth' ] bposition = deflector . at ( ts . tdb ( jd = tclose ) ) . position . au # TODO rmass = rmasses [ 'earth' ] # TODO: Make the following code less messy, maybe by having # _add_deflection() return a new vector instead of modifying the # old one in-place. deflected_position = position . copy ( ) _add_deflection ( deflected_position , observer , bposition , rmass ) if include_earth_deflection . shape : position [ : , include_earth_deflection ] = ( deflected_position [ : , include_earth_deflection ] ) else : position [ : ] = deflected_position [ : ]
Update position for how solar system masses will deflect its light .
587
12
223,868
def _add_deflection ( position , observer , deflector , rmass ) : # Construct vector 'pq' from gravitating body to observed object and # construct vector 'pe' from gravitating body to observer. pq = observer + position - deflector pe = observer - deflector # Compute vector magnitudes and unit vectors. pmag = length_of ( position ) qmag = length_of ( pq ) emag = length_of ( pe ) phat = position / where ( pmag , pmag , 1.0 ) # where() avoids divide-by-zero qhat = pq / where ( qmag , qmag , 1.0 ) ehat = pe / where ( emag , emag , 1.0 ) # Compute dot products of vectors. pdotq = dots ( phat , qhat ) qdote = dots ( qhat , ehat ) edotp = dots ( ehat , phat ) # If gravitating body is observed object, or is on a straight line # toward or away from observed object to within 1 arcsec, deflection # is set to zero set 'pos2' equal to 'pos1'. make_no_correction = abs ( edotp ) > 0.99999999999 # Compute scalar factors. fac1 = 2.0 * GS / ( C * C * emag * AU_M * rmass ) fac2 = 1.0 + qdote # Correct position vector. position += where ( make_no_correction , 0.0 , fac1 * ( pdotq * ehat - edotp * qhat ) / fac2 * pmag )
Correct a position vector for how one particular mass deflects light .
355
13
223,869
def add_aberration ( position , velocity , light_time ) : p1mag = light_time * C_AUDAY vemag = length_of ( velocity ) beta = vemag / C_AUDAY dot = dots ( position , velocity ) cosd = dot / ( p1mag * vemag ) gammai = sqrt ( 1.0 - beta * beta ) p = beta * cosd q = ( 1.0 + p / ( 1.0 + gammai ) ) * light_time r = 1.0 + p position *= gammai position += q * velocity position /= r
Correct a relative position vector for aberration of light .
132
11
223,870
def _center ( code , segment_dict ) : while code in segment_dict : segment = segment_dict [ code ] yield segment code = segment . center
Starting with code follow segments from target to center .
33
10
223,871
def names ( self ) : d = defaultdict ( list ) for code , name in target_name_pairs : if code in self . codes : d [ code ] . append ( name ) return dict ( d )
Return all target names that are valid with this kernel .
46
11
223,872
def decode ( self , name ) : if isinstance ( name , int ) : code = name else : name = name . upper ( ) code = _targets . get ( name ) if code is None : raise ValueError ( 'unknown SPICE target {0!r}' . format ( name ) ) if code not in self . codes : targets = ', ' . join ( _format_code_and_name ( c ) for c in self . codes ) raise KeyError ( 'kernel {0!r} is missing {1!r} -' ' the targets it supports are: {2}' . format ( self . filename , name , targets ) ) return code
Translate a target name into its integer code .
144
10
223,873
def _search ( mapping , filename ) : result = mapping . get ( filename ) if result is not None : return result name , ext = os . path . splitext ( filename ) result = mapping . get ( ext ) if result is not None : for pattern , result2 in result : if fnmatch ( filename , pattern ) : return result2 return None
Search a Loader data structure for a filename .
75
10
223,874
def load_file ( path ) : path = os . path . expanduser ( path ) base , ext = os . path . splitext ( path ) if ext == '.bsp' : return SpiceKernel ( path ) raise ValueError ( 'unrecognized file extension: {}' . format ( path ) )
Open a file on your local drive using its extension to guess its type .
66
15
223,875
def parse_deltat_data ( fileobj ) : array = np . loadtxt ( fileobj ) year , month , day = array [ - 1 , : 3 ] . astype ( int ) expiration_date = date ( year + 1 , month , day ) year , month , day , delta_t = array . T data = np . array ( ( julian_date ( year , month , day ) , delta_t ) ) return expiration_date , data
Parse the United States Naval Observatory deltat . data file .
101
14
223,876
def parse_deltat_preds ( fileobj ) : lines = iter ( fileobj ) header = next ( lines ) if header . startswith ( b'YEAR' ) : # Format in use until 2019 February next ( lines ) # discard blank line year_float , delta_t = np . loadtxt ( lines , usecols = [ 0 , 1 ] ) . T else : # Format in use since 2019 February year_float , delta_t = np . loadtxt ( lines , usecols = [ 1 , 2 ] ) . T year = year_float . astype ( int ) month = 1 + ( year_float * 12.0 ) . astype ( int ) % 12 expiration_date = date ( year [ 0 ] + 2 , month [ 0 ] , 1 ) data = np . array ( ( julian_date ( year , month , 1 ) , delta_t ) ) return expiration_date , data
Parse the United States Naval Observatory deltat . preds file .
202
15
223,877
def parse_leap_seconds ( fileobj ) : lines = iter ( fileobj ) for line in lines : if line . startswith ( b'# File expires on' ) : break else : raise ValueError ( 'Leap_Second.dat is missing its expiration date' ) line = line . decode ( 'ascii' ) with _lock : # won't help if anyone user threads are doing parsing, alas original_locale = locale . setlocale ( locale . LC_ALL ) locale . setlocale ( locale . LC_ALL , 'C' ) try : dt = datetime . strptime ( line , '# File expires on %d %B %Y\n' ) finally : locale . setlocale ( locale . LC_ALL , original_locale ) # The file went out of date at the beginning of July 2016, and kept # downloading every time a user ran a Skyfield program. So we now # build in a grace period: grace_period = timedelta ( days = 30 ) expiration_date = dt . date ( ) + grace_period mjd , day , month , year , offsets = np . loadtxt ( lines ) . T leap_dates = np . ndarray ( len ( mjd ) + 2 ) leap_dates [ 0 ] = '-inf' leap_dates [ 1 : - 1 ] = mjd + 2400000.5 leap_dates [ - 1 ] = 'inf' leap_offsets = np . ndarray ( len ( mjd ) + 2 ) leap_offsets [ 0 ] = leap_offsets [ 1 ] = offsets [ 0 ] leap_offsets [ 2 : ] = offsets return expiration_date , ( leap_dates , leap_offsets )
Parse the IERS file Leap_Second . dat .
379
12
223,878
def parse_tle ( fileobj ) : b0 = b1 = b'' for b2 in fileobj : if ( b1 . startswith ( b'1 ' ) and len ( b1 ) >= 69 and b2 . startswith ( b'2 ' ) and len ( b2 ) >= 69 ) : b0 = b0 . rstrip ( b'\n\r' ) if len ( b0 ) == 24 : # Celestrak name = b0 . decode ( 'ascii' ) . rstrip ( ) names = [ name ] elif b0 . startswith ( b'0 ' ) : # Spacetrack 3-line format name = b0 [ 2 : ] . decode ( 'ascii' ) . rstrip ( ) names = [ name ] else : name = None names = ( ) line1 = b1 . decode ( 'ascii' ) line2 = b2 . decode ( 'ascii' ) sat = EarthSatellite ( line1 , line2 , name ) if name and ' (' in name : # Given a name like `ISS (ZARYA)` or `HTV-6 (KOUNOTORI # 6)`, also support lookup by the name inside or outside # the parentheses. short_name , secondary_name = name . split ( ' (' ) secondary_name = secondary_name . rstrip ( ')' ) names . append ( short_name ) names . append ( secondary_name ) yield names , sat b0 = b1 b1 = b2
Parse a file of TLE satellite element sets .
331
11
223,879
def download ( url , path , verbose = None , blocksize = 128 * 1024 ) : tempname = path + '.download' try : connection = urlopen ( url ) except Exception as e : raise IOError ( 'cannot get {0} because {1}' . format ( url , e ) ) if verbose is None : verbose = sys . stderr . isatty ( ) bar = None if verbose : if _running_IDLE : print ( 'Downloading {0} ...' . format ( os . path . basename ( path ) ) , file = sys . stderr ) else : bar = ProgressBar ( path ) content_length = int ( connection . headers . get ( 'content-length' , - 1 ) ) # Python open() provides no way to achieve O_CREAT without also # truncating the file, which would ruin the work of another process # that is trying to download the same file at the same time. So: flags = getattr ( os , 'O_BINARY' , 0 ) | os . O_CREAT | os . O_RDWR fd = os . open ( tempname , flags , 0o666 ) with os . fdopen ( fd , 'wb' ) as w : try : if lockf is not None : fd = w . fileno ( ) lockf ( fd , LOCK_EX ) # only one download at a time if os . path . exists ( path ) : # did someone else finish first? if os . path . exists ( tempname ) : os . unlink ( tempname ) return w . seek ( 0 ) length = 0 while True : data = connection . read ( blocksize ) if not data : break w . write ( data ) length += len ( data ) if bar is not None : bar . report ( length , content_length ) w . flush ( ) if lockf is not None : # On Unix, rename while still protected by the lock. try : os . rename ( tempname , path ) except Exception as e : raise IOError ( 'error renaming {0} to {1} - {2}' . format ( tempname , path , e ) ) except Exception as e : raise IOError ( 'error getting {0} - {1}' . format ( url , e ) ) finally : if lockf is not None : lockf ( fd , LOCK_UN ) if lockf is None : # On Windows, rename here because the file needs to be closed first. try : _replace ( tempname , path ) except Exception as e : raise IOError ( 'error renaming {0} to {1} - {2}' . format ( tempname , path , e ) )
Download a file from a URL possibly displaying a progress bar .
585
12
223,880
def tle ( self , url , reload = False , filename = None ) : d = { } with self . open ( url , reload = reload , filename = filename ) as f : for names , sat in parse_tle ( f ) : d [ sat . model . satnum ] = sat for name in names : d [ name ] = sat return d
Load and parse a satellite TLE file .
75
9
223,881
def open ( self , url , mode = 'rb' , reload = False , filename = None ) : if '://' not in url : path_that_might_be_relative = url path = os . path . join ( self . directory , path_that_might_be_relative ) return open ( path , mode ) if filename is None : filename = urlparse ( url ) . path . split ( '/' ) [ - 1 ] path = self . path_to ( filename ) if reload and os . path . exists ( path ) : os . remove ( path ) if not os . path . exists ( path ) : download ( url , path , self . verbose ) return open ( path , mode )
Open a file downloading it first if it does not yet exist .
151
13
223,882
def timescale ( self , delta_t = None ) : if delta_t is not None : delta_t_recent = np . array ( ( ( - 1e99 , 1e99 ) , ( delta_t , delta_t ) ) ) else : data = self ( 'deltat.data' ) preds = self ( 'deltat.preds' ) data_end_time = data [ 0 , - 1 ] i = np . searchsorted ( preds [ 0 ] , data_end_time , side = 'right' ) delta_t_recent = np . concatenate ( [ data , preds [ : , i : ] ] , axis = 1 ) leap_dates , leap_offsets = self ( 'Leap_Second.dat' ) return Timescale ( delta_t_recent , leap_dates , leap_offsets )
Open or download three time scale files returning a Timescale .
189
12
223,883
def get_summary ( url , spk = True ) : # connect to file at URL bspurl = urllib2 . urlopen ( url ) # retrieve the "tip" of a file at URL bsptip = bspurl . read ( 10 ** 5 ) # first 100kB # save data in fake file object (in-memory) bspstr = StringIO ( bsptip ) # load into DAF object daf = DAF ( bspstr ) # return either SPK or DAF object if spk : # make a SPK object spk = SPK ( daf ) # return representation return spk else : # return representation return daf
simple function to retrieve the header of a BSP file and return SPK object
145
16
223,884
def _correct_for_light_travel_time ( observer , target ) : t = observer . t ts = t . ts cposition = observer . position . au cvelocity = observer . velocity . au_per_d tposition , tvelocity , gcrs_position , message = target . _at ( t ) distance = length_of ( tposition - cposition ) light_time0 = 0.0 t_tdb = t . tdb for i in range ( 10 ) : light_time = distance / C_AUDAY delta = light_time - light_time0 if - 1e-12 < min ( delta ) and max ( delta ) < 1e-12 : break t2 = ts . tdb ( jd = t_tdb - light_time ) tposition , tvelocity , gcrs_position , message = target . _at ( t2 ) distance = length_of ( tposition - cposition ) light_time0 = light_time else : raise ValueError ( 'light-travel time failed to converge' ) return tposition - cposition , tvelocity - cvelocity , t , light_time
Return a light - time corrected astrometric position and velocity .
249
13
223,885
def at ( self , t ) : if not isinstance ( t , Time ) : raise ValueError ( 'please provide the at() method with a Time' ' instance as its argument, instead of the' ' value {0!r}' . format ( t ) ) observer_data = ObserverData ( ) observer_data . ephemeris = self . ephemeris p , v , observer_data . gcrs_position , message = self . _at ( t ) center = self . center if center == 0 : observer_data . bcrs_position = p observer_data . bcrs_velocity = v self . _snag_observer_data ( observer_data , t ) position = build_position ( p , v , t , center , self . target , observer_data ) position . message = message return position
At time t compute the target s position relative to the center .
178
13
223,886
def _to_array ( value ) : if hasattr ( value , 'shape' ) : return value elif hasattr ( value , '__len__' ) : return array ( value ) else : return float_ ( value )
When value is a plain Python sequence return it as a NumPy array .
49
15
223,887
def julian_day ( year , month = 1 , day = 1 ) : janfeb = month < 3 return ( day + 1461 * ( year + 4800 - janfeb ) // 4 + 367 * ( month - 2 + janfeb * 12 ) // 12 - 3 * ( ( year + 4900 - janfeb ) // 100 ) // 4 - 32075 )
Given a proleptic Gregorian calendar date return a Julian day int .
83
15
223,888
def julian_date ( year , month = 1 , day = 1 , hour = 0 , minute = 0 , second = 0.0 ) : return julian_day ( year , month , day ) - 0.5 + ( second + minute * 60.0 + hour * 3600.0 ) / DAY_S
Given a proleptic Gregorian calendar date return a Julian date float .
70
15
223,889
def tdb_minus_tt ( jd_tdb ) : t = ( jd_tdb - T0 ) / 36525.0 # USNO Circular 179, eq. 2.6. return ( 0.001657 * sin ( 628.3076 * t + 6.2401 ) + 0.000022 * sin ( 575.3385 * t + 4.2970 ) + 0.000014 * sin ( 1256.6152 * t + 6.1969 ) + 0.000005 * sin ( 606.9777 * t + 4.0212 ) + 0.000005 * sin ( 52.9691 * t + 0.4444 ) + 0.000002 * sin ( 21.3299 * t + 5.5431 ) + 0.000010 * t * sin ( 628.3076 * t + 4.2490 ) )
Computes how far TDB is in advance of TT given TDB .
194
15
223,890
def interpolate_delta_t ( delta_t_table , tt ) : tt_array , delta_t_array = delta_t_table delta_t = _to_array ( interp ( tt , tt_array , delta_t_array , nan , nan ) ) missing = isnan ( delta_t ) if missing . any ( ) : # Test if we are dealing with an array and proceed appropriately if missing . shape : tt = tt [ missing ] delta_t [ missing ] = delta_t_formula_morrison_and_stephenson_2004 ( tt ) else : delta_t = delta_t_formula_morrison_and_stephenson_2004 ( tt ) return delta_t
Return interpolated Delta T values for the times in tt .
166
13
223,891
def build_delta_t_table ( delta_t_recent ) : ancient = load_bundled_npy ( 'morrison_stephenson_deltat.npy' ) historic = load_bundled_npy ( 'historic_deltat.npy' ) # Prefer USNO over Morrison and Stephenson where they overlap. historic_start_time = historic [ 0 , 0 ] i = searchsorted ( ancient [ 0 ] , historic_start_time ) bundled = concatenate ( [ ancient [ : , : i ] , historic ] , axis = 1 ) # Let recent data replace everything else. recent_start_time = delta_t_recent [ 0 , 0 ] i = searchsorted ( bundled [ 0 ] , recent_start_time ) row = ( ( 0 , ) , ( 0 , ) ) table = concatenate ( [ row , bundled [ : , : i ] , delta_t_recent , row ] , axis = 1 ) # Create initial and final point to provide continuity with formula. century = 36524.0 start = table [ 0 , 1 ] - century table [ : , 0 ] = start , delta_t_formula_morrison_and_stephenson_2004 ( start ) end = table [ 0 , - 2 ] + century table [ : , - 1 ] = end , delta_t_formula_morrison_and_stephenson_2004 ( end ) return table
Build a table for interpolating Delta T .
314
9
223,892
def utc ( self , year , month = 1 , day = 1 , hour = 0 , minute = 0 , second = 0.0 ) : if isinstance ( year , datetime ) : dt = year tai = _utc_datetime_to_tai ( self . leap_dates , self . leap_offsets , dt ) elif isinstance ( year , date ) : d = year tai = _utc_date_to_tai ( self . leap_dates , self . leap_offsets , d ) elif hasattr ( year , '__len__' ) and isinstance ( year [ 0 ] , datetime ) : # TODO: clean this up and better document the possibilities. list_of_datetimes = year tai = array ( [ _utc_datetime_to_tai ( self . leap_dates , self . leap_offsets , dt ) for dt in list_of_datetimes ] ) else : tai = _utc_to_tai ( self . leap_dates , self . leap_offsets , _to_array ( year ) , _to_array ( month ) , _to_array ( day ) , _to_array ( hour ) , _to_array ( minute ) , _to_array ( second ) ) t = Time ( self , tai + tt_minus_tai ) t . tai = tai return t
Build a Time from a UTC calendar date .
312
9
223,893
def tai ( self , year = None , month = 1 , day = 1 , hour = 0 , minute = 0 , second = 0.0 , jd = None ) : if jd is not None : tai = jd else : tai = julian_date ( _to_array ( year ) , _to_array ( month ) , _to_array ( day ) , _to_array ( hour ) , _to_array ( minute ) , _to_array ( second ) , ) return self . tai_jd ( tai )
Build a Time from a TAI calendar date .
122
10
223,894
def tai_jd ( self , jd ) : tai = _to_array ( jd ) t = Time ( self , tai + tt_minus_tai ) t . tai = tai return t
Build a Time from a TAI Julian date .
50
10
223,895
def tt ( self , year = None , month = 1 , day = 1 , hour = 0 , minute = 0 , second = 0.0 , jd = None ) : if jd is not None : tt = jd else : tt = julian_date ( _to_array ( year ) , _to_array ( month ) , _to_array ( day ) , _to_array ( hour ) , _to_array ( minute ) , _to_array ( second ) , ) tt = _to_array ( tt ) return Time ( self , tt )
Build a Time from a TT calendar date .
129
9
223,896
def tdb ( self , year = None , month = 1 , day = 1 , hour = 0 , minute = 0 , second = 0.0 , jd = None ) : if jd is not None : tdb = jd else : tdb = julian_date ( _to_array ( year ) , _to_array ( month ) , _to_array ( day ) , _to_array ( hour ) , _to_array ( minute ) , _to_array ( second ) , ) tdb = _to_array ( tdb ) tt = tdb - tdb_minus_tt ( tdb ) / DAY_S t = Time ( self , tt ) t . tdb = tdb return t
Build a Time from a TDB calendar date .
159
10
223,897
def tdb_jd ( self , jd ) : tdb = _to_array ( jd ) tt = tdb - tdb_minus_tt ( tdb ) / DAY_S t = Time ( self , tt ) t . tdb = tdb return t
Build a Time from a TDB Julian date .
62
10
223,898
def ut1 ( self , year = None , month = 1 , day = 1 , hour = 0 , minute = 0 , second = 0.0 , jd = None ) : if jd is not None : ut1 = jd else : ut1 = julian_date ( _to_array ( year ) , _to_array ( month ) , _to_array ( day ) , _to_array ( hour ) , _to_array ( minute ) , _to_array ( second ) , ) return self . ut1_jd ( ut1 )
Build a Time from a UT1 calendar date .
122
10
223,899
def ut1_jd ( self , jd ) : ut1 = _to_array ( jd ) # Estimate TT = UT1, to get a rough Delta T estimate. tt_approx = ut1 delta_t_approx = interpolate_delta_t ( self . delta_t_table , tt_approx ) # Use the rough Delta T to make a much better estimate of TT, # then generate an even better Delta T. tt_approx = ut1 + delta_t_approx / DAY_S delta_t_approx = interpolate_delta_t ( self . delta_t_table , tt_approx ) # We can now estimate TT with an error of < 1e-9 seconds within # 10 centuries of either side of the present; for details, see: # https://github.com/skyfielders/astronomy-notebooks # and look for the notebook "error-in-timescale-ut1.ipynb". tt = ut1 + delta_t_approx / DAY_S t = Time ( self , tt ) t . ut1 = ut1 return t
Build a Time from UT1 a Julian date .
253
10