| idx (int64, 0 to 165k) | question (string, 73 to 4.15k chars) | target (string, 5 to 918 chars) | len_question (int64, 21 to 890) | len_target (int64, 3 to 255) |
|---|---|---|---|---|
| 151,800 | `private OCSPReq createRequest(SFPair<Certificate, Certificate> pairIssuerSubject) { Certificate issuer = pairIssuerSubject.left; Certificate subject = pairIssuerSubject.right; OCSPReqBuilder gen = new OCSPReqBuilder(); try { DigestCalculator digest = new SHA1DigestCalculator(); X509CertificateHolder certHolder = new X509CertificateHolder(issuer.getEncoded()); CertificateID certId = new CertificateID(digest, certHolder, subject.getSerialNumber().getValue()); gen.addRequest(certId); return gen.build(); } catch (OCSPException \| IOException ex) { throw new RuntimeException("Failed to build a OCSPReq."); } }` | Creates an OCSP Request | 181 | 6 |
| 151,801 | `private List<Certificate> convertToBouncyCastleCertificate(X509Certificate[] chain) { final List<Certificate> bcChain = new ArrayList<>(); for (X509Certificate cert : chain) { try { bcChain.add(Certificate.getInstance(cert.getEncoded())); } catch (CertificateEncodingException ex) { throw new RuntimeException("Failed to decode the certificate DER data"); } } return bcChain; }` | Converts X509Certificate to Bouncy Castle Certificate | 108 | 12 |
| 151,802 | `private List<SFPair<Certificate, Certificate>> getPairIssuerSubject(List<Certificate> bcChain) { List<SFPair<Certificate, Certificate>> pairIssuerSubject = new ArrayList<>(); for (int i = 0, len = bcChain.size(); i < len; ++i) { Certificate bcCert = bcChain.get(i); if (bcCert.getIssuer().equals(bcCert.getSubject())) { continue; /* skipping ROOT CA */ } if (i < len - 1) { pairIssuerSubject.add(SFPair.of(bcChain.get(i + 1), bcChain.get(i))); } else { synchronized (ROOT_CA_LOCK) { /* no root CA certificate is attached in the certificate chain, so getting one from the root CA from JVM. */ Certificate issuer = ROOT_CA.get(bcCert.getIssuer().hashCode()); if (issuer == null) { throw new RuntimeException("Failed to find the root CA."); } pairIssuerSubject.add(SFPair.of(issuer, bcChain.get(i))); } } } return pairIssuerSubject; }` | Creates a pair of Issuer and Subject certificates | 281 | 10 |
| 151,803 | `private Set<String> getOcspUrls(Certificate bcCert) { TBSCertificate bcTbsCert = bcCert.getTBSCertificate(); Extensions bcExts = bcTbsCert.getExtensions(); if (bcExts == null) { throw new RuntimeException("Failed to get Tbs Certificate."); } Set<String> ocsp = new HashSet<>(); for (Enumeration en = bcExts.oids(); en.hasMoreElements(); ) { ASN1ObjectIdentifier oid = (ASN1ObjectIdentifier) en.nextElement(); Extension bcExt = bcExts.getExtension(oid); if (bcExt.getExtnId() == Extension.authorityInfoAccess) { /* OCSP URLs are included in authorityInfoAccess */ DLSequence seq = (DLSequence) bcExt.getParsedValue(); for (ASN1Encodable asn : seq) { ASN1Encodable[] pairOfAsn = ((DLSequence) asn).toArray(); if (pairOfAsn.length == 2) { ASN1ObjectIdentifier key = (ASN1ObjectIdentifier) pairOfAsn[0]; if (key == OIDocsp) { /* ensure OCSP and not CRL */ GeneralName gn = GeneralName.getInstance(pairOfAsn[1]); ocsp.add(gn.getName().toString()); } } } } } return ocsp; }` | Gets OCSP URLs associated with the certificate. | 358 | 10 |
| 151,804 | `private static boolean isValidityRange(Date currentTime, Date thisUpdate, Date nextUpdate) { long tolerableValidity = calculateTolerableVadility(thisUpdate, nextUpdate); return thisUpdate.getTime() - MAX_CLOCK_SKEW_IN_MILLISECONDS <= currentTime.getTime() && currentTime.getTime() <= nextUpdate.getTime() + tolerableValidity; }` | Checks the validity | 98 | 4 |
| 151,805 | `private void processKeyUpdateDirective(String issuer, String ssd) { try { /* Get unverified part of the JWT to extract issuer. */ /* PlainJWT jwt_unverified = PlainJWT.parse(ssd); */ SignedJWT jwt_signed = SignedJWT.parse(ssd); String jwt_issuer = (String) jwt_signed.getHeader().getCustomParam("ssd_iss"); String ssd_pubKey; if (!jwt_issuer.equals(issuer)) { LOGGER.debug("Issuer mismatch. Invalid SSD"); return; } if (jwt_issuer.equals("dep1")) { ssd_pubKey = ssdManager.getPubKey("dep1"); } else { ssd_pubKey = ssdManager.getPubKey("dep2"); } if (ssd_pubKey == null) { LOGGER.debug("Invalid SSD"); return; } String publicKeyContent = ssd_pubKey.replaceAll("\\n", "").replace("-----BEGIN PUBLIC KEY-----", "").replace("-----END PUBLIC KEY-----", ""); KeyFactory kf = KeyFactory.getInstance("RSA"); X509EncodedKeySpec keySpecX509 = new X509EncodedKeySpec(Base64.decodeBase64(publicKeyContent)); RSAPublicKey rsaPubKey = (RSAPublicKey) kf.generatePublic(keySpecX509); /* Verify signature of the JWT Token */ SignedJWT jwt_token_verified = SignedJWT.parse(ssd); JWSVerifier jwsVerifier = new RSASSAVerifier(rsaPubKey); try { if (jwt_token_verified.verify(jwsVerifier)) { /* verify nbf time */ long cur_time = System.currentTimeMillis(); Date nbf = jwt_token_verified.getJWTClaimsSet().getNotBeforeTime(); /* long nbf = jwt_token_verified.getJWTClaimsSet().getLongClaim("nbf"); double nbf = jwt_token_verified.getJWTClaimsSet().getDoubleClaim("nbf"); */ if (cur_time < nbf.getTime()) { LOGGER.debug("The SSD token is not yet valid. Current time less than Not Before Time"); return; } float key_ver = Float.parseFloat(jwt_token_verified.getJWTClaimsSet().getStringClaim("keyVer")); if (key_ver <= ssdManager.getPubKeyVer(jwt_issuer)) { return; } ssdManager.updateKey(jwt_issuer, jwt_token_verified.getJWTClaimsSet().getStringClaim("pubKey"), key_ver); } } catch (Throwable ex) { LOGGER.debug("Failed to verify JWT Token"); throw ex; } } catch (Throwable ex) { LOGGER.debug("Failed to parse JWT Token, aborting"); } }` | SSD Processing Code | 734 | 4 |
| 151,806 | `private String ocspResponseToB64(OCSPResp ocspResp) { if (ocspResp == null) { return null; } try { return Base64.encodeBase64String(ocspResp.getEncoded()); } catch (Throwable ex) { LOGGER.debug("Could not convert OCSP Response to Base64"); return null; } }` | OCSP Response Utils | 87 | 5 |
| 151,807 | `private void scheduleHeartbeat() { /* elapsed time in seconds since the last heartbeat */ long elapsedSecsSinceLastHeartBeat = System.currentTimeMillis() / 1000 - lastHeartbeatStartTimeInSecs; /* The initial delay for the new scheduling is 0 if the elapsed time is more than the heartbeat time interval, otherwise it is the difference between the heartbeat time interval and the elapsed time. */ long initialDelay = Math.max(heartBeatIntervalInSecs - elapsedSecsSinceLastHeartBeat, 0); LOGGER.debug("schedule heartbeat task with initial delay of {} seconds", initialDelay); /* Creates and executes a periodic action to send heartbeats */ this.heartbeatFuture = this.scheduler.schedule(this, initialDelay, TimeUnit.SECONDS); }` | Schedule the next heartbeat | 178 | 5 |
| 151,808 | `public SnowflakeStorageClient createClient(StageInfo stage, int parallel, RemoteStoreFileEncryptionMaterial encMat) throws SnowflakeSQLException { logger.debug("createClient client type={}", stage.getStageType().name()); switch (stage.getStageType()) { case S3: return createS3Client(stage.getCredentials(), parallel, encMat, stage.getRegion()); case AZURE: return createAzureClient(stage, encMat); default: /* We don't create a storage client for FS_LOCAL, so we should only find ourselves here if an unsupported remote storage client type is specified */ throw new IllegalArgumentException("Unsupported storage client specified: " + stage.getStageType().name()); } }` | Creates a storage client based on the value of stageLocationType | 182 | 13 |
| 151,809 | `private SnowflakeS3Client createS3Client(Map stageCredentials, int parallel, RemoteStoreFileEncryptionMaterial encMat, String stageRegion) throws SnowflakeSQLException { final int S3_TRANSFER_MAX_RETRIES = 3; logger.debug("createS3Client encryption={}", (encMat == null ? "no" : "yes")); SnowflakeS3Client s3Client; ClientConfiguration clientConfig = new ClientConfiguration(); clientConfig.setMaxConnections(parallel + 1); clientConfig.setMaxErrorRetry(S3_TRANSFER_MAX_RETRIES); clientConfig.setDisableSocketProxy(HttpUtil.isSocksProxyDisabled()); logger.debug("s3 client configuration: maxConnection={}, connectionTimeout={}, " + "socketTimeout={}, maxErrorRetry={}", clientConfig.getMaxConnections(), clientConfig.getConnectionTimeout(), clientConfig.getSocketTimeout(), clientConfig.getMaxErrorRetry()); try { s3Client = new SnowflakeS3Client(stageCredentials, clientConfig, encMat, stageRegion); } catch (Exception ex) { logger.debug("Exception creating s3 client", ex); throw ex; } logger.debug("s3 client created"); return s3Client; }` | Creates a SnowflakeS3ClientObject which encapsulates the Amazon S3 client | 311 | 17 |
| 151,810 | `public StorageObjectMetadata createStorageMetadataObj(StageInfo.StageType stageType) { switch (stageType) { case S3: return new S3ObjectMetadata(); case AZURE: return new AzureObjectMetadata(); default: /* An unsupported remote storage client type was specified. We don't create/implement a storage client for FS_LOCAL, so we should never end up here while running on local file system */ throw new IllegalArgumentException("Unsupported stage type specified: " + stageType.name()); } }` | Creates a storage provider specific metadata object accessible via the platform independent interface | 121 | 14 |
| 151,811 | `private SnowflakeAzureClient createAzureClient(StageInfo stage, RemoteStoreFileEncryptionMaterial encMat) throws SnowflakeSQLException { logger.debug("createAzureClient encryption={}", (encMat == null ? "no" : "yes")); /* TODO: implement support for encryption SNOW-33042 */ SnowflakeAzureClient azureClient; try { azureClient = SnowflakeAzureClient.createSnowflakeAzureClient(stage, encMat); } catch (Exception ex) { logger.debug("Exception creating Azure Storage client", ex); throw ex; } logger.debug("Azure Storage client created"); return azureClient; }` | Creates a SnowflakeAzureClientObject which encapsulates the Azure Storage client | 155 | 16 |
| 151,812 | `public synchronized static BindUploader newInstance(SFSession session, String stageDir) throws BindException { try { Path bindDir = Files.createTempDirectory(PREFIX); return new BindUploader(session, stageDir, bindDir); } catch (IOException ex) { throw new BindException(String.format("Failed to create temporary directory: %s", ex.getMessage()), BindException.Type.OTHER); } }` | Create a new BindUploader which will upload to the given stage path. Ensure the temporary directory for file writing exists. | 100 | 21 |
| 151,813 | `public void upload(Map<String, ParameterBindingDTO> bindValues) throws BindException { if (!closed) { serializeBinds(bindValues); putBinds(); } }` | Upload the bindValues to stage | 45 | 6 |
| 151,814 | `private void serializeBinds(Map<String, ParameterBindingDTO> bindValues) throws BindException { List<ColumnTypeDataPair> columns = getColumnValues(bindValues); List<String[]> rows = buildRows(columns); writeRowsToCSV(rows); }` | Save the binds to disk | 69 | 5 |
| 151,815 | `private List<ColumnTypeDataPair> getColumnValues(Map<String, ParameterBindingDTO> bindValues) throws BindException { List<ColumnTypeDataPair> columns = new ArrayList<>(bindValues.size()); for (int i = 1; i <= bindValues.size(); i++) { /* bindValues should have n entries with string keys 1 ... n and list values */ String key = Integer.toString(i); if (!bindValues.containsKey(key)) { throw new BindException(String.format("Bind map with %d columns should contain key \"%d\"", bindValues.size(), i), BindException.Type.SERIALIZATION); } ParameterBindingDTO value = bindValues.get(key); try { String type = value.getType(); List<String> list = (List<String>) value.getValue(); List<String> convertedList = new ArrayList<>(list.size()); if ("TIMESTAMP_LTZ".equals(type) \|\| "TIMESTAMP_NTZ".equals(type)) { for (String e : list) { convertedList.add(synchronizedTimestampFormat(e)); } } else if ("DATE".equals(type)) { for (String e : list) { convertedList.add(synchronizedDateFormat(e)); } } else { convertedList = list; } columns.add(i - 1, new ColumnTypeDataPair(type, convertedList)); } catch (ClassCastException ex) { throw new BindException("Value in binding DTO could not be cast to a list", BindException.Type.SERIALIZATION); } } return columns; }` | Convert bind map to a list of values for each column. Perform necessary type casts and invariant checks. | 399 | 20 |
| 151,816 | `private List<String[]> buildRows(List<ColumnTypeDataPair> columns) throws BindException { List<String[]> rows = new ArrayList<>(); int numColumns = columns.size(); /* columns should have binds */ if (columns.get(0).data.isEmpty()) { throw new BindException("No binds found in first column", BindException.Type.SERIALIZATION); } int numRows = columns.get(0).data.size(); /* every column should have the same number of binds */ for (int i = 0; i < numColumns; i++) { int iNumRows = columns.get(i).data.size(); if (columns.get(i).data.size() != numRows) { throw new BindException(String.format("Column %d has a different number of binds (%d) than column 1 (%d)", i, iNumRows, numRows), BindException.Type.SERIALIZATION); } } for (int rowIdx = 0; rowIdx < numRows; rowIdx++) { String[] row = new String[numColumns]; for (int colIdx = 0; colIdx < numColumns; colIdx++) { row[colIdx] = columns.get(colIdx).data.get(rowIdx); } rows.add(row); } return rows; }` | Transpose a list of columns and their values to a list of rows | 335 | 14 |
| 151,817 | `private void writeRowsToCSV(List<String[]> rows) throws BindException { int numBytes; int rowNum = 0; int fileCount = 0; while (rowNum < rows.size()) { File file = getFile(++fileCount); try (OutputStream out = openFile(file)) { /* until we reach the last row or the file is too big, write to the file */ numBytes = 0; while (numBytes < fileSize && rowNum < rows.size()) { byte[] csv = createCSVRecord(rows.get(rowNum)); numBytes += csv.length; out.write(csv); rowNum++; } } catch (IOException ex) { throw new BindException(String.format("Exception encountered while writing to file: %s", ex.getMessage()), BindException.Type.SERIALIZATION); } } }` | Write the list of rows to compressed CSV files in the temporary directory | 206 | 13 |
| 151,818 | `private OutputStream openFile(File file) throws BindException { try { return new GZIPOutputStream(new FileOutputStream(file)); } catch (IOException ex) { throw new BindException(String.format("Failed to create output file %s: %s", file.toString(), ex.getMessage()), BindException.Type.SERIALIZATION); } }` | Create a new output stream for the given file | 91 | 9 |
| 151,819 | `private byte[] createCSVRecord(String[] data) { StringBuilder sb = new StringBuilder(1024); for (int i = 0; i < data.length; ++i) { if (i > 0) { sb.append(','); } sb.append(SnowflakeType.escapeForCSV(data[i])); } sb.append('\n'); return sb.toString().getBytes(UTF_8); }` | Serialize a row to CSV. Duplicated from the StreamLoader class. | 110 | 13 |
| 151,820 | `private String getPutStmt(String bindDir, String stagePath) { return String.format(PUT_STMT, bindDir, File.separator, stagePath).replaceAll("\\\\", "\\\\\\\\"); }` | Build PUT statement string. Handle filesystem differences and escaping backslashes (see the sketch after the table). | 52 | 15 |
| 151,821 | `private void putBinds() throws BindException { createStageIfNeeded(); String putStatement = getPutStmt(bindDir.toString(), stagePath); for (int i = 0; i < PUT_RETRY_COUNT; i++) { try { SFStatement statement = new SFStatement(session); SFBaseResultSet putResult = statement.execute(putStatement, null, null); putResult.next(); /* metadata is 0-based, result set is 1-based */ int column = putResult.getMetaData().getColumnIndex(SnowflakeFileTransferAgent.UploadColumns.status.name()) + 1; String status = putResult.getString(column); if (SnowflakeFileTransferAgent.ResultStatus.UPLOADED.name().equals(status)) { return; /* success! */ } logger.debug("PUT statement failed. The response had status %s.", status); } catch (SFException \| SQLException ex) { logger.debug("Exception encountered during PUT operation. ", ex); } } /* if we haven't returned (on success), throw exception */ throw new BindException("Failed to PUT files to stage.", BindException.Type.UPLOAD); }` | Upload binds from local file to stage | 284 | 7 |
| 151,822 | `private void createStageIfNeeded() throws BindException { if (session.getArrayBindStage() != null) { return; } synchronized (session) { /* another thread may have created the stage by the time we enter this block */ if (session.getArrayBindStage() == null) { try { SFStatement statement = new SFStatement(session); statement.execute(CREATE_STAGE_STMT, null, null); session.setArrayBindStage(STAGE_NAME); } catch (SFException \| SQLException ex) { /* to avoid repeated failures to create stage, disable array bind stage optimization if we fail to create stage for some reason */ session.setArrayBindStageThreshold(0); throw new BindException(String.format("Failed to create temporary stage for array binds. %s", ex.getMessage()), BindException.Type.UPLOAD); } } } }` | Check whether the session's temporary stage has been created, and create it if not. | 203 | 16 |
| 151,823 | `public static int arrayBindValueCount(Map<String, ParameterBindingDTO> bindValues) { if (!isArrayBind(bindValues)) { return 0; } else { ParameterBindingDTO bindSample = bindValues.values().iterator().next(); List<String> bindSampleValues = (List<String>) bindSample.getValue(); return bindValues.size() * bindSampleValues.size(); } }` | Compute the number of array bind values in the given bind map | 105 | 13 |
| 151,824 | `public static boolean isArrayBind(Map<String, ParameterBindingDTO> bindValues) { if (bindValues == null \|\| bindValues.size() == 0) { return false; } ParameterBindingDTO bindSample = bindValues.values().iterator().next(); return bindSample.getValue() instanceof List; }` | Return whether the bind map uses array binds | 80 | 8 |
| 151,825 | `public static StorageObjectSummary createFromS3ObjectSummary(S3ObjectSummary objSummary) { return new StorageObjectSummary(objSummary.getBucketName(), objSummary.getKey(), /* S3 ETag is not always MD5, but since this code path is only used to skip duplicate files in the PUT command, it's not critical to guarantee that it's MD5 */ objSummary.getETag(), objSummary.getSize()); }` | Constructs a StorageObjectSummary object from the S3 equivalent S3ObjectSummary | 104 | 17 |
| 151,826 | `public static StorageObjectSummary createFromAzureListBlobItem(ListBlobItem listBlobItem) throws StorageProviderException { String location, key, md5; long size; /* Retrieve the BLOB properties that we need for the Summary. Azure Storage stores metadata inside each BLOB, therefore the listBlobItem will point us to the underlying BLOB and we will get the properties from it. During the process the Storage Client could fail, hence we need to wrap the get calls in try/catch and handle possible exceptions */ try { location = listBlobItem.getContainer().getName(); CloudBlob cloudBlob = (CloudBlob) listBlobItem; key = cloudBlob.getName(); BlobProperties blobProperties = cloudBlob.getProperties(); /* the content md5 property is not always the actual md5 of the file, but here it's only used for skipping files on the PUT command, hence it's ok */ md5 = convertBase64ToHex(blobProperties.getContentMD5()); size = blobProperties.getLength(); } catch (URISyntaxException \| StorageException ex) { /* This should only happen if somehow we got here with an invalid URI (it should never happen), or there is a Storage service error. Unlike S3, Azure fetches metadata from the BLOB itself, and it's a lazy operation */ throw new StorageProviderException(ex); } return new StorageObjectSummary(location, key, md5, size); }` | Constructs a StorageObjectSummary object from Azure BLOB properties. Uses factory methods to create these objects since Azure can throw while retrieving the BLOB properties. | 339 | 30 |
| 151,827 | `private boolean isSnowflakeAuthenticator() { String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR); PrivateKey privateKey = (PrivateKey) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY); return (authenticator == null && privateKey == null) \|\| ClientAuthnDTO.AuthenticatorType.SNOWFLAKE.name().equalsIgnoreCase(authenticator); }` | If authenticator is null and private key is specified, JDBC will assume key pair authentication | 106 | 18 |
| 151,828 | `boolean isExternalbrowserAuthenticator() { String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR); return ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER.name().equalsIgnoreCase(authenticator); }` | Returns true if authenticator is EXTERNALBROWSER. | 67 | 13 |
| 151,829 | `synchronized void renewSession(String prevSessionToken) throws SFException, SnowflakeSQLException { if (sessionToken != null && !sessionToken.equals(prevSessionToken)) { logger.debug("not renew session because session token has not been updated."); return; } SessionUtil.LoginInput loginInput = new SessionUtil.LoginInput(); loginInput.setServerUrl((String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL)).setSessionToken(sessionToken).setMasterToken(masterToken).setIdToken(idToken).setLoginTimeout(loginTimeout).setDatabaseName(this.getDatabase()).setSchemaName(this.getSchema()).setRole(this.getRole()).setWarehouse(this.getWarehouse()); SessionUtil.LoginOutput loginOutput = SessionUtil.renewSession(loginInput); if (loginOutput.isUpdatedByTokenRequestIssue()) { setCurrentObjects(loginInput, loginOutput); } sessionToken = loginOutput.getSessionToken(); masterToken = loginOutput.getMasterToken(); }` | A helper function to call global service and renew session. | 267 | 11 |
| 151,830 | `protected void startHeartbeatForThisSession() { if (enableHeartbeat && !Strings.isNullOrEmpty(masterToken)) { logger.debug("start heartbeat, master token validity: " + masterTokenValidityInSeconds); HeartbeatBackground.getInstance().addSession(this, masterTokenValidityInSeconds, this.heartbeatFrequency); } else { logger.debug("heartbeat not enabled for the session"); } }` | Start heartbeat for this session | 103 | 5 |
| 151,831 | `protected void stopHeartbeatForThisSession() { if (enableHeartbeat && !Strings.isNullOrEmpty(masterToken)) { logger.debug("stop heartbeat"); HeartbeatBackground.getInstance().removeSession(this); } else { logger.debug("heartbeat not enabled for the session"); } }` | Stop heartbeat for this session | 76 | 5 |
| 151,832 | `protected void heartbeat() throws SFException, SQLException { logger.debug(" public void heartbeat()"); if (isClosed) { return; } HttpPost postRequest = null; String requestId = UUID.randomUUID().toString(); boolean retry = false; /* the loop for retrying if it runs into session expiration */ do { try { URIBuilder uriBuilder; uriBuilder = new URIBuilder((String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL)); uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, requestId); uriBuilder.setPath(SF_PATH_SESSION_HEARTBEAT); postRequest = new HttpPost(uriBuilder.build()); /* remember the session token; in case it expires we need to renew the session, but only when no other thread has renewed it */ String prevSessionToken = sessionToken; postRequest.setHeader(SF_HEADER_AUTHORIZATION, SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SF_HEADER_TOKEN_TAG + "=\"" + prevSessionToken + "\""); logger.debug("Executing heartbeat request: {}", postRequest.toString()); /* the following will retry transient network issues */ String theResponse = HttpUtil.executeRequest(postRequest, SF_HEARTBEAT_TIMEOUT, 0, null); JsonNode rootNode; logger.debug("connection heartbeat response: {}", theResponse); rootNode = mapper.readTree(theResponse); /* check the response to see if it is a session expiration response */ if (rootNode != null && (Constants.SESSION_EXPIRED_GS_CODE == rootNode.path("code").asInt())) { logger.debug("renew session and retry"); this.renewSession(prevSessionToken); retry = true; continue; } SnowflakeUtil.checkErrorAndThrowException(rootNode); /* success */ retry = false; } catch (Throwable ex) { /* for snowflake exception, just rethrow it */ if (ex instanceof SnowflakeSQLException) { throw (SnowflakeSQLException) ex; } logger.error("unexpected exception", ex); throw (SFException) IncidentUtil.generateIncidentV2WithException(this, new SFException(ErrorCode.INTERNAL_ERROR, IncidentUtil.oneLiner("unexpected exception", ex)), null, requestId); } } while (retry); }` | Send heartbeat for the session | 597 | 5 |
| 151,833 | `void setCurrentObjects(SessionUtil.LoginInput loginInput, SessionUtil.LoginOutput loginOutput) { this.sessionToken = loginOutput.sessionToken; /* used to run the commands. */ runInternalCommand("USE ROLE IDENTIFIER(?)", loginInput.getRole()); runInternalCommand("USE WAREHOUSE IDENTIFIER(?)", loginInput.getWarehouse()); runInternalCommand("USE DATABASE IDENTIFIER(?)", loginInput.getDatabaseName()); runInternalCommand("USE SCHEMA IDENTIFIER(?)", loginInput.getSchemaName()); /* This ensures the session returns the current objects and refreshes the local cache. */ SFBaseResultSet result = runInternalCommand("SELECT ?", "1"); /* refresh the current objects */ loginOutput.setSessionDatabase(this.database); loginOutput.setSessionSchema(this.schema); loginOutput.setSessionWarehouse(this.warehouse); loginOutput.setSessionRole(this.role); loginOutput.setIdToken(loginInput.getIdToken()); /* no common parameter is updated. */ if (result != null) { loginOutput.setCommonParams(result.parameters); } }` | Sets the current objects if the session is not up to date. It can happen if the session is created by the id token, which doesn't carry the current objects. | 288 | 34 |
| 151,834 | `private void executeImmediate(String stmtText) throws SQLException { /* execute the statement and auto-close it as well */ try (final Statement statement = this.createStatement()) { statement.execute(stmtText); } }` | Execute a statement where the result isn't needed and the statement is closed before this method returns | 53 | 19 |
| 151,835 | `@Override public Statement createStatement() throws SQLException { raiseSQLExceptionIfConnectionIsClosed(); Statement stmt = createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); openStatements.add(stmt); return stmt; }` | Create a statement | 74 | 3 |
| 151,836 | `@Override public void setTransactionIsolation(int level) throws SQLException { logger.debug("void setTransactionIsolation(int level), level = {}", level); raiseSQLExceptionIfConnectionIsClosed(); if (level == Connection.TRANSACTION_NONE \|\| level == Connection.TRANSACTION_READ_COMMITTED) { this.transactionIsolation = level; } else { throw new SQLFeatureNotSupportedException("Transaction Isolation " + level + " not supported.", FEATURE_UNSUPPORTED.getSqlState(), FEATURE_UNSUPPORTED.getMessageCode()); } }` | Sets the transaction isolation level. | 146 | 7 |
| 151,837 | `public InputStream downloadStream(String stageName, String sourceFileName, boolean decompress) throws SQLException { logger.debug("download data to stream: stageName={}" + ", sourceFileName={}", stageName, sourceFileName); if (Strings.isNullOrEmpty(stageName)) { throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "stage name is null or empty"); } if (Strings.isNullOrEmpty(sourceFileName)) { throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "source file name is null or empty"); } SnowflakeStatementV1 stmt = new SnowflakeStatementV1(this, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT); StringBuilder getCommand = new StringBuilder(); getCommand.append("get "); if (!stageName.startsWith("@")) { getCommand.append("@"); } getCommand.append(stageName); getCommand.append("/"); if (sourceFileName.startsWith("/")) { sourceFileName = sourceFileName.substring(1); } getCommand.append(sourceFileName); /* this is a fake path, used to form the GET query and retrieve stage info; no file will be downloaded to this location */ getCommand.append(" file:///tmp/ /*jdbc download stream*/"); SnowflakeFileTransferAgent transferAgent = new SnowflakeFileTransferAgent(getCommand.toString(), sfSession, stmt.getSfStatement()); InputStream stream = transferAgent.downloadStream(sourceFileName); if (decompress) { try { return new GZIPInputStream(stream); } catch (IOException ex) { throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), ex.getMessage()); } } else { return stream; } }` | Download file from the given stage and return an input stream | 516 | 11 |
| 151,838 | `public static InputStream decryptStream(InputStream inputStream, String keyBase64, String ivBase64, RemoteStoreFileEncryptionMaterial encMat) throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidKeyException, BadPaddingException, IllegalBlockSizeException, InvalidAlgorithmParameterException { byte[] decodedKey = Base64.decode(encMat.getQueryStageMasterKey()); byte[] keyBytes = Base64.decode(keyBase64); byte[] ivBytes = Base64.decode(ivBase64); SecretKey queryStageMasterKey = new SecretKeySpec(decodedKey, 0, decodedKey.length, AES); Cipher keyCipher = Cipher.getInstance(KEY_CIPHER); keyCipher.init(Cipher.DECRYPT_MODE, queryStageMasterKey); byte[] fileKeyBytes = keyCipher.doFinal(keyBytes); SecretKey fileKey = new SecretKeySpec(fileKeyBytes, 0, decodedKey.length, AES); Cipher dataCipher = Cipher.getInstance(FILE_CIPHER); IvParameterSpec ivy = new IvParameterSpec(ivBytes); dataCipher.init(Cipher.DECRYPT_MODE, fileKey, ivy); return new CipherInputStream(inputStream, dataCipher); }` | Decrypt an InputStream (see the sketch after the table) | 295 | 5 |
| 151,839 | `synchronized void startFlusher() { /* Create a new scheduled executor service with a thread factory that creates daemonized threads; this way if the user doesn't exit nicely the JVM Runtime won't hang */ flusher = Executors.newScheduledThreadPool(1, new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread t = Executors.defaultThreadFactory().newThread(r); t.setDaemon(true); return t; } }); flusher.scheduleWithFixedDelay(new QueueFlusher(), 0, flushPeriodMs, TimeUnit.MILLISECONDS); }` | Creates and runs a new QueueFlusher thread | 153 | 11 |
| 151,840 | `public void dumpLogBuffer(String identifier) { final ArrayList<LogRecord> logBufferCopy; final PrintWriter logDumper; final OutputStream outStream; Formatter formatter = this.getFormatter(); /* Check if compression of dump file is enabled */ boolean disableCompression = System.getProperty(DISABLE_DUMP_COMPR_PROP) != null; /* If no identifying factor (e.g., an incident id) was provided, get one */ if (identifier == null) { identifier = EventUtil.getDumpFileId(); } /* Do some sanity checking to make sure we're not flooding the user's disk with dump files */ cleanupSfDumps(true); String logDumpPath = logDumpPathPrefix + "/" + LOG_DUMP_FILE_NAME + identifier + LOG_DUMP_FILE_EXT; if (!disableCompression) { logDumpPath += LOG_DUMP_COMP_EXT; } logger.debug("EventHandler dumping log buffer to {}", logDumpPath); /* Copy logBuffer because this is potentially long running. */ synchronized (this) { logBufferCopy = new ArrayList<>(logBuffer); logBuffer.clear(); } File outputFile = new File(logDumpPath); /* Because log files could potentially be very large, we should never open them in append mode. It's rare that this should happen anyway... */ try { /* If the dump path doesn't already exist, create it. */ if (outputFile.getParentFile() != null) { outputFile.getParentFile().mkdirs(); } outStream = disableCompression ? new FileOutputStream(logDumpPath, false) : new GZIPOutputStream(new FileOutputStream(logDumpPath, false)); logDumper = new PrintWriter(outStream, true); } catch (IOException exc) { /* Not much to do here; can't dump logs so exit out. */ logger.debug("Log dump failed, exception: {}", exc.getMessage()); return; } /* Iterate over log entries, format them, then dump them. */ for (LogRecord entry : logBufferCopy) { logDumper.write(formatter != null ? formatter.format(entry) : entry.getMessage()); } /* Clean up */ logDumper.flush(); logDumper.close(); }` | Dumps the contents of the in-memory log buffer to disk and clears the buffer. | 532 | 18 |
| 151,841 | `protected void cleanupSfDumps(boolean deleteOldest) { /* Check what the maximum number of dump files and the max allowable aggregate dump file size is. */ int maxDumpFiles = System.getProperty(MAX_NUM_DUMP_FILES_PROP) != null ? Integer.valueOf(System.getProperty(MAX_NUM_DUMP_FILES_PROP)) : DEFAULT_MAX_DUMP_FILES; int maxDumpDirSizeMB = System.getProperty(MAX_SIZE_DUMPS_MB_PROP) != null ? Integer.valueOf(System.getProperty(MAX_SIZE_DUMPS_MB_PROP)) : DEFAULT_MAX_DUMPDIR_SIZE_MB; File dumpDir = new File(logDumpPathPrefix); long dirSizeBytes = 0; if (dumpDir.listFiles() == null) { return; } /* Keep a sorted list of files by size as we go in case we need to delete some */ TreeSet<File> fileList = new TreeSet<>(new Comparator<File>() { @Override public int compare(File a, File b) { return a.length() < b.length() ? -1 : 1; } }); /* Loop over files in this directory and get rid of old ones while accumulating the total size */ for (File file : dumpDir.listFiles()) { if ((!file.getName().startsWith(LOG_DUMP_FILE_NAME) && !file.getName().startsWith(IncidentUtil.INC_DUMP_FILE_NAME)) \|\| (System.currentTimeMillis() - file.lastModified() > FILE_EXPN_TIME_MS && file.delete())) { continue; } dirSizeBytes += file.length(); fileList.add(file); } /* If we're exceeding our max allotted disk usage, cut some stuff out; else if we need to make space for a new dump delete the oldest. */ if (dirSizeBytes >= ((long) maxDumpDirSizeMB << 20)) { /* While we take up more than half the allotted disk usage, keep deleting. */ for (File file : fileList) { if (dirSizeBytes < ((long) maxDumpDirSizeMB << 19)) { break; } long victimSize = file.length(); if (file.delete()) { dirSizeBytes -= victimSize; } } } else if (deleteOldest && fileList.size() >= maxDumpFiles) { fileList.first().delete(); } }` | Function to remove old Snowflake dump files to make room for new ones. | 586 | 16 |
| 151,842 | `private synchronized boolean needsToThrottle(String signature) { AtomicInteger sigCount; /* Are we already throttling this signature? */ if (throttledIncidents.containsKey(signature)) { /* Lazily check if it's time to unthrottle */ if (throttledIncidents.get(signature).plusHours(THROTTLE_DURATION_HRS).compareTo(DateTime.now()) <= 0) { /* Start counting the # of times we've seen this again & stop throttling. */ throttledIncidents.remove(signature); incidentCounter.put(signature, new AtomicInteger(1)); return false; } return true; } sigCount = incidentCounter.get(signature); if (sigCount == null) { /* If there isn't an entry to track this signature, make one. */ incidentCounter.put(signature, sigCount = new AtomicInteger(0)); } else if (sigCount.get() + 1 >= INCIDENT_THROTTLE_LIMIT_PER_HR) { /* We've hit the limit so throttle. */ incidentCounter.remove(signature); throttledIncidents.put(signature, DateTime.now()); return true; } sigCount.incrementAndGet(); return false; }` | Checks to see if the reporting of an incident should be throttled due to the number of times the signature has been seen in the last hour | 277 | 29 |
| 151,843 | `@Override public void start() { LOGGER.debug("Start Loading"); /* validate parameters */ validateParameters(); if (_op == null) { this.abort(new ConnectionError("Loader started with no operation")); return; } initDateFormats(); initQueues(); if (_is_first_start_call) { /* is this the first start call? */ try { if (_startTransaction) { LOGGER.debug("Begin Transaction"); _processConn.createStatement().execute("begin transaction"); } else { LOGGER.debug("No Transaction started"); } } catch (SQLException ex) { abort(new Loader.ConnectionError("Failed to start Transaction", Utils.getCause(ex))); } if (_truncate) { truncateTargetTable(); } try { if (_before != null) { LOGGER.debug("Running Execute Before SQL"); _processConn.createStatement().execute(_before); } } catch (SQLException ex) { abort(new Loader.ConnectionError(String.format("Execute Before SQL failed to run: %s", _before), Utils.getCause(ex))); } } }` | Starts the loader | 292 | 4 |
| 151,844 | `private void flushQueues() { /* Terminate data loading thread. */ LOGGER.debug("Flush Queues"); try { _queueData.put(new byte[0]); _thread.join(10000); if (_thread.isAlive()) { _thread.interrupt(); } } catch (Exception ex) { String msg = "Failed to join StreamLoader queue: " + ex.getMessage(); LOGGER.error(msg, ex); throw new DataError(msg, Utils.getCause(ex)); } /* Put last stage on queue */ terminate(); /* wait for the processing to finish */ _put.join(); _process.join(); if (_aborted.get()) { /* Loader was aborted due to an exception; it was rolled back at that time. LOGGER.log(Level.WARNING, "Loader had been previously aborted by error", _abortCause); */ throw _abortCause; } }` | Flushes data by joining PUT and PROCESS queues | 221 | 11 |
| 151,845 | `@Override public void resetOperation(Operation op) { LOGGER.debug("Reset Loader"); if (op.equals(_op)) { /* no-op */ return; } LOGGER.debug("Operation is changing from {} to {}", _op, op); _op = op; if (_stage != null) { try { queuePut(_stage); } catch (InterruptedException ex) { LOGGER.error(_stage.getId(), ex); } } _stage = new BufferStage(this, _op, _csvFileBucketSize, _csvFileSize); }` | If the operation changes, the existing stage needs to be scheduled for processing. | 139 | 12 |
| 151,846 | `void overrideCacheFile(File newCacheFile) { this.cacheFile = newCacheFile; this.cacheDir = newCacheFile.getParentFile(); this.baseCacheFileName = newCacheFile.getName(); }` | Override the cache file. | 52 | 5 |
| 151,847 | `JsonNode readCacheFile() { if (cacheFile == null \|\| !this.checkCacheLockFile()) { /* no cache or the cache is not valid. */ return null; } try { if (!cacheFile.exists()) { LOGGER.debug("Cache file doesn't exist. File: {}", cacheFile); return null; } try (Reader reader = new InputStreamReader(new FileInputStream(cacheFile), DEFAULT_FILE_ENCODING)) { return OBJECT_MAPPER.readTree(reader); } } catch (IOException ex) { LOGGER.debug("Failed to read the cache file. No worry. File: {}, Err: {}", cacheFile, ex); } return null; }` | Reads the cache file. | 168 | 6 |
| 151,848 | `private boolean tryLockCacheFile() { int cnt = 0; boolean locked = false; while (cnt < 100 && !(locked = lockCacheFile())) { try { Thread.sleep(100); } catch (InterruptedException ex) { /* doesn't matter */ } ++cnt; } if (!locked) { LOGGER.debug("Failed to lock the cache file."); } return locked; }` | Tries to lock the cache file | 94 | 7 |
| 151,849 | `private void verifyLocalFilePath(String localFilePathFromGS) throws SnowflakeSQLException { if (command == null) { logger.error("null command"); return; } if (command.indexOf(FILE_PROTOCOL) < 0) { logger.error("file:// prefix not found in command: {}", command); return; } int localFilePathBeginIdx = command.indexOf(FILE_PROTOCOL) + FILE_PROTOCOL.length(); boolean isLocalFilePathQuoted = (localFilePathBeginIdx > FILE_PROTOCOL.length()) && (command.charAt(localFilePathBeginIdx - 1 - FILE_PROTOCOL.length()) == '\''); /* the ending index is exclusive */ int localFilePathEndIdx = 0; String localFilePath = ""; if (isLocalFilePathQuoted) { /* look for the matching quote */ localFilePathEndIdx = command.indexOf("'", localFilePathBeginIdx); if (localFilePathEndIdx > localFilePathBeginIdx) { localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx); } /* unescape backslashes to match the file name from GS */ localFilePath = localFilePath.replaceAll("\\\\\\\\", "\\\\"); } else { /* look for the first space, new line, or semicolon */ List<Integer> indexList = new ArrayList<>(); char[] delimiterChars = {' ', '\n', ';'}; for (int i = 0; i < delimiterChars.length; i++) { int charIndex = command.indexOf(delimiterChars[i], localFilePathBeginIdx); if (charIndex != -1) { indexList.add(charIndex); } } localFilePathEndIdx = indexList.isEmpty() ? -1 : Collections.min(indexList); if (localFilePathEndIdx > localFilePathBeginIdx) { localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx); } else if (localFilePathEndIdx == -1) { localFilePath = command.substring(localFilePathBeginIdx); } } if (!localFilePath.isEmpty() && !localFilePath.equals(localFilePathFromGS)) { throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Unexpected local file path from GS. From GS: " + localFilePathFromGS + ", expected: " + localFilePath); } else if (localFilePath.isEmpty()) { logger.debug("fail to parse local file path from command: {}", command); } else { logger.trace("local file path from GS matches local parsing: {}", localFilePath); } }` | A helper method to verify that the local file path from GS matches what's parsed locally. This is for security purposes as documented in SNOW-15153. | 679 | 32 |
| 151,850 | `private void uploadStream() throws SnowflakeSQLException { try { threadExecutor = SnowflakeUtil.createDefaultExecutorService("sf-stream-upload-worker-", 1); RemoteStoreFileEncryptionMaterial encMat = encryptionMaterial.get(0); if (commandType == CommandType.UPLOAD) { threadExecutor.submit(getUploadFileCallable(stageInfo, SRC_FILE_NAME_FOR_STREAM, fileMetadataMap.get(SRC_FILE_NAME_FOR_STREAM), (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS) ? null : storageFactory.createClient(stageInfo, parallel, encMat), connection, command, sourceStream, true, parallel, null, encMat)); } else if (commandType == CommandType.DOWNLOAD) { throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode()); } threadExecutor.shutdown(); try { /* wait for all threads to complete without timeout */ threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); } catch (InterruptedException ex) { throw new SnowflakeSQLException(SqlState.QUERY_CANCELED, ErrorCode.INTERRUPTED.getMessageCode()); } logger.debug("Done with uploading from a stream"); } finally { if (threadExecutor != null) { threadExecutor.shutdownNow(); threadExecutor = null; } } }` | Helper to upload data from a stream | 361 | 7 |
| 151,851 | `InputStream downloadStream(String fileName) throws SnowflakeSQLException { if (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS) { logger.error("downloadStream function doesn't support local file system"); throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "downloadStream function only supported in remote stages"); } remoteLocation remoteLocation = extractLocationAndPath(stageInfo.getLocation()); String stageFilePath = fileName; if (!remoteLocation.path.isEmpty()) { stageFilePath = SnowflakeUtil.concatFilePathNames(remoteLocation.path, fileName, "/"); } RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(fileName); return storageFactory.createClient(stageInfo, parallel, encMat).downloadToStream(connection, command, parallel, remoteLocation.location, stageFilePath, stageInfo.getRegion()); }` | Download a file from remote and return an input stream | 241 | 10 |
| 151,852 | `private void downloadFiles() throws SnowflakeSQLException { try { threadExecutor = SnowflakeUtil.createDefaultExecutorService("sf-file-download-worker-", 1); for (String srcFile : sourceFiles) { FileMetadata fileMetadata = fileMetadataMap.get(srcFile); /* Check if the result status is already set so that we don't need to download it */ if (fileMetadata.resultStatus != ResultStatus.UNKNOWN) { logger.debug("Skipping {}, status: {}, details: {}", srcFile, fileMetadata.resultStatus, fileMetadata.errorDetails); continue; } RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(srcFile); threadExecutor.submit(getDownloadFileCallable(stageInfo, srcFile, localLocation, fileMetadataMap, (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS) ? null : storageFactory.createClient(stageInfo, parallel, encMat), connection, command, parallel, encMat)); logger.debug("submitted download job for: {}", srcFile); } threadExecutor.shutdown(); try { /* wait for all threads to complete without timeout */ threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); } catch (InterruptedException ex) { throw new SnowflakeSQLException(SqlState.QUERY_CANCELED, ErrorCode.INTERRUPTED.getMessageCode()); } logger.debug("Done with downloading"); } finally { if (threadExecutor != null) { threadExecutor.shutdownNow(); threadExecutor = null; } } }` | Helper to download files from remote | 392 | 6 |
| 151,853 | `private void uploadFiles(Set<String> fileList, int parallel) throws SnowflakeSQLException { try { threadExecutor = SnowflakeUtil.createDefaultExecutorService("sf-file-upload-worker-", parallel); for (String srcFile : fileList) { FileMetadata fileMetadata = fileMetadataMap.get(srcFile); /* Check if the result status is already set so that we don't need to upload it */ if (fileMetadata.resultStatus != ResultStatus.UNKNOWN) { logger.debug("Skipping {}, status: {}, details: {}", srcFile, fileMetadata.resultStatus, fileMetadata.errorDetails); continue; } /* For small files, we upload files in parallel, so we don't want the remote store uploader to upload parts in parallel for each file. For large files, we upload them in serial, and we want the remote store uploader to upload parts in parallel for each file. This is the reason for the parallel value. */ File srcFileObj = new File(srcFile); threadExecutor.submit(getUploadFileCallable(stageInfo, srcFile, fileMetadata, (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS) ? null : storageFactory.createClient(stageInfo, parallel, encryptionMaterial.get(0)), connection, command, null, false, (parallel > 1 ? 1 : this.parallel), srcFileObj, encryptionMaterial.get(0))); logger.debug("submitted copy job for: {}", srcFile); } /* shut down the thread executor */ threadExecutor.shutdown(); try { /* wait for all threads to complete without timeout */ threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); } catch (InterruptedException ex) { throw new SnowflakeSQLException(SqlState.QUERY_CANCELED, ErrorCode.INTERRUPTED.getMessageCode()); } logger.debug("Done with uploading"); } finally { /* shut down the thread pool in any case */ if (threadExecutor != null) { threadExecutor.shutdownNow(); threadExecutor = null; } } }` | This method creates a thread pool based on the requested number of threads and uploads the files using the thread pool. | 503 | 21 |
| 151,854 | `static public Set<String> expandFileNames(String[] filePathList) throws SnowflakeSQLException { Set<String> result = new HashSet<String>(); /* a location-to-file-pattern map so that we only need to list the same directory once when it appears multiple times. */ Map<String, List<String>> locationToFilePatterns; locationToFilePatterns = new HashMap<String, List<String>>(); String cwd = System.getProperty("user.dir"); for (String path : filePathList) { /* replace ~ with user home */ path = path.replace("~", System.getProperty("user.home")); /* the user may also specify files relative to the current directory; add the current path if that is the case */ if (!(new File(path)).isAbsolute()) { logger.debug("Adding current working dir to relative file path."); path = cwd + localFSFileSep + path; } /* check if the path contains any wildcards */ if (!path.contains("*") && !path.contains("?") && !(path.contains("[") && path.contains("]"))) { /* this file path doesn't have any wildcard, so we don't need to expand it */ result.add(path); } else { /* get the directory path */ int lastFileSepIndex = path.lastIndexOf(localFSFileSep); /* SNOW-15203: if we don't find a default file sep, try "/" if it is not the default file sep. */ if (lastFileSepIndex < 0 && !"/".equals(localFSFileSep)) { lastFileSepIndex = path.lastIndexOf("/"); } String loc = path.substring(0, lastFileSepIndex + 1); String filePattern = path.substring(lastFileSepIndex + 1); List<String> filePatterns = locationToFilePatterns.get(loc); if (filePatterns == null) { filePatterns = new ArrayList<String>(); locationToFilePatterns.put(loc, filePatterns); } filePatterns.add(filePattern); } } /* For each location, list files and match against the patterns */ for (Map.Entry<String, List<String>> entry : locationToFilePatterns.entrySet()) { try { java.io.File dir = new java.io.File(entry.getKey()); logger.debug("Listing files under: {} with patterns: {}", entry.getKey(), entry.getValue().toString()); /* The following currently ignores sub directories */ for (Object file : FileUtils.listFiles(dir, new WildcardFileFilter(entry.getValue()), null)) { result.add(((java.io.File) file).getCanonicalPath()); } } catch (Exception ex) { throw new SnowflakeSQLException(ex, SqlState.DATA_EXCEPTION, ErrorCode.FAIL_LIST_FILES.getMessageCode(), "Exception: " + ex.getMessage() + ", Dir=" + entry.getKey() + ", Patterns=" + entry.getValue().toString()); } } logger.debug("Expanded file paths: "); for (String filePath : result) { logger.debug("file: {}", filePath); } return result; }` | Process a list of file paths, expanding any wildcards to generate the list of paths for all files matched by the wildcards | 793 | 29 |
| 151,855 | `private FileCompressionType mimeTypeToCompressionType(String mimeTypeStr) throws MimeTypeParseException { MimeType mimeType = null; if (mimeTypeStr != null) { mimeType = new MimeType(mimeTypeStr); } if (mimeType != null && mimeType.getSubType() != null) { return FileCompressionType.lookupByMimeSubType(mimeType.getSubType().toLowerCase()); } return null; }` | Derive compression type from mime type | 118 | 8 |
| 151,856 | `private String getMimeTypeFromFileExtension(String srcFile) { String srcFileLowCase = srcFile.toLowerCase(); for (FileCompressionType compressionType : FileCompressionType.values()) { if (srcFileLowCase.endsWith(compressionType.fileExtension)) { return compressionType.mimeType + "/" + compressionType.mimeSubTypes.get(0); } } return null; }` | Derive mime type from file extension | 99 | 8 |
| 151,857 | `static public remoteLocation extractLocationAndPath(String stageLocationPath) { String location = stageLocationPath; String path = ""; /* split stage location as location name and path */ if (stageLocationPath.contains("/")) { location = stageLocationPath.substring(0, stageLocationPath.indexOf("/")); path = stageLocationPath.substring(stageLocationPath.indexOf("/") + 1); } return new remoteLocation(location, path); }` | A small helper for extracting location name and path from full location path | 107 | 13 |
| 151,858 | `@Override public List<SnowflakeColumnMetadata> describeColumns() throws Exception { return SnowflakeUtil.describeFixedViewColumns(commandType == CommandType.UPLOAD ? (showEncryptionParameter ? UploadCommandEncryptionFacade.class : UploadCommandFacade.class) : (showEncryptionParameter ? DownloadCommandEncryptionFacade.class : DownloadCommandFacade.class)); }` | Describe the metadata of a fixed view. | 90 | 9 |
| 151,859 | `private void populateStatusRows() { for (Map.Entry<String, FileMetadata> entry : fileMetadataMap.entrySet()) { FileMetadata fileMetadata = entry.getValue(); if (commandType == CommandType.UPLOAD) { statusRows.add(showEncryptionParameter ? new UploadCommandEncryptionFacade(fileMetadata.srcFileName, fileMetadata.destFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.srcFileSize, fileMetadata.destFileSize, (fileMetadata.srcCompressionType == null) ? "NONE" : fileMetadata.srcCompressionType.name(), (fileMetadata.destCompressionType == null) ? "NONE" : fileMetadata.destCompressionType.name(), fileMetadata.isEncrypted) : new UploadCommandFacade(fileMetadata.srcFileName, fileMetadata.destFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.srcFileSize, fileMetadata.destFileSize, (fileMetadata.srcCompressionType == null) ? "NONE" : fileMetadata.srcCompressionType.name(), (fileMetadata.destCompressionType == null) ? "NONE" : fileMetadata.destCompressionType.name())); } else if (commandType == CommandType.DOWNLOAD) { statusRows.add(showEncryptionParameter ? new DownloadCommandEncryptionFacade(fileMetadata.srcFileName.startsWith("/") ? fileMetadata.srcFileName.substring(1) : fileMetadata.srcFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.destFileSize, fileMetadata.isEncrypted) : new DownloadCommandFacade(fileMetadata.srcFileName.startsWith("/") ? fileMetadata.srcFileName.substring(1) : fileMetadata.srcFileName, fileMetadata.resultStatus.name(), fileMetadata.errorDetails, fileMetadata.destFileSize)); } } /* we sort the result if the connection is in sorting mode */ Object sortProperty = null; sortProperty = connection.getSFSessionProperty("sort"); boolean sortResult = sortProperty != null && (Boolean) sortProperty; if (sortResult) { Comparator comparator = (commandType == CommandType.UPLOAD) ? new Comparator<Object>() { public int compare(Object a, Object b) { String srcFileNameA = ((UploadCommandFacade) a).srcFile; String srcFileNameB = ((UploadCommandFacade) b).srcFile; return srcFileNameA.compareTo(srcFileNameB); } } : new Comparator<Object>() { public int compare(Object a, Object b) { String srcFileNameA = ((DownloadCommandFacade) a).file; String srcFileNameB = ((DownloadCommandFacade) b).file; return srcFileNameA.compareTo(srcFileNameB); } }; /* sort the rows by source file names */ Collections.sort(statusRows, comparator); } }` | Generate status rows for each file | 744 | 7 |
151,860
|
@ Override public void flush ( ) { ObjectMapper mapper = ObjectMapperFactory . getObjectMapper ( ) ; String dtoDump ; URI incidentURI ; try { dtoDump = mapper . writeValueAsString ( new IncidentV2DTO ( this ) ) ; } catch ( JsonProcessingException ex ) { logger . error ( "Incident registration failed, could not map " + "incident report to json string. Exception: {}" , ex . getMessage ( ) ) ; return ; } // Sanity check... Preconditions . checkNotNull ( dtoDump ) ; try { URIBuilder uriBuilder = new URIBuilder ( this . serverUrl ) ; uriBuilder . setPath ( SF_PATH_CREATE_INCIDENT_V2 ) ; incidentURI = uriBuilder . build ( ) ; } catch ( URISyntaxException ex ) { logger . error ( "Incident registration failed, " + "URI could not be built. Exception: {}" , ex . getMessage ( ) ) ; return ; } HttpPost postRequest = new HttpPost ( incidentURI ) ; postRequest . setHeader ( SFSession . SF_HEADER_AUTHORIZATION , SFSession . SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SFSession . SF_HEADER_TOKEN_TAG + "=\"" + this . sessionToken + "\"" ) ; // Compress the payload. ByteArrayEntity input = null ; try { ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; GZIPOutputStream gzos = new GZIPOutputStream ( baos ) ; byte [ ] bytes = dtoDump . getBytes ( StandardCharsets . UTF_8 ) ; gzos . write ( bytes ) ; gzos . finish ( ) ; input = new ByteArrayEntity ( baos . toByteArray ( ) ) ; input . setContentType ( "application/json" ) ; } catch ( IOException exc ) { logger . debug ( "Incident registration failed, could not compress" + " payload. Exception: {}" , exc . getMessage ( ) ) ; } postRequest . setEntity ( input ) ; postRequest . addHeader ( "content-encoding" , "gzip" ) ; try { String response = HttpUtil . executeRequest ( postRequest , 1000 , 0 , null ) ; logger . debug ( "Incident registration was successful. Response: '{}'" , response ) ; } catch ( Exception ex ) { // No much we can do here besides complain. logger . error ( "Incident registration request failed, exception: {}" , ex . getMessage ( ) ) ; } }
|
Sends the incident to GS for logging
| 587
| 7
|
151,861
|
private static String [ ] decideCipherSuites ( ) { String sysCipherSuites = System . getProperty ( "https.cipherSuites" ) ; String [ ] cipherSuites = sysCipherSuites != null ? sysCipherSuites . split ( "," ) : // use jdk default cipher suites ( ( SSLServerSocketFactory ) SSLServerSocketFactory . getDefault ( ) ) . getDefaultCipherSuites ( ) ; // cipher suites need to be picked up in code explicitly for jdk 1.7 // https://stackoverflow.com/questions/44378970/ if ( logger . isTraceEnabled ( ) ) { logger . trace ( "Cipher suites used: {}" , Arrays . toString ( cipherSuites ) ) ; } return cipherSuites ; }
|
Decide cipher suites that will be passed into the SSLConnectionSocketFactory
| 176
| 14
|
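The record above picks cipher suites explicitly because JDK 1.7 does not otherwise honor them. A minimal sketch of the same property-or-default fallback, using only standard JSSE classes:

import javax.net.ssl.SSLServerSocketFactory;
import java.util.Arrays;

public class CipherSuiteFallbackExample {
  public static void main(String[] args) {
    String sysCipherSuites = System.getProperty("https.cipherSuites");
    // use the comma-separated system property when present, otherwise the JDK defaults
    String[] cipherSuites = sysCipherSuites != null
        ? sysCipherSuites.split(",")
        : ((SSLServerSocketFactory) SSLServerSocketFactory.getDefault()).getDefaultCipherSuites();
    System.out.println(Arrays.toString(cipherSuites));
  }
}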
151,862
|
public static Telemetry createTelemetry ( Connection conn , int flushSize ) { try { return createTelemetry ( conn . unwrap ( SnowflakeConnectionV1 . class ) . getSfSession ( ) , flushSize ) ; } catch ( SQLException ex ) { logger . debug ( "input connection is not a SnowflakeConnection" ) ; return null ; } }
|
Initialize the telemetry connector
| 79
| 6
|
151,863
|
public void addLogToBatch ( TelemetryData log ) throws IOException { if ( isClosed ) { throw new IOException ( "Telemetry connector is closed" ) ; } if ( ! isTelemetryEnabled ( ) ) { return ; // if disable, do nothing } synchronized ( locker ) { this . logBatch . add ( log ) ; } if ( this . logBatch . size ( ) >= this . forceFlushSize ) { this . sendBatch ( ) ; } }
|
Add log to batch to be submitted to telemetry . Send batch if forceFlushSize reached
| 106
| 19
|
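A minimal sketch of the threshold-flush batching pattern used by addLogToBatch, with String standing in for TelemetryData and a hypothetical flush size; the real connector posts the batch over HTTP instead of printing it.

import java.util.LinkedList;

public class FlushOnThresholdSketch {
  private final Object locker = new Object();
  private LinkedList<String> logBatch = new LinkedList<>(); // String stands in for TelemetryData
  private final int forceFlushSize = 100;                   // hypothetical threshold

  public void addLogToBatch(String log) {
    synchronized (locker) {
      logBatch.add(log);
    }
    // flush eagerly once the batch reaches the configured size
    if (logBatch.size() >= forceFlushSize) {
      sendBatch();
    }
  }

  private void sendBatch() {
    LinkedList<String> toSend;
    synchronized (locker) {
      toSend = logBatch;            // swap the batch out under the lock
      logBatch = new LinkedList<>();
    }
    System.out.println("sending " + toSend.size() + " records"); // placeholder for the HTTP POST
  }
}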
151,864
|
public void tryAddLogToBatch ( TelemetryData log ) { try { addLogToBatch ( log ) ; } catch ( IOException ex ) { logger . debug ( "Exception encountered while sending metrics to telemetry endpoint." , ex ) ; } }
|
Attempt to add log to batch and suppress exceptions thrown in case of failure
| 55
| 14
|
151,865
|
public void close ( ) throws IOException { if ( isClosed ) { throw new IOException ( "Telemetry connector is closed" ) ; } try { this . sendBatch ( ) ; } catch ( IOException e ) { logger . error ( "Send logs failed on closing" , e ) ; } finally { this . isClosed = true ; } }
|
Close telemetry connector and send any unsubmitted logs
| 77
| 10
|
151,866
|
public boolean sendBatch ( ) throws IOException { if ( isClosed ) { throw new IOException ( "Telemetry connector is closed" ) ; } if ( ! isTelemetryEnabled ( ) ) { return false ; } LinkedList < TelemetryData > tmpList ; synchronized ( locker ) { tmpList = this . logBatch ; this . logBatch = new LinkedList <> ( ) ; } if ( session . isClosed ( ) ) { throw new UnexpectedException ( "Session is closed when sending log" ) ; } if ( ! tmpList . isEmpty ( ) ) { //session shared with JDBC String sessionToken = this . session . getSessionToken ( ) ; HttpPost post = new HttpPost ( this . telemetryUrl ) ; post . setEntity ( new StringEntity ( logsToString ( tmpList ) ) ) ; post . setHeader ( "Content-type" , "application/json" ) ; post . setHeader ( "Authorization" , "Snowflake Token=\"" + sessionToken + "\"" ) ; String response = null ; try { response = HttpUtil . executeRequest ( post , 1000 , 0 , null ) ; } catch ( SnowflakeSQLException e ) { disableTelemetry ( ) ; // when got error like 404 or bad request, disable telemetry in this telemetry instance logger . error ( "Telemetry request failed, " + "response: {}, exception: {}" , response , e . getMessage ( ) ) ; return false ; } } return true ; }
|
Send all cached logs to server
| 332
| 6
|
151,867
|
static ObjectNode logsToJson ( LinkedList < TelemetryData > telemetryData ) { ObjectNode node = mapper . createObjectNode ( ) ; ArrayNode logs = mapper . createArrayNode ( ) ; for ( TelemetryData data : telemetryData ) { logs . add ( data . toJson ( ) ) ; } node . set ( "logs" , logs ) ; return node ; }
|
convert a list of logs to a JSON object
| 89
| 10
|
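A runnable sketch of the {"logs": [...]} payload construction shown in logsToJson, assuming Jackson databind on the classpath; the inner object stands in for one TelemetryData.toJson() result.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class LogsToJsonExample {
  public static void main(String[] args) {
    ObjectMapper mapper = new ObjectMapper();
    ObjectNode node = mapper.createObjectNode();
    ArrayNode logs = mapper.createArrayNode();
    logs.add(mapper.createObjectNode().put("message", "example log")); // one log entry
    node.set("logs", logs);
    System.out.println(node); // {"logs":[{"message":"example log"}]}
  }
}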
151,868
|
@ Override public ResultSet executeQuery ( String sql ) throws SQLException { raiseSQLExceptionIfStatementIsClosed ( ) ; return executeQueryInternal ( sql , null ) ; }
|
Execute SQL query
| 43
| 4
|
151,869
|
ResultSet executeQueryInternal ( String sql , Map < String , ParameterBindingDTO > parameterBindings ) throws SQLException { SFBaseResultSet sfResultSet ; try { sfResultSet = sfStatement . execute ( sql , parameterBindings , SFStatement . CallingMethod . EXECUTE_QUERY ) ; sfResultSet . setSession ( this . connection . getSfSession ( ) ) ; } catch ( SFException ex ) { throw new SnowflakeSQLException ( ex . getCause ( ) , ex . getSqlState ( ) , ex . getVendorCode ( ) , ex . getParams ( ) ) ; } if ( resultSet != null ) { openResultSets . add ( resultSet ) ; } resultSet = new SnowflakeResultSetV1 ( sfResultSet , this ) ; return getResultSet ( ) ; }
|
Internal method for executing a query that accepts parameter bindings .
| 192
| 10
|
151,870
|
void setParameter ( String name , Object value ) throws Exception { logger . debug ( "public void setParameter" ) ; try { if ( this . sfStatement != null ) { this . sfStatement . addProperty ( name , value ) ; } } catch ( SFException ex ) { throw new SnowflakeSQLException ( ex ) ; } }
|
Sets a parameter at the statement level . Used for internal testing .
| 76
| 14
|
151,871
|
private static ThreadPoolExecutor createChunkDownloaderExecutorService ( final String threadNamePrefix , final int parallel ) { ThreadFactory threadFactory = new ThreadFactory ( ) { private int threadCount = 1 ; public Thread newThread ( final Runnable r ) { final Thread thread = new Thread ( r ) ; thread . setName ( threadNamePrefix + threadCount ++ ) ; thread . setUncaughtExceptionHandler ( new Thread . UncaughtExceptionHandler ( ) { public void uncaughtException ( Thread t , Throwable e ) { logger . error ( "uncaughtException in thread: " + t + " {}" , e ) ; } } ) ; thread . setDaemon ( true ) ; return thread ; } } ; return ( ThreadPoolExecutor ) Executors . newFixedThreadPool ( parallel , threadFactory ) ; }
|
Create a pool of downloader threads .
| 179
| 8
|
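A sketch of the same named-daemon-thread pool, using AtomicInteger for the counter instead of the record's unsynchronized field (a deliberate variation, since newThread may be called from multiple threads):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedDaemonPoolExample {
  public static ExecutorService newDownloaderPool(final String threadNamePrefix, int parallel) {
    ThreadFactory threadFactory = new ThreadFactory() {
      private final AtomicInteger threadCount = new AtomicInteger(1);

      public Thread newThread(Runnable r) {
        Thread thread = new Thread(r);
        thread.setName(threadNamePrefix + threadCount.getAndIncrement()); // e.g. "downloader-1"
        thread.setDaemon(true); // daemon threads don't block JVM shutdown
        return thread;
      }
    };
    return Executors.newFixedThreadPool(parallel, threadFactory);
  }
}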
151,872
|
private void startNextDownloaders ( ) throws SnowflakeSQLException { long waitingTime = BASE_WAITING_MS ; // submit the chunks to be downloaded up to the prefetch slot capacity // and limited by memory while ( nextChunkToDownload - nextChunkToConsume < prefetchSlots && nextChunkToDownload < chunks . size ( ) ) { // check if memory limit allows more prefetching final SnowflakeResultChunk nextChunk = chunks . get ( nextChunkToDownload ) ; final long neededChunkMemory = nextChunk . computeNeededChunkMemory ( ) ; // each time only one thread can enter this block synchronized ( currentMemoryUsage ) { // make sure memoryLimit > neededChunkMemory; otherwise, the thread hangs if ( neededChunkMemory > memoryLimit ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{}: reset memoryLimit from {} MB to current chunk size {} MB" , Thread . currentThread ( ) . getName ( ) , memoryLimit / 1024 / 1024 , neededChunkMemory / 1024 / 1024 ) ; } memoryLimit = neededChunkMemory ; } // no memory allocate when memory is not enough for prefetch if ( currentMemoryUsage + neededChunkMemory > memoryLimit && nextChunkToDownload - nextChunkToConsume > 0 ) { break ; } // only allocate memory when the future usage is less than the limit if ( currentMemoryUsage + neededChunkMemory <= memoryLimit ) { nextChunk . tryReuse ( chunkDataCache ) ; currentMemoryUsage += neededChunkMemory ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{}: currentMemoryUsage in MB: {}, nextChunkToDownload: {}, nextChunkToConsume: {}, " + "newReservedMemory in B: {} " , Thread . currentThread ( ) . getName ( ) , currentMemoryUsage / 1024 / 1024 , nextChunkToDownload , nextChunkToConsume , neededChunkMemory ) ; } logger . debug ( "submit chunk #{} for downloading, url={}" , this . nextChunkToDownload , nextChunk . getUrl ( ) ) ; executor . submit ( getDownloadChunkCallable ( this , nextChunk , qrmk , nextChunkToDownload , chunkHeadersMap , networkTimeoutInMilli , useJsonParserV2 ) ) ; // increment next chunk to download nextChunkToDownload ++ ; // make sure reset waiting time waitingTime = BASE_WAITING_MS ; // go to next chunk continue ; } } // waiting when nextChunkToDownload is equal to nextChunkToConsume but reach memory limit try { waitingTime *= WAITING_SECS_MULTIPLIER ; waitingTime = waitingTime > MAX_WAITING_MS ? MAX_WAITING_MS : waitingTime ; long jitter = ThreadLocalRandom . current ( ) . nextLong ( 0 , waitingTime / WAITING_JITTER_RATIO ) ; waitingTime += jitter ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "{} waiting for {}s: currentMemoryUsage in MB: {}, neededChunkMemory in MB: {}, " + "nextChunkToDownload: {}, nextChunkToConsume: {} " , Thread . currentThread ( ) . getName ( ) , waitingTime / 1000.0 , currentMemoryUsage / 1024 / 1024 , neededChunkMemory / 1024 / 1024 , nextChunkToDownload , nextChunkToConsume ) ; } Thread . sleep ( waitingTime ) ; } catch ( InterruptedException ie ) { throw new SnowflakeSQLException ( SqlState . INTERNAL_ERROR , ErrorCode . INTERNAL_ERROR . getMessageCode ( ) , "Waiting SnowflakeChunkDownloader has been interrupted." ) ; } } // clear the cache, we can't download more at the moment // so we won't need them in the near future chunkDataCache . clear ( ) ; }
|
Submit download chunk tasks to the executor . The number submitted depends on the thread and memory limits
| 875
| 15
|
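startNextDownloaders backs off exponentially with jitter when memory is tight. A standalone sketch of that backoff schedule follows; all four constants are hypothetical values, since the record does not show the driver's actual ones.

import java.util.concurrent.ThreadLocalRandom;

public class BackoffWithJitterExample {
  private static final long BASE_WAITING_MS = 50;        // hypothetical
  private static final long MAX_WAITING_MS = 16000;      // hypothetical cap
  private static final int WAITING_SECS_MULTIPLIER = 2;  // hypothetical
  private static final int WAITING_JITTER_RATIO = 2;     // hypothetical

  public static void main(String[] args) throws InterruptedException {
    long waitingTime = BASE_WAITING_MS;
    for (int attempt = 0; attempt < 5; attempt++) {
      waitingTime = Math.min(waitingTime * WAITING_SECS_MULTIPLIER, MAX_WAITING_MS); // double, capped
      long jitter = ThreadLocalRandom.current().nextLong(0, waitingTime / WAITING_JITTER_RATIO);
      System.out.println("sleeping " + (waitingTime + jitter) + " ms");
      Thread.sleep(waitingTime + jitter); // jitter keeps concurrent waiters from waking in lockstep
    }
  }
}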
151,873
|
public void releaseAllChunkMemoryUsage ( ) { if ( chunks == null || chunks . size ( ) == 0 ) { return ; } for ( int i = 0 ; i < chunks . size ( ) ; i ++ ) { releaseCurrentMemoryUsage ( i , chunks . get ( i ) . computeNeededChunkMemory ( ) ) ; } }
|
release all existing chunk memory usage before close
| 74
| 8
|
151,874
|
private void logOutOfMemoryError ( ) { logger . error ( "Dump some crucial information below:\n" + "Total milliseconds waiting for chunks: {},\n" + "Total memory used: {}, Max heap size: {}, total download time: {} millisec,\n" + "total parsing time: {} milliseconds, total chunks: {},\n" + "currentMemoryUsage in Byte: {}, currentMemoryLimit in Bytes: {} \n" + "nextChunkToDownload: {}, nextChunkToConsume: {}\n" + "Several suggestions to try to resolve the OOM issue:\n" + "1. increase the JVM heap size if you have more space; or \n" + "2. use CLIENT_MEMORY_LIMIT to reduce the memory usage by the JDBC driver " + "(https://docs.snowflake.net/manuals/sql-reference/parameters.html#client-memory-limit)" + "3. please make sure 2 * CLIENT_PREFETCH_THREADS * CLIENT_RESULT_CHUNK_SIZE < CLIENT_MEMORY_LIMIT. " + "If not, please reduce CLIENT_PREFETCH_THREADS and CLIENT_RESULT_CHUNK_SIZE too." , numberMillisWaitingForChunks , Runtime . getRuntime ( ) . totalMemory ( ) , Runtime . getRuntime ( ) . maxMemory ( ) , totalMillisDownloadingChunks . get ( ) , totalMillisParsingChunks . get ( ) , chunks . size ( ) , currentMemoryUsage , memoryLimit , nextChunkToDownload , nextChunkToConsume ) ; }
|
log an out - of - memory error and provide suggestions to avoid it
| 370
| 13
|
151,875
|
public Metrics terminate ( ) { if ( ! terminated ) { logger . debug ( "Total milliseconds waiting for chunks: {}, " + "Total memory used: {}, total download time: {} millisec, " + "total parsing time: {} milliseconds, total chunks: {}" , numberMillisWaitingForChunks , Runtime . getRuntime ( ) . totalMemory ( ) , totalMillisDownloadingChunks . get ( ) , totalMillisParsingChunks . get ( ) , chunks . size ( ) ) ; if ( executor != null ) { executor . shutdownNow ( ) ; executor = null ; } chunks = null ; chunkDataCache . clear ( ) ; terminated = true ; return new Metrics ( ) ; } return null ; }
|
terminate the downloader
| 161
| 5
|
151,876
|
public static String maskAWSSecret ( String sql ) { List < SecretDetector . SecretRange > secretRanges = SecretDetector . getAWSSecretPos ( sql ) ; for ( SecretDetector . SecretRange secretRange : secretRanges ) { sql = maskText ( sql , secretRange . beginPos , secretRange . endPos ) ; } return sql ; }
|
mask AWS secret in the input string
| 81
| 7
|
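maskAWSSecret delegates to a maskText helper that is not shown in the record. A hypothetical sketch of such a helper, assuming an inclusive [beginPos, endPos] range:

public class MaskTextExample {
  // overwrite the characters in the inclusive range [beginPos, endPos] with asterisks
  static String maskText(String text, int beginPos, int endPos) {
    StringBuilder sb = new StringBuilder(text);
    for (int i = beginPos; i <= endPos && i < sb.length(); i++) {
      sb.setCharAt(i, '*');
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    String sql = "COPY INTO t CREDENTIALS=(aws_secret_key='abc123')";
    // masks the secret value between the quotes
    System.out.println(maskText(sql, 41, 46)); // ...aws_secret_key='******')
  }
}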
151,877
|
private void sanityCheckQuery ( String sql ) throws SQLException { if ( sql == null || sql . isEmpty ( ) ) { throw new SnowflakeSQLException ( SqlState . SQL_STATEMENT_NOT_YET_COMPLETE , ErrorCode . INVALID_SQL . getMessageCode ( ) , sql ) ; } }
|
Sanity check query text
| 76
| 5
|
151,878
|
private SFBaseResultSet executeQuery ( String sql , Map < String , ParameterBindingDTO > parametersBinding , boolean describeOnly , CallingMethod caller ) throws SQLException , SFException { sanityCheckQuery ( sql ) ; String trimmedSql = sql . trim ( ) ; // snowflake specific client side commands if ( isFileTransfer ( trimmedSql ) ) { // PUT/GET command logger . debug ( "Executing file transfer locally: {}" , sql ) ; return executeFileTransfer ( sql ) ; } // NOTE: It is intentional two describeOnly parameters are specified. return executeQueryInternal ( sql , parametersBinding , describeOnly , describeOnly , // internal query if describeOnly is true caller ) ; }
|
Execute SQL query with an option for describe only
| 154
| 10
|
151,879
|
public SFStatementMetaData describe ( String sql ) throws SFException , SQLException { SFBaseResultSet baseResultSet = executeQuery ( sql , null , true , null ) ; describeJobUUID = baseResultSet . getQueryId ( ) ; return new SFStatementMetaData ( baseResultSet . getMetaData ( ) , baseResultSet . getStatementType ( ) , baseResultSet . getNumberOfBinds ( ) , baseResultSet . isArrayBindSupported ( ) ) ; }
|
Describe a statement
| 106
| 4
|
151,880
|
private void setTimeBomb ( ScheduledExecutorService executor ) { class TimeBombTask implements Callable < Void > { private final SFStatement statement ; private TimeBombTask ( SFStatement statement ) { this . statement = statement ; } @ Override public Void call ( ) throws SQLException { try { statement . cancel ( ) ; } catch ( SFException ex ) { throw new SnowflakeSQLException ( ex , ex . getSqlState ( ) , ex . getVendorCode ( ) , ex . getParams ( ) ) ; } return null ; } } executor . schedule ( new TimeBombTask ( this ) , this . queryTimeout , TimeUnit . SECONDS ) ; }
|
Set a time bomb to cancel the outstanding query when timeout is reached .
| 151
| 14
|
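A runnable sketch of the time-bomb pattern from setTimeBomb: a one-shot task scheduled to fire after the query timeout. The printed message stands in for statement.cancel(), and the timeout value is hypothetical.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class TimeBombExample {
  public static void main(String[] args) {
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    int queryTimeout = 2; // hypothetical timeout in seconds
    executor.schedule(
        () -> System.out.println("timeout reached, cancelling the query"), // stands in for statement.cancel()
        queryTimeout,
        TimeUnit.SECONDS);
    executor.shutdown(); // already-scheduled delayed tasks still run by default
  }
}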
151,881
|
private void cancelHelper ( String sql , String mediaType ) throws SnowflakeSQLException , SFException { synchronized ( this ) { if ( isClosed ) { throw new SFException ( ErrorCode . INTERNAL_ERROR , "statement already closed" ) ; } } StmtUtil . StmtInput stmtInput = new StmtUtil . StmtInput ( ) ; stmtInput . setServerUrl ( session . getServerUrl ( ) ) . setSql ( sql ) . setMediaType ( mediaType ) . setRequestId ( requestId ) . setSessionToken ( session . getSessionToken ( ) ) . setServiceName ( session . getServiceName ( ) ) ; StmtUtil . cancel ( stmtInput ) ; synchronized ( this ) { /* * done with the remote execution of the query. set sequenceId to -1 * and request id to null so that we don't try to abort it again upon * canceling. */ this . sequenceId = - 1 ; this . requestId = null ; } }
|
A helper method to build the URL and cancel the SQL execution
| 222
| 12
|
151,882
|
public boolean getMoreResults ( int current ) throws SQLException { // clean up current result, if exists if ( resultSet != null && ( current == Statement . CLOSE_CURRENT_RESULT || current == Statement . CLOSE_ALL_RESULTS ) ) { resultSet . close ( ) ; } resultSet = null ; // verify if more results exist if ( childResults == null || childResults . isEmpty ( ) ) { return false ; } // fetch next result using the query id SFChildResult nextResult = childResults . remove ( 0 ) ; try { JsonNode result = StmtUtil . getQueryResultJSON ( nextResult . getId ( ) , session ) ; Object sortProperty = session . getSFSessionProperty ( "sort" ) ; boolean sortResult = sortProperty != null && ( Boolean ) sortProperty ; resultSet = new SFResultSet ( result , this , sortResult ) ; // override statement type so we can treat the result set like a result of // the original statement called (and not the result scan) resultSet . setStatementType ( nextResult . getType ( ) ) ; return nextResult . getType ( ) . isGenerateResultSet ( ) ; } catch ( SFException ex ) { throw new SnowflakeSQLException ( ex ) ; } }
|
Sets the result set to the next one if available .
| 277
| 12
|
151,883
|
public boolean isServiceException404 ( ) { if ( ( Exception ) this instanceof AmazonServiceException ) { AmazonServiceException asEx = ( AmazonServiceException ) ( ( java . lang . Exception ) this ) ; return ( asEx . getStatusCode ( ) == HttpStatus . SC_NOT_FOUND ) ; } return false ; }
|
Returns true if this is an exception corresponding to an HTTP 404 error returned by the storage provider
| 72
| 18
|
151,884
|
public static String oneLiner ( Throwable thrown ) { StackTraceElement [ ] stack = thrown . getStackTrace ( ) ; String topOfStack = "" ; if ( stack . length > 0 ) { topOfStack = " at " + stack [ 0 ] ; } return thrown . toString ( ) + topOfStack ; }
|
Produce a one - line description of the throwable suitable for error messages and log printing .
| 72
| 18
|
151,885
|
public static void dumpVmMetrics ( String incidentId ) { PrintWriter writer = null ; try { String dumpFile = EventUtil . getDumpPathPrefix ( ) + "/" + INC_DUMP_FILE_NAME + incidentId + INC_DUMP_FILE_EXT ; final OutputStream outStream = new GZIPOutputStream ( new FileOutputStream ( dumpFile ) ) ; writer = new PrintWriter ( outStream , true ) ; final VirtualMachineMetrics vm = VirtualMachineMetrics . getInstance ( ) ; writer . print ( "\n\n\n--------------------------- METRICS " + "---------------------------\n\n" ) ; writer . flush ( ) ; JsonFactory jf = new JsonFactory ( ) ; jf . disable ( JsonGenerator . Feature . AUTO_CLOSE_TARGET ) ; ObjectMapper mapper = new ObjectMapper ( jf ) ; mapper . registerModule ( new JodaModule ( ) ) ; mapper . setDateFormat ( new ISO8601DateFormat ( ) ) ; mapper . configure ( SerializationFeature . INDENT_OUTPUT , true ) ; MetricsServlet metrics = new MetricsServlet ( Clock . defaultClock ( ) , vm , Metrics . defaultRegistry ( ) , jf , true ) ; final JsonGenerator json = jf . createGenerator ( outStream , JsonEncoding . UTF8 ) ; json . useDefaultPrettyPrinter ( ) ; json . writeStartObject ( ) ; // JVM metrics writeVmMetrics ( json , vm ) ; // Components metrics metrics . writeRegularMetrics ( json , // json generator null , // class prefix false ) ; // include full samples json . writeEndObject ( ) ; json . close ( ) ; logger . debug ( "Creating full thread dump in dump file {}" , dumpFile ) ; // Thread dump next.... writer . print ( "\n\n\n--------------------------- THREAD DUMP " + "---------------------------\n\n" ) ; writer . flush ( ) ; vm . threadDump ( outStream ) ; logger . debug ( "Dump file {} is created." , dumpFile ) ; } catch ( Exception exc ) { logger . error ( "Unable to write dump file, exception: {}" , exc . getMessage ( ) ) ; } finally { if ( writer != null ) { writer . close ( ) ; } } }
|
Dumps JVM metrics for this process .
| 519
| 9
|
151,886
|
public static Throwable generateIncidentV2WithException ( SFSession session , Throwable exc , String jobId , String requestId ) { new Incident ( session , exc , jobId , requestId ) . trigger ( ) ; return exc ; }
|
Makes a V2 incident object and triggers it , effectively reporting the given exception to GS and possibly to crashmanager
| 52
| 22
|
151,887
|
public static String getUTCNow ( ) { SimpleDateFormat dateFormatGmt = new SimpleDateFormat ( "yyyy-MM-dd HH:mm:ss" ) ; dateFormatGmt . setTimeZone ( TimeZone . getTimeZone ( "GMT" ) ) ; //Time in GMT return dateFormatGmt . format ( new Date ( ) ) ; }
|
Get the current time in UTC in the format yyyy-MM-dd HH:mm:ss
| 78
| 9
|
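A runnable copy of the getUTCNow pattern; note that SimpleDateFormat is not thread-safe, so a shared instance would need external synchronization.

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class UtcNowExample {
  public static void main(String[] args) {
    SimpleDateFormat dateFormatGmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    dateFormatGmt.setTimeZone(TimeZone.getTimeZone("GMT")); // otherwise the JVM default zone is used
    System.out.println(dateFormatGmt.format(new Date())); // e.g. 2019-06-01 12:00:00
  }
}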
151,888
|
@ Override public void renew ( Map stageCredentials ) throws SnowflakeSQLException { stageInfo . setCredentials ( stageCredentials ) ; setupAzureClient ( stageInfo , encMat ) ; }
|
Re - creates the encapsulated storage client with a fresh access token
| 49
| 13
|
151,889
|
@ Override public StorageObjectMetadata getObjectMetadata ( String remoteStorageLocation , String prefix ) throws StorageProviderException { AzureObjectMetadata azureObjectMetadata = null ; try { // Get a reference to the BLOB, to retrieve its metadata CloudBlobContainer container = azStorageClient . getContainerReference ( remoteStorageLocation ) ; CloudBlob blob = container . getBlockBlobReference ( prefix ) ; blob . downloadAttributes ( ) ; // Get the user-defined BLOB metadata Map < String , String > userDefinedMetadata = blob . getMetadata ( ) ; // Get the BLOB system properties we care about BlobProperties properties = blob . getProperties ( ) ; long contentLength = properties . getLength ( ) ; String contentEncoding = properties . getContentEncoding ( ) ; // Construct an Azure metadata object azureObjectMetadata = new AzureObjectMetadata ( contentLength , contentEncoding , userDefinedMetadata ) ; } catch ( StorageException ex ) { logger . debug ( "Failed to retrieve BLOB metadata: {} - {}" , ex . getErrorCode ( ) , ex . getExtendedErrorInformation ( ) ) ; throw new StorageProviderException ( ex ) ; } catch ( URISyntaxException ex ) { logger . debug ( "Cannot retrieve BLOB properties, invalid URI: {}" , ex ) ; throw new StorageProviderException ( ex ) ; } return azureObjectMetadata ; }
|
Returns the metadata properties for a remote storage object
| 309
| 9
|
151,890
|
@ Override public void download ( SFSession connection , String command , String localLocation , String destFileName , int parallelism , String remoteStorageLocation , String stageFilePath , String stageRegion ) throws SnowflakeSQLException { int retryCount = 0 ; do { try { String localFilePath = localLocation + localFileSep + destFileName ; File localFile = new File ( localFilePath ) ; CloudBlobContainer container = azStorageClient . getContainerReference ( remoteStorageLocation ) ; CloudBlob blob = container . getBlockBlobReference ( stageFilePath ) ; // Note that Azure doesn't offer a multi-part parallel download library, // where the user has control of block size and parallelism // we rely on Azure to handle the download, hence the "parallelism" parameter is ignored // in the Azure implementation of the method blob . downloadToFile ( localFilePath ) ; // Pull object metadata from Azure blob . downloadAttributes ( ) ; // Get the user-defined BLOB metadata Map < String , String > userDefinedMetadata = blob . getMetadata ( ) ; AbstractMap . SimpleEntry < String , String > encryptionData = parseEncryptionData ( userDefinedMetadata . get ( AZ_ENCRYPTIONDATAPROP ) ) ; String key = encryptionData . getKey ( ) ; String iv = encryptionData . getValue ( ) ; if ( this . isEncrypting ( ) && this . getEncryptionKeySize ( ) <= 256 ) { if ( key == null || iv == null ) { throw new SnowflakeSQLException ( SqlState . INTERNAL_ERROR , ErrorCode . INTERNAL_ERROR . getMessageCode ( ) , "File metadata incomplete" ) ; } // Decrypt file try { EncryptionProvider . decrypt ( localFile , key , iv , this . encMat ) ; } catch ( Exception ex ) { logger . error ( "Error decrypting file" , ex ) ; throw ex ; } } return ; } catch ( Exception ex ) { logger . debug ( "Download unsuccessful {}" , ex ) ; handleAzureException ( ex , ++ retryCount , "download" , connection , command , this ) ; } } while ( retryCount <= getMaxRetries ( ) ) ; throw new SnowflakeSQLException ( SqlState . INTERNAL_ERROR , ErrorCode . INTERNAL_ERROR . getMessageCode ( ) , "Unexpected: download unsuccessful without exception!" ) ; }
|
Download a file from remote storage .
| 535
| 7
|
151,891
|
private static void handleAzureException ( Exception ex , int retryCount , String operation , SFSession connection , String command , SnowflakeAzureClient azClient ) throws SnowflakeSQLException { // no need to retry if it is invalid key exception if ( ex . getCause ( ) instanceof InvalidKeyException ) { // Most likely cause is that the unlimited strength policy files are not installed // Log the error and throw a message that explains the cause SnowflakeFileTransferAgent . throwJCEMissingError ( operation , ex ) ; } if ( ( ( StorageException ) ex ) . getHttpStatusCode ( ) == 403 ) { // A 403 indicates that the SAS token has expired, // we need to refresh the Azure client with the new token SnowflakeFileTransferAgent . renewExpiredToken ( connection , command , azClient ) ; } if ( ex instanceof StorageException ) { StorageException se = ( StorageException ) ex ; // If we have exceeded the max number of retries, propagate the error if ( retryCount > azClient . getMaxRetries ( ) ) { throw new SnowflakeSQLException ( se , SqlState . SYSTEM_ERROR , ErrorCode . AZURE_SERVICE_ERROR . getMessageCode ( ) , operation , se . getErrorCode ( ) , se . getExtendedErrorInformation ( ) , se . getHttpStatusCode ( ) , se . getMessage ( ) ) ; } else { logger . debug ( "Encountered exception ({}) during {}, retry count: {}" , ex . getMessage ( ) , operation , retryCount ) ; logger . debug ( "Stack trace: " , ex ) ; // exponential backoff up to a limit int backoffInMillis = azClient . getRetryBackoffMin ( ) ; if ( retryCount > 1 ) { backoffInMillis <<= ( Math . min ( retryCount - 1 , azClient . getRetryBackoffMaxExponent ( ) ) ) ; } try { logger . debug ( "Sleep for {} milliseconds before retry" , backoffInMillis ) ; Thread . sleep ( backoffInMillis ) ; } catch ( InterruptedException ex1 ) { // ignore } if ( se . getHttpStatusCode ( ) == 403 ) { // A 403 indicates that the SAS token has expired, // we need to refresh the Azure client with the new token SnowflakeFileTransferAgent . renewExpiredToken ( connection , command , azClient ) ; } } } else { if ( ex instanceof InterruptedException || SnowflakeUtil . getRootCause ( ex ) instanceof SocketTimeoutException ) { if ( retryCount > azClient . getMaxRetries ( ) ) { throw new SnowflakeSQLException ( ex , SqlState . SYSTEM_ERROR , ErrorCode . IO_ERROR . getMessageCode ( ) , "Encountered exception during " + operation + ": " + ex . getMessage ( ) ) ; } else { logger . debug ( "Encountered exception ({}) during {}, retry count: {}" , ex . getMessage ( ) , operation , retryCount ) ; } } else { throw new SnowflakeSQLException ( ex , SqlState . SYSTEM_ERROR , ErrorCode . IO_ERROR . getMessageCode ( ) , "Encountered exception during " + operation + ": " + ex . getMessage ( ) ) ; } } }
|
Handles exceptions thrown by Azure Storage . It will retry transient errors as defined by the Azure Client retry policy . It will re - create the client if the SAS token has expired and re - try
| 733
| 39
|
151,892
|
@ Override public void addDigestMetadata ( StorageObjectMetadata meta , String digest ) { if ( ! SnowflakeUtil . isBlank ( digest ) ) { // Azure doesn't allow hyphens in the name of a metadata field. meta . addUserMetadata ( "sfcdigest" , digest ) ; } }
|
Adds digest metadata to the StorageObjectMetadata object
| 71
| 10
|
151,893
|
private static long initMemoryLimit ( final ResultOutput resultOutput ) { // default setting long memoryLimit = SessionUtil . DEFAULT_CLIENT_MEMORY_LIMIT * 1024 * 1024 ; if ( resultOutput . parameters . get ( CLIENT_MEMORY_LIMIT ) != null ) { // use the settings from the customer memoryLimit = ( int ) resultOutput . parameters . get ( CLIENT_MEMORY_LIMIT ) * 1024L * 1024L ; } long maxMemoryToUse = Runtime . getRuntime ( ) . maxMemory ( ) * 8 / 10 ; if ( ( int ) resultOutput . parameters . get ( CLIENT_MEMORY_LIMIT ) == SessionUtil . DEFAULT_CLIENT_MEMORY_LIMIT ) { // if the memory limit is the default value and best effort memory is enabled // set the memory limit to 80% of the maximum as the best effort memoryLimit = Math . max ( memoryLimit , maxMemoryToUse ) ; } // always make sure memoryLimit <= 80% of the maximum memoryLimit = Math . min ( memoryLimit , maxMemoryToUse ) ; logger . debug ( "Set allowed memory usage to {} bytes" , memoryLimit ) ; return memoryLimit ; }
|
initialize memory limit in bytes
| 266
| 6
|
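The core of initMemoryLimit is a clamp against 80% of the maximum heap. A minimal sketch with a hypothetical configured value, since DEFAULT_CLIENT_MEMORY_LIMIT is not shown in the record:

public class MemoryLimitExample {
  public static void main(String[] args) {
    long configuredLimit = 1536L * 1024 * 1024; // hypothetical CLIENT_MEMORY_LIMIT of 1536 MB
    long maxMemoryToUse = Runtime.getRuntime().maxMemory() * 8 / 10; // 80% of max heap
    long memoryLimit = Math.min(configuredLimit, maxMemoryToUse);    // never exceed the 80% cap
    System.out.println("allowed memory usage: " + memoryLimit + " bytes");
  }
}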
151,894
|
static private Object effectiveParamValue ( Map < String , Object > parameters , String paramName ) { String upper = paramName . toUpperCase ( ) ; Object value = parameters . get ( upper ) ; if ( value != null ) { return value ; } value = defaultParameters . get ( upper ) ; if ( value != null ) { return value ; } logger . debug ( "Unknown Common Parameter: {}" , paramName ) ; return null ; }
|
Returns the effective parameter value using the value explicitly provided in parameters or the default if absent
| 96
| 17
|
151,895
|
static private SnowflakeDateTimeFormat specializedFormatter ( Map < String , Object > parameters , String id , String param , String defaultFormat ) { String sqlFormat = SnowflakeDateTimeFormat . effectiveSpecializedTimestampFormat ( ( String ) effectiveParamValue ( parameters , param ) , defaultFormat ) ; SnowflakeDateTimeFormat formatter = new SnowflakeDateTimeFormat ( sqlFormat ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "sql {} format: {}, java {} format: {}" , id , sqlFormat , id , formatter . toSimpleDateTimePattern ( ) ) ; } return formatter ; }
|
Helper function building a formatter for a specialized timestamp type . Note that it will be based on either the param value if set or the default format provided .
| 136
| 31
|
151,896
|
static public Timestamp adjustTimestamp ( Timestamp timestamp ) { long milliToAdjust = ResultUtil . msDiffJulianToGregorian ( timestamp ) ; if ( milliToAdjust != 0 ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "adjust timestamp by {} days" , milliToAdjust / 86400000 ) ; } Timestamp newTimestamp = new Timestamp ( timestamp . getTime ( ) + milliToAdjust ) ; newTimestamp . setNanos ( timestamp . getNanos ( ) ) ; return newTimestamp ; } else { return timestamp ; } }
|
Adjust timestamp for dates before 1582 - 10 - 05
| 131
| 11
|
151,897
|
static public long msDiffJulianToGregorian ( java . util . Date date ) { // get the year of the date Calendar cal = Calendar . getInstance ( ) ; cal . setTime ( date ) ; int year = cal . get ( Calendar . YEAR ) ; int month = cal . get ( Calendar . MONTH ) ; int dayOfMonth = cal . get ( Calendar . DAY_OF_MONTH ) ; // if date is before 1582-10-05, apply the difference // by (H-(H/4)-2) where H is the hundreds digit of the year according to: // http://en.wikipedia.org/wiki/Gregorian_calendar if ( date . getTime ( ) < - 12220156800000L ) { // for dates on or before 02/28, use the previous year otherwise use // current year. // TODO: we need to revisit this since there is a potential issue using // the year/month/day from the calendar since that may not be the same // year/month/day as the original date (which is the problem we are // trying to solve here). if ( month == 0 || ( month == 1 && dayOfMonth <= 28 ) ) { year = year - 1 ; } int hundreds = year / 100 ; int differenceInDays = hundreds - ( hundreds / 4 ) - 2 ; return differenceInDays * 86400000 ; } else { return 0 ; } }
|
For dates before 1582 - 10 - 05 calculate the number of millis to adjust .
| 300
| 18
|
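A worked example of the H - (H/4) - 2 day-gap formula used above: for a year in the 1500s, H = 15, so the Julian-to-Gregorian difference is 15 - 3 - 2 = 10 days.

public class JulianGregorianGapExample {
  public static void main(String[] args) {
    int year = 1500;
    int hundreds = year / 100;                            // H = 15
    int differenceInDays = hundreds - (hundreds / 4) - 2; // 15 - 3 - 2 = 10
    long differenceInMillis = differenceInDays * 86400000L;
    System.out.println(differenceInDays + " days = " + differenceInMillis + " ms");
  }
}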
151,898
|
static public String getSFTimeAsString ( SFTime sft , int scale , SnowflakeDateTimeFormat timeFormatter ) { return timeFormatter . format ( sft , scale ) ; }
|
Convert a time value into a string
| 41
| 8
|
151,899
|
static public String getSFTimestampAsString ( SFTimestamp sfTS , int columnType , int scale , SnowflakeDateTimeFormat timestampNTZFormatter , SnowflakeDateTimeFormat timestampLTZFormatter , SnowflakeDateTimeFormat timestampTZFormatter , SFSession session ) throws SFException { // Derive the timestamp formatter to use SnowflakeDateTimeFormat formatter ; if ( columnType == Types . TIMESTAMP ) { formatter = timestampNTZFormatter ; } else if ( columnType == SnowflakeUtil . EXTRA_TYPES_TIMESTAMP_LTZ ) { formatter = timestampLTZFormatter ; } else // TZ { formatter = timestampTZFormatter ; } if ( formatter == null ) { throw ( SFException ) IncidentUtil . generateIncidentV2WithException ( session , new SFException ( ErrorCode . INTERNAL_ERROR , "missing timestamp formatter" ) , null , null ) ; } try { Timestamp adjustedTimestamp = ResultUtil . adjustTimestamp ( sfTS . getTimestamp ( ) ) ; return formatter . format ( adjustedTimestamp , sfTS . getTimeZone ( ) , scale ) ; } catch ( SFTimestamp . TimestampOperationNotAvailableException e ) { // this timestamp doesn't fit into a Java timestamp, and therefore we // can't format it (for now). Just print it out as seconds since epoch. BigDecimal nanosSinceEpoch = sfTS . getNanosSinceEpoch ( ) ; BigDecimal secondsSinceEpoch = nanosSinceEpoch . scaleByPowerOfTen ( - 9 ) ; return secondsSinceEpoch . setScale ( scale ) . toPlainString ( ) ; } }
|
Convert a SFTimestamp to a string value .
| 379
| 12
|