idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
28,400
/**
 * Extracts the OCSP responder URLs from the certificate's Authority
 * Information Access extension.
 *
 * @param bcCert the BouncyCastle certificate to inspect
 * @return the set of OCSP URL strings (empty if the extension carries none)
 * @throws RuntimeException if the certificate carries no extensions
 */
private Set<String> getOcspUrls(Certificate bcCert)
{
  TBSCertificate bcTbsCert = bcCert.getTBSCertificate();
  Extensions bcExts = bcTbsCert.getExtensions();
  if (bcExts == null)
  {
    throw new RuntimeException("Failed to get certificate extensions.");
  }

  Set<String> ocsp = new HashSet<>();
  for (Enumeration<?> en = bcExts.oids(); en.hasMoreElements(); )
  {
    ASN1ObjectIdentifier oid = (ASN1ObjectIdentifier) en.nextElement();
    Extension bcExt = bcExts.getExtension(oid);
    // Use equals(): ASN1ObjectIdentifier instances are not guaranteed to be
    // interned, so reference equality (==) can silently miss the extension.
    if (Extension.authorityInfoAccess.equals(bcExt.getExtnId()))
    {
      DLSequence seq = (DLSequence) bcExt.getParsedValue();
      for (ASN1Encodable asn : seq)
      {
        // Each AccessDescription is a (method OID, GeneralName) pair.
        ASN1Encodable[] pairOfAsn = ((DLSequence) asn).toArray();
        if (pairOfAsn.length == 2)
        {
          ASN1ObjectIdentifier key = (ASN1ObjectIdentifier) pairOfAsn[0];
          if (OIDocsp.equals(key))
          {
            GeneralName gn = GeneralName.getInstance(pairOfAsn[1]);
            ocsp.add(gn.getName().toString());
          }
        }
      }
    }
  }
  return ocsp;
}
Gets the OCSP URLs associated with the certificate.
28,401
/**
 * Returns true when currentTime falls inside the usable window
 * [thisUpdate - clock skew, nextUpdate + tolerable validity].
 */
private static boolean isValidityRange(Date currentTime, Date thisUpdate, Date nextUpdate)
{
  long tolerableValidity = calculateTolerableVadility(thisUpdate, nextUpdate);
  long now = currentTime.getTime();
  long earliest = thisUpdate.getTime() - MAX_CLOCK_SKEW_IN_MILLISECONDS;
  long latest = nextUpdate.getTime() + tolerableValidity;
  return earliest <= now && now <= latest;
}
Checks the validity
28,402
/**
 * Processes an SSD key-update directive: verifies the JWT signature against
 * the issuer's registered public key and, when the directive carries a newer
 * key version, installs the new public key via ssdManager.
 *
 * @param issuer expected issuer id (visible values: "dep1" or "dep2")
 * @param ssd    the signed JWT carrying the directive
 */
private void processKeyUpdateDirective(String issuer, String ssd)
{
  try
  {
    SignedJWT jwt_signed = SignedJWT.parse(ssd);
    // NOTE(review): getCustomParam may return null; jwt_issuer.equals below
    // would then NPE and be swallowed by the outer catch — confirm intended.
    String jwt_issuer = (String) jwt_signed.getHeader().getCustomParam("ssd_iss");
    String ssd_pubKey;
    if (!jwt_issuer.equals(issuer))
    {
      LOGGER.debug("Issuer mismatch. Invalid SSD");
      return;
    }
    // Look up the verification key registered for this issuer.
    if (jwt_issuer.equals("dep1"))
    {
      ssd_pubKey = ssdManager.getPubKey("dep1");
    }
    else
    {
      ssd_pubKey = ssdManager.getPubKey("dep2");
    }
    if (ssd_pubKey == null)
    {
      LOGGER.debug("Invalid SSD");
      return;
    }
    // Strip the PEM armor so the Base64 body can be fed to X509EncodedKeySpec.
    String publicKeyContent = ssd_pubKey.replaceAll("\\n", "")
        .replace("-----BEGIN PUBLIC KEY-----", "")
        .replace("-----END PUBLIC KEY-----", "");
    KeyFactory kf = KeyFactory.getInstance("RSA");
    X509EncodedKeySpec keySpecX509 =
        new X509EncodedKeySpec(Base64.decodeBase64(publicKeyContent));
    RSAPublicKey rsaPubKey = (RSAPublicKey) kf.generatePublic(keySpecX509);
    SignedJWT jwt_token_verified = SignedJWT.parse(ssd);
    JWSVerifier jwsVerifier = new RSASSAVerifier(rsaPubKey);
    try
    {
      if (jwt_token_verified.verify(jwsVerifier))
      {
        long cur_time = System.currentTimeMillis();
        Date nbf = jwt_token_verified.getJWTClaimsSet().getNotBeforeTime();
        // Reject tokens that are not yet valid.
        if (cur_time < nbf.getTime())
        {
          LOGGER.debug("The SSD token is not yet valid. Current time less than Not Before Time");
          return;
        }
        float key_ver = Float.parseFloat(
            jwt_token_verified.getJWTClaimsSet().getStringClaim("keyVer"));
        // Ignore directives that do not advance the key version (replay guard).
        if (key_ver <= ssdManager.getPubKeyVer(jwt_issuer))
        {
          return;
        }
        ssdManager.updateKey(jwt_issuer,
            jwt_token_verified.getJWTClaimsSet().getStringClaim("pubKey"),
            key_ver);
      }
    }
    catch (Throwable ex)
    {
      LOGGER.debug("Failed to verify JWT Token");
      throw ex;
    }
  }
  catch (Throwable ex)
  {
    // Best-effort: any failure aborts the update without propagating.
    LOGGER.debug("Failed to parse JWT Token, aborting");
  }
}
SSD Processing Code
28,403
/**
 * Encodes an OCSP response as a Base64 string.
 *
 * @param ocspResp the response to encode; may be null
 * @return the Base64 string, or null if the input is null or encoding fails
 */
private String ocspResponseToB64(OCSPResp ocspResp)
{
  if (ocspResp == null)
  {
    return null;
  }
  try
  {
    return Base64.encodeBase64String(ocspResp.getEncoded());
  }
  catch (IOException ex)
  {
    // getEncoded() only declares IOException; the previous catch (Throwable)
    // would also have hidden programming errors such as NPEs.
    LOGGER.debug("Could not convert OCSP Response to Base64");
    return null;
  }
}
OCSP Response Utils
28,404
/**
 * Schedules the next heartbeat run, subtracting the time already elapsed
 * since the previous heartbeat started so the interval stays steady.
 */
private void scheduleHeartbeat()
{
  long nowSecs = System.currentTimeMillis() / 1000;
  long elapsedSecsSinceLastHeartBeat = nowSecs - lastHeartbeatStartTimeInSecs;
  long initialDelay = Math.max(heartBeatIntervalInSecs - elapsedSecsSinceLastHeartBeat, 0);
  LOGGER.debug("schedule heartbeat task with initial delay of {} seconds", initialDelay);
  this.heartbeatFuture = this.scheduler.schedule(this, initialDelay, TimeUnit.SECONDS);
}
Schedule the next heartbeat
28,405
/**
 * Creates a storage client matching the stage's location type.
 *
 * @param stage    stage descriptor holding type, credentials and region
 * @param parallel degree of transfer parallelism
 * @param encMat   encryption material, or null for unencrypted transfers
 * @return an S3 or Azure storage client
 * @throws IllegalArgumentException for unsupported stage types
 */
public SnowflakeStorageClient createClient(StageInfo stage, int parallel,
    RemoteStoreFileEncryptionMaterial encMat) throws SnowflakeSQLException
{
  logger.debug("createClient client type={}", stage.getStageType().name());
  switch (stage.getStageType())
  {
    case S3:
      return createS3Client(stage.getCredentials(), parallel, encMat, stage.getRegion());
    case AZURE:
      return createAzureClient(stage, encMat);
    default:
      throw new IllegalArgumentException(
          "Unsupported storage client specified: " + stage.getStageType().name());
  }
}
Creates a storage client based on the value of stageLocationType
28,406
/**
 * Builds a SnowflakeS3Client wrapping the Amazon S3 client, configured for
 * the requested parallelism and the driver's proxy/retry settings.
 */
private SnowflakeS3Client createS3Client(Map stageCredentials, int parallel,
    RemoteStoreFileEncryptionMaterial encMat, String stageRegion)
    throws SnowflakeSQLException
{
  final int S3_TRANSFER_MAX_RETRIES = 3;
  logger.debug("createS3Client encryption={}", (encMat == null ? "no" : "yes"));
  ClientConfiguration clientConfig = new ClientConfiguration();
  // One extra connection beyond the transfer parallelism.
  clientConfig.setMaxConnections(parallel + 1);
  clientConfig.setMaxErrorRetry(S3_TRANSFER_MAX_RETRIES);
  clientConfig.setDisableSocketProxy(HttpUtil.isSocksProxyDisabled());
  logger.debug("s3 client configuration: maxConnection={}, connectionTimeout={}, "
               + "socketTimeout={}, maxErrorRetry={}",
               clientConfig.getMaxConnections(),
               clientConfig.getConnectionTimeout(),
               clientConfig.getSocketTimeout(),
               clientConfig.getMaxErrorRetry());
  SnowflakeS3Client client;
  try
  {
    client = new SnowflakeS3Client(stageCredentials, clientConfig, encMat, stageRegion);
  }
  catch (Exception ex)
  {
    logger.debug("Exception creating s3 client", ex);
    throw ex;
  }
  logger.debug("s3 client created");
  return client;
}
Creates a SnowflakeS3ClientObject which encapsulates the Amazon S3 client
28,407
/**
 * Creates the provider-specific metadata object behind the platform
 * independent StorageObjectMetadata interface.
 *
 * @throws IllegalArgumentException for unsupported stage types
 */
public StorageObjectMetadata createStorageMetadataObj(StageInfo.StageType stageType)
{
  if (stageType == StageInfo.StageType.S3)
  {
    return new S3ObjectMetadata();
  }
  if (stageType == StageInfo.StageType.AZURE)
  {
    return new AzureObjectMetadata();
  }
  throw new IllegalArgumentException("Unsupported stage type specified: " + stageType.name());
}
Creates a storage provider specific metadata object accessible via the platform independent interface
28,408
/**
 * Builds a SnowflakeAzureClient wrapping the Azure Storage client for the
 * given stage.
 */
private SnowflakeAzureClient createAzureClient(StageInfo stage,
    RemoteStoreFileEncryptionMaterial encMat) throws SnowflakeSQLException
{
  logger.debug("createAzureClient encryption={}", (encMat == null ? "no" : "yes"));
  SnowflakeAzureClient client;
  try
  {
    client = SnowflakeAzureClient.createSnowflakeAzureClient(stage, encMat);
  }
  catch (Exception ex)
  {
    logger.debug("Exception creating Azure Storage client", ex);
    throw ex;
  }
  logger.debug("Azure Storage client created");
  return client;
}
Creates a SnowflakeAzureClientObject which encapsulates the Azure Storage client
28,409
/**
 * Creates a BindUploader targeting the given stage path, backed by a fresh
 * temporary directory for the serialized bind files.
 *
 * @throws BindException if the temporary directory cannot be created
 */
public synchronized static BindUploader newInstance(SFSession session, String stageDir)
    throws BindException
{
  final Path bindDir;
  try
  {
    bindDir = Files.createTempDirectory(PREFIX);
  }
  catch (IOException ex)
  {
    throw new BindException(
        String.format("Failed to create temporary directory: %s", ex.getMessage()),
        BindException.Type.OTHER);
  }
  return new BindUploader(session, stageDir, bindDir);
}
Create a new BindUploader which will upload to the given stage path, ensuring that a temporary directory for file writing exists.
28,410
/**
 * Serializes the bind values to local CSV files and uploads them to the
 * stage. A closed uploader silently ignores the request.
 */
public void upload(Map<String, ParameterBindingDTO> bindValues) throws BindException
{
  if (closed)
  {
    return;
  }
  serializeBinds(bindValues);
  putBinds();
}
Upload the bindValues to stage
28,411
/**
 * Serializes the bind values to compressed CSV files on local disk.
 */
private void serializeBinds(Map<String, ParameterBindingDTO> bindValues) throws BindException
{
  List<ColumnTypeDataPair> columnData = getColumnValues(bindValues);
  writeRowsToCSV(buildRows(columnData));
}
Save the binds to disk
28,412
/**
 * Converts the bind map (keys "1".."n") into an ordered list of per-column
 * type/data pairs, applying timestamp and date formatting where needed.
 *
 * @throws BindException if a column key is missing or a value is not a list
 */
private List<ColumnTypeDataPair> getColumnValues(Map<String, ParameterBindingDTO> bindValues)
    throws BindException
{
  int numColumns = bindValues.size();
  List<ColumnTypeDataPair> columns = new ArrayList<>(numColumns);
  for (int colNum = 1; colNum <= numColumns; colNum++)
  {
    String key = Integer.toString(colNum);
    if (!bindValues.containsKey(key))
    {
      throw new BindException(
          String.format("Bind map with %d columns should contain key \"%d\"",
                        numColumns, colNum),
          BindException.Type.SERIALIZATION);
    }
    ParameterBindingDTO binding = bindValues.get(key);
    try
    {
      String type = binding.getType();
      List<String> rawValues = (List<String>) binding.getValue();
      List<String> formatted = new ArrayList<>(rawValues.size());
      if ("TIMESTAMP_LTZ".equals(type) || "TIMESTAMP_NTZ".equals(type))
      {
        for (String value : rawValues)
        {
          formatted.add(synchronizedTimestampFormat(value));
        }
      }
      else if ("DATE".equals(type))
      {
        for (String value : rawValues)
        {
          formatted.add(synchronizedDateFormat(value));
        }
      }
      else
      {
        // Other types are passed through untouched.
        formatted = rawValues;
      }
      columns.add(colNum - 1, new ColumnTypeDataPair(type, formatted));
    }
    catch (ClassCastException ex)
    {
      throw new BindException("Value in binding DTO could not be cast to a list",
                              BindException.Type.SERIALIZATION);
    }
  }
  return columns;
}
Convert bind map to a list of values for each column Perform necessary type casts and invariant checks
28,413
/**
 * Transposes a list of columns into a list of rows.
 *
 * @param columns per-column data; all columns must have the same length
 * @return rows, each an array with one value per column
 * @throws BindException if there are no columns, the first column is empty,
 *                       or the columns have differing lengths
 */
private List<String[]> buildRows(List<ColumnTypeDataPair> columns) throws BindException
{
  int numColumns = columns.size();
  // Guard the columns.get(0) calls below; an empty list would otherwise
  // escape as an unchecked IndexOutOfBoundsException instead of the
  // BindException callers handle.
  if (numColumns == 0)
  {
    throw new BindException("No columns provided for binding",
                            BindException.Type.SERIALIZATION);
  }
  if (columns.get(0).data.isEmpty())
  {
    throw new BindException("No binds found in first column",
                            BindException.Type.SERIALIZATION);
  }
  int numRows = columns.get(0).data.size();
  for (int i = 0; i < numColumns; i++)
  {
    int iNumRows = columns.get(i).data.size();
    if (iNumRows != numRows)
    {
      throw new BindException(String.format(
          "Column %d has a different number of binds (%d) than column 1 (%d)",
          i, iNumRows, numRows), BindException.Type.SERIALIZATION);
    }
  }
  List<String[]> rows = new ArrayList<>();
  for (int rowIdx = 0; rowIdx < numRows; rowIdx++)
  {
    String[] row = new String[numColumns];
    for (int colIdx = 0; colIdx < numColumns; colIdx++)
    {
      row[colIdx] = columns.get(colIdx).data.get(rowIdx);
    }
    rows.add(row);
  }
  return rows;
}
Transpose a list of columns and their values to a list of rows
28,414
/**
 * Writes the rows as gzip-compressed CSV files, rolling to a new file once
 * the uncompressed byte count for the current file reaches fileSize.
 *
 * @throws BindException on any I/O failure while writing
 */
private void writeRowsToCSV(List<String[]> rows) throws BindException
{
  int rowNum = 0;
  int fileCount = 0;
  while (rowNum < rows.size())
  {
    File file = getFile(++fileCount);
    try (OutputStream out = openFile(file))
    {
      int bytesWritten = 0;
      while (bytesWritten < fileSize && rowNum < rows.size())
      {
        byte[] csv = createCSVRecord(rows.get(rowNum));
        bytesWritten += csv.length;
        out.write(csv);
        rowNum++;
      }
    }
    catch (IOException ex)
    {
      throw new BindException(
          String.format("Exception encountered while writing to file: %s", ex.getMessage()),
          BindException.Type.SERIALIZATION);
    }
  }
}
Write the list of rows to compressed CSV files in the temporary directory
28,415
/**
 * Opens a gzip-compressing output stream over the given file.
 *
 * @throws BindException if the file cannot be created
 */
private OutputStream openFile(File file) throws BindException
{
  try
  {
    return new GZIPOutputStream(new FileOutputStream(file));
  }
  catch (IOException ex)
  {
    throw new BindException(
        String.format("Failed to create output file %s: %s", file.toString(), ex.getMessage()),
        BindException.Type.SERIALIZATION);
  }
}
Create a new output stream for the given file
28,416
/**
 * Serializes one row as a UTF-8 CSV record terminated by '\n'.
 */
private byte[] createCSVRecord(String[] data)
{
  StringBuilder record = new StringBuilder(1024);
  boolean first = true;
  for (String field : data)
  {
    if (!first)
    {
      record.append(',');
    }
    first = false;
    record.append(SnowflakeType.escapeForCSV(field));
  }
  record.append('\n');
  return record.toString().getBytes(UTF_8);
}
Serialize row to a csv Duplicated from StreamLoader class
28,417
/**
 * Builds the PUT statement for the given local directory and stage path,
 * doubling backslashes so Windows paths survive SQL parsing.
 */
private String getPutStmt(String bindDir, String stagePath)
{
  String stmt = String.format(PUT_STMT, bindDir, File.separator, stagePath);
  return stmt.replaceAll("\\\\", "\\\\\\\\");
}
Build PUT statement string . Handle filesystem differences and escaping backslashes .
28,418
/**
 * Uploads the serialized bind files from the local bind directory to the
 * session's temporary stage, retrying the PUT up to PUT_RETRY_COUNT times.
 *
 * @throws BindException if the stage cannot be created or every PUT attempt fails
 */
private void putBinds() throws BindException
{
  createStageIfNeeded();
  String putStatement = getPutStmt(bindDir.toString(), stagePath);
  for (int i = 0; i < PUT_RETRY_COUNT; i++)
  {
    try
    {
      SFStatement statement = new SFStatement(session);
      SFBaseResultSet putResult = statement.execute(putStatement, null, null);
      putResult.next();
      // Locate the "status" column in the PUT result (1-based index).
      int column = putResult.getMetaData().getColumnIndex(
          SnowflakeFileTransferAgent.UploadColumns.status.name()) + 1;
      String status = putResult.getString(column);
      if (SnowflakeFileTransferAgent.ResultStatus.UPLOADED.name().equals(status))
      {
        return;
      }
      // Use the "{}" placeholder like every other log call in this file;
      // the printf-style "%s" was never interpolated by this logger.
      logger.debug("PUT statement failed. The response had status {}.", status);
    }
    catch (SFException | SQLException ex)
    {
      logger.debug("Exception encountered during PUT operation. ", ex);
    }
  }
  throw new BindException("Failed to PUT files to stage.", BindException.Type.UPLOAD);
}
Upload binds from local file to stage
28,419
/**
 * Ensures the session's temporary array-bind stage exists, creating it on
 * first use. Uses double-checked locking on the session so only one thread
 * issues the CREATE STAGE statement.
 *
 * @throws BindException if stage creation fails; the array-bind threshold is
 *                       also reset to 0 so later queries stop trying
 */
private void createStageIfNeeded() throws BindException
{
  if (session.getArrayBindStage() != null)
  {
    return;
  }
  synchronized (session)
  {
    // Re-check under the lock: another thread may have created the stage.
    if (session.getArrayBindStage() == null)
    {
      try
      {
        SFStatement statement = new SFStatement(session);
        statement.execute(CREATE_STAGE_STMT, null, null);
        session.setArrayBindStage(STAGE_NAME);
      }
      catch (SFException | SQLException ex)
      {
        // Disable stage-based array binds for this session so callers fall
        // back rather than failing the same way repeatedly.
        session.setArrayBindStageThreshold(0);
        throw new BindException(
            String.format("Failed to create temporary stage for array binds. %s",
                          ex.getMessage()),
            BindException.Type.UPLOAD);
      }
    }
  }
}
Check whether the session's temporary stage has been created, and create it if not.
28,420
/**
 * Returns the total number of array-bind values (columns x rows), or 0 when
 * the map does not represent an array bind.
 */
public static int arrayBindValueCount(Map<String, ParameterBindingDTO> bindValues)
{
  if (!isArrayBind(bindValues))
  {
    return 0;
  }
  ParameterBindingDTO sample = bindValues.values().iterator().next();
  List<String> sampleValues = (List<String>) sample.getValue();
  return bindValues.size() * sampleValues.size();
}
Compute the number of array bind values in the given bind map
28,421
/**
 * Returns whether the bind map uses array binds, judged by whether the
 * first binding's value is a List.
 */
public static boolean isArrayBind(Map<String, ParameterBindingDTO> bindValues)
{
  if (bindValues == null || bindValues.size() == 0)
  {
    return false;
  }
  Object sampleValue = bindValues.values().iterator().next().getValue();
  return sampleValue instanceof List;
}
Return whether the bind map uses array binds
28,422
/**
 * Builds a platform-neutral StorageObjectSummary from an S3ObjectSummary.
 */
public static StorageObjectSummary createFromS3ObjectSummary(S3ObjectSummary objSummary)
{
  return new StorageObjectSummary(
      objSummary.getBucketName(),
      objSummary.getKey(),
      objSummary.getETag(),
      objSummary.getSize());
}
Constructs a StorageObjectSummary object from the S3-equivalent S3ObjectSummary.
28,423
/**
 * Builds a platform-neutral StorageObjectSummary from an Azure blob listing
 * entry. A factory method is used because Azure property access can throw.
 *
 * @throws StorageProviderException wrapping any Azure/URI failure
 */
public static StorageObjectSummary createFromAzureListBlobItem(ListBlobItem listBlobItem)
    throws StorageProviderException
{
  try
  {
    String location = listBlobItem.getContainer().getName();
    CloudBlob cloudBlob = (CloudBlob) listBlobItem;
    String key = cloudBlob.getName();
    BlobProperties blobProperties = cloudBlob.getProperties();
    String md5 = convertBase64ToHex(blobProperties.getContentMD5());
    long size = blobProperties.getLength();
    return new StorageObjectSummary(location, key, md5, size);
  }
  catch (URISyntaxException | StorageException ex)
  {
    throw new StorageProviderException(ex);
  }
}
Constructs a StorageObjectSummary object from Azure blob properties. Factory methods are used because Azure can throw while retrieving the blob properties.
28,424
/**
 * Returns true when the session uses the default Snowflake authenticator:
 * either it is named explicitly, or neither an authenticator nor a private
 * key (key-pair auth) is configured.
 */
private boolean isSnowflakeAuthenticator()
{
  String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR);
  PrivateKey privateKey = (PrivateKey) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY);
  boolean defaulted = authenticator == null && privateKey == null;
  return defaulted
         || ClientAuthnDTO.AuthenticatorType.SNOWFLAKE.name().equalsIgnoreCase(authenticator);
}
If authenticator is null and private key is specified jdbc will assume key pair authentication
28,425
/**
 * Returns true if the configured authenticator is EXTERNALBROWSER.
 */
boolean isExternalbrowserAuthenticator()
{
  String authenticator = (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR);
  String externalBrowser = ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER.name();
  return externalBrowser.equalsIgnoreCase(authenticator);
}
Returns true If authenticator is EXTERNALBROWSER .
28,426
/**
 * Renews the session tokens via the global service. If the session token has
 * already changed from prevSessionToken, another caller renewed it first and
 * this call is a no-op.
 *
 * @param prevSessionToken the session token the caller last observed
 * @throws SFException           on renewal failure
 * @throws SnowflakeSQLException on renewal failure
 */
synchronized void renewSession(String prevSessionToken)
    throws SFException, SnowflakeSQLException
{
  if (sessionToken != null && !sessionToken.equals(prevSessionToken))
  {
    logger.debug("not renew session because session token has not been updated.");
    return;
  }
  SessionUtil.LoginInput loginInput = new SessionUtil.LoginInput();
  loginInput.setServerUrl((String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL))
      .setSessionToken(sessionToken)
      .setMasterToken(masterToken)
      .setIdToken(idToken)
      .setLoginTimeout(loginTimeout)
      .setDatabaseName(this.getDatabase())
      .setSchemaName(this.getSchema())
      .setRole(this.getRole())
      .setWarehouse(this.getWarehouse());
  SessionUtil.LoginOutput loginOutput = SessionUtil.renewSession(loginInput);
  // If renewal went through the id-token path, re-establish the current
  // database/schema/role/warehouse on the new session.
  if (loginOutput.isUpdatedByTokenRequestIssue())
  {
    setCurrentObjects(loginInput, loginOutput);
  }
  sessionToken = loginOutput.getSessionToken();
  masterToken = loginOutput.getMasterToken();
}
A helper function to call global service and renew session .
28,427
/**
 * Registers this session with the heartbeat background thread so its tokens
 * are kept alive, if heartbeats are enabled and a master token exists.
 */
protected void startHeartbeatForThisSession()
{
  if (enableHeartbeat && !Strings.isNullOrEmpty(masterToken))
  {
    // Parameterized logging (used everywhere else in this file) instead of
    // string concatenation, so the message is only built when debug is on.
    logger.debug("start heartbeat, master token validity: {}", masterTokenValidityInSeconds);
    HeartbeatBackground.getInstance().addSession(this, masterTokenValidityInSeconds,
                                                 this.heartbeatFrequency);
  }
  else
  {
    logger.debug("heartbeat not enabled for the session");
  }
}
Start heartbeat for this session
28,428
/**
 * Deregisters this session from the heartbeat background thread, if
 * heartbeats were enabled for it.
 */
protected void stopHeartbeatForThisSession()
{
  boolean heartbeatActive = enableHeartbeat && !Strings.isNullOrEmpty(masterToken);
  if (!heartbeatActive)
  {
    logger.debug("heartbeat not enabled for the session");
    return;
  }
  logger.debug("stop heartbeat");
  HeartbeatBackground.getInstance().removeSession(this);
}
Stop heartbeat for this session
28,429
/**
 * Sends a heartbeat request to keep the session alive. If the server reports
 * the session expired, the session is renewed and the heartbeat retried.
 *
 * @throws SFException  wrapped unexpected failures (reported as incidents)
 * @throws SQLException server-reported errors
 */
protected void heartbeat() throws SFException, SQLException
{
  logger.debug(" public void heartbeat()");
  if (isClosed)
  {
    return;
  }
  HttpPost postRequest = null;
  // One request id is reused across the renew-and-retry loop.
  String requestId = UUID.randomUUID().toString();
  boolean retry = false;
  do
  {
    try
    {
      URIBuilder uriBuilder;
      uriBuilder = new URIBuilder((String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL));
      uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, requestId);
      uriBuilder.setPath(SF_PATH_SESSION_HEARTBEAT);
      postRequest = new HttpPost(uriBuilder.build());
      // Capture the token used for this attempt so renewSession can tell
      // whether another thread already refreshed it.
      String prevSessionToken = sessionToken;
      postRequest.setHeader(SF_HEADER_AUTHORIZATION,
                            SF_HEADER_SNOWFLAKE_AUTHTYPE + " "
                            + SF_HEADER_TOKEN_TAG + "=\"" + prevSessionToken + "\"");
      logger.debug("Executing heartbeat request: {}", postRequest.toString());
      String theResponse = HttpUtil.executeRequest(postRequest, SF_HEARTBEAT_TIMEOUT, 0, null);
      JsonNode rootNode;
      logger.debug("connection heartbeat response: {}", theResponse);
      rootNode = mapper.readTree(theResponse);
      // Session expired: renew it and loop to retry the heartbeat.
      if (rootNode != null
          && (Constants.SESSION_EXPIRED_GS_CODE == rootNode.path("code").asInt()))
      {
        logger.debug("renew session and retry");
        this.renewSession(prevSessionToken);
        retry = true;
        continue;
      }
      SnowflakeUtil.checkErrorAndThrowException(rootNode);
      retry = false;
    }
    catch (Throwable ex)
    {
      if (ex instanceof SnowflakeSQLException)
      {
        throw (SnowflakeSQLException) ex;
      }
      logger.error("unexpected exception", ex);
      // Anything unexpected is wrapped into an incident-reported SFException.
      throw (SFException) IncidentUtil.generateIncidentV2WithException(
          this,
          new SFException(ErrorCode.INTERNAL_ERROR,
                          IncidentUtil.oneLiner("unexpected exception", ex)),
          null,
          requestId);
    }
  }
  while (retry);
}
Send heartbeat for the session
28,430
/**
 * Re-establishes the session's current role/warehouse/database/schema after
 * an id-token based login (which does not carry the current objects), then
 * copies the resulting session state back into the login output.
 */
void setCurrentObjects(SessionUtil.LoginInput loginInput, SessionUtil.LoginOutput loginOutput)
{
  this.sessionToken = loginOutput.sessionToken; // use the new token for the commands below
  runInternalCommand("USE ROLE IDENTIFIER(?)", loginInput.getRole());
  runInternalCommand("USE WAREHOUSE IDENTIFIER(?)", loginInput.getWarehouse());
  runInternalCommand("USE DATABASE IDENTIFIER(?)", loginInput.getDatabaseName());
  runInternalCommand("USE SCHEMA IDENTIFIER(?)", loginInput.getSchemaName());
  // Issue a trivial query to pull the common session parameters.
  SFBaseResultSet result = runInternalCommand("SELECT ?", "1");
  loginOutput.setSessionDatabase(this.database);
  loginOutput.setSessionSchema(this.schema);
  loginOutput.setSessionWarehouse(this.warehouse);
  loginOutput.setSessionRole(this.role);
  loginOutput.setIdToken(loginInput.getIdToken());
  if (result != null)
  {
    loginOutput.setCommonParams(result.parameters);
  }
}
Sets the current objects if the session is not up to date . It can happen if the session is created by the id token which doesn t carry the current objects .
28,431
/**
 * Executes a statement whose result is not needed; the statement is closed
 * before this method returns.
 */
private void executeImmediate(String stmtText) throws SQLException
{
  try (Statement statement = this.createStatement())
  {
    statement.execute(stmtText);
  }
}
Execute a statement where the result isn t needed and the statement is closed before this method returns
28,432
/**
 * Creates a forward-only, read-only statement and tracks it so it can be
 * closed when the connection closes.
 */
public Statement createStatement() throws SQLException
{
  raiseSQLExceptionIfConnectionIsClosed();
  Statement newStmt = createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
  openStatements.add(newStmt);
  return newStmt;
}
Create a statement
28,433
/**
 * Sets the transaction isolation level; only TRANSACTION_NONE and
 * TRANSACTION_READ_COMMITTED are supported.
 *
 * @throws SQLFeatureNotSupportedException for any other level
 */
public void setTransactionIsolation(int level) throws SQLException
{
  logger.debug("void setTransactionIsolation(int level), level = {}", level);
  raiseSQLExceptionIfConnectionIsClosed();
  boolean supported = level == Connection.TRANSACTION_NONE
                      || level == Connection.TRANSACTION_READ_COMMITTED;
  if (!supported)
  {
    throw new SQLFeatureNotSupportedException(
        "Transaction Isolation " + level + " not supported.",
        FEATURE_UNSUPPORTED.getSqlState(), FEATURE_UNSUPPORTED.getMessageCode());
  }
  this.transactionIsolation = level;
}
Sets the transaction isolation level .
28,434
/**
 * Downloads a file from the given stage and returns it as an input stream.
 *
 * @param stageName      stage to download from (a leading '@' is added if absent)
 * @param sourceFileName path of the file within the stage
 * @param decompress     if true, wrap the result in a GZIPInputStream
 * @return the (optionally decompressed) content stream
 * @throws SQLException if arguments are missing or the transfer fails
 */
public InputStream downloadStream(String stageName, String sourceFileName,
                                  boolean decompress) throws SQLException
{
  logger.debug("download data to stream: stageName={}" + ", sourceFileName={}",
               stageName, sourceFileName);
  if (Strings.isNullOrEmpty(stageName))
  {
    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(), "stage name is null or empty");
  }
  if (Strings.isNullOrEmpty(sourceFileName))
  {
    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(), "source file name is null or empty");
  }
  SnowflakeStatementV1 stmt = new SnowflakeStatementV1(
      this, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY,
      ResultSet.CLOSE_CURSORS_AT_COMMIT);
  StringBuilder getCommand = new StringBuilder();
  getCommand.append("get ");
  if (!stageName.startsWith("@"))
  {
    getCommand.append("@");
  }
  getCommand.append(stageName);
  getCommand.append("/");
  // Avoid a double slash between stage name and file path.
  if (sourceFileName.startsWith("/"))
  {
    sourceFileName = sourceFileName.substring(1);
  }
  getCommand.append(sourceFileName);
  getCommand.append(" file:///tmp/ /*jdbc download stream*/");
  SnowflakeFileTransferAgent transferAgent = new SnowflakeFileTransferAgent(
      getCommand.toString(), sfSession, stmt.getSfStatement());
  InputStream stream = transferAgent.downloadStream(sourceFileName);
  if (!decompress)
  {
    return stream;
  }
  try
  {
    return new GZIPInputStream(stream);
  }
  catch (IOException ex)
  {
    // Close the underlying stream before failing; previously it was leaked
    // when the GZIP header could not be read.
    try
    {
      stream.close();
    }
    catch (IOException ignored)
    {
      // best effort; the original failure is the one worth reporting
    }
    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(), ex.getMessage());
  }
}
Download file from the given stage and return an input stream
28,435
/**
 * Builds a decrypting stream: first unwraps the per-file key with the query
 * stage master key (KEY_CIPHER), then streams the payload through the file
 * cipher (FILE_CIPHER) in decrypt mode.
 *
 * @param inputStream the encrypted payload
 * @param keyBase64   Base64-encoded encrypted file key
 * @param ivBase64    Base64-encoded initialization vector
 * @param encMat      encryption material holding the Base64 master key
 * @return a CipherInputStream yielding the decrypted bytes
 */
public static InputStream decryptStream(InputStream inputStream, String keyBase64,
    String ivBase64, RemoteStoreFileEncryptionMaterial encMat)
    throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidKeyException,
           BadPaddingException, IllegalBlockSizeException, InvalidAlgorithmParameterException
{
  byte[] decodedKey = Base64.decode(encMat.getQueryStageMasterKey());
  byte[] keyBytes = Base64.decode(keyBase64);
  byte[] ivBytes = Base64.decode(ivBase64);
  SecretKey queryStageMasterKey = new SecretKeySpec(decodedKey, 0, decodedKey.length, AES);
  Cipher keyCipher = Cipher.getInstance(KEY_CIPHER);
  keyCipher.init(Cipher.DECRYPT_MODE, queryStageMasterKey);
  byte[] fileKeyBytes = keyCipher.doFinal(keyBytes);
  // NOTE(review): the file key length is taken from decodedKey.length (the
  // master key size) rather than fileKeyBytes.length — presumably the
  // unwrapped key matches the master key size; confirm against the
  // encryption spec before changing.
  SecretKey fileKey = new SecretKeySpec(fileKeyBytes, 0, decodedKey.length, AES);
  Cipher dataCipher = Cipher.getInstance(FILE_CIPHER);
  IvParameterSpec ivy = new IvParameterSpec(ivBytes);
  dataCipher.init(Cipher.DECRYPT_MODE, fileKey, ivy);
  return new CipherInputStream(inputStream, dataCipher);
}
Decrypt a InputStream
28,436
/**
 * Creates the single-threaded flusher and schedules the QueueFlusher to run
 * repeatedly; the worker thread is a daemon so it never blocks JVM exit.
 */
synchronized void startFlusher()
{
  ThreadFactory daemonFactory = new ThreadFactory()
  {
    public Thread newThread(Runnable r)
    {
      Thread worker = Executors.defaultThreadFactory().newThread(r);
      worker.setDaemon(true);
      return worker;
    }
  };
  flusher = Executors.newScheduledThreadPool(1, daemonFactory);
  flusher.scheduleWithFixedDelay(new QueueFlusher(), 0, flushPeriodMs, TimeUnit.MILLISECONDS);
}
Creates and runs a new QueueFlusher thread
28,437
/**
 * Dumps the in-memory log buffer to a (by default gzip-compressed) file and
 * clears the buffer. Old dump files are cleaned up first to bound disk use.
 *
 * @param identifier suffix for the dump file name; when null, the
 *                   process-wide dump file id from EventUtil is used
 */
public void dumpLogBuffer(String identifier)
{
  final ArrayList<LogRecord> logBufferCopy;
  final PrintWriter logDumper;
  final OutputStream outStream;
  Formatter formatter = this.getFormatter();
  boolean disableCompression = System.getProperty(DISABLE_DUMP_COMPR_PROP) != null;
  if (identifier == null)
  {
    identifier = EventUtil.getDumpFileId();
  }
  cleanupSfDumps(true);
  String logDumpPath = logDumpPathPrefix + "/" + LOG_DUMP_FILE_NAME + identifier + LOG_DUMP_FILE_EXT;
  if (!disableCompression)
  {
    logDumpPath += LOG_DUMP_COMP_EXT;
  }
  logger.debug("EventHandler dumping log buffer to {}", logDumpPath);
  // Copy and clear under the lock so concurrent loggers are not blocked
  // while the dump is written out.
  synchronized (this)
  {
    logBufferCopy = new ArrayList<>(logBuffer);
    logBuffer.clear();
  }
  File outputFile = new File(logDumpPath);
  try
  {
    if (outputFile.getParentFile() != null)
    {
      outputFile.getParentFile().mkdirs();
    }
    outStream = disableCompression
                ? new FileOutputStream(logDumpPath, false)
                : new GZIPOutputStream(new FileOutputStream(logDumpPath, false));
    logDumper = new PrintWriter(outStream, true);
  }
  catch (IOException exc)
  {
    // Best effort: records already copied out of the buffer are dropped.
    logger.debug("Log dump failed, exception: {}", exc.getMessage());
    return;
  }
  for (LogRecord entry : logBufferCopy)
  {
    logDumper.write(formatter != null ? formatter.format(entry) : entry.getMessage());
  }
  logDumper.flush();
  logDumper.close();
}
Dumps the contents of the in - memory log buffer to disk and clears the buffer .
28,438
/**
 * Removes old Snowflake dump files (log and incident dumps) to make room for
 * new ones: expired files are deleted outright, the directory is trimmed to
 * half the size cap when it exceeds the cap, and optionally the first file
 * in the ordering is deleted when the file-count cap is reached.
 *
 * @param deleteOldest when true and the file-count cap is reached, delete
 *                     the first file in the ordering
 */
protected void cleanupSfDumps(boolean deleteOldest)
{
  int maxDumpFiles = System.getProperty(MAX_NUM_DUMP_FILES_PROP) != null
                     ? Integer.valueOf(System.getProperty(MAX_NUM_DUMP_FILES_PROP))
                     : DEFAULT_MAX_DUMP_FILES;
  int maxDumpDirSizeMB = System.getProperty(MAX_SIZE_DUMPS_MB_PROP) != null
                         ? Integer.valueOf(System.getProperty(MAX_SIZE_DUMPS_MB_PROP))
                         : DEFAULT_MAX_DUMPDIR_SIZE_MB;
  File dumpDir = new File(logDumpPathPrefix);
  long dirSizeBytes = 0;
  if (dumpDir.listFiles() == null)
  {
    return;
  }
  // NOTE(review): this comparator orders by file SIZE, never returns 0, and
  // returns 1 for equal lengths (asymmetric) — so fileList.first() below is
  // the smallest file, not necessarily the oldest despite the deleteOldest
  // name. Confirm whether ordering by lastModified was intended.
  TreeSet<File> fileList = new TreeSet<>(new Comparator<File>()
  {
    public int compare(File a, File b)
    {
      return a.length() < b.length() ? -1 : 1;
    }
  });
  for (File file : dumpDir.listFiles())
  {
    // Skip non-dump files; delete (and skip) dump files past expiration.
    if ((!file.getName().startsWith(LOG_DUMP_FILE_NAME)
         && !file.getName().startsWith(IncidentUtil.INC_DUMP_FILE_NAME))
        || (System.currentTimeMillis() - file.lastModified() > FILE_EXPN_TIME_MS
            && file.delete()))
    {
      continue;
    }
    dirSizeBytes += file.length();
    fileList.add(file);
  }
  if (dirSizeBytes >= ((long) maxDumpDirSizeMB << 20))
  {
    // Over the cap: delete files until under half the cap (<< 19 = MB/2).
    for (File file : fileList)
    {
      if (dirSizeBytes < ((long) maxDumpDirSizeMB << 19))
      {
        break;
      }
      long victimSize = file.length();
      if (file.delete())
      {
        dirSizeBytes -= victimSize;
      }
    }
  }
  else if (deleteOldest && fileList.size() >= maxDumpFiles)
  {
    fileList.first().delete();
  }
}
Function to remove old Snowflake Dump files to make room for new ones .
28,439
/**
 * Decides whether reporting of an incident with the given signature should
 * be suppressed. Once a signature reaches INCIDENT_THROTTLE_LIMIT_PER_HR
 * occurrences it is throttled for THROTTLE_DURATION_HRS, after which its
 * counter restarts.
 *
 * @param signature incident signature used as the throttling key
 * @return true when the incident should NOT be reported
 */
private synchronized boolean needsToThrottle(String signature)
{
  AtomicInteger sigCount;
  if (throttledIncidents.containsKey(signature))
  {
    // Throttle window elapsed? Reset to a fresh count of 1 and allow.
    if (throttledIncidents.get(signature).plusHours(THROTTLE_DURATION_HRS)
        .compareTo(DateTime.now()) <= 0)
    {
      throttledIncidents.remove(signature);
      incidentCounter.put(signature, new AtomicInteger(1));
      return false;
    }
    return true;
  }
  sigCount = incidentCounter.get(signature);
  if (sigCount == null)
  {
    // First sighting: start counting from zero (incremented below).
    incidentCounter.put(signature, sigCount = new AtomicInteger(0));
  }
  else if (sigCount.get() + 1 >= INCIDENT_THROTTLE_LIMIT_PER_HR)
  {
    // Limit reached: start throttling this signature from now.
    incidentCounter.remove(signature);
    throttledIncidents.put(signature, DateTime.now());
    return true;
  }
  sigCount.incrementAndGet();
  return false;
}
Checks to see if the reporting of an incident should be throttled due to the number of times the signature has been seen in the last hour
28,440
/**
 * Starts the loader: validates configuration, initializes date formats and
 * queues, and on the first start optionally begins a transaction, truncates
 * the target table, and runs the configured "before" SQL. Failures abort the
 * loader via abort() rather than throwing.
 */
public void start()
{
  LOGGER.debug("Start Loading");
  validateParameters();
  if (_op == null)
  {
    this.abort(new ConnectionError("Loader started with no operation"));
    return;
  }
  initDateFormats();
  initQueues();
  // The transaction/truncate/before-SQL prologue runs only on the first
  // start call.
  if (_is_first_start_call)
  {
    try
    {
      if (_startTransaction)
      {
        LOGGER.debug("Begin Transaction");
        _processConn.createStatement().execute("begin transaction");
      }
      else
      {
        LOGGER.debug("No Transaction started");
      }
    }
    catch (SQLException ex)
    {
      abort(new Loader.ConnectionError("Failed to start Transaction",
                                       Utils.getCause(ex)));
    }
    if (_truncate)
    {
      truncateTargetTable();
    }
    try
    {
      if (_before != null)
      {
        LOGGER.debug("Running Execute Before SQL");
        _processConn.createStatement().execute(_before);
      }
    }
    catch (SQLException ex)
    {
      abort(new Loader.ConnectionError(
          String.format("Execute Before SQL failed to run: %s", _before),
          Utils.getCause(ex)));
    }
  }
}
Starts the loader
28,441
/**
 * Flushes all buffered data: signals the data queue with an empty sentinel,
 * joins the worker threads, and rethrows the abort cause if the loader was
 * aborted along the way.
 *
 * @throws DataError if the stream loader queue cannot be joined
 */
private void flushQueues()
{
  LOGGER.debug("Flush Queues");
  try
  {
    // An empty byte[] is the end-of-data sentinel for the consumer thread.
    _queueData.put(new byte[0]);
    _thread.join(10000);
    if (_thread.isAlive())
    {
      // Still running after the 10 s grace period: force it to stop.
      _thread.interrupt();
    }
  }
  catch (Exception ex)
  {
    String msg = "Failed to join StreamLoader queue: " + ex.getMessage();
    LOGGER.error(msg, ex);
    throw new DataError(msg, Utils.getCause(ex));
  }
  terminate();
  _put.join();
  _process.join();
  if (_aborted.get())
  {
    throw _abortCause;
  }
}
Flushes data by joining PUT and PROCESS queues
28,442
/**
 * Changes the loader operation. If the operation actually changes, the
 * existing buffer stage is queued for processing and a fresh stage is
 * created for the new operation.
 *
 * @param op the new operation
 */
public void resetOperation(Operation op)
{
  LOGGER.debug("Reset Loader");
  if (op.equals(_op))
  {
    // No change; keep the current stage.
    return;
  }
  LOGGER.debug("Operation is changing from {} to {}", _op, op);
  _op = op;
  if (_stage != null)
  {
    try
    {
      queuePut(_stage);
    }
    catch (InterruptedException ex)
    {
      // NOTE(review): interrupt status is not restored here — confirm intent.
      LOGGER.error(_stage.getId(), ex);
    }
  }
  _stage = new BufferStage(this, _op, _csvFileBucketSize, _csvFileSize);
}
If the operation changes, the existing stage needs to be scheduled for processing.
28,443
/**
 * Points this cache at a different backing file, updating the derived
 * cache directory and base file name accordingly.
 *
 * @param newCacheFile the file to use as the cache file from now on
 */
void overrideCacheFile(File newCacheFile)
{
  File parentDirectory = newCacheFile.getParentFile();
  String fileName = newCacheFile.getName();
  this.cacheFile = newCacheFile;
  this.cacheDir = parentDirectory;
  this.baseCacheFileName = fileName;
}
Override the cache file .
28,444
/**
 * Reads and parses the JSON cache file.
 *
 * @return the parsed JSON tree, or null if there is no cache file, the
 *         cache lock check fails, the file does not exist, or it cannot be
 *         read or parsed
 */
JsonNode readCacheFile()
{
  if (cacheFile == null || !this.checkCacheLockFile())
  {
    return null;
  }
  try
  {
    if (!cacheFile.exists())
    {
      LOGGER.debug("Cache file doesn't exists. File: {}", cacheFile);
      return null;
    }
    try (Reader reader = new InputStreamReader(
        new FileInputStream(cacheFile), DEFAULT_FILE_ENCODING))
    {
      return OBJECT_MAPPER.readTree(reader);
    }
  }
  catch (IOException ex)
  {
    // A stale or unreadable cache is not fatal; callers fall back to no cache.
    LOGGER.debug("Failed to read the cache file. No worry. File: {}, Err: {}",
                 cacheFile, ex);
  }
  return null;
}
Reads the cache file .
28,445
/**
 * Tries to lock the cache file, retrying up to 100 times with a 100ms
 * pause between attempts (roughly 10 seconds in total).
 *
 * @return true if the lock was acquired, false otherwise
 */
private boolean tryLockCacheFile()
{
  int cnt = 0;
  boolean locked = false;
  while (cnt < 100 && !(locked = lockCacheFile()))
  {
    try
    {
      Thread.sleep(100);
    }
    catch (InterruptedException ex)
    {
      // BUGFIX: the interrupt was previously swallowed. Restore the
      // interrupt status and stop waiting; the caller simply observes
      // that the lock could not be acquired.
      Thread.currentThread().interrupt();
      break;
    }
    ++cnt;
  }
  if (!locked)
  {
    LOGGER.debug("Failed to lock the cache file.");
  }
  return locked;
}
Tries to lock the cache file
28,446
/**
 * Verifies that the local file path sent back by GS matches what is parsed
 * locally from the command text. This is a security check, as documented in
 * SNOW-15153.
 *
 * @param localFilePathFromGS the local file path reported by GS
 * @throws SnowflakeSQLException if the locally parsed path differs from the
 *                               path reported by GS
 */
private void verifyLocalFilePath(String localFilePathFromGS) throws SnowflakeSQLException
{
  if (command == null)
  {
    logger.error("null command");
    return;
  }
  if (command.indexOf(FILE_PROTOCOL) < 0)
  {
    logger.error("file:// prefix not found in command: {}", command);
    return;
  }
  // Start of the local path: right after the "file://" prefix.
  int localFilePathBeginIdx = command.indexOf(FILE_PROTOCOL) + FILE_PROTOCOL.length();
  // The path is treated as quoted when a single quote immediately precedes
  // the protocol prefix.
  boolean isLocalFilePathQuoted =
      (localFilePathBeginIdx > FILE_PROTOCOL.length()) &&
      (command.charAt(localFilePathBeginIdx - 1 - FILE_PROTOCOL.length()) == '\'');
  int localFilePathEndIdx = 0;
  String localFilePath = "";
  if (isLocalFilePathQuoted)
  {
    // Quoted path: runs until the closing single quote.
    localFilePathEndIdx = command.indexOf("'", localFilePathBeginIdx);
    if (localFilePathEndIdx > localFilePathBeginIdx)
    {
      localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx);
    }
    // Collapse escaped backslashes to normalize the path for comparison.
    localFilePath = localFilePath.replaceAll("\\\\\\\\", "\\\\");
  }
  else
  {
    // Unquoted path: terminated by the first space, newline, or semicolon.
    List<Integer> indexList = new ArrayList<>();
    char[] delimiterChars = {' ', '\n', ';'};
    for (int i = 0; i < delimiterChars.length; i++)
    {
      int charIndex = command.indexOf(delimiterChars[i], localFilePathBeginIdx);
      if (charIndex != -1)
      {
        indexList.add(charIndex);
      }
    }
    localFilePathEndIdx = indexList.isEmpty() ? -1 : Collections.min(indexList);
    if (localFilePathEndIdx > localFilePathBeginIdx)
    {
      localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx);
    }
    else if (localFilePathEndIdx == -1)
    {
      // No delimiter found: the path extends to the end of the command.
      localFilePath = command.substring(localFilePathBeginIdx);
    }
  }
  if (!localFilePath.isEmpty() && !localFilePath.equals(localFilePathFromGS))
  {
    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(),
        "Unexpected local file path from GS. From GS: " + localFilePathFromGS +
        ", expected: " + localFilePath);
  }
  else if (localFilePath.isEmpty())
  {
    logger.debug("fail to parse local file path from command: {}", command);
  }
  else
  {
    logger.trace("local file path from GS matches local parsing: {}", localFilePath);
  }
}
A helper method to verify that the local file path from GS matches what is parsed locally. This is for security purposes, as documented in SNOW-15153.
28,447
/**
 * Uploads data from the source stream using a single-threaded executor and
 * waits for the upload to finish.
 *
 * @throws SnowflakeSQLException if the command type is DOWNLOAD (not
 *                               supported for streams) or the wait is
 *                               interrupted
 */
private void uploadStream() throws SnowflakeSQLException
{
  try
  {
    threadExecutor = SnowflakeUtil.createDefaultExecutorService(
        "sf-stream-upload-worker-", 1);
    RemoteStoreFileEncryptionMaterial encMat = encryptionMaterial.get(0);
    if (commandType == CommandType.UPLOAD)
    {
      threadExecutor.submit(getUploadFileCallable(
          stageInfo,
          SRC_FILE_NAME_FOR_STREAM,
          fileMetadataMap.get(SRC_FILE_NAME_FOR_STREAM),
          // Local stages need no remote storage client.
          (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
          ? null
          : storageFactory.createClient(stageInfo, parallel, encMat),
          connection, command,
          sourceStream, true, parallel, null, encMat));
    }
    else if (commandType == CommandType.DOWNLOAD)
    {
      // Downloading to a stream is not supported through this path.
      throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
          ErrorCode.INTERNAL_ERROR.getMessageCode());
    }
    threadExecutor.shutdown();
    try
    {
      // Block until the upload task completes.
      threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    }
    catch (InterruptedException ex)
    {
      throw new SnowflakeSQLException(SqlState.QUERY_CANCELED,
          ErrorCode.INTERRUPTED.getMessageCode());
    }
    logger.debug("Done with uploading from a stream");
  }
  finally
  {
    if (threadExecutor != null)
    {
      threadExecutor.shutdownNow();
      threadExecutor = null;
    }
  }
}
Helper to upload data from a stream
28,448
/**
 * Downloads a file from a remote stage and returns an input stream over
 * its contents. Local file system stages are not supported.
 *
 * @param fileName name of the file on the stage
 * @return input stream over the downloaded file contents
 * @throws SnowflakeSQLException if the stage is a local file system stage
 */
InputStream downloadStream(String fileName) throws SnowflakeSQLException
{
  if (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
  {
    logger.error("downloadStream function doesn't support local file system");
    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(),
        "downloadStream function only supported in remote stages");
  }
  remoteLocation remoteLocation = extractLocationAndPath(stageInfo.getLocation());
  String stageFilePath = fileName;
  if (!remoteLocation.path.isEmpty())
  {
    // Prefix the file name with the stage's sub-path, if any.
    stageFilePath = SnowflakeUtil.concatFilePathNames(remoteLocation.path, fileName, "/");
  }
  RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(fileName);
  return storageFactory.createClient(stageInfo, parallel, encMat)
      .downloadToStream(connection, command, parallel, remoteLocation.location,
                        stageFilePath, stageInfo.getRegion());
}
Download a file from remote and return an input stream
28,449
/**
 * Downloads all source files from the remote stage using a single-threaded
 * executor, skipping files whose result status has already been resolved.
 *
 * @throws SnowflakeSQLException if the wait for completion is interrupted
 */
private void downloadFiles() throws SnowflakeSQLException
{
  try
  {
    threadExecutor = SnowflakeUtil.createDefaultExecutorService(
        "sf-file-download-worker-", 1);
    for (String srcFile : sourceFiles)
    {
      FileMetadata fileMetadata = fileMetadataMap.get(srcFile);
      if (fileMetadata.resultStatus != ResultStatus.UNKNOWN)
      {
        // Already resolved (e.g. skipped or errored earlier); don't re-download.
        logger.debug("Skipping {}, status: {}, details: {}",
                     srcFile, fileMetadata.resultStatus, fileMetadata.errorDetails);
        continue;
      }
      RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(srcFile);
      threadExecutor.submit(getDownloadFileCallable(
          stageInfo, srcFile, localLocation, fileMetadataMap,
          // Local stages need no remote storage client.
          (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
          ? null
          : storageFactory.createClient(stageInfo, parallel, encMat),
          connection, command, parallel, encMat));
      logger.debug("submitted download job for: {}", srcFile);
    }
    threadExecutor.shutdown();
    try
    {
      threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    }
    catch (InterruptedException ex)
    {
      throw new SnowflakeSQLException(SqlState.QUERY_CANCELED,
          ErrorCode.INTERRUPTED.getMessageCode());
    }
    logger.debug("Done with downloading");
  }
  finally
  {
    if (threadExecutor != null)
    {
      threadExecutor.shutdownNow();
      threadExecutor = null;
    }
  }
}
Helper to download files from remote
28,450
/**
 * Creates a thread pool with the requested number of threads and uploads
 * the given files with it, skipping files whose result status has already
 * been resolved.
 *
 * @param fileList set of source file paths to upload
 * @param parallel number of worker threads for the pool
 * @throws SnowflakeSQLException if the wait for completion is interrupted
 */
private void uploadFiles(Set<String> fileList, int parallel) throws SnowflakeSQLException
{
  try
  {
    threadExecutor = SnowflakeUtil.createDefaultExecutorService(
        "sf-file-upload-worker-", parallel);
    for (String srcFile : fileList)
    {
      FileMetadata fileMetadata = fileMetadataMap.get(srcFile);
      if (fileMetadata.resultStatus != ResultStatus.UNKNOWN)
      {
        // Already resolved (e.g. skipped or errored earlier); don't re-upload.
        logger.debug("Skipping {}, status: {}, details: {}",
                     srcFile, fileMetadata.resultStatus, fileMetadata.errorDetails);
        continue;
      }
      File srcFileObj = new File(srcFile);
      // When several files upload concurrently (parallel > 1), each single
      // transfer presumably uses one thread; otherwise the per-file
      // parallelism applies — TODO confirm against getUploadFileCallable.
      threadExecutor.submit(getUploadFileCallable(
          stageInfo, srcFile, fileMetadata,
          (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
          ? null
          : storageFactory.createClient(stageInfo, parallel, encryptionMaterial.get(0)),
          connection, command, null, false,
          (parallel > 1 ? 1 : this.parallel), srcFileObj,
          encryptionMaterial.get(0)));
      logger.debug("submitted copy job for: {}", srcFile);
    }
    threadExecutor.shutdown();
    try
    {
      threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    }
    catch (InterruptedException ex)
    {
      throw new SnowflakeSQLException(SqlState.QUERY_CANCELED,
          ErrorCode.INTERRUPTED.getMessageCode());
    }
    logger.debug("Done with uploading");
  }
  finally
  {
    if (threadExecutor != null)
    {
      threadExecutor.shutdownNow();
      threadExecutor = null;
    }
  }
}
This method create a thread pool based on requested number of threads and upload the files using the thread pool .
28,451
/**
 * Expands a list of file paths, resolving "~" and relative paths, and
 * expanding wildcard patterns (*, ?, [...]) against the local file system,
 * producing the set of canonical paths of all matching files.
 *
 * @param filePathList file paths, possibly containing wildcards
 * @return set of expanded file paths
 * @throws SnowflakeSQLException if listing files for a pattern fails
 */
static public Set<String> expandFileNames(String[] filePathList) throws SnowflakeSQLException
{
  Set<String> result = new HashSet<String>();
  // Group wildcard patterns by their parent directory so each directory is
  // listed only once.
  Map<String, List<String>> locationToFilePatterns;
  locationToFilePatterns = new HashMap<String, List<String>>();
  String cwd = System.getProperty("user.dir");
  for (String path : filePathList)
  {
    path = path.replace("~", System.getProperty("user.home"));
    if (!(new File(path)).isAbsolute())
    {
      logger.debug("Adding current working dir to relative file path.");
      path = cwd + localFSFileSep + path;
    }
    if (!path.contains("*") && !path.contains("?") &&
        !(path.contains("[") && path.contains("]")))
    {
      // No wildcard characters: take the path as-is.
      result.add(path);
    }
    else
    {
      int lastFileSepIndex = path.lastIndexOf(localFSFileSep);
      // Fall back to "/" as separator when the platform separator is not found.
      if (lastFileSepIndex < 0 && !"/".equals(localFSFileSep))
      {
        lastFileSepIndex = path.lastIndexOf("/");
      }
      String loc = path.substring(0, lastFileSepIndex + 1);
      String filePattern = path.substring(lastFileSepIndex + 1);
      List<String> filePatterns = locationToFilePatterns.get(loc);
      if (filePatterns == null)
      {
        filePatterns = new ArrayList<String>();
        locationToFilePatterns.put(loc, filePatterns);
      }
      filePatterns.add(filePattern);
    }
  }
  // List each directory once, matching all of its accumulated patterns.
  for (Map.Entry<String, List<String>> entry : locationToFilePatterns.entrySet())
  {
    try
    {
      java.io.File dir = new java.io.File(entry.getKey());
      logger.debug("Listing files under: {} with patterns: {}",
                   entry.getKey(), entry.getValue().toString());
      for (Object file : FileUtils.listFiles(dir,
          new WildcardFileFilter(entry.getValue()), null))
      {
        result.add(((java.io.File) file).getCanonicalPath());
      }
    }
    catch (Exception ex)
    {
      throw new SnowflakeSQLException(ex, SqlState.DATA_EXCEPTION,
          ErrorCode.FAIL_LIST_FILES.getMessageCode(),
          "Exception: " + ex.getMessage() + ", Dir=" + entry.getKey() +
          ", Patterns=" + entry.getValue().toString());
    }
  }
  logger.debug("Expanded file paths: ");
  for (String filePath : result)
  {
    logger.debug("file: {}", filePath);
  }
  return result;
}
Process a list of file paths, separated by commas, and expand the wildcards (if any) to generate the list of paths for all files matched by the wildcards.
28,452
/**
 * Derives the file compression type from a MIME type string.
 *
 * @param mimeTypeStr MIME type string, possibly null
 * @return matching compression type, or null when no MIME type or subtype
 *         is available
 * @throws MimeTypeParseException if the MIME type string cannot be parsed
 */
private FileCompressionType mimeTypeToCompressionType(String mimeTypeStr) throws MimeTypeParseException
{
  if (mimeTypeStr == null)
  {
    return null;
  }
  MimeType parsed = new MimeType(mimeTypeStr);
  String subType = parsed.getSubType();
  if (subType == null)
  {
    return null;
  }
  return FileCompressionType.lookupByMimeSubType(subType.toLowerCase());
}
Derive compression type from mime type
28,453
/**
 * Derives the MIME type from a file's extension by scanning the known
 * compression types.
 *
 * @param srcFile source file name or path
 * @return "type/subtype" string for the first matching extension, or null
 *         when no known extension matches
 */
private String getMimeTypeFromFileExtension(String srcFile)
{
  String lowerCaseName = srcFile.toLowerCase();
  for (FileCompressionType candidate : FileCompressionType.values())
  {
    if (lowerCaseName.endsWith(candidate.fileExtension))
    {
      // Use the first (canonical) MIME subtype for this compression type.
      return candidate.mimeType + "/" + candidate.mimeSubTypes.get(0);
    }
  }
  return null;
}
Derive mime type from file extension
28,454
/**
 * Splits a full stage location into the location name (before the first
 * "/") and the remaining path (after it). When no "/" is present, the
 * whole string is the location and the path is empty.
 *
 * @param stageLocationPath full stage location path
 * @return the extracted location and path
 */
static public remoteLocation extractLocationAndPath(String stageLocationPath)
{
  int separatorIdx = stageLocationPath.indexOf("/");
  if (separatorIdx < 0)
  {
    return new remoteLocation(stageLocationPath, "");
  }
  String location = stageLocationPath.substring(0, separatorIdx);
  String path = stageLocationPath.substring(separatorIdx + 1);
  return new remoteLocation(location, path);
}
A small helper for extracting location name and path from full location path
28,455
/**
 * Describes the metadata of the fixed view used to report command results,
 * picking the upload/download facade class with or without encryption
 * columns.
 *
 * @return column metadata of the matching facade class
 * @throws Exception if describing the fixed view fails
 */
public List<SnowflakeColumnMetadata> describeColumns() throws Exception
{
  Class<?> facadeClass;
  if (commandType == CommandType.UPLOAD)
  {
    facadeClass = showEncryptionParameter
                  ? UploadCommandEncryptionFacade.class
                  : UploadCommandFacade.class;
  }
  else
  {
    facadeClass = showEncryptionParameter
                  ? DownloadCommandEncryptionFacade.class
                  : DownloadCommandFacade.class;
  }
  return SnowflakeUtil.describeFixedViewColumns(facadeClass);
}
Describe the metadata of a fixed view .
28,456
/**
 * Generates one status row per file in the metadata map (upload or
 * download facade, with or without encryption info), and sorts the rows by
 * source file name when the session's "sort" property is enabled.
 */
private void populateStatusRows()
{
  for (Map.Entry<String, FileMetadata> entry : fileMetadataMap.entrySet())
  {
    FileMetadata fileMetadata = entry.getValue();
    if (commandType == CommandType.UPLOAD)
    {
      statusRows.add(showEncryptionParameter
          ? new UploadCommandEncryptionFacade(
              fileMetadata.srcFileName,
              fileMetadata.destFileName,
              fileMetadata.resultStatus.name(),
              fileMetadata.errorDetails,
              fileMetadata.srcFileSize,
              fileMetadata.destFileSize,
              (fileMetadata.srcCompressionType == null)
              ? "NONE" : fileMetadata.srcCompressionType.name(),
              (fileMetadata.destCompressionType == null)
              ? "NONE" : fileMetadata.destCompressionType.name(),
              fileMetadata.isEncrypted)
          : new UploadCommandFacade(
              fileMetadata.srcFileName,
              fileMetadata.destFileName,
              fileMetadata.resultStatus.name(),
              fileMetadata.errorDetails,
              fileMetadata.srcFileSize,
              fileMetadata.destFileSize,
              (fileMetadata.srcCompressionType == null)
              ? "NONE" : fileMetadata.srcCompressionType.name(),
              (fileMetadata.destCompressionType == null)
              ? "NONE" : fileMetadata.destCompressionType.name()));
    }
    else if (commandType == CommandType.DOWNLOAD)
    {
      // Strip a leading "/" from the source file name for display.
      statusRows.add(showEncryptionParameter
          ? new DownloadCommandEncryptionFacade(
              fileMetadata.srcFileName.startsWith("/")
              ? fileMetadata.srcFileName.substring(1)
              : fileMetadata.srcFileName,
              fileMetadata.resultStatus.name(),
              fileMetadata.errorDetails,
              fileMetadata.destFileSize,
              fileMetadata.isEncrypted)
          : new DownloadCommandFacade(
              fileMetadata.srcFileName.startsWith("/")
              ? fileMetadata.srcFileName.substring(1)
              : fileMetadata.srcFileName,
              fileMetadata.resultStatus.name(),
              fileMetadata.errorDetails,
              fileMetadata.destFileSize));
    }
  }
  // Sort by source file name when the session's "sort" property is set.
  Object sortProperty = null;
  sortProperty = connection.getSFSessionProperty("sort");
  boolean sortResult = sortProperty != null && (Boolean) sortProperty;
  if (sortResult)
  {
    Comparator comparator = (commandType == CommandType.UPLOAD)
        ? new Comparator<Object>()
          {
            public int compare(Object a, Object b)
            {
              String srcFileNameA = ((UploadCommandFacade) a).srcFile;
              String srcFileNameB = ((UploadCommandFacade) b).srcFile;
              return srcFileNameA.compareTo(srcFileNameB);
            }
          }
        : new Comparator<Object>()
          {
            public int compare(Object a, Object b)
            {
              String srcFileNameA = ((DownloadCommandFacade) a).file;
              String srcFileNameB = ((DownloadCommandFacade) b).file;
              return srcFileNameA.compareTo(srcFileNameB);
            }
          };
    Collections.sort(statusRows, comparator);
  }
}
Generate status rows for each file
28,457
/**
 * Sends this incident report to GS for logging. The report is serialized
 * to JSON, gzip-compressed, and POSTed to the incident-creation endpoint.
 * All failures are logged and swallowed; this method never throws.
 */
public void flush()
{
  ObjectMapper mapper = ObjectMapperFactory.getObjectMapper();
  String dtoDump;
  URI incidentURI;
  try
  {
    dtoDump = mapper.writeValueAsString(new IncidentV2DTO(this));
  }
  catch (JsonProcessingException ex)
  {
    logger.error("Incident registration failed, could not map " +
                 "incident report to json string. Exception: {}",
                 ex.getMessage());
    return;
  }
  Preconditions.checkNotNull(dtoDump);
  try
  {
    URIBuilder uriBuilder = new URIBuilder(this.serverUrl);
    uriBuilder.setPath(SF_PATH_CREATE_INCIDENT_V2);
    incidentURI = uriBuilder.build();
  }
  catch (URISyntaxException ex)
  {
    logger.error("Incident registration failed, " +
                 "URI could not be built. Exception: {}", ex.getMessage());
    return;
  }
  HttpPost postRequest = new HttpPost(incidentURI);
  postRequest.setHeader(SFSession.SF_HEADER_AUTHORIZATION,
                        SFSession.SF_HEADER_SNOWFLAKE_AUTHTYPE + " " +
                        SFSession.SF_HEADER_TOKEN_TAG + "=\"" +
                        this.sessionToken + "\"");
  // Gzip-compress the JSON payload before sending.
  ByteArrayEntity input;
  try
  {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    GZIPOutputStream gzos = new GZIPOutputStream(baos);
    byte[] bytes = dtoDump.getBytes(StandardCharsets.UTF_8);
    gzos.write(bytes);
    gzos.finish();
    input = new ByteArrayEntity(baos.toByteArray());
    input.setContentType("application/json");
  }
  catch (IOException exc)
  {
    logger.debug("Incident registration failed, could not compress" +
                 " payload. Exception: {}", exc.getMessage());
    // BUGFIX: previously execution fell through here and a request with a
    // null entity was sent anyway; abort the registration instead.
    return;
  }
  postRequest.setEntity(input);
  postRequest.addHeader("content-encoding", "gzip");
  try
  {
    String response = HttpUtil.executeRequest(postRequest, 1000, 0, null);
    logger.debug("Incident registration was successful. Response: '{}'",
                 response);
  }
  catch (Exception ex)
  {
    logger.error("Incident registration request failed, exception: {}",
                 ex.getMessage());
  }
}
Sends incident to GS to log
28,458
/**
 * Decides which TLS cipher suites to pass into the
 * SSLConnectionSocketFactory: the comma-separated "https.cipherSuites"
 * system property when set, otherwise the JVM's default cipher suites.
 *
 * @return array of cipher suite names
 */
private static String[] decideCipherSuites()
{
  String[] cipherSuites;
  String sysCipherSuites = System.getProperty("https.cipherSuites");
  if (sysCipherSuites != null)
  {
    cipherSuites = sysCipherSuites.split(",");
  }
  else
  {
    cipherSuites = ((SSLServerSocketFactory) SSLServerSocketFactory.getDefault())
        .getDefaultCipherSuites();
  }
  if (logger.isTraceEnabled())
  {
    logger.trace("Cipher suites used: {}", Arrays.toString(cipherSuites));
  }
  return cipherSuites;
}
Decide cipher suites that will be passed into the SSLConnectionSocketFactory
28,459
/**
 * Initializes a telemetry connector from a JDBC connection.
 *
 * @param conn      the JDBC connection; must unwrap to a SnowflakeConnectionV1
 * @param flushSize batch size that triggers an automatic flush
 * @return the telemetry connector, or null when the connection is not a
 *         Snowflake connection
 */
public static Telemetry createTelemetry(Connection conn, int flushSize)
{
  try
  {
    SnowflakeConnectionV1 snowflakeConnection = conn.unwrap(SnowflakeConnectionV1.class);
    return createTelemetry(snowflakeConnection.getSfSession(), flushSize);
  }
  catch (SQLException ex)
  {
    logger.debug("input connection is not a SnowflakeConnection");
    return null;
  }
}
Initialize the telemetry connector
28,460
/**
 * Adds a log record to the pending batch and sends the whole batch once
 * forceFlushSize is reached. A no-op when telemetry is disabled.
 *
 * @param log the telemetry record to enqueue
 * @throws IOException if the connector has been closed
 */
public void addLogToBatch(TelemetryData log) throws IOException
{
  if (isClosed)
  {
    throw new IOException("Telemetry connector is closed");
  }
  if (!isTelemetryEnabled())
  {
    return;
  }
  synchronized (locker)
  {
    this.logBatch.add(log);
  }
  // NOTE(review): this size check happens outside the lock, so a concurrent
  // sendBatch() may swap the batch in between — confirm that an occasional
  // early/late flush is acceptable.
  if (this.logBatch.size() >= this.forceFlushSize)
  {
    this.sendBatch();
  }
}
Add log to batch to be submitted to telemetry . Send batch if forceFlushSize reached
28,461
/**
 * Best-effort variant of addLogToBatch: attempts to enqueue the record and
 * logs (rather than propagates) any I/O failure.
 *
 * @param log the telemetry record to enqueue
 */
public void tryAddLogToBatch(TelemetryData log)
{
  try
  {
    addLogToBatch(log);
  }
  catch (IOException ioe)
  {
    // Telemetry is non-critical; swallow the failure after logging it.
    logger.debug("Exception encountered while sending metrics to telemetry endpoint.", ioe);
  }
}
Attempt to add log to batch and suppress exceptions thrown in case of failure
28,462
/**
 * Closes the telemetry connector, attempting to send any unsubmitted logs
 * first. The connector is marked closed even if that final send fails.
 *
 * @throws IOException if the connector is already closed
 */
public void close() throws IOException
{
  if (isClosed)
  {
    throw new IOException("Telemetry connector is closed");
  }
  try
  {
    this.sendBatch();
  }
  catch (IOException ioe)
  {
    // Best effort only; do not prevent the close.
    logger.error("Send logs failed on closing", ioe);
  }
  finally
  {
    this.isClosed = true;
  }
}
Close telemetry connector and send any unsubmitted logs
28,463
/**
 * Sends all cached telemetry logs to the server. The pending batch is
 * swapped out under the lock so new logs can accumulate while the old
 * batch is in flight. A failed request disables telemetry for this
 * connector.
 *
 * @return true if there was nothing to send or the request succeeded,
 *         false if telemetry is disabled or the request failed
 * @throws IOException if the connector has been closed
 */
public boolean sendBatch() throws IOException
{
  if (isClosed)
  {
    throw new IOException("Telemetry connector is closed");
  }
  if (!isTelemetryEnabled())
  {
    return false;
  }
  LinkedList<TelemetryData> tmpList;
  synchronized (locker)
  {
    // Swap out the batch so the lock is not held during the HTTP request.
    tmpList = this.logBatch;
    this.logBatch = new LinkedList<>();
  }
  if (session.isClosed())
  {
    throw new UnexpectedException("Session is closed when sending log");
  }
  if (!tmpList.isEmpty())
  {
    String sessionToken = this.session.getSessionToken();
    HttpPost post = new HttpPost(this.telemetryUrl);
    post.setEntity(new StringEntity(logsToString(tmpList)));
    post.setHeader("Content-type", "application/json");
    post.setHeader("Authorization", "Snowflake Token=\"" + sessionToken + "\"");
    String response = null;
    try
    {
      response = HttpUtil.executeRequest(post, 1000, 0, null);
    }
    catch (SnowflakeSQLException e)
    {
      // Disable telemetry to avoid repeatedly hammering a failing endpoint.
      disableTelemetry();
      // NOTE(review): "response" is always null here since the call threw.
      logger.error("Telemetry request failed, " +
                   "response: {}, exception: {}", response, e.getMessage());
      return false;
    }
  }
  return true;
}
Send all cached logs to server
28,464
/**
 * Converts a list of telemetry records into a JSON object of the shape
 * {"logs": [...]}.
 *
 * @param telemetryData records to serialize
 * @return JSON object wrapping the serialized records
 */
static ObjectNode logsToJson(LinkedList<TelemetryData> telemetryData)
{
  ArrayNode logArray = mapper.createArrayNode();
  for (TelemetryData record : telemetryData)
  {
    logArray.add(record.toJson());
  }
  ObjectNode root = mapper.createObjectNode();
  root.set("logs", logArray);
  return root;
}
convert a list of log to a JSON object
28,465
/**
 * Executes a SQL query without bind parameters.
 *
 * @param sql the query text
 * @return the query's result set
 * @throws SQLException if the statement is closed or execution fails
 */
public ResultSet executeQuery(String sql) throws SQLException
{
  raiseSQLExceptionIfStatementIsClosed();
  return executeQueryInternal(sql, null);
}
Execute SQL query
28,466
/**
 * Internal method for executing a query with optional bind parameters.
 * Any previously open result set is tracked in openResultSets for later
 * cleanup before being replaced.
 *
 * @param sql               the query text
 * @param parameterBindings bind values, or null for none
 * @return the new result set
 * @throws SQLException if execution fails
 */
ResultSet executeQueryInternal(String sql, Map<String, ParameterBindingDTO> parameterBindings) throws SQLException
{
  SFBaseResultSet sfResultSet;
  try
  {
    sfResultSet = sfStatement.execute(sql, parameterBindings,
                                      SFStatement.CallingMethod.EXECUTE_QUERY);
    sfResultSet.setSession(this.connection.getSfSession());
  }
  catch (SFException ex)
  {
    throw new SnowflakeSQLException(ex.getCause(), ex.getSqlState(),
                                    ex.getVendorCode(), ex.getParams());
  }
  // Remember the previous result set so it can be closed later.
  if (resultSet != null)
  {
    openResultSets.add(resultSet);
  }
  resultSet = new SnowflakeResultSetV1(sfResultSet, this);
  return getResultSet();
}
Internal method for executing a query with bindings accepted .
28,467
/**
 * Sets a session parameter at the statement level. Used for internal
 * testing; silently does nothing when there is no underlying statement.
 *
 * @param name  parameter name
 * @param value parameter value
 * @throws Exception SnowflakeSQLException if setting the property fails
 */
void setParameter(String name, Object value) throws Exception
{
  logger.debug("public void setParameter");
  if (this.sfStatement == null)
  {
    return;
  }
  try
  {
    this.sfStatement.addProperty(name, value);
  }
  catch (SFException ex)
  {
    throw new SnowflakeSQLException(ex);
  }
}
Sets a parameter at the statement level . Used for internal testing .
28,468
/**
 * Creates a fixed-size pool of daemon downloader threads whose names are
 * built from the given prefix plus a running counter, and whose uncaught
 * exceptions are logged.
 *
 * @param threadNamePrefix prefix for worker thread names
 * @param parallel         number of threads in the pool
 * @return the thread pool executor
 */
private static ThreadPoolExecutor createChunkDownloaderExecutorService(final String threadNamePrefix, final int parallel)
{
  ThreadFactory factory = new ThreadFactory()
  {
    private int threadCount = 1;

    @Override
    public Thread newThread(final Runnable runnable)
    {
      final Thread worker = new Thread(runnable);
      worker.setName(threadNamePrefix + threadCount++);
      worker.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler()
      {
        @Override
        public void uncaughtException(Thread t, Throwable e)
        {
          logger.error("uncaughtException in thread: " + t + " {}", e);
        }
      });
      // Daemon threads so the JVM can exit without waiting for downloads.
      worker.setDaemon(true);
      return worker;
    }
  };
  return (ThreadPoolExecutor) Executors.newFixedThreadPool(parallel, factory);
}
Create a pool of downloader threads .
28,469
/**
 * Submits download tasks to the executor for as many chunks as the
 * prefetch slot count and the memory limit allow. When no chunk can be
 * scheduled because memory is tight, waits with exponential backoff plus
 * jitter and retries.
 *
 * @throws SnowflakeSQLException if the backoff sleep is interrupted
 */
private void startNextDownloaders() throws SnowflakeSQLException
{
  long waitingTime = BASE_WAITING_MS;
  while (nextChunkToDownload - nextChunkToConsume < prefetchSlots &&
         nextChunkToDownload < chunks.size())
  {
    final SnowflakeResultChunk nextChunk = chunks.get(nextChunkToDownload);
    final long neededChunkMemory = nextChunk.computeNeededChunkMemory();
    synchronized (currentMemoryUsage)
    {
      // A single chunk larger than the limit would never fit; grow the
      // limit so the download can proceed at all.
      if (neededChunkMemory > memoryLimit)
      {
        if (logger.isDebugEnabled())
        {
          logger.debug(
              "{}: reset memoryLimit from {} MB to current chunk size {} MB",
              Thread.currentThread().getName(),
              memoryLimit / 1024 / 1024,
              neededChunkMemory / 1024 / 1024);
        }
        memoryLimit = neededChunkMemory;
      }
      // Over budget while other downloads are in flight: stop scheduling
      // and let them finish first.
      if (currentMemoryUsage + neededChunkMemory > memoryLimit &&
          nextChunkToDownload - nextChunkToConsume > 0)
      {
        break;
      }
      if (currentMemoryUsage + neededChunkMemory <= memoryLimit)
      {
        nextChunk.tryReuse(chunkDataCache);
        currentMemoryUsage += neededChunkMemory;
        if (logger.isDebugEnabled())
        {
          logger.debug(
              "{}: currentMemoryUsage in MB: {}, nextChunkToDownload: {}, nextChunkToConsume: {}, " +
              "newReservedMemory in B: {} ",
              Thread.currentThread().getName(),
              currentMemoryUsage / 1024 / 1024,
              nextChunkToDownload, nextChunkToConsume, neededChunkMemory);
        }
        logger.debug("submit chunk #{} for downloading, url={}",
                     this.nextChunkToDownload, nextChunk.getUrl());
        executor.submit(getDownloadChunkCallable(this, nextChunk, qrmk,
            nextChunkToDownload, chunkHeadersMap, networkTimeoutInMilli,
            useJsonParserV2));
        nextChunkToDownload++;
        // A chunk was scheduled; reset the backoff.
        waitingTime = BASE_WAITING_MS;
        continue;
      }
    }
    try
    {
      // Could not schedule: back off exponentially (capped), with jitter.
      waitingTime *= WAITING_SECS_MULTIPLIER;
      waitingTime = waitingTime > MAX_WAITING_MS ? MAX_WAITING_MS : waitingTime;
      long jitter = ThreadLocalRandom.current().nextLong(0, waitingTime / WAITING_JITTER_RATIO);
      waitingTime += jitter;
      if (logger.isDebugEnabled())
      {
        logger.debug(
            "{} waiting for {}s: currentMemoryUsage in MB: {}, neededChunkMemory in MB: {}, " +
            "nextChunkToDownload: {}, nextChunkToConsume: {} ",
            Thread.currentThread().getName(),
            waitingTime / 1000.0,
            currentMemoryUsage / 1024 / 1024,
            neededChunkMemory / 1024 / 1024,
            nextChunkToDownload, nextChunkToConsume);
      }
      Thread.sleep(waitingTime);
    }
    catch (InterruptedException ie)
    {
      throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
          ErrorCode.INTERNAL_ERROR.getMessageCode(),
          "Waiting SnowflakeChunkDownloader has been interrupted.");
    }
  }
  // Scheduled downloaders reuse cached buffers via tryReuse above; the
  // cache itself is released once scheduling is done.
  chunkDataCache.clear();
}
Submit download chunk tasks to executor . Number depends on thread and memory limit
28,470
/**
 * Releases the memory accounted for every chunk; called before the
 * downloader is closed. No-op when there are no chunks.
 */
public void releaseAllChunkMemoryUsage()
{
  if (chunks == null || chunks.isEmpty())
  {
    return;
  }
  for (int idx = 0; idx < chunks.size(); idx++)
  {
    releaseCurrentMemoryUsage(idx, chunks.get(idx).computeNeededChunkMemory());
  }
}
release all existing chunk memory usage before close
28,471
/**
 * Logs crucial downloader state after an out-of-memory condition, together
 * with suggestions for avoiding it (larger heap, CLIENT_MEMORY_LIMIT,
 * prefetch tuning).
 */
private void logOutOfMemoryError()
{
  logger.error("Dump some crucial information below:\n" +
               "Total milliseconds waiting for chunks: {},\n" +
               "Total memory used: {}, Max heap size: {}, total download time: {} millisec,\n" +
               "total parsing time: {} milliseconds, total chunks: {},\n" +
               "currentMemoryUsage in Byte: {}, currentMemoryLimit in Bytes: {} \n" +
               "nextChunkToDownload: {}, nextChunkToConsume: {}\n" +
               "Several suggestions to try to resolve the OOM issue:\n" +
               "1. increase the JVM heap size if you have more space; or \n" +
               "2. use CLIENT_MEMORY_LIMIT to reduce the memory usage by the JDBC driver " +
               "(https://docs.snowflake.net/manuals/sql-reference/parameters.html#client-memory-limit)" +
               "3. please make sure 2 * CLIENT_PREFETCH_THREADS * CLIENT_RESULT_CHUNK_SIZE < CLIENT_MEMORY_LIMIT. " +
               "If not, please reduce CLIENT_PREFETCH_THREADS and CLIENT_RESULT_CHUNK_SIZE too.",
               numberMillisWaitingForChunks,
               Runtime.getRuntime().totalMemory(),
               Runtime.getRuntime().maxMemory(),
               totalMillisDownloadingChunks.get(),
               totalMillisParsingChunks.get(),
               chunks.size(),
               currentMemoryUsage,
               memoryLimit,
               nextChunkToDownload,
               nextChunkToConsume);
}
log out of memory error and provide the suggestion to avoid this error
28,472
/**
 * Terminates the downloader: logs summary metrics, shuts down the
 * executor, and releases chunk references. Idempotent; subsequent calls
 * return null.
 *
 * @return collected metrics on the first call, null afterwards
 */
public Metrics terminate()
{
  if (!terminated)
  {
    logger.debug("Total milliseconds waiting for chunks: {}, " +
                 "Total memory used: {}, total download time: {} millisec, " +
                 "total parsing time: {} milliseconds, total chunks: {}",
                 numberMillisWaitingForChunks,
                 Runtime.getRuntime().totalMemory(),
                 totalMillisDownloadingChunks.get(),
                 totalMillisParsingChunks.get(), chunks.size());
    if (executor != null)
    {
      executor.shutdownNow();
      executor = null;
    }
    chunks = null;
    chunkDataCache.clear();
    terminated = true;
    return new Metrics();
  }
  return null;
}
terminate the downloader
28,473
/**
 * Masks any AWS secrets detected in the input string.
 *
 * @param sql the text to scan
 * @return the text with every detected secret range masked
 */
public static String maskAWSSecret(String sql)
{
  String masked = sql;
  for (SecretDetector.SecretRange range : SecretDetector.getAWSSecretPos(sql))
  {
    masked = maskText(masked, range.beginPos, range.endPos);
  }
  return masked;
}
mask AWS secret in the input string
28,474
/**
 * Sanity-checks the query text.
 *
 * @param sql the query text to validate
 * @throws SQLException (SnowflakeSQLException) if the text is null or empty
 */
private void sanityCheckQuery(String sql) throws SQLException
{
  boolean missing = (sql == null) || sql.isEmpty();
  if (missing)
  {
    throw new SnowflakeSQLException(SqlState.SQL_STATEMENT_NOT_YET_COMPLETE,
                                    ErrorCode.INVALID_SQL.getMessageCode(),
                                    sql);
  }
}
Sanity check query text
28,475
/**
 * Executes a SQL query, with an option to only describe it. File-transfer
 * commands are detected and executed locally instead of being sent through
 * the normal query path.
 *
 * @param sql               the query text
 * @param parametersBinding bind values, or null for none
 * @param describeOnly      whether to only describe the statement
 * @param caller            the calling JDBC method, or null
 * @return the result set
 * @throws SQLException if validation or execution fails
 * @throws SFException  if internal execution fails
 */
private SFBaseResultSet executeQuery(String sql, Map<String, ParameterBindingDTO> parametersBinding, boolean describeOnly, CallingMethod caller) throws SQLException, SFException
{
  sanityCheckQuery(sql);
  String trimmedSql = sql.trim();
  if (!isFileTransfer(trimmedSql))
  {
    // describeOnly is passed twice: once as the describe flag and once as
    // the internal-query flag, matching executeQueryInternal's signature.
    return executeQueryInternal(sql, parametersBinding, describeOnly,
                                describeOnly, caller);
  }
  logger.debug("Executing file transfer locally: {}", sql);
  return executeFileTransfer(sql);
}
Execute SQL query with an option for describe only
28,476
/**
 * Describes a statement without executing it, recording the describe job's
 * query id.
 *
 * @param sql the statement text
 * @return metadata describing the statement
 * @throws SFException  if internal execution fails
 * @throws SQLException if validation or execution fails
 */
public SFStatementMetaData describe(String sql) throws SFException, SQLException
{
  SFBaseResultSet describeResult = executeQuery(sql, null, true, null);
  describeJobUUID = describeResult.getQueryId();
  return new SFStatementMetaData(describeResult.getMetaData(),
                                 describeResult.getStatementType(),
                                 describeResult.getNumberOfBinds(),
                                 describeResult.isArrayBindSupported());
}
Describe a statement
28,477
/**
 * Schedules a "time bomb" task that cancels this statement once the query
 * timeout elapses.
 *
 * @param executor scheduler that will run the cancellation task
 */
private void setTimeBomb(ScheduledExecutorService executor)
{
  // Local class wrapping the cancel call so it can be scheduled.
  class TimeBombTask implements Callable<Void>
  {
    private final SFStatement statement;

    private TimeBombTask(SFStatement statement)
    {
      this.statement = statement;
    }

    public Void call() throws SQLException
    {
      try
      {
        statement.cancel();
      }
      catch (SFException ex)
      {
        throw new SnowflakeSQLException(ex, ex.getSqlState(),
                                        ex.getVendorCode(), ex.getParams());
      }
      return null;
    }
  }
  executor.schedule(new TimeBombTask(this), this.queryTimeout, TimeUnit.SECONDS);
}
Set a time bomb to cancel the outstanding query when timeout is reached .
28,478
/**
 * Builds a cancel request and cancels the in-flight SQL for this
 * statement, then clears the request bookkeeping.
 *
 * @param sql       the SQL text being canceled
 * @param mediaType media type for the cancel request
 * @throws SnowflakeSQLException if the cancel request fails
 * @throws SFException           if the statement is already closed
 */
private void cancelHelper(String sql, String mediaType) throws SnowflakeSQLException, SFException
{
  synchronized (this)
  {
    if (isClosed)
    {
      throw new SFException(ErrorCode.INTERNAL_ERROR, "statement already closed");
    }
  }
  StmtUtil.StmtInput stmtInput = new StmtUtil.StmtInput();
  stmtInput.setServerUrl(session.getServerUrl())
      .setSql(sql)
      .setMediaType(mediaType)
      .setRequestId(requestId)
      .setSessionToken(session.getSessionToken())
      .setServiceName(session.getServiceName());
  StmtUtil.cancel(stmtInput);
  synchronized (this)
  {
    // Reset request bookkeeping so the statement can be reused.
    this.sequenceId = -1;
    this.requestId = null;
  }
}
A helper method to build URL and cancel the SQL for exec
28,479
/**
 * Advances to the next child result set, if one is available, closing the
 * current result set when the caller requests it.
 *
 * @param current JDBC constant describing what to do with the current result
 * @return true if the next child result generates a result set
 * @throws SQLException if fetching the next child result fails
 */
public boolean getMoreResults(int current) throws SQLException
{
  boolean closeCurrent =
      current == Statement.CLOSE_CURRENT_RESULT || current == Statement.CLOSE_ALL_RESULTS;
  if (closeCurrent && resultSet != null)
  {
    resultSet.close();
  }
  resultSet = null;

  if (childResults == null || childResults.isEmpty())
  {
    // nothing left to advance to
    return false;
  }

  SFChildResult next = childResults.remove(0);
  try
  {
    JsonNode resultJson = StmtUtil.getQueryResultJSON(next.getId(), session);

    // honor the session-level "sort" property when materializing the result
    Object sortProperty = session.getSFSessionProperty("sort");
    boolean sortResult = sortProperty != null && (Boolean) sortProperty;

    resultSet = new SFResultSet(resultJson, this, sortResult);
    resultSet.setStatementType(next.getType());
    return next.getType().isGenerateResultSet();
  }
  catch (SFException ex)
  {
    throw new SnowflakeSQLException(ex);
  }
}
Sets the result set to the next one if available .
28,480
/**
 * Reports whether this exception corresponds to an HTTP 404 (not found)
 * error returned by the storage provider.
 *
 * @return true when the wrapped service exception carries status 404
 */
public boolean isServiceException404()
{
  // only AmazonServiceException exposes the HTTP status code of the failure
  if ((Exception) this instanceof AmazonServiceException)
  {
    AmazonServiceException serviceEx = (AmazonServiceException) (Exception) this;
    return serviceEx.getStatusCode() == HttpStatus.SC_NOT_FOUND;
  }
  return false;
}
Returns true if this is an exception corresponding to a HTTP 404 error returned by the storage provider
28,481
/**
 * Produces a one-line description of the throwable suitable for error
 * messages and log printing: the throwable's toString() followed by the
 * top stack frame when one is available.
 *
 * @param thrown throwable to describe
 * @return single-line summary of the throwable
 */
public static String oneLiner(Throwable thrown)
{
  StackTraceElement[] stack = thrown.getStackTrace();
  // Bug fix: previously topOfStack was initialized to null, so a throwable
  // with an empty stack trace produced a literal "null" suffix.
  String topOfStack = "";
  if (stack.length > 0)
  {
    topOfStack = " at " + stack[0];
  }
  return thrown.toString() + topOfStack;
}
Produce a one line description of the throwable suitable for error message and log printing .
28,482
/**
 * Dumps JVM metrics (as pretty-printed JSON) followed by a full thread dump
 * for this process into a gzip-compressed incident dump file named after the
 * given incident id. Failures are logged and never propagated.
 *
 * @param incidentId id used to build the dump file name
 */
public static void dumpVmMetrics(String incidentId)
{
  PrintWriter writer = null;
  try
  {
    // dump file lives under the configured dump path prefix
    String dumpFile = EventUtil.getDumpPathPrefix() + "/" + INC_DUMP_FILE_NAME + incidentId + INC_DUMP_FILE_EXT;

    final OutputStream outStream = new GZIPOutputStream(new FileOutputStream(dumpFile));
    // auto-flushing writer layered over the SAME gzip stream that the JSON
    // generator and the thread dump write to; flush order below matters
    writer = new PrintWriter(outStream, true);

    final VirtualMachineMetrics vm = VirtualMachineMetrics.getInstance();

    // section header preceding the metrics JSON
    writer.print("\n\n\n--------------------------- METRICS " + "---------------------------\n\n");
    writer.flush();

    JsonFactory jf = new JsonFactory();
    // keep the underlying stream open when the JSON generator is closed;
    // the thread dump is appended to the same stream afterwards
    jf.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);

    ObjectMapper mapper = new ObjectMapper(jf);
    mapper.registerModule(new JodaModule());
    mapper.setDateFormat(new ISO8601DateFormat());
    mapper.configure(SerializationFeature.INDENT_OUTPUT, true);

    MetricsServlet metrics = new MetricsServlet(Clock.defaultClock(), vm, Metrics.defaultRegistry(), jf, true);

    final JsonGenerator json = jf.createGenerator(outStream, JsonEncoding.UTF8);
    json.useDefaultPrettyPrinter();

    // single JSON object holding VM metrics plus all registered metrics
    json.writeStartObject();
    writeVmMetrics(json, vm);
    metrics.writeRegularMetrics(json, null, false);
    json.writeEndObject();
    json.close();

    logger.debug("Creating full thread dump in dump file {}", dumpFile);

    // section header preceding the thread dump
    writer.print("\n\n\n--------------------------- THREAD DUMP " + "---------------------------\n\n");
    writer.flush();

    vm.threadDump(outStream);

    logger.debug("Dump file {} is created.", dumpFile);
  }
  catch (Exception exc)
  {
    // best-effort: a failed dump is logged, never rethrown
    logger.error("Unable to write dump file, exception: {}", exc.getMessage());
  }
  finally
  {
    if (writer != null)
    {
      // closing the writer also closes the wrapped gzip/file streams
      writer.close();
    }
  }
}
Dumps JVM metrics for this process .
28,483
/**
 * Builds a V2 incident from the given exception and triggers it, reporting
 * the failure to GS (and possibly the crash manager), then hands the same
 * throwable back so callers can rethrow it inline.
 *
 * @param session   session the incident is reported against
 * @param exc       the exception being reported
 * @param jobId     job id associated with the failure, may be null
 * @param requestId request id associated with the failure, may be null
 * @return the same throwable that was passed in
 */
public static Throwable generateIncidentV2WithException(SFSession session, Throwable exc, String jobId, String requestId)
{
  Incident incident = new Incident(session, exc, jobId, requestId);
  incident.trigger();
  return exc;
}
Makes a V2 incident object and triggers it, effectively reporting the given exception to GS and possibly to the crash manager
28,484
/**
 * Returns the current time in UTC, formatted as "yyyy-MM-dd HH:mm:ss".
 *
 * @return formatted UTC timestamp for "now"
 */
public static String getUTCNow()
{
  // SimpleDateFormat is not thread-safe, so a fresh instance is used per call
  final SimpleDateFormat utcFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  utcFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
  return utcFormat.format(new Date());
}
Get the current time in UTC, formatted as yyyy-MM-dd HH:mm:ss
28,485
/**
 * Re-creates the encapsulated storage client with a fresh access token:
 * installs the new stage credentials, then rebuilds the Azure client from
 * the updated stage info.
 *
 * @param stageCredentials freshly issued credentials for the stage
 * @throws SnowflakeSQLException if the client cannot be re-created
 */
public void renew(Map stageCredentials) throws SnowflakeSQLException
{
  // credentials must be installed before the client is rebuilt
  stageInfo.setCredentials(stageCredentials);
  setupAzureClient(stageInfo, encMat);
}
Re - creates the encapsulated storage client with a fresh access token
28,486
/**
 * Returns the metadata properties (length, content encoding, user-defined
 * metadata) for a remote storage object.
 *
 * @param remoteStorageLocation container holding the object
 * @param prefix                blob path of the object
 * @return metadata of the remote object
 * @throws StorageProviderException if the lookup fails or the URI is invalid
 */
public StorageObjectMetadata getObjectMetadata(String remoteStorageLocation, String prefix) throws StorageProviderException
{
  try
  {
    // resolve the blob and pull its attributes from the service
    CloudBlobContainer container = azStorageClient.getContainerReference(remoteStorageLocation);
    CloudBlob blob = container.getBlockBlobReference(prefix);
    blob.downloadAttributes();

    Map<String, String> userDefinedMetadata = blob.getMetadata();
    BlobProperties properties = blob.getProperties();
    return new AzureObjectMetadata(
        properties.getLength(), properties.getContentEncoding(), userDefinedMetadata);
  }
  catch (StorageException ex)
  {
    logger.debug("Failed to retrieve BLOB metadata: {} - {}", ex.getErrorCode(), ex.getExtendedErrorInformation());
    throw new StorageProviderException(ex);
  }
  catch (URISyntaxException ex)
  {
    logger.debug("Cannot retrieve BLOB properties, invalid URI: {}", ex);
    throw new StorageProviderException(ex);
  }
}
Returns the metadata properties for a remote storage object
28,487
/**
 * Downloads a file from Azure remote storage to a local file, retrying on
 * failure and decrypting the result locally when client-side encryption is
 * in effect.
 *
 * @param connection            session used for credential renewal on retry
 * @param command               original command, used when renewing tokens
 * @param localLocation         local directory to download into
 * @param destFileName          local file name to write
 * @param parallelism           requested parallelism (not used by this path)
 * @param remoteStorageLocation container holding the file
 * @param stageFilePath         blob path of the file
 * @param stageRegion           region of the stage (not used by this path)
 * @throws SnowflakeSQLException if the download ultimately fails
 */
public void download(SFSession connection, String command, String localLocation, String destFileName, int parallelism, String remoteStorageLocation, String stageFilePath, String stageRegion) throws SnowflakeSQLException
{
  int retryCount = 0;
  do
  {
    try
    {
      String localFilePath = localLocation + localFileSep + destFileName;
      File localFile = new File(localFilePath);

      // resolve the blob and download it to the local path
      CloudBlobContainer container = azStorageClient.getContainerReference(remoteStorageLocation);
      CloudBlob blob = container.getBlockBlobReference(stageFilePath);
      blob.downloadToFile(localFilePath);

      // pull the user-defined metadata to recover the encryption material
      blob.downloadAttributes();
      Map<String, String> userDefinedMetadata = blob.getMetadata();
      AbstractMap.SimpleEntry<String, String> encryptionData = parseEncryptionData(userDefinedMetadata.get(AZ_ENCRYPTIONDATAPROP));
      String key = encryptionData.getKey();
      String iv = encryptionData.getValue();

      // decrypt locally only for key sizes the default JCE policy supports
      if (this.isEncrypting() && this.getEncryptionKeySize() <= 256)
      {
        if (key == null || iv == null)
        {
          throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "File metadata incomplete");
        }
        try
        {
          EncryptionProvider.decrypt(localFile, key, iv, this.encMat);
        }
        catch (Exception ex)
        {
          logger.error("Error decrypting file", ex);
          throw ex;
        }
      }
      return;
    }
    catch (Exception ex)
    {
      logger.debug("Download unsuccessful {}", ex);
      // decides whether to retry, renew credentials, or rethrow
      handleAzureException(ex, ++retryCount, "download", connection, command, this);
    }
  }
  while (retryCount <= getMaxRetries());

  // the retry loop should either return or throw; reaching here is a logic error
  throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Unexpected: download unsuccessful without exception!");
}
Download a file from remote storage .
28,488
/**
 * Handles exceptions thrown during Azure storage operations: retries
 * transient errors with exponential backoff, renews an expired SAS token on
 * HTTP 403, and rethrows everything else as SnowflakeSQLException once the
 * retry budget is exhausted.
 *
 * @param ex         the exception to handle
 * @param retryCount current (already incremented) retry attempt number
 * @param operation  name of the operation that failed (for messages)
 * @param connection session used for credential renewal
 * @param command    original command, used when renewing tokens
 * @param azClient   client whose retry configuration applies
 * @throws SnowflakeSQLException when the error is fatal or retries are exhausted
 */
private static void handleAzureException(
    Exception ex,
    int retryCount,
    String operation,
    SFSession connection,
    String command,
    SnowflakeAzureClient azClient)
    throws SnowflakeSQLException
{
  // an invalid key means the JCE unlimited-strength policy is missing; no retry helps
  if (ex.getCause() instanceof InvalidKeyException)
  {
    SnowflakeFileTransferAgent.throwJCEMissingError(operation, ex);
  }

  // Bug fix: the cast to StorageException was previously unconditional here,
  // which threw ClassCastException for any other exception type and made the
  // non-StorageException handling below unreachable.
  if (ex instanceof StorageException
      && ((StorageException) ex).getHttpStatusCode() == 403)
  {
    // HTTP 403 indicates the SAS token has expired; refresh before retrying
    SnowflakeFileTransferAgent.renewExpiredToken(connection, command, azClient);
  }

  if (ex instanceof StorageException)
  {
    StorageException se = (StorageException) ex;
    if (retryCount > azClient.getMaxRetries())
    {
      throw new SnowflakeSQLException(se, SqlState.SYSTEM_ERROR, ErrorCode.AZURE_SERVICE_ERROR.getMessageCode(), operation, se.getErrorCode(), se.getExtendedErrorInformation(), se.getHttpStatusCode(), se.getMessage());
    }
    else
    {
      logger.debug("Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount);
      logger.debug("Stack trace: ", ex);

      // exponential backoff, capped by the configured max exponent
      int backoffInMillis = azClient.getRetryBackoffMin();
      if (retryCount > 1)
      {
        backoffInMillis <<= (Math.min(retryCount - 1, azClient.getRetryBackoffMaxExponent()));
      }
      try
      {
        logger.debug("Sleep for {} milliseconds before retry", backoffInMillis);
        Thread.sleep(backoffInMillis);
      }
      catch (InterruptedException ex1)
      {
        // restore the interrupt flag instead of swallowing it silently
        Thread.currentThread().interrupt();
      }
      if (se.getHttpStatusCode() == 403)
      {
        // the token may also have expired while we were backing off
        SnowflakeFileTransferAgent.renewExpiredToken(connection, command, azClient);
      }
    }
  }
  else if (ex instanceof InterruptedException
           || SnowflakeUtil.getRootCause(ex) instanceof SocketTimeoutException)
  {
    // transient interruption / socket timeout: retry until the budget runs out
    if (retryCount > azClient.getMaxRetries())
    {
      throw new SnowflakeSQLException(ex, SqlState.SYSTEM_ERROR, ErrorCode.IO_ERROR.getMessageCode(), "Encountered exception during " + operation + ": " + ex.getMessage());
    }
    else
    {
      logger.debug("Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount);
    }
  }
  else
  {
    // anything else is treated as a fatal I/O error
    throw new SnowflakeSQLException(ex, SqlState.SYSTEM_ERROR, ErrorCode.IO_ERROR.getMessageCode(), "Encountered exception during " + operation + ": " + ex.getMessage());
  }
}
Handles exceptions thrown by Azure Storage It will retry transient errors as defined by the Azure Client retry policy It will re - create the client if the SAS token has expired and re - try
28,489
/**
 * Adds the file digest to the StorageObjectMetadata object as user metadata
 * under the "sfcdigest" key; blank digests are ignored.
 *
 * @param meta   metadata object to augment
 * @param digest digest value to record, may be null or blank
 */
public void addDigestMetadata(StorageObjectMetadata meta, String digest)
{
  if (SnowflakeUtil.isBlank(digest))
  {
    return; // nothing to record
  }
  meta.addUserMetadata("sfcdigest", digest);
}
Adds digest metadata to the StorageObjectMetadata object
28,490
/**
 * Initializes the memory limit in bytes for result processing: the
 * CLIENT_MEMORY_LIMIT parameter (in MB) when present, otherwise the driver
 * default, always capped at 80% of the JVM's max heap. When the limit is the
 * default (or absent), it may scale up to that 80% cap.
 *
 * @param resultOutput result output carrying the session parameters
 * @return allowed memory usage in bytes
 */
private static long initMemoryLimit(final ResultOutput resultOutput)
{
  // read the parameter once; it may legitimately be absent
  Object limitParam = resultOutput.parameters.get(CLIENT_MEMORY_LIMIT);

  // default memory limit (MB -> bytes); long arithmetic avoids int overflow
  long memoryLimit = SessionUtil.DEFAULT_CLIENT_MEMORY_LIMIT * 1024L * 1024L;
  if (limitParam != null)
  {
    memoryLimit = (int) limitParam * 1024L * 1024L;
  }

  // never use more than 80% of the JVM's max heap
  long maxMemoryToUse = Runtime.getRuntime().maxMemory() * 8 / 10;

  // Bug fix: the parameter was previously dereferenced unconditionally here,
  // throwing NullPointerException when absent; an absent parameter is now
  // treated the same as the default value.
  if (limitParam == null
      || (int) limitParam == SessionUtil.DEFAULT_CLIENT_MEMORY_LIMIT)
  {
    // user did not override the default: allow scaling up to the heap cap
    memoryLimit = Math.max(memoryLimit, maxMemoryToUse);
  }

  memoryLimit = Math.min(memoryLimit, maxMemoryToUse);
  logger.debug("Set allowed memory usage to {} bytes", memoryLimit);
  return memoryLimit;
}
Initialize the allowed memory limit, in bytes, for result processing
28,491
/**
 * Returns the effective parameter value: the explicitly provided value wins,
 * otherwise the driver-wide default; unknown parameters yield null (and are
 * logged at debug level).
 *
 * @param parameters explicitly provided parameters, keyed case-insensitively
 * @param paramName  name of the parameter to look up
 * @return effective value, or null when the parameter is unknown
 */
static private Object effectiveParamValue(Map<String, Object> parameters, String paramName)
{
  final String key = paramName.toUpperCase();

  Object explicit = parameters.get(key);
  if (explicit != null)
  {
    return explicit;
  }

  Object fallback = defaultParameters.get(key);
  if (fallback != null)
  {
    return fallback;
  }

  logger.debug("Unknown Common Parameter: {}", paramName);
  return null;
}
Returns the effective parameter value using the value explicitly provided in parameters or the default if absent
28,492
/**
 * Builds a formatter for a specialized timestamp type, using the session
 * parameter value when set and falling back to the supplied default format.
 *
 * @param parameters    session parameters to consult
 * @param id            short label used only in debug logging
 * @param param         parameter name carrying the format override
 * @param defaultFormat format to use when the parameter is not set
 * @return formatter for the specialized timestamp type
 */
static private SnowflakeDateTimeFormat specializedFormatter(Map<String, Object> parameters, String id, String param, String defaultFormat)
{
  String paramValue = (String) effectiveParamValue(parameters, param);
  String sqlFormat =
      SnowflakeDateTimeFormat.effectiveSpecializedTimestampFormat(paramValue, defaultFormat);

  SnowflakeDateTimeFormat formatter = new SnowflakeDateTimeFormat(sqlFormat);
  if (logger.isDebugEnabled())
  {
    logger.debug("sql {} format: {}, java {} format: {}", id, sqlFormat, id, formatter.toSimpleDateTimePattern());
  }
  return formatter;
}
Helper function building a formatter for a specialized timestamp type . Note that it will be based on either the param value if set or the default format provided .
28,493
/**
 * Adjusts a timestamp falling before 1582-10-05 by the Julian/Gregorian
 * calendar difference; later timestamps are returned unchanged.
 *
 * @param timestamp timestamp to adjust
 * @return adjusted timestamp, or the original when no adjustment applies
 */
static public Timestamp adjustTimestamp(Timestamp timestamp)
{
  long milliToAdjust = ResultUtil.msDiffJulianToGregorian(timestamp);
  if (milliToAdjust == 0)
  {
    return timestamp;
  }

  if (logger.isDebugEnabled())
  {
    logger.debug("adjust timestamp by {} days", milliToAdjust / 86400000);
  }

  // shift the epoch millis while preserving the sub-second precision
  Timestamp adjusted = new Timestamp(timestamp.getTime() + milliToAdjust);
  adjusted.setNanos(timestamp.getNanos());
  return adjusted;
}
Adjust timestamp for dates before 1582 - 10 - 05
28,494
/**
 * For dates before 1582-10-05 (the Julian-to-Gregorian calendar switch),
 * calculates the number of milliseconds to add as adjustment; dates on or
 * after the switch need no adjustment.
 *
 * @param date date to compute the adjustment for
 * @return adjustment in milliseconds, 0 when none applies
 */
static public long msDiffJulianToGregorian(java.util.Date date)
{
  // no adjustment needed on or after the calendar switch
  if (date.getTime() >= -12220156800000L)
  {
    return 0;
  }

  Calendar cal = Calendar.getInstance();
  cal.setTime(date);
  int year = cal.get(Calendar.YEAR);
  int month = cal.get(Calendar.MONTH);
  int dayOfMonth = cal.get(Calendar.DAY_OF_MONTH);

  // January and Feb 1-28 count against the previous year for the century math
  if (month == 0 || (month == 1 && dayOfMonth <= 28))
  {
    year -= 1;
  }

  int hundreds = year / 100;
  int differenceInDays = hundreds - (hundreds / 4) - 2;
  return differenceInDays * 86400000;
}
For dates before 1582 - 10 - 05 calculate the number of millis to adjust .
28,495
/**
 * Converts a time value into a string using the given time formatter.
 *
 * @param sft           internal time value
 * @param scale         fractional-second scale to render
 * @param timeFormatter formatter applied to the value
 * @return formatted time string
 */
static public String getSFTimeAsString(SFTime sft, int scale, SnowflakeDateTimeFormat timeFormatter)
{
  return timeFormatter.format(sft, scale);
}
Convert a time value into a string
28,496
/**
 * Converts an SFTimestamp to a string, selecting the NTZ/LTZ/TZ formatter
 * by column type; falls back to a plain epoch-seconds rendering when the
 * formatter cannot handle the timestamp.
 *
 * @param sfTS                  timestamp value to convert
 * @param columnType            JDBC/extended type of the column
 * @param scale                 fractional-second scale to render
 * @param timestampNTZFormatter formatter for TIMESTAMP_NTZ columns
 * @param timestampLTZFormatter formatter for TIMESTAMP_LTZ columns
 * @param timestampTZFormatter  formatter for TIMESTAMP_TZ columns
 * @param session               session used for incident reporting
 * @return formatted timestamp string
 * @throws SFException if the required formatter is missing
 */
static public String getSFTimestampAsString(SFTimestamp sfTS, int columnType, int scale, SnowflakeDateTimeFormat timestampNTZFormatter, SnowflakeDateTimeFormat timestampLTZFormatter, SnowflakeDateTimeFormat timestampTZFormatter, SFSession session) throws SFException
{
  // pick the formatter matching the timestamp flavor of the column
  final SnowflakeDateTimeFormat formatter;
  if (columnType == Types.TIMESTAMP)
  {
    formatter = timestampNTZFormatter;
  }
  else if (columnType == SnowflakeUtil.EXTRA_TYPES_TIMESTAMP_LTZ)
  {
    formatter = timestampLTZFormatter;
  }
  else
  {
    formatter = timestampTZFormatter;
  }

  if (formatter == null)
  {
    // a missing formatter is an internal error worth reporting as an incident
    throw (SFException) IncidentUtil.generateIncidentV2WithException(session, new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp formatter"), null, null);
  }

  try
  {
    Timestamp adjustedTimestamp = ResultUtil.adjustTimestamp(sfTS.getTimestamp());
    return formatter.format(adjustedTimestamp, sfTS.getTimeZone(), scale);
  }
  catch (SFTimestamp.TimestampOperationNotAvailableException e)
  {
    // formatter could not represent the value: render raw seconds since epoch
    BigDecimal nanosSinceEpoch = sfTS.getNanosSinceEpoch();
    BigDecimal secondsSinceEpoch = nanosSinceEpoch.scaleByPowerOfTen(-9);
    return secondsSinceEpoch.setScale(scale).toPlainString();
  }
}
Convert a SFTimestamp to a string value .
28,497
/**
 * Converts a date value into a string using the given date formatter.
 *
 * @param date          date value to format
 * @param dateFormatter formatter applied to the value
 * @return formatted date string
 */
static public String getDateAsString(Date date, SnowflakeDateTimeFormat dateFormatter)
{
  // dates are always rendered against the UTC timezone
  return dateFormatter.format(date, timeZoneUTC);
}
Convert a date value into a string
28,498
/**
 * Adjusts a date falling before 1582-10-05 by the Julian/Gregorian calendar
 * difference; later dates are returned unchanged.
 *
 * @param date date to adjust
 * @return adjusted date, or the original when no adjustment applies
 */
static public Date adjustDate(Date date)
{
  long milliToAdjust = ResultUtil.msDiffJulianToGregorian(date);
  // only dates before the calendar switch receive a non-zero adjustment
  return milliToAdjust == 0 ? date : new Date(date.getTime() + milliToAdjust);
}
Adjust dates falling before 1582-10-05 by the Julian/Gregorian calendar difference
28,499
/**
 * Converts the internal date representation (days since epoch, as a string)
 * to a Date object rendered in the specified timezone.
 *
 * @param str     days since epoch, as a decimal string
 * @param tz      timezone the resulting Date should reflect
 * @param session session used for incident reporting on bad input
 * @return Date in the client timezone, adjusted for pre-Gregorian dates
 * @throws SFException if the input string is not a valid number
 */
static public Date getDate(String str, TimeZone tz, SFSession session) throws SFException
{
  try
  {
    // the wire value is the number of days since epoch
    long milliSecsSinceEpoch = Long.valueOf(str) * 86400000;

    SFTimestamp tsInUTC = SFTimestamp.fromDate(new Date(milliSecsSinceEpoch), 0, TimeZone.getTimeZone("UTC"));
    SFTimestamp tsInClientTZ = tsInUTC.moveToTimeZone(tz);

    if (logger.isDebugEnabled())
    {
      logger.debug("getDate: tz offset={}", tsInClientTZ.getTimeZone().getOffset(tsInClientTZ.getTime()));
    }

    // take the millis in the client timezone, then fix up pre-Gregorian dates
    Date rawDate = new Date(tsInClientTZ.getTime());
    Date adjustedDate = adjustDate(rawDate);
    if (logger.isDebugEnabled())
    {
      logger.debug("Adjust date from {} to {}", rawDate.toString(), adjustedDate.toString());
    }
    return adjustedDate;
  }
  catch (NumberFormatException ex)
  {
    throw (SFException) IncidentUtil.generateIncidentV2WithException(session, new SFException(ErrorCode.INTERNAL_ERROR, "Invalid date value: " + str), null, null);
  }
}
Convert an internal date value (days since epoch) to a Date object in the specified timezone.