index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/AuthMech.java | package com.amazon.redshift;
/**
* Provided authentication mechanism type enum.
*/
public enum AuthMech
{
  // NOTE(review): do not reorder these constants — callers may rely on
  // ordinal()/compareTo() ordering (DISABLE < ... < VERIFY_FULL); confirm before changing.
  /**
   * Indicates the mechanism type is non-SSL.
   */
  DISABLE,
  /**
   * Indicates that the mechanism type is using non-SSL first and then SSL if non-SSL fails.
   */
  ALLOW,
  /**
   * Indicates that the mechanism type is using SSL first and then non-SSL if SSL fails.
   */
  PREFER,
  /**
   * Indicates the mechanism type is using SSL.
   */
  REQUIRE,
  /**
   * Indicates the mechanism type is using SSL and verifying the trusted certificate authority.
   */
  VERIFY_CA,
  /**
   * Indicates the mechanism type is using SSL and verifying both the trusted certificate
   * authority and the server hostname.
   */
  VERIFY_FULL;
}
| 8,300 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/SingleCertValidatingFactory.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import com.amazon.redshift.util.GT;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.UUID;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
/**
 * <p>Provides an SSLSocketFactory that authenticates the remote server against an explicit pre-shared
* SSL certificate. This is more secure than using the NonValidatingFactory as it prevents "man in
* the middle" attacks. It is also more secure than relying on a central CA signing your server's
* certificate as it pins the server's certificate.</p>
*
* <p>This class requires a single String parameter specified by setting the connection property
* <code>sslfactoryarg</code>. The value of this property is the PEM-encoded remote server's SSL
* certificate.</p>
*
* <p>Where the certificate is loaded from is based upon the prefix of the <code>sslfactoryarg</code> property.
* The following table lists the valid set of prefixes.</p>
*
* <table border="1">
* <caption>Valid prefixes for sslfactoryarg</caption>
* <tr>
* <th>Prefix</th>
* <th>Example</th>
* <th>Explanation</th>
* </tr>
* <tr>
* <td><code>classpath:</code></td>
* <td><code>classpath:ssl/server.crt</code></td>
* <td>Loaded from the classpath.</td>
* </tr>
* <tr>
* <td><code>file:</code></td>
* <td><code>file:/foo/bar/server.crt</code></td>
* <td>Loaded from the filesystem.</td>
* </tr>
* <tr>
* <td><code>env:</code></td>
* <td><code>env:mydb_cert</code></td>
* <td>Loaded from string value of the <code>mydb_cert</code> environment variable.</td>
* </tr>
* <tr>
* <td><code>sys:</code></td>
* <td><code>sys:mydb_cert</code></td>
* <td>Loaded from string value of the <code>mydb_cert</code> system property.</td>
* </tr>
* <tr>
* <td><pre>-----BEGIN CERTIFICATE------</pre></td>
* <td>
* <pre>
* -----BEGIN CERTIFICATE-----
* MIIDQzCCAqygAwIBAgIJAOd1tlfiGoEoMA0GCSqGSIb3DQEBBQUAMHUxCzAJBgNV
* [... truncated ...]
* UCmmYqgiVkAGWRETVo+byOSDZ4swb10=
* -----END CERTIFICATE-----
* </pre>
* </td>
* <td>Loaded from string value of the argument.</td>
* </tr>
* </table>
*/
public class SingleCertValidatingFactory extends WrappedFactory {
  private static final String FILE_PREFIX = "file:";
  private static final String CLASSPATH_PREFIX = "classpath:";
  private static final String ENV_PREFIX = "env:";
  private static final String SYS_PROP_PREFIX = "sys:";

  /**
   * Builds an SSL socket factory whose trust store contains exactly the one server
   * certificate supplied via {@code sslfactoryarg}.
   *
   * @param sslFactoryArg where to load the PEM certificate from; see the class javadoc
   *                      for the supported prefixes (file:, classpath:, env:, sys:, or
   *                      a literal PEM certificate)
   * @throws GeneralSecurityException if the argument is empty or unrecognized, or the
   *                                  certificate cannot be located or parsed
   */
  public SingleCertValidatingFactory(String sslFactoryArg) throws GeneralSecurityException {
    if (sslFactoryArg == null || sslFactoryArg.equals("")) {
      throw new GeneralSecurityException(GT.tr("The sslfactoryarg property may not be empty."));
    }
    InputStream in = null;
    try {
      if (sslFactoryArg.startsWith(FILE_PREFIX)) {
        String path = sslFactoryArg.substring(FILE_PREFIX.length());
        in = new BufferedInputStream(new FileInputStream(path));
      } else if (sslFactoryArg.startsWith(CLASSPATH_PREFIX)) {
        String path = sslFactoryArg.substring(CLASSPATH_PREFIX.length());
        // Fix: getResourceAsStream returns null for a missing resource; the original code
        // wrapped the null and later failed with an obscure NullPointerException on read.
        InputStream resource =
            Thread.currentThread().getContextClassLoader().getResourceAsStream(path);
        if (resource == null) {
          throw new GeneralSecurityException(GT.tr(
              "The classpath resource {0} containing the server's SSL certificate was not found.",
              path));
        }
        in = new BufferedInputStream(resource);
      } else if (sslFactoryArg.startsWith(ENV_PREFIX)) {
        String name = sslFactoryArg.substring(ENV_PREFIX.length());
        String cert = System.getenv(name);
        if (cert == null || "".equals(cert)) {
          throw new GeneralSecurityException(GT.tr(
              "The environment variable containing the server's SSL certificate must not be empty."));
        }
        in = new ByteArrayInputStream(cert.getBytes("UTF-8"));
      } else if (sslFactoryArg.startsWith(SYS_PROP_PREFIX)) {
        String name = sslFactoryArg.substring(SYS_PROP_PREFIX.length());
        String cert = System.getProperty(name);
        if (cert == null || "".equals(cert)) {
          throw new GeneralSecurityException(GT.tr(
              "The system property containing the server's SSL certificate must not be empty."));
        }
        in = new ByteArrayInputStream(cert.getBytes("UTF-8"));
      } else if (sslFactoryArg.startsWith("-----BEGIN CERTIFICATE-----")) {
        // The argument itself is an inline PEM certificate.
        in = new ByteArrayInputStream(sslFactoryArg.getBytes("UTF-8"));
      } else {
        throw new GeneralSecurityException(GT.tr(
            "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----."));
      }
      SSLContext ctx = SSLContext.getInstance("TLS");
      ctx.init(null, new TrustManager[]{new SingleCertTrustManager(in)}, null);
      factory = ctx.getSocketFactory();
    } catch (RuntimeException e) {
      // Programming errors propagate unchanged.
      throw e;
    } catch (Exception e) {
      if (e instanceof GeneralSecurityException) {
        throw (GeneralSecurityException) e;
      }
      throw new GeneralSecurityException(GT.tr("An error occurred reading the certificate"), e);
    } finally {
      if (in != null) {
        try {
          in.close();
        } catch (Exception e2) {
          // ignore — best-effort close of the certificate source
        }
      }
    }
  }

  /**
   * Trust manager that accepts only chains validated against the single pinned certificate.
   */
  public static class SingleCertTrustManager implements X509TrustManager {
    X509Certificate cert;
    X509TrustManager trustManager;

    /**
     * Loads the one certificate from {@code in} into an in-memory keystore and builds a
     * standard trust manager around it.
     *
     * @param in source of the PEM-encoded certificate; not closed by this constructor
     * @throws IOException if the stream cannot be read
     * @throws GeneralSecurityException if the certificate cannot be parsed or no
     *         X509TrustManager is available
     */
    public SingleCertTrustManager(InputStream in) throws IOException, GeneralSecurityException {
      KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
      // Note: KeyStore requires it be loaded even if you don't load anything into it:
      ks.load(null);
      CertificateFactory cf = CertificateFactory.getInstance("X509");
      cert = (X509Certificate) cf.generateCertificate(in);
      // Random alias: the keystore holds exactly this one entry.
      ks.setCertificateEntry(UUID.randomUUID().toString(), cert);
      TrustManagerFactory tmf =
          TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
      tmf.init(ks);
      for (TrustManager tm : tmf.getTrustManagers()) {
        if (tm instanceof X509TrustManager) {
          trustManager = (X509TrustManager) tm;
          break;
        }
      }
      if (trustManager == null) {
        throw new GeneralSecurityException(GT.tr("No X509TrustManager found"));
      }
    }

    public void checkClientTrusted(X509Certificate[] chain, String authType)
        throws CertificateException {
      // No-op: we act only as a TLS client, never as a server.
    }

    public void checkServerTrusted(X509Certificate[] chain, String authType)
        throws CertificateException {
      // Delegate to the trust manager that trusts only the pinned certificate.
      trustManager.checkServerTrusted(chain, authType);
    }

    public X509Certificate[] getAcceptedIssuers() {
      return new X509Certificate[]{cert};
    }
  }
}
| 8,301 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/PKCS12KeyManager.java | /*
* Copyright (c) 2019, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.File;
import java.io.FileInputStream;
import java.net.Socket;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.Principal;
import java.security.PrivateKey;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import javax.net.ssl.X509KeyManager;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.x500.X500Principal;
public class PKCS12KeyManager implements X509KeyManager {

  // Supplies the keystore/key password on demand.
  private final CallbackHandler cbh;
  // getCertificateChain/getPrivateKey cannot throw checked exceptions; failures are
  // parked here and surfaced via throwKeyManagerException().
  private RedshiftException error = null;
  private final String keyfile;
  private final KeyStore keyStore;
  boolean keystoreLoaded = false;

  /**
   * Key manager backed by a PKCS#12 keystore file; the keystore is loaded lazily
   * on first use, with the password obtained through the callback handler.
   *
   * @param pkcsFile path to the PKCS#12 keystore file
   * @param cbh callback handler used to obtain the keystore password
   * @throws RedshiftException if the JVM has no PKCS#12 KeyStore implementation
   */
  public PKCS12KeyManager(String pkcsFile, CallbackHandler cbh) throws RedshiftException {
    try {
      keyStore = KeyStore.getInstance("pkcs12");
      keyfile = pkcsFile;
      this.cbh = cbh;
    } catch ( KeyStoreException kse ) {
      throw new RedshiftException(GT.tr(
          "Unable to find pkcs12 keystore."),
          RedshiftState.CONNECTION_FAILURE, kse);
    }
  }

  /**
   * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is
   * stored in {@link #error} and can be raised by this method.
   *
   * @throws RedshiftException if any exception is stored in {@link #error} and can be raised
   */
  public void throwKeyManagerException() throws RedshiftException {
    if (error != null) {
      throw error;
    }
  }

  @Override
  public String[] getClientAliases(String keyType, Principal[] principals) {
    String alias = chooseClientAlias(new String[]{keyType}, principals, (Socket) null);
    return (alias == null ? new String[]{} : new String[]{alias});
  }

  @Override
  public String chooseClientAlias(String[] strings, Principal[] principals, Socket socket) {
    if (principals == null || principals.length == 0) {
      // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
      // to the client. See BUG #5468. We only hope, that our certificate will be accepted.
      return "user";
    } else {
      // Sending a wrong certificate makes the connection rejected, even, if clientcert=0 in
      // pg_hba.conf.
      // therefore we only send our certificate, if the issuer is listed in issuers
      X509Certificate[] certchain = getCertificateChain("user");
      if (certchain == null) {
        return null;
      } else {
        // Compare the issuer of the last (root-most) certificate in our chain against the
        // authorities the server advertised.
        X500Principal ourissuer = certchain[certchain.length - 1].getIssuerX500Principal();
        boolean found = false;
        for (Principal issuer : principals) {
          if (ourissuer.equals(issuer)) {
            found = true;
          }
        }
        return (found ? "user" : null);
      }
    }
  }

  @Override
  public String[] getServerAliases(String s, Principal[] principals) {
    return new String[]{};
  }

  @Override
  public String chooseServerAlias(String s, Principal[] principals, Socket socket) {
    // we are not a server
    return null;
  }

  @Override
  public X509Certificate[] getCertificateChain(String alias) {
    try {
      loadKeyStore();
      Certificate []certs = keyStore.getCertificateChain(alias);
      // Fix: getCertificateChain returns null for an unknown alias; the original code
      // then hit a NullPointerException that was reported with a misleading message.
      // Per the X509KeyManager contract, return null when the alias cannot be matched.
      if (certs == null) {
        return null;
      }
      X509Certificate [] x509Certificates = new X509Certificate[certs.length];
      int i = 0;
      for (Certificate cert : certs) {
        x509Certificates[i++] = (X509Certificate)cert;
      }
      return x509Certificates;
    } catch (Exception kse ) {
      error = new RedshiftException(GT.tr(
          "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
          RedshiftState.CONNECTION_FAILURE, kse);
    }
    return null;
  }

  @Override
  public PrivateKey getPrivateKey(String s) {
    try {
      loadKeyStore();
      // The key may be protected by a password distinct from the keystore's; ask again.
      PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
      cbh.handle(new Callback[]{pwdcb});
      KeyStore.ProtectionParameter protParam = new KeyStore.PasswordProtection(pwdcb.getPassword());
      KeyStore.PrivateKeyEntry pkEntry =
          (KeyStore.PrivateKeyEntry) keyStore.getEntry("user", protParam);
      // Fix: getEntry returns null if there is no "user" alias; return null per the
      // X509KeyManager contract instead of failing with a NullPointerException.
      if (pkEntry == null) {
        return null;
      }
      PrivateKey myPrivateKey = pkEntry.getPrivateKey();
      return myPrivateKey;
    } catch (Exception ioex ) {
      error = new RedshiftException(GT.tr("Could not read SSL key file {0}.", keyfile),
          RedshiftState.CONNECTION_FAILURE, ioex);
    }
    return null;
  }

  /**
   * Loads the PKCS#12 keystore from {@link #keyfile} on first use. Synchronized so
   * concurrent callers do not load it twice.
   *
   * @throws Exception if the password callback or the keystore load fails
   */
  private synchronized void loadKeyStore() throws Exception {
    if (keystoreLoaded) {
      return;
    }
    // We call back for the password
    PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
    try {
      cbh.handle(new Callback[]{pwdcb});
    } catch (UnsupportedCallbackException ucex) {
      if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
          && ("Console is not available".equals(ucex.getMessage()))) {
        error = new RedshiftException(GT
            .tr("Could not read password for SSL key file, console is not available."),
            RedshiftState.CONNECTION_FAILURE, ucex);
      } else {
        error =
            new RedshiftException(
                GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
                    cbh.getClass().getName()),
                RedshiftState.CONNECTION_FAILURE, ucex);
      }
    }
    // Fix: the original leaked this FileInputStream; close it on every path.
    try (FileInputStream keyStream = new FileInputStream(new File(keyfile))) {
      keyStore.load(keyStream, pwdcb.getPassword());
    }
    keystoreLoaded = true;
  }
}
| 8,302 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/NonValidatingFactory.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import java.security.GeneralSecurityException;
import java.security.cert.X509Certificate;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
/**
 * Provides an SSLSocketFactory that allows SSL connections to be made without validating the server's
* certificate. This is more convenient for some applications, but is less secure as it allows "man
* in the middle" attacks.
*/
public class NonValidatingFactory extends WrappedFactory {

  /**
   * Builds a socket factory whose trust manager accepts every server certificate.
   *
   * <p>The single (ignored) argument exists only because the ssl calling code looks
   * for a one-argument constructor first and falls back to the no-argument one; having
   * it avoids an exception and an extra reflection lookup.</p>
   *
   * @param arg input argument; ignored
   * @throws GeneralSecurityException if the TLS context cannot be created
   */
  public NonValidatingFactory(String arg) throws GeneralSecurityException {
    TrustManager[] trustEverything = new TrustManager[]{new NonValidatingTM()};
    SSLContext context = SSLContext.getInstance("TLS"); // or "SSL" ?
    context.init(null, trustEverything, null);
    factory = context.getSocketFactory();
  }

  /**
   * Trust manager that performs no validation at all — every client and server
   * certificate chain is accepted.
   */
  public static class NonValidatingTM implements X509TrustManager {

    public X509Certificate[] getAcceptedIssuers() {
      return new X509Certificate[0];
    }

    public void checkClientTrusted(X509Certificate[] certs, String authType) {
      // Intentionally empty: all client chains are trusted.
    }

    public void checkServerTrusted(X509Certificate[] certs, String authType) {
      // Intentionally empty: all server chains are trusted.
    }
  }
}
| 8,303 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/DbKeyStoreSocketFactory.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import java.io.InputStream;
import java.security.KeyStore;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
public abstract class DbKeyStoreSocketFactory extends com.amazon.redshift.ssl.WrappedFactory {
  /*
   * Populate the WrappedFactory member factory with an SSL Socket Factory that uses the JKS
   * keystore provided by getKeyStorePassword() and getKeyStoreStream(). A subclass only needs to
   * implement these two methods. The key store will be used both for selecting a private key
   * certificate to send to the server, as well as checking the server's certificate against a set
   * of trusted CAs.
   *
   * Note: the constructor calls the two abstract methods, so subclass implementations must not
   * rely on subclass-constructor state.
   */
  public DbKeyStoreSocketFactory() throws DbKeyStoreSocketException {
    KeyStore keys;
    char[] password;
    try {
      keys = KeyStore.getInstance("JKS");
      password = getKeyStorePassword();
      keys.load(getKeyStoreStream(), password);
    } catch (java.security.GeneralSecurityException gse) {
      // Fix: chain the cause so the original stack trace is not lost.
      throw new DbKeyStoreSocketException("Failed to load keystore: " + gse.getMessage(), gse);
    } catch (java.io.FileNotFoundException fnfe) {
      throw new DbKeyStoreSocketException("Failed to find keystore file." + fnfe.getMessage(), fnfe);
    } catch (java.io.IOException ioe) {
      throw new DbKeyStoreSocketException("Failed to read keystore file: " + ioe.getMessage(), ioe);
    }
    try {
      KeyManagerFactory keyfact =
          KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
      keyfact.init(keys, password);
      TrustManagerFactory trustfact =
          TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
      trustfact.init(keys);
      // Fix: request "TLS" rather than the legacy "SSL" protocol name.
      SSLContext ctx = SSLContext.getInstance("TLS");
      ctx.init(keyfact.getKeyManagers(), trustfact.getTrustManagers(), null);
      factory = ctx.getSocketFactory();
    } catch (java.security.GeneralSecurityException gse) {
      throw new DbKeyStoreSocketException(
          "Failed to set up database socket factory: " + gse.getMessage(), gse);
    }
  }

  /** @return the password protecting the JKS keystore */
  public abstract char[] getKeyStorePassword();

  /** @return a stream from which the JKS keystore is read; ownership stays with the caller */
  public abstract InputStream getKeyStoreStream();

  /** Thrown when the keystore cannot be loaded or the socket factory cannot be built. */
  public static class DbKeyStoreSocketException extends Exception {
    public DbKeyStoreSocketException(String message) {
      super(message);
    }

    /**
     * Backward-compatible addition: preserves the underlying cause for diagnostics.
     *
     * @param message description of the failure
     * @param cause the underlying exception
     */
    public DbKeyStoreSocketException(String message, Throwable cause) {
      super(message, cause);
    }
  }
}
| 8,304 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/DefaultJavaSSLFactory.java | /*
* Copyright (c) 2017, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import java.util.Properties;
import javax.net.ssl.SSLSocketFactory;
/**
* Socket factory that uses Java's default truststore to validate server certificate.
* Note: it always validates server certificate, so it might result to downgrade to non-encrypted
* connection when default truststore lacks certificates to validate server.
*/
public class DefaultJavaSSLFactory extends WrappedFactory {
  /**
   * Wraps the JVM's default SSLSocketFactory, which validates the server certificate
   * against Java's default truststore.
   *
   * @param info connection properties; unused here — presumably required so the ssl
   *             calling code can find a single-argument constructor via reflection
   *             (see NonValidatingFactory for the same pattern) — TODO confirm
   */
  public DefaultJavaSSLFactory(Properties info) {
    factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
  }
}
| 8,305 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/LazyKeyManager.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.net.Socket;
import java.security.AlgorithmParameters;
import java.security.GeneralSecurityException;
import java.security.Key;
import java.security.KeyFactory;
import java.security.NoSuchAlgorithmException;
import java.security.Principal;
import java.security.PrivateKey;
import java.security.cert.Certificate;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.KeySpec;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.Collection;
import javax.crypto.Cipher;
import javax.crypto.EncryptedPrivateKeyInfo;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.net.ssl.X509KeyManager;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.x500.X500Principal;
/**
 * A key manager that only loads the keys if necessary.
*/
public class LazyKeyManager implements X509KeyManager {
  // Lazily-loaded certificate chain and private key; null until first requested.
  private X509Certificate[] cert = null;
  private PrivateKey key = null;
  private final String certfile;
  private final String keyfile;
  private final CallbackHandler cbh;
  private final boolean defaultfile;
  // getCertificateChain/getPrivateKey cannot throw checked exceptions; failures are
  // parked here and surfaced via throwKeyManagerException().
  private RedshiftException error = null;

  /**
   * Constructor. certfile and keyfile can be null, in that case no certificate is presented to the
   * server.
   *
   * @param certfile certfile
   * @param keyfile key file
   * @param cbh callback handler used to obtain the key password when the key is encrypted
   * @param defaultfile true when the paths are the default locations, in which case a
   *                    missing file is not reported as an error
   */
  public LazyKeyManager(String certfile, String keyfile, CallbackHandler cbh, boolean defaultfile) {
    this.certfile = certfile;
    this.keyfile = keyfile;
    this.cbh = cbh;
    this.defaultfile = defaultfile;
  }

  /**
   * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is
   * stored in {@link #error} and can be raised by this method.
   *
   * @throws RedshiftException if any exception is stored in {@link #error} and can be raised
   */
  public void throwKeyManagerException() throws RedshiftException {
    if (error != null) {
      throw error;
    }
  }

  @Override
  public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
    if (certfile == null) {
      return null;
    } else {
      if (issuers == null || issuers.length == 0) {
        // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
        // to the client. See BUG #5468. We only hope, that our certificate will be accepted.
        return "user";
      } else {
        // Sending a wrong certificate makes the connection rejected, even, if clientcert=0 in
        // pg_hba.conf.
        // therefore we only send our certificate, if the issuer is listed in issuers
        X509Certificate[] certchain = getCertificateChain("user");
        if (certchain == null) {
          return null;
        } else {
          // Compare the issuer of the last (root-most) certificate in our chain against
          // the authorities advertised by the server.
          X500Principal ourissuer = certchain[certchain.length - 1].getIssuerX500Principal();
          boolean found = false;
          for (Principal issuer : issuers) {
            if (ourissuer.equals(issuer)) {
              found = true;
            }
          }
          return (found ? "user" : null);
        }
      }
    }
  }

  @Override
  public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
    return null; // We are not a server
  }

  @Override
  public X509Certificate[] getCertificateChain(String alias) {
    if (cert == null && certfile != null) {
      // If certfile is null, we do not load the certificate
      // The certificate must be loaded
      CertificateFactory cf;
      try {
        cf = CertificateFactory.getInstance("X.509");
      } catch (CertificateException ex) {
        // For some strange reason it throws CertificateException instead of
        // NoSuchAlgorithmException...
        error = new RedshiftException(GT.tr(
            "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
            RedshiftState.CONNECTION_FAILURE, ex);
        return null;
      }
      Collection<? extends Certificate> certs;
      // Fix: the original leaked this FileInputStream; close it on every path.
      FileInputStream certStream = null;
      try {
        certStream = new FileInputStream(certfile);
        certs = cf.generateCertificates(certStream);
      } catch (FileNotFoundException ioex) {
        if (!defaultfile) { // It is not an error if there is no file at the default location
          error = new RedshiftException(
              GT.tr("Could not open SSL certificate file {0}.", certfile),
              RedshiftState.CONNECTION_FAILURE, ioex);
        }
        return null;
      } catch (CertificateException gsex) {
        error = new RedshiftException(GT.tr("Loading the SSL certificate {0} into a KeyManager failed.",
            certfile), RedshiftState.CONNECTION_FAILURE, gsex);
        return null;
      } finally {
        if (certStream != null) {
          try {
            certStream.close();
          } catch (IOException ignored) {
            // best-effort close
          }
        }
      }
      cert = certs.toArray(new X509Certificate[0]);
    }
    return cert;
  }

  @Override
  public String[] getClientAliases(String keyType, Principal[] issuers) {
    String alias = chooseClientAlias(new String[]{keyType}, issuers, (Socket) null);
    return (alias == null ? new String[]{} : new String[]{alias});
  }

  /** Reads the whole file at {@code path} into a byte array. */
  private static byte[] readFileFully(String path) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(path, "r");
    try {
      byte[] ret = new byte[(int) raf.length()];
      raf.readFully(ret);
      return ret;
    } finally {
      raf.close();
    }
  }

  @Override
  public PrivateKey getPrivateKey(String alias) {
    try {
      if (key == null && keyfile != null) {
        // If keyfile is null, we do not load the key
        // The private key must be loaded
        if (cert == null) { // We need the certificate for the algorithm
          if (getCertificateChain("user") == null) {
            return null; // getCertificateChain failed...
          }
        }
        byte[] keydata;
        try {
          keydata = readFileFully(keyfile);
        } catch (FileNotFoundException ex) {
          if (!defaultfile) {
            // It is not an error if there is no file at the default location
            throw ex;
          }
          return null;
        }
        // The key algorithm must match the certificate's public key algorithm.
        KeyFactory kf = KeyFactory.getInstance(cert[0].getPublicKey().getAlgorithm());
        try {
          // First try the key as an unencrypted PKCS#8 blob.
          KeySpec pkcs8KeySpec = new PKCS8EncodedKeySpec(keydata);
          key = kf.generatePrivate(pkcs8KeySpec);
        } catch (InvalidKeySpecException ex) {
          // The key might be password protected
          EncryptedPrivateKeyInfo ePKInfo = new EncryptedPrivateKeyInfo(keydata);
          Cipher cipher;
          try {
            cipher = Cipher.getInstance(ePKInfo.getAlgName());
          } catch (NoSuchPaddingException npex) {
            // Why is it not a subclass of NoSuchAlgorithmException?
            throw new NoSuchAlgorithmException(npex.getMessage(), npex);
          }
          // We call back for the password
          PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
          try {
            cbh.handle(new Callback[]{pwdcb});
          } catch (UnsupportedCallbackException ucex) {
            if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
                && ("Console is not available".equals(ucex.getMessage()))) {
              error = new RedshiftException(GT
                  .tr("Could not read password for SSL key file, console is not available."),
                  RedshiftState.CONNECTION_FAILURE, ucex);
            } else {
              error =
                  new RedshiftException(
                      GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
                          cbh.getClass().getName()),
                      RedshiftState.CONNECTION_FAILURE, ucex);
            }
            return null;
          }
          try {
            PBEKeySpec pbeKeySpec = new PBEKeySpec(pwdcb.getPassword());
            // PBEKeySpec copies the password, so it is safe to clear the callback now.
            pwdcb.clearPassword();
            // Now create the Key from the PBEKeySpec
            SecretKeyFactory skFac = SecretKeyFactory.getInstance(ePKInfo.getAlgName());
            Key pbeKey = skFac.generateSecret(pbeKeySpec);
            // Extract the iteration count and the salt
            AlgorithmParameters algParams = ePKInfo.getAlgParameters();
            cipher.init(Cipher.DECRYPT_MODE, pbeKey, algParams);
            // Decrypt the encryped private key into a PKCS8EncodedKeySpec
            KeySpec pkcs8KeySpec = ePKInfo.getKeySpec(cipher);
            key = kf.generatePrivate(pkcs8KeySpec);
          } catch (GeneralSecurityException ikex) {
            error = new RedshiftException(
                GT.tr("Could not decrypt SSL key file {0}.", keyfile),
                RedshiftState.CONNECTION_FAILURE, ikex);
            return null;
          }
        }
      }
    } catch (IOException ioex) {
      error = new RedshiftException(GT.tr("Could not read SSL key file {0}.", keyfile),
          RedshiftState.CONNECTION_FAILURE, ioex);
    } catch (NoSuchAlgorithmException ex) {
      error = new RedshiftException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
          ex.getMessage()), RedshiftState.CONNECTION_FAILURE, ex);
      return null;
    }
    return key;
  }

  @Override
  public String[] getServerAliases(String keyType, Principal[] issuers) {
    return new String[]{};
  }
}
| 8,306 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/RedshiftjdbcHostnameVerifier.java | /*
* Copyright (c) 2018, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import java.net.IDN;
import java.security.cert.CertificateParsingException;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import javax.naming.InvalidNameException;
import javax.naming.ldap.LdapName;
import javax.naming.ldap.Rdn;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.net.ssl.SSLSession;
import javax.security.auth.x500.X500Principal;
public class RedshiftjdbcHostnameVerifier implements HostnameVerifier {
private static final RedshiftLogger logger = RedshiftLogger.getDriverLogger();
public static final RedshiftjdbcHostnameVerifier INSTANCE = new RedshiftjdbcHostnameVerifier();
private static final int TYPE_DNS_NAME = 2;
private static final int TYPE_IP_ADDRESS = 7;
/**
 * Orders hostname patterns from least specific to most specific: more dots win,
 * fewer wildcards win, and longer patterns win, in that order of precedence.
 */
public static final Comparator<String> HOSTNAME_PATTERN_COMPARATOR = new Comparator<String>() {
  /** Returns how many times {@code ch} occurs in {@code value}. */
  private int countChars(String value, char ch) {
    int occurrences = 0;
    for (int i = 0; i < value.length(); i++) {
      if (value.charAt(i) == ch) {
        occurrences++;
      }
    }
    return occurrences;
  }

  @Override
  public int compare(String o1, String o2) {
    // The more the dots the better: a.b.c.xyz.com is more specific than xyz.com
    int byDots = Integer.compare(countChars(o1, '.'), countChars(o2, '.'));
    if (byDots != 0) {
      return byDots;
    }
    // The less the stars the better: xyz.com is more specific than *.*.xyz.com
    int byStars = Integer.compare(countChars(o2, '*'), countChars(o1, '*'));
    if (byStars != 0) {
      return byStars;
    }
    // The longer the better: xyz.org is more specific than sql.org
    return Integer.compare(o1.length(), o2.length());
  }
};
/**
 * Verifies that the server's certificate matches {@code hostname}, per RFC 6125:
 * DNS/IP Subject Alternative Names are checked first; the Common Name is consulted
 * only when the certificate carries no DNS SAN entries at all.
 */
@Override
public boolean verify(String hostname, SSLSession session) {
  X509Certificate[] peerCerts;
  try {
    peerCerts = (X509Certificate[]) session.getPeerCertificates();
  } catch (SSLPeerUnverifiedException e) {
    if (RedshiftLogger.isEnable() && logger != null)
      logger.log(LogLevel.ERROR,
          GT.tr("Unable to parse X509Certificate for hostname {0}", hostname), e);
    return false;
  }
  if (peerCerts == null || peerCerts.length == 0) {
    if (RedshiftLogger.isEnable() && logger != null)
      logger.log(LogLevel.ERROR,
          GT.tr("No certificates found for hostname {0}", hostname));
    return false;
  }
  String canonicalHostname;
  if (hostname.startsWith("[") && hostname.endsWith("]")) {
    // IPv6 address like [2001:db8:0:1:1:1:1:1] — strip the brackets.
    canonicalHostname = hostname.substring(1, hostname.length() - 1);
  } else {
    // This converts unicode domain name to ASCII
    try {
      canonicalHostname = IDN.toASCII(hostname);
      if (RedshiftLogger.isEnable() && logger != null) {
        logger.log(LogLevel.DEBUG, "Canonical host name for {0} is {1}",
            new Object[]{hostname, canonicalHostname});
      }
    } catch (IllegalArgumentException e) {
      // e.g. hostname is invalid
      if (RedshiftLogger.isEnable() && logger != null)
        logger.log(LogLevel.ERROR,
            GT.tr("Hostname {0} is invalid", hostname), e);
      return false;
    }
  }
  // The server's own certificate is the first in the peer chain.
  X509Certificate serverCert = peerCerts[0];
  // Check for Subject Alternative Names (see RFC 6125)
  Collection<List<?>> subjectAltNames;
  try {
    subjectAltNames = serverCert.getSubjectAlternativeNames();
    if (subjectAltNames == null) {
      subjectAltNames = Collections.emptyList();
    }
  } catch (CertificateParsingException e) {
    if (RedshiftLogger.isEnable() && logger != null)
      logger.log(LogLevel.ERROR,
          GT.tr("Unable to parse certificates for hostname {0}", hostname), e);
    return false;
  }
  boolean anyDnsSan = false;
  /*
   * Each item in the SAN collection is a 2-element list.
   * See {@link X509Certificate#getSubjectAlternativeNames}
   * The first element in each list is a number indicating the type of entry.
   */
  for (List<?> sanItem : subjectAltNames) {
    if (sanItem.size() != 2) {
      continue;
    }
    Integer sanType = (Integer) sanItem.get(0);
    if (sanType == null) {
      // just in case
      continue;
    }
    if (sanType != TYPE_IP_ADDRESS && sanType != TYPE_DNS_NAME) {
      continue;
    }
    String san = (String) sanItem.get(1);
    if (sanType == TYPE_IP_ADDRESS && san.startsWith("*")) {
      // Wildcards should not be present in the IP Address field
      continue;
    }
    anyDnsSan |= sanType == TYPE_DNS_NAME;
    if (verifyHostName(canonicalHostname, san)) {
      if (RedshiftLogger.isEnable() && logger != null) {
        logger.log(LogLevel.DEBUG,
            GT.tr("Server name validation pass for {0}, subjectAltName {1}", hostname, san));
      }
      return true;
    }
  }
  if (anyDnsSan) {
    /*
     * RFC2818, section 3.1 (I bet you won't recheck :)
     * If a subjectAltName extension of type dNSName is present, that MUST
     * be used as the identity. Otherwise, the (most specific) Common Name
     * field in the Subject field of the certificate MUST be used. Although
     * the use of the Common Name is existing practice, it is deprecated and
     * Certification Authorities are encouraged to use the dNSName instead.
     */
    if (RedshiftLogger.isEnable() && logger != null)
      logger.log(LogLevel.ERROR,
          GT.tr("Server name validation failed: certificate for host {0} dNSName entries subjectAltName,"
              + " but none of them match. Assuming server name validation failed", hostname));
    return false;
  }
  // Last attempt: no DNS Subject Alternative Name entries detected, try common name
  LdapName dn;
  try {
    dn = new LdapName(serverCert.getSubjectX500Principal().getName(X500Principal.RFC2253));
  } catch (InvalidNameException e) {
    if (RedshiftLogger.isEnable() && logger != null)
      logger.log(LogLevel.ERROR,
          GT.tr("Server name validation failed: unable to extract common name"
              + " from X509Certificate for hostname {0}", hostname), e);
    return false;
  }
  // Collect every CN RDN from the subject DN; there is usually exactly one.
  List<String> commonNames = new ArrayList<String>(1);
  for (Rdn rdn : dn.getRdns()) {
    if ("CN".equals(rdn.getType())) {
      commonNames.add((String) rdn.getValue());
    }
  }
  if (commonNames.isEmpty()) {
    if (RedshiftLogger.isEnable() && logger != null)
      logger.log(LogLevel.ERROR,
          GT.tr("Server name validation failed: certificate for hostname {0} has no DNS subjectAltNames,"
              + " and it CommonName is missing as well",
              hostname));
    return false;
  }
  if (commonNames.size() > 1) {
    /*
     * RFC2818, section 3.1
     * If a subjectAltName extension of type dNSName is present, that MUST
     * be used as the identity. Otherwise, the (most specific) Common Name
     * field in the Subject field of the certificate MUST be used
     *
     * The sort is from less specific to most specific.
     */
    Collections.sort(commonNames, HOSTNAME_PATTERN_COMPARATOR);
  }
  // After sorting, the last entry is the most specific CN; only that one is checked.
  String commonName = commonNames.get(commonNames.size() - 1);
  boolean result = verifyHostName(canonicalHostname, commonName);
  if (!result) {
    if (RedshiftLogger.isEnable() && logger != null)
      logger.log(LogLevel.ERROR,
          GT.tr("Server name validation failed: hostname {0} does not match common name {1}",
              hostname, commonName));
  }
  return result;
}
public boolean verifyHostName(String hostname, String pattern) {
if (hostname == null || pattern == null) {
return false;
}
int lastStar = pattern.lastIndexOf('*');
if (lastStar == -1) {
// No wildcard => just compare hostnames
return hostname.equalsIgnoreCase(pattern);
}
if (lastStar > 0) {
// Wildcards like foo*.com are not supported yet
return false;
}
if (pattern.indexOf('.') == -1) {
// Wildcard certificates should contain at least one dot
return false;
}
// pattern starts with *, so hostname should be at least (pattern.length-1) long
if (hostname.length() < pattern.length() - 1) {
return false;
}
// Use case insensitive comparison
final boolean ignoreCase = true;
// Below code is "hostname.endsWithIgnoreCase(pattern.withoutFirstStar())"
// E.g. hostname==sub.host.com; pattern==*.host.com
// We need to start the offset of ".host.com" in hostname
// For this we take hostname.length() - pattern.length()
// and +1 is required since pattern is known to start with *
int toffset = hostname.length() - pattern.length() + 1;
// Wildcard covers just one domain level
// a.b.c.com should not be covered by *.c.com
if (hostname.lastIndexOf('.', toffset - 1) >= 0) {
// If there's a dot in between 0..toffset
return false;
}
return hostname.regionMatches(ignoreCase, toffset,
pattern, 1, pattern.length() - 1);
}
}
| 8,307 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/LibPQFactory.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.jdbc.SslMode;
import com.amazon.redshift.ssl.NonValidatingFactory.NonValidatingTM;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.ObjectFactory;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.Console;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.security.GeneralSecurityException;
import java.security.KeyManagementException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;
import java.util.Properties;
import javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
/**
* Provide an SSLSocketFactory that is compatible with the libpq behaviour.
*/
public class LibPQFactory extends WrappedFactory {
/**
* The system property to set/get the trustore path.
*/
private static final String TRUSTSTORE_PROPERTY = "javax.net.ssl.trustStore";
/**
* The system property to set/get the trustore passphrase.
*/
private static final String TRUSTSTORE_PWD_PROPERTY ="javax.net.ssl.trustStorePassword";
KeyManager km;
boolean defaultfile;
private CallbackHandler getCallbackHandler(Properties info) throws RedshiftException {
// Determine the callback handler
CallbackHandler cbh;
String sslpasswordcallback = RedshiftProperty.SSL_PASSWORD_CALLBACK.get(info);
if (sslpasswordcallback != null) {
try {
cbh = ObjectFactory.instantiate(CallbackHandler.class,sslpasswordcallback, info, false, null);
} catch (Exception e) {
throw new RedshiftException(
GT.tr("The password callback class provided {0} could not be instantiated.",
sslpasswordcallback),
RedshiftState.CONNECTION_FAILURE, e);
}
} else {
cbh = new ConsoleCallbackHandler(RedshiftProperty.SSL_PASSWORD.get(info));
}
return cbh;
}
private void initPk8(String sslkeyfile, String defaultdir, Properties info) throws RedshiftException {
// Load the client's certificate and key
String sslcertfile = RedshiftProperty.SSL_CERT.get(info);
if (sslcertfile == null) { // Fall back to default
defaultfile = true;
sslcertfile = defaultdir + "redshift.crt";
}
// If the properties are empty, give null to prevent client key selection
km = new LazyKeyManager(("".equals(sslcertfile) ? null : sslcertfile),
("".equals(sslkeyfile) ? null : sslkeyfile), getCallbackHandler(info), defaultfile);
}
  /**
   * Initializes the key manager for a PKCS#12 (".p12") client key store.
   * The callback handler supplies the key store passphrase on demand.
   *
   * @param sslkeyfile path to the PKCS#12 key store file
   * @param info connection properties (used to resolve the passphrase callback)
   * @throws RedshiftException if the passphrase callback cannot be instantiated
   */
  private void initP12(String sslkeyfile, Properties info) throws RedshiftException {
    km = new PKCS12KeyManager(sslkeyfile, getCallbackHandler(info));
  }
/**
* @param info the connection parameters The following parameters are used:
* sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
* @throws RedshiftException if security error appears when initializing factory
*/
public LibPQFactory(Properties info) throws RedshiftException {
try {
SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
// Determining the default file location
String pathsep = System.getProperty("file.separator");
String defaultdir;
if (System.getProperty("os.name").toLowerCase().contains("windows")) { // It is Windows
defaultdir = System.getenv("APPDATA") + pathsep + "redshift" + pathsep;
} else {
defaultdir = System.getProperty("user.home") + pathsep + ".redshift" + pathsep;
}
String sslkeyfile = RedshiftProperty.SSL_KEY.get(info);
if (sslkeyfile == null) { // Fall back to default
defaultfile = true;
sslkeyfile = defaultdir + "redshift.pk8";
}
if (sslkeyfile.endsWith("pk8")) {
initPk8(sslkeyfile, defaultdir, info);
}
if (sslkeyfile.endsWith("p12")) {
initP12(sslkeyfile, info);
}
TrustManager[] tm;
SslMode sslMode = SslMode.of(info);
if (!sslMode.verifyCertificate()) {
// server validation is not required
tm = new TrustManager[]{new NonValidatingTM()};
} else {
String sslTrustStorePath = RedshiftProperty.SSL_TRUSTSTORE_PATH_KEY.get(info);
String sslrootcertfile = RedshiftProperty.SSL_ROOT_CERT.get(info);
String sslTrustStorePwd = RedshiftProperty.SSL_TRUSTSTORE_PWD_KEY.get(info);
// Load the server certificate
if (null != sslTrustStorePath)
{
tm = getTrustManagerWithDefinedTrustStore(sslTrustStorePath, sslTrustStorePwd);
}
else if(null != sslrootcertfile)
{
tm = getTrustManagerWithImportedCertificate(sslrootcertfile);
}
else
{
tm = getDefaultTrustManager();
}
/* The original root.crt code start ---
TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX");
KeyStore ks;
try {
ks = KeyStore.getInstance("jks");
} catch (KeyStoreException e) {
// this should never happen
throw new NoSuchAlgorithmException("jks KeyStore not available");
}
// String sslrootcertfile = RedshiftProperty.SSL_ROOT_CERT.get(info);
if (sslrootcertfile == null) { // Fall back to default
sslrootcertfile = defaultdir + "root.crt";
}
FileInputStream fis;
try {
fis = new FileInputStream(sslrootcertfile); // NOSONAR
} catch (FileNotFoundException ex) {
throw new RedshiftException(
GT.tr("Could not open SSL root certificate file {0}.", sslrootcertfile),
RedshiftState.CONNECTION_FAILURE, ex);
}
try {
CertificateFactory cf = CertificateFactory.getInstance("X.509");
// Certificate[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{}); //Does
// not work in java 1.4
Object[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{});
ks.load(null, null);
for (int i = 0; i < certs.length; i++) {
ks.setCertificateEntry("cert" + i, (Certificate) certs[i]);
}
tmf.init(ks);
} catch (IOException ioex) {
throw new RedshiftException(
GT.tr("Could not read SSL root certificate file {0}.", sslrootcertfile),
RedshiftState.CONNECTION_FAILURE, ioex);
} catch (GeneralSecurityException gsex) {
throw new RedshiftException(
GT.tr("Loading the SSL root certificate {0} into a TrustManager failed.",
sslrootcertfile),
RedshiftState.CONNECTION_FAILURE, gsex);
} finally {
try {
fis.close();
} catch (IOException e) {
// ignore
}
}
tm = tmf.getTrustManagers();
--- The original root.crt code end. */
}
// finally we can initialize the context
try {
ctx.init(new KeyManager[]{km}, tm, null);
} catch (KeyManagementException ex) {
throw new RedshiftException(GT.tr("Could not initialize SSL context."),
RedshiftState.CONNECTION_FAILURE, ex);
}
factory = ctx.getSocketFactory();
} catch (NoSuchAlgorithmException ex) {
throw new RedshiftException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
ex.getMessage()), RedshiftState.CONNECTION_FAILURE, ex);
}
}
/**
* Propagates any exception from {@link LazyKeyManager}.
*
* @throws RedshiftException if there is an exception to propagate
*/
public void throwKeyManagerException() throws RedshiftException {
if (km != null) {
if (km instanceof LazyKeyManager) {
((LazyKeyManager)km).throwKeyManagerException();
}
if (km instanceof PKCS12KeyManager) {
((PKCS12KeyManager)km).throwKeyManagerException();
}
}
}
/**
* Initialize and return the trust manager with SSLtruststore passed in from the user
*
* @return TrustManager[] Array of initialized trust managers
* @throws RedshiftException
*/
private TrustManager[] getTrustManagerWithDefinedTrustStore(String sslTrustStorePath,
String sslTrustStorePwd) throws RedshiftException
{
// The Keystore containing certificates
KeyStore truststore = null;
// The input stream to read in the jks file
FileInputStream trustStoreSource = null;
try
{
trustStoreSource = new FileInputStream(sslTrustStorePath);
//Load the trust store using the trust store location provided
truststore = KeyStore.getInstance(KeyStore.getDefaultType());
truststore.load(
trustStoreSource,
sslTrustStorePwd != null ?
sslTrustStorePwd.toCharArray() : null);
// Initialize the TrustManagerFactory
TrustManagerFactory tmf =
TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init(truststore);
return tmf.getTrustManagers();
}
catch (Exception e)
{
// Error retrieving the available trust managers
throw new RedshiftException(
GT.tr("Error retrieving the available trust managers {0}.", sslTrustStorePath),
RedshiftState.CONNECTION_FAILURE, e);
}
finally
{
if (trustStoreSource != null)
{
try
{
trustStoreSource.close();
}
catch (IOException e)
{
// Ignore
}
}
}
}
  /**
   * Returns the trust managers for a temporary truststore (a copy of the JVM default
   * truststore) with the given certificate imported into it.
   *
   * @param sslRootCert The path to the certificate file. The file must be in either
   *        PEM or DER format.
   * @return the trust managers backed by the augmented truststore
   * @throws RedshiftException If an error occurs.
   */
  private TrustManager[] getTrustManagerWithImportedCertificate(String sslRootCert)
      throws RedshiftException
  {
    KeyStore truststore = getDefaultKeystore();

    try
    {
      Certificate[] chain = getCertificateChain(sslRootCert);

      // Add the certificate to the truststore. Only the first certificate of the
      // chain is registered, keyed by the file path.
      truststore.setCertificateEntry(sslRootCert, chain[0]);
    }
    catch (Exception e)
    {
      // Error loading the certificate file.
      throw new RedshiftException(
          GT.tr("Error loading the certificate file {0}.", sslRootCert),
          RedshiftState.CONNECTION_FAILURE, e);
    }

    return getTrustManager(truststore);
  }
  /**
   * Returns the JVM default keystore (or the one named by the
   * {@code javax.net.ssl.trustStore} system property), with the bundled Redshift CA
   * certificates added when present on the classpath. On any load failure it falls
   * back to {@link #fallbackKeyStores(String, String, Exception)}.
   *
   * @return the loaded keystore
   * @throws RedshiftException If an error occurs.
   */
  private KeyStore getDefaultKeystore() throws RedshiftException
  {
    InputStream keystoreStream = null;
    String passphrase = null;

    // Honor the standard JSSE system properties first.
    String keystorePath = System.getProperty(TRUSTSTORE_PROPERTY);

    passphrase = System.getProperty(TRUSTSTORE_PWD_PROPERTY);

    if (null == keystorePath)
    {
      // Default keystore : ${JAVA_HOME}/lib/security/cacerts
      StringBuilder trustorePath = new StringBuilder();
      trustorePath.append(System.getProperty("java.home"));
      trustorePath.append(File.separatorChar);
      trustorePath.append("lib");
      trustorePath.append(File.separatorChar);
      trustorePath.append("security");
      trustorePath.append(File.separatorChar);
      trustorePath.append("cacerts");

      keystorePath = trustorePath.toString();
    }

    try
    {
      keystoreStream = new FileInputStream(new File(keystorePath));
    }
    catch (Exception e)
    {
      // Error retrieving the available trust managers
      throw new RedshiftException(
          GT.tr("Error loading the keystore {0}.", keystorePath),
          RedshiftState.CONNECTION_FAILURE, e);
    }

    try
    {
      // Load the keystore
      KeyStore keystore = KeyStore.getInstance(KeyStore.getDefaultType());
      char[] passphraseArray = null;

      if (null != passphrase)
      {
        passphraseArray = passphrase.toCharArray();
      }
      keystore.load(keystoreStream, passphraseArray);
      keystoreStream.close();

      // Merge the CA certificates shipped with the driver, if present on the classpath.
      loadDefaultCA(keystore, "redshift.crt");
      loadDefaultCA(keystore, "bjs.redshift.crt");
      loadDefaultCA(keystore, "pdt.redshift.crt");

      return keystore;
    }
    catch (Exception e)
    {
      // NOTE(review): keystoreStream is not closed on this path before the fallback
      // reopens the file -- consider closing it in a finally block.
      return fallbackKeyStores(keystorePath, passphrase, e);
    }
  }
  /*
   * BouncyCastle is very strict about key store type. JDK8 has default type JKS,
   * while JDK11 has default type PKCS12.
   * So JDK11 with BouncyCastle provider throws error - java.io.IOException: stream does not represent a PKCS12 key store.
   * This fallback mechanism fix the issue as it tries different key store types.
   *
   * Each concrete keystore type is attempted in turn; the first one that loads wins.
   * If none succeeds, the ORIGINAL failure (originalEx) is reported to the caller.
   */
  private KeyStore fallbackKeyStores(String keystorePath,
      String passphrase,
      Exception originalEx) throws RedshiftException
  {
    String[] keystoreTypes = {"JKS", "PKCS12", "JCEKS"};

    for(String keystoreType:keystoreTypes)
    {
      try
      {
        InputStream keystoreStream = null;

        try
        {
          // Reopen the file for every attempt: loading consumes the stream.
          keystoreStream = new FileInputStream(new File(keystorePath));
        }
        catch (Exception e)
        {
          // Error retrieving the available trust managers
          throw new RedshiftException(
              GT.tr("Error loading the keystore {0}.", keystorePath),
              RedshiftState.CONNECTION_FAILURE, e);
        }

        // Load the keystore
        KeyStore keystore = KeyStore.getInstance(keystoreType);
        char[] passphraseArray = null;

        if (null != passphrase)
        {
          passphraseArray = passphrase.toCharArray();
        }
        keystore.load(keystoreStream, passphraseArray);
        keystoreStream.close();

        // Merge the CA certificates shipped with the driver, if present on the classpath.
        loadDefaultCA(keystore, "redshift.crt");
        loadDefaultCA(keystore, "bjs.redshift.crt");
        loadDefaultCA(keystore, "pdt.redshift.crt");

        return keystore;
      }
      catch (RedshiftException rsex)
      {
        // inner exception of stream, propagate to caller
        throw rsex;
      }
      catch (Exception e)
      {
        // Ignore and try another keystore type in the loop.
        // NOTE(review): keystoreStream is not closed when load() fails here -- leak.
      }
    } // Loop

    // Error retrieving the available trust managers
    throw new RedshiftException(
        GT.tr("Error loading the provided keystore."),
        RedshiftState.CONNECTION_FAILURE, originalEx);
  }
/**
* Loads the certificate into the keystore.
*
* @param keystore The keystore.
* @param name The name of the certificate.
* @throws IOException When the file is not found.
* @throws GeneralSecurityException
*/
private void loadDefaultCA(KeyStore keystore, String name)
throws IOException, GeneralSecurityException
{
InputStream is = null;
try
{
is = NonValidatingFactory.class.getResourceAsStream(name);
if (is == null)
{
return;
}
CertificateFactory cf = CertificateFactory.getInstance("X.509");
Certificate cert = cf.generateCertificate(is);
keystore.setCertificateEntry(name, cert);
}
finally
{
if (is != null)
{
is.close();
}
}
}
/**
* Returns a certificate chain with the certificate found at certificatePath added to it.
*
* @param certificatePath The path to the certificate.
*
* @throws RedshiftException If an error occurs.
*/
private Certificate[] getCertificateChain(String certificatePath)
throws RedshiftException
{
Certificate[] chain = {};
try
{
File certificateFile = new File(certificatePath);
if (!certificateFile.isFile() || !certificateFile.exists())
{
throw new RedshiftException(
GT.tr("Error certificate file doesn't found {0}.", certificatePath),
RedshiftState.CONNECTION_FAILURE);
}
InputStream certificateStream = new FileInputStream(certificateFile);
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
chain = certificateFactory.generateCertificates(certificateStream).toArray(chain);
certificateStream.close();
if (0 >= chain.length || null == chain[0])
{
throw new RedshiftException(
GT.tr("Error missing certificate."),
RedshiftState.CONNECTION_FAILURE);
}
}
catch (Exception e)
{
throw new RedshiftException(
GT.tr("Error loading certificate chain."),
RedshiftState.CONNECTION_FAILURE, e);
}
return chain;
}
/**
* Returns the trust managers for the given external Truststore.
*
* @param keystore The keystore
*
* @throws RedshiftException If an error occurs.
*
*/
private TrustManager[] getTrustManager(KeyStore keystore)
throws RedshiftException
{
try
{
// Initialize the TrustManagerFactory
TrustManagerFactory tmf =
TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init(keystore);
return tmf.getTrustManagers();
}
catch (Exception e)
{
// Error retrieving the available trust managers
throw new RedshiftException(
GT.tr("Error retrieving the available trust managers."),
RedshiftState.CONNECTION_FAILURE, e);
}
}
  /**
   * Returns the trust managers for the JVM default truststore (augmented with any
   * bundled Redshift CA certificates).
   *
   * @return the trust managers backed by the default keystore
   * @throws RedshiftException If an error occurs.
   */
  private TrustManager[] getDefaultTrustManager()
      throws RedshiftException
  {
    KeyStore keystore = getDefaultKeystore();
    return getTrustManager(keystore);
  }
/**
* A CallbackHandler that reads the password from the console or returns the password given to its
* constructor.
*/
public static class ConsoleCallbackHandler implements CallbackHandler {
private char[] password = null;
ConsoleCallbackHandler(String password) {
if (password != null) {
this.password = password.toCharArray();
}
}
/**
* Handles the callbacks.
*
* @param callbacks The callbacks to handle
* @throws UnsupportedCallbackException If the console is not available or other than
* PasswordCallback is supplied
*/
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
Console cons = System.console();
if (cons == null && password == null) {
throw new UnsupportedCallbackException(callbacks[0], "Console is not available");
}
for (Callback callback : callbacks) {
if (!(callback instanceof PasswordCallback)) {
throw new UnsupportedCallbackException(callback);
}
PasswordCallback pwdCallback = (PasswordCallback) callback;
if (password != null) {
pwdCallback.setPassword(password);
continue;
}
// It is used instead of cons.readPassword(prompt), because the prompt may contain '%'
// characters
pwdCallback.setPassword(cons.readPassword("%s", pwdCallback.getPrompt()));
}
}
} // ConsoleCallbackHandler class
}
| 8,308 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/WrappedFactory.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import javax.net.ssl.SSLSocketFactory;
/**
 * Provide a wrapper to a real SSLSocketFactory delegating all calls to the contained instance. A
 * subclass needs only provide a constructor for the wrapped SSLSocketFactory.
 */
public abstract class WrappedFactory extends SSLSocketFactory {

  // The concrete factory every call is forwarded to; assigned by subclass constructors.
  protected SSLSocketFactory factory;

  @Override
  public Socket createSocket(InetAddress host, int port) throws IOException {
    return factory.createSocket(host, port);
  }

  @Override
  public Socket createSocket(String host, int port) throws IOException {
    return factory.createSocket(host, port);
  }

  @Override
  public Socket createSocket(String host, int port, InetAddress localHost, int localPort)
      throws IOException {
    return factory.createSocket(host, port, localHost, localPort);
  }

  @Override
  public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort)
      throws IOException {
    return factory.createSocket(address, port, localAddress, localPort);
  }

  @Override
  public Socket createSocket(Socket socket, String host, int port, boolean autoClose)
      throws IOException {
    return factory.createSocket(socket, host, port, autoClose);
  }

  @Override
  public String[] getDefaultCipherSuites() {
    return factory.getDefaultCipherSuites();
  }

  @Override
  public String[] getSupportedCipherSuites() {
    return factory.getSupportedCipherSuites();
  }
}
| 8,309 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/MakeSSL.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.core.SocketFactoryFactory;
import com.amazon.redshift.jdbc.SslMode;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.ObjectFactory;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.IOException;
import java.util.Properties;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
public class MakeSSL extends ObjectFactory {

  /**
   * Upgrades the stream's socket to SSL in place: creates the SSL socket, performs the
   * handshake and, when the SSL mode requires it, verifies the server's name.
   *
   * @param stream the stream whose socket is replaced
   * @param info connection properties controlling the SSL behavior
   * @throws RedshiftException on SSL or verification failure
   * @throws IOException if switching the underlying socket fails
   */
  public static void convert(RedshiftStream stream, Properties info)
      throws RedshiftException, IOException {
    SSLSocketFactory factory = SocketFactoryFactory.getSslSocketFactory(info);
    SSLSocket sslSocket;
    try {
      sslSocket = (SSLSocket) factory.createSocket(stream.getSocket(),
          stream.getHostSpec().getHost(), stream.getHostSpec().getPort(), true);
      // We must invoke the handshake manually, otherwise the exceptions are hidden.
      sslSocket.setUseClientMode(true);
      sslSocket.startHandshake();
    } catch (IOException ex) {
      throw new RedshiftException(GT.tr("SSL error: {0}", ex.getMessage()),
          RedshiftState.CONNECTION_FAILURE, ex);
    }
    if (factory instanceof LibPQFactory) {
      // Surface any exception deferred by the key manager.
      ((LibPQFactory) factory).throwKeyManagerException();
    }

    if (SslMode.of(info).verifyPeerName()) {
      verifyPeerName(stream, info, sslSocket);
    }

    stream.changeSocket(sslSocket, false, info);
  }

  /**
   * Verifies the server identity against the SSL session using either the default
   * RedshiftjdbcHostnameVerifier or a user-supplied HostnameVerifier class.
   */
  private static void verifyPeerName(RedshiftStream stream, Properties info, SSLSocket newConnection)
      throws RedshiftException {
    String verifierClass = RedshiftProperty.SSL_HOSTNAME_VERIFIER.get(info);
    HostnameVerifier verifier;
    if (verifierClass == null) {
      verifier = RedshiftjdbcHostnameVerifier.INSTANCE;
      verifierClass = "RedshiftjdbcHostnameVerifier";
    } else {
      try {
        verifier = instantiate(HostnameVerifier.class, verifierClass, info, false, null);
      } catch (Exception e) {
        throw new RedshiftException(
            GT.tr("The HostnameVerifier class provided {0} could not be instantiated.",
                verifierClass),
            RedshiftState.CONNECTION_FAILURE, e);
      }
    }

    if (!verifier.verify(stream.getHostSpec().getHost(), newConnection.getSession())) {
      throw new RedshiftException(
          GT.tr("The hostname {0} could not be verified by hostnameverifier {1}.",
              stream.getHostSpec().getHost(), verifierClass),
          RedshiftState.CONNECTION_FAILURE);
    }
  }
}
| 8,310 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ssl/jdbc4/LibPQFactory.java | /*
* Copyright (c) 2017, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.ssl.jdbc4;
import com.amazon.redshift.jdbc.SslMode;
import com.amazon.redshift.ssl.RedshiftjdbcHostnameVerifier;
import com.amazon.redshift.util.RedshiftException;
import java.net.IDN;
import java.util.Properties;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSession;
/**
 * @deprecated prefer {@link com.amazon.redshift.ssl.LibPQFactory}
 */
@Deprecated
public class LibPQFactory extends com.amazon.redshift.ssl.LibPQFactory implements HostnameVerifier {

  // Controls whether verify() actually performs peer-name verification.
  private final SslMode sslMode;

  /**
   * @param info the connection parameters The following parameters are used:
   *        sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
   * @throws RedshiftException if security error appears when initializing factory
   * @deprecated prefer {@link com.amazon.redshift.ssl.LibPQFactory}
   */
  @Deprecated
  public LibPQFactory(Properties info) throws RedshiftException {
    super(info);
    sslMode = SslMode.of(info);
  }

  /**
   * Verifies if given hostname matches pattern.
   *
   * @deprecated use {@link RedshiftjdbcHostnameVerifier}
   * @param hostname input hostname
   * @param pattern domain name pattern
   * @return true when domain matches pattern
   */
  @Deprecated
  public static boolean verifyHostName(String hostname, String pattern) {
    final String canonicalHostname;
    if (hostname.startsWith("[") && hostname.endsWith("]")) {
      // IPv6 literal like [2001:db8:0:1:1:1:1:1]: strip the brackets.
      canonicalHostname = hostname.substring(1, hostname.length() - 1);
    } else {
      try {
        // Convert a unicode domain name to its ASCII form.
        canonicalHostname = IDN.toASCII(hostname);
      } catch (IllegalArgumentException e) {
        // e.g. hostname is invalid
        return false;
      }
    }
    return RedshiftjdbcHostnameVerifier.INSTANCE.verifyHostName(canonicalHostname, pattern);
  }

  /**
   * Verifies the server certificate according to the libpq rules. The cn attribute of the
   * certificate is matched against the hostname. If the cn attribute starts with an asterisk (*),
   * it will be treated as a wildcard, and will match all characters except a dot (.). This means
   * the certificate will not match subdomains. If the connection is made using an IP address
   * instead of a hostname, the IP address will be matched (without doing any DNS lookups).
   *
   * @deprecated use PgjdbcHostnameVerifier
   * @param hostname Hostname or IP address of the server.
   * @param session The SSL session.
   * @return true if the certificate belongs to the server, false otherwise.
   * @see RedshiftjdbcHostnameVerifier
   */
  @Deprecated
  public boolean verify(String hostname, SSLSession session) {
    // When the SSL mode does not require peer-name verification, accept unconditionally.
    return !sslMode.verifyPeerName()
        || RedshiftjdbcHostnameVerifier.INSTANCE.verify(hostname, session);
  }
}
| 8,311 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/HostStatus.java | /*
* Copyright (c) 2014, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
/**
 * Known state of a server.
 */
public enum HostStatus {
  // A connection attempt to the host failed.
  ConnectFail,
  // The host accepted a connection; primary/secondary role not (yet) determined.
  ConnectOK,
  // The host is known to be a primary server.
  Primary,
  // The host is known to be a secondary server.
  Secondary
}
| 8,312 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/HostChooser.java | /*
* Copyright (c) 2014, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
import java.util.Iterator;
/**
 * Lists connections in preferred order.
 */
public interface HostChooser extends Iterable<CandidateHost> {
  /**
   * Lists connection hosts in preferred order, most preferred first.
   *
   * @return connection hosts in preferred order.
   */
  @Override
  Iterator<CandidateHost> iterator();
}
| 8,313 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/HostRequirement.java | /*
* Copyright (c) 2014, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
/**
 * Describes the required server type.
 */
public enum HostRequirement {
  // Any host that is not known to be unreachable is acceptable.
  any {
    public boolean allowConnectingTo(HostStatus status) {
      return status != HostStatus.ConnectFail;
    }
  },
  /**
   * @deprecated we no longer use the terms master or slave in the driver, or the Redshift
   * project.
   */
  @Deprecated
  master {
    // Legacy alias: behaves exactly like primary.
    public boolean allowConnectingTo(HostStatus status) {
      return primary.allowConnectingTo(status);
    }
  },
  // Requires a primary host, or one whose role has not been determined yet.
  primary {
    public boolean allowConnectingTo(HostStatus status) {
      return status == HostStatus.Primary || status == HostStatus.ConnectOK;
    }
  },
  // Requires a secondary host, or one whose role has not been determined yet.
  secondary {
    public boolean allowConnectingTo(HostStatus status) {
      return status == HostStatus.Secondary || status == HostStatus.ConnectOK;
    }
  },
  // Accepts any reachable host; choosers try known secondaries first.
  preferSecondary {
    public boolean allowConnectingTo(HostStatus status) {
      return status != HostStatus.ConnectFail;
    }
  };

  // Whether a host in the given known state may be connected to under this requirement.
  public abstract boolean allowConnectingTo(HostStatus status);

  /**
   * <p>The Redshift project has decided not to use the term slave to refer to alternate servers.
   * secondary or standby is preferred. We have arbitrarily chosen secondary.
   * As of Jan 2018 in order not to break existing code we are going to accept both slave or
   * secondary for names of alternate servers.</p>
   *
   * <p>The current policy is to keep accepting this silently but not document slave, or slave preferSlave</p>
   *
   * <p>As of Jul 2018 silently deprecate the use of the word master as well</p>
   *
   * @param targetServerType the value of {@code targetServerType} connection property
   * @return HostRequirement
   */
  public static HostRequirement getTargetServerType(String targetServerType) {
    // Map legacy names: "slave" -> "secondary" and "preferSlave" -> "preferSecondary"
    // (the substring replace covers both), then "master" -> "primary".
    String allowSlave = targetServerType.replace("lave", "econdary").replace("master", "primary");
    return valueOf(allowSlave);
  }
}
| 8,314 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/MultiHostChooser.java | /*
* Copyright (c) 2014, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
import static java.util.Collections.shuffle;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.util.HostSpec;
import com.amazon.redshift.util.RedshiftException;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
/**
* HostChooser that keeps track of known host statuses.
*/
class MultiHostChooser implements HostChooser {
private HostSpec[] hostSpecs;
private final HostRequirement targetServerType;
private int hostRecheckTime;
private boolean loadBalance;
  /**
   * Creates a chooser over the given hosts.
   *
   * @param hostSpecs all configured hosts, in configuration order
   * @param targetServerType required server type for candidate filtering
   * @param info connection properties (hostRecheckSeconds, loadBalanceHosts)
   */
  MultiHostChooser(HostSpec[] hostSpecs, HostRequirement targetServerType,
      Properties info) {
    this.hostSpecs = hostSpecs;
    this.targetServerType = targetServerType;
    try {
      // Convert the recheck interval from seconds to milliseconds.
      hostRecheckTime = RedshiftProperty.HOST_RECHECK_SECONDS.getInt(info) * 1000;
      loadBalance = RedshiftProperty.LOAD_BALANCE_HOSTS.getBoolean(info);
    } catch (RedshiftException e) {
      // Invalid property values surface as unchecked exceptions to the caller.
      throw new RuntimeException(e);
    }
  }
@Override
public Iterator<CandidateHost> iterator() {
Iterator<CandidateHost> res = candidateIterator();
if (!res.hasNext()) {
// In case all the candidate hosts are unavailable or do not match, try all the hosts just in case
List<HostSpec> allHosts = Arrays.asList(hostSpecs);
if (loadBalance) {
allHosts = new ArrayList<HostSpec>(allHosts);
Collections.shuffle(allHosts);
}
res = withReqStatus(targetServerType, allHosts).iterator();
}
return res;
}
private Iterator<CandidateHost> candidateIterator() {
if (targetServerType != HostRequirement.preferSecondary) {
return getCandidateHosts(targetServerType).iterator();
}
// preferSecondary tries to find secondary hosts first
// Note: sort does not work here since there are "unknown" hosts,
// and that "unknown" might turn out to be master, so we should discard that
// if other secondaries exist
List<CandidateHost> secondaries = getCandidateHosts(HostRequirement.secondary);
List<CandidateHost> any = getCandidateHosts(HostRequirement.any);
if (secondaries.isEmpty()) {
return any.iterator();
}
if (any.isEmpty()) {
return secondaries.iterator();
}
if (secondaries.get(secondaries.size() - 1).equals(any.get(0))) {
// When the last secondary's hostspec is the same as the first in "any" list, there's no need
// to attempt to connect it as "secondary"
// Note: this is only an optimization
secondaries = rtrim(1, secondaries);
}
return append(secondaries, any).iterator();
}
private List<CandidateHost> getCandidateHosts(HostRequirement hostRequirement) {
List<HostSpec> candidates =
GlobalHostStatusTracker.getCandidateHosts(hostSpecs, hostRequirement, hostRecheckTime);
if (loadBalance) {
shuffle(candidates);
}
return withReqStatus(hostRequirement, candidates);
}
private List<CandidateHost> withReqStatus(final HostRequirement requirement, final List<HostSpec> hosts) {
return new AbstractList<CandidateHost>() {
@Override
public CandidateHost get(int index) {
return new CandidateHost(hosts.get(index), requirement);
}
@Override
public int size() {
return hosts.size();
}
};
}
private <T> List<T> append(final List<T> a, final List<T> b) {
return new AbstractList<T>() {
@Override
public T get(int index) {
return index < a.size() ? a.get(index) : b.get(index - a.size());
}
@Override
public int size() {
return a.size() + b.size();
}
};
}
private <T> List<T> rtrim(final int size, final List<T> a) {
return new AbstractList<T>() {
@Override
public T get(int index) {
return a.get(index);
}
@Override
public int size() {
return Math.max(0, a.size() - size);
}
};
}
}
| 8,315 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/HostChooserFactory.java | /*
* Copyright (c) 2014, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
import com.amazon.redshift.util.HostSpec;
import java.util.Properties;
/**
* Chooses a {@link HostChooser} instance based on the number of hosts and properties.
*/
public class HostChooserFactory {

  /**
   * Picks the chooser implementation: a trivial single-host chooser when only
   * one host is configured, otherwise the status-tracking multi-host chooser.
   */
  public static HostChooser createHostChooser(HostSpec[] hostSpecs,
      HostRequirement targetServerType, Properties info) {
    boolean singleHost = hostSpecs.length == 1;
    return singleHost
        ? new SingleHostChooser(hostSpecs[0], targetServerType)
        : new MultiHostChooser(hostSpecs, targetServerType, info);
  }
}
| 8,316 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/GlobalHostStatusTracker.java | /*
* Copyright (c) 2014, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
import com.amazon.redshift.util.HostSpec;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Keeps track of HostSpec targets in a global map.
*/
public class GlobalHostStatusTracker {
  // Shared across all connections in the JVM; guarded by its own monitor.
  private static final Map<HostSpec, HostSpecStatus> hostStatusMap =
      new HashMap<HostSpec, HostSpecStatus>();

  /**
   * Store the actual observed host status.
   *
   * @param hostSpec The host whose status is known.
   * @param hostStatus Latest known status for the host.
   */
  public static void reportHostStatus(HostSpec hostSpec, HostStatus hostStatus) {
    // Millisecond timestamp derived from nanoTime; only used for elapsed-time
    // comparisons, never as wall-clock time.
    long observedAt = System.nanoTime() / 1000000;
    synchronized (hostStatusMap) {
      HostSpecStatus tracked = hostStatusMap.get(hostSpec);
      if (tracked == null) {
        tracked = new HostSpecStatus(hostSpec);
        hostStatusMap.put(hostSpec, tracked);
      }
      tracked.status = hostStatus;
      tracked.lastUpdated = observedAt;
    }
  }

  /**
   * Returns a list of candidate hosts that have the required targetServerType.
   *
   * @param hostSpecs The potential list of hosts.
   * @param targetServerType The required target server type.
   * @param hostRecheckMillis How stale information is allowed.
   * @return candidate hosts to connect to.
   */
  static List<HostSpec> getCandidateHosts(HostSpec[] hostSpecs,
      HostRequirement targetServerType, long hostRecheckMillis) {
    long freshnessCutoff = System.nanoTime() / 1000000 - hostRecheckMillis;
    List<HostSpec> result = new ArrayList<HostSpec>(hostSpecs.length);
    synchronized (hostStatusMap) {
      for (HostSpec candidate : hostSpecs) {
        HostSpecStatus tracked = hostStatusMap.get(candidate);
        // Hosts we know nothing about (or whose info is stale) stay candidates,
        // as do hosts whose recorded status satisfies the requirement.
        boolean unknownOrStale = tracked == null || tracked.lastUpdated < freshnessCutoff;
        if (unknownOrStale || targetServerType.allowConnectingTo(tracked.status)) {
          result.add(candidate);
        }
      }
    }
    return result;
  }

  /** Mutable record of the last observed status of one host. */
  static class HostSpecStatus {
    final HostSpec host;
    HostStatus status;
    long lastUpdated;

    HostSpecStatus(HostSpec host) {
      this.host = host;
    }

    @Override
    public String toString() {
      return host.toString() + '=' + status;
    }
  }
}
| 8,317 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/CandidateHost.java | /*
* Copyright (c) 2017, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
import com.amazon.redshift.util.HostSpec;
/**
* Candidate host to be connected.
*/
public class CandidateHost {
  // Host/port pair to attempt the connection against.
  public final HostSpec hostSpec;
  // Requirement the host must satisfy for this attempt (e.g. primary/secondary/any).
  // NOTE: this class does not override equals()/hashCode(); instances compare by identity.
  public final HostRequirement targetServerType;
  public CandidateHost(HostSpec hostSpec, HostRequirement targetServerType) {
    this.hostSpec = hostSpec;
    this.targetServerType = targetServerType;
  }
}
| 8,318 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/hostchooser/SingleHostChooser.java | /*
* Copyright (c) 2014, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.hostchooser;
import com.amazon.redshift.util.HostSpec;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
/**
* Host chooser that returns the single host.
*/
class SingleHostChooser implements HostChooser {
  // Immutable one-element list holding the only connection candidate.
  private final Collection<CandidateHost> singleCandidate;

  SingleHostChooser(HostSpec hostSpec, HostRequirement targetServerType) {
    CandidateHost onlyCandidate = new CandidateHost(hostSpec, targetServerType);
    this.singleCandidate = Collections.singletonList(onlyCandidate);
  }

  /** Iterates over exactly one candidate host. */
  @Override
  public Iterator<CandidateHost> iterator() {
    return singleCandidate.iterator();
  }
}
| 8,319 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/httpclient | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/httpclient/log/IamCustomLogFactory.java | package com.amazon.redshift.httpclient.log;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogConfigurationException;
import org.apache.commons.logging.impl.LogFactoryImpl;
import org.apache.commons.logging.impl.NoOpLog;
/**
* This class provides an implementation of LogFactoryImpl that will prevent any http wire logging.
* This was requested as a security measure to prevent possible interception of user names and
* passwords when connecting with IAM.
*/
public class IamCustomLogFactory extends LogFactoryImpl
{
    /**
     * The logger name to suppress: http wire logs may contain credentials.
     */
    private static final String BANNED = "org.apache.http.wire";

    /**
     * Get the Log indicated by the class name. If trying to get wire logs, block by returning
     * new NoOpLog instance.
     *
     * @param clazz The log class to return.
     */
    @Override
    public Log getInstance(Class clazz) throws LogConfigurationException
    {
        // Null-safe: a null class is delegated to the parent factory to handle.
        if (clazz != null && BANNED.equals(clazz.getName()))
        {
            return new NoOpLog();
        }
        return super.getInstance(clazz);
    }

    /**
     * Get the Log indicated by the name. If trying to get wire logs, block by returning
     * new NoOpLog instance.
     *
     * @param name The name of the log class to return.
     */
    @Override
    public Log getInstance(String name) throws LogConfigurationException
    {
        // Constant-first equals avoids an NPE when name is null; delegation to
        // the parent factory is then responsible for rejecting it.
        if (BANNED.equals(name))
        {
            return new NoOpLog();
        }
        return super.getInstance(name);
    }
}
| 8,320 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/gss/GssAction.java | /*
* Copyright (c) 2008, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.gss;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import com.amazon.redshift.util.ServerErrorMessage;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import java.io.IOException;
import java.security.PrivilegedAction;
// Runs the GSS security-context handshake against the server as a privileged
// action (executed under the authenticated JAAS Subject by MakeGSS).
// Returns null on success, or the failure as an Exception for the caller to rethrow.
class GssAction implements PrivilegedAction<Exception> {

  private RedshiftLogger logger;
  private final RedshiftStream pgStream;      // wire connection to the server
  private final String host;                  // server host, used to build the GSS service name
  private final String user;                  // client principal name
  private final String kerberosServerName;    // service part of "service@host"
  private final boolean useSpnego;            // prefer SPNEGO when the provider supports it
  private final GSSCredential clientCredentials; // pre-acquired credentials, may be null
  private final boolean logServerErrorDetail; // whether server error detail goes into exceptions

  GssAction(RedshiftStream pgStream, GSSCredential clientCredentials, String host, String user,
      String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail,
      RedshiftLogger logger) {
    this.logger = logger;
    this.pgStream = pgStream;
    this.clientCredentials = clientCredentials;
    this.host = host;
    this.user = user;
    this.kerberosServerName = kerberosServerName;
    this.useSpnego = useSpnego;
    this.logServerErrorDetail = logServerErrorDetail;
  }

  // True when the installed GSS provider advertises the SPNEGO mechanism
  // (OID 1.3.6.1.5.5.2).
  private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {

    org.ietf.jgss.Oid spnego = new org.ietf.jgss.Oid("1.3.6.1.5.5.2");
    org.ietf.jgss.Oid[] mechs = manager.getMechs();

    for (Oid mech : mechs) {
      if (mech.equals(spnego)) {
        return true;
      }
    }

    return false;
  }

  @Override
  public Exception run() {
    try {
      GSSManager manager = GSSManager.getInstance();
      GSSCredential clientCreds = null;
      Oid[] desiredMechs = new Oid[1];
      if (clientCredentials == null) {
        // No pre-acquired credentials: choose a mechanism (SPNEGO when requested
        // and available, else Kerberos v5, OID 1.2.840.113554.1.2.2) and obtain
        // initiate-only credentials valid for 8 hours.
        if (useSpnego && hasSpnegoSupport(manager)) {
          desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
        } else {
          desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
        }
        GSSName clientName = manager.createName(user, GSSName.NT_USER_NAME);
        clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
            GSSCredential.INITIATE_ONLY);
      } else {
        // Reuse the credentials found on the current Subject; Kerberos v5 only.
        desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
        clientCreds = clientCredentials;
      }

      // Target service principal in "service@host" host-based form.
      GSSName serverName =
          manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);

      GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
          GSSContext.DEFAULT_LIFETIME);
      secContext.requestMutualAuth(true);

      // Token exchange loop: send each output token to the server in a 'p'
      // (password) message, then read the server's reply until the context
      // is established.
      byte[] inToken = new byte[0];
      byte[] outToken = null;

      boolean established = false;
      while (!established) {
        outToken = secContext.initSecContext(inToken, 0, inToken.length);

        if (outToken != null) {
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " FE=> Password(GSS Authentication Token)");

          // Message: 'p', int32 length (includes the length field itself), token bytes.
          pgStream.sendChar('p');
          pgStream.sendInteger4(4 + outToken.length);
          pgStream.send(outToken);
          pgStream.flush();
        }

        if (!secContext.isEstablished()) {
          int response = pgStream.receiveChar();
          // Error
          switch (response) {
            case 'E':
              // Server error: parse the message and hand it back as an exception.
              int elen = pgStream.receiveInteger4();
              ServerErrorMessage errorMsg
                  = new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));

              if(RedshiftLogger.isEnable())
                logger.log(LogLevel.DEBUG, " <=BE ErrorMessage({0})", errorMsg);

              return new RedshiftException(errorMsg, logServerErrorDetail);
            case 'R':
              // AuthenticationGSSContinue: next input token for initSecContext.
              if(RedshiftLogger.isEnable())
                logger.log(LogLevel.DEBUG, " <=BE AuthenticationGSSContinue");
              int len = pgStream.receiveInteger4();
              int type = pgStream.receiveInteger4();
              // should check type = 8
              inToken = pgStream.receive(len - 8);
              break;
            default:
              // Unknown/unexpected message type.
              return new RedshiftException(GT.tr("Protocol error. Session setup failed."),
                  RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
          }
        } else {
          established = true;
        }
      }

    } catch (IOException e) {
      return e;
    } catch (GSSException gsse) {
      return new RedshiftException(GT.tr("GSS Authentication failed"), RedshiftState.CONNECTION_FAILURE,
          gsse);
    }

    return null;
  }
}
| 8,321 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/gss/GSSCallbackHandler.java | /*
* Copyright (c) 2008, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.gss;
import java.io.IOException;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.TextOutputCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
public class GSSCallbackHandler implements CallbackHandler {

  private final String user;
  private final String password;

  public GSSCallbackHandler(String user, String password) {
    this.user = user;
    this.password = password;
  }

  /**
   * Answers JAAS login callbacks: prints text output, supplies the user name,
   * and supplies the password (failing if none was provided).
   */
  @Override
  public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    for (int i = 0; i < callbacks.length; i++) {
      Callback current = callbacks[i];
      if (current instanceof TextOutputCallback) {
        TextOutputCallback output = (TextOutputCallback) current;
        int messageType = output.getMessageType();
        if (messageType == TextOutputCallback.INFORMATION) {
          System.out.println("INFO: " + output.getMessage());
        } else if (messageType == TextOutputCallback.ERROR) {
          System.out.println("ERROR: " + output.getMessage());
        } else if (messageType == TextOutputCallback.WARNING) {
          System.out.println("WARNING: " + output.getMessage());
        } else {
          throw new IOException("Unsupported message type: " + messageType);
        }
      } else if (current instanceof NameCallback) {
        ((NameCallback) current).setName(user);
      } else if (current instanceof PasswordCallback) {
        // A password is mandatory here: reaching this callback means no
        // usable kerberos ticket was found in the cache.
        if (password == null) {
          throw new IOException("No cached kerberos ticket found and no password supplied.");
        }
        ((PasswordCallback) current).setPassword(password.toCharArray());
      } else {
        throw new UnsupportedCallbackException(current, "Unrecognized Callback");
      }
    }
  }
}
| 8,322 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/gss/MakeGSS.java | /*
* Copyright (c) 2008, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.gss;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import org.ietf.jgss.GSSCredential;
import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.sql.SQLException;
import java.util.Set;
import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
// Entry point for GSS (Kerberos/SPNEGO) authentication: resolves a JAAS
// Subject (reusing cached credentials when present), then runs GssAction
// under that Subject to perform the token exchange with the server.
public class MakeGSS {

  public static void authenticate(RedshiftStream pgStream, String host, String user, String password,
      String jaasApplicationName, String kerberosServerName, boolean useSpnego, boolean jaasLogin,
      boolean logServerErrorDetail, RedshiftLogger logger)
          throws IOException, SQLException {
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " <=BE AuthenticationReqGSS");

    // Defaults when the connection properties did not specify them.
    if (jaasApplicationName == null) {
      jaasApplicationName = "rsjdbc";
    }
    if (kerberosServerName == null) {
      kerberosServerName = "postgres";
    }

    Exception result;
    try {
      boolean performAuthentication = jaasLogin;

      // If the calling thread already carries a Subject with GSS credentials,
      // reuse them and skip the JAAS login entirely.
      GSSCredential gssCredential = null;
      Subject sub = Subject.getSubject(AccessController.getContext());
      if (sub != null) {
        Set<GSSCredential> gssCreds = sub.getPrivateCredentials(GSSCredential.class);
        if (gssCreds != null && !gssCreds.isEmpty()) {
          gssCredential = gssCreds.iterator().next();
          performAuthentication = false;
        }
      }
      if (performAuthentication) {
        // Acquire a Subject via JAAS using the supplied user/password.
        LoginContext lc =
            new LoginContext(jaasApplicationName, new GSSCallbackHandler(user, password));
        lc.login();
        sub = lc.getSubject();
      }

      // Run the handshake as the authenticated Subject; the action returns
      // its failure (if any) as an Exception rather than throwing.
      PrivilegedAction<Exception> action = new GssAction(pgStream, gssCredential, host, user,
          kerberosServerName, useSpnego, logServerErrorDetail, logger);

      result = Subject.doAs(sub, action);
    } catch (Exception e) {
      throw new RedshiftException(GT.tr("GSS Authentication failed"), RedshiftState.CONNECTION_FAILURE, e);
    }

    // Rethrow the action's result with its original type where possible.
    if (result instanceof IOException) {
      throw (IOException) result;
    } else if (result instanceof SQLException) {
      throw (SQLException) result;
    } else if (result != null) {
      throw new RedshiftException(GT.tr("GSS Authentication failed"), RedshiftState.CONNECTION_FAILURE,
          result);
    }
  }
}
| 8,323 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ServerVersion.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.text.NumberFormat;
import java.text.ParsePosition;
/**
* Enumeration for Redshift versions.
*/
public enum ServerVersion implements Version {

  INVALID("0.0.0"),
  v8_0("8.0.2"), // Redshift server version
  v8_2("8.2.0"),
  v8_3("8.3.0"),
  v8_4("8.4.0"),
  v9_0("9.0.0"),
  v9_1("9.1.0"),
  v9_2("9.2.0"),
  v9_3("9.3.0"),
  v9_4("9.4.0"),
  v9_5("9.5.0"),
  v9_6("9.6.0"),
  v10("10"),
  v11("11"),
  v12("12")
  ;

  // Version encoded in numeric XXYYZZ form (e.g. 90401 for 9.4.1).
  private final int version;

  ServerVersion(String version) {
    this.version = parseServerVersionStr(version);
  }

  /**
   * Get a machine-readable version number.
   *
   * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
   */
  @Override
  public int getVersionNum() {
    return version;
  }

  /**
   * <p>Attempt to parse the server version string into an XXYYZZ form version number into a
   * {@link Version}.</p>
   *
   * <p>If the specified version cannot be parsed, the {@link Version#getVersionNum()} will return 0.</p>
   *
   * @param version version string, either dotted ("9.4.1") or pre-parsed numeric ("090401")
   * @return a {@link Version} representing the specified version string.
   */
  public static Version from(String version) {
    final int versionNum = parseServerVersionStr(version);
    return new Version() {
      @Override
      public int getVersionNum() {
        return versionNum;
      }

      @Override
      public boolean equals(Object obj) {
        // Two Versions are equal iff their numeric forms are equal.
        if (obj instanceof Version) {
          return this.getVersionNum() == ((Version) obj).getVersionNum();
        }
        return false;
      }

      @Override
      public int hashCode() {
        // Consistent with equals(): both derive from the numeric form.
        return getVersionNum();
      }

      @Override
      public String toString() {
        return Integer.toString(versionNum);
      }
    };
  }

  /**
   * <p>Attempt to parse the server version string into an XXYYZZ form version number.</p>
   *
   * <p>Returns 0 if the version could not be parsed.</p>
   *
   * <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
   * releases.</p>
   *
   * <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
   * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).</p>
   *
   * <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
   * version part is out of range.</p>
   *
   * @param serverVersion server version in a XXYYZZ form
   * @return server version in number form
   */
  static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
    NumberFormat numformat = NumberFormat.getIntegerInstance();
    numformat.setGroupingUsed(false);
    ParsePosition parsepos = new ParsePosition(0);

    if (serverVersion == null) {
      return 0;
    }

    // Parse up to three dot-separated integer parts; versionParts ends up as
    // the number of parts actually parsed (the loop index stops on the last
    // parsed part, hence the ++ after the loop).
    int[] parts = new int[3];
    int versionParts;
    for (versionParts = 0; versionParts < 3; versionParts++) {
      Number part = (Number) numformat.parseObject(serverVersion, parsepos);
      if (part == null) {
        break;
      }
      parts[versionParts] = part.intValue();
      if (parsepos.getIndex() == serverVersion.length()
          || serverVersion.charAt(parsepos.getIndex()) != '.') {
        break;
      }
      // Skip .
      parsepos.setIndex(parsepos.getIndex() + 1);
    }
    versionParts++;

    if (parts[0] >= 10000) {
      /*
       * Redshift version 1000? I don't think so. We're seeing a version like 90401; return it
       * verbatim, but only if there's nothing else in the version. If there is, treat it as a parse
       * error.
       */
      if (parsepos.getIndex() == serverVersion.length() && versionParts == 1) {
        return parts[0];
      } else {
        throw new NumberFormatException(
            "First major-version part equal to or greater than 10000 in invalid version string: "
                + serverVersion);
      }
    }

    /* #667 - Allow for versions with greater than 3 parts.
    For versions with more than 3 parts, still return 3 parts (4th part ignored for now
    as no functionality is dependent on the 4th part .
    Allows for future versions of the server to utilize more than 3 part version numbers
    without upgrading the jdbc driver */
    if (versionParts >= 3) {
      if (parts[1] > 99) {
        throw new NumberFormatException(
            "Unsupported second part of major version > 99 in invalid version string: "
                + serverVersion);
      }
      if (parts[2] > 99) {
        // Message fixed: this is the third (minor) part, not the second.
        throw new NumberFormatException(
            "Unsupported third (minor) version part > 99 in invalid version string: "
                + serverVersion);
      }
      return (parts[0] * 100 + parts[1]) * 100 + parts[2];
    }

    if (versionParts == 2) {
      // Two-part versions: "10.2" style (major >= 10) encodes as XX00ZZ,
      // "9.4" style encodes as XXYY00.
      if (parts[0] >= 10) {
        return parts[0] * 100 * 100 + parts[1];
      }
      if (parts[1] > 99) {
        throw new NumberFormatException(
            "Unsupported second part of major version > 99 in invalid version string: "
                + serverVersion);
      }
      return (parts[0] * 100 + parts[1]) * 100;
    }

    if (versionParts == 1) {
      // Bare major version ("10") is only meaningful for the new single-number scheme.
      if (parts[0] >= 10) {
        return parts[0] * 100 * 100;
      }
    }

    return 0; /* unknown */
  }
}
| 8,324 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ResultCursor.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
/**
* Abstraction of a cursor over a returned resultset. This is an opaque interface that only provides
* a way to close the cursor; all other operations are done by passing a ResultCursor to
* QueryExecutor methods.
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
public interface ResultCursor {
  /**
   * Close this cursor. This may not immediately free underlying resources but may make it happen
   * more promptly. Closed cursors should not be passed to QueryExecutor methods.
   */
  // Deliberately the only operation: all other cursor work is performed by
  // passing the ResultCursor back to QueryExecutor methods (see class javadoc).
  void close();
}
| 8,325 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/VisibleBufferedInputStream.java | /*
* Copyright (c) 2006, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;
/**
* A faster version of BufferedInputStream. Does no synchronisation and allows direct access to the
* used byte[] buffer.
*
* @author Mikko Tiihonen
*/
public class VisibleBufferedInputStream extends InputStream {

  /**
   * If a direct read to byte array is called that would require a smaller read from the wrapped
   * stream that MINIMUM_READ then first fill the buffer and serve the bytes from there. Larger
   * reads are directly done to the provided byte array.
   */
  private static final int MINIMUM_READ = 1024;

  /**
   * In how large spans is the C string zero-byte scanned.
   */
  private static final int STRING_SCAN_SPAN = 1024;

  /**
   * The wrapped input stream.
   */
  private final InputStream wrapped;

  /**
   * The buffer.
   */
  private byte[] buffer;

  /**
   * Current read position in the buffer.
   */
  private int index;

  /**
   * How far is the buffer filled with valid data.
   */
  private int endIndex;

  /**
   * socket timeout has been requested
   */
  private boolean timeoutRequested = false;

  /**
   * number of bytes read from stream
   */
  private long bytesReadFromStream = 0;

  /**
   * Creates a new buffer around the given stream.
   *
   * @param in The stream to buffer.
   * @param bufferSize The initial size of the buffer; silently raised to MINIMUM_READ.
   */
  public VisibleBufferedInputStream(InputStream in, int bufferSize) {
    wrapped = in;
    buffer = new byte[bufferSize < MINIMUM_READ ? MINIMUM_READ : bufferSize];
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int read() throws IOException {
    if (ensureBytes(1)) {
      return buffer[index++] & 0xFF;
    }
    return -1;
  }

  /**
   * Reads a byte from the buffer without advancing the index pointer.
   *
   * @return byte from the buffer without advancing the index pointer
   * @throws IOException if something wrong happens
   */
  public int peek() throws IOException {
    if (ensureBytes(1)) {
      return buffer[index] & 0xFF;
    }
    return -1;
  }

  /**
   * Reads byte from the buffer without any checks. This method never reads from the underlying
   * stream. Before calling this method the {@link #ensureBytes} method must have been called.
   *
   * @return The next byte from the buffer.
   * @throws ArrayIndexOutOfBoundsException If ensureBytes was not called to make sure the buffer
   *         contains the byte.
   */
  public byte readRaw() {
    return buffer[index++];
  }

  /**
   * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
   * fields.
   *
   * @param n The amount of bytes to ensure exists in buffer
   * @return true if required bytes are available and false if EOF
   * @throws IOException If reading of the wrapped stream failed.
   */
  public boolean ensureBytes(int n) throws IOException {
    return ensureBytes(n, true);
  }

  /**
   * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
   * fields.
   *
   * @param n The amount of bytes to ensure exists in buffer
   * @param block whether or not to block the IO
   * @return true if required bytes are available and false if EOF or the parameter block was false
   *         and socket timeout occurred.
   * @throws IOException If reading of the wrapped stream failed.
   */
  public boolean ensureBytes(int n, boolean block) throws IOException {
    int required = n - endIndex + index;
    while (required > 0) {
      if (!readMore(required, block)) {
        return false;
      }
      required = n - endIndex + index;
    }
    return true;
  }

  /**
   * Reads more bytes into the buffer, growing or compacting the buffer as needed.
   *
   * @param wanted How much should be at least read.
   * @param block whether to block waiting for data.
   * @return True if at least some bytes were read.
   * @throws IOException If reading of the wrapped stream failed.
   */
  private boolean readMore(int wanted, boolean block) throws IOException {
    if (endIndex == index) {
      // Buffer fully drained; restart from the beginning.
      index = 0;
      endIndex = 0;
    }
    int canFit = buffer.length - endIndex;
    if (canFit < wanted) {
      // would the wanted bytes fit if we compacted the buffer
      // and still leave some slack
      if (index + canFit > wanted + MINIMUM_READ) {
        compact();
      } else {
        doubleBuffer();
      }
      canFit = buffer.length - endIndex;
    }
    int read = 0;
    try {
      read = wrapped.read(buffer, endIndex, canFit);
      if (read > 0) {
        bytesReadFromStream += read;
      }
      if (!block && read == 0) {
        return false;
      }
    } catch (SocketTimeoutException e) {
      if (!block) {
        return false;
      }
      if (timeoutRequested) {
        // Caller asked for a timeout: propagate it instead of swallowing.
        throw e;
      }
    }
    if (read < 0) {
      return false;
    }
    endIndex += read;
    return true;
  }

  /**
   * Doubles the size of the buffer.
   */
  private void doubleBuffer() {
    byte[] buf = new byte[buffer.length * 2];
    moveBufferTo(buf);
    buffer = buf;
  }

  /**
   * Compacts the unread bytes of the buffer to the beginning of the buffer.
   */
  private void compact() {
    moveBufferTo(buffer);
  }

  /**
   * Moves bytes from the buffer to the beginning of the destination buffer. Also sets the index and
   * endIndex variables.
   *
   * @param dest The destination buffer.
   */
  private void moveBufferTo(byte[] dest) {
    int size = endIndex - index;
    System.arraycopy(buffer, index, dest, 0, size);
    index = 0;
    endIndex = size;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int read(byte[] to, int off, int len) throws IOException {
    if ((off | len | (off + len) | (to.length - (off + len))) < 0) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return 0;
    }

    // if the read would go to wrapped stream, but would result
    // in a small read then try read to the buffer instead
    int avail = endIndex - index;
    if (len - avail < MINIMUM_READ) {
      ensureBytes(len);
      avail = endIndex - index;
    }

    // first copy from buffer
    if (avail > 0) {
      if (len <= avail) {
        System.arraycopy(buffer, index, to, off, len);
        index += len;
        return len;
      }
      System.arraycopy(buffer, index, to, off, avail);
      len -= avail;
      off += avail;
    }
    int read = avail;

    // good place to reset index because the buffer is fully drained
    index = 0;
    endIndex = 0;

    // then directly from wrapped stream
    do {
      int r;
      try {
        r = wrapped.read(to, off, len);
        if (r > 0) {
          bytesReadFromStream += r;
        }
      } catch (SocketTimeoutException e) {
        if (read == 0 && timeoutRequested) {
          throw e;
        }
        return read;
      }
      if (r <= 0) {
        return (read == 0) ? r : read;
      }
      read += r;
      off += r;
      len -= r;
    } while (len > 0);

    return read;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public long skip(long n) throws IOException {
    if (n <= 0) {
      // Per the InputStream contract, a non-positive count skips nothing.
      // (Previously a negative n corrupted the buffer index via index += n.)
      return 0;
    }
    int avail = endIndex - index;
    if (avail >= n) {
      index += n;
      return n;
    }
    n -= avail;
    index = 0;
    endIndex = 0;
    return avail + wrapped.skip(n);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int available() throws IOException {
    int avail = endIndex - index;
    return avail > 0 ? avail : wrapped.available();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void close() throws IOException {
    wrapped.close();
  }

  /**
   * Returns direct handle to the used buffer. Use the {@link #ensureBytes} to prefill required
   * bytes the buffer and {@link #getIndex} to fetch the current position of the buffer.
   *
   * @return The underlying buffer.
   */
  public byte[] getBuffer() {
    return buffer;
  }

  /**
   * Returns the current read position in the buffer.
   *
   * @return the current read position in the buffer.
   */
  public int getIndex() {
    return index;
  }

  /**
   * Returns the number of bytes read by the stream.
   *
   * @return total bytes consumed from the wrapped stream so far.
   */
  public long getBytesReadFromStream()
  {
    return bytesReadFromStream;
  }

  /**
   * Scans the length of the next null terminated string (C-style string) from the stream.
   * Does not advance the read position.
   *
   * @return The length of the next null terminated string, including the terminator.
   * @throws IOException If reading of stream fails.
   * @throws EOFException If the stream did not contain any null terminators.
   */
  public int scanCStringLength() throws IOException {
    int pos = index;
    while (true) {
      while (pos < endIndex) {
        if (buffer[pos++] == '\0') {
          return pos - index;
        }
      }
      if (!readMore(STRING_SCAN_SPAN, true)) {
        throw new EOFException();
      }
      pos = index;
    }
  }

  public void setTimeoutRequested(boolean timeoutRequested) {
    this.timeoutRequested = timeoutRequested;
  }
}
| 8,326 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Parser.java | /*
* Copyright (c) 2006, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.jdbc.EscapeSyntaxCallMode;
import com.amazon.redshift.jdbc.EscapedFunctions2;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* Basic query parser infrastructure.
* Note: This class should not be considered as pgjdbc public API.
*
* @author Michael Paesold (mpaesold@gmx.at)
* @author Christopher Deckers (chrriis@gmail.com)
*/
public class Parser {
private static final int[] NO_BINDS = new int[0];
/**
 * Parses a JDBC query into Redshift's native format. Several queries might be given if separated
 * by semicolon.
 *
 * <p>This is a single-pass character scanner: quotes, comments and dollar quotes are skipped,
 * {@code ?} placeholders are rewritten to {@code $n}, statements are optionally split on
 * top-level semicolons, and the leading keyword of each statement is classified.</p>
 *
 * @param query jdbc query to parse
 * @param standardConformingStrings whether to allow backslashes to be used as escape characters
 *        in single quote literals
 * @param withParameters whether to replace ?, ? with $1, $2, etc
 * @param splitStatements whether to split statements by semicolon
 * @param isBatchedReWriteConfigured whether re-write optimization is enabled
 * @param isMultiSqlSupport whether multiple SQL commands support is enabled
 * @param returningColumnNames for simple insert, update, delete add returning with given column names
 * @return list of native queries
 * @throws SQLException if unable to add returning clause (invalid column names), if JDBC
 *         ({@code ?}) and Redshift ({@code $n}) markers are mixed in one command, or if
 *         multiple statements are present while multi-SQL support is disabled
 */
public static List<NativeQuery> parseJdbcSql(String query, boolean standardConformingStrings,
    boolean withParameters, boolean splitStatements,
    boolean isBatchedReWriteConfigured,
    boolean isMultiSqlSupport,
    String... returningColumnNames) throws SQLException {
  // Fast path: no parameter rewriting, no splitting, no RETURNING columns to add.
  if (!withParameters && !splitStatements
      && returningColumnNames != null && returningColumnNames.length == 0) {
    return Collections.singletonList(new NativeQuery(query,
      SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)));
  }
  int fragmentStart = 0;
  int inParen = 0;
  char[] aChars = query.toCharArray();
  StringBuilder nativeSql = new StringBuilder(query.length() + 10);
  List<Integer> bindPositions = null; // initialized on demand
  Set<String> redshiftBindNames = null; // initialized on demand
  List<Integer> redshiftParamMarkers = null; // initialized on demand
  List<NativeQuery> nativeQueries = null;
  boolean isCurrentReWriteCompatible = false;
  boolean isValuesFound = false;
  int valuesBraceOpenPosition = -1;
  int valuesBraceClosePosition = -1;
  boolean valuesBraceCloseFound = false;
  boolean isInsertPresent = false;
  boolean isReturningPresent = false;
  boolean isReturningPresentPrev = false;
  SqlCommandType currentCommandType = SqlCommandType.BLANK;
  SqlCommandType prevCommandType = SqlCommandType.BLANK;
  int numberOfStatements = 0;
  boolean whitespaceOnly = true;
  int keyWordCount = 0;
  int keywordStart = -1;
  int keywordEnd = -1;
  // Tracks which parameter-marker style has been seen; mixing both is an error.
  boolean jdbcParameterMarker = false;
  boolean redshiftParameterMarker = false;
  for (int i = 0; i < aChars.length; ++i) {
    char aChar = aChars[i];
    boolean isKeyWordChar = false;
    // ';' is ignored as it splits the queries
    whitespaceOnly &= aChar == ';' || Character.isWhitespace(aChar);
    keywordEnd = i; // parseSingleQuotes, parseDoubleQuotes, etc move index so we keep old value
    switch (aChar) {
      case '\'': // single-quotes
        i = Parser.parseSingleQuotes(aChars, i, standardConformingStrings);
        break;
      case '"': // double-quotes
        i = Parser.parseDoubleQuotes(aChars, i);
        break;
      case '-': // possibly -- style comment
        i = Parser.parseLineComment(aChars, i);
        break;
      case '/': // possibly /* */ style comment
        i = Parser.parseBlockComment(aChars, i);
        break;
      case '$': // possibly dollar quote start
        int savPos = i;
        i = Parser.parseDollarQuotes(aChars, i);
        // PREPARE SQL command has own way of replacing $ marker.
        // Those are not JDBC Bind values but values pass during EXECUTE SQL call.
        // Also check for whether it is part of an identifier or not.
        if (savPos == i
              && withParameters
              && currentCommandType != SqlCommandType.PREPARE
              && keywordStart == -1) {
          // Not a dollar quote: try to read it as a $n parameter marker.
          i = Parser.parseDollarParam(aChars, i);
          if (i != savPos) {
            if(jdbcParameterMarker) {
              // Throw an exception, if application uses $ and ? both as parameter marker.
              throw new RedshiftException(GT.tr("Redshift parameter marker and JDBC parameter marker in same SQL command is not allowed."),
                  RedshiftState.UNEXPECTED_ERROR);
            }
            redshiftParameterMarker = true;
            // Get $ and all digits
            String paramName = new String(aChars, savPos, i - savPos + 1);
            nativeSql.append(aChars, fragmentStart, (i + 1) - fragmentStart);
            fragmentStart = i + 1; // Point at after the last digit
            // We found $n parameter marker
            if (redshiftBindNames == null) {
              redshiftBindNames = new HashSet<String>();
            }
            if (bindPositions == null) {
              bindPositions = new ArrayList<Integer>();
            }
            if (redshiftParamMarkers == null) {
              redshiftParamMarkers = new ArrayList<Integer>();
            }
            // is it unique?
            if (!redshiftBindNames.contains(paramName)) {
              redshiftBindNames.add(paramName);
              int dollarSignPos = nativeSql.length() - (i - savPos) - 1;
              bindPositions.add(dollarSignPos); // Point at $
              redshiftParamMarkers.add(Integer.parseInt(paramName.substring(1)));
            }
          }
        }
        break;
      // case '(' moved below to parse "values(" properly
      case ')':
        inParen--;
        if (inParen == 0 && isValuesFound && !valuesBraceCloseFound) {
          // If original statement is multi-values like VALUES (...), (...), ... then
          // search for the latest closing paren
          valuesBraceClosePosition = nativeSql.length() + i - fragmentStart;
        }
        break;
      case '?':
        nativeSql.append(aChars, fragmentStart, i - fragmentStart);
        if (i + 1 < aChars.length && aChars[i + 1] == '?') /* replace ?? with ? */ {
          nativeSql.append('?');
          i++; // make sure the coming ? is not treated as a bind
        } else {
          if (!withParameters) {
            nativeSql.append('?');
          } else {
            if(redshiftParameterMarker) {
              // Throw an exception, if application uses $ and ? both as parameter marker.
              throw new RedshiftException(GT.tr("Redshift parameter marker and JDBC parameter marker in same SQL command is not allowed."),
                  RedshiftState.UNEXPECTED_ERROR);
            }
            jdbcParameterMarker = true;
            if (bindPositions == null) {
              bindPositions = new ArrayList<Integer>();
            }
            bindPositions.add(nativeSql.length());
            int bindIndex = bindPositions.size();
            nativeSql.append(NativeQuery.bindName(bindIndex));
          }
        }
        fragmentStart = i + 1;
        break;
      case ';':
        // Top-level semicolon terminates the current statement.
        if (inParen == 0) {
          if (!whitespaceOnly) {
            numberOfStatements++;
            nativeSql.append(aChars, fragmentStart, i - fragmentStart);
            nativeSql.append(';');
            whitespaceOnly = true;
          }
          fragmentStart = i + 1;
          if (nativeSql.length() > 0) {
            if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent)) {
              isReturningPresent = true;
            }
            if(!isMultiSqlSupport) {
              // Throw an exception, if application doesn't need multiple SQL commands support.
              throw new RedshiftException(GT.tr("Multiple SQL commands support is disabled."),
                  RedshiftState.UNEXPECTED_ERROR);
            }
            if (splitStatements) {
              if (nativeQueries == null) {
                nativeQueries = new ArrayList<NativeQuery>();
              }
              // Disable batched rewrite if VALUES(...) is absent, incompatible, or
              // binds occur after the closing paren of the VALUES list.
              if (!isValuesFound || !isCurrentReWriteCompatible || valuesBraceClosePosition == -1
                  || (bindPositions != null
                  && valuesBraceClosePosition < bindPositions.get(bindPositions.size() - 1))) {
                valuesBraceOpenPosition = -1;
                valuesBraceClosePosition = -1;
              }
              nativeQueries.add(new NativeQuery(nativeSql.toString(),
                  toIntArray(bindPositions), false,
                  SqlCommand.createStatementTypeInfo(
                      currentCommandType, isBatchedReWriteConfigured, valuesBraceOpenPosition,
                      valuesBraceClosePosition,
                      isReturningPresent, nativeQueries.size()),
                  (redshiftParamMarkers != null) ? toIntArray(redshiftParamMarkers) : null));
            }
          }
          prevCommandType = currentCommandType;
          isReturningPresentPrev = isReturningPresent;
          currentCommandType = SqlCommandType.BLANK;
          isReturningPresent = false;
          if (splitStatements) {
            // Prepare for next query
            if (bindPositions != null) {
              bindPositions.clear();
            }
            nativeSql.setLength(0);
            isValuesFound = false;
            isCurrentReWriteCompatible = false;
            valuesBraceOpenPosition = -1;
            valuesBraceClosePosition = -1;
            valuesBraceCloseFound = false;
          }
        }
        break;
      default:
        if (keywordStart >= 0) {
          // When we are inside a keyword, we need to detect keyword end boundary
          // Note that isKeyWordChar is initialized to false before the switch, so
          // all other characters would result in isKeyWordChar=false
          isKeyWordChar = isIdentifierContChar(aChar);
          break;
        }
        // Not in keyword, so just detect next keyword start
        isKeyWordChar = isIdentifierStartChar(aChar);
        if (isKeyWordChar) {
          keywordStart = i;
          if (valuesBraceOpenPosition != -1 && inParen == 0) {
            // When the statement already has multi-values, stop looking for more of them
            // Since values(?,?),(?,?),... should not contain keywords in the middle
            valuesBraceCloseFound = true;
          }
        }
        break;
    }
    // A keyword just ended (or the input ran out): classify it.
    if (keywordStart >= 0 && (i == aChars.length - 1 || !isKeyWordChar)) {
      int wordLength = (isKeyWordChar ? i + 1 : keywordEnd) - keywordStart;
      if (currentCommandType == SqlCommandType.BLANK) {
        if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) {
          currentCommandType = SqlCommandType.UPDATE;
        } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) {
          currentCommandType = SqlCommandType.DELETE;
        } else if (wordLength == 4 && parseMoveKeyword(aChars, keywordStart)) {
          currentCommandType = SqlCommandType.MOVE;
        } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) {
          currentCommandType = SqlCommandType.SELECT;
        } else if (wordLength == 4 && parseWithKeyword(aChars, keywordStart)) {
          currentCommandType = SqlCommandType.WITH;
        } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) {
          if (!isInsertPresent && (nativeQueries == null || nativeQueries.isEmpty())) {
            // Only allow rewrite for insert command starting with the insert keyword.
            // Else, too many risks of wrong interpretation.
            isCurrentReWriteCompatible = keyWordCount == 0;
            isInsertPresent = true;
            currentCommandType = SqlCommandType.INSERT;
          } else {
            isCurrentReWriteCompatible = false;
          }
        }
        else if (wordLength == 7 && parsePrepareKeyword(aChars, keywordStart)) {
          currentCommandType = SqlCommandType.PREPARE;
        }
      } else if (currentCommandType == SqlCommandType.WITH
          && inParen == 0) {
        // WITH statements take the command type of the top-level DML/SELECT that follows.
        SqlCommandType command = parseWithCommandType(aChars, i, keywordStart, wordLength);
        if (command != null) {
          currentCommandType = command;
        }
      }
      if (inParen != 0 || aChar == ')') {
        // RETURNING and VALUES cannot be present in braces
      } else if (wordLength == 9 && parseReturningKeyword(aChars, keywordStart)) {
        isReturningPresent = true;
      } else if (wordLength == 6 && parseValuesKeyword(aChars, keywordStart)) {
        isValuesFound = true;
      }
      keywordStart = -1;
      keyWordCount++;
    }
    if (aChar == '(') {
      inParen++;
      if (inParen == 1 && isValuesFound && valuesBraceOpenPosition == -1) {
        valuesBraceOpenPosition = nativeSql.length() + i - fragmentStart;
      }
    }
  } // Loop for each char
  // Same rewrite-compatibility check as in the per-statement (';') branch above,
  // applied to the final (or only) statement.
  if (!isValuesFound || !isCurrentReWriteCompatible || valuesBraceClosePosition == -1
      || (bindPositions != null
      && valuesBraceClosePosition < bindPositions.get(bindPositions.size() - 1))) {
    valuesBraceOpenPosition = -1;
    valuesBraceClosePosition = -1;
  }
  if (fragmentStart < aChars.length && !whitespaceOnly) {
    nativeSql.append(aChars, fragmentStart, aChars.length - fragmentStart);
  } else {
    if (numberOfStatements > 1) {
      isReturningPresent = false;
      currentCommandType = SqlCommandType.BLANK;
    } else if (numberOfStatements == 1) {
      isReturningPresent = isReturningPresentPrev;
      currentCommandType = prevCommandType;
    }
  }
  if (nativeSql.length() == 0) {
    return nativeQueries != null ? nativeQueries : Collections.<NativeQuery>emptyList();
  }
  if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent)) {
    isReturningPresent = true;
  }
  NativeQuery lastQuery = new NativeQuery(nativeSql.toString(),
      toIntArray(bindPositions), !splitStatements,
      SqlCommand.createStatementTypeInfo(currentCommandType,
          isBatchedReWriteConfigured, valuesBraceOpenPosition, valuesBraceClosePosition,
          isReturningPresent, (nativeQueries == null ? 0 : nativeQueries.size())),
      (redshiftParamMarkers != null) ? toIntArray(redshiftParamMarkers) : null);
  if (nativeQueries == null) {
    return Collections.singletonList(lastQuery);
  }
  if (!whitespaceOnly) {
    nativeQueries.add(lastQuery);
  }
  return nativeQueries;
}
/**
 * Determines the effective command type of a {@code WITH} statement by inspecting the keyword
 * at {@code keywordStart}. Returns null when the keyword is not UPDATE/DELETE/INSERT/SELECT,
 * or when the keyword is actually a CTE name (i.e. it is followed by {@code AS}).
 *
 * @param aChars the query characters
 * @param i index just past the keyword (scan for a following AS starts here)
 * @param keywordStart start index of the keyword to classify
 * @param wordLength length of the keyword
 * @return the detected command type, or null if none applies
 */
private static SqlCommandType parseWithCommandType(char[] aChars, int i, int keywordStart,
    int wordLength) {
  // This parses `with x as (...) ...`
  // Corner case is `with select as (insert ..) select * from select
  SqlCommandType command;
  if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) {
    command = SqlCommandType.UPDATE;
  } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) {
    command = SqlCommandType.DELETE;
  } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) {
    command = SqlCommandType.INSERT;
  } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) {
    command = SqlCommandType.SELECT;
  } else {
    return null;
  }
  // update/delete/insert/select keyword detected
  // Check if `AS` follows
  int nextInd = i;
  // The loop should skip whitespace and comments
  for (; nextInd < aChars.length; nextInd++) {
    char nextChar = aChars[nextInd];
    if (nextChar == '-') {
      nextInd = Parser.parseLineComment(aChars, nextInd);
    } else if (nextChar == '/') {
      nextInd = Parser.parseBlockComment(aChars, nextInd);
    } else if (Character.isWhitespace(nextChar)) {
      // Skip whitespace
      continue;
    } else {
      break;
    }
  }
  // If "AS <non-identifier-char>" follows, the keyword was a CTE alias, not a command.
  if (nextInd + 2 >= aChars.length
      || (!parseAsKeyword(aChars, nextInd)
      || isIdentifierContChar(aChars[nextInd + 2]))) {
    return command;
  }
  return null;
}
/**
 * Appends a {@code RETURNING} clause with the requested columns to {@code nativeSql},
 * but only for DML statements (INSERT/UPDATE/DELETE/WITH) that do not already carry one.
 *
 * @param nativeSql buffer holding the statement built so far; appended to in place
 * @param currentCommandType type of the current statement
 * @param returningColumnNames columns to return; {@code {"*"}} returns all columns
 * @param isReturningPresent true if the statement already contains RETURNING
 * @return true if a RETURNING clause was appended
 * @throws SQLException if a column name cannot be escaped
 */
private static boolean addReturning(StringBuilder nativeSql, SqlCommandType currentCommandType,
    String[] returningColumnNames, boolean isReturningPresent) throws SQLException {
  // Nothing to add when RETURNING already exists or no columns were requested.
  if (isReturningPresent || returningColumnNames.length == 0) {
    return false;
  }
  boolean supportsReturning = currentCommandType == SqlCommandType.INSERT
      || currentCommandType == SqlCommandType.UPDATE
      || currentCommandType == SqlCommandType.DELETE
      || currentCommandType == SqlCommandType.WITH;
  if (!supportsReturning) {
    return false;
  }
  nativeSql.append("\nRETURNING ");
  if (returningColumnNames.length == 1 && returningColumnNames[0].charAt(0) == '*') {
    nativeSql.append('*');
    return true;
  }
  boolean first = true;
  for (String columnName : returningColumnNames) {
    if (!first) {
      nativeSql.append(", ");
    }
    first = false;
    Utils.escapeIdentifier(nativeSql, columnName);
  }
  return true;
}
/**
 * Converts {@code List<Integer>} to {@code int[]}. Empty and {@code null} lists are converted to
 * an empty array.
 *
 * @param list input list (elements must be non-null)
 * @return output array
 */
private static int[] toIntArray(List<Integer> list) {
  if (list == null || list.isEmpty()) {
    return NO_BINDS;
  }
  int[] result = new int[list.size()];
  int pos = 0;
  for (Integer value : list) {
    result[pos++] = value; // elements are guaranteed non-null by the callers
  }
  return result;
}
/**
 * <p>Find the end of the single-quoted string starting at the given offset.</p>
 *
 * <p>Note: for {@code 'single '' quote in string'}, this method currently returns the offset of
 * first {@code '} character after the initial one. The caller must call the method a second time
 * for the second part of the quoted string.</p>
 *
 * @param query query
 * @param offset start offset (position of the opening quote)
 * @param standardConformingStrings standard conforming strings
 * @return position of the closing quote, or {@code query.length} if unterminated
 */
public static int parseSingleQuotes(final char[] query, int offset,
    boolean standardConformingStrings) {
  // An E'...' escape-string literal always honors backslash escapes, regardless
  // of the standard_conforming_strings setting.
  if (standardConformingStrings
      && offset >= 2
      && (query[offset - 1] == 'e' || query[offset - 1] == 'E')
      && charTerminatesIdentifier(query[offset - 2])) {
    standardConformingStrings = false;
  }
  if (standardConformingStrings) {
    // Backslash is an ordinary character here: scan straight for the next quote.
    while (++offset < query.length) {
      if (query[offset] == '\'') {
        return offset;
      }
    }
  } else {
    // Backslash escapes the following character, so skip over escaped characters.
    while (++offset < query.length) {
      char current = query[offset];
      if (current == '\\') {
        ++offset; // skip the escaped character
      } else if (current == '\'') {
        return offset;
      }
    }
  }
  return query.length;
}
/**
 * <p>Find the end of the double-quoted string starting at the given offset.</p>
 *
 * <p>Note: for {@code "double "" quote in string"}, this method currently
 * returns the offset of first {@code "} character after the initial one. The caller must
 * call the method a second time for the second part of the quoted string.</p>
 *
 * @param query query
 * @param offset start offset (position of the opening quote)
 * @return position of the closing quote, or {@code query.length} if unterminated
 */
public static int parseDoubleQuotes(final char[] query, int offset) {
  for (offset++; offset < query.length; offset++) {
    if (query[offset] == '"') {
      break;
    }
  }
  return offset;
}
/**
 * Test if the dollar character ({@code $}) at the given offset starts a dollar-quoted string and
 * return the offset of the ending dollar character. If it does not start a dollar quote, the
 * original offset is returned unchanged.
 *
 * @param query query
 * @param offset start offset
 * @return offset of the ending dollar character, or the original offset if not a dollar quote
 */
public static int parseDollarQuotes(final char[] query, int offset) {
  // A '$' that continues an identifier (e.g. foo$bar) never opens a dollar quote.
  if (offset + 1 < query.length
      && (offset == 0 || !isIdentifierContChar(query[offset - 1]))) {
    int endIdx = -1;
    if (query[offset + 1] == '$') {
      // "$$" - empty tag
      endIdx = offset + 1;
    } else if (isDollarQuoteStartChar(query[offset + 1])) {
      // "$tag$" - scan the tag up to its closing '$'
      for (int d = offset + 2; d < query.length; ++d) {
        if (query[d] == '$') {
          endIdx = d;
          break;
        } else if (!isDollarQuoteContChar(query[d])) {
          break;
        }
      }
    }
    if (endIdx > 0) {
      // found; note: tag includes start and end $ character
      int tagIdx = offset;
      int tagLen = endIdx - offset + 1;
      offset = endIdx; // loop continues at endIdx + 1
      // Scan forward for an identical tag which closes the quoted body.
      for (++offset; offset < query.length; ++offset) {
        if (query[offset] == '$'
            && subArraysEqual(query, tagIdx, offset, tagLen)) {
          offset += tagLen - 1;
          break;
        }
      }
    }
  }
  return offset;
}
/**
 * Skip all digits for backend parameter marker e.g. $1, $10 etc.
 *
 * @param query User query
 * @param offset start offset (position of the '$')
 * @return offset of the last digit, or {@code offset} unchanged if no digit follows the '$'
 */
private static int parseDollarParam(final char[] query, int offset) {
  // Track the position of the last digit consumed; if no digit follows the '$',
  // the original offset is returned so the caller can tell nothing was matched.
  int lastDigit = offset;
  for (int pos = offset + 1; pos < query.length && Character.isDigit(query[pos]); pos++) {
    lastDigit = pos;
  }
  return lastDigit;
}
/**
 * Test if the {@code -} character at {@code offset} starts a {@code --} style line comment,
 * and return the position of the first {@code \r} or {@code \n} character (or the last
 * character of the query if the comment is unterminated). Returns {@code offset} unchanged
 * when no comment starts here.
 *
 * @param query query
 * @param offset start offset
 * @return position of the first {@code \r} or {@code \n} character
 */
public static int parseLineComment(final char[] query, int offset) {
  // Not a "--" comment: leave the offset untouched.
  if (offset + 1 >= query.length || query[offset + 1] != '-') {
    return offset;
  }
  int pos = offset;
  while (pos + 1 < query.length) {
    pos++;
    if (query[pos] == '\r' || query[pos] == '\n') {
      break;
    }
  }
  return pos;
}
/**
 * Test if the {@code /} character at {@code offset} starts a block comment, and return the
 * position of the last {@code /} character. Block comments nest, per the SQL specification.
 * Returns {@code offset} unchanged when no comment starts here.
 *
 * @param query query
 * @param offset start offset
 * @return position of the last {@code /} character
 */
public static int parseBlockComment(final char[] query, int offset) {
  if (offset + 1 < query.length && query[offset + 1] == '*') {
    // /* /* */ */ nest, according to SQL spec
    int level = 1;
    // Examines the two-character window (query[offset-1], query[offset]) each iteration.
    for (offset += 2; offset < query.length; ++offset) {
      switch (query[offset - 1]) {
        case '*':
          if (query[offset] == '/') {
            --level;
            ++offset; // don't parse / in */* twice
          }
          break;
        case '/':
          if (query[offset] == '*') {
            ++level;
            ++offset; // don't parse * in /*/ twice
          }
          break;
        default:
          break;
      }
      if (level == 0) {
        --offset; // reset position to last '/' char
        break;
      }
    }
  }
  return offset;
}
/**
 * Parse string to check presence of DELETE keyword regardless of case. The initial character is
 * assumed to have been matched.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseDeleteKeyword(final char[] query, int offset) {
  final String keyword = "delete";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    // (ch | 32) lower-cases ASCII letters without allocation.
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of INSERT keyword regardless of case.
 *
 * <p>NOTE(review): the bounds check requires one character beyond "insert"
 * ({@code offset + 7}), so a query ending exactly at "insert" is rejected. This matches
 * the original behavior (a valid INSERT statement always has text after the keyword)
 * and is kept intentionally.</p>
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseInsertKeyword(final char[] query, int offset) {
  if (query.length < (offset + 7)) {
    return false;
  }
  final String keyword = "insert";
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of MOVE keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseMoveKeyword(final char[] query, int offset) {
  final String keyword = "move";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of PREPARE keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parsePrepareKeyword(final char[] query, int offset) {
  final String keyword = "prepare";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of RETURNING keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseReturningKeyword(final char[] query, int offset) {
  final String keyword = "returning";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of SELECT keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseSelectKeyword(final char[] query, int offset) {
  final String keyword = "select";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of UPDATE keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseUpdateKeyword(final char[] query, int offset) {
  final String keyword = "update";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of VALUES keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseValuesKeyword(final char[] query, int offset) {
  final String keyword = "values";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Faster version of {@link Long#parseLong(String)} when parsing a substring is required.
 * Only unsigned decimal digits are accepted; any other character raises
 * {@link NumberFormatException} (via {@link #digitAt}).
 *
 * @param s string to parse
 * @param beginIndex begin index (inclusive)
 * @param endIndex end index (exclusive)
 * @return long value
 */
public static long parseLong(String s, int beginIndex, int endIndex) {
  // Fallback to default implementation in case the string is long
  if (endIndex - beginIndex > 16) {
    return Long.parseLong(s.substring(beginIndex, endIndex));
  }
  long value = digitAt(s, beginIndex);
  for (int pos = beginIndex + 1; pos < endIndex; pos++) {
    value = value * 10 + digitAt(s, pos);
  }
  return value;
}
/**
 * Parse string to check presence of WITH keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseWithKeyword(final char[] query, int offset) {
  final String keyword = "with";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Parse string to check presence of AS keyword regardless of case.
 *
 * @param query char[] of the query statement
 * @param offset position of query to start checking
 * @return boolean indicates presence of word
 */
public static boolean parseAsKeyword(final char[] query, int offset) {
  final String keyword = "as";
  if (query.length < offset + keyword.length()) {
    return false;
  }
  for (int pos = 0; pos < keyword.length(); pos++) {
    if ((query[offset + pos] | 32) != keyword.charAt(pos)) {
      return false;
    }
  }
  return true;
}
/**
 * Returns true if a given string {@code s} has a digit at position {@code pos}.
 * Out-of-range positions (negative or beyond the string) return false.
 *
 * @param s input string
 * @param pos position (0-based)
 * @return true if input string s has a digit at position pos
 */
public static boolean isDigitAt(String s, int pos) {
  // Fixed: position 0 is a valid 0-based index; the previous `pos > 0` check
  // incorrectly reported a digit at the start of the string as absent.
  return pos >= 0 && pos < s.length() && Character.isDigit(s.charAt(pos));
}
/**
 * Converts the digit at position {@code pos} in string {@code s} to an integer or throws.
 *
 * @param s input string
 * @param pos position (0-based)
 * @return integer value of a digit at position pos
 * @throws NumberFormatException if the character at position pos is not a decimal digit
 */
public static int digitAt(String s, int pos) {
  char ch = s.charAt(pos);
  if (ch < '0' || ch > '9') {
    throw new NumberFormatException("Input string: \"" + s + "\", position: " + pos);
  }
  return ch - '0';
}
/**
 * @param c character
 * @return true if the character is a whitespace character as defined in the backend's parser
 */
public static boolean isSpace(char c) {
  switch (c) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
    case '\f':
      return true;
    default:
      return false;
  }
}
/**
 * @param c character
 * @return true if the given character is a valid character for an operator in the backend's
 *         parser
 */
public static boolean isOperatorChar(char c) {
  /*
   * Extracted from operators defined by {self} and {op_chars}
   * in pgsql/src/backend/parser/scan.l.
   */
  final String operatorChars = ",()[].;:+-*/%^<>=~!@#&|`?";
  return operatorChars.indexOf(c) >= 0;
}
/**
 * Checks if a character is valid as the start of an identifier.
 * PostgreSQL 9.4 allows column names like _, ‿, ⁀, ⁔, ︳, ︴, ﹍, ﹎, ﹏, _, so
 * it is assumed isJavaIdentifierPart is good enough for Redshift.
 *
 * @param c the character to check
 * @return true if valid as first character of an identifier; false if not
 * @see <a href="https://www.postgresql.org/docs/9.6/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS">Identifiers and Key Words</a>
 */
public static boolean isIdentifierStartChar(char c) {
  /*
   * The backend's implementation is located in
   * pgsql/src/backend/parser/scan.l:
   * ident_start [A-Za-z\200-\377_]
   * ident_cont [A-Za-z\200-\377_0-9\$]
   * however it is not clear how that interacts with unicode, so we just use Java's implementation.
   */
  return Character.isJavaIdentifierStart(c);
}
/**
 * Checks if a character is valid as the second or later character of an identifier.
 * Delegates to Java's identifier rules (see {@link #isIdentifierStartChar} for rationale).
 *
 * @param c the character to check
 * @return true if valid as second or later character of an identifier; false if not
 */
public static boolean isIdentifierContChar(char c) {
  return Character.isJavaIdentifierPart(c);
}
/**
 * @param c character
 * @return true if the character terminates an identifier (a double quote, whitespace,
 *         or an operator character)
 */
public static boolean charTerminatesIdentifier(char c) {
  return c == '"' || isSpace(c) || isOperatorChar(c);
}
/**
 * Checks if a character is valid as the start of a dollar quoting tag.
 *
 * @param c the character to check
 * @return true if valid as first character of a dollar quoting tag; false if not
 */
public static boolean isDollarQuoteStartChar(char c) {
  /*
   * The allowed dollar quote start and continuation characters
   * must stay in sync with what the backend defines in
   * pgsql/src/backend/parser/scan.l
   *
   * The quoted string starts with $foo$ where "foo" is an optional string
   * in the form of an identifier, except that it may not contain "$",
   * and extends to the first occurrence of an identical string.
   * There is *no* processing of the quoted text.
   */
  return c != '$' && isIdentifierStartChar(c);
}
/**
 * Checks if a character is valid as the second or later character of a dollar quoting tag
 * (any identifier-continuation character except '$'; see {@link #isDollarQuoteStartChar}).
 *
 * @param c the character to check
 * @return true if valid as second or later character of a dollar quoting tag; false if not
 */
public static boolean isDollarQuoteContChar(char c) {
  return c != '$' && isIdentifierContChar(c);
}
/**
 * Compares two sub-arrays of the given character array for equality. If the length is zero, the
 * result is true if and only if the offsets are within the bounds of the array.
 *
 * @param arr a char array
 * @param offA first sub-array start offset
 * @param offB second sub-array start offset
 * @param len length of the sub arrays to compare
 * @return true if the sub-arrays are equal; false if not
 */
private static boolean subArraysEqual(final char[] arr,
    final int offA, final int offB,
    final int len) {
  // Both start offsets must lie inside the array.
  if (offA < 0 || offB < 0 || offA >= arr.length || offB >= arr.length) {
    return false;
  }
  // Neither span may run past the end of the array.
  if (offA + len > arr.length || offB + len > arr.length) {
    return false;
  }
  for (int pos = 0; pos < len; ++pos) {
    if (arr[offA + pos] != arr[offB + pos]) {
      return false;
    }
  }
  return true;
}
/**
* Converts JDBC-specific callable statement escapes {@code { [? =] call <some_function> [(?,
* [?,..])] }} into the Redshift format which is {@code select <some_function> (?, [?, ...]) as
* result} or {@code select * from <some_function> (?, [?, ...]) as result} (7.3)
*
* @param jdbcSql sql text with JDBC escapes
* @param stdStrings if backslash in single quotes should be regular character or escape one
* @param serverVersion server version
* @param protocolVersion protocol version
* @param escapeSyntaxCallMode mode specifying whether JDBC escape call syntax is transformed into a CALL/SELECT statement
* @return SQL in appropriate for given server format
* @throws SQLException if given SQL is malformed
*/
  public static JdbcCallParseInfo modifyJdbcCall(String jdbcSql, boolean stdStrings,
      int serverVersion, int protocolVersion, EscapeSyntaxCallMode escapeSyntaxCallMode) throws SQLException {
    // Mini-parser for JDBC function-call syntax (only)
    // TODO: Merge with escape processing (and parameter parsing?) so we only parse each query once.
    // RE: frequently used statements are cached (see {@link com.amazon.redshift.jdbc.PgConnection#borrowQuery}), so this "merge" is not that important.
    String sql = jdbcSql;
    boolean isFunction = false;
    boolean outParamBeforeFunc = false;
    int len = jdbcSql.length();
    // Hand-rolled DFA over the "{ [? =] call name(...) }" grammar; the case labels
    // below document the meaning of each numeric state.
    int state = 1;
    boolean inQuotes = false;
    boolean inEscape = false;
    int startIndex = -1; // start of the "name(...)" body inside the braces
    int endIndex = -1; // index of the closing '}'
    boolean syntaxError = false;
    int i = 0;
    while (i < len && !syntaxError) {
      char ch = jdbcSql.charAt(i);
      switch (state) {
        case 1: // Looking for { at start of query
          if (ch == '{') {
            ++i;
            ++state;
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            // Not function-call syntax. Skip the rest of the string.
            i = len;
          }
          break;
        case 2: // After {, looking for ? or =, skipping whitespace
          if (ch == '?') {
            // Chained assignment: both flags become true for "{ ? = call ... }".
            outParamBeforeFunc =
                isFunction = true; // { ? = call ... } -- function with one out parameter
            ++i;
            ++state;
          } else if (ch == 'c' || ch == 'C') { // { call ... } -- proc with no out parameters
            state += 3; // Don't increase 'i'
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            // "{ foo ...", doesn't make sense, complain.
            syntaxError = true;
          }
          break;
        case 3: // Looking for = after ?, skipping whitespace
          if (ch == '=') {
            ++i;
            ++state;
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;
        case 4: // Looking for 'call' after '? =' skipping whitespace
          if (ch == 'c' || ch == 'C') {
            ++state; // Don't increase 'i'.
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;
        case 5: // Should be at 'call ' either at start of string or after ?=
          if ((ch == 'c' || ch == 'C') && i + 4 <= len && jdbcSql.substring(i, i + 4)
              .equalsIgnoreCase("call")) {
            isFunction = true;
            i += 4;
            ++state;
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;
        case 6: // Looking for whitespace char after 'call'
          if (Character.isWhitespace(ch)) {
            // Ok, we found the start of the real call.
            ++i;
            ++state;
            startIndex = i;
          } else {
            syntaxError = true;
          }
          break;
        case 7: // In "body" of the query (after "{ [? =] call ")
          if (ch == '\'') {
            inQuotes = !inQuotes;
            ++i;
          } else if (inQuotes && ch == '\\' && !stdStrings) {
            // Backslash in string constant, skip next character.
            i += 2;
          } else if (!inQuotes && ch == '{') {
            // Nested escape (e.g. {ts ...}) inside the call body; only a '}' that is
            // NOT closing such a nested escape ends the outer call escape.
            inEscape = !inEscape;
            ++i;
          } else if (!inQuotes && ch == '}') {
            if (!inEscape) {
              // Should be end of string.
              endIndex = i;
              ++i;
              ++state;
            } else {
              inEscape = false;
            }
          } else if (!inQuotes && ch == ';') {
            syntaxError = true;
          } else {
            // Everything else is ok.
            ++i;
          }
          break;
        case 8: // At trailing end of query, eating whitespace
          if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;
        default:
          throw new IllegalStateException("somehow got into bad state " + state);
      }
    }
    // We can only legally end in a couple of states here.
    if (i == len && !syntaxError) {
      if (state == 1) {
        // Not an escaped syntax.
        // Detect Redshift native CALL.
        // (OUT parameter registration, needed for stored procedures with INOUT arguments, will fail without this)
        i = 0;
        while (i < len && Character.isWhitespace(jdbcSql.charAt(i))) {
          i++; // skip any preceding whitespace
        }
        if (i < len - 5) { // 5 == length of "call" + 1 whitespace
          //Check for CALL followed by whitespace
          char ch = jdbcSql.charAt(i);
          if ((ch == 'c' || ch == 'C') && jdbcSql.substring(i, i + 4).equalsIgnoreCase("call")
              && Character.isWhitespace(jdbcSql.charAt(i + 4))) {
            isFunction = true;
          }
        }
        return new JdbcCallParseInfo(sql, isFunction);
      }
      if (state != 8) {
        syntaxError = true; // Ran out of query while still parsing
      }
    }
    if (syntaxError) {
      throw new RedshiftException(
          GT.tr("Malformed function or procedure escape syntax at offset {0}.", i),
          RedshiftState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
    }
    // Rewrite "{ [? =] call f(args) }" as either "select f(args)" or "call f(args)",
    // depending on the configured escape-syntax call mode and whether a return
    // parameter ("? =") was present.
    String prefix;
    String suffix;
    if (escapeSyntaxCallMode == EscapeSyntaxCallMode.SELECT /* || serverVersion < 110000 */
        || (outParamBeforeFunc && escapeSyntaxCallMode == EscapeSyntaxCallMode.CALL_IF_NO_RETURN)) {
      // prefix = "select * from ";
      // suffix = " as result";
      prefix = "select ";
      suffix = "";
    } else {
      prefix = "call ";
      suffix = "";
    }
    String s = jdbcSql.substring(startIndex, endIndex);
    int prefixLength = prefix.length();
    StringBuilder sb = new StringBuilder(prefixLength + jdbcSql.length() + suffix.length() + 10);
    sb.append(prefix);
    sb.append(s);
    int opening = s.indexOf('(') + 1;
    if (opening == 0) {
      // here the function call has no parameters declaration eg : "{ ? = call pack_getValue}"
      sb.append(outParamBeforeFunc ? "(?)" : "()");
    } else if (outParamBeforeFunc) {
      // move the single out parameter into the function call
      // so that it can be treated like all other parameters
      boolean needComma = false;
      // the following loop will check if the function call has parameters
      // eg "{ ? = call pack_getValue(?) }" vs "{ ? = call pack_getValue() }
      for (int j = opening + prefixLength; j < sb.length(); j++) {
        char c = sb.charAt(j);
        if (c == ')') {
          break;
        }
        if (!Character.isWhitespace(c)) {
          needComma = true;
          break;
        }
      }
      // insert the return parameter as the first parameter of the function call
      if (needComma) {
        sb.insert(opening + prefixLength, "?,");
      } else {
        sb.insert(opening + prefixLength, "?");
      }
    }
    if (!suffix.isEmpty()) {
      sql = sb.append(suffix).toString();
    } else {
      sql = sb.toString();
    }
    return new JdbcCallParseInfo(sql, isFunction);
  }
/**
* <p>Filter the SQL string of Java SQL Escape clauses.</p>
*
* <p>Currently implemented Escape clauses are those mentioned in 11.3 in the specification.
* Basically we look through the sql string for {d xxx}, {t xxx}, {ts xxx}, {oj xxx} or {fn xxx}
* in non-string sql code. When we find them, we just strip the escape part leaving only the xxx
* part. So, something like "select * from x where d={d '2001-10-09'}" would return "select * from
* x where d= '2001-10-09'".</p>
*
* @param sql the original query text
* @param replaceProcessingEnabled whether replace_processing_enabled is on
* @param standardConformingStrings whether standard_conforming_strings is on
* @return Redshift-compatible SQL
* @throws SQLException if given SQL is wrong
*/
public static String replaceProcessing(String sql, boolean replaceProcessingEnabled,
boolean standardConformingStrings) throws SQLException {
if (replaceProcessingEnabled) {
// Since escape codes can only appear in SQL CODE, we keep track
// of if we enter a string or not.
int len = sql.length();
char[] chars = sql.toCharArray();
StringBuilder newsql = new StringBuilder(len);
int i = 0;
while (i < len) {
i = parseSql(chars, i, newsql, false, standardConformingStrings);
// We need to loop here in case we encounter invalid
// SQL, consider: SELECT a FROM t WHERE (1 > 0)) ORDER BY a
// We can't ending replacing after the extra closing paren
// because that changes a syntax error to a valid query
// that isn't what the user specified.
if (i < len) {
newsql.append(chars[i]);
i++;
}
}
return newsql.toString();
} else {
return sql;
}
}
  /**
   * parse the given sql from index i, appending it to the given buffer until we hit an unmatched
   * right parentheses or end of string. When the stopOnComma flag is set we also stop processing
   * when a comma is found in sql text that isn't inside nested parenthesis.
   *
   * @param sql the original query text
   * @param i starting position for replacing
   * @param newsql where to write the replaced output
   * @param stopOnComma should we stop after hitting the first comma in sql text?
   * @param stdStrings whether standard_conforming_strings is on
   * @return the position we stopped processing at
   * @throws SQLException if given SQL is wrong
   */
  private static int parseSql(char[] sql, int i, StringBuilder newsql, boolean stopOnComma,
      boolean stdStrings) throws SQLException {
    SqlParseState state = SqlParseState.IN_SQLCODE;
    int len = sql.length;
    int nestedParenthesis = 0;
    boolean endOfNested = false;
    // because of the ++i loop
    i--;
    while (!endOfNested && ++i < len) {
      char c = sql[i];
      state_switch:
      switch (state) {
        case IN_SQLCODE:
          if (c == '$') {
            // Dollar-quoted string: copy the whole literal verbatim.
            int i0 = i;
            i = parseDollarQuotes(sql, i);
            checkParsePosition(i, len, i0, sql,
                "Unterminated dollar quote started at position {0} in SQL {1}. Expected terminating $$");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '\'') {
            // start of a string?
            int i0 = i;
            i = parseSingleQuotes(sql, i, stdStrings);
            checkParsePosition(i, len, i0, sql,
                "Unterminated string literal started at position {0} in SQL {1}. Expected ' char");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '"') {
            // start of a identifier?
            int i0 = i;
            i = parseDoubleQuotes(sql, i);
            checkParsePosition(i, len, i0, sql,
                "Unterminated identifier started at position {0} in SQL {1}. Expected \" char");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '/') {
            // Possible /* ... */ block comment; copied verbatim.
            int i0 = i;
            i = parseBlockComment(sql, i);
            checkParsePosition(i, len, i0, sql,
                "Unterminated block comment started at position {0} in SQL {1}. Expected */ sequence");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '-') {
            // Possible -- line comment; copied verbatim (no terminator required).
            int i0 = i;
            i = parseLineComment(sql, i);
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '(') { // begin nested sql
            nestedParenthesis++;
          } else if (c == ')') { // end of nested sql
            nestedParenthesis--;
            // An unmatched ')' terminates this (sub)expression; the caller
            // decides what to do with the character we stopped on.
            if (nestedParenthesis < 0) {
              endOfNested = true;
              break;
            }
          } else if (stopOnComma && c == ',' && nestedParenthesis == 0) {
            endOfNested = true;
            break;
          } else if (c == '{') { // start of an escape code?
            if (i + 1 < len) {
              SqlParseState[] availableStates = SqlParseState.VALUES;
              // skip first state, it's not a escape code state
              for (int j = 1; j < availableStates.length; j++) {
                SqlParseState availableState = availableStates[j];
                int matchedPosition = availableState.getMatchedPosition(sql, i + 1);
                if (matchedPosition == 0) {
                  continue;
                }
                // Drop the '{' and the keyword; emit the replacement keyword
                // (e.g. "TIMESTAMP ") when the escape has one.
                i += matchedPosition;
                if (availableState.replacementKeyword != null) {
                  newsql.append(availableState.replacementKeyword);
                }
                state = availableState;
                break state_switch;
              }
            }
          }
          newsql.append(c);
          break;
        case ESC_FUNCTION:
          // extract function name
          i = escapeFunction(sql, i, newsql, stdStrings);
          state = SqlParseState.IN_SQLCODE; // end of escaped function (or query)
          break;
        case ESC_DATE:
        case ESC_TIME:
        case ESC_TIMESTAMP:
        case ESC_OUTERJOIN:
        case ESC_ESCAPECHAR:
          // Inside {d ...}, {t ...}, {ts ...}, {oj ...} or {escape ...}: the
          // keyword was already handled above; copy the value until the '}'.
          if (c == '}') {
            state = SqlParseState.IN_SQLCODE; // end of escape code.
          } else {
            newsql.append(c);
          }
          break;
      } // end switch
    }
    return i;
  }
private static int findOpenBrace(char[] sql, int i) {
int posArgs = i;
while (posArgs < sql.length && sql[posArgs] != '(') {
posArgs++;
}
return posArgs;
}
private static void checkParsePosition(int i, int len, int i0, char[] sql,
String message)
throws RedshiftException {
if (i < len) {
return;
}
throw new RedshiftException(
GT.tr(message, i0, new String(sql)),
RedshiftState.SYNTAX_ERROR);
}
  // Translates one "{fn name(args)}" escape body: emits the translated call into
  // newsql and returns the index of the terminating '}' (or end of input).
  private static int escapeFunction(char[] sql, int i, StringBuilder newsql, boolean stdStrings) throws SQLException {
    String functionName;
    int argPos = findOpenBrace(sql, i);
    if (argPos < sql.length) {
      functionName = new String(sql, i, argPos - i).trim();
      // extract arguments
      i = argPos + 1;// we start the scan after the first (
      i = escapeFunctionArguments(newsql, functionName, sql, i, stdStrings);
    }
    // go to the end of the function copying anything found
    // NOTE(review): in the normal path i points at the argument list's ')' here, so
    // the increment steps past it. When no '(' exists at all, i still points at the
    // first character of the name and that character gets skipped before the
    // verbatim copy -- presumably such input never reaches this method; confirm.
    i++;
    while (i < sql.length && sql[i] != '}') {
      newsql.append(sql[i++]);
    }
    return i;
  }
  /**
   * Generate sql for escaped functions.
   *
   * @param newsql destination StringBuilder
   * @param functionName the escaped function name
   * @param sql input SQL text (containing arguments of a function call with possible JDBC escapes)
   * @param i position in the input SQL
   * @param stdStrings whether standard_conforming_strings is on
   * @return the right Redshift sql
   * @throws SQLException if something goes wrong
   */
  private static int escapeFunctionArguments(StringBuilder newsql, String functionName, char[] sql, int i,
      boolean stdStrings)
      throws SQLException {
    // Maximum arity of functions in EscapedFunctions is 3
    List<CharSequence> parsedArgs = new ArrayList<CharSequence>(3);
    while (true) {
      StringBuilder arg = new StringBuilder();
      int lastPos = i;
      // Each argument is parsed up to the next top-level ',' or the closing ')'.
      i = parseSql(sql, i, arg, true, stdStrings);
      // If the position did not move, the argument list was empty ("()"):
      // record nothing.
      if (i != lastPos) {
        parsedArgs.add(arg);
      }
      if (i >= sql.length // should not happen
          || sql[i] != ',') {
        break;
      }
      i++;
    }
    Method method = EscapedFunctions2.getFunction(functionName);
    if (method == null) {
      // Unknown escape function: emit it verbatim as funcName(arg1,...,argN).
      newsql.append(functionName);
      EscapedFunctions2.appendCall(newsql, "(", ",", ")", parsedArgs);
      return i;
    }
    try {
      // Known functions are translated by reflective helpers in EscapedFunctions2,
      // each of which appends the Redshift equivalent to newsql.
      method.invoke(null, newsql, parsedArgs);
    } catch (InvocationTargetException e) {
      Throwable targetException = e.getTargetException();
      if (targetException instanceof SQLException) {
        throw (SQLException) targetException;
      } else {
        // NOTE(review): only the message survives here -- the original cause is
        // dropped; consider chaining targetException if a suitable RedshiftException
        // constructor exists.
        throw new RedshiftException(targetException.getMessage(), RedshiftState.SYSTEM_ERROR);
      }
    } catch (IllegalAccessException e) {
      throw new RedshiftException(e.getMessage(), RedshiftState.SYSTEM_ERROR);
    }
    return i;
  }
  // Characters an escape value may legally start with; '0' is a wildcard meaning
  // "any letter" (see SqlParseState.getMatchedPosition below).
  private static final char[] QUOTE_OR_ALPHABETIC_MARKER = {'\"', '0'};
  private static final char[] QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS = {'\"', '0', '('};
  private static final char[] SINGLE_QUOTE = {'\''};

  // Static variables for parsing SQL when replaceProcessing is true.
  private enum SqlParseState {
    IN_SQLCODE,
    ESC_DATE("d", SINGLE_QUOTE, "DATE "),
    ESC_TIME("t", SINGLE_QUOTE, "TIME "),
    ESC_TIMESTAMP("ts", SINGLE_QUOTE, "TIMESTAMP "),
    ESC_FUNCTION("fn", QUOTE_OR_ALPHABETIC_MARKER, null),
    ESC_OUTERJOIN("oj", QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS, null),
    ESC_ESCAPECHAR("escape", SINGLE_QUOTE, "ESCAPE ");

    private static final SqlParseState[] VALUES = values();

    // The keyword that follows '{' (e.g. "ts"), the characters its value may start
    // with, and the SQL keyword (if any) emitted in place of the escape.
    private final char[] escapeKeyword;
    private final char[] allowedValues;
    private final String replacementKeyword;

    SqlParseState() {
      this("", new char[0], null);
    }

    SqlParseState(String escapeKeyword, char[] allowedValues, String replacementKeyword) {
      this.escapeKeyword = escapeKeyword.toCharArray();
      this.allowedValues = allowedValues;
      this.replacementKeyword = replacementKeyword;
    }

    // True when sql[pos..] starts with this state's keyword (matched against the
    // lower- or upper-case form of each keyword char) and at least one more
    // character follows it.
    private boolean startMatches(char[] sql, int pos) {
      // check for the keyword
      for (char c : escapeKeyword) {
        if (pos >= sql.length) {
          return false;
        }
        char curr = sql[pos++];
        if (curr != c && curr != Character.toUpperCase(c)) {
          return false;
        }
      }
      return pos < sql.length;
    }

    // Returns how many characters to advance past the keyword and any following
    // spaces (landing on the first character of the value), or 0 when this state
    // does not match at pos.
    private int getMatchedPosition(char[] sql, int pos) {
      // check for the keyword
      if (!startMatches(sql, pos)) {
        return 0;
      }
      int newPos = pos + escapeKeyword.length;
      // check for the beginning of the value
      char curr = sql[newPos];
      // ignore any in-between whitespace
      while (curr == ' ') {
        newPos++;
        if (newPos >= sql.length) {
          return 0;
        }
        curr = sql[newPos];
      }
      for (char c : allowedValues) {
        if (curr == c || (c == '0' && Character.isLetter(curr))) {
          return newPos - pos;
        }
      }
      return 0;
    }
  }
}
| 8,327 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ResultHandlerDelegate.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
import com.amazon.redshift.core.v3.MessageLoopState;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
/**
 * Internal to the driver class, please do not use in the application.
 *
 * <p>Convenience base class for {@link ResultHandler} decorators: every callback is
 * forwarded to the wrapped handler when one was supplied; otherwise the callback is a
 * no-op and the query methods fall back to a neutral default ({@code null} exception
 * and warning, scrollable result sets allowed).</p>
 */
public class ResultHandlerDelegate implements ResultHandler {
  private final ResultHandler delegate;

  public ResultHandlerDelegate(ResultHandler delegate) {
    this.delegate = delegate;
  }

  @Override
  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
      ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples,
      int[] rowCount, Thread ringBufferThread) {
    if (delegate == null) {
      return;
    }
    delegate.handleResultRows(fromQuery, fields, tuples, cursor, queueTuples, rowCount,
        ringBufferThread);
  }

  @Override
  public void handleCommandStatus(String status, long updateCount, long insertOID) {
    if (delegate == null) {
      return;
    }
    delegate.handleCommandStatus(status, updateCount, insertOID);
  }

  @Override
  public void handleWarning(SQLWarning warning) {
    if (delegate == null) {
      return;
    }
    delegate.handleWarning(warning);
  }

  @Override
  public void handleError(SQLException error) {
    if (delegate == null) {
      return;
    }
    delegate.handleError(error);
  }

  @Override
  public void handleCompletion() throws SQLException {
    if (delegate == null) {
      return;
    }
    delegate.handleCompletion();
  }

  @Override
  public void secureProgress() {
    if (delegate == null) {
      return;
    }
    delegate.secureProgress();
  }

  @Override
  public SQLException getException() {
    return (delegate == null) ? null : delegate.getException();
  }

  @Override
  public SQLWarning getWarning() {
    return (delegate == null) ? null : delegate.getWarning();
  }

  @Override
  public void setStatementStateIdleFromInQuery() {
    if (delegate == null) {
      return;
    }
    delegate.setStatementStateIdleFromInQuery();
  }

  @Override
  public void setStatementStateInQueryFromIdle() {
    if (delegate == null) {
      return;
    }
    delegate.setStatementStateInQueryFromIdle();
  }

  @Override
  public boolean wantsScrollableResultSet() {
    // Scrollable by default when there is no handler to consult.
    return (delegate == null) || delegate.wantsScrollableResultSet();
  }
}
| 8,328 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/RedshiftStream.java | /*
* Copyright (c) 2017, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.HostSpec;
import com.amazon.redshift.util.RedshiftPropertyMaxResultBufferParser;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.EOFException;
import java.io.FilterOutputStream;
import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.sql.SQLException;
import java.util.Properties;
import javax.net.SocketFactory;
import static com.amazon.redshift.jdbc.RedshiftConnectionImpl.getOptionalSetting;
/**
* <p>Wrapper around the raw connection to the server that implements some basic primitives
* (reading/writing formatted data, doing string encoding, etc).</p>
*
* <p>In general, instances of RedshiftStream are not threadsafe; the caller must ensure that only one thread
* at a time is accessing a particular RedshiftStream instance.</p>
*/
public class RedshiftStream implements Closeable, Flushable {
  private final SocketFactory socketFactory;
  private final HostSpec hostSpec;
  // Scratch buffers reused for big-endian int4/int2 wire encoding and decoding.
  private final byte[] int4Buf;
  private final byte[] int2Buf;
  // Raw socket plus the buffered (and optionally decompressing) views layered on it.
  private Socket connection;
  private VisibleBufferedInputStream pgInput;
  private CompressedInputStream pgCompressedInput; // non-null only when wire compression is active
  private OutputStream pgOutput;
  private byte[] streamBuffer; // presumably a lazily allocated copy buffer -- allocated outside this chunk; confirm
  private long nextStreamAvailableCheckTime;
  // This is a workaround for SSL sockets: sslInputStream.available() might return 0
  // so we perform "1ms reads" once in a while
  private int minStreamAvailableCheckDelay = 1000;
  private Encoding encoding;
  private Writer encodingWriter; // shared writer that encodes characters directly onto pgOutput
  // Result-size guard: -1 means unlimited; resultBufferByteCount tracks bytes consumed so far.
  private long maxResultBuffer = -1;
  private long resultBufferByteCount = 0;
  private RedshiftLogger logger;
/**
* Constructor: Connect to the Redshift back end and return a stream connection.
*
* @param socketFactory socket factory to use when creating sockets
* @param hostSpec the host and port to connect to
* @param timeout timeout in milliseconds, or 0 if no timeout set
* @param logger the logger to log the entry for debugging.
* @throws IOException if an IOException occurs below it.
*/
public RedshiftStream(SocketFactory socketFactory, HostSpec hostSpec, int timeout, RedshiftLogger logger, boolean disableCompressionForSSL, Properties info) throws IOException {
this.logger = logger;
this.socketFactory = socketFactory;
this.hostSpec = hostSpec;
Socket socket = socketFactory.createSocket();
if (!socket.isConnected()) {
// When using a SOCKS proxy, the host might not be resolvable locally,
// thus we defer resolution until the traffic reaches the proxy. If there
// is no proxy, we must resolve the host to an IP to connect the socket.
if(RedshiftLogger.isEnable())
logger.log(LogLevel.INFO, "hostspec host: " + hostSpec.getHost());
logger.log(LogLevel.INFO, "hostspec port: " + hostSpec.getPort());
InetSocketAddress address = hostSpec.shouldResolve()
? new InetSocketAddress(hostSpec.getHost(), hostSpec.getPort())
: InetSocketAddress.createUnresolved(hostSpec.getHost(), hostSpec.getPort());
socket.connect(address, timeout);
if(RedshiftLogger.isEnable())
logger.log(LogLevel.INFO, "address: " + address.getAddress());
logger.log(LogLevel.INFO, "port: " + address.getPort());
logger.log(LogLevel.INFO, "hostname: " + address.getHostName());
logger.log(LogLevel.INFO, "hoststring: " + address.getHostString());
}
changeSocket(socket, disableCompressionForSSL, info);
setEncoding(Encoding.getJVMEncoding("UTF-8", logger));
int2Buf = new byte[2];
int4Buf = new byte[4];
if(RedshiftLogger.isEnable())
logger.log(LogLevel.INFO, "Gets a new stream on a new socket");
}
  /**
   * Constructor: Connect to the Redshift back end and return a stream connection.
   *
   * @param socketFactory socket factory
   * @param hostSpec the host and port to connect to
   * @param logger the logger to log the entry for debugging.
   * @param info connection properties
   * @throws IOException if an IOException occurs below it.
   * @deprecated use {@link #RedshiftStream(SocketFactory, com.amazon.redshift.util.HostSpec, int, RedshiftLogger, boolean, Properties)}
   */
  @Deprecated
  public RedshiftStream(SocketFactory socketFactory, HostSpec hostSpec, RedshiftLogger logger, Properties info) throws IOException {
    // Delegates with timeout=0 (no timeout) and disableCompressionForSSL=true,
    // i.e. a plain, uncompressed input stream.
    this(socketFactory, hostSpec, 0, logger, true, info);
  }
  /** @return the logger associated with this stream */
  public RedshiftLogger getLogger() {
    return logger;
  }

  /** @return the host/port this stream was created for */
  public HostSpec getHostSpec() {
    return hostSpec;
  }

  /** @return the underlying socket (possibly an SSL wrapper after {@link #changeSocket}) */
  public Socket getSocket() {
    return connection;
  }

  /** @return the factory used to create this stream's socket */
  public SocketFactory getSocketFactory() {
    return socketFactory;
  }
  /**
   * Check for pending backend messages without blocking. Might return false when there actually are
   * messages waiting, depending on the characteristics of the underlying socket. This is used to
   * detect asynchronous notifies from the backend, when available.
   *
   * @return true if there is a pending backend message
   * @throws IOException if something wrong happens
   */
  public boolean hasMessagePending() throws IOException {

    boolean available = false;

    // In certain cases, available returns 0, yet there are bytes
    if (pgInput.available() > 0) {
      return true;
    }
    // Throttle the expensive 1ms-read probe below to at most once per
    // minStreamAvailableCheckDelay milliseconds.
    long now = System.nanoTime() / 1000000;
    if (now < nextStreamAvailableCheckTime && minStreamAvailableCheckDelay != 0) {
      // Do not use ".peek" too often
      return false;
    }
    // Temporarily use a 1ms read timeout so the probe cannot block; the original
    // timeout is always restored in the finally block.
    int soTimeout = getNetworkTimeout();
    connection.setSoTimeout(1);
    try {
      if (!pgInput.ensureBytes(1, false)) {
        // NOTE(review): this early return (and the timeout catch below) bypasses
        // the throttle update at the bottom, so the next call probes again
        // immediately -- confirm that is intentional.
        return false;
      }
      available = (pgInput.peek() != -1);
    } catch (SocketTimeoutException e) {
      return false;
    } finally {
      connection.setSoTimeout(soTimeout);
    }

    /*
    If none available then set the next check time
    In the event that there more async bytes available we will continue to get them all
    see issue 1547 https://github.com/pgjdbc/pgjdbc/issues/1547
    */
    if (!available) {
      nextStreamAvailableCheckTime = now + minStreamAvailableCheckDelay;
    }
    return available;
  }

  /** Sets the probe throttle in milliseconds; 0 disables throttling entirely. */
  public void setMinStreamAvailableCheckDelay(int delay) {
    this.minStreamAvailableCheckDelay = delay;
  }
  /**
   * Switch this stream to using a new socket. Any existing socket is <em>not</em> closed; it's
   * assumed that we are changing to a new socket that delegates to the original socket (e.g. SSL).
   *
   * @param socket the new socket to change to
   * @param disableCompressionForSSL when true, force a plain (uncompressed) input stream
   * @param info connection properties (consulted for the compression setting)
   * @throws IOException if something goes wrong
   */
  public void changeSocket(Socket socket, Boolean disableCompressionForSSL, Properties info) throws IOException {
    this.connection = socket;
    changeStream(disableCompressionForSSL, info);
  }
public void changeStream(Boolean disableCompressionForSSL, Properties info) throws IOException {
// Submitted by Jason Venner <jason@idiom.com>. Disable Nagle
// as we are selective about flushing output only when we
// really need to.
connection.setTcpNoDelay(true);
// Buffer sizes submitted by Sverre H Huseby <sverrehu@online.no>
InputStream connectionStream = connection.getInputStream();
String compressionMode = getOptionalSetting(RedshiftProperty.COMPRESSION.getName(), info);
compressionMode = null == compressionMode ? RedshiftProperty.COMPRESSION.getDefaultValue() : compressionMode;
// If doing SSL handshake or if compression is set to off by user, use regular input stream
if(disableCompressionForSSL || compressionMode.equalsIgnoreCase("off"))
{
if(RedshiftLogger.isEnable())
{
logger.logInfo("Compression is disabled. Creating regular input stream.");
}
pgInput = new VisibleBufferedInputStream(connectionStream, 8192);
}
else
{
// Use a compressed input stream
if(RedshiftLogger.isEnable())
{
logger.logInfo("Compression is enabled. Creating compressed input stream.");
}
pgCompressedInput = new CompressedInputStream(connectionStream, logger);
pgInput = new VisibleBufferedInputStream(pgCompressedInput, 8192);
}
pgOutput = new BufferedOutputStream(connection.getOutputStream(), 8192);
if (encoding != null) {
setEncoding(encoding);
}
}
public long getBytesFromStream()
{
if(null != pgCompressedInput)
{
return pgCompressedInput.getBytesReadFromStream();
}
return pgInput.getBytesReadFromStream();
}
  /** @return the encoding currently used for string conversion on this stream */
  public Encoding getEncoding() {
    return encoding;
  }

  /**
   * Change the encoding used by this connection.
   *
   * @param encoding the new encoding to use
   * @throws IOException if something goes wrong
   */
  public void setEncoding(Encoding encoding) throws IOException {
    // No-op when the encoding is unchanged (compared by name).
    if (this.encoding != null && this.encoding.name().equals(encoding.name())) {
      return;
    }
    // Close down any old writer.
    if (encodingWriter != null) {
      encodingWriter.close();
    }

    this.encoding = encoding;

    // Intercept flush() downcalls from the writer; our caller
    // will call RedshiftStream.flush() as needed.
    OutputStream interceptor = new FilterOutputStream(pgOutput) {
      // Deliberate no-op: the writer's flush must not push data on its own.
      public void flush() throws IOException {
      }

      // close() flushes the underlying stream instead of closing it, so the
      // shared socket output stream stays open.
      public void close() throws IOException {
        super.flush();
      }
    };

    encodingWriter = encoding.getEncodingWriter(interceptor);
  }
/**
* <p>Get a Writer instance that encodes directly onto the underlying stream.</p>
*
* <p>The returned Writer should not be closed, as it's a shared object. Writer.flush needs to be
* called when switching between use of the Writer and use of the RedshiftStream write methods, but it
* won't actually flush output all the way out -- call {@link #flush} to actually ensure all
* output has been pushed to the server.</p>
*
* @return the shared Writer instance
* @throws IOException if something goes wrong.
*/
public Writer getEncodingWriter() throws IOException {
if (encodingWriter == null) {
throw new IOException("No encoding has been set on this connection");
}
return encodingWriter;
}
  /**
   * Sends a single character to the back end.
   *
   * @param val the character to be sent
   * @throws IOException if an I/O error occurs
   */
  public void sendChar(int val) throws IOException {
    pgOutput.write(val);
  }

  /**
   * Sends a 4-byte integer to the back end.
   *
   * @param val the integer to be sent
   * @throws IOException if an I/O error occurs
   */
  public void sendInteger4(int val) throws IOException {
    // Network byte order (big-endian): most significant byte first.
    int4Buf[0] = (byte) (val >>> 24);
    int4Buf[1] = (byte) (val >>> 16);
    int4Buf[2] = (byte) (val >>> 8);
    int4Buf[3] = (byte) (val);
    pgOutput.write(int4Buf);
  }

  /**
   * Sends a 2-byte integer (short) to the back end.
   *
   * @param val the integer to be sent
   * @throws IOException if an I/O error occurs or {@code val} cannot be encoded in 2 bytes
   */
  public void sendInteger2(int val) throws IOException {
    if (val < Short.MIN_VALUE || val > Short.MAX_VALUE) {
      throw new IOException("Tried to send an out-of-range integer as a 2-byte value: " + val);
    }
    // Big-endian, high byte first.
    int2Buf[0] = (byte) (val >>> 8);
    int2Buf[1] = (byte) val;
    pgOutput.write(int2Buf);
  }
/**
* Send an array of bytes to the backend.
*
* @param buf The array of bytes to be sent
* @throws IOException if an I/O error occurs
*/
public void send(byte[] buf) throws IOException {
pgOutput.write(buf);
}
/**
* Send a fixed-size array of bytes to the backend. If {@code buf.length < siz}, pad with zeros.
* If {@code buf.lengh > siz}, truncate the array.
*
* @param buf the array of bytes to be sent
* @param siz the number of bytes to be sent
* @throws IOException if an I/O error occurs
*/
public void send(byte[] buf, int siz) throws IOException {
send(buf, 0, siz);
}
/**
* Send a fixed-size array of bytes to the backend. If {@code length < siz}, pad with zeros. If
* {@code length > siz}, truncate the array.
*
* @param buf the array of bytes to be sent
* @param off offset in the array to start sending from
* @param siz the number of bytes to be sent
* @throws IOException if an I/O error occurs
*/
public void send(byte[] buf, int off, int siz) throws IOException {
int bufamt = buf.length - off;
pgOutput.write(buf, off, bufamt < siz ? bufamt : siz);
for (int i = bufamt; i < siz; ++i) {
pgOutput.write(0);
}
}
  /**
   * Sends exactly {@code writer.getLength()} bytes to the backend, produced by
   * invoking the given stream writer; if the writer emits fewer bytes than declared,
   * the remainder is zero-padded so the protocol message keeps its declared length.
   * (The fixed-length wrapper is presumably responsible for capping over-writes --
   * see {@code FixedLengthOutputStream}.)
   *
   * @param writer the stream writer to invoke to send the bytes
   * @throws IOException if an I/O error occurs
   */
  public void send(ByteStreamWriter writer) throws IOException {
    final FixedLengthOutputStream fixedLengthStream = new FixedLengthOutputStream(writer.getLength(), pgOutput);
    try {
      writer.writeTo(new ByteStreamWriter.ByteStreamTarget() {
        @Override
        public OutputStream getOutputStream() {
          return fixedLengthStream;
        }
      });
    } catch (IOException ioe) {
      throw ioe;
    } catch (Exception re) {
      // Wrap any unexpected writer failure so callers still see an IOException.
      throw new IOException("Error writing bytes to stream", re);
    }
    // Pad out to the declared length.
    for (int i = fixedLengthStream.remaining(); i > 0; i--) {
      pgOutput.write(0);
    }
  }
  /**
   * Receives a single character from the backend, without advancing the current protocol stream
   * position.
   *
   * @return the character received
   * @throws IOException if an I/O Error occurs
   */
  public int peekChar() throws IOException {
    int c = pgInput.peek();
    // A negative value means the backend closed the connection.
    if (c < 0) {
      throw new EOFException();
    }
    return c;
  }

  /**
   * Receives a single character from the backend.
   *
   * @return the character received
   * @throws IOException if an I/O Error occurs
   */
  public int receiveChar() throws IOException {
    int c = pgInput.read();
    // A negative value means the backend closed the connection.
    if (c < 0) {
      throw new EOFException("The server closed the connection.");
    }
    return c;
  }
  /**
   * Receives a four byte integer from the backend.
   *
   * @return the integer received from the backend
   * @throws IOException if an I/O error occurs
   */
  public int receiveInteger4() throws IOException {
    // NOTE(review): this relies on read(byte[]) delivering all 4 bytes in one call;
    // presumably VisibleBufferedInputStream guarantees that, a generic InputStream
    // would not -- confirm before swapping the stream implementation.
    if (pgInput.read(int4Buf) != 4) {
      throw new EOFException();
    }

    // Big-endian decode.
    return (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
        | int4Buf[3] & 0xFF;
  }

  /**
   * Receives a two byte integer from the backend.
   *
   * @return the integer received from the backend
   * @throws IOException if an I/O error occurs
   */
  public int receiveInteger2() throws IOException {
    // See the short-read note in receiveInteger4.
    if (pgInput.read(int2Buf) != 2) {
      throw new EOFException();
    }

    // Big-endian decode.
    return (int2Buf[0] & 0xFF) << 8 | int2Buf[1] & 0xFF;
  }
  /**
   * Receives a fixed-size string from the backend.
   *
   * @param len the length of the string to receive, in bytes.
   * @return the decoded string
   * @throws IOException if something wrong happens
   */
  public String receiveString(int len) throws IOException {
    if (!pgInput.ensureBytes(len)) {
      throw new EOFException();
    }

    // Decode straight out of the input buffer, then consume the bytes.
    String res = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
    pgInput.skip(len);
    return res;
  }

  /**
   * Receives a fixed-size string from the backend, and tries to avoid "UTF-8 decode failed"
   * errors.
   *
   * @param len the length of the string to receive, in bytes.
   * @return the decoded string
   * @throws IOException if something wrong happens
   */
  public EncodingPredictor.DecodeResult receiveErrorString(int len) throws IOException {
    if (!pgInput.ensureBytes(len)) {
      throw new EOFException();
    }

    EncodingPredictor.DecodeResult res;
    try {
      String value = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
      // no autodetect warning as the message was converted on its own
      res = new EncodingPredictor.DecodeResult(value, null);
    } catch (IOException e) {
      // The connection encoding failed; try to guess the encoding of the error text.
      res = EncodingPredictor.decode(pgInput.getBuffer(), pgInput.getIndex(), len, logger);
      if (res == null) {
        // Last resort: decode with the fallback encoding and report which one was used.
        Encoding enc = Encoding.defaultEncoding();
        String value = enc.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
        res = new EncodingPredictor.DecodeResult(value, enc.name());
      }
    }
    pgInput.skip(len);
    return res;
  }

  /**
   * Receives a null-terminated string from the backend. If we don't see a null, then we assume
   * something has gone wrong.
   *
   * @return string from back end
   * @throws IOException if an I/O error occurs, or end of file
   */
  public String receiveString() throws IOException {
    int len = pgInput.scanCStringLength();
    // len includes the terminating NUL; decode without it, then skip past it.
    String res = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len - 1);
    pgInput.skip(len);
    return res;
  }
  /**
   * Read a tuple from the back end. A tuple is a two dimensional array of bytes. This variant reads
   * the V3 protocol's tuple representation (a DataRow message).
   *
   * @return tuple from the back end
   * @throws IOException if a data I/O error occurs
   * @throws OutOfMemoryError if a column buffer could not be allocated
   * @throws SQLException if read more bytes than set maxResultBuffer
   */
  public Tuple receiveTupleV3() throws IOException, OutOfMemoryError, SQLException {
    int messageSize = receiveInteger4(); // MESSAGE SIZE
    int nf = receiveInteger2();
    //size = messageSize - 4 bytes of message size - 2 bytes of field count - 4 bytes for each column length
    int dataToReadSize = messageSize - 4 - 2 - 4 * nf;
    byte[][] answer = new byte[nf][];
    // Enforce the maxResultBuffer limit before allocating the column data.
    increaseByteCounter(dataToReadSize);
    OutOfMemoryError oom = null;
    for (int i = 0; i < nf; ++i) {
      int size = receiveInteger4();
      // A column length of -1 marks a SQL NULL; leave answer[i] as null.
      if (size != -1) {
        try {
          answer[i] = new byte[size];
          receive(answer[i], 0, size);
        } catch (OutOfMemoryError oome) {
          // Remember the failure but keep consuming the row so the protocol
          // stream stays in sync; rethrow once the whole message is read.
          oom = oome;
          skip(size);
        }
      }
    }
    if (oom != null) {
      throw oom;
    }
    return new Tuple(answer, dataToReadSize);
  }
/**
* Reads in a given number of bytes from the backend.
*
* @param siz number of bytes to read
* @return array of bytes received
* @throws IOException if a data I/O error occurs
*/
public byte[] receive(int siz) throws IOException {
byte[] answer = new byte[siz];
receive(answer, 0, siz);
return answer;
}
/**
* Reads in a given number of bytes from the backend.
*
* @param buf buffer to store result
* @param off offset in buffer
* @param siz number of bytes to read
* @throws IOException if a data I/O error occurs
*/
public void receive(byte[] buf, int off, int siz) throws IOException {
int s = 0;
while (s < siz) {
int w = pgInput.read(buf, off + s, siz - s);
if (w < 0) {
throw new EOFException();
}
s += w;
}
}
public void skip(int size) throws IOException {
long s = 0;
while (s < size) {
s += pgInput.skip(size - s);
}
}
  /**
   * Copy data from an input stream to the connection.
   *
   * @param inStream the stream to read data from
   * @param remaining the number of bytes to copy
   * @throws IOException if a data I/O error occurs
   */
  public void sendStream(InputStream inStream, int remaining) throws IOException {
    int expectedLength = remaining;
    if (streamBuffer == null) {
      streamBuffer = new byte[8192];
    }
    while (remaining > 0) {
      int count = (remaining > streamBuffer.length ? streamBuffer.length : remaining);
      int readCount;
      try {
        readCount = inStream.read(streamBuffer, 0, count);
        if (readCount < 0) {
          throw new EOFException(
              GT.tr("Premature end of input stream, expected {0} bytes, but only read {1}.",
                  expectedLength, expectedLength - remaining));
        }
      } catch (IOException ioe) {
        // The backend has already been told to expect 'remaining' more bytes.
        // Pad the wire with the current (stale) buffer contents so the
        // connection stays usable, then surface the original read failure.
        while (remaining > 0) {
          send(streamBuffer, count);
          remaining -= count;
          count = (remaining > streamBuffer.length ? streamBuffer.length : remaining);
        }
        throw new RedshiftBindException(ioe);
      }
      send(streamBuffer, readCount);
      remaining -= readCount;
    }
  }
  /**
   * Flush any pending output to the backend. The encoding writer is flushed
   * first so its buffered characters reach pgOutput before pgOutput itself
   * is flushed to the socket.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override
  public void flush() throws IOException {
    if (encodingWriter != null) {
      encodingWriter.flush();
    }
    pgOutput.flush();
  }
/**
* Consume an expected EOF from the backend.
*
* @throws IOException if an I/O error occurs
* @throws SQLException if we get something other than an EOF
*/
public void receiveEOF() throws SQLException, IOException {
int c = pgInput.read();
if (c < 0) {
return;
}
throw new RedshiftException(GT.tr("Expected an EOF from server, got: {0}", c),
RedshiftState.COMMUNICATION_ERROR);
}
  /**
   * Closes the connection. Closes the character writer and both stream
   * wrappers before closing the underlying socket.
   *
   * @throws IOException if an I/O Error occurs
   */
  @Override
  public void close() throws IOException {
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.INFO, "Stream on a connected socket closed");
    if (encodingWriter != null) {
      encodingWriter.close();
    }
    pgOutput.close();
    pgInput.close();
    connection.close();
  }
  /**
   * Sets the socket read timeout and records on the input stream whether a
   * timeout was requested (0 disables the timeout).
   *
   * @param milliseconds timeout in milliseconds; 0 means no timeout
   * @throws IOException if the socket option cannot be set
   */
  public void setNetworkTimeout(int milliseconds) throws IOException {
    connection.setSoTimeout(milliseconds);
    pgInput.setTimeoutRequested(milliseconds != 0);
  }
  /**
   * Returns the current socket read timeout in milliseconds (0 = no timeout).
   *
   * @return the socket SO_TIMEOUT value
   * @throws IOException if the socket option cannot be read
   */
  public int getNetworkTimeout() throws IOException {
    return connection.getSoTimeout();
  }
  /**
   * Method to set MaxResultBuffer inside RedshiftStream.
   *
   * @param value value of new max result buffer as string (cause we can expect % or chars to use
   *        multiplier)
   * @throws RedshiftException exception returned when occurred parsing problem.
   */
  public void setMaxResultBuffer(String value) throws RedshiftException {
    maxResultBuffer = RedshiftPropertyMaxResultBufferParser.parseProperty(value, RedshiftProperty.MAX_RESULT_BUFFER.getName());
  }
  /**
   * Method to clear count of byte buffer. Called when a result set has been
   * fully consumed so the next one starts counting from zero.
   */
  public void clearResultBufferCount() {
    resultBufferByteCount = 0;
  }
  /**
   * Method to increase actual count of buffer. If buffer count is bigger than max result buffer
   * limit, then gonna return an exception.
   *
   * @param value size of bytes to add to byte buffer.
   * @throws SQLException exception returned when result buffer count is bigger than max result
   *         buffer.
   */
  private void increaseByteCounter(long value) throws SQLException {
    // maxResultBuffer == -1 means the limit is disabled.
    if (maxResultBuffer != -1) {
      resultBufferByteCount += value;
      if (resultBufferByteCount > maxResultBuffer) {
        throw new RedshiftException(GT.tr(
            "Result set exceeded maxResultBuffer limit. Received:  {0}; Current limit: {1}",
            String.valueOf(resultBufferByteCount), String.valueOf(maxResultBuffer)),RedshiftState.COMMUNICATION_ERROR);
      }
    }
  }
  /**
   * Reports whether the underlying socket has been closed.
   *
   * @return true if the socket is closed
   */
  public boolean isClosed() {
    return connection.isClosed();
  }
}
| 8,329 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ParameterList.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import com.amazon.redshift.util.ByteStreamWriter;
import java.io.InputStream;
import java.sql.SQLException;
/**
* <p>Abstraction of a list of parameters to be substituted into a Query. The protocol-specific details
* of how to efficiently store and stream the parameters is hidden behind implementations of this
* interface.</p>
*
* <p>In general, instances of ParameterList are associated with a particular Query object (the one
* that created them) and shouldn't be used against another Query.</p>
*
* <p>Parameter indexes are 1-based to match JDBC's PreparedStatement, i.e. the first parameter has
* index 1.</p>
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
public interface ParameterList {
  /**
   * Registers the parameter at the given index as an OUT parameter.
   *
   * @param index the 1-based parameter index to register.
   * @param sqlType the JDBC type of the OUT parameter.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void registerOutParameter(int index, int sqlType) throws SQLException;
  /**
   * Get the number of parameters in this list. This value never changes for a particular instance,
   * and might be zero.
   *
   * @return the number of parameters in this list.
   */
  int getParameterCount();
  /**
   * Get the number of IN parameters in this list.
   *
   * @return the number of IN parameters in this list
   */
  int getInParameterCount();
  /**
   * Get the number of OUT parameters in this list.
   *
   * @return the number of OUT parameters in this list
   */
  int getOutParameterCount();
  /**
   * Return the oids of the parameters in this list. May be null for a ParameterList that does not
   * support typing of parameters.
   *
   * @return oids of the parameters
   */
  int[] getTypeOIDs();
  /**
   * Binds an integer value to a parameter. The type of the parameter is implicitly 'int4'.
   *
   * @param index the 1-based parameter index to bind.
   * @param value the integer value to use.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setIntParameter(int index, int value) throws SQLException;
  /**
   * Binds a String value that is an unquoted literal to the server's query parser (for example, a
   * bare integer) to a parameter. Associated with the parameter is a typename for the parameter
   * that should correspond to an entry in pg_types.
   *
   * @param index the 1-based parameter index to bind.
   * @param value the unquoted literal string to use.
   * @param oid the type OID of the parameter, or <code>0</code> to infer the type.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setLiteralParameter(int index, String value, int oid) throws SQLException;
  /**
   * Binds a String value that needs to be quoted for the server's parser to understand (for
   * example, a timestamp) to a parameter. Associated with the parameter is a typename for the
   * parameter that should correspond to an entry in pg_types.
   *
   * @param index the 1-based parameter index to bind.
   * @param value the quoted string to use.
   * @param oid the type OID of the parameter, or <code>0</code> to infer the type.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setStringParameter(int index, String value, int oid) throws SQLException;
  /**
   * Binds a binary bytea value stored as a bytearray to a parameter. The parameter's type is
   * implicitly set to 'bytea'. The bytearray's contains should remain unchanged until query
   * execution has completed.
   *
   * @param index the 1-based parameter index to bind.
   * @param data an array containing the raw data value
   * @param offset the offset within <code>data</code> of the start of the parameter data.
   * @param length the number of bytes of parameter data within <code>data</code> to use.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setBytea(int index, byte[] data, int offset, int length) throws SQLException;
  /**
   * Proprietary bind method for VARBYTE datatype.
   *
   * @param index the 1-based parameter index to bind.
   * @param data an array containing the raw data value
   * @param offset the offset within <code>data</code> of the start of the parameter data.
   * @param length the number of bytes of parameter data within <code>data</code> to use.
   * @throws SQLException SQLException on error or if <code>index</code> is out of range
   */
  void setVarbyte(int index, byte[] data, int offset, int length) throws SQLException;
  /**
   * Proprietary bind method for GEOGRAPHY datatype.
   *
   * @param index the 1-based parameter index to bind.
   * @param data an array containing the raw data value
   * @param offset the offset within <code>data</code> of the start of the parameter data.
   * @param length the number of bytes of parameter data within <code>data</code> to use.
   * @throws SQLException SQLException on error or if <code>index</code> is out of range
   */
  void setGeography(int index, byte[] data, int offset, int length) throws SQLException;
  /**
   * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to
   * 'bytea'. The stream should remain valid until query execution has completed.
   *
   * @param index the 1-based parameter index to bind.
   * @param stream a stream containing the parameter data.
   * @param length the number of bytes of parameter data to read from <code>stream</code>.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setBytea(int index, InputStream stream, int length) throws SQLException;
  /**
   * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to
   * 'bytea'. The stream should remain valid until query execution has completed.
   *
   * @param index the 1-based parameter index to bind.
   * @param stream a stream containing the parameter data.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setBytea(int index, InputStream stream) throws SQLException;
  /**
   * Binds a binary bytea value stored as a ByteStreamWriter. The parameter's type is implicitly set to
   * 'bytea'. The stream should remain valid until query execution has completed.
   *
   * @param index the 1-based parameter index to bind.
   * @param writer a writer that can write the bytes for the parameter
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setBytea(int index, ByteStreamWriter writer) throws SQLException;
  /**
   * Binds a text value stored as an InputStream that is a valid UTF-8 byte stream.
   * Any byte-order marks (BOM) in the stream are passed to the backend.
   * The parameter's type is implicitly set to 'text'.
   * The stream should remain valid until query execution has completed.
   *
   * @param index the 1-based parameter index to bind.
   * @param stream a stream containing the parameter data.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setText(int index, InputStream stream) throws SQLException;
  /**
   * Binds given byte[] value to a parameter. The bytes must already be in correct format matching
   * the OID.
   *
   * @param index the 1-based parameter index to bind.
   * @param value the bytes to send.
   * @param oid the type OID of the parameter.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setBinaryParameter(int index, byte[] value, int oid) throws SQLException;
  /**
   * Binds a SQL NULL value to a parameter. Associated with the parameter is a typename for the
   * parameter that should correspond to an entry in pg_types.
   *
   * @param index the 1-based parameter index to bind.
   * @param oid the type OID of the parameter, or <code>0</code> to infer the type.
   * @throws SQLException on error or if <code>index</code> is out of range
   */
  void setNull(int index, int oid) throws SQLException;
  /**
   * Perform a shallow copy of this ParameterList, returning a new instance (still suitable for
   * passing to the owning Query). If this ParameterList is immutable, copy() may return the same
   * immutable object.
   *
   * @return a new ParameterList instance
   */
  ParameterList copy();
  /**
   * Unbind all parameter values bound in this list.
   */
  void clear();
  /**
   * Return a human-readable representation of a particular parameter in this ParameterList. If the
   * parameter is not bound, returns "?".
   *
   * @param index the 1-based parameter index to bind.
   * @param standardConformingStrings true if \ is not an escape character in strings literals
   * @return a string representation of the parameter.
   */
  String toString(int index, boolean standardConformingStrings);
  /**
   * Use this operation to append more parameters to the current list.
   * @param list of parameters to append with.
   * @throws SQLException fault raised if driver or back end throw an exception
   */
  void appendAll(ParameterList list) throws SQLException ;
  /**
   * Returns the bound parameter values.
   * @return Object array containing the parameter values.
   */
  Object[] getValues();
}
| 8,330 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ReplicationProtocol.java | /*
* Copyright (c) 2016, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.fluent.logical.LogicalReplicationOptions;
import com.amazon.redshift.replication.fluent.physical.PhysicalReplicationOptions;
import java.sql.SQLException;
/**
* <p>Abstracts the protocol-specific details of physic and logic replication.</p>
*
* <p>With each connection open with replication options associate own instance ReplicationProtocol.</p>
*/
public interface ReplicationProtocol {
  /**
   * Starts a logical replication stream on this connection.
   *
   * @param options not null options for logical replication stream
   * @param logger the logger to log the entry for debugging.
   * @return not null stream instance from which available fetch wal logs that was decode by output
   *         plugin
   * @throws SQLException on error
   */
  RedshiftReplicationStream startLogical(LogicalReplicationOptions options, RedshiftLogger logger) throws SQLException;
  /**
   * Starts a physical replication stream on this connection.
   *
   * @param options not null options for physical replication stream
   * @param logger the logger to log the entry for debugging.
   * @return not null stream instance from which available fetch wal logs
   * @throws SQLException on error
   */
  RedshiftReplicationStream startPhysical(PhysicalReplicationOptions options, RedshiftLogger logger) throws SQLException;
}
| 8,331 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Tuple.java | /*
* Copyright (c) 2020, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.jdbc.RedshiftConnectionImpl;
/**
* Class representing a row in a {@link java.sql.ResultSet}.
*/
public class Tuple {
private final boolean forUpdate;
final byte[][] data;
private final int rowSize;
/**
* Construct an empty tuple. Used in updatable result sets.
* @param length the number of fields in the tuple.
*/
public Tuple(int length) {
this(new byte[length][], true);
}
/**
* Construct a populated tuple. Used when returning results.
* @param data the tuple data
*/
public Tuple(byte[][] data) {
this(data, false);
}
public Tuple(byte[][] data, int rowSize) {
this(data, false, rowSize);
}
private Tuple(byte[][] data, boolean forUpdate) {
this(data, forUpdate, 0);
}
private Tuple(byte[][] data, boolean forUpdate, int rowSize) {
this.data = data;
this.forUpdate = forUpdate;
this.rowSize = rowSize;
}
/**
* Number of fields in the tuple
* @return number of fields
*/
public int fieldCount() {
return data.length;
}
/**
* Total length in bytes of the tuple data.
* @return the number of bytes in this tuple
*/
public int length() {
if (rowSize != 0)
return rowSize;
else {
int length = 0;
for (byte[] field : data) {
if (field != null) {
length += field.length;
}
}
return length;
}
}
/**
* Total size in bytes (including overheads) of this Tuple instance on the heap (estimated)
* @return the estimated number of bytes of heap memory used by this tuple.
*/
public int getTupleSize() {
int rawSize = 0;
int nullFieldCount = 0;
for (byte[] field : data) {
if (field != null) {
rawSize += field.length; // Adding raw data size
} else {
nullFieldCount++; // Count of null fields
}
}
int refSize = RedshiftConnectionImpl.IS_64_BIT_JVM ? 8 : 4;
int arrayHeaderSize = RedshiftConnectionImpl.IS_64_BIT_JVM ? 24 : 16;
int overhead = (RedshiftConnectionImpl.IS_64_BIT_JVM ? 16 : 8) // Tuple object header overhead
+ refSize // Reference to the data array
+ refSize * data.length // Reference to each byte[] array
+ arrayHeaderSize * (data.length - nullFieldCount) // Overhead for each non-null byte[] array header
+ arrayHeaderSize // Overhead for the data array header
+ 5; // (4 + 1) => 4 byte for rowSize (int) and 1 byte for forUpdate (boolean)
return rawSize + overhead;
}
/**
* Get the data for the given field
* @param index 0-based field position in the tuple
* @return byte array of the data
*/
public byte[] get(int index) {
return data[index];
}
/**
* Create a copy of the tuple for updating.
* @return a copy of the tuple that allows updates
*/
public Tuple updateableCopy() {
return copy(true);
}
/**
* Create a read-only copy of the tuple
* @return a copy of the tuple that does not allow updates
*/
public Tuple readOnlyCopy() {
return copy(false);
}
private Tuple copy(boolean forUpdate) {
byte[][] dataCopy = new byte[data.length][];
System.arraycopy(data, 0, dataCopy, 0, data.length);
return new Tuple(dataCopy, forUpdate);
}
/**
* Set the given field to the given data.
* @param index 0-based field position
* @param fieldData the data to set
*/
public void set(int index, byte[] fieldData) {
if (!forUpdate) {
throw new IllegalArgumentException("Attempted to write to readonly tuple");
}
data[index] = fieldData;
}
}
| 8,332 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Provider.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
/**
* Represents a provider of results.
*
* @param <T> the type of results provided by this provider
*/
public interface Provider<T> {
  /**
   * Gets a result.
   *
   * @return a result; nullability is implementation-defined
   */
  T get();
}
| 8,333 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/QueryExecutor.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftNotification;
import com.amazon.redshift.copy.CopyOperation;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
import com.amazon.redshift.core.v3.TypeTransferModeRegistry;
import com.amazon.redshift.jdbc.AutoSave;
import com.amazon.redshift.jdbc.BatchResultHandler;
import com.amazon.redshift.jdbc.EscapeSyntaxCallMode;
import com.amazon.redshift.jdbc.PreferQueryMode;
import com.amazon.redshift.util.HostSpec;
import java.io.IOException;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
/**
* <p>Abstracts the protocol-specific details of executing a query.</p>
*
* <p>Every connection has a single QueryExecutor implementation associated with it. This object
* provides:</p>
*
* <ul>
* <li>factory methods for Query objects ({@link #createSimpleQuery(String)} and
* {@link #createQuery(String, boolean, boolean, String...)})
* <li>execution methods for created Query objects (
* {@link #execute(Query, ParameterList, ResultHandler, int, int, int)} for single queries and
* {@link #execute(Query[], ParameterList[], BatchResultHandler, int, int, int)} for batches of queries)
* <li>a fastpath call interface ({@link #createFastpathParameters} and {@link #fastpathCall}).
* </ul>
*
* <p>Query objects may represent a query that has parameter placeholders. To provide actual values for
* these parameters, a {@link ParameterList} object is created via a factory method (
* {@link Query#createParameterList}). The parameters are filled in by the caller and passed along
* with the query to the query execution methods. Several ParameterLists for a given query might
* exist at one time (or over time); this allows the underlying Query to be reused for several
* executions, or for batch execution of the same Query.</p>
*
* <p>In general, a Query created by a particular QueryExecutor may only be executed by that
* QueryExecutor, and a ParameterList created by a particular Query may only be used as parameters
* to that Query. Unpredictable things will happen if this isn't done.</p>
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
public interface QueryExecutor extends TypeTransferModeRegistry {
/**
* Flag for query execution that indicates the given Query object is unlikely to be reused.
*/
int QUERY_ONESHOT = 1;
/**
* Flag for query execution that indicates that resultset metadata isn't needed and can be safely
* omitted.
*/
int QUERY_NO_METADATA = 2;
/**
* Flag for query execution that indicates that a resultset isn't expected and the query executor
* can safely discard any rows (although the resultset should still appear to be from a
* resultset-returning query).
*/
int QUERY_NO_RESULTS = 4;
/**
* Flag for query execution that indicates a forward-fetch-capable cursor should be used if
* possible.
*/
int QUERY_FORWARD_CURSOR = 8;
/**
* Flag for query execution that indicates the automatic BEGIN on the first statement when outside
* a transaction should not be done.
*/
int QUERY_SUPPRESS_BEGIN = 16;
/**
* Flag for query execution when we don't really want to execute, we just want to get the
* parameter metadata for the statement.
*/
int QUERY_DESCRIBE_ONLY = 32;
/**
* Flag for query execution used by generated keys where we want to receive both the ResultSet and
* associated update count from the command status.
*/
int QUERY_BOTH_ROWS_AND_STATUS = 64;
/**
* Force this query to be described at each execution. This is done in pipelined batches where we
* might need to detect mismatched result types.
*/
int QUERY_FORCE_DESCRIBE_PORTAL = 512;
/**
* Flag to disable batch execution when we expect results (generated keys) from a statement.
*
* @deprecated in PgJDBC 9.4 as we now auto-size batches.
*/
@Deprecated
int QUERY_DISALLOW_BATCHING = 128;
/**
* Flag for query execution to avoid using binary transfer.
*/
int QUERY_NO_BINARY_TRANSFER = 256;
/**
* Execute the query via simple 'Q' command (not parse, bind, exec, but simple execute).
* This sends query text on each execution, however it supports sending multiple queries
* separated with ';' as a single command.
*/
int QUERY_EXECUTE_AS_SIMPLE = 1024;
int MAX_SAVE_POINTS = 1000;
/**
* Flag indicating that when beginning a transaction, it should be read only.
*/
int QUERY_READ_ONLY_HINT = 2048;
/**
* Execute a Query, passing results to a provided ResultHandler.
*
* @param query the query to execute; must be a query returned from calling
* {@link #wrap(List)} on this QueryExecutor object.
* @param parameters the parameters for the query. Must be non-<code>null</code> if the query
* takes parameters. Must be a parameter object returned by
* {@link com.amazon.redshift.core.Query#createParameterList()}.
* @param handler a ResultHandler responsible for handling results generated by this query
* @param maxRows the maximum number of rows to retrieve
* @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
* before suspending
* @param flags a combination of QUERY_* flags indicating how to handle the query.
* @throws SQLException if query execution fails
*/
void execute(Query query, ParameterList parameters, ResultHandler handler, int maxRows,
int fetchSize, int flags) throws SQLException;
/**
* Execute several Query, passing results to a provided ResultHandler.
*
* @param queries the queries to execute; each must be a query returned from calling
* {@link #wrap(List)} on this QueryExecutor object.
* @param parameterLists the parameter lists for the queries. The parameter lists correspond 1:1
* to the queries passed in the <code>queries</code> array. Each must be non-
* <code>null</code> if the corresponding query takes parameters, and must be a parameter
* object returned by {@link com.amazon.redshift.core.Query#createParameterList()} created by
* the corresponding query.
* @param handler a ResultHandler responsible for handling results generated by this query
* @param maxRows the maximum number of rows to retrieve
* @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
* before suspending
* @param flags a combination of QUERY_* flags indicating how to handle the query.
* @throws SQLException if query execution fails
*/
void execute(Query[] queries, ParameterList[] parameterLists, BatchResultHandler handler, int maxRows,
int fetchSize, int flags) throws SQLException;
/**
* Fetch additional rows from a cursor.
*
* @param cursor the cursor to fetch from
* @param handler the handler to feed results to
* @param fetchSize the preferred number of rows to retrieve before suspending
* @param initRowCount the number of rows already fetched
* @throws SQLException if query execution fails
*/
void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize, int initRowCount) throws SQLException;
/**
* Create an unparameterized Query object suitable for execution by this QueryExecutor. The
* provided query string is not parsed for parameter placeholders ('?' characters), and the
* {@link Query#createParameterList} of the returned object will always return an empty
* ParameterList.
*
* @param sql the SQL for the query to create
* @return a new Query object
* @throws SQLException if something goes wrong
*/
Query createSimpleQuery(String sql) throws SQLException;
  /**
   * @return true if batched INSERT statements may be rewritten into multi-value form.
   */
  boolean isReWriteBatchedInsertsEnabled();
  /**
   * Creates a cached query for the given SQL.
   *
   * @param sql the SQL text
   * @param escapeProcessing if JDBC escape syntax should be processed
   * @param isParameterized if '?' placeholders should be treated as bind parameters
   * @param columnNames generated-key column names, if any
   * @return the cached query
   * @throws SQLException if the query cannot be created
   */
  CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
      String... columnNames)
      throws SQLException;
  /**
   * Builds the cache key that identifies a query created with the same arguments.
   *
   * @param sql the SQL text
   * @param escapeProcessing if JDBC escape syntax should be processed
   * @param isParameterized if '?' placeholders should be treated as bind parameters
   * @param columnNames generated-key column names, if any
   * @return an opaque cache key for use with {@link #createQueryByKey(Object)}
   */
  Object createQueryKey(String sql, boolean escapeProcessing, boolean isParameterized,
      String... columnNames);
  /**
   * Creates a query for the given cache key without borrowing from the cache.
   *
   * @param key a key produced by {@link #createQueryKey}
   * @return the created query
   * @throws SQLException if the query cannot be created
   */
  CachedQuery createQueryByKey(Object key) throws SQLException;
  /**
   * Borrows a cached query by key; return it via {@link #releaseQuery(CachedQuery)}.
   *
   * @param key a key produced by {@link #createQueryKey}
   * @return the borrowed query
   * @throws SQLException if the query cannot be created
   */
  CachedQuery borrowQueryByKey(Object key) throws SQLException;
  /**
   * Borrows a cached query for plain SQL; return it via {@link #releaseQuery(CachedQuery)}.
   *
   * @param sql the SQL text
   * @return the borrowed query
   * @throws SQLException if the query cannot be created
   */
  CachedQuery borrowQuery(String sql) throws SQLException;
  /**
   * Borrows a cached query for JDBC {@code {call ...}} syntax.
   *
   * @param sql the SQL text
   * @return the borrowed query
   * @throws SQLException if the query cannot be created
   */
  CachedQuery borrowCallableQuery(String sql) throws SQLException;
  /**
   * Borrows a cached query that returns generated keys for the given columns.
   *
   * @param sql the SQL text
   * @param columnNames generated-key column names
   * @return the borrowed query
   * @throws SQLException if the query cannot be created
   */
  CachedQuery borrowReturningQuery(String sql, String[] columnNames) throws SQLException;
  /**
   * Returns a borrowed query to the cache.
   *
   * @param cachedQuery the query previously borrowed
   */
  void releaseQuery(CachedQuery cachedQuery);
/**
* Wrap given native query into a ready for execution format.
* @param queries list of queries in native to database syntax
* @return query object ready for execution by this query executor
*/
Query wrap(List<NativeQuery> queries);
/**
* Prior to attempting to retrieve notifications, we need to pull any recently received
* notifications off of the network buffers. The notification retrieval in ProtocolConnection
* cannot do this as it is prone to deadlock, so the higher level caller must be responsible which
* requires exposing this method.
*
* @throws SQLException if and error occurs while fetching notifications
*/
void processNotifies() throws SQLException;
/**
* Prior to attempting to retrieve notifications, we need to pull any recently received
* notifications off of the network buffers. The notification retrieval in ProtocolConnection
* cannot do this as it is prone to deadlock, so the higher level caller must be responsible which
* requires exposing this method. This variant supports blocking for the given time in millis.
*
* @param timeoutMillis number of milliseconds to block for
* @throws SQLException if and error occurs while fetching notifications
*/
void processNotifies(int timeoutMillis) throws SQLException;
//
// Fastpath interface.
//
/**
* Create a new ParameterList implementation suitable for invoking a fastpath function via
* {@link #fastpathCall}.
*
* @param count the number of parameters the fastpath call will take
* @return a ParameterList suitable for passing to {@link #fastpathCall}.
* @deprecated This API is somewhat obsolete, as one may achieve similar performance
* and greater functionality by setting up a prepared statement to define
* the function call. Then, executing the statement with binary transmission of parameters
* and results substitutes for a fast-path function call.
*/
@Deprecated
ParameterList createFastpathParameters(int count);
/**
* Invoke a backend function via the fastpath interface.
*
* @param fnid the OID of the backend function to invoke
* @param params a ParameterList returned from {@link #createFastpathParameters} containing the
* parameters to pass to the backend function
* @param suppressBegin if begin should be suppressed
* @return the binary-format result of the fastpath call, or <code>null</code> if a void result
* was returned
* @throws SQLException if an error occurs while executing the fastpath call
* @deprecated This API is somewhat obsolete, as one may achieve similar performance
* and greater functionality by setting up a prepared statement to define
* the function call. Then, executing the statement with binary transmission of parameters
* and results substitutes for a fast-path function call.
*/
@Deprecated
byte[] fastpathCall(int fnid, ParameterList params, boolean suppressBegin) throws SQLException;
/**
* Issues a COPY FROM STDIN / COPY TO STDOUT statement and returns handler for associated
* operation. Until the copy operation completes, no other database operation may be performed.
* Implemented for protocol version 3 only.
*
* @param sql input sql
* @param suppressBegin if begin should be suppressed
* @return handler for associated operation
* @throws SQLException when initializing the given query fails
*/
CopyOperation startCopy(String sql, boolean suppressBegin) throws SQLException;
/**
* @return the version of the implementation
*/
int getProtocolVersion();
/**
* Sets the oids that should be received using binary encoding.
*
* @param useBinaryForOids The oids to request with binary encoding.
*/
void setBinaryReceiveOids(Set<Integer> useBinaryForOids);
/**
* Sets the oids that should be sent using binary encoding.
*
* @param useBinaryForOids The oids to send with binary encoding.
*/
void setBinarySendOids(Set<Integer> useBinaryForOids);
/**
* Returns true if server uses integer instead of double for binary date and time encodings.
*
* @return the server integer_datetime setting.
*/
boolean getIntegerDateTimes();
/**
* @return the host and port this connection is connected to.
*/
HostSpec getHostSpec();
/**
* @return the user this connection authenticated as.
*/
String getUser();
/**
* @return the database this connection is connected to.
*/
String getDatabase();
/**
* Sends a query cancellation for this connection.
*
* @throws SQLException if something goes wrong.
*/
void sendQueryCancel() throws SQLException;
/**
* Return the process ID (PID) of the backend server process handling this connection.
*
* @return process ID (PID) of the backend server process handling this connection
*/
int getBackendPID();
/**
* Abort at network level without sending the Terminate message to the backend.
*/
void abort();
/**
* Close this connection cleanly.
*/
void close();
/**
* Check if this connection is closed.
*
* @return true iff the connection is closed.
*/
boolean isClosed();
/**
* <p>Return the server version from the server_version GUC.</p>
*
* <p>Note that there's no requirement for this to be numeric or of the form x.y.z. Redshift
* development releases usually have the format x.ydevel e.g. 9.4devel; betas usually x.ybetan
* e.g. 9.4beta1. The --with-extra-version configure option may add an arbitrary string to this.</p>
*
* <p>Don't use this string for logic, only use it when displaying the server version to the user.
* Prefer getServerVersionNum() for all logic purposes.</p>
*
* @return the server version string from the server_version guc
*/
String getServerVersion();
  /**
   * Redshift supports different protocol versions to accommodate
   * new requests for optimization and better performance.
   *
   * <p>0 means base protocol version;
   * 1 means extended resultset metadata protocol version.</p>
   *
   * @return the server protocol version.
   */
  int getServerProtocolVersion();
/**
* Get the server capability for datashare database query support.
* This should enable for data sharing (either producer/consumer).
*
* @return true means datashare database query is supported otherwise false.
*/
boolean isDatashareEnabled();
  /**
   * Retrieve and clear the set of asynchronous notifications pending on this connection.
   *
   * @return an array of notifications; if there are no notifications, an empty array is returned.
   * @throws SQLException if an error occurs while fetching notifications
   */
  RedshiftNotification[] getNotifications() throws SQLException;
/**
* Retrieve and clear the chain of warnings accumulated on this connection.
*
* @return the first SQLWarning in the chain; subsequent warnings can be found via
* SQLWarning.getNextWarning().
*/
SQLWarning getWarnings();
/**
* <p>Get a machine-readable server version.</p>
*
* <p>This returns the value of the server_version_num GUC. If no such GUC exists, it falls back on
* attempting to parse the text server version for the major version. If there's no minor version
* (e.g. a devel or beta release) then the minor version is set to zero. If the version could not
* be parsed, zero is returned.</p>
*
* @return the server version in numeric XXYYZZ form, eg 090401, from server_version_num
*/
int getServerVersionNum();
/**
* Get the current transaction state of this connection.
*
* @return a ProtocolConnection.TRANSACTION_* constant.
*/
TransactionState getTransactionState();
/**
* Returns whether the server treats string-literals according to the SQL standard or if it uses
* traditional Redshift escaping rules. Versions up to 8.1 always treated backslashes as escape
* characters in string-literals. Since 8.2, this depends on the value of the
* {@code standard_conforming_strings} server variable.
*
* @return true if the server treats string literals according to the SQL standard
*/
boolean getStandardConformingStrings();
/**
* Returns backend timezone in java format.
* @return backend timezone in java format.
*/
TimeZone getTimeZone();
/**
* @return the current encoding in use by this connection
*/
Encoding getEncoding();
/**
* Returns application_name connection property.
* @return application_name connection property
*/
String getApplicationName();
boolean isColumnSanitiserDisabled();
EscapeSyntaxCallMode getEscapeSyntaxCallMode();
PreferQueryMode getPreferQueryMode();
AutoSave getAutoSave();
void setAutoSave(AutoSave autoSave);
boolean willHealOnRetry(SQLException e);
boolean isRaiseExceptionOnSilentRollback();
void setRaiseExceptionOnSilentRollback(boolean raiseExceptionOnSilentRollback);
/**
* By default, the connection resets statement cache in case deallocate all/discard all
* message is observed.
* This API allows to disable that feature for testing purposes.
*
* @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
*/
void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);
/**
* @return the ReplicationProtocol instance for this connection.
*/
ReplicationProtocol getReplicationProtocol();
void setNetworkTimeout(int milliseconds) throws IOException;
int getNetworkTimeout() throws IOException;
// Expose parameter status to RedshiftConnection
Map<String,String> getParameterStatuses();
String getParameterStatus(String parameterName);
/**
* Close the last active ring buffer thread.
*
* @param queueRows the blocking queue rows
* @param ringBufferThread the thread fetching rows in the blocking queue.
*/
void closeRingBufferThread(RedshiftRowsBlockingQueue<Tuple> queueRows, Thread ringBufferThread);
/**
* Check for a running ring buffer thread.
*
* @return returns true if Ring buffer thread is running, otherwise false.
*/
boolean isRingBufferThreadRunning();
void waitForRingBufferThreadToFinish(boolean calledFromConnectionClose,
boolean calledFromResultsetClose,
boolean calledFromStatementClose,
RedshiftRowsBlockingQueue<Tuple> queueRows,
Thread ringBufferThread);
/**
* Close the statement and portal when statement get closed.
*/
void closeStatementAndPortal();
/**
* Get multiple SQL supports.
*
* @return true, if supported. false otherwise.
*/
boolean isMultiSqlSupport();
}
| 8,334 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/IdpAuthHelper.java | package com.amazon.redshift.core;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.Locale;
import java.util.Properties;
import java.util.Map.Entry;
import com.amazon.redshift.AuthMech;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.jdbc.RedshiftConnectionImpl;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.utils.RequestUtils;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftProperties;
import com.amazon.redshift.util.RedshiftState;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.services.redshift.AmazonRedshift;
import com.amazonaws.services.redshift.AmazonRedshiftClientBuilder;
import com.amazonaws.services.redshift.model.DescribeAuthenticationProfilesRequest;
import com.amazonaws.services.redshift.model.DescribeAuthenticationProfilesResult;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.json.Jackson;
import com.fasterxml.jackson.databind.JsonNode;
public class IdpAuthHelper {
// Subtype of plugin
public static final int SAML_PLUGIN = 1;
public static final int JWT_PLUGIN = 2;
public static final int IDC_PLUGIN = 3;
protected IdpAuthHelper() {
}
protected static RedshiftProperties setAuthProperties(RedshiftProperties info, RedshiftJDBCSettings settings, RedshiftLogger log)
throws RedshiftException {
try {
// Plugin requires an SSL connection to work. Make sure that m_authMech is
// set to
// SSL level VERIFY_CA or higher.
if (settings.m_authMech == null || settings.m_authMech.ordinal() < AuthMech.VERIFY_CA.ordinal()) {
settings.m_authMech = AuthMech.VERIFY_CA;
}
// Check for IAM keys and AuthProfile first
String iamAccessKey = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.IAM_ACCESS_KEY_ID.getName(),
info);
String iamSecretKey = RedshiftConnectionImpl
.getOptionalConnSetting(RedshiftProperty.IAM_SECRET_ACCESS_KEY.getName(), info);
String iamSessionToken = RedshiftConnectionImpl
.getOptionalConnSetting(RedshiftProperty.IAM_SESSION_TOKEN.getName(), info);
String authProfile = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AUTH_PROFILE.getName(), info);
if (!StringUtils.isNullOrEmpty(authProfile)) {
if (!StringUtils.isNullOrEmpty(iamAccessKey)) {
RedshiftProperties authProfileProps = readAuthProfile(authProfile, iamAccessKey, iamSecretKey, iamSessionToken, log,
info);
if (authProfileProps != null) {
// Merge auth profile props with user props.
// User props overrides auth profile props
authProfileProps.putAll(info);
info = authProfileProps;
}
} else {
// Auth profile specified but IAM keys are not
RedshiftException err = new RedshiftException(
GT.tr("Dependent connection property setting for {0} is missing {1}",
RedshiftProperty.AUTH_PROFILE.getName(), RedshiftProperty.IAM_ACCESS_KEY_ID.getName()),
RedshiftState.UNEXPECTED_ERROR);
if (RedshiftLogger.isEnable())
log.log(LogLevel.ERROR, err.toString());
throw err;
}
} // AuthProfile
String userName = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.UID.getName(), info);
if (userName == null)
userName = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.USER.getName(), info);
String password = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.PWD.getName(), info);
if (password == null)
password = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.PASSWORD.getName(), info);
String iamCredentialProvider = RedshiftConnectionImpl
.getOptionalConnSetting(RedshiftProperty.CREDENTIALS_PROVIDER.getName(), info);
String iamDisableCache = RedshiftConnectionImpl
.getOptionalConnSetting(RedshiftProperty.IAM_DISABLE_CACHE.getName(), info);
if (null != userName) {
settings.m_username = userName;
}
if (null != password) {
settings.m_password = password;
}
if (null != iamCredentialProvider) {
settings.m_credentialsProvider = iamCredentialProvider;
}
settings.m_iamDisableCache = iamDisableCache == null ? false : Boolean.valueOf(iamDisableCache);
Enumeration<String> enums = (Enumeration<String>) info.propertyNames();
while (enums.hasMoreElements()) {
// The given properties are String pairs, so this should be OK.
String key = enums.nextElement();
String value = info.getProperty(key);
if (!"*".equals(value)) {
settings.m_pluginArgs.put(key, value);
}
}
}
catch (RedshiftException re) {
if (RedshiftLogger.isEnable())
log.logError(re);
throw re;
}
return info;
}
/*
* Response format like: "{ " + " \"AuthenticationProfiles\": [ " + " {" +
* " \"AuthenticationProfileName\":\"ExampleProfileName\", " +
* " \"AuthenticationProfileContent\":\"{" +
* " \\\"AllowDBUserOverride\\\": \\\"1\\\", " +
* " \\\"databaseMetadataCurrentDbOnly\\\": \\\"true\\\" " + " }\" " + " }" +
* "] " + " } ";
*/
private static RedshiftProperties readAuthProfile(String authProfile, String iamAccessKeyID, String iamSecretKey,
String iamSessionToken, RedshiftLogger log, RedshiftProperties info) throws RedshiftException {
RedshiftProperties authProfileProps = null;
AWSCredentials credentials;
String awsRegion = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AWS_REGION.getName(), info);
String endpointUrl = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.ENDPOINT_URL.getName(), info);
if (!StringUtils.isNullOrEmpty(iamSessionToken)) {
credentials = new BasicSessionCredentials(iamAccessKeyID, iamSecretKey, iamSessionToken);
} else {
credentials = new BasicAWSCredentials(iamAccessKeyID, iamSecretKey);
}
AWSCredentialsProvider provider = new AWSStaticCredentialsProvider(credentials);
AmazonRedshiftClientBuilder builder = AmazonRedshiftClientBuilder.standard();
ClientConfiguration clientConfig = RequestUtils.getProxyClientConfig(log);
if (clientConfig != null) {
builder.setClientConfiguration(clientConfig);
}
if (endpointUrl != null) {
EndpointConfiguration cfg = new EndpointConfiguration(endpointUrl, awsRegion);
builder.setEndpointConfiguration(cfg);
} else if (awsRegion != null && !awsRegion.isEmpty()) {
builder.setRegion(awsRegion);
}
AmazonRedshift client = builder.withCredentials(provider).build();
DescribeAuthenticationProfilesRequest request = new DescribeAuthenticationProfilesRequest();
request.setAuthenticationProfileName(authProfile);
DescribeAuthenticationProfilesResult result = client.describeAuthenticationProfiles(request);
String profileContent = result.getAuthenticationProfiles().get(0).getAuthenticationProfileContent();
authProfileProps = new RedshiftProperties(info);
JsonNode profileJson = Jackson.jsonNodeOf(profileContent);
if (profileJson != null) {
Iterator<Entry<String, JsonNode>> elements = profileJson.fields();
while (elements.hasNext()) {
Entry<String, JsonNode> element = elements.next();
String key = element.getKey();
String val = element.getValue().asText();
authProfileProps.put(key, val);
}
} else {
// Error
RedshiftException err = new RedshiftException(GT.tr("Auth profile JSON error"), RedshiftState.UNEXPECTED_ERROR);
if (RedshiftLogger.isEnable())
log.log(LogLevel.ERROR, err.toString());
throw err;
}
return authProfileProps;
}
}
| 8,335 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Notification.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftNotification;
public class Notification implements RedshiftNotification {
private final String name;
private final String parameter;
private final int pid;
public Notification(String name, int pid) {
this(name, pid, "");
}
public Notification(String name, int pid, String parameter) {
this.name = name;
this.pid = pid;
this.parameter = parameter;
}
/*
* Returns name of this notification
*/
public String getName() {
return name;
}
/*
* Returns the process id of the backend process making this notification
*/
public int getPID() {
return pid;
}
public String getParameter() {
return parameter;
}
}
| 8,336 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/RedshiftJDBCSettings.java | package com.amazon.redshift.core;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.amazon.redshift.AuthMech;
/**
 * Mutable holder of connection settings used across the driver: host/port,
 * authentication mechanism, credentials, and IAM / IDP / serverless options.
 * Fields are public by design; this type is an internal settings bag, not an API.
 */
public class RedshiftJDBCSettings
{
    /*
     * Static variable(s) ==========================================================================
     */

    /*
     * Instance variable(s) ========================================================================
     */

    /**
     * The host to connect to.
     */
    public String m_host;

    /**
     * The port to connect to.
     */
    public int m_port;

    /**
     * The timeout. 0 indicates no timeout.
     */
    // public int m_loginTimeoutMS;

    /**
     * The number of rows to fetch for each request.
     */
    // public int m_rowsFetchedPerBlock;

    /**
     * The size of columns with types that have undefined lengths.
     */
    // public Integer m_unknownLength;

    /**
     * The Authentication Mechanism to use.
     */
    public AuthMech m_authMech;

    /**
     * The user name.
     */
    public String m_username;

    /**
     * The password.
     */
    public String m_password;

    /**
     * The Kerberos realm.
     */
    // public String m_krbRealm;

    /**
     * The Kerberos service name.
     */
    // public String m_krbServiceName;

    /**
     * The host fully-qualified domain name.
     */
    // public String m_krbHostFQDN;

    /**
     * The path to the SSL Keystore file.
     */
    // public String m_sslKeyStore;

    /**
     * The password for the SSL Keystore file.
     */
    // public String m_sslKeyStorePwd;

    /**
     * The password for the key file.
     */
    // public String m_sslPassword;

    /**
     * The path to the key file.
     */
    // public String m_sslKey;

    /**
     * The path to the server certificate file.
     */
    // public String m_sslCert;

    /**
     * The path to the CA certificate file (root.crt).
     */
    // public String m_sslRootCert;

    /**
     * The setting for the default used schema.
     * This is a DBNAME.
     */
    public String m_Schema;

    /**
     * The delegation UID.
     */
    // public String m_delegationUID;

    /**
     * How many rows to limit the fetch to. 0 will not limit the fetch.
     */
    // public int m_nRowMode;

    /**
     * How many minutes of inactivity must happen prior to a keepalive being issued
     * This is the "new tcp connection" style, where another connection is attempted.
     * If the new connection fails, then the socket is considered dead.
     */
    // public int m_newTCPConnectionKeepAliveMinutes;

    /**
     * The Filter Level that will be used by the client for incoming error and notice logs
     */
    // public String m_filterLevel;

    /**
     * The current socket timeout value in Milliseconds.
     */
    // public int m_socketTimeoutMS;

    /**
     * Indicates whether the isValid() query should be disabled.
     * The default is false, meaning the query is NOT disabled.
     */
    // public boolean m_disableIsValid;

    /**
     * Indicates whether use IAM authentication.
     */
    public boolean m_iamAuth;

    /**
     * The IAM access key id for the IAM user or role.
     */
    public String m_iamAccessKeyID;

    /**
     * The IAM secret key for the IAM user or role.
     */
    public String m_iamSecretKey;

    /**
     * The IAM security token for an IAM user or role.
     */
    public String m_iamSessionToken;

    /**
     * The AWS profile name for credentials.
     */
    public String m_profile;

    /**
     * An external id string for the AssumeRole request.
     */
    public String m_externalId;

    /**
     * The name of the Redshift Cluster to use.
     */
    public String m_clusterIdentifier;

    /**
     * The time in seconds until the temporary IAM credentials expire.
     * Range: 900 - 3600
     */
    public int m_iamDuration;

    /**
     * Indicates whether the user should be created if not exists.
     * Default is false.
     */
    public Boolean m_autocreate;

    /**
     * The database user name for IAM authentication.
     */
    public String m_dbUser;

    /**
     * A list of database group names to join.
     */
    public List<String> m_dbGroups;

    /**
     * Forces the database group names to be lower case.
     */
    public Boolean m_forceLowercase;

    /**
     * The AWS endpoint url for Redshift.
     */
    public String m_endpoint;

    /**
     * The AWS endpoint url for STS.
     */
    public String m_stsEndpoint;

    /**
     * The AWS region where the cluster specified by m_clusterIdentifier is located.
     */
    public String m_awsRegion;

    /**
     * The fully qualified class path to a class that implements AWSCredentialsProvider.
     */
    public String m_credentialsProvider;

    /**
     * Connection specific trust store path
     */
    // public String m_sslTrustStorePath;

    /**
     * Connection specific trust store pwd
     */
    // public String m_sslTrustStorePwd;

    /**
     * The plugin arguments (connection properties forwarded to the credentials plugin).
     */
    public Map<String, String> m_pluginArgs = new HashMap<String, String>();

    /**
     * Indicates whether the schema pattern has a match in external schemas.
     */
    // public boolean m_hasExtSchemaPatternMatch;

    /**
     * Name of a class to use as a SelectorProvider.
     */
    // public String m_selectorProvider;

    /**
     * A String to pass as an argument to the selectorProvider constructor.
     */
    // public String m_selectorProviderArg;

    /**
     * Disable IAM credentials cache.
     */
    public boolean m_iamDisableCache;

    /**
     * Use the same IDP Groups in the Redshift.
     * false means use v1 version of GetClusterCredentials
     * true means use v2 version of GetClusterCredentialsV2
     */
    public Boolean m_groupFederation;

    /**
     * Generated SAML token or user provided JWT token.
     * Used in V2 API.
     */
    public String m_idpToken;

    /**
     * Preferred role provided by user.
     * Used in V2 API.
     */
    public String m_preferredRole;

    /**
     * Role session name provided by user.
     * Used in V2 API.
     */
    public String m_roleSessionName;

    /**
     * Group filter parameter for SAML provider
     */
    public String m_dbGroupsFilter;

    /**
     * Role provided by user.
     * Used in V2 API.
     */
    public String m_roleArn;

    /**
     * is it serverless?
     */
    public boolean m_isServerless;

    /**
     * The acct id of the Redshift serverless.
     */
    public String m_acctId;

    /**
     * The work group of the Redshift serverless.
     */
    public String m_workGroup;

    /**
     * Is there a custom cluster name (CNAME endpoint)?
     */
    public boolean m_isCname;
}
| 8,337 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/NativeAuthPluginHelper.java | package com.amazon.redshift.core;
import com.amazon.redshift.plugin.utils.RequestUtils;
import com.amazon.redshift.util.RedshiftProperties;
import com.amazonaws.util.StringUtils;
import com.amazon.redshift.INativePlugin;
import com.amazon.redshift.IPlugin;
import com.amazon.redshift.NativeTokenHolder;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.jdbc.RedshiftConnectionImpl;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.util.Date;
import java.util.Map;
import java.util.Properties;
/**
 * Helper for the "native" authentication plugin flow: resolves the configured
 * {@link INativePlugin} class and obtains an IDP token from it, storing the
 * token in the connection properties for the server handshake.
 */
public final class NativeAuthPluginHelper extends IdpAuthHelper {

  private NativeAuthPluginHelper() {
  }

  /**
   * Helper function to handle Native Auth Plugin connection properties.
   * If any Plugin related
   * connection property is specified, all other <b>required</b> IAM properties
   * must be specified too or else it throws an error.
   *
   * @param info
   *          Redshift client settings used to authenticate if connection should
   *          be granted.
   * @param settings
   *          Redshift Native Plugin settings
   * @param log
   *          Redshift logger
   *
   * @return New property object with properties from auth profile and given
   *         input info properties, if auth profile found. Otherwise same
   *         property object as info return.
   *
   * @throws RedshiftException
   *           If an error occurs.
   */
  public static RedshiftProperties setNativeAuthPluginProperties(RedshiftProperties info, RedshiftJDBCSettings settings, RedshiftLogger log)
      throws RedshiftException {
    try {
      String authProfile = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AUTH_PROFILE.getName(), info);

      // Common code for IAM and Native Auth
      info = setAuthProperties(info, settings, log);

      String idpToken = getNativeAuthPluginCredentials(settings, log, authProfile);

      if (RedshiftLogger.isEnable())
        log.logInfo("NativeAuthPluginHelper: Obtained idp token of length={0}", idpToken != null ? idpToken.length() : -1);

      info.put(RedshiftProperty.WEB_IDENTITY_TOKEN.getName(), idpToken);

      return info;
    } catch (RedshiftException re) {
      if (RedshiftLogger.isEnable())
        log.logError(re);
      throw re;
    }
  }

  /**
   * Resolves the configured credentials provider plugin and returns an IDP
   * token, asking the plugin for a fresh one when no unexpired cached token
   * is available.
   *
   * @param settings driver settings; {@code m_credentialsProvider} names the plugin class
   * @param log driver logger
   * @param authProfile auth profile name (not used here; kept for call-site symmetry)
   * @return the IDP token
   * @throws RedshiftException if the plugin class is missing, invalid, or cannot be instantiated
   */
  private static String getNativeAuthPluginCredentials(RedshiftJDBCSettings settings, RedshiftLogger log, String authProfile) throws RedshiftException {
    String idpToken = null;
    INativePlugin provider = null;

    if (!StringUtils.isNullOrEmpty(settings.m_credentialsProvider)) {
      try {
        Class<? extends INativePlugin> clazz = (Class.forName(settings.m_credentialsProvider)
            .asSubclass(INativePlugin.class));

        // Class.newInstance() is deprecated (it propagates checked constructor
        // exceptions unchecked); invoke the no-arg constructor explicitly.
        // asSubclass() above already guarantees the instance is an INativePlugin.
        provider = clazz.getDeclaredConstructor().newInstance();
        provider.setLogger(log);
        for (Map.Entry<String, String> entry : settings.m_pluginArgs.entrySet()) {
          provider.addParameter(entry.getKey(), entry.getValue());
        }
      } catch (ReflectiveOperationException e) {
        // Covers ClassNotFoundException, NoSuchMethodException, InstantiationException,
        // IllegalAccessException and InvocationTargetException.
        RedshiftException err = new RedshiftException(
            GT.tr("Invalid credentials provider class {0}", settings.m_credentialsProvider),
            RedshiftState.UNEXPECTED_ERROR, e);

        if (RedshiftLogger.isEnable())
          log.log(LogLevel.ERROR, err.toString());

        throw err;
      } catch (NumberFormatException e) {
        RedshiftException err = new RedshiftException(
            GT.tr("{0} : {1}", e.getMessage(), settings.m_credentialsProvider), RedshiftState.UNEXPECTED_ERROR, e);

        if (RedshiftLogger.isEnable())
          log.log(LogLevel.ERROR, err.toString());

        throw err;
      }
    } else {
      RedshiftException err = new RedshiftException(
          GT.tr("Required credentials provider class parameter is null or empty {0}", settings.m_credentialsProvider),
          RedshiftState.UNEXPECTED_ERROR);

      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());

      throw err;
    }

    if (RedshiftLogger.isEnable())
      log.log(LogLevel.DEBUG, "IDP Credential Provider {0}:{1}", provider, settings.m_credentialsProvider);

    if (RedshiftLogger.isEnable())
      log.log(LogLevel.DEBUG, "Calling provider.getCredentials()");

    settings.m_idpToken = idpToken;

    // Provider will cache the credentials, it's OK to call getCredentials() here.
    NativeTokenHolder credentials = provider.getCredentials();

    if (credentials == null || RequestUtils.isCredentialExpired(credentials.getExpiration())) {
      // Cached token missing or expired: ask the plugin for a fresh IDP token.
      // NOTE(review): assumes the provider also implements IPlugin — confirm with plugin implementations.
      IPlugin plugin = (IPlugin) provider;
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.DEBUG, "Calling plugin.getIdpToken()");
      idpToken = plugin.getIdpToken();
      settings.m_idpToken = idpToken;
    } else {
      idpToken = credentials.getAccessToken();
    }

    return idpToken;
  }
}
| 8,338 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/FixedLengthOutputStream.java | /*
* Copyright (c) 2020, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.io.IOException;
import java.io.OutputStream;
/**
 * An {@link OutputStream} decorator that refuses to write more than a fixed
 * maximum number of bytes to the wrapped stream.
 */
public class FixedLengthOutputStream extends OutputStream {

  /** Maximum number of bytes this stream will accept. */
  private final int size;

  /** Underlying stream that receives the bytes. */
  private final OutputStream target;

  /** Number of bytes successfully written so far. */
  private int written;

  /**
   * @param size maximum number of bytes that may be written
   * @param target stream to delegate writes to
   */
  public FixedLengthOutputStream(int size, OutputStream target) {
    this.size = size;
    this.target = target;
  }

  @Override
  public void write(int b) throws IOException {
    verifyAllowed(1);
    target.write(b);
    // Count the byte only after the delegate accepted it, so a failed write
    // does not consume quota (consistent with the array variant below).
    written++;
  }

  @Override
  public void write(byte[] buf, int offset, int len) throws IOException {
    // Overflow-safe bounds check: (offset + len) could wrap around to a
    // negative value for large arguments, defeating the original comparison.
    if (offset < 0 || len < 0 || len > buf.length - offset) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return;
    }
    verifyAllowed(len);
    target.write(buf, offset, len);
    written += len;
  }

  /**
   * @return how many more bytes may still be written
   */
  public int remaining() {
    return size - written;
  }

  /**
   * @param wanted number of bytes about to be written
   * @throws IOException if fewer than {@code wanted} bytes of quota remain
   */
  private void verifyAllowed(int wanted) throws IOException {
    if (remaining() < wanted) {
      throw new IOException("Attempt to write more than the specified " + size + " bytes");
    }
  }
}
| 8,339 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/SocketFactoryFactory.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.jdbc.RedshiftConnectionImpl;
import com.amazon.redshift.ssl.LibPQFactory;
import com.amazon.redshift.ssl.NonValidatingFactory;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.ObjectFactory;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.util.Properties;
import javax.net.SocketFactory;
import javax.net.ssl.SSLSocketFactory;
/**
 * Instantiates {@link SocketFactory} / {@link SSLSocketFactory} implementations
 * based on the {@link RedshiftProperty#SOCKET_FACTORY} and
 * {@link RedshiftProperty#SSL_FACTORY} connection properties.
 */
public class SocketFactoryFactory {

  private SocketFactoryFactory() {
    // Static utility class; never instantiated.
  }

  /**
   * Instantiates {@link SocketFactory} based on the {@link RedshiftProperty#SOCKET_FACTORY}.
   *
   * @param info connection properties
   * @return socket factory, or the JDK default when no class name is configured
   * @throws RedshiftException if the configured class cannot be instantiated
   */
  public static SocketFactory getSocketFactory(Properties info) throws RedshiftException {
    // Socket factory
    String socketFactoryClassName = RedshiftProperty.SOCKET_FACTORY.get(info);
    if (socketFactoryClassName == null) {
      return SocketFactory.getDefault();
    }
    try {
      return ObjectFactory.instantiate(SocketFactory.class, socketFactoryClassName, info, true,
          RedshiftProperty.SOCKET_FACTORY_ARG.get(info));
    } catch (Exception e) {
      throw new RedshiftException(
          GT.tr("The SocketFactory class provided {0} could not be instantiated.",
              socketFactoryClassName),
          RedshiftState.CONNECTION_FAILURE, e);
    }
  }

  /**
   * Instantiates {@link SSLSocketFactory} based on the {@link RedshiftProperty#SSL_FACTORY}.
   *
   * @param info connection properties
   * @return SSL socket factory ({@link LibPQFactory} when unset or explicitly requested)
   * @throws RedshiftException if the configured class cannot be instantiated
   */
  public static SSLSocketFactory getSslSocketFactory(Properties info) throws RedshiftException {
    String classname = RedshiftProperty.SSL_FACTORY.get(info);
    if (classname == null
        || "com.amazon.redshift.ssl.jdbc4.LibPQFactory".equals(classname)
        || "com.amazon.redshift.ssl.LibPQFactory".equals(classname)) {
      return new LibPQFactory(info);
    }
    try {
      // Map the well-known shorthand to the actual non-validating factory class.
      if (classname.equals(RedshiftConnectionImpl.NON_VALIDATING_SSL_FACTORY))
        classname = NonValidatingFactory.class.getName();

      return ObjectFactory.instantiate(SSLSocketFactory.class, classname, info, true,
          RedshiftProperty.SSL_FACTORY_ARG.get(info));
    } catch (Exception e) {
      throw new RedshiftException(
          GT.tr("The SSLSocketFactory class provided {0} could not be instantiated.", classname),
          RedshiftState.CONNECTION_FAILURE, e);
    }
  }
}
| 8,340 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Utils.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.IOException;
import java.nio.charset.Charset;
import java.sql.SQLException;
/**
* Collection of utilities used by the protocol-level code.
*/
public class Utils {
/**
* Turn a bytearray into a printable form, representing each byte in hex.
*
* @param data the bytearray to stringize
* @return a hex-encoded printable representation of {@code data}
*/
public static String toHexString(byte[] data) {
StringBuilder sb = new StringBuilder(data.length * 2);
for (byte element : data) {
sb.append(Integer.toHexString((element >> 4) & 15));
sb.append(Integer.toHexString(element & 15));
}
return sb.toString();
}
  /**
   * Keep a local copy of the UTF-8 Charset so we can avoid synchronization overhead from looking up
   * the Charset by name as String.getBytes(String) requires.
   */
  private static final Charset utf8Charset = Charset.forName("UTF-8");

  /**
   * Encode a string as UTF-8 bytes.
   *
   * @param str the string to encode
   * @return the UTF-8 representation of {@code str}
   */
  public static byte[] encodeUTF8(String str) {
    // See com.amazon.redshift.benchmark.encoding.UTF8Encoding#string_getBytes
    // for performance measurements.
    // In OracleJDK 6u65, 7u55, and 8u40 String.getBytes(Charset) is
    // 3 times faster than other JDK approaches.
    return str.getBytes(utf8Charset);
  }
/**
* Escape the given literal {@code value} and append it to the string builder {@code sbuf}. If
* {@code sbuf} is {@code null}, a new StringBuilder will be returned. The argument
* {@code standardConformingStrings} defines whether the backend expects standard-conforming
* string literals or allows backslash escape sequences.
*
* @param sbuf the string builder to append to; or {@code null}
* @param value the string value
* @param standardConformingStrings if standard conforming strings should be used
* @param onlyQuotes only escape quote and not the backslash for database name
* @return the sbuf argument; or a new string builder for sbuf == null
* @throws SQLException if the string contains a {@code \0} character
*/
public static StringBuilder escapeLiteral(StringBuilder sbuf, String value,
boolean standardConformingStrings, boolean onlyQuotes) throws SQLException {
if (sbuf == null) {
sbuf = new StringBuilder((value.length() + 10) / 10 * 11); // Add 10% for escaping.
}
doAppendEscapedLiteral(sbuf, value, standardConformingStrings, onlyQuotes);
return sbuf;
}
public static StringBuilder escapeLiteral(StringBuilder sbuf, String value,
boolean standardConformingStrings) throws SQLException {
return escapeLiteral(sbuf, value, standardConformingStrings, false);
}
/**
* Common part for {@link #escapeLiteral(StringBuilder, String, boolean)}.
*
* @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
* thrown
* @param value value to append
* @param standardConformingStrings if standard conforming strings should be used
*/
private static void doAppendEscapedLiteral(Appendable sbuf, String value,
boolean standardConformingStrings, boolean onlyQuote) throws SQLException {
try {
if (standardConformingStrings) {
// With standard_conforming_strings on, escape only single-quotes.
for (int i = 0; i < value.length(); ++i) {
char ch = value.charAt(i);
if (ch == '\0') {
throw new RedshiftException(GT.tr("Zero bytes may not occur in string parameters."),
RedshiftState.INVALID_PARAMETER_VALUE);
}
if (ch == '\'') {
sbuf.append('\'');
}
sbuf.append(ch);
}
} else {
// With standard_conforming_string off, escape backslashes and
// single-quotes, but still escape single-quotes by doubling, to
// avoid a security hazard if the reported value of
// standard_conforming_strings is incorrect, or an error if
// backslash_quote is off.
for (int i = 0; i < value.length(); ++i) {
char ch = value.charAt(i);
if (ch == '\0') {
throw new RedshiftException(GT.tr("Zero bytes may not occur in string parameters."),
RedshiftState.INVALID_PARAMETER_VALUE);
}
if(onlyQuote) {
if (ch == '\'') {
sbuf.append(ch);
}
}
else
if (ch == '\\' || ch == '\'') {
sbuf.append(ch);
}
sbuf.append(ch);
}
}
} catch (IOException e) {
throw new RedshiftException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
RedshiftState.UNEXPECTED_ERROR, e);
}
}
/**
* Escape the given identifier {@code value} and append it to the string builder {@code sbuf}.
* If {@code sbuf} is {@code null}, a new StringBuilder will be returned. This method is
* different from appendEscapedLiteral in that it includes the quoting required for the identifier
* while {@link #escapeLiteral(StringBuilder, String, boolean)} does not.
*
* @param sbuf the string builder to append to; or {@code null}
* @param value the string value
* @return the sbuf argument; or a new string builder for sbuf == null
* @throws SQLException if the string contains a {@code \0} character
*/
public static StringBuilder escapeIdentifier(StringBuilder sbuf, String value)
throws SQLException {
if (sbuf == null) {
sbuf = new StringBuilder(2 + (value.length() + 10) / 10 * 11); // Add 10% for escaping.
}
doAppendEscapedIdentifier(sbuf, value);
return sbuf;
}
/**
* Common part for appendEscapedIdentifier.
*
* @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
* thrown.
* @param value value to append
*/
private static void doAppendEscapedIdentifier(Appendable sbuf, String value) throws SQLException {
try {
sbuf.append('"');
for (int i = 0; i < value.length(); ++i) {
char ch = value.charAt(i);
if (ch == '\0') {
throw new RedshiftException(GT.tr("Zero bytes may not occur in identifiers."),
RedshiftState.INVALID_PARAMETER_VALUE);
}
if (ch == '"') {
sbuf.append(ch);
}
sbuf.append(ch);
}
sbuf.append('"');
} catch (IOException e) {
throw new RedshiftException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
RedshiftState.UNEXPECTED_ERROR, e);
}
}
/**
* <p>Attempt to parse the server version string into an XXYYZZ form version number.</p>
*
* <p>Returns 0 if the version could not be parsed.</p>
*
* <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
* releases.</p>
*
* <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
* returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).</p>
*
* <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
* version part is out of range.</p>
*
* @param serverVersion server vertion in a XXYYZZ form
* @return server version in number form
* @deprecated use specific {@link Version} instance
*/
@Deprecated
public static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
return ServerVersion.parseServerVersionStr(serverVersion);
}
}
| 8,341 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ResultHandler.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
import com.amazon.redshift.core.v3.MessageLoopState;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
/**
* <p>Callback interface for passing query results from the protocol-specific layer to the
* protocol-independent JDBC implementation code.</p>
*
* <p>In general, a single query execution will consist of a number of calls to handleResultRows,
* handleCommandStatus, handleWarning, and handleError, followed by a single call to
* handleCompletion when query execution is complete. If the caller wants to throw SQLException,
* this can be done in handleCompletion.</p>
*
* <p>Each executed query ends with a call to handleResultRows, handleCommandStatus, or handleError. If
* an error occurs, subsequent queries won't generate callbacks.</p>
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
public interface ResultHandler {

  /**
   * Called when result rows are received from a query.
   *
   * <p>Exactly one of {@code tuples} and {@code queueTuples} is non-null: when the rows are
   * streamed through a blocking queue, {@code queueTuples} carries them and {@code tuples} is
   * null, and vice versa.</p>
   *
   * @param fromQuery the underlying query that generated these results; this may not be very
   *        specific (e.g. it may be a query that includes multiple statements).
   * @param fields column metadata for the resultset; might be <code>null</code> if
   *        Query.QUERY_NO_METADATA was specified.
   * @param tuples the actual data. If this is set then queueTuples will be null.
   * @param cursor a cursor to use to fetch additional data; <code>null</code> if no further results
   *        are present.
   * @param queueTuples the actual data in a blocking queue. If this is set then tuples will be null.
   * @param rowCount number of rows fetched from the socket.
   * @param ringBufferThread a thread to fetch rows in the limited rows buffer.
   */
  void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples, ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples, int[] rowCount, Thread ringBufferThread);

  /**
   * Called when a query that did not return a resultset completes.
   *
   * @param status the command status string (e.g. "SELECT") returned by the backend
   * @param updateCount the number of rows affected by an INSERT, UPDATE, DELETE, FETCH, or MOVE
   *        command; -1 if not available.
   * @param insertOID for a single-row INSERT query, the OID of the newly inserted row; 0 if not
   *        available.
   */
  void handleCommandStatus(String status, long updateCount, long insertOID);

  /**
   * Called when a warning is emitted.
   *
   * @param warning the warning that occurred.
   */
  void handleWarning(SQLWarning warning);

  /**
   * Called when an error occurs. Subsequent queries are abandoned; in general the only calls
   * between a handleError call and a subsequent handleCompletion call are handleError or
   * handleWarning.
   *
   * @param error the error that occurred
   */
  void handleError(SQLException error);

  /**
   * Called before a QueryExecutor method returns. This method may throw a SQLException if desired;
   * if it does, the QueryExecutor method will propagate that exception to the original caller.
   *
   * @throws SQLException if the handler wishes the original method to throw an exception.
   */
  void handleCompletion() throws SQLException;

  /**
   * Callback for batch statements. In case a batch statement is executed in autocommit==true mode,
   * the executor might commit as it sees fit, so the result handler should track which
   * statements are executed successfully and which are not.
   */
  void secureProgress();

  /**
   * Returns the first encountered exception. The rest are chained via {@link SQLException#setNextException(SQLException)}
   * @return the first encountered exception
   */
  SQLException getException();

  /**
   * Returns the first encountered warning. The rest are chained via {@link SQLException#setNextException(SQLException)}
   * @return the first encountered warning
   */
  SQLWarning getWarning();

  /**
   * Set statement state on completion of the Ring buffer thread.
   */
  void setStatementStateIdleFromInQuery();

  /**
   * Set statement state on start from IDLE to IN_QUERY,
   * in case Ring Buffer thread reset the new query state as IDLE.
   */
  void setStatementStateInQueryFromIdle();

  /** Check the statement type
   *
   * @return true if scrollable, false for forward only.
   */
  boolean wantsScrollableResultSet();
}
| 8,342 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/NativeQuery.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
/**
* Represents a query that is ready for execution by backend. The main difference from JDBC is ? are
* replaced with $1, $2, etc.
*/
public class NativeQuery {
  // Precomputed "$1".."$1279" names; index 0 is unused (binds are 1-based).
  private static final String[] BIND_NAMES = new String[128 * 10];
  private static final int[] NO_BINDS = new int[0];

  public final String nativeSql;
  public final int[] bindPositions;
  public final SqlCommand command;
  public final boolean multiStatement;
  public final int[] redshiftParamMarkers;

  static {
    int i = 1;
    while (i < BIND_NAMES.length) {
      BIND_NAMES[i] = "$" + i;
      i++;
    }
  }

  public NativeQuery(String nativeSql, SqlCommand dml) {
    this(nativeSql, NO_BINDS, true, dml);
  }

  public NativeQuery(String nativeSql, int[] bindPositions, boolean multiStatement, SqlCommand dml) {
    this(nativeSql, bindPositions, multiStatement, dml, null);
  }

  public NativeQuery(String nativeSql, int[] bindPositions, boolean multiStatement, SqlCommand dml,
      int[] redshiftParamMarkers) {
    this.nativeSql = nativeSql;
    // Normalize "no binds" to the shared empty array.
    this.bindPositions =
        (bindPositions == null || bindPositions.length == 0) ? NO_BINDS : bindPositions;
    this.multiStatement = multiStatement;
    this.command = dml;
    this.redshiftParamMarkers = redshiftParamMarkers;
  }

  /**
   * Stringize this query to a human-readable form, substituting particular parameter values for
   * parameter placeholders.
   *
   * @param parameters a ParameterList returned by this Query's {@link Query#createParameterList}
   *        method, or {@code null} to leave the parameter placeholders unsubstituted.
   * @return a human-readable representation of this query
   */
  public String toString(ParameterList parameters) {
    int bindCount = bindPositions.length;
    if (bindCount == 0) {
      return nativeSql;
    }

    // First pass: render each parameter and compute the final length so the
    // StringBuilder never has to grow.
    String[] rendered = new String[bindCount];
    int estimatedLength = nativeSql.length();
    for (int i = 0; i < bindCount; i++) {
      String text = (parameters == null) ? "?" : parameters.toString(i + 1, true);
      rendered[i] = text;
      estimatedLength += text.length() - bindName(i + 1).length();
    }

    // Second pass: interleave SQL fragments with the rendered parameters.
    StringBuilder out = new StringBuilder(estimatedLength);
    out.append(nativeSql, 0, bindPositions[0]);
    for (int i = 0; i < bindCount; i++) {
      out.append(rendered[i]);
      int fragmentStart = bindPositions[i] + bindName(i + 1).length();
      int fragmentEnd = (i + 1 < bindCount) ? bindPositions[i + 1] : nativeSql.length();
      out.append(nativeSql, fragmentStart, fragmentEnd);
    }
    return out.toString();
  }

  /**
   * Returns $1, $2, etc names of bind variables used by backend.
   *
   * @param index index of a bind variable
   * @return bind variable name
   */
  public static String bindName(int index) {
    if (index < BIND_NAMES.length) {
      return BIND_NAMES[index];
    }
    return "$" + index;
  }

  /**
   * Appends the bind name for {@code index} to {@code sb}, avoiding a temporary
   * String when the name is not precomputed.
   *
   * @param sb target builder
   * @param index index of a bind variable
   * @return the {@code sb} argument
   */
  public static StringBuilder appendBindName(StringBuilder sb, int index) {
    if (index < BIND_NAMES.length) {
      return sb.append(bindName(index));
    }
    return sb.append('$').append(index);
  }

  /**
   * Calculate the text length required for the given number of bind variables
   * including dollars.
   * Do this to avoid repeated calls to
   * AbstractStringBuilder.expandCapacity(...) and Arrays.copyOf
   *
   * @param bindCount total number of parameters in a query
   * @return int total character length for $xyz kind of binds
   */
  public static int calculateBindLength(int bindCount) {
    int total = 0;
    int nameLength = 2;      // "$1".."$9" are two chars
    int bucketCapacity = 9;  // how many binds share the current name length
    int remaining = bindCount;
    while (remaining > 0) {
      int inThisBucket = Math.min(bucketCapacity, remaining);
      total += inThisBucket * nameLength;
      remaining -= inThisBucket;
      nameLength++;          // "$10".."$99" are three chars, etc.
      bucketCapacity *= 10;
    }
    return total;
  }

  public SqlCommand getCommand() {
    return command;
  }
}
| 8,343 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/CachedQuery.java | /*
* Copyright (c) 2015, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.util.CanEstimateSize;
/**
* Stores information on the parsed JDBC query. It is used to cut parsing overhead when executing
* the same query through {@link java.sql.Connection#prepareStatement(String)}.
*/
public class CachedQuery implements CanEstimateSize {
  /**
   * Cache key. {@link String} or {@code com.amazon.redshift.util.CanEstimateSize}.
   */
  public final Object key;
  public final Query query;
  public final boolean isFunction;

  // Saturating usage counter; see increaseExecuteCount().
  private int executeCount;

  public CachedQuery(Object key, Query query, boolean isFunction) {
    assert key instanceof String || key instanceof CanEstimateSize
        : "CachedQuery.key should either be String or implement CanEstimateSize."
        + " Actual class is " + key.getClass();
    this.key = key;
    this.query = query;
    this.isFunction = isFunction;
  }

  /** Increments the usage counter, saturating at {@link Integer#MAX_VALUE}. */
  public void increaseExecuteCount() {
    if (executeCount != Integer.MAX_VALUE) {
      executeCount++;
    }
  }

  /**
   * Increments the usage counter by {@code inc}; the update is discarded if it
   * would overflow (i.e. produce a non-positive value).
   *
   * @param inc amount to add to the usage counter
   */
  public void increaseExecuteCount(int inc) {
    int candidate = executeCount + inc;
    if (candidate > 0) {
      executeCount = candidate;
    }
  }

  /**
   * Number of times this statement has been used.
   *
   * @return number of times this statement has been used
   */
  public int getExecuteCount() {
    return executeCount;
  }

  @Override
  public long getSize() {
    // 2 bytes per char; revise with Java 9's compact strings.
    long keyLength = (key instanceof String)
        ? ((String) key).length() * 2L
        : ((CanEstimateSize) key).getSize();
    return keyLength * 2 /* original query and native sql */
        + 100L /* entry in hash map, CachedQuery wrapper, etc */;
  }

  @Override
  public String toString() {
    return new StringBuilder("CachedQuery{")
        .append("executeCount=").append(executeCount)
        .append(", query=").append(query)
        .append(", isFunction=").append(isFunction)
        .append('}')
        .toString();
  }
}
| 8,344 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/PluginProfilesConfigFile.java | package com.amazon.redshift.core;
import java.util.Date;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSSessionCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.auth.ProcessCredentialsProvider;
import com.amazonaws.auth.profile.ProfilesConfigFile;
import com.amazonaws.auth.profile.internal.BasicProfile;
import com.amazonaws.auth.profile.internal.ProfileStaticCredentialsProvider;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;
import com.amazonaws.services.securitytoken.model.AssumeRoleRequest;
import com.amazonaws.services.securitytoken.model.AssumeRoleResult;
import com.amazonaws.services.securitytoken.model.Credentials;
import com.amazonaws.util.StringUtils;
import com.amazon.redshift.CredentialsHolder;
import com.amazon.redshift.IPlugin;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.utils.RequestUtils;
/**
 * A {@link ProfilesConfigFile} that resolves Redshift credentials from AWS profiles,
 * supporting role-based profiles, credential-process profiles, and driver plugin
 * ({@code plugin_name}) profiles. Resolved credentials are cached per profile name
 * until they expire.
 */
public class PluginProfilesConfigFile extends ProfilesConfigFile
{
    // Per-profile credential cache; entries are reused until isExpired() reports true.
    private Map<String, CredentialsHolder> cache = new ConcurrentHashMap<String, CredentialsHolder>();
    private RedshiftJDBCSettings m_settings;
    private RedshiftLogger m_log;

    // Section-name prefix used for profiles declared in ~/.aws/config.
    private static final String PROFILE_PREFIX = "profile ";

    public PluginProfilesConfigFile(RedshiftJDBCSettings settings, RedshiftLogger log)
    {
        m_settings = settings;
        m_log = log;
    }

    /**
     * Returns the AWS credentials for the specified profile.
     *
     * @param profileName profile name as given in ~/.aws/credentials or ~/.aws/config
     *        (the "profile " prefix used by the config file is handled automatically)
     * @return resolved credentials, never null
     * @throws SdkClientException if the profile does not exist, a source profile is
     *         missing, or the configured plugin cannot be loaded
     */
    public CredentialsHolder getCredentials(String profileName)
    {
        CredentialsHolder credentials = cache.get(profileName);
        if (credentials == null)
        {
            // in case if profile is in ~/.aws/config file, check for 'profile ' prefix
            credentials = cache.get(PROFILE_PREFIX+profileName);
        }

        if (credentials != null && !credentials.isExpired())
        {
            if(RedshiftLogger.isEnable()) {
            	Date now = new Date();
            	m_log.logInfo(now + ": Using existing entry for PluginProfilesConfigFile.getCredentials cache with expiration " + credentials.getExpiration());
            }
            return credentials;
        }

        Map<String, BasicProfile> map = getAllBasicProfiles();

        if(RedshiftLogger.isEnable()) {
          Set<String> profiles = map.keySet();
          m_log.logInfo("profiles:" + profiles.toString());
        }

        BasicProfile profile = map.get(profileName);
        if (profile == null)
        {
            // in case if profile is in ~/.aws/config file, check for 'profile ' prefix
            profile = map.get(PROFILE_PREFIX+profileName);
        }
        if (profile == null)
        {
            throw new SdkClientException("No AWS profile named '" + profileName + "'");
        }

        if (profile.isRoleBasedProfile())
        {
            // Resolve the source profile first, then assume the role it points at.
            String srcProfile = profile.getRoleSourceProfile();
            if (StringUtils.isNullOrEmpty(srcProfile))
            {
                throw new SdkClientException("Unable to load credentials from role based profile [" + profileName + "]: Source profile name is not specified");
            }

            CredentialsHolder srcCred = getCredentials(srcProfile);
            AWSCredentialsProvider provider = new AWSStaticCredentialsProvider(srcCred);
            credentials = assumeRole(profile, provider);
            credentials.setMetadata(srcCred.getMetadata());
            cache.put(profileName, credentials);
            if(RedshiftLogger.isEnable()) {
            	Date now = new Date();
            	m_log.logInfo(now + ": Adding new role based entry for PluginProfilesConfigFile.getCredentials cache with expiration " + credentials.getExpiration());
            }
            return credentials;
        }

        // IAM metadata overrides collected from profile properties, applied below.
        String dbUser = null;
        String autoCreate = null;
        String dbGroups = null;
        String forceLowercase = null;

        String pluginName = profile.getPropertyValue("plugin_name");
        if (!StringUtils.isNullOrEmpty(pluginName))
        {
            try
            {
                // Load the credentials-provider plugin by class name; note that
                // Class.newInstance() requires a public no-arg constructor.
                Class<? extends AWSCredentialsProvider> clazz =
                        (Class.forName(pluginName).asSubclass(AWSCredentialsProvider.class));
                AWSCredentialsProvider p = clazz.newInstance();
                if (p instanceof IPlugin)
                {
                    IPlugin plugin = (IPlugin)p;
                    plugin.setLogger(m_log);
                    // Feed every profile property except plugin_name to the plugin,
                    // capturing the IAM metadata keys along the way.
                    Map<String, String> prop = profile.getProperties();
                    for (Map.Entry<String, String> entry : prop.entrySet())
                    {
                        String key = entry.getKey().toLowerCase(Locale.getDefault());
                        if (!"plugin_name".equals(key))
                        {
                            String value = entry.getValue();
                            plugin.addParameter(key, value);

                            // DbUser value in connection string
                            if (RedshiftProperty.DB_USER.getName().equalsIgnoreCase(key))
                            {
                                dbUser = value;
                            }
                            else if (RedshiftProperty.DB_GROUPS.getName().equalsIgnoreCase(key))
                            {
                                dbGroups = value;
                            }
                            else if (RedshiftProperty.FORCE_LOWERCASE.getName().equalsIgnoreCase(key))
                            {
                                forceLowercase = value;
                            }
                            else if (RedshiftProperty.USER_AUTOCREATE.getName().equalsIgnoreCase(key))
                            {
                                autoCreate = value;
                            }
                        }
                    }

                    // Add parameters from URL to plugin, override parameters from profile
                    for (Map.Entry<String, String> entry : m_settings.m_pluginArgs.entrySet())
                    {
                        String key = entry.getKey().toLowerCase(Locale.getDefault());
                        if (!"plugin_name".equals(key))
                        {
                        	plugin.addParameter(entry.getKey(), entry.getValue());
                        }
                    }
                }
                credentials = CredentialsHolder.newInstance(p.getCredentials());
            }
            catch (InstantiationException | IllegalAccessException | ClassNotFoundException e)
            {
                // Chain the underlying cause so plugin load failures stay diagnosable.
                throw new SdkClientException("Invalid plugin: '" + pluginName + "'", e);
            }
        }
        else if (profile.isProcessBasedProfile())
        {
            // credential_process profile: run the external command and use its output.
            ProcessCredentialsProvider provider = ProcessCredentialsProvider.builder()
                            .withCommand(profile.getCredentialProcess())
                            .build();

            AWSCredentials c = provider.getCredentials();
            Date expirationTime = provider.getCredentialExpirationTime().toDate();
            credentials = CredentialsHolder.newInstance(c, expirationTime);
        }
        else
        {
            // Plain static-credentials profile.
            AWSCredentials c = new ProfileStaticCredentialsProvider(profile).getCredentials();
            credentials = CredentialsHolder.newInstance(c);
        }

        // override DbUser, AutoCreate , DbGroups, and ForceLowercase if null and defined in profile
        CredentialsHolder.IamMetadata metadata = credentials.getMetadata();
        if (null == metadata)
        {
            metadata = new CredentialsHolder.IamMetadata();
        }

        if (null != dbUser)
        {
            metadata.setProfileDbUser(dbUser);
        }

        if (null != autoCreate)
        {
            metadata.setAutoCreate(Boolean.valueOf(autoCreate));
        }

        if (null != dbGroups)
        {
            metadata.setDbGroups(dbGroups);
        }

        if (null != forceLowercase)
        {
            metadata.setForceLowercase(Boolean.valueOf(forceLowercase));
        }
        credentials.setMetadata(metadata);

        cache.put(profileName, credentials);
        if(RedshiftLogger.isEnable()) {
        	Date now = new Date();
        	m_log.logInfo(now + ": Using entry for PluginProfilesConfigFile.getCredentials cache with expiration " + credentials.getExpiration());
        }
        return credentials;
    }

    /**
     * Assumes the role configured on a role-based profile via STS.
     *
     * @param profile role-based profile supplying role ARN, session name, and external id
     * @param provider credentials used to call STS (the resolved source profile)
     * @return temporary session credentials with the STS-provided expiration
     * @throws SdkClientException if the STS client cannot be built
     */
    private CredentialsHolder assumeRole(BasicProfile profile, AWSCredentialsProvider provider)
    {
        AWSSecurityTokenServiceClientBuilder builder = AWSSecurityTokenServiceClientBuilder.standard();
        AWSSecurityTokenService stsSvc;
				try {
					stsSvc = RequestUtils.buildSts(m_settings.m_stsEndpoint, m_settings.m_awsRegion, builder, provider,m_log);
				} catch (Exception e) {
          throw new SdkClientException("Profile Plugin error: " + e.getMessage(), e);
				}

        String roleArn = profile.getRoleArn();
        String roleSessionName = profile.getRoleSessionName();
        if (StringUtils.isNullOrEmpty(roleSessionName))
        {
            // Fall back to a unique, recognizable session name.
            roleSessionName = "redshift-jdbc-" + System.currentTimeMillis();
        }
        String externalId = profile.getRoleExternalId();

        AssumeRoleRequest assumeRoleRequest =
                new AssumeRoleRequest().withRoleArn(roleArn).withRoleSessionName(roleSessionName);
        if (!StringUtils.isNullOrEmpty(externalId))
        {
            assumeRoleRequest = assumeRoleRequest.withExternalId(externalId);
        }

        AssumeRoleResult result = stsSvc.assumeRole(assumeRoleRequest);
        Credentials cred = result.getCredentials();
        Date expiration = cred.getExpiration();
        AWSSessionCredentials c = new BasicSessionCredentials(
                cred.getAccessKeyId(),
                cred.getSecretAccessKey(),
                cred.getSessionToken());
        return CredentialsHolder.newInstance(c, expiration);
    }
}
| 8,345 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/CompressedInputStream.java | package com.amazon.redshift.core;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.Driver;
import java.lang.Math;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
public class CompressedInputStream extends InputStream {
private final InputStream wrapped;
private static final int LZ4_MAX_MESSAGE_SIZE = 16 * 1024;
private static final int LZ4_RING_BUFFER_SIZE = 64 * 1024;
// 80KB as per buffer size on server
private static final int BUFFER_SIZE = 80 * 1024;
private byte[] buffer;
private final RedshiftLogger logger;
private byte[] decompress_buffer;
private int decompress_buffer_offset;
private int bytes_before_next_message = 0;
private int next_byte = 0;
private int next_empty_byte = 0;
private long compressedBytesReadFromStream = 0;
public CompressedInputStream(InputStream in, RedshiftLogger in_logger) {
wrapped = in;
logger = in_logger;
decompress_buffer = null;
decompress_buffer_offset = 0;
buffer = new byte[BUFFER_SIZE];
}
public int read() throws IOException {
int readResult;
do {
readResult = tryReadMessage();
if (readResult < 0)
return readResult;
} while (readResult == 1);
bytes_before_next_message--;
return buffer[next_byte++];
}
public long getBytesReadFromStream()
{
return compressedBytesReadFromStream;
}
static final int MIN_MATCH = 4; // minimum length of a match
/**
* Implementation of lz4 decompression. Curently I could not make any library to do stream decompression
* as is required by LZ4_decompress_safe_continue().
*/
public static int lz4_decompress(byte[] compressed, int position, int compressedLen, byte[] dest, int dOff, RedshiftLogger logger) throws IOException {
final int destEnd = dest.length;
int startOff = dOff;
compressedLen += position;
do
{
// literals
final int token = compressed[position++] & 0xFF;
int literalLen = token >>> 4;
if (literalLen != 0)
{
if (literalLen == 0x0F)
{
byte len;
while ((len = compressed[position++]) == (byte) 0xFF)
{
literalLen += 0xFF;
}
literalLen += len & 0xFF;
}
for (int i = 0; i < literalLen; i++)
dest[dOff + i] = compressed[position++];
dOff += literalLen;
}
if (position >= compressedLen)
{
break;
}
// matches
int a = compressed[position++] & 0xFF;
int b = compressed[position++] & 0xFF;
final int matchDec = (a) | (b << 8);
assert matchDec > 0;
int matchLen = token & 0x0F;
if (matchLen == 0x0F)
{
int len;
while ((len = compressed[position++]) == (byte) 0xFF)
{
matchLen += 0xFF;
}
matchLen += len & 0xFF;
}
matchLen += MIN_MATCH;
// copying a multiple of 8 bytes can make decompression from 5% to 10% faster
final int fastLen = (matchLen + 7) & 0xFFFFFFF8;
if (matchDec < matchLen || dOff + fastLen > destEnd)
{
// overlap -> naive incremental copy
for (int ref = dOff - matchDec, end = dOff + matchLen; dOff < end; ++ref, ++dOff)
{
dest[dOff] = dest[ref];
}
// Note(xformmm): here we should use memcpy instead of byte loop as we do in
// https://github.com/postgres/postgres/commit/c60e520f
}
else
{
// no overlap -> arraycopy
try
{
System.arraycopy(dest, dOff - matchDec, dest, dOff, fastLen);
}
catch (Exception e)
{
if(RedshiftLogger.isEnable())
{
logger.logInfo("matchDec : " + matchDec);
logger.logInfo("matchLen : " + matchLen);
Integer initialSourcePosition = dOff - matchDec;
Integer initialDestinationPosition = dOff;
Integer length = fastLen;
Integer lastSourcePosition = initialSourcePosition + length - 1;
Integer lastDestinationPosition = initialDestinationPosition + length - 1;
logger.logInfo("initialSourcePosition : " + initialSourcePosition);
logger.logInfo("initialDestinationPosition : " + initialDestinationPosition);
logger.logInfo("length : " + length);
logger.logInfo("lastSourcePosition : " + lastSourcePosition);
logger.logInfo("lastDestinationPosition : " + lastDestinationPosition);
logger.logInfo("buffer length : " + dest.length);
}
throw e;
}
dOff += matchLen;
}
} while (position < compressedLen);
return dOff - startOff;
}
/**
* Ensures that we have at least one byte and checks if it is compressed message
* returns 1 if caller have to repeat ()
*/
private int tryReadMessage() throws IOException
{
if (bytes_before_next_message == 0)
{
if (!readFromNetwork(5))
{
if(RedshiftLogger.isEnable())
{
logger.logInfo("Not yet ready to read from network");
}
return -1;
}
byte msg_type = buffer[next_byte];
next_byte++; // Consume message type from stream
int msgSize = ntoh32();
if (msg_type == 'k' || msg_type == 'z')
{
if (RedshiftLogger.isEnable())
{
if (msg_type == 'z')
logger.log(LogLevel.DEBUG, "Compression-aware server, Compression acknowledged");
else if (msg_type == 'k')
logger.log(LogLevel.DEBUG, "Set Compression method");
}
/*
* SetCompressionMessageType or CompressionAckMessage
* We must restart decompression codec and discard rest of the message.
*/
if (!readFromNetwork(msgSize))
{
if(RedshiftLogger.isEnable())
{
logger.logInfo("Not yet ready to read from network");
}
return -1;
}
next_byte += msgSize;
if (decompress_buffer == null)
decompress_buffer = new byte[LZ4_MAX_MESSAGE_SIZE + 2 * LZ4_RING_BUFFER_SIZE];
decompress_buffer_offset = 0;
/* We still have bytes_before_next_message == 0 - next packet is coming */
return 1;
}
else if (msg_type == 'm')
{
/*
* CompressedData
* Decompress everything and add data to buffer
*/
next_byte--; // return pointer to the beginning of message
msgSize++; // account message type byte with message
if (!readFromNetwork(msgSize))
{
if(RedshiftLogger.isEnable())
{
logger.logInfo("Not yet ready to read from network");
}
return -1;
}
ensureCapacity(LZ4_MAX_MESSAGE_SIZE);
int decompressSize = lz4_decompress(buffer, next_byte + 5, msgSize - 5,
decompress_buffer, decompress_buffer_offset, logger);
if (decompressSize < 0)
{
if (RedshiftLogger.isEnable())
{
logger.logError("Decompressed message has a negative size");
}
return decompressSize; // Error happened
}
/* Shift data after current compressed message */
try
{
if (decompressSize + next_empty_byte - msgSize > buffer.length)
{
// Reallocate buffer size to avoid overflowing. This is a fallback to prevent errors.
Integer bufferSizeMultiplier = ((decompressSize + next_empty_byte - msgSize) / buffer.length) + 1;
buffer = Arrays.copyOf(buffer, buffer.length * bufferSizeMultiplier);
}
System.arraycopy(buffer, next_byte + msgSize, buffer,
next_byte + decompressSize, next_empty_byte - next_byte - msgSize);
}
catch (Exception e)
{
if (RedshiftLogger.isEnable())
{
Integer bufferLength = buffer.length;
Integer initialSourcePosition = next_byte + msgSize;
Integer initialDestinationPosition = next_byte + decompressSize;
Integer length = next_empty_byte - next_byte - msgSize + 1;
Integer lastSourcePosition = initialSourcePosition + length - 1;
Integer lastDestinationPosition = initialDestinationPosition + length - 1;
logger.logDebug("next_byte : " + next_byte);
logger.logDebug("msgSize : " + msgSize);
logger.logDebug("decompressSize : " + decompressSize);
logger.logDebug("next_empty_byte : " + next_empty_byte);
logger.logDebug("initialSourcePosition : " + initialSourcePosition);
logger.logDebug("initialDestinationPosition : " + initialDestinationPosition);
logger.logDebug("length : " + length);
logger.logDebug("lastSourcePosition : " + lastSourcePosition);
logger.logDebug("lastDestinationPosition : " + lastDestinationPosition);
logger.logDebug("buffer length : " + bufferLength);
}
throw e;
}
byte[] decompressedData = new byte[decompressSize];
for (int i = 0; i < decompressSize; i++)
{
decompressedData[i] = decompress_buffer[decompress_buffer_offset];
}
/* Fit decompressed data in */
System.arraycopy(decompress_buffer, decompress_buffer_offset, buffer, next_byte, decompressSize);
/* Adjust all counters */
next_empty_byte = next_empty_byte - msgSize + decompressSize;
decompress_buffer_offset += decompressSize;
bytes_before_next_message = decompressSize;
/* shift decompression buffer if necessary */
if (decompress_buffer_offset >= 2 * LZ4_RING_BUFFER_SIZE)
{
System.arraycopy(decompress_buffer, LZ4_RING_BUFFER_SIZE, decompress_buffer, 0,
LZ4_RING_BUFFER_SIZE + LZ4_MAX_MESSAGE_SIZE);
decompress_buffer_offset -= LZ4_RING_BUFFER_SIZE;
}
return 0;
}
else
{
next_byte--; // Return message type byte to the stream
bytes_before_next_message += msgSize + 1; // Scroll through next message
}
}
/* Ensure at least one byte is ready for the client */
if (!readFromNetwork(1))
{
if (RedshiftLogger.isEnable())
{
logger.logInfo("Not yet ready to read from network");
}
return -1;
}
return 0;
}
/**
 * Reads a 32-bit big-endian (network byte order) integer starting at
 * {@code next_byte}. The caller must guarantee at least 4 buffered bytes.
 */
private int ntoh32() {
  int value = 0;
  for (int i = 0; i < 4; i++) {
    // Mask to 0xFF: bytes are signed, so widen without sign extension.
    value = (value << 8) | (buffer[next_byte + i] & 0xFF);
  }
  return value;
}
/**
 * Blocks until at least {@code min} unread bytes are buffered from the
 * wrapped stream.
 *
 * @param min minimum number of bytes that must be available before returning
 * @return {@code false} if end-of-stream is reached first, {@code true} otherwise
 * @throws IOException if the underlying read fails
 */
private boolean readFromNetwork(int min) throws IOException {
  for (;;) {
    if (next_empty_byte - next_byte >= min) {
      return true;
    }
    /* Make some room if we are out of empty space */
    ensureCapacity(min);
    final int n = wrapped.read(buffer, next_empty_byte, buffer.length - next_empty_byte);
    if (n < 0) {
      return false;
    }
    // Track raw (compressed) bytes consumed from the wire; n may be 0.
    compressedBytesReadFromStream += n;
    next_empty_byte += n;
  }
}
/**
 * Prevents buffer overflow when reading on the edge of the buffer: if fewer
 * than {@code min} bytes of free space remain, compacts the unread region
 * {@code [next_byte, next_empty_byte)} down to offset 0.
 *
 * @param min number of bytes the caller is about to require
 */
private void ensureCapacity(int min) {
  if (next_empty_byte + min >= buffer.length) {
    int unread = next_empty_byte - next_byte;
    // System.arraycopy is safe for overlapping regions within the same array
    // and replaces the original per-element copy loop (same semantics, no
    // per-iteration bounds checks; consistent with the arraycopy usage
    // elsewhere in this class).
    System.arraycopy(buffer, next_byte, buffer, 0, unread);
    next_empty_byte = unread;
    next_byte = 0;
  }
}
/** Closes the wrapped (compressed) stream; any buffered decompressed data is discarded. */
public void close() throws IOException {
wrapped.close();
}
/**
 * Number of bytes readable without blocking, capped at the end of the
 * current decompressed message so a single read never crosses a message
 * boundary.
 */
@Override
public int available() throws IOException {
  final int buffered = next_empty_byte - next_byte;
  return buffered < bytes_before_next_message ? buffered : bytes_before_next_message;
}
/**
 * Skips up to {@code n} bytes within the current decompressed message.
 *
 * <p>Fix: per the {@link java.io.InputStream#skip(long)} contract a
 * non-positive {@code n} must skip nothing. Previously a negative {@code n}
 * flowed into {@code Math.min(available(), n)}, moving {@code next_byte}
 * backwards and inflating {@code bytes_before_next_message}, corrupting the
 * buffer counters.</p>
 *
 * @param n maximum number of bytes to skip
 * @return bytes actually skipped, or a negative value when the message loop
 *         reports the stream is not ready / at end-of-stream
 * @throws IOException if reading from the network fails
 */
@Override
public long skip(long n) throws IOException {
  if (n <= 0) {
    return 0;
  }
  int readResult;
  do {
    readResult = tryReadMessage();
    if (readResult < 0)
      return readResult;
  } while (readResult == 1);
  long available = Math.min(available(), n);
  next_byte += available;
  bytes_before_next_message -= available;
  return available;
}
/**
 * Reads decompressed bytes into {@code b}, never crossing a message
 * boundary in a single call.
 *
 * @return number of bytes copied, or a negative value when the message
 *         loop reports not-ready / end-of-stream
 */
@Override
public int read(byte[] b, int off, int len) throws IOException {
  // Pump the message loop: <0 propagates the error, 1 means "try again",
  // anything else means payload bytes are ready.
  for (;;) {
    final int state = tryReadMessage();
    if (state < 0) {
      return state;
    }
    if (state != 1) {
      break;
    }
  }
  final int count = Math.min(available(), len);
  System.arraycopy(buffer, next_byte, b, off, count);
  next_byte += count;
  bytes_before_next_message -= count;
  return count;
}
}
| 8,346 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ServerlessIamHelper.java | package com.amazon.redshift.core;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import com.amazon.redshift.core.IamHelper.CredentialProviderType;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.utils.RequestUtils;
import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.redshiftserverless.AWSRedshiftServerlessClient;
import com.amazonaws.services.redshiftserverless.AWSRedshiftServerlessClientBuilder;
import com.amazonaws.services.redshiftserverless.model.GetWorkgroupRequest;
import com.amazonaws.services.redshiftserverless.model.GetWorkgroupResult;
import com.amazonaws.services.redshiftserverless.model.Endpoint;
import com.amazonaws.services.redshiftserverless.model.GetCredentialsRequest;
import com.amazonaws.services.redshiftserverless.model.GetCredentialsResult;
// In Serverless there is no V2 API.
// If user specify group_federation with serverless,
// it will call Provision V2 API.
/**
 * IAM helper for Redshift Serverless: resolves the workgroup endpoint and
 * fetches temporary database credentials via the Serverless GetCredentials API.
 * Credentials are cached process-wide, keyed by
 * {@link IamHelper#getCredentialsCacheKey}.
 */
public final class ServerlessIamHelper {

  private RedshiftLogger log;

  // Serverless client built once per helper instance in the constructor.
  private AWSRedshiftServerlessClient client;

  // Process-wide cache shared by all connections; guarded by the
  // synchronized methods below.
  private static Map<String, GetCredentialsResult> credentialsCache = new HashMap<String, GetCredentialsResult>();

  /**
   * Builds the Serverless client using the connection settings (region,
   * endpoint overrides, etc. applied via {@link IamHelper#setBuilderConfiguration})
   * and the supplied AWS credentials provider.
   */
  ServerlessIamHelper(RedshiftJDBCSettings settings,
      RedshiftLogger log,
      AWSCredentialsProvider credProvider) {
    this.log = log;
    AWSRedshiftServerlessClientBuilder builder = AWSRedshiftServerlessClientBuilder.standard();
    builder = (AWSRedshiftServerlessClientBuilder) IamHelper.setBuilderConfiguration(settings, log, builder);
    client = (AWSRedshiftServerlessClient) builder.withCredentials(credProvider).build();
  }

  /**
   * Looks up the configured workgroup and writes its endpoint host/port into
   * {@code settings}.
   *
   * @throws AmazonClientException if no workgroup is configured or the
   *         workgroup has no endpoint yet
   */
  synchronized void describeConfiguration(RedshiftJDBCSettings settings) {
    com.amazonaws.services.redshiftserverless.model.GetWorkgroupRequest req = new GetWorkgroupRequest();
    if(settings.m_workGroup != null && settings.m_workGroup.length() > 0) {
      // Set workgroup in the request
      req.setWorkgroupName(settings.m_workGroup);
    }
    else
    {
      throw new AmazonClientException("Serverless workgroup is not set.");
    }
    com.amazonaws.services.redshiftserverless.model.GetWorkgroupResult resp = client.getWorkgroup(req);
    Endpoint endpoint = resp.getWorkgroup().getEndpoint();
    if (null == endpoint)
    {
      throw new AmazonClientException("Serverless endpoint is not available yet.");
    }
    settings.m_host = endpoint.getAddress();
    settings.m_port = endpoint.getPort();
  }

  /**
   * Resolves temporary DB credentials and stores them in
   * {@code settings.m_username}/{@code settings.m_password}.
   *
   * <p>The cache is bypassed when caching is disabled, the cached entry has
   * expired, or a plugin provider requests a forced refresh. On a cache miss
   * the GetCredentials call is retried up to
   * {@link IamHelper#MAX_AMAZONCLIENT_RETRY} times.</p>
   *
   * @param settings connection settings; username/password are written here
   * @param providerType which kind of credential provider is in use
   * @param idpCredentialsRefresh true to force a refresh for PLUGIN providers
   * @throws AmazonClientException propagated from the Serverless API
   */
  synchronized void getCredentialsResult(RedshiftJDBCSettings settings,
      CredentialProviderType providerType,
      boolean idpCredentialsRefresh
      ) throws AmazonClientException {
    String key = null;
    GetCredentialsResult credentials = null;
    if(!settings.m_iamDisableCache) {
      key = IamHelper.getCredentialsCacheKey(settings, providerType, true);
      credentials = credentialsCache.get(key);
    }

    if (credentials == null
        || (providerType == CredentialProviderType.PLUGIN
            && idpCredentialsRefresh)
        || RequestUtils.isCredentialExpired(credentials.getExpiration()))
    {
      if (RedshiftLogger.isEnable())
        log.logInfo("GetCredentials NOT from cache");

      if(!settings.m_iamDisableCache)
        credentialsCache.remove(key);

      GetCredentialsRequest request = new GetCredentialsRequest();

      if (settings.m_iamDuration > 0)
      {
        request.setDurationSeconds(settings.m_iamDuration);
      }

      request.setDbName(settings.m_Schema);

      if(settings.m_workGroup != null && settings.m_workGroup.length() > 0) {
        // Set workgroup in the request
        request.setWorkgroupName(settings.m_workGroup);
      }
      else
      {
        // No workgroup: a custom domain name (CNAME) host identifies the target instead.
        if(settings.m_isCname)
        {
          request.setCustomDomainName(settings.m_host);
        }
      }

      if (RedshiftLogger.isEnable()) {
        log.logInfo(request.toString());
      }

      // Retry loop for API-rate-exceeded errors. NOTE(review): assumes
      // checkForApiCallRateExceedError rethrows on the final attempt (or for
      // non-retryable errors); otherwise `credentials` could still be null
      // below — confirm against IamHelper.
      for (int i = 0; i < IamHelper.MAX_AMAZONCLIENT_RETRY; ++i)
      {
        try
        {
          credentials = client.getCredentials(request);
          break;
        }
        catch (AmazonClientException ace)
        {
          IamHelper.checkForApiCallRateExceedError(ace, i, "getCredentialsResult", log);
        }
      }

      if(!settings.m_iamDisableCache)
        credentialsCache.put(key, credentials);
    }
    else {
      if (RedshiftLogger.isEnable())
        log.logInfo("GetCredentials from cache");
    }

    settings.m_username = credentials.getDbUser();
    settings.m_password = credentials.getDbPassword();

    if(RedshiftLogger.isEnable()) {
      Date now = new Date();
      log.logInfo(now + ": Using GetCredentialsResult with expiration " + credentials.getExpiration());
      log.logInfo(now + ": Using GetCredentialsResultV2 with TimeToRefresh " + credentials.getNextRefreshTime());
    }
  }
}
| 8,347 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/CachedQueryCreateAction.java | /*
* Copyright (c) 2015, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.jdbc.PreferQueryMode;
import com.amazon.redshift.util.LruCache;
import java.sql.SQLException;
import java.util.List;
/**
* Creates an instance of {@link CachedQuery} for a given connection.
*/
/**
 * {@link LruCache.CreateAction} that parses SQL text (a plain {@code String}
 * or a {@link BaseQueryKey} variant) into a {@link CachedQuery} for a given
 * connection's {@link QueryExecutor}.
 */
class CachedQueryCreateAction implements LruCache.CreateAction<Object, CachedQuery> {

  private static final String[] EMPTY_RETURNING = new String[0];

  private final QueryExecutor queryExecutor;

  CachedQueryCreateAction(QueryExecutor queryExecutor) {
    this.queryExecutor = queryExecutor;
  }

  @Override
  public CachedQuery create(Object key) throws SQLException {
    assert key instanceof String || key instanceof BaseQueryKey
        : "Query key should be String or BaseQueryKey. Given " + key.getClass() + ", sql: "
        + String.valueOf(key);

    final boolean plainString = key instanceof String;
    final BaseQueryKey queryKey = plainString ? null : (BaseQueryKey) key;
    String parsedSql = plainString ? (String) key : queryKey.sql;

    // JDBC escape processing ({fn ...}, {d ...}, ...) applies to plain
    // strings and to keys that did not opt out of it.
    if (plainString || queryKey.escapeProcessing) {
      parsedSql =
          Parser.replaceProcessing(parsedSql, true, queryExecutor.getStandardConformingStrings());
    }

    // {call ...} escapes are rewritten into backend-native call syntax.
    boolean isFunction = false;
    if (key instanceof CallableQueryKey) {
      JdbcCallParseInfo callInfo =
          Parser.modifyJdbcCall(parsedSql, queryExecutor.getStandardConformingStrings(),
              queryExecutor.getServerVersionNum(), queryExecutor.getProtocolVersion(),
              queryExecutor.getEscapeSyntaxCallMode());
      parsedSql = callInfo.getSql();
      isFunction = callInfo.isFunction();
    }

    final boolean isParameterized = plainString || queryKey.isParameterized;
    final boolean splitStatements = isParameterized
        || queryExecutor.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) >= 0;

    final String[] returningColumns = key instanceof QueryWithReturningColumnsKey
        ? ((QueryWithReturningColumnsKey) key).columnNames
        : EMPTY_RETURNING;

    List<NativeQuery> queries = Parser.parseJdbcSql(parsedSql,
        queryExecutor.getStandardConformingStrings(), isParameterized, splitStatements,
        queryExecutor.isReWriteBatchedInsertsEnabled(),
        queryExecutor.isMultiSqlSupport(),
        returningColumns);

    return new CachedQuery(key, queryExecutor.wrap(queries), isFunction);
  }
}
| 8,348 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/BaseStatement.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftStatement;
import com.amazon.redshift.core.v3.MessageLoopState;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
/**
* Driver-internal statement interface. Application code should not use this interface.
*/
/**
 * Driver-internal statement interface. Application code should not use this interface.
 */
public interface BaseStatement extends RedshiftStatement, Statement {
  /**
   * Create a synthetic resultset from data provided by the driver.
   *
   * @param fields the column metadata for the resultset
   * @param tuples the resultset data
   * @return the new ResultSet
   * @throws SQLException if something goes wrong
   */
  ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples) throws SQLException;

  /**
   * Create a resultset from data retrieved from the server.
   *
   * @param originalQuery the query that generated this resultset; used when dealing with updateable
   *        resultsets
   * @param fields the column metadata for the resultset
   * @param tuples the resultset data; null when {@code queueTuples} supplies the rows instead
   * @param cursor the cursor to use to retrieve more data from the server; if null, no additional
   *        data is present.
   * @param queueTuples the actual data in a blocking queue. If this is set then tuples will be null.
   * @param rowCount number of rows fetched from the socket.
   * @param ringBufferThread a thread to fetch rows in the limited rows buffer.
   * @return the new ResultSet
   * @throws SQLException if something goes wrong
   */
  ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples,
      ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples,
      int[] rowCount, Thread ringBufferThread) throws SQLException;

  /**
   * Execute a query, passing additional query flags.
   *
   * @param sql the query to execute (JDBC-style query)
   * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
   *        the default flags.
   * @return true if there is a result set
   * @throws SQLException if something goes wrong.
   */
  boolean executeWithFlags(String sql, int flags) throws SQLException;

  /**
   * Execute a query, passing additional query flags.
   *
   * @param cachedQuery the query to execute (native to Redshift)
   * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
   *        the default flags.
   * @return true if there is a result set
   * @throws SQLException if something goes wrong.
   */
  boolean executeWithFlags(CachedQuery cachedQuery, int flags) throws SQLException;

  /**
   * Execute a prepared query, passing additional query flags.
   *
   * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
   *        the default flags.
   * @return true if there is a result set
   * @throws SQLException if something goes wrong.
   */
  boolean executeWithFlags(int flags) throws SQLException;
}
| 8,349 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/SqlCommand.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import static com.amazon.redshift.core.SqlCommandType.INSERT;
import static com.amazon.redshift.core.SqlCommandType.SELECT;
import static com.amazon.redshift.core.SqlCommandType.WITH;
/**
* Data Modification Language inspection support.
*
* @author Jeremy Whiting jwhiting@redhat.com
* @author Christopher Deckers (chrriis@gmail.com)
*
*/
/**
 * Immutable description of a parsed SQL statement: its command type, whether a
 * RETURNING clause is present, and — when batch rewrite applies — the positions
 * of the VALUES parenthesis pair.
 *
 * @author Jeremy Whiting jwhiting@redhat.com
 * @author Christopher Deckers (chrriis@gmail.com)
 */
public class SqlCommand {

  public static final SqlCommand BLANK = SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK);

  private final SqlCommandType commandType;
  private final boolean parsedSQLhasRETURNINGKeyword;
  private final int valuesBraceOpenPosition;
  private final int valuesBraceClosePosition;

  private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured,
      int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent,
      int priorQueryCount) {
    this.commandType = type;
    this.parsedSQLhasRETURNINGKeyword = isPresent;
    // Batch rewrite is possible only for a lone INSERT with a well-formed
    // VALUES (...) clause, no RETURNING, and the feature switched on.
    final boolean rewritable = type == INSERT
        && isBatchedReWriteConfigured
        && valuesBraceOpenPosition >= 0
        && valuesBraceClosePosition > valuesBraceOpenPosition
        && !isPresent
        && priorQueryCount == 0;
    if (rewritable) {
      this.valuesBraceOpenPosition = valuesBraceOpenPosition;
      this.valuesBraceClosePosition = valuesBraceClosePosition;
    } else {
      // -1 sentinels mean "not rewrite-compatible".
      this.valuesBraceOpenPosition = -1;
      this.valuesBraceClosePosition = -1;
    }
  }

  /** @return true when this statement qualifies for batched INSERT rewriting */
  public boolean isBatchedReWriteCompatible() {
    return valuesBraceOpenPosition >= 0;
  }

  /** @return index of the opening parenthesis of VALUES, or -1 if not rewritable */
  public int getBatchRewriteValuesBraceOpenPosition() {
    return valuesBraceOpenPosition;
  }

  /** @return index of the closing parenthesis of VALUES, or -1 if not rewritable */
  public int getBatchRewriteValuesBraceClosePosition() {
    return valuesBraceClosePosition;
  }

  /** @return the detected SQL command keyword */
  public SqlCommandType getType() {
    return commandType;
  }

  /** @return true when the parsed SQL contains a RETURNING keyword */
  public boolean isReturningKeywordPresent() {
    return parsedSQLhasRETURNINGKeyword;
  }

  /** @return true when executing this statement yields rows (SELECT, WITH, or RETURNING) */
  public boolean returnsRows() {
    return parsedSQLhasRETURNINGKeyword || commandType == SELECT || commandType == WITH;
  }

  public static SqlCommand createStatementTypeInfo(SqlCommandType type,
      boolean isBatchedReWritePropertyConfigured,
      int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent,
      int priorQueryCount) {
    return new SqlCommand(type, isBatchedReWritePropertyConfigured,
        valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent,
        priorQueryCount);
  }

  public static SqlCommand createStatementTypeInfo(SqlCommandType type) {
    return new SqlCommand(type, false, -1, -1, false, 0);
  }

  public static SqlCommand createStatementTypeInfo(SqlCommandType type,
      boolean isRETURNINGkeywordPresent) {
    return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0);
  }
}
| 8,350 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/BaseConnection.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftConnection;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.jdbc.FieldMetadata;
import com.amazon.redshift.jdbc.TimestampUtils;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.LruCache;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.TimerTask;
/**
* Driver-internal connection interface. Application code should not use this interface.
*/
/**
 * Driver-internal connection interface. Application code should not use this interface.
 */
public interface BaseConnection extends RedshiftConnection, Connection {
  /**
   * Cancel the current query executing on this connection.
   *
   * @throws SQLException if something goes wrong.
   */
  void cancelQuery() throws SQLException;

  /**
   * Execute a SQL query that returns a single resultset. Never causes a new transaction to be
   * started regardless of the autocommit setting.
   *
   * @param s the query to execute
   * @return the (non-null) returned resultset
   * @throws SQLException if something goes wrong.
   */
  ResultSet execSQLQuery(String s) throws SQLException;

  /**
   * Variant of {@link #execSQLQuery(String)} with explicit resultset type and concurrency.
   *
   * @param s the query to execute
   * @param resultSetType a {@link ResultSet} type constant
   * @param resultSetConcurrency a {@link ResultSet} concurrency constant
   * @return the (non-null) returned resultset
   * @throws SQLException if something goes wrong.
   */
  ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
      throws SQLException;

  /**
   * Execute a SQL query that does not return results. Never causes a new transaction to be started
   * regardless of the autocommit setting.
   *
   * @param s the query to execute
   * @throws SQLException if something goes wrong.
   */
  void execSQLUpdate(String s) throws SQLException;

  /**
   * Get the QueryExecutor implementation for this connection.
   *
   * @return the (non-null) executor
   */
  QueryExecutor getQueryExecutor();

  /**
   * Internal protocol for work with physical and logical replication.
   * Unsupported in Redshift.
   *
   * @return not null replication protocol
   */
  ReplicationProtocol getReplicationProtocol();

  /**
   * <p>Construct and return an appropriate object for the given type and value. This only considers
   * the types registered via {@link com.amazon.redshift.RedshiftConnection#addDataType(String, Class)} and
   * {@link com.amazon.redshift.RedshiftConnection#addDataType(String, String)}.</p>
   *
   * <p>If no class is registered as handling the given type, then a generic
   * {@link com.amazon.redshift.util.RedshiftObject} instance is returned.</p>
   *
   * @param type the backend typename
   * @param value the type-specific string representation of the value
   * @param byteValue the type-specific binary representation of the value
   * @return an appropriate object; never null.
   * @throws SQLException if something goes wrong
   */
  Object getObject(String type, String value, byte[] byteValue) throws SQLException;

  /**
   * Returns the text encoding in effect for this connection (client_encoding).
   *
   * @return the connection's encoding
   * @throws SQLException if the encoding cannot be determined
   */
  Encoding getEncoding() throws SQLException;

  /**
   * Returns the type-information registry for this connection.
   *
   * @return the connection's TypeInfo
   */
  TypeInfo getTypeInfo();

  /**
   * <p>Check if we have at least a particular server version.</p>
   *
   * <p>The input version is of the form xxyyzz, matching a Redshift version like xx.yy.zz. So 08.00.02
   * is 080002.</p>
   *
   * @param ver the server version to check, of the form xxyyzz eg 90401
   * @return true if the server version is at least "ver".
   */
  boolean haveMinimumServerVersion(int ver);

  /**
   * <p>Check if we have at least a particular server version.</p>
   *
   * <p>The input version is of the form xxyyzz, matching a Redshift version like xx.yy.zz. So 8.0.2
   * is 80002.</p>
   *
   * @param ver the server version to check
   * @return true if the server version is at least "ver".
   */
  boolean haveMinimumServerVersion(Version ver);

  /**
   * Encode a string using the database's client_encoding (usually UTF8, but can vary on older
   * server versions). This is used when constructing synthetic resultsets (for example, in metadata
   * methods).
   *
   * @param str the string to encode
   * @return an encoded representation of the string
   * @throws SQLException if something goes wrong.
   */
  byte[] encodeString(String str) throws SQLException;

  /**
   * Escapes a string for use as string-literal within an SQL command. The method chooses the
   * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
   *
   * @param str a string value
   * @return the escaped representation of the string
   * @throws SQLException if the string contains a {@code \0} character
   */
  String escapeString(String str) throws SQLException;

  /**
   * Escapes only quotes in string for catalog name. The method chooses the
   * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
   *
   * @param str a string value
   * @return the escaped representation of the string
   * @throws SQLException if the string contains a {@code \0} character
   */
  String escapeOnlyQuotesString(String str) throws SQLException;

  /**
   * Returns whether the server treats string-literals according to the SQL standard or if it uses
   * traditional Redshift escaping rules. Versions up to 8.1 always treated backslashes as escape
   * characters in string-literals. Since 8.2, this depends on the value of the
   * {@code standard_conforming_strings} server variable.
   *
   * @return true if the server treats string literals according to the SQL standard
   * @see QueryExecutor#getStandardConformingStrings()
   */
  boolean getStandardConformingStrings();

  /**
   * Returns the connection-specific timestamp conversion helper.
   * (Historically exposed directly; kept for internal callers.)
   *
   * @return the TimestampUtils for this connection
   */
  TimestampUtils getTimestampUtils();

  /**
   * Returns the per-connection logger.
   *
   * @return this connection's logger
   */
  RedshiftLogger getLogger();

  /**
   * Returns the bind-string-as-varchar config flag.
   *
   * @return true when string parameters are bound as varchar
   */
  boolean getStringVarcharFlag();

  /**
   * Get the current transaction state of this connection.
   *
   * @return current transaction state of this connection
   */
  TransactionState getTransactionState();

  /**
   * Returns true if value for the given oid should be sent using binary transfer. False if value
   * should be sent using text transfer.
   *
   * @param oid The oid to check.
   * @return True for binary transfer, false for text transfer.
   */
  boolean binaryTransferSend(int oid);

  /**
   * Return whether to disable column name sanitation.
   *
   * @return true column sanitizer is disabled
   */
  boolean isColumnSanitiserDisabled();

  /**
   * Schedule a TimerTask for later execution. The task will be scheduled with the shared Timer for
   * this connection.
   *
   * @param timerTask timer task to schedule
   * @param milliSeconds delay in milliseconds
   */
  void addTimerTask(TimerTask timerTask, long milliSeconds);

  /**
   * Invoke purge() on the underlying shared Timer so that internal resources will be released.
   */
  void purgeTimerTasks();

  /**
   * Return metadata cache for given connection.
   *
   * @return metadata cache
   */
  LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache();

  /**
   * Creates a (possibly cached) query object for the given SQL.
   *
   * @param sql the SQL text
   * @param escapeProcessing whether to apply JDBC escape processing
   * @param isParameterized whether the SQL uses ? placeholders
   * @param columnNames RETURNING column names, if any
   * @return the cached query wrapper
   * @throws SQLException if parsing fails
   */
  CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
      String... columnNames)
      throws SQLException;

  /**
   * By default, the connection resets statement cache in case deallocate all/discard all
   * message is observed.
   * This API allows to disable that feature for testing purposes.
   *
   * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
   */
  void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);

  /**
   * Indicates if statements to backend should be hinted as read only.
   *
   * @return Indication if hints to backend (such as when transaction begins)
   *         should be read only.
   * @see RedshiftProperty#READ_ONLY_MODE
   */
  boolean hintReadOnly();
}
| 8,351 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/SetupQueryRunner.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import com.amazon.redshift.core.v3.MessageLoopState;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
/**
* Poor man's Statement & ResultSet, used for initial queries while we're still initializing the
* system.
*/
/**
 * Poor man's Statement &amp; ResultSet, used for internal queries issued while
 * the connection is still being initialized.
 */
public class SetupQueryRunner {

  /** Captures the single resultset of a setup query; warnings are intentionally dropped. */
  private static class SimpleResultHandler extends ResultHandlerBase {
    private List<Tuple> tuples;

    List<Tuple> getResults() {
      return tuples;
    }

    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
        ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples,
        int[] rowCount, Thread ringBufferThread) {
      this.tuples = tuples;
    }

    public void handleWarning(SQLWarning warning) {
      // Setup queries are fully under driver control; warnings are ignored.
    }
  }

  /**
   * Runs a one-shot setup query outside any transaction.
   *
   * @param executor the protocol executor to run against
   * @param queryString the SQL to execute
   * @param wantResults true to return the single result row; false for fire-and-forget
   * @return the single result row, or null when {@code wantResults} is false
   * @throws SQLException on execution failure or when the query does not
   *         produce exactly one row
   */
  public static Tuple run(QueryExecutor executor, String queryString,
      boolean wantResults) throws SQLException {
    Query query = executor.createSimpleQuery(queryString);
    SimpleResultHandler handler = new SimpleResultHandler();

    // One-shot, no implicit BEGIN, simple protocol; suppress results entirely
    // when the caller does not want them.
    int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_SUPPRESS_BEGIN
        | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE
        | (wantResults ? 0 : QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_NO_METADATA);

    try {
      executor.execute(query, null, handler, 0, 0, flags);
    } finally {
      query.close();
    }

    if (!wantResults) {
      return null;
    }

    List<Tuple> rows = handler.getResults();
    if (rows != null && rows.size() == 1) {
      return rows.get(0);
    }
    throw new RedshiftException(GT.tr("An unexpected result was returned by a query."),
        RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
  }
}
| 8,352 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ByteOptimizedUTF8Encoder.java | /*
* Copyright (c) 2019, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.io.IOException;
import java.nio.charset.Charset;
/**
* UTF-8 encoder which validates input and is optimized for jdk 9+ where {@code String} objects are backed by
* {@code byte[]}.
* @author Brett Okken
*/
/**
 * UTF-8 encoder which validates input and is optimized for jdk 9+ where {@code String} objects
 * are backed by {@code byte[]}.
 *
 * @author Brett Okken
 */
final class ByteOptimizedUTF8Encoder extends OptimizedUTF8Encoder {

  // Fix: use the canonical StandardCharsets constant instead of
  // Charset.forName("ascii") — guaranteed available on every JVM and avoids
  // the alias-lookup at class-initialization time. "ascii" is an alias of
  // US-ASCII, so behavior is unchanged.
  private static final Charset ASCII_CHARSET = java.nio.charset.StandardCharsets.US_ASCII;

  /**
   * {@inheritDoc}
   */
  @Override
  public String decode(byte[] encodedString, int offset, int length) throws IOException {
    // For very short strings going straight to chars is up to 30% faster.
    if (length <= 32) {
      return charDecode(encodedString, offset, length);
    }
    for (int i = offset, j = offset + length; i < j; ++i) {
      // bytes are signed values; all ascii values are positive
      if (encodedString[i] < 0) {
        return slowDecode(encodedString, offset, length, i);
      }
    }
    // We have confirmed all chars are ascii; give java that hint.
    return new String(encodedString, offset, length, ASCII_CHARSET);
  }

  /**
   * Decodes to {@code char[]} in presence of non-ascii values after first copying all known-ascii
   * bytes directly from {@code byte[]} to {@code char[]}.
   */
  private synchronized String slowDecode(byte[] encodedString, int offset, int length, int curIdx) throws IOException {
    final char[] chars = getCharArray(length);
    int out = 0;
    for (int i = offset; i < curIdx; ++i) {
      chars[out++] = (char) encodedString[i];
    }
    return decodeToChars(encodedString, curIdx, length - (curIdx - offset), chars, out);
  }
}
| 8,353 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/JavaVersion.java | /*
* Copyright (c) 2017, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
/**
 * Coarse classification of the JVM version the driver runs on.
 */
public enum JavaVersion {
  // Note: order is important,
  v1_6,
  v1_7,
  v1_8,
  other;

  private static final JavaVersion RUNTIME_VERSION = from(System.getProperty("java.version"));

  /**
   * Returns enum value that represents current runtime. For instance, when using -jre7.jar via Java
   * 8, this would return v18
   *
   * @return enum value that represents current runtime.
   */
  public static JavaVersion getRuntimeVersion() {
    return RUNTIME_VERSION;
  }

  /**
   * Maps a {@code "java.version"} property style string to an enum value.
   * Any version that is not a 1.6/1.7/1.8 prefix (including Java 9+ strings
   * such as "11.0.2") maps to {@link #other}.
   *
   * @param version string like 1.6, 1.7, etc
   * @return JavaVersion enum
   */
  public static JavaVersion from(String version) {
    // Minimum supported is Java 1.6.
    return version.startsWith("1.6") ? v1_6
        : version.startsWith("1.7") ? v1_7
        : version.startsWith("1.8") ? v1_8
        : other;
  }
}
| 8,354 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/JdbcCallParseInfo.java | /*
* Copyright (c) 2015, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
/**
* Contains parse flags from {@link Parser#modifyJdbcCall(String, boolean, int, int, EscapeSyntaxCallMode)}.
*/
/**
 * Immutable result of rewriting a JDBC {@code {call ...}} escape into
 * backend-native SQL: the rewritten statement plus whether it targets a
 * function (as opposed to a procedure).
 */
public class JdbcCallParseInfo {

  private final String sql;
  private final boolean isFunction;

  public JdbcCallParseInfo(String sql, boolean isFunction) {
    this.isFunction = isFunction;
    this.sql = sql;
  }

  /**
   * SQL in a native form for a certain backend version.
   *
   * @return the rewritten SQL text
   */
  public String getSql() {
    return sql;
  }

  /**
   * Reports whether the given SQL is a function call.
   *
   * @return {@code true} if given SQL is a function
   */
  public boolean isFunction() {
    return isFunction;
  }
}
| 8,355 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Query.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import java.util.Map;
/**
* <p>Abstraction of a generic Query, hiding the details of any protocol-version-specific data needed
* to execute the query efficiently.</p>
*
* <p>Query objects should be explicitly closed when no longer needed; if resources are allocated on
* the server for this query, their cleanup is triggered by closing the Query.</p>
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
/**
 * <p>Abstraction of a generic Query, hiding the details of any protocol-version-specific data needed
 * to execute the query efficiently.</p>
 *
 * <p>Query objects should be explicitly closed when no longer needed; if resources are allocated on
 * the server for this query, their cleanup is triggered by closing the Query.</p>
 *
 * @author Oliver Jowett (oliver@opencloud.com)
 */
public interface Query {
  /**
   * <p>Create a ParameterList suitable for storing parameters associated with this Query.</p>
   *
   * <p>If this query has no parameters, a ParameterList will be returned, but it may be a shared
   * immutable object. If this query does have parameters, the returned ParameterList is a new list,
   * unshared by other callers.</p>
   *
   * @return a suitable ParameterList instance for this query
   */
  ParameterList createParameterList();

  /**
   * Stringize this query to a human-readable form, substituting particular parameter values for
   * parameter placeholders.
   *
   * @param parameters a ParameterList returned by this Query's {@link #createParameterList} method,
   *        or <code>null</code> to leave the parameter placeholders unsubstituted.
   * @return a human-readable representation of this query
   */
  String toString(ParameterList parameters);

  /**
   * Returns SQL in native for database format.
   *
   * @return SQL in native for database format
   */
  String getNativeSql();

  /**
   * Returns properties of the query (sql keyword, and some other parsing info).
   *
   * @return returns properties of the query (sql keyword, and some other parsing info) or null if
   *         not applicable
   */
  SqlCommand getSqlCommand();

  /**
   * <p>Close this query and free any server-side resources associated with it. The resources may not
   * be immediately deallocated, but closing a Query may make the deallocation more prompt.</p>
   *
   * <p>A closed Query should not be executed.</p>
   */
  void close();

  /** @return true when this query has been Described on the server (statement metadata available) */
  boolean isStatementDescribed();

  /** @return true when the query contains no statements */
  boolean isEmpty();

  /**
   * Get the number of times this Query has been batched.
   *
   * @return number of times <code>addBatch()</code> has been called.
   */
  int getBatchSize();

  /**
   * Get a map that a result set can use to find the index associated to a name.
   *
   * @return null if the query implementation does not support this method.
   */
  Map<String, Integer> getResultSetColumnNameIndexMap();

  /**
   * Return a list of the Query objects that make up this query. If this object is already a
   * SimpleQuery, returns null (avoids an extra array construction in the common case).
   *
   * @return an array of single-statement queries, or <code>null</code> if this object is already a
   *         single-statement query.
   */
  Query[] getSubqueries();
}
| 8,356 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/OptimizedUTF8Encoder.java | /*
* Copyright (c) 2019, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
/**
 * UTF-8 encoder implementation which validates values during decoding which is
 * significantly faster than using a {@link CharsetDecoder}.
 */
abstract class OptimizedUTF8Encoder extends Encoding {

  static final Charset UTF_8_CHARSET = Charset.forName("UTF-8");

  // Smallest code point that legitimately requires a 2-, 3- or 4-byte UTF-8
  // sequence; anything below encoded with more bytes is an over-long
  // (non-minimal) encoding and must be rejected.
  private static final int MIN_2_BYTES = 0x80;
  private static final int MIN_3_BYTES = 0x800;
  private static final int MIN_4_BYTES = 0x10000;
  // Largest valid Unicode code point (U+10FFFF).
  private static final int MAX_CODE_POINT = 0x10ffff;

  // Decoded char[] buffers up to this size are retained in decoderArray for
  // reuse on later calls; larger buffers are discarded after use.
  private final int thresholdSize = 8 * 1024;
  // Reusable scratch buffer; only safe to touch from synchronized methods.
  private char[] decoderArray;

  OptimizedUTF8Encoder() {
    super(UTF_8_CHARSET, true, RedshiftLogger.getDriverLogger());
    decoderArray = new char[1024];
  }

  /**
   * Returns a {@code char[]} to use for decoding. Will use member variable if <i>size</i>
   * is small enough. This method must be called, and returned {@code char[]} only used, from
   * {@code synchronized} block.
   *
   * @param size
   *          The needed size of returned {@code char[]}.
   * @return
   *          A {@code char[]} at least as long as <i>length</i>.
   */
  char[] getCharArray(int size) {
    if (size <= decoderArray.length) {
      return decoderArray;
    }
    final char[] chars = new char[size];
    //only if size is below the threshold do we want to keep new char[] for future reuse
    if (size <= thresholdSize) {
      decoderArray = chars;
    }
    return chars;
  }

  /**
   * Decodes binary content to {@code String} by first converting to {@code char[]}.
   * The fast path copies ASCII bytes (non-negative signed byte values) directly;
   * on the first non-ASCII byte it falls through to the full multi-byte decoder.
   *
   * @param encodedString bytes containing UTF-8 content
   * @param offset        starting index in <i>encodedString</i>
   * @param length        number of bytes to decode
   * @return the decoded string
   * @throws IOException if the content is not valid UTF-8
   */
  synchronized String charDecode(byte[] encodedString, int offset, int length) throws IOException {
    final char[] chars = getCharArray(length);
    int out = 0;
    for (int i = offset, j = offset + length; i < j; ++i) {
      // bytes are signed values. all ascii values are positive
      if (encodedString[i] >= 0) {
        chars[out++] = (char) encodedString[i];
      } else {
        return decodeToChars(encodedString, i, j - i, chars, out);
      }
    }
    return new String(chars, 0, out);
  }

  /**
   * Decodes <i>data</i> from <i>offset</i> with given <i>length</i> as UTF-8,
   * writing the decoded characters into <i>chars</i> starting at index <i>out</i>,
   * and returns the resulting {@code String}. Rejects continuation bytes in lead
   * position, truncated sequences, over-long encodings, surrogate code points and
   * values above U+10FFFF.
   *
   * @param data
   *          The {@code byte[]} to decode.
   * @param offset
   *          The starting index in <i>data</i>.
   * @param length
   *          The number of bytes in <i>data</i> to decode.
   * @param chars
   *          Destination buffer; must be large enough for the decoded output.
   * @param out
   *          Index in <i>chars</i> at which to start writing.
   * @return the decoded string built from {@code chars[0..]}.
   * @throws IOException
   *           If data is not valid utf-8 content.
   */
  static String decodeToChars(byte[] data, int offset, int length, char[] chars, int out) throws IOException {
    int in = offset;
    final int end = length + offset;
    try {
      while (in < end) {
        int ch = data[in++] & 0xff;
        // Convert UTF-8 to 21-bit codepoint.
        if (ch < 0x80) {
          // 0xxxxxxx -- length 1.
        } else if (ch < 0xc0) {
          // 10xxxxxx -- illegal!
          throw new IOException(GT.tr("Illegal UTF-8 sequence: initial byte is {0}: {1}",
              "10xxxxxx", ch));
        } else if (ch < 0xe0) {
          // 110xxxxx 10xxxxxx
          ch = ((ch & 0x1f) << 6);
          checkByte(data[in], 2, 2);
          ch = ch | (data[in++] & 0x3f);
          checkMinimal(ch, MIN_2_BYTES);
        } else if (ch < 0xf0) {
          // 1110xxxx 10xxxxxx 10xxxxxx
          ch = ((ch & 0x0f) << 12);
          checkByte(data[in], 2, 3);
          ch = ch | ((data[in++] & 0x3f) << 6);
          checkByte(data[in], 3, 3);
          ch = ch | (data[in++] & 0x3f);
          checkMinimal(ch, MIN_3_BYTES);
        } else if (ch < 0xf8) {
          // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
          ch = ((ch & 0x07) << 18);
          checkByte(data[in], 2, 4);
          ch = ch | ((data[in++] & 0x3f) << 12);
          checkByte(data[in], 3, 4);
          ch = ch | ((data[in++] & 0x3f) << 6);
          checkByte(data[in], 4, 4);
          ch = ch | (data[in++] & 0x3f);
          checkMinimal(ch, MIN_4_BYTES);
        } else {
          throw new IOException(GT.tr("Illegal UTF-8 sequence: initial byte is {0}: {1}",
              "11111xxx", ch));
        }
        if (ch > MAX_CODE_POINT) {
          throw new IOException(
              GT.tr("Illegal UTF-8 sequence: final value is out of range: {0}", ch));
        }
        // Convert 21-bit codepoint to Java chars:
        // 0..ffff are represented directly as a single char
        // 10000..10ffff are represented as a "surrogate pair" of two chars
        // See: http://java.sun.com/developer/technicalArticles/Intl/Supplementary/
        if (ch > 0xffff) {
          // Use a surrogate pair to represent it.
          ch -= 0x10000; // ch is now 0..fffff (20 bits)
          chars[out++] = (char) (0xd800 + (ch >> 10)); // top 10 bits
          chars[out++] = (char) (0xdc00 + (ch & 0x3ff)); // bottom 10 bits
        } else if (ch >= 0xd800 && ch < 0xe000) {
          // Not allowed to encode the surrogate range directly.
          throw new IOException(GT.tr("Illegal UTF-8 sequence: final value is a surrogate value: {0}", ch));
        } else {
          // Normal case.
          chars[out++] = (char) ch;
        }
      }
    } catch (ArrayIndexOutOfBoundsException a) {
      // A multi-byte sequence read past the end of the buffer.
      throw new IOException("Illegal UTF-8 sequence: multibyte sequence was truncated");
    }
    return new String(chars, 0, out);
  }

  // Verifies that a continuation byte has the mandatory 10xxxxxx bit pattern.
  // helper for decode
  private static void checkByte(int ch, int pos, int len) throws IOException {
    if ((ch & 0xc0) != 0x80) {
      throw new IOException(
          GT.tr("Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}", pos, len, ch));
    }
  }

  // Rejects over-long encodings: a decoded value below minValue should have
  // been encoded with fewer bytes, so the sequence is invalid UTF-8.
  private static void checkMinimal(int ch, int minValue) throws IOException {
    if (ch >= minValue) {
      return;
    }
    int actualLen;
    switch (minValue) {
      case MIN_2_BYTES:
        actualLen = 2;
        break;
      case MIN_3_BYTES:
        actualLen = 3;
        break;
      case MIN_4_BYTES:
        actualLen = 4;
        break;
      default:
        throw new IllegalArgumentException("unexpected minValue passed to checkMinimal: " + minValue);
    }
    int expectedLen;
    if (ch < MIN_2_BYTES) {
      expectedLen = 1;
    } else if (ch < MIN_3_BYTES) {
      expectedLen = 2;
    } else if (ch < MIN_4_BYTES) {
      expectedLen = 3;
    } else {
      throw new IllegalArgumentException("unexpected ch passed to checkMinimal: " + ch);
    }
    throw new IOException(
        GT.tr("Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}", actualLen, expectedLen, ch));
  }
}
| 8,357 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/TransactionState.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
/**
 * State of the transaction on a database connection.
 */
public enum TransactionState {
  /** No transaction is currently in progress. */
  IDLE,
  /** A transaction is currently open. */
  OPEN,
  /** The current transaction has failed. */
  FAILED
}
| 8,358 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/BaseQueryKey.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.util.CanEstimateSize;
/**
 * Cache key used for simple statements that have no "returning columns".
 * Prepared statements without returning columns are keyed by their plain
 * {@code String sql}; statements with returning columns are keyed by
 * {@link QueryWithReturningColumnsKey} instead.
 */
class BaseQueryKey implements CanEstimateSize {
  public final String sql;
  public final boolean isParameterized;
  public final boolean escapeProcessing;

  BaseQueryKey(String sql, boolean isParameterized, boolean escapeProcessing) {
    this.sql = sql;
    this.isParameterized = isParameterized;
    this.escapeProcessing = escapeProcessing;
  }

  @Override
  public String toString() {
    // Same textual form as the classic concatenation-based implementation.
    StringBuilder buf = new StringBuilder("BaseQueryKey{");
    buf.append("sql='").append(sql).append('\'');
    buf.append(", isParameterized=").append(isParameterized);
    buf.append(", escapeProcessing=").append(escapeProcessing);
    buf.append('}');
    return buf.toString();
  }

  @Override
  public long getSize() {
    // 16 bytes of object overhead plus 2 bytes per char of SQL text
    // (revise with Java 9's compact strings). Null SQL is just the overhead.
    return (sql == null) ? 16 : 16 + sql.length() * 2L;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    // Exact-class comparison: subclasses (e.g. keys carrying returning
    // columns) must never compare equal to a plain BaseQueryKey.
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    BaseQueryKey other = (BaseQueryKey) o;
    return isParameterized == other.isParameterized
        && escapeProcessing == other.escapeProcessing
        && (sql == null ? other.sql == null : sql.equals(other.sql));
  }

  @Override
  public int hashCode() {
    // Classic 31-based mix; produces the same values as the original formula.
    int h = (sql == null) ? 0 : sql.hashCode();
    h = 31 * h + (isParameterized ? 1 : 0);
    h = 31 * h + (escapeProcessing ? 1 : 0);
    return h;
  }
}
| 8,359 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/CharOptimizedUTF8Encoder.java | /*
* Copyright (c) 2019, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.io.IOException;
/**
 * UTF-8 encoder which validates input and is optimized for jdk 8 and lower where {@code String} objects are backed by
 * {@code char[]}.
 * @author Brett Okken
 */
final class CharOptimizedUTF8Encoder extends OptimizedUTF8Encoder {
  /**
   * {@inheritDoc}
   *
   * <p>Delegates to {@link OptimizedUTF8Encoder#charDecode(byte[], int, int)},
   * which validates the bytes while decoding via a shared {@code char[]} buffer.</p>
   */
  @Override
  public String decode(byte[] encodedString, int offset, int length) throws IOException {
    return charDecode(encodedString, offset, length);
  }
}
| 8,360 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/IamHelper.java | package com.amazon.redshift.core;
import com.amazon.redshift.util.RedshiftProperties;
import com.amazonaws.AmazonClientException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.auth.profile.ProfilesConfigFile;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.services.redshift.AmazonRedshift;
import com.amazonaws.services.redshift.AmazonRedshiftClientBuilder;
import com.amazonaws.services.redshift.model.Cluster;
import com.amazonaws.services.redshift.model.DescribeClustersRequest;
import com.amazonaws.services.redshift.model.DescribeClustersResult;
import com.amazonaws.services.redshift.model.Endpoint;
import com.amazonaws.services.redshift.model.GetClusterCredentialsRequest;
import com.amazonaws.services.redshift.model.GetClusterCredentialsResult;
import com.amazonaws.services.redshift.model.GetClusterCredentialsWithIAMRequest;
import com.amazonaws.services.redshift.model.GetClusterCredentialsWithIAMResult;
import com.amazonaws.services.redshift.model.DescribeCustomDomainAssociationsRequest;
import com.amazonaws.services.redshift.model.DescribeCustomDomainAssociationsResult;
import com.amazonaws.services.redshift.AmazonRedshiftClient;
import com.amazonaws.services.redshift.AmazonRedshiftClientBuilder;
import com.amazonaws.util.StringUtils;
import com.amazon.redshift.CredentialsHolder;
import com.amazon.redshift.IPlugin;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.jdbc.RedshiftConnectionImpl;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.utils.RequestUtils;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public final class IamHelper extends IdpAuthHelper {
static final int MAX_AMAZONCLIENT_RETRY = 5;
static final int MAX_AMAZONCLIENT_RETRY_DELAY_MS = 1000;
private static final String KEY_PREFERRED_ROLE = "preferred_role";
private static final String KEY_ROLE_SESSION_NAME = "roleSessionName";
private static final String KEY_ROLE_ARN = "roleArn";
// Type of GetClusterCredential API
public static final int GET_CLUSTER_CREDENTIALS_V1_API = 1;
public static final int GET_CLUSTER_CREDENTIALS_IAM_V2_API = 2;
public static final int GET_CLUSTER_CREDENTIALS_SAML_V2_API = 3;
public static final int GET_CLUSTER_CREDENTIALS_JWT_V2_API = 4;
public static final int GET_SERVERLESS_CREDENTIALS_V1_API = 5;
private static final Pattern HOST_PATTERN =
Pattern.compile("(.+)\\.(.+)\\.(.+).redshift(-dev)?\\.amazonaws\\.com(.)*");
private static final Pattern SERVERLESS_WORKGROUP_HOST_PATTERN =
Pattern.compile("(.+)\\.(.+)\\.(.+).redshift-serverless(-dev)?\\.amazonaws\\.com(.)*");
/**
 * Classifies how the AWS credentials for the cluster-credentials API call
 * were supplied; determined while building the provider in
 * {@code setIAMCredentials}.
 */
enum CredentialProviderType
{
  /** No explicit source configured; the default AWS credentials provider chain is used. */
  NONE,
  /** Credentials loaded from a named AWS profile (ProfileCredentialsProvider). */
  PROFILE,
  /** Static IAM access key + secret key + session token (BasicSessionCredentials). */
  IAM_KEYS_WITH_SESSION,
  /** Static IAM access key + secret key without a session token (BasicAWSCredentials). */
  IAM_KEYS,
  /** Credentials produced by an {@link IPlugin} credentials-provider plugin (e.g. IdP federation). */
  PLUGIN
}
private static Map<String, GetClusterCredentialsResult> credentialsCache = new HashMap<String, GetClusterCredentialsResult>();
private static Map<String, GetClusterCredentialsWithIAMResult> credentialsV2Cache = new HashMap<String, GetClusterCredentialsWithIAMResult>();
// Static utility class; private constructor prevents instantiation.
private IamHelper() {
}
/**
 * Helper function to handle IAM connection properties. If any IAM related
 * connection property is specified, all other <b>required</b> IAM properties
 * must be specified too or else it throws an error.
 *
 * <p>Also classifies the target endpoint (provisioned vs. serverless vs.
 * NLB/CNAME) from the host name, copies all IAM-related connection settings
 * into <i>settings</i>, and finally triggers credential resolution via
 * {@code setIAMCredentials}.</p>
 *
 * @param info
 *          Redshift client settings used to authenticate if connection should
 *          be granted.
 * @param settings
 *          Redshift IAM settings
 * @param log
 *          Redshift logger
 *
 * @return New property object with properties from auth profile and given
 *         input info properties, if auth profile found. Otherwise same
 *         property object as info return.
 *
 * @throws RedshiftException
 *           If an error occurs.
 */
public static RedshiftProperties setIAMProperties(RedshiftProperties info, RedshiftJDBCSettings settings, RedshiftLogger log)
    throws RedshiftException {
  try {
    // Common code for IAM and Native Auth
    info = setAuthProperties(info, settings, log);
    // IAM keys
    String iamAccessKey = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.IAM_ACCESS_KEY_ID.getName(),
        info);
    String iamSecretKey = RedshiftConnectionImpl
        .getOptionalConnSetting(RedshiftProperty.IAM_SECRET_ACCESS_KEY.getName(), info);
    String iamSessionToken = RedshiftConnectionImpl
        .getOptionalConnSetting(RedshiftProperty.IAM_SESSION_TOKEN.getName(), info);
    String authProfile = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AUTH_PROFILE.getName(), info);
    String host = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.HOST.getName(), info);
    String userSetServerless = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.IS_SERVERLESS.getName(), info);
    Boolean hasUserSetServerless = "true".equalsIgnoreCase(userSetServerless);
    String acctId = null;
    String workGroup = null;
    Matcher mProvisioned = null;
    Matcher mServerless = null;
    // Classify the host name against the provisioned and serverless
    // endpoint patterns; both matchers stay null when no host is given.
    if(null != host)
    {
      mProvisioned = HOST_PATTERN.matcher(host);
      mServerless = SERVERLESS_WORKGROUP_HOST_PATTERN.matcher(host);
    }
    String clusterId = null;
    if (null != mProvisioned && mProvisioned.matches())
    {
      // provisioned vanilla
      if (RedshiftLogger.isEnable())
        log.logInfo("Code flow for regular provisioned cluster");
      // Cluster identifier is mandatory for a regular provisioned endpoint.
      clusterId = RedshiftConnectionImpl.getRequiredConnSetting(RedshiftProperty.CLUSTER_IDENTIFIER.getName(), info);
    }
    else if (null != mServerless && mServerless.matches())
    {
      // serverless vanilla
      // do nothing, regular serverless logic flow
      if (RedshiftLogger.isEnable())
        log.logInfo("Code flow for regular serverless cluster");
      // String isServerless = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.IS_SERVERLESS.getName(), info);
      // settings.m_isServerless = isServerless == null ? false : Boolean.valueOf(isServerless);
      settings.m_isServerless = true;
      acctId = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.SERVERLESS_ACCT_ID.getName(), info);
      workGroup = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.SERVERLESS_WORK_GROUP.getName(), info);
    }
    else if (hasUserSetServerless)
    {
      // hostname doesn't match serverless regex but serverless set to true explicitly by user
      // when ready for implementation, remove setting of the isServerless property automatically in parseUrl(),
      // set it here instead
      // currently do nothing as server does not support cname for serverless
      settings.m_isServerless = true;
      workGroup = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.SERVERLESS_WORK_GROUP.getName(), info);
      acctId = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.SERVERLESS_ACCT_ID.getName(), info);
      if(workGroup != null)
      {
        // workgroup specified by user - serverless nlb call
        // check for serverlessAcctId to enter serverless NLB logic flow, for when we implement this for serverless after server side is ready
        // currently do nothing as regular code flow is sufficient
        if (RedshiftLogger.isEnable())
          log.logInfo("Code flow for nlb serverless cluster");
      }
      else
      {
        // attempt serverless cname call - currently not supported by server
        // currently sets isCname to true which will be asserted on later, as cname for serverless is not supported yet
        if (RedshiftLogger.isEnable())
          log.logInfo("Code flow for cname serverless cluster");
        settings.m_isCname = true;
      }
    }
    else
    {
      if (RedshiftLogger.isEnable())
        log.logInfo("Code flow for nlb/cname in provisioned clusters");
      clusterId = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.CLUSTER_IDENTIFIER.getName(), info);
      // attempt provisioned cname call
      // cluster id will be fetched upon describing custom domain name
      settings.m_isCname = true;
    }
    // Read the remaining optional IAM settings from the connection properties.
    String awsRegion = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AWS_REGION.getName(), info);
    String endpointUrl = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.ENDPOINT_URL.getName(), info);
    String stsEndpointUrl = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.STS_ENDPOINT_URL.getName(),
        info);
    String profile = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AWS_PROFILE.getName(), info);
    // Fall back to the lower-cased property name for backward compatibility.
    if (profile == null)
      profile = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.AWS_PROFILE.getName().toLowerCase(),
          info);
    String iamDuration = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.IAM_DURATION.getName(), info);
    String iamAutoCreate = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.USER_AUTOCREATE.getName(),
        info);
    String iamDbUser = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.DB_USER.getName(), info);
    String iamDbGroups = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.DB_GROUPS.getName(), info);
    String iamForceLowercase = RedshiftConnectionImpl
        .getOptionalConnSetting(RedshiftProperty.FORCE_LOWERCASE.getName(), info);
    String iamGroupFederation = RedshiftConnectionImpl
        .getOptionalConnSetting(RedshiftProperty.GROUP_FEDERATION.getName(), info);
    String dbName = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.DBNAME.getName(), info);
    String hosts = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.HOST.getName(), info);
    String ports = RedshiftConnectionImpl.getOptionalConnSetting(RedshiftProperty.PORT.getName(), info);
    settings.m_clusterIdentifier = clusterId;
    // A cluster identifier is only optional for serverless and CNAME flows.
    if (!settings.m_isServerless && !settings.m_isCname
        && (null == settings.m_clusterIdentifier || settings.m_clusterIdentifier.isEmpty()))
    {
      RedshiftException err = new RedshiftException(
          GT.tr("Missing connection property {0}", RedshiftProperty.CLUSTER_IDENTIFIER.getName()),
          RedshiftState.UNEXPECTED_ERROR);
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());
      throw err;
    }
    if (settings.m_isServerless) {
      settings.m_acctId = acctId;
      settings.m_workGroup = workGroup;
    }
    // Regions.fromName(string) requires the string to be lower case and in
    // this format:
    // E.g. "us-west-2"
    if (null != awsRegion) {
      settings.m_awsRegion = awsRegion.trim().toLowerCase();
    }
    // Explicit endpoints win; otherwise fall back to JVM system properties.
    if (null != endpointUrl) {
      settings.m_endpoint = endpointUrl;
    } else {
      settings.m_endpoint = System.getProperty("redshift.endpoint-url");
    }
    if (null != stsEndpointUrl) {
      settings.m_stsEndpoint = stsEndpointUrl;
    } else {
      settings.m_stsEndpoint = System.getProperty("sts.endpoint-url");
    }
    if (null != profile) {
      settings.m_profile = profile;
    }
    // Validate the requested credential duration: must be an integer in
    // the range the GetClusterCredentials API accepts (900-3600 seconds).
    if (null != iamDuration) {
      try {
        settings.m_iamDuration = Integer.parseInt(iamDuration);
        if (settings.m_iamDuration < 900 || settings.m_iamDuration > 3600) {
          RedshiftException err = new RedshiftException(
              GT.tr("Invalid connection property value or type range(900-3600) {0}",
                  RedshiftProperty.IAM_DURATION.getName()),
              RedshiftState.UNEXPECTED_ERROR);
          if (RedshiftLogger.isEnable())
            log.log(LogLevel.ERROR, err.toString());
          throw err;
        }
      } catch (NumberFormatException e) {
        RedshiftException err = new RedshiftException(GT.tr("Invalid connection property value {0} : {1}",
            RedshiftProperty.IAM_DURATION.getName(), iamDuration), RedshiftState.UNEXPECTED_ERROR, e);
        if (RedshiftLogger.isEnable())
          log.log(LogLevel.DEBUG, err.toString());
        throw err;
      }
    }
    if (null != iamAccessKey) {
      settings.m_iamAccessKeyID = iamAccessKey;
    }
    // Because the secret access key should be hidden, and most applications
    // (for example:
    // SQL Workbench) only hide passwords, Amazon has requested that we allow
    // the
    // secret access key to be passed as either the IAMSecretAccessKey
    // property or
    // as a password value.
    if (null != iamSecretKey) {
      if (StringUtils.isNullOrEmpty(settings.m_iamAccessKeyID)) {
        RedshiftException err = new RedshiftException(
            GT.tr("Missing connection property {0}", RedshiftProperty.IAM_ACCESS_KEY_ID.getName()),
            RedshiftState.UNEXPECTED_ERROR);
        if (RedshiftLogger.isEnable())
          log.log(LogLevel.ERROR, err.toString());
        throw err;
      }
      settings.m_iamSecretKey = iamSecretKey;
      if (settings.m_iamSecretKey.isEmpty()) {
        settings.m_iamSecretKey = settings.m_password;
      }
    } else {
      settings.m_iamSecretKey = settings.m_password;
    }
    // A session token is only meaningful together with an access key ID.
    if (null != iamSessionToken) {
      if (StringUtils.isNullOrEmpty(settings.m_iamAccessKeyID)) {
        RedshiftException err = new RedshiftException(
            GT.tr("Missing connection property {0}", RedshiftProperty.IAM_ACCESS_KEY_ID.getName()),
            RedshiftState.UNEXPECTED_ERROR);
        if (RedshiftLogger.isEnable())
          log.log(LogLevel.ERROR, err.toString());
        throw err;
      }
      settings.m_iamSessionToken = iamSessionToken;
    }
    // Tri-state Booleans: null means "not set" so provider metadata may
    // supply a value later in setIAMCredentials.
    settings.m_autocreate = iamAutoCreate == null ? null : Boolean.valueOf(iamAutoCreate);
    settings.m_forceLowercase = iamForceLowercase == null ? null : Boolean.valueOf(iamForceLowercase);
    settings.m_groupFederation = iamGroupFederation == null ? false : Boolean.valueOf(iamGroupFederation);
    if (null != iamDbUser) {
      settings.m_dbUser = iamDbUser;
    }
    settings.m_dbGroups = (iamDbGroups != null)
        ? Arrays.asList((settings.m_forceLowercase != null && settings.m_forceLowercase
            ? iamDbGroups.toLowerCase(Locale.getDefault()) : iamDbGroups).split(","))
        : Collections.<String>emptyList();
    settings.m_Schema = dbName;
    if (hosts != null) {
      settings.m_host = hosts;
    }
    // NOTE(review): Integer.parseInt can throw an unwrapped
    // NumberFormatException for a malformed port value — confirm callers
    // tolerate this.
    if (ports != null) {
      settings.m_port = Integer.parseInt(ports);
    }
    // Resolve temporary database credentials with the assembled settings.
    setIAMCredentials(settings, log, authProfile);
    return info;
  } catch (RedshiftException re) {
    if (RedshiftLogger.isEnable())
      log.logError(re);
    throw re;
  }
}
/**
 * Helper function to create the appropriate credential providers.
 *
 * <p>Builds an {@link AWSCredentialsProvider} from (in priority order) an
 * explicit provider class, an AWS profile, static IAM keys, or the default
 * provider chain; merges IdP-supplied metadata (dbUser, dbGroups, autocreate)
 * into <i>settings</i> for the V1/IAM-V2/serverless API paths; then delegates
 * to {@code setClusterCredentials} to fetch the temporary credentials.</p>
 *
 * @param settings     connection settings, updated in place
 * @param log          logger
 * @param authProfile  name of the auth profile in use, or null
 * @throws RedshiftException
 *           If an unspecified error occurs.
 */
private static void setIAMCredentials(RedshiftJDBCSettings settings, RedshiftLogger log, String authProfile) throws RedshiftException {
  AWSCredentialsProvider provider;
  CredentialProviderType providerType = CredentialProviderType.NONE;
  boolean idpCredentialsRefresh = false;
  String idpToken = null;
  if (!StringUtils.isNullOrEmpty(settings.m_credentialsProvider)) {
    // An explicit provider class conflicts with both an AWS profile and
    // (outside of an auth profile) static IAM keys.
    if (!StringUtils.isNullOrEmpty(settings.m_profile)) {
      RedshiftException err = new RedshiftException(
          GT.tr("Conflict in connection property setting {0} and {1}",
              RedshiftProperty.CREDENTIALS_PROVIDER.getName(), RedshiftProperty.AWS_PROFILE.getName()),
          RedshiftState.UNEXPECTED_ERROR);
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());
      throw err;
    }
    if (StringUtils.isNullOrEmpty(authProfile)
        && !StringUtils.isNullOrEmpty(settings.m_iamAccessKeyID)) {
      RedshiftException err = new RedshiftException(
          GT.tr("Conflict in connection property setting {0} and {1}",
              RedshiftProperty.CREDENTIALS_PROVIDER.getName(), RedshiftProperty.IAM_ACCESS_KEY_ID.getName()),
          RedshiftState.UNEXPECTED_ERROR);
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());
      throw err;
    }
    try {
      // Instantiate the user-specified provider class reflectively; plugin
      // providers additionally receive logger, federation flag and args.
      Class<? extends AWSCredentialsProvider> clazz = (Class.forName(settings.m_credentialsProvider)
          .asSubclass(AWSCredentialsProvider.class));
      provider = clazz.newInstance();
      if (provider instanceof IPlugin) {
        IPlugin plugin = ((IPlugin) provider);
        providerType = CredentialProviderType.PLUGIN;
        plugin.setLogger(log);
        plugin.setGroupFederation(settings.m_groupFederation);
        for (Map.Entry<String, String> entry : settings.m_pluginArgs.entrySet()) {
          String pluginArgKey = entry.getKey();
          plugin.addParameter(pluginArgKey, entry.getValue());
          // Mirror well-known plugin args into the settings object so the
          // rest of the driver can read them directly.
          if (KEY_PREFERRED_ROLE.equalsIgnoreCase(pluginArgKey))
            settings.m_preferredRole = entry.getValue();
          else if (KEY_ROLE_ARN.equalsIgnoreCase(pluginArgKey))
            settings.m_roleArn = entry.getValue();
          else if (KEY_ROLE_SESSION_NAME.equalsIgnoreCase(pluginArgKey))
            settings.m_roleSessionName = entry.getValue();
          else if (RedshiftProperty.DB_GROUPS_FILTER.getName().equalsIgnoreCase(pluginArgKey))
            settings.m_dbGroupsFilter = entry.getValue();
        }
      }
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
      RedshiftException err = new RedshiftException(
          GT.tr("Invalid credentials provider class {0}", settings.m_credentialsProvider),
          RedshiftState.UNEXPECTED_ERROR, e);
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());
      throw err;
    } catch (NumberFormatException e) {
      RedshiftException err = new RedshiftException(
          GT.tr("{0} : {1}", e.getMessage(), settings.m_credentialsProvider), RedshiftState.UNEXPECTED_ERROR, e);
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());
      throw err;
    }
  } else if (!StringUtils.isNullOrEmpty(settings.m_profile)) {
    // AWS profile path: conflicts with static IAM keys unless an auth
    // profile supplied them.
    if (StringUtils.isNullOrEmpty(authProfile)
        && !StringUtils.isNullOrEmpty(settings.m_iamAccessKeyID)) {
      RedshiftException err = new RedshiftException(GT.tr("Conflict in connection property setting {0} and {1}",
          RedshiftProperty.AWS_PROFILE.getName(), RedshiftProperty.IAM_ACCESS_KEY_ID.getName()),
          RedshiftState.UNEXPECTED_ERROR);
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());
      throw err;
    }
    ProfilesConfigFile pcf = new PluginProfilesConfigFile(settings, log);
    provider = new ProfileCredentialsProvider(pcf, settings.m_profile);
    providerType = CredentialProviderType.PROFILE;
  } else if (!StringUtils.isNullOrEmpty(settings.m_iamAccessKeyID)) {
    // Static keys, with or without a session token.
    AWSCredentials credentials;
    if (!StringUtils.isNullOrEmpty(settings.m_iamSessionToken)) {
      credentials = new BasicSessionCredentials(settings.m_iamAccessKeyID, settings.m_iamSecretKey,
          settings.m_iamSessionToken);
      providerType = CredentialProviderType.IAM_KEYS_WITH_SESSION;
    } else {
      credentials = new BasicAWSCredentials(settings.m_iamAccessKeyID, settings.m_iamSecretKey);
      providerType = CredentialProviderType.IAM_KEYS;
    }
    provider = new AWSStaticCredentialsProvider(credentials);
  } else {
    // Nothing specified: standard AWS default chain (env vars, system
    // props, profile files, instance metadata, ...).
    provider = new DefaultAWSCredentialsProviderChain();
  }
  if (RedshiftLogger.isEnable())
    log.log(LogLevel.DEBUG, "IDP Credential Provider {0}:{1}", provider, settings.m_credentialsProvider);
  int getClusterCredentialApiType = findTypeOfGetClusterCredentialsAPI(settings, providerType, provider);
  if (getClusterCredentialApiType == GET_CLUSTER_CREDENTIALS_V1_API
      || getClusterCredentialApiType == GET_CLUSTER_CREDENTIALS_IAM_V2_API
      || getClusterCredentialApiType == GET_SERVERLESS_CREDENTIALS_V1_API)
  {
    if (RedshiftLogger.isEnable())
      log.log(LogLevel.DEBUG, "Calling provider.getCredentials()");
    // Provider will cache the credentials, it's OK to call getCredentials()
    // here.
    AWSCredentials credentials = provider.getCredentials();
    if (credentials instanceof CredentialsHolder) {
      idpCredentialsRefresh = ((CredentialsHolder) credentials).isRefresh();
      // autoCreate, user and password from URL take priority.
      CredentialsHolder.IamMetadata im = ((CredentialsHolder) credentials).getMetadata();
      if (null != im) {
        Boolean autoCreate = im.getAutoCreate();
        String dbUser = im.getDbUser();
        String samlDbUser = im.getSamlDbUser();
        String profileDbUser = im.getProfileDbUser();
        String dbGroups = im.getDbGroups();
        boolean forceLowercase = im.getForceLowercase();
        boolean allowDbUserOverride = im.getAllowDbUserOverride();
        if (null == settings.m_autocreate) {
          settings.m_autocreate = autoCreate;
        }
        if (null == settings.m_forceLowercase) {
          settings.m_forceLowercase = forceLowercase;
        }
        /*
         * Order of precedence when configuring settings.m_dbUser:
         *
         * If allowDbUserOverride = true: 1. Value from SAML assertion. 2.
         * Value from connection string setting. 3. Value from credentials
         * profile setting.
         *
         * If allowDbUserOverride = false (default): 1. Value from connection
         * string setting. 2. Value from credentials profile setting. 3. Value
         * from SAML assertion.
         */
        if (allowDbUserOverride) {
          if (null != samlDbUser) {
            settings.m_dbUser = samlDbUser;
          } else if (null != dbUser) {
            settings.m_dbUser = dbUser;
          } else if (null != profileDbUser) {
            settings.m_dbUser = profileDbUser;
          }
        } else {
          if (null != dbUser) {
            settings.m_dbUser = dbUser;
          } else if (null != profileDbUser) {
            settings.m_dbUser = profileDbUser;
          } else if (null != samlDbUser) {
            settings.m_dbUser = samlDbUser;
          }
        }
        if (settings.m_dbGroups.isEmpty() && null != dbGroups) {
          settings.m_dbGroups = Arrays
              .asList((settings.m_forceLowercase ? dbGroups.toLowerCase(Locale.getDefault()) : dbGroups).split(","));
        }
      }
    }
    // A "*" user name is a placeholder that requires an explicit DbUser.
    if ("*".equals(settings.m_username) && null == settings.m_dbUser) {
      RedshiftException err = new RedshiftException(
          GT.tr("Missing connection property {0}", RedshiftProperty.DB_USER.getName()),
          RedshiftState.UNEXPECTED_ERROR);
      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());
      throw err;
    }
  } // V1 Or IAM_V2 for provisional cluster or serverless
  else {
    // TODO not yet decided
    if (RedshiftLogger.isEnable())
      log.log(LogLevel.DEBUG, "groupFederation=" + settings.m_groupFederation);
    // Check for GetClusterCredentialsV2 cache
    // Combine key of IDP and V2 API
    String key = null;
    GetClusterCredentialsWithIAMResult credentials = null;
    if (!settings.m_iamDisableCache) {
      key = getCredentialsV2CacheKey(settings, providerType, provider, getClusterCredentialApiType, false);
      credentials = credentialsV2Cache.get(key);
    }
    if (credentials == null
        || RequestUtils.isCredentialExpired(credentials.getExpiration())) {
      // If not found or expired
      // Get IDP token
      if (providerType == CredentialProviderType.PLUGIN) {
        IPlugin plugin = (IPlugin) provider;
        if (RedshiftLogger.isEnable())
          log.log(LogLevel.DEBUG, "Calling plugin.getIdpToken()");
        idpToken = plugin.getIdpToken();
      }
      settings.m_idpToken = idpToken;
    }
  } // Group federation API for plugin
  setClusterCredentials(provider, settings, log, providerType, idpCredentialsRefresh, getClusterCredentialApiType);
}
  /**
   * Calls the AWS SDK methods to return temporary credentials and stores the
   * resulting user name and password into the connection settings. The expiration
   * date is returned as the local time set by the client machines OS.
   *
   * <p>Which API is used depends on {@code getClusterCredentialApiType}:
   * provisioned-cluster GetClusterCredentials (V1), serverless GetCredentials (V1),
   * or GetClusterCredentialsWithIAM (V2).</p>
   *
   * @param credProvider AWS credentials provider used to sign the API calls
   * @param settings connection settings; host/port/cluster id may be resolved here,
   *          and the temporary user/password are written back into it
   * @param log driver logger (all logging is guarded by RedshiftLogger.isEnable())
   * @param providerType kind of credential provider (plugin, profile, IAM keys, ...)
   * @param idpCredentialsRefresh true when the IdP credentials were just refreshed;
   *          forces a cache bypass on the V1 path
   * @param getClusterCredentialApiType one of the GET_*_API constants selecting
   *          which Redshift credentials API to call
   * @throws RedshiftException
   *           If getting the cluster credentials fails; SDK client errors
   *           (AmazonClientException) are wrapped into this type.
   */
  private static void setClusterCredentials(AWSCredentialsProvider credProvider, RedshiftJDBCSettings settings,
      RedshiftLogger log, CredentialProviderType providerType, boolean idpCredentialsRefresh,
      int getClusterCredentialApiType) throws RedshiftException {
    try {
      AmazonRedshiftClientBuilder builder = AmazonRedshiftClientBuilder.standard();
      // Shared proxy/endpoint/region configuration applied for every API variant.
      builder = (AmazonRedshiftClientBuilder) setBuilderConfiguration(settings, log, builder);

      switch (getClusterCredentialApiType) {
        case GET_CLUSTER_CREDENTIALS_V1_API:
          // Call Provision cluster V1 API
          AmazonRedshift client = builder.withCredentials(credProvider).build();

          // Resolve a CNAME to a cluster identifier (best effort), then fill in
          // host/port from DescribeClusters when they are not configured.
          callDescribeCustomDomainNameAssociationsAPIForV1(settings, client, log);
          callDescribeClustersAPIForV1(settings, client);

          if (RedshiftLogger.isEnable())
            log.log(LogLevel.DEBUG, "Call V1 API of GetClusterCredentials");

          GetClusterCredentialsResult result = getClusterCredentialsResult(settings, client, log, providerType,
              idpCredentialsRefresh);

          settings.m_username = result.getDbUser();
          settings.m_password = result.getDbPassword();

          if (RedshiftLogger.isEnable())
          {
            Date now = new Date();
            log.logInfo(now + ": Using GetClusterCredentialsResult with expiration " + result.getExpiration());
          }
          break;

        case GET_SERVERLESS_CREDENTIALS_V1_API:
          // Serverless V1 API
          ServerlessIamHelper serverlessIamHelper = new ServerlessIamHelper(settings, log, credProvider);

          // Resolve host/port from the serverless configuration when needed.
          if (null == settings.m_host || settings.m_port == 0) {
            serverlessIamHelper.describeConfiguration(settings);
          }

          if (RedshiftLogger.isEnable())
            log.log(LogLevel.DEBUG, "Call Serverless V1 API of GetCredentials");

          // Writes the resolved credentials into settings itself.
          serverlessIamHelper.getCredentialsResult(settings, providerType, idpCredentialsRefresh);
          break;

        case GET_CLUSTER_CREDENTIALS_IAM_V2_API:
          // Call V2 IAM API Provision
          AmazonRedshiftClient iamClient = (AmazonRedshiftClient) builder.withCredentials(credProvider).build();

          callDescribeCustomDomainNameAssociationsAPIForV2(settings, iamClient, log);
          callDescribeClustersAPIForV2(settings, iamClient);

          if (RedshiftLogger.isEnable())
            log.log(LogLevel.DEBUG, "Call V2 API of GetClusterCredentials");

          GetClusterCredentialsWithIAMResult iamResult = getClusterCredentialsResultV2(settings, iamClient, log, providerType,
              idpCredentialsRefresh, credProvider, getClusterCredentialApiType);

          settings.m_username = iamResult.getDbUser();
          settings.m_password = iamResult.getDbPassword();

          // result will contain TimeToRefresh
          if (RedshiftLogger.isEnable()) {
            Date now = new Date();
            log.logInfo(now + ": Using GetClusterCredentialsResultV2 with expiration " + iamResult.getExpiration());
            log.logInfo(now + ": Using GetClusterCredentialsResultV2 with TimeToRefresh " + iamResult.getNextRefreshTime());
          }
          break;
      }
    }
    catch (AmazonClientException e)
    {
      // Wrap SDK failures so callers only need to handle RedshiftException.
      RedshiftException err = new RedshiftException(GT.tr("IAM error retrieving temp credentials: {0}", e.getMessage()),
          RedshiftState.UNEXPECTED_ERROR, e);

      if (RedshiftLogger.isEnable())
        log.log(LogLevel.ERROR, err.toString());

      throw err;
    }
  }
/**
* Helper function to call the DescribeClustersAPIForV2 for IAM clients for provisioned clusters
*/
static void callDescribeClustersAPIForV2(RedshiftJDBCSettings settings, AmazonRedshiftClient iamClient)
{
if (null == settings.m_host || settings.m_port == 0)
{
DescribeClustersRequest req = new DescribeClustersRequest();
req.setClusterIdentifier(settings.m_clusterIdentifier);
DescribeClustersResult resp = iamClient.describeClusters(req);
List<Cluster> clusters = resp.getClusters();
if (clusters.isEmpty()) {
throw new AmazonClientException("Failed to describeClusters.");
}
Cluster cluster = clusters.get(0);
Endpoint endpoint = cluster.getEndpoint();
if (null == endpoint) {
throw new AmazonClientException("Cluster is not fully created yet.");
}
settings.m_host = endpoint.getAddress();
settings.m_port = endpoint.getPort();
}
}
/**
* Helper function to call the DescribeClustersAPIForV1 for provisioned clusters
*/
static void callDescribeClustersAPIForV1(RedshiftJDBCSettings settings, AmazonRedshift client)
{
if (null == settings.m_host || settings.m_port == 0)
{
DescribeClustersRequest req = new DescribeClustersRequest();
req.setClusterIdentifier(settings.m_clusterIdentifier);
DescribeClustersResult resp = client.describeClusters(req);
List<Cluster> clusters = resp.getClusters();
if (clusters.isEmpty()) {
throw new AmazonClientException("Failed to describeClusters.");
}
Cluster cluster = clusters.get(0);
Endpoint endpoint = cluster.getEndpoint();
if (null == endpoint) {
throw new AmazonClientException("Cluster is not fully created yet.");
}
settings.m_host = endpoint.getAddress();
settings.m_port = endpoint.getPort();
}
}
/**
* Helper function to call the DescribeCustomDomainNameAssociationsAPI for IAM clients for provisioned clusters
*/
static void callDescribeCustomDomainNameAssociationsAPIForV2(RedshiftJDBCSettings settings, AmazonRedshiftClient iamClient, RedshiftLogger log) throws RedshiftException
{
if(settings.m_isCname)
{
DescribeCustomDomainAssociationsRequest describeRequest = new DescribeCustomDomainAssociationsRequest();
describeRequest.setCustomDomainName(settings.m_host);
try
{
DescribeCustomDomainAssociationsResult describeResponse = iamClient.describeCustomDomainAssociations(describeRequest);
settings.m_clusterIdentifier = describeResponse.getAssociations().get(0).getCertificateAssociations().get(0).getClusterIdentifier();
}
catch (Exception ex)
{
log.logInfo("No cluster identifier received from Redshift CNAME lookup");
}
}
}
/**
* Helper function to call the DescribeCustomDomainNameAssociationsAPI for provisioned clusters
*/
static void callDescribeCustomDomainNameAssociationsAPIForV1(RedshiftJDBCSettings settings, AmazonRedshift client, RedshiftLogger log) throws RedshiftException
{
if(settings.m_isCname)
{
DescribeCustomDomainAssociationsRequest describeRequest = new DescribeCustomDomainAssociationsRequest();
describeRequest.setCustomDomainName(settings.m_host);
try
{
DescribeCustomDomainAssociationsResult describeResponse = client.describeCustomDomainAssociations(describeRequest);
settings.m_clusterIdentifier = describeResponse.getAssociations().get(0).getCertificateAssociations().get(0).getClusterIdentifier();
}
catch (Exception ex)
{
log.logInfo("No cluster identifier received from Redshift CNAME lookup");
}
}
}
  /**
   * Returns GetClusterCredentials (V1) results, serving them from the shared
   * process-wide cache when possible. Synchronized because the cache is shared
   * across connections.
   *
   * <p>A fresh API call is made when caching is disabled, no entry exists, the
   * entry is expired, or a plugin provider just refreshed its IdP credentials.
   * When a custom domain name (CNAME) is configured, the request is first tried
   * against the CNAME and falls back to the cluster identifier on failure.</p>
   *
   * @throws AmazonClientException if the credentials cannot be fetched
   */
  private static synchronized GetClusterCredentialsResult getClusterCredentialsResult(RedshiftJDBCSettings settings,
      AmazonRedshift client, RedshiftLogger log, CredentialProviderType providerType, boolean idpCredentialsRefresh)
      throws AmazonClientException {
    String key = null;
    GetClusterCredentialsResult credentials = null;

    if (!settings.m_iamDisableCache) {
      key = getCredentialsCacheKey(settings, providerType, false);
      credentials = credentialsCache.get(key);
    }

    // The null check must stay first: the expiry check dereferences credentials.
    if (credentials == null || (providerType == CredentialProviderType.PLUGIN && idpCredentialsRefresh)
        || RequestUtils.isCredentialExpired(credentials.getExpiration())) {
      if (RedshiftLogger.isEnable())
        log.logInfo("GetClusterCredentials NOT from cache");

      // Drop any stale entry before fetching a replacement.
      if (!settings.m_iamDisableCache)
        credentialsCache.remove(key);

      if(settings.m_isCname)
      {
        // construct request packet with cname
        GetClusterCredentialsRequest request = constructRequestForGetClusterCredentials(settings, true, log);
        try
        {
          // make api call with cname
          credentials = makeGetClusterCredentialsAPICall(request, credentials, client, log);
        }
        catch(AmazonClientException ace)
        {
          // if api call with cname fails, recreate request packet with clusterid and re-make api call
          if(RedshiftLogger.isEnable())
          {
            log.logInfo("GetClusterCredentials API call failed with CNAME request. Retrying with ClusterID.");
          }
          request = constructRequestForGetClusterCredentials(settings, false, log);
          credentials = makeGetClusterCredentialsAPICall(request, credentials, client, log);
        }
      }
      else
      {
        // construct request packet with clusterid and make api call
        GetClusterCredentialsRequest request = constructRequestForGetClusterCredentials(settings, false, log);
        credentials = makeGetClusterCredentialsAPICall(request, credentials, client, log);
      }

      if (!settings.m_iamDisableCache)
        credentialsCache.put(key, credentials);
    }
    else
    {
      if (RedshiftLogger.isEnable())
        log.logInfo("GetClusterCredentials from cache");
    }

    return credentials;
  }
/**
* Helper function to construct the request object for GetClusterCredentials API
*/
static GetClusterCredentialsRequest constructRequestForGetClusterCredentials(RedshiftJDBCSettings settings, boolean constructWithCname, RedshiftLogger log)
{
GetClusterCredentialsRequest request = new GetClusterCredentialsRequest();
if (settings.m_iamDuration > 0)
{
request.setDurationSeconds(settings.m_iamDuration);
}
request.setDbName(settings.m_Schema);
request.setDbUser(settings.m_dbUser == null ? settings.m_username : settings.m_dbUser);
request.setAutoCreate(settings.m_autocreate);
request.setDbGroups(settings.m_dbGroups);
if(constructWithCname)
{
request.setCustomDomainName(settings.m_host);
}
else
{
request.setClusterIdentifier(settings.m_clusterIdentifier);
}
if (RedshiftLogger.isEnable())
{
log.logInfo(request.toString());
}
return request;
}
/**
* Helper function to make the API call to GetClusterCredentials
*/
static GetClusterCredentialsResult makeGetClusterCredentialsAPICall(GetClusterCredentialsRequest request, GetClusterCredentialsResult credentials, AmazonRedshift client, RedshiftLogger log)
{
for (int i = 0; i < MAX_AMAZONCLIENT_RETRY; ++i)
{
try
{
credentials = client.getClusterCredentials(request);
break;
}
catch (AmazonClientException ce)
{
checkForApiCallRateExceedError(ce, i, "getClusterCredentialsResult", log);
}
}
return credentials;
}
static void checkForApiCallRateExceedError(AmazonClientException ace, int i, String callerMethod, RedshiftLogger log)
throws AmazonClientException {
if (ace.getMessage().contains("Rate exceeded") && i < MAX_AMAZONCLIENT_RETRY - 1) {
if (RedshiftLogger.isEnable())
log.logInfo(callerMethod + " caught 'Rate exceeded' error...");
try {
Thread.sleep(MAX_AMAZONCLIENT_RETRY_DELAY_MS);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
} else {
throw ace;
}
}
  /**
   * Returns GetClusterCredentialsWithIAM (V2) results, serving them from the
   * shared process-wide V2 cache when possible. Synchronized because the cache
   * is shared across connections.
   *
   * <p>A fresh API call is made when caching is disabled, no entry exists, the
   * entry is expired, or a plugin provider holds an IdP token. When a custom
   * domain name (CNAME) is configured, the request is first tried against the
   * CNAME and falls back to the cluster identifier on failure.</p>
   *
   * @throws AmazonClientException if the credentials cannot be fetched
   */
  private static synchronized GetClusterCredentialsWithIAMResult getClusterCredentialsResultV2(
      RedshiftJDBCSettings settings, AmazonRedshiftClient client, RedshiftLogger log,
      CredentialProviderType providerType, boolean idpCredentialsRefresh, AWSCredentialsProvider provider,
      int getClusterCredentialApiType) throws AmazonClientException
  {
    String key = null;
    GetClusterCredentialsWithIAMResult credentials = null;

    if (!settings.m_iamDisableCache)
    {
      key = getCredentialsV2CacheKey(settings, providerType, provider, getClusterCredentialApiType, false);
      credentials = credentialsV2Cache.get(key);
    }

    // NOTE(review): unlike the V1 path, the plugin condition checks
    // settings.m_idpToken != null rather than idpCredentialsRefresh, so a
    // plugin provider with a token always bypasses the cache — confirm this
    // is intended. The null check must stay first: the expiry check
    // dereferences credentials.
    if (credentials == null || (providerType == CredentialProviderType.PLUGIN && settings.m_idpToken != null)
        || RequestUtils.isCredentialExpired(credentials.getExpiration()))
    {
      if (RedshiftLogger.isEnable())
        log.logInfo("GetClusterCredentialsV2 NOT from cache");

      // Drop any stale entry before fetching a replacement.
      if (!settings.m_iamDisableCache)
        credentialsV2Cache.remove(key);

      if(settings.m_isCname)
      {
        // construct request packet with cname
        GetClusterCredentialsWithIAMRequest request = constructRequestForGetClusterCredentialsWithIAM(settings, true, log);
        try
        {
          // make api call with cname
          credentials = makeGetClusterCredentialsWithIAMAPICall(request, credentials, client, log);
        }
        catch (AmazonClientException ce)
        {
          // if api call with cname fails, recreate request packet with clusterid and re-make api call
          if(RedshiftLogger.isEnable())
          {
            log.logInfo("GetClusterCredentials API call failed with CNAME request. Retrying with ClusterID.");
          }
          request = constructRequestForGetClusterCredentialsWithIAM(settings, false, log);
          credentials = makeGetClusterCredentialsWithIAMAPICall(request, credentials, client, log);
        }
      }
      else
      {
        // construct request packet with clusterid and make api call
        GetClusterCredentialsWithIAMRequest request = constructRequestForGetClusterCredentialsWithIAM(settings, false, log);
        credentials = makeGetClusterCredentialsWithIAMAPICall(request, credentials, client, log);
      }

      if (!settings.m_iamDisableCache)
        credentialsV2Cache.put(key, credentials);
    }
    else
    {
      if (RedshiftLogger.isEnable())
        log.logInfo("GetClusterCredentialsV2 from cache");
    }

    return credentials;
  }
/**
* Helper function to construct the request object for GetClusterCredentialsWithIAM API
*/
static GetClusterCredentialsWithIAMRequest constructRequestForGetClusterCredentialsWithIAM(RedshiftJDBCSettings settings, boolean constructWithCname, RedshiftLogger log)
{
GetClusterCredentialsWithIAMRequest request = new GetClusterCredentialsWithIAMRequest();
if (settings.m_iamDuration > 0) {
request.setDurationSeconds(settings.m_iamDuration);
}
request.setDbName(settings.m_Schema);
if (constructWithCname)
{
request.setCustomDomainName(settings.m_host);
}
else
{
request.setClusterIdentifier(settings.m_clusterIdentifier);
}
if (RedshiftLogger.isEnable())
log.logInfo(request.toString());
return request;
}
/**
* Helper function to make the API call to GetClusterCredentialsWithIAM
*/
static GetClusterCredentialsWithIAMResult makeGetClusterCredentialsWithIAMAPICall(GetClusterCredentialsWithIAMRequest request, GetClusterCredentialsWithIAMResult credentials, AmazonRedshiftClient client, RedshiftLogger log)
{
for (int i = 0; i < MAX_AMAZONCLIENT_RETRY; ++i)
{
try
{
credentials = client.getClusterCredentialsWithIAM(request);
break;
}
catch (AmazonClientException ace)
{
checkForApiCallRateExceedError(ace, i, "getClusterCredentialsResultV2", log);
}
}
return credentials;
}
static String getCredentialsCacheKey(RedshiftJDBCSettings settings, CredentialProviderType providerType,
boolean serverless) {
String key;
String dbGroups = "";
if (settings.m_dbGroups != null && !settings.m_dbGroups.isEmpty()) {
Collections.sort(settings.m_dbGroups);
dbGroups = String.join(",", settings.m_dbGroups);
}
key = ((!serverless) ? settings.m_clusterIdentifier : settings.m_acctId) + ";"
+ ((serverless && settings.m_workGroup != null) ? settings.m_workGroup : "") + ";"
+ (settings.m_dbUser == null ? settings.m_username : settings.m_dbUser) + ";"
+ (settings.m_Schema == null ? "" : settings.m_Schema) + ";" + dbGroups + ";" + settings.m_autocreate + ";"
+ settings.m_iamDuration;
switch (providerType) {
case PROFILE: {
key += ";" + settings.m_profile;
break;
}
case IAM_KEYS_WITH_SESSION: {
key += ";" + settings.m_iamAccessKeyID + ";" + settings.m_iamSecretKey + ";" + settings.m_iamSessionToken;
break;
}
case IAM_KEYS: {
key += ";" + settings.m_iamAccessKeyID + ";" + settings.m_iamSecretKey;
break;
}
default: {
break;
}
} // Switch
return key;
}
static String getCredentialsV2CacheKey(RedshiftJDBCSettings settings, CredentialProviderType providerType,
AWSCredentialsProvider provider, int getClusterCredentialApiType, boolean serverless) {
String key = "";
if (providerType == CredentialProviderType.PLUGIN) {
// Get IDP key
IPlugin plugin = (IPlugin) provider;
key = plugin.getCacheKey();
}
// Combine IDP key with V2 API parameters
key += (((!serverless) ? settings.m_clusterIdentifier : settings.m_acctId) + ";"
+ ((serverless && settings.m_workGroup != null) ? settings.m_workGroup : "") + ";"
+ (settings.m_Schema == null ? "" : settings.m_Schema) + ";" + settings.m_iamDuration);
if (getClusterCredentialApiType == GET_CLUSTER_CREDENTIALS_SAML_V2_API) {
if (settings.m_preferredRole != null) {
key += (settings.m_preferredRole + ";");
}
if (settings.m_dbGroupsFilter != null) {
key += (settings.m_dbGroupsFilter + ";");
}
} else if (getClusterCredentialApiType == GET_CLUSTER_CREDENTIALS_JWT_V2_API) {
if (settings.m_idpToken != null) {
key += (settings.m_idpToken + ";");
}
if (settings.m_roleArn != null) {
key += (settings.m_roleArn + ";");
}
if (settings.m_roleSessionName != null) {
key += (settings.m_roleSessionName + ";");
}
}
switch (providerType) {
case PROFILE: {
key += ";" + settings.m_profile;
break;
}
case IAM_KEYS_WITH_SESSION: {
key += ";" + settings.m_iamAccessKeyID + ";" + settings.m_iamSecretKey + ";" + settings.m_iamSessionToken;
break;
}
case IAM_KEYS: {
key += ";" + settings.m_iamAccessKeyID + ";" + settings.m_iamSecretKey;
break;
}
default: {
break;
}
} // Switch
return key;
}
private static int findTypeOfGetClusterCredentialsAPI(RedshiftJDBCSettings settings,
CredentialProviderType providerType, AWSCredentialsProvider provider) {
if (!settings.m_isServerless)
{
if (!settings.m_groupFederation)
return GET_CLUSTER_CREDENTIALS_V1_API;
else
{
return GET_CLUSTER_CREDENTIALS_IAM_V2_API;
}
} else {
// Serverless
if (!settings.m_groupFederation)
return GET_SERVERLESS_CREDENTIALS_V1_API;
else
{
if (settings.m_isCname)
{
throw new AmazonClientException("Custom cluster names are not supported for Redshift Serverless");
}
else
{
return GET_CLUSTER_CREDENTIALS_IAM_V2_API; // Fallback to Provision API support in serverless
}
}
} // Serverless
}
static AwsClientBuilder setBuilderConfiguration(RedshiftJDBCSettings settings, RedshiftLogger log,
AwsClientBuilder builder) {
ClientConfiguration clientConfig = RequestUtils.getProxyClientConfig(log);
if (clientConfig != null) {
builder.setClientConfiguration(clientConfig);
}
if (RedshiftLogger.isEnable()) {
log.logInfo("setBuilderConfiguration: settings.m_endpoint= " + settings.m_endpoint + " settings.m_awsRegion = "
+ settings.m_awsRegion);
}
if (settings.m_endpoint != null) {
EndpointConfiguration cfg = new EndpointConfiguration(settings.m_endpoint, settings.m_awsRegion);
builder.setEndpointConfiguration(cfg);
} else if (settings.m_awsRegion != null && !settings.m_awsRegion.isEmpty()) {
builder.setRegion(settings.m_awsRegion);
}
return builder;
}
}
| 8,361 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/RedshiftBindException.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.io.IOException;
/**
 * An {@link IOException} wrapper used to signal that binding a parameter value
 * failed while writing to the protocol stream.
 *
 * <p>The wrapped exception is also installed as this exception's cause and its
 * message is propagated, so the underlying failure shows up in stack traces
 * instead of being reachable only through {@link #getIOException()}.</p>
 */
public class RedshiftBindException extends IOException {

  private static final long serialVersionUID = 1L;

  // The underlying I/O failure, kept for callers using getIOException().
  private final IOException ioe;

  public RedshiftBindException(IOException ioe) {
    // Propagate the message and chain the cause for better diagnostics.
    super(ioe.getMessage());
    initCause(ioe);
    this.ioe = ioe;
  }

  /**
   * @return the underlying IOException that caused the bind failure
   */
  public IOException getIOException() {
    return ioe;
  }
}
| 8,362 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Field.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.jdbc.FieldMetadata;
/**
 * Per-column metadata for a result-set field as described by the backend:
 * the type OID, length, type modifier, wire format (text or binary), the
 * originating table OID and column position, plus lazily resolved JDBC type
 * information.
 */
public class Field {
  // The V3 protocol defines two constants for the format of data
  public static final int TEXT_FORMAT = 0;
  public static final int BINARY_FORMAT = 1;

  private final int length; // Internal Length of this field
  private final int oid; // OID of the type
  private final int mod; // type modifier of this field
  private final String columnLabel; // Column label

  private int format = TEXT_FORMAT; // In the V3 protocol each field has a format
  // 0 = text, 1 = binary
  // In the V2 protocol all fields in a
  // binary cursor are binary and all
  // others are text

  private final int tableOid; // OID of table ( zero if no table )
  private final int positionInTable;

  // Cache fields filled in by AbstractJdbc2ResultSetMetaData.fetchFieldMetaData.
  // Don't use unless that has been called.
  private FieldMetadata metadata;

  private int sqlType;
  private String rsType = NOT_YET_LOADED;

  // New string to avoid clashes with other strings: this sentinel is compared
  // by identity (==/!=) in isTypeInitialized(), so it must be a distinct
  // object and must never be replaced with an interned string literal that a
  // real type name could accidentally equal.
  private static final String NOT_YET_LOADED = new String("pgType is not yet loaded");

  /**
   * Construct a field based on the information fed to it.
   *
   * @param name the name (column name and label) of the field
   * @param oid the OID of the field
   * @param length the length of the field
   * @param mod modifier
   */
  public Field(String name, int oid, int length, int mod) {
    this(name, oid, length, mod, 0, 0);
  }

  /**
   * Constructor without mod parameter. Length defaults to 0 and the modifier
   * to -1 (no modifier).
   *
   * @param name the name (column name and label) of the field
   * @param oid the OID of the field
   */
  public Field(String name, int oid) {
    this(name, oid, 0, -1);
  }

  /**
   * Construct a field based on the information fed to it.
   * @param columnLabel the column label of the field
   * @param oid the OID of the field
   * @param length the length of the field
   * @param mod modifier
   * @param tableOid the OID of the columns' table
   * @param positionInTable the position of column in the table (first column is 1, second column is 2, etc...)
   */
  public Field(String columnLabel, int oid, int length, int mod, int tableOid,
      int positionInTable) {
    this.columnLabel = columnLabel;
    this.oid = oid;
    this.length = length;
    this.mod = mod;
    this.tableOid = tableOid;
    this.positionInTable = positionInTable;
    // Fields without a backing table get an immediate label-only metadata
    // object; table-backed fields are resolved lazily via setMetadata().
    this.metadata = tableOid == 0 ? new FieldMetadata(columnLabel) : null;
  }

  /**
   * @return the oid of this Field's data type
   */
  public int getOID() {
    return oid;
  }

  /**
   * @return the mod of this Field's data type
   */
  public int getMod() {
    return mod;
  }

  /**
   * @return the column label of this Field's data type
   */
  public String getColumnLabel() {
    return columnLabel;
  }

  /**
   * @return the length of this Field's data type
   */
  public int getLength() {
    return length;
  }

  /**
   * @return the format of this Field's data (text=0, binary=1)
   */
  public int getFormat() {
    return format;
  }

  /**
   * @param format the format of this Field's data (text=0, binary=1)
   */
  public void setFormat(int format) {
    this.format = format;
  }

  /**
   * @return the columns' table oid, zero if no oid available
   */
  public int getTableOid() {
    return tableOid;
  }

  /**
   * @return the 1-based position of the column in its table, 0 if unknown
   */
  public int getPositionInTable() {
    return positionInTable;
  }

  /**
   * @return the cached field metadata, or null if not fetched yet
   */
  public FieldMetadata getMetadata() {
    return metadata;
  }

  public void setMetadata(FieldMetadata metadata) {
    this.metadata = metadata;
  }

  public String toString() {
    return "Field(" + (columnLabel != null ? columnLabel : "")
        + "," + Oid.toString(oid)
        + "," + length
        + "," + (format == TEXT_FORMAT ? 'T' : 'B')
        + ")";
  }

  // Cached java.sql.Types code for this field.
  public void setSQLType(int sqlType) {
    this.sqlType = sqlType;
  }

  public int getSQLType() {
    return sqlType;
  }

  // Cached server-side type name; replaces the NOT_YET_LOADED sentinel.
  public void setRSType(String pgType) {
    this.rsType = pgType;
  }

  public String getRSType() {
    return rsType;
  }

  public boolean isTypeInitialized() {
    // Intentional identity comparison against the sentinel object; see the
    // comment on NOT_YET_LOADED.
    return rsType != NOT_YET_LOADED;
  }
}
| 8,363 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/TypeInfo.java | /*
* Copyright (c) 2008, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.util.RedshiftObject;
import java.sql.SQLException;
import java.util.Iterator;
public interface TypeInfo {
  /**
   * Registers one of the driver's built-in type mappings.
   *
   * @param rsTypeName the server-side type name
   * @param oid the type's OID
   * @param sqlType the corresponding {@link java.sql.Types} constant
   * @param javaClass the name of the Java class values of this type map to
   * @param arrayOid the OID of the corresponding array type
   */
  void addCoreType(String rsTypeName, Integer oid, Integer sqlType, String javaClass,
      Integer arrayOid);

  /**
   * Registers a custom type mapping to a {@link RedshiftObject} subclass.
   *
   * @param type the server-side type name
   * @param klass the RedshiftObject subclass used to represent values
   * @throws SQLException if the mapping cannot be registered
   */
  void addDataType(String type, Class<? extends RedshiftObject> klass) throws SQLException;

  /**
   * Look up the SQL typecode for a given type oid.
   *
   * @param oid the type's OID
   * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
   * @throws SQLException if an error occurs when retrieving sql type
   */
  int getSQLType(int oid) throws SQLException;

  /**
   * Look up the SQL typecode for a given Redshift type name.
   *
   * @param rsTypeName the server type name to look up
   * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
   * @throws SQLException if an error occurs when retrieving sql type
   */
  int getSQLType(String rsTypeName) throws SQLException;

  /**
   * Look up the oid for a given redshift type name. This is the inverse of
   * {@link #getRSType(int)}.
   *
   * @param rsTypeName the server type name to look up
   * @return the type's OID, or 0 if unknown
   * @throws SQLException if an error occurs when retrieving RS type
   */
  int getRSType(String rsTypeName) throws SQLException;

  /**
   * Look up the redshift type name for a given oid. This is the inverse of
   * {@link #getRSType(String)}.
   *
   * @param oid the type's OID
   * @return the server type name for that OID or null if unknown
   * @throws SQLException if an error occurs when retrieving RS type
   */
  String getRSType(int oid) throws SQLException;

  /**
   * Look up the oid of an array's base type given the array's type oid.
   *
   * @param oid the array type's OID
   * @return the base type's OID, or 0 if unknown
   * @throws SQLException if an error occurs when retrieving array element
   */
  int getRSArrayElement(int oid) throws SQLException;

  /**
   * Determine the oid of the given base Redshift type's array type.
   *
   * @param elementTypeName the base type's
   * @return the array type's OID, or 0 if unknown
   * @throws SQLException if an error occurs when retrieving array type
   */
  int getRSArrayType(String elementTypeName) throws SQLException;

  /**
   * Determine the delimiter for the elements of the given array type oid.
   *
   * @param oid the array type's OID
   * @return the base type's array type delimiter
   * @throws SQLException if an error occurs when retrieving array delimiter
   */
  char getArrayDelimiter(int oid) throws SQLException;

  /**
   * @return an iterator over the server type names that have a SQL type mapping
   */
  Iterator<String> getRSTypeNamesWithSQLTypes();

  /**
   * @param type the server-side type name
   * @return the registered RedshiftObject subclass for the type, or null if none
   */
  Class<? extends RedshiftObject> getRSobject(String type);

  /**
   * @param oid the type's OID
   * @return the name of the Java class values of this type map to
   * @throws SQLException if an error occurs during the lookup
   */
  String getJavaClass(int oid) throws SQLException;

  /**
   * Resolves a type-name alias to its canonical type name.
   *
   * @param alias the alias to resolve
   * @return the canonical type name for the alias
   */
  String getTypeForAlias(String alias);

  /**
   * @param oid the type's OID
   * @param typmod the type modifier
   * @return the column precision for the type/modifier combination
   */
  int getPrecision(int oid, int typmod);

  /**
   * @param oid the type's OID
   * @param typmod the type modifier
   * @return the column scale for the type/modifier combination
   */
  int getScale(int oid, int typmod);

  /**
   * @param oid the type's OID
   * @return true if values of the type are case sensitive
   */
  boolean isCaseSensitive(int oid);

  /**
   * @param oid the type's OID
   * @return true if the type is a signed numeric type
   */
  boolean isSigned(int oid);

  /**
   * @param oid the type's OID
   * @param typmod the type modifier
   * @return the normal maximum display width for the type/modifier combination
   */
  int getDisplaySize(int oid, int typmod);

  /**
   * @param oid the type's OID
   * @return the maximum precision the type supports
   */
  int getMaximumPrecision(int oid);

  /**
   * @param oid the type's OID
   * @return true if literal values of the type must be quoted in SQL text
   * @throws SQLException if something goes wrong
   */
  boolean requiresQuoting(int oid) throws SQLException;

  /**
   * Returns true if particular sqlType requires quoting.
   * This method is used internally by the driver, so it might disappear without notice.
   *
   * @param sqlType sql type as in java.sql.Types
   * @return true if the type requires quoting
   * @throws SQLException if something goes wrong
   */
  boolean requiresQuotingSqlType(int sqlType) throws SQLException;
}
| 8,364 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Encoding.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.Charset;
import java.util.HashMap;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
/**
* Representation of a particular character encoding.
*/
public class Encoding {
  protected RedshiftLogger logger;

  // Fallback used whenever a requested charset is unknown to the JVM; wraps
  // the platform default charset.
  private static final Encoding DEFAULT_ENCODING = new Encoding();

  /*
   * Preferred JVM encodings for backend encodings. Each backend encoding name
   * maps to an ordered list of JVM charset names to try.
   */
  private static final HashMap<String, String[]> encodings = new HashMap<String, String[]>();

  static {
    //Note: this list should match the set of supported server
    // encodings found in backend/util/mb/encnames.c
    encodings.put("SQL_ASCII", new String[]{"ASCII", "US-ASCII"});
    encodings.put("UNICODE", new String[]{"UTF-8", "UTF8"});
    encodings.put("UTF8", new String[]{"UTF-8", "UTF8"});
    encodings.put("LATIN1", new String[]{"ISO8859_1"});
    encodings.put("LATIN2", new String[]{"ISO8859_2"});
    encodings.put("LATIN3", new String[]{"ISO8859_3"});
    encodings.put("LATIN4", new String[]{"ISO8859_4"});
    encodings.put("ISO_8859_5", new String[]{"ISO8859_5"});
    encodings.put("ISO_8859_6", new String[]{"ISO8859_6"});
    encodings.put("ISO_8859_7", new String[]{"ISO8859_7"});
    encodings.put("ISO_8859_8", new String[]{"ISO8859_8"});
    encodings.put("LATIN5", new String[]{"ISO8859_9"});
    encodings.put("LATIN7", new String[]{"ISO8859_13"});
    encodings.put("LATIN9", new String[]{"ISO8859_15_FDIS"});
    encodings.put("EUC_JP", new String[]{"EUC_JP"});
    encodings.put("EUC_CN", new String[]{"EUC_CN"});
    encodings.put("EUC_KR", new String[]{"EUC_KR"});
    encodings.put("JOHAB", new String[]{"Johab"});
    encodings.put("EUC_TW", new String[]{"EUC_TW"});
    encodings.put("SJIS", new String[]{"MS932", "SJIS"});
    encodings.put("BIG5", new String[]{"Big5", "MS950", "Cp950"});
    encodings.put("GBK", new String[]{"GBK", "MS936"});
    encodings.put("UHC", new String[]{"MS949", "Cp949", "Cp949C"});
    encodings.put("TCVN", new String[]{"Cp1258"});
    encodings.put("WIN1256", new String[]{"Cp1256"});
    encodings.put("WIN1250", new String[]{"Cp1250"});
    encodings.put("WIN874", new String[]{"MS874", "Cp874"});
    encodings.put("WIN", new String[]{"Cp1251"});
    encodings.put("ALT", new String[]{"Cp866"});
    // We prefer KOI8-U, since it is a superset of KOI8-R.
    encodings.put("KOI8", new String[]{"KOI8_U", "KOI8_R"});
    // If the database isn't encoding-aware then we can't have
    // any preferred encodings.
    encodings.put("UNKNOWN", new String[0]);
    // The following encodings do not have a java equivalent
    encodings.put("MULE_INTERNAL", new String[0]);
    encodings.put("LATIN6", new String[0]);
    encodings.put("LATIN8", new String[0]);
    encodings.put("LATIN10", new String[0]);
  }

  // Indirection so the preferred UTF-8 encoder implementation is chosen once,
  // at class-load time, based on the runtime Java version.
  private interface UTFEncodingProvider {
    Encoding getEncoding();
  }

  private static final UTFEncodingProvider UTF_ENCODING_PROVIDER;

  static {
    //for java 1.8 and older, use implementation optimized for char[]
    final JavaVersion runtimeVersion = JavaVersion.getRuntimeVersion();
    if (JavaVersion.v1_8.compareTo(runtimeVersion) >= 0) {
      UTF_ENCODING_PROVIDER = new UTFEncodingProvider() {
        @Override
        public Encoding getEncoding() {
          return new CharOptimizedUTF8Encoder();
        }
      };
    } else {
      //for newer versions, use default java behavior
      UTF_ENCODING_PROVIDER = new UTFEncodingProvider() {
        @Override
        public Encoding getEncoding() {
          return new ByteOptimizedUTF8Encoder();
        }
      };
    }
  }
  private final Charset encoding;
  // True when '-' and '0'..'9' occupy the same byte values as ASCII, allowing
  // numbers to be scanned directly from the raw bytes.
  private final boolean fastASCIINumbers;

  /**
   * Uses the default charset of the JVM.
   */
  private Encoding() {
    this(Charset.defaultCharset(), RedshiftLogger.getDriverLogger());
  }

  /**
   * Subclasses may use this constructor if they know in advance of their ASCII number
   * compatibility.
   *
   * @param encoding charset to use
   * @param logger the logger to log the entry for debugging.
   * @param fastASCIINumbers whether this encoding is compatible with ASCII numbers.
   */
  protected Encoding(Charset encoding, boolean fastASCIINumbers, RedshiftLogger logger) {
    this.logger = logger;
    if (encoding == null) {
      throw new NullPointerException("Null encoding charset not supported");
    }

    this.encoding = encoding;
    this.fastASCIINumbers = fastASCIINumbers;
    if (RedshiftLogger.isEnable()) {
      this.logger.log(LogLevel.DEBUG, "Creating new Encoding {0} with fastASCIINumbers {1}",
          new Object[]{encoding, fastASCIINumbers});
    }
  }

  /**
   * Use the charset passed as parameter and tests at creation time whether the specified encoding
   * is compatible with ASCII numbers.
   *
   * @param encoding charset to use
   * @param logger the logger to log the entry for debugging.
   */
  protected Encoding(Charset encoding, RedshiftLogger logger) {
    // testAsciiNumbers(...) computes the fastASCIINumbers flag for this
    // charset (defined elsewhere in this class).
    this(encoding, testAsciiNumbers(encoding), logger);
  }
  /**
   * Returns true if this encoding has characters '-' and '0'..'9' in exactly same posision as
   * ascii.
   *
   * @return true if the bytes can be scanned directly for ascii numbers.
   */
  public boolean hasAsciiNumbers() {
    return fastASCIINumbers;
  }

  /**
   * Construct an Encoding for a given JVM encoding.
   *
   * @param jvmEncoding the name of the JVM encoding
   * @param logger the logger to log the entry for debugging.
   * @return an Encoding instance for the specified encoding, or an Encoding instance for the
   *         default JVM encoding if the specified encoding is unavailable.
   */
  public static Encoding getJVMEncoding(String jvmEncoding, RedshiftLogger logger) {
    // UTF-8 gets the specialized encoder selected at class-load time.
    if ("UTF-8".equals(jvmEncoding)) {
      return UTF_ENCODING_PROVIDER.getEncoding();
    }
    if (Charset.isSupported(jvmEncoding)) {
      return new Encoding(Charset.forName(jvmEncoding), logger);
    }
    // Unknown to the JVM: fall back to the platform default encoding.
    return DEFAULT_ENCODING;
  }
/**
* Construct an Encoding for a given database encoding.
*
* @param databaseEncoding the name of the database encoding
* @param logger the logger to log the entry for debugging.
* @return an Encoding instance for the specified encoding, or an Encoding instance for the
* default JVM encoding if the specified encoding is unavailable.
*/
public static Encoding getDatabaseEncoding(String databaseEncoding, RedshiftLogger logger) {
if ("UTF8".equals(databaseEncoding)) {
return UTF_ENCODING_PROVIDER.getEncoding();
}
// If the backend encoding is known and there is a suitable
// encoding in the JVM we use that. Otherwise we fall back
// to the default encoding of the JVM.
String[] candidates = encodings.get(databaseEncoding);
if (candidates != null) {
for (String candidate : candidates) {
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, "Search encoding candidate {0}", candidate);
if (Charset.isSupported(candidate)) {
return new Encoding(Charset.forName(candidate), logger);
}
}
}
// Try the encoding name directly -- maybe the charset has been
// provided by the user.
if (Charset.isSupported(databaseEncoding)) {
return new Encoding(Charset.forName(databaseEncoding), logger);
}
// Fall back to default JVM encoding.
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, "{0} encoding not found, returning default encoding", databaseEncoding);
return DEFAULT_ENCODING;
}
/**
* Get the name of the (JVM) encoding used.
*
* @return the JVM encoding name used by this instance.
*/
  public String name() {
    // Canonical JVM name of the backing Charset (e.g. "UTF-8").
    return encoding.name();
  }
/**
* Encode a string to an array of bytes.
*
* @param s the string to encode
* @return a bytearray containing the encoded string
* @throws IOException if something goes wrong
*/
public byte[] encode(String s) throws IOException {
if (s == null) {
return null;
}
return s.getBytes(encoding);
}
/**
* Decode an array of bytes into a string.
*
* @param encodedString a byte array containing the string to decode
* @param offset the offset in <code>encodedString</code> of the first byte of the encoded
* representation
* @param length the length, in bytes, of the encoded representation
* @return the decoded string
* @throws IOException if something goes wrong
*/
  public String decode(byte[] encodedString, int offset, int length) throws IOException {
    // String(byte[],int,int,Charset) replaces malformed input rather than throwing.
    return new String(encodedString, offset, length, encoding);
  }
/**
* Decode an array of bytes into a string.
*
* @param encodedString a byte array containing the string to decode
* @return the decoded string
* @throws IOException if something goes wrong
*/
public String decode(byte[] encodedString) throws IOException {
return decode(encodedString, 0, encodedString.length);
}
/**
* Get a Reader that decodes the given InputStream using this encoding.
*
* @param in the underlying stream to decode from
* @return a non-null Reader implementation.
* @throws IOException if something goes wrong
*/
  public Reader getDecodingReader(InputStream in) throws IOException {
    // The caller owns the returned Reader; closing it closes the stream.
    return new InputStreamReader(in, encoding);
  }
/**
* Get a Writer that encodes to the given OutputStream using this encoding.
*
* @param out the underlying stream to encode to
* @return a non-null Writer implementation.
* @throws IOException if something goes wrong
*/
  public Writer getEncodingWriter(OutputStream out) throws IOException {
    // The caller owns the returned Writer; closing it closes the stream.
    return new OutputStreamWriter(out, encoding);
  }
/**
* Get an Encoding using the default encoding for the JVM.
*
* @return an Encoding instance
*/
  public static Encoding defaultEncoding() {
    // Shared singleton wrapping the JVM's default charset.
    return DEFAULT_ENCODING;
  }
  @Override
  public String toString() {
    // Same value as name(): the canonical charset name.
    return encoding.name();
  }
/**
* Checks whether this encoding is compatible with ASCII for the number characters '-' and
* '0'..'9'. Where compatible means that they are encoded with exactly same values.
*
* @return If faster ASCII number parsing can be used with this encoding.
*/
private static boolean testAsciiNumbers(Charset encoding) {
// TODO: test all postgres supported encoding to see if there are
// any which do _not_ have ascii numbers in same location
// at least all the encoding listed in the encodings hashmap have
// working ascii numbers
try {
String test = "-0123456789";
byte[] bytes = test.getBytes(encoding);
String res = new String(bytes, "US-ASCII");
return test.equals(res);
} catch (java.io.UnsupportedEncodingException e) {
return false;
}
}
}
| 8,365 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ConnectionFactory.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.core.v3.ConnectionFactoryImpl;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.HostSpec;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Properties;
/**
* Handles protocol-specific connection setup.
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
public abstract class ConnectionFactory {
  /**
   * <p>Establishes and initializes a new connection.</p>
   *
   * <p>When the "protocolVersion" property is absent or empty, the default protocol
   * (version 3) is attempted; when it is present, only that version is tried.
   * Currently only protocol version 3 (7.4+) is supported.</p>
   *
   * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin
   *        failover
   * @param user the username to authenticate with; may not be null.
   * @param database the database on the server to connect to; may not be null.
   * @param info extra properties controlling the connection; notably, "password" if present
   *        supplies the password to authenticate with.
   * @param logger the logger to log the entry for debugging.
   * @return the new, initialized, connection
   * @throws SQLException if the connection could not be established.
   */
  public static QueryExecutor openConnection(HostSpec[] hostSpecs, String user,
      String database, Properties info, RedshiftLogger logger) throws SQLException {
    String protoName = RedshiftProperty.PROTOCOL_VERSION.get(info);
    boolean useProtocol3 = (protoName == null) || protoName.isEmpty() || "3".equals(protoName);
    if (useProtocol3) {
      ConnectionFactory factory = new ConnectionFactoryImpl();
      QueryExecutor executor = factory.openConnectionImpl(hostSpecs, user, database, info, logger);
      if (executor != null) {
        return executor;
      }
    }
    // Either an unknown protocol version was requested, or the factory
    // declined the connection (returned null) without raising an error.
    throw new RedshiftException(
        GT.tr("A connection could not be made using the requested protocol {0}.", protoName),
        RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
  }

  /**
   * Implementation of {@link #openConnection} for a particular protocol version. Implemented by
   * subclasses of {@link ConnectionFactory}.
   *
   * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin
   *        failover
   * @param user the username to authenticate with; may not be null.
   * @param database the database on the server to connect to; may not be null.
   * @param info extra properties controlling the connection; notably, "password" if present
   *        supplies the password to authenticate with.
   * @param logger the logger to log the entry for debugging.
   * @return the new, initialized, connection, or <code>null</code> if this protocol version is not
   *         supported by the server.
   * @throws SQLException if the connection could not be established for a reason other than
   *         protocol version incompatibility.
   */
  public abstract QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, String user,
      String database, Properties info, RedshiftLogger logger) throws SQLException;

  /**
   * Safely close the given stream; a null stream is a no-op.
   *
   * @param newStream The stream to close.
   */
  protected void closeStream(RedshiftStream newStream) {
    if (newStream == null) {
      return;
    }
    try {
      newStream.close();
    } catch (IOException ignored) {
      // Best effort: a failure to close a (likely already dead) stream
      // is not actionable during connection setup/teardown.
    }
  }
}
| 8,366 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/ResultHandlerBase.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
import com.amazon.redshift.core.v3.MessageLoopState;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
/**
* Empty implementation of {@link ResultHandler} interface.
* {@link SQLException#setNextException(SQLException)} has {@code O(N)} complexity,
* so this class tracks the last exception object to speedup {@code setNextException}.
*/
public class ResultHandlerBase implements ResultHandler {
  // Last exception/warning is tracked to avoid O(N) chain walks in
  // SQLException#setNextException / SQLWarning#setNextWarning just in case there
  // will be lots of exceptions (e.g. all batch rows fail with constraint violation or so)
  private SQLException firstException;
  private SQLException lastException;
  private SQLWarning firstWarning;
  private SQLWarning lastWarning;

  @Override
  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
      ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples,
      int[] rowCount, Thread ringBufferThread) {
    // No-op by default; subclasses override to consume rows.
  }

  @Override
  public void handleCommandStatus(String status, long updateCount, long insertOID) {
    // No-op by default; subclasses override to record command completion.
  }

  @Override
  public void secureProgress() {
    // No-op by default.
  }

  @Override
  public void handleWarning(SQLWarning warning) {
    if (firstWarning == null) {
      firstWarning = lastWarning = warning;
      return;
    }
    // Append via setNextWarning (not setNextException) so the chain is built
    // with the SQLWarning API that SQLWarning.getNextWarning() expects.
    lastWarning.setNextWarning(warning);
    lastWarning = warning;
  }

  @Override
  public void handleError(SQLException error) {
    if (firstException == null) {
      firstException = lastException = error;
      return;
    }
    // Append at the tracked tail: O(1) instead of setNextException's O(N) walk.
    lastException.setNextException(error);
    lastException = error;
  }

  @Override
  public void handleCompletion() throws SQLException {
    // Surface the first accumulated error (with the rest chained behind it).
    if (firstException != null) {
      throw firstException;
    }
  }

  @Override
  public SQLException getException() {
    return firstException;
  }

  @Override
  public SQLWarning getWarning() {
    return firstWarning;
  }

  @Override
  public void setStatementStateIdleFromInQuery() {
    // Do nothing
  }

  @Override
  public void setStatementStateInQueryFromIdle() {
    // Do nothing
  }

  @Override
  public boolean wantsScrollableResultSet() {
    return false;
  }
}
| 8,367 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Oid.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
/**
* Provides constants for well-known backend OIDs for the types we commonly use.
*/
public class Oid {
  // Numeric and text base types (each followed by its array type's OID).
  public static final int INT2 = 21;
  public static final int INT2_ARRAY = 1005;
  public static final int INT4 = 23;
  public static final int INT4_ARRAY = 1007;
  public static final int INT8 = 20;
  public static final int INT8_ARRAY = 1016;
  public static final int TEXT = 25;
  public static final int TEXT_ARRAY = 1009;
  public static final int NUMERIC = 1700;
  public static final int NUMERIC_ARRAY = 1231;
  public static final int FLOAT4 = 700;
  public static final int FLOAT4_ARRAY = 1021;
  public static final int FLOAT8 = 701;
  public static final int FLOAT8_ARRAY = 1022;
  public static final int BOOL = 16;
  public static final int BOOL_ARRAY = 1000;
  // Date/time types.
  public static final int DATE = 1082;
  public static final int DATE_ARRAY = 1182;
  public static final int TIME = 1083;
  public static final int TIME_ARRAY = 1183;
  public static final int TIMETZ = 1266;
  public static final int TIMETZ_ARRAY = 1270;
  public static final int TIMESTAMP = 1114;
  public static final int TIMESTAMP_ARRAY = 1115;
  public static final int TIMESTAMPTZ = 1184;
  public static final int TIMESTAMPTZ_ARRAY = 1185;
  public static final int BYTEA = 17;
  public static final int BYTEA_ARRAY = 1001;
  public static final int VARCHAR = 1043;
  public static final int VARCHAR_ARRAY = 1015;
  public static final int OID = 26;
  public static final int OID_ARRAY = 1028;
  public static final int BPCHAR = 1042;
  public static final int BPCHAR_ARRAY = 1014;
  public static final int MONEY = 790;
  public static final int MONEY_ARRAY = 791;
  public static final int NAME = 19;
  public static final int NAME_ARRAY = 1003;
  public static final int BIT = 1560;
  public static final int BIT_ARRAY = 1561;
  public static final int VOID = 2278;
  public static final int INTERVAL = 1186;
  public static final int INTERVAL_ARRAY = 1187;
  public static final int INTERVALY2M = 1188;
  public static final int INTERVALY2M_ARRAY = 1189;
  public static final int INTERVALD2S = 1190;
  public static final int INTERVALD2S_ARRAY = 1191;
  public static final int CHAR = 18; // This is not char(N), this is "char" a single byte type.
  public static final int CHAR_ARRAY = 1002;
  public static final int VARBIT = 1562;
  public static final int VARBIT_ARRAY = 1563;
  public static final int UUID = 2950;
  public static final int UUID_ARRAY = 2951;
  public static final int XML = 142;
  public static final int XML_ARRAY = 143;
  public static final int POINT = 600;
  public static final int POINT_ARRAY = 1017;
  public static final int BOX = 603;
  public static final int JSONB_ARRAY = 3807;
  public static final int JSON = 114;
  public static final int JSON_ARRAY = 199;
  public static final int REF_CURSOR = 1790;
  public static final int REF_CURSOR_ARRAY = 2201;
  // Redshift-specific types. Several array OIDs are 0 (UNSPECIFIED) because no
  // array form exists for them on the server.
  public static final int GEOMETRY = 3000;
  public static final int GEOMETRY_ARRAY = 0; // UNSPECIFIED
  public static final int GEOMETRYHEX = 3999;
  public static final int GEOMETRYHEX_ARRAY = 0; // UNSPECIFIED
  public static final int SUPER = 4000;
  public static final int SUPER_ARRAY = 0; // UNSPECIFIED
  public static final int VARBYTE = 6551;
  public static final int VARBYTE_ARRAY = 0; // UNSPECIFIED
  public static final int GEOGRAPHY = 3001;
  public static final int GEOGRAPHY_ARRAY = 0; // UNSPECIFIED
  // System-catalog helper types.
  public static final int TIDOID = 27; // VARCHAR
  public static final int TIDARRAYOID = 1010;
  public static final int XIDOID = 28; // INTEGER
  public static final int XIDARRAYOID = 1011;
  public static final int ACLITEM = 1033; // In Binary mode treat it as VARCHAR, as data comes from server same as VARCHAR.
  public static final int ACLITEM_ARRAY = 1034;
  public static final int ABSTIMEOID = 702; // validuntil col in pg_user
  public static final int ABSTIMEARRAYOID = 1023; // UNSPECIFIED
  public static final int REGPROC = 24; // validuntil col in pg_type, pg_operator
  public static final int REGPROC_ARRAY = 1008;
  public static final int OIDVECTOR = 30; // validuntil col in pg_proc
  public static final int OIDVECTOR_ARRAY= 1013;
  // Keep this as last field to log correctly. As we have many UNSPECIFIED values.
  // (The static initializer below iterates fields in declaration order and later
  // entries overwrite earlier ones in OID_TO_NAME, so oid 0 maps to "UNSPECIFIED".)
  public static final int UNSPECIFIED = 0;
  // Bidirectional lookup tables built once by reflection over the constants above.
  private static final Map<Integer, String> OID_TO_NAME = new HashMap<Integer, String>(100);
  private static final Map<String, Integer> NAME_TO_OID = new HashMap<String, Integer>(100);
  static {
    for (Field field : Oid.class.getFields()) {
      try {
        int oid = field.getInt(null);
        String name = field.getName().toUpperCase();
        OID_TO_NAME.put(oid, name);
        NAME_TO_OID.put(name, oid);
      } catch (IllegalAccessException e) {
        // ignore
      }
    }
  }
  /**
   * Returns the name of the oid as string.
   *
   * @param oid The oid to convert to name.
   * @return The name of the oid or {@code "<unknown>"} if oid no constant for oid value has been
   *         defined.
   */
  public static String toString(int oid) {
    String name = OID_TO_NAME.get(oid);
    if (name == null) {
      name = "<unknown:" + oid + ">";
    }
    return name;
  }
  /**
   * Resolves a type name (case-insensitive) or a numeric string to an OID value.
   *
   * @param oid a constant name such as "INT4" or a decimal OID string
   * @return the numeric OID
   * @throws RedshiftException if the argument is neither a known name nor a number
   */
  public static int valueOf(String oid) throws RedshiftException {
    if (oid.length() > 0 && !Character.isDigit(oid.charAt(0))) {
      Integer id = NAME_TO_OID.get(oid);
      if (id == null) {
        id = NAME_TO_OID.get(oid.toUpperCase());
      }
      if (id != null) {
        return id;
      }
    } else {
      try {
        // OID are unsigned 32bit integers, so Integer.parseInt is not enough
        return (int) Long.parseLong(oid);
      } catch (NumberFormatException ex) {
      }
    }
    throw new RedshiftException(GT.tr("oid type {0} not known and not a number", oid),
        RedshiftState.INVALID_PARAMETER_VALUE);
  }
}
| 8,368 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/QueryExecutorBase.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.RedshiftNotification;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
import com.amazon.redshift.jdbc.AutoSave;
import com.amazon.redshift.jdbc.EscapeSyntaxCallMode;
import com.amazon.redshift.jdbc.PreferQueryMode;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.HostSpec;
import com.amazon.redshift.util.LruCache;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import com.amazon.redshift.util.ServerErrorMessage;
import java.io.IOException;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
/**
 * Shared, protocol-agnostic state and behavior for {@link QueryExecutor}
 * implementations: connection identity, transaction state, warning/notification
 * accumulation, the prepared-statement LRU cache, and query cancellation.
 * Mutable shared state is guarded with synchronized methods where noted.
 */
public abstract class QueryExecutorBase implements QueryExecutor {

  protected RedshiftLogger logger;
  protected final RedshiftStream pgStream;
  private final String user;
  private final String database;
  // Socket timeout (ms) used only for the out-of-band cancel connection.
  private final int cancelSignalTimeout;
  // Backend PID and secret key from BackendKeyData; required for CancelRequest.
  private int cancelPid;
  private int cancelKey;
  private boolean closed = false;
  private String serverVersion;
  private int serverVersionNum = 0;
  private TransactionState transactionState;
  private final boolean reWriteBatchedInserts;
  private final boolean columnSanitiserDisabled;
  private final EscapeSyntaxCallMode escapeSyntaxCallMode;
  private final PreferQueryMode preferQueryMode;
  private AutoSave autoSave;
  private boolean flushCacheOnDeallocate = true;
  protected final boolean logServerErrorDetail;
  private boolean raiseExceptionOnSilentRollback;

  // default value for server versions that don't report standard_conforming_strings
  private boolean standardConformingStrings = false;

  private SQLWarning warnings;
  private final ArrayList<RedshiftNotification> notifications = new ArrayList<RedshiftNotification>();

  private final LruCache<Object, CachedQuery> statementCache;
  private final CachedQueryCreateAction cachedQueryCreateAction;

  // For getParameterStatuses(), GUC_REPORT tracking
  private final TreeMap<String,String> parameterStatuses
      = new TreeMap<String,String>(String.CASE_INSENSITIVE_ORDER);

  protected boolean enableStatementCache;
  protected int serverProtocolVersion;
  protected boolean datashareEnabled;
  protected boolean enableMultiSqlSupport;
  protected Properties properties;

  /**
   * Builds the common executor state from connection properties, including the
   * prepared-statement LRU cache (bounded by query count and total size, with
   * an eviction hook that closes the evicted server-side statement).
   */
  protected QueryExecutorBase(RedshiftStream pgStream, String user,
      String database, int cancelSignalTimeout, Properties info,
      RedshiftLogger logger) throws SQLException {
    this.logger = logger;
    this.pgStream = pgStream;
    this.user = user;
    this.database = database;
    this.properties = info;
    this.cancelSignalTimeout = cancelSignalTimeout;
    this.reWriteBatchedInserts = RedshiftProperty.REWRITE_BATCHED_INSERTS.getBoolean(info);
    this.columnSanitiserDisabled = RedshiftProperty.DISABLE_COLUMN_SANITISER.getBoolean(info);
    String callMode = RedshiftProperty.ESCAPE_SYNTAX_CALL_MODE.get(info);
    this.escapeSyntaxCallMode = EscapeSyntaxCallMode.of(callMode);
    String preferMode = RedshiftProperty.PREFER_QUERY_MODE.get(info);
    this.preferQueryMode = PreferQueryMode.of(preferMode);
    this.autoSave = AutoSave.of(RedshiftProperty.AUTOSAVE.get(info));
    this.logServerErrorDetail = RedshiftProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info);
    this.cachedQueryCreateAction = new CachedQueryCreateAction(this);
    statementCache = new LruCache<Object, CachedQuery>(
        Math.max(0, RedshiftProperty.PREPARED_STATEMENT_CACHE_QUERIES.getInt(info)),
        Math.max(0, RedshiftProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getInt(info) * 1024 * 1024),
        false,
        cachedQueryCreateAction,
        new LruCache.EvictAction<CachedQuery>() {
          @Override
          public void evict(CachedQuery cachedQuery) throws SQLException {
            cachedQuery.query.close();
          }
        });
    this.datashareEnabled = false;
    this.enableMultiSqlSupport = RedshiftProperty.ENABLE_MULTI_SQL_SUPPORT.getBoolean(info);
  }

  // Protocol-specific Terminate message, sent by close().
  protected abstract void sendCloseMessage() throws IOException;

  @Override
  public void setNetworkTimeout(int milliseconds) throws IOException {
    pgStream.setNetworkTimeout(milliseconds);
  }

  @Override
  public int getNetworkTimeout() throws IOException {
    return pgStream.getNetworkTimeout();
  }

  @Override
  public HostSpec getHostSpec() {
    return pgStream.getHostSpec();
  }

  @Override
  public String getUser() {
    return user;
  }

  @Override
  public String getDatabase() {
    return database;
  }

  // Records the BackendKeyData credentials needed to issue a CancelRequest later.
  public void setBackendKeyData(int cancelPid, int cancelKey) {
    this.cancelPid = cancelPid;
    this.cancelKey = cancelKey;
  }

  @Override
  public int getBackendPID() {
    return cancelPid;
  }

  /** Hard-closes the socket without the protocol Terminate handshake. */
  @Override
  public void abort() {
    try {
      pgStream.getSocket().close();
    } catch (IOException e) {
      // ignore
    }
    closed = true;
  }

  /** Graceful close: Terminate message, flush, then close the stream. Idempotent. */
  @Override
  public void close() {
    if (closed) {
      return;
    }
    try {
      if(RedshiftLogger.isEnable())
      	logger.log(LogLevel.DEBUG, " FE=> Terminate");
      sendCloseMessage();
      pgStream.flush();
      pgStream.close();
    } catch (IOException ioe) {
      if(RedshiftLogger.isEnable())
      	logger.log(LogLevel.DEBUG, "Discarding IOException on close:", ioe);
    }
    closed = true;
  }

  @Override
  public boolean isClosed() {
    return closed;
  }

  /**
   * Sends an out-of-band CancelRequest on a brand-new connection to the server,
   * identifying the target backend by the PID/key from BackendKeyData.
   * Failures are logged and swallowed: cancellation is best-effort.
   */
  @Override
  public void sendQueryCancel() throws SQLException {

    if (cancelPid <= 0) {
      // No BackendKeyData was received, so the backend cannot be addressed.
      if(RedshiftLogger.isEnable()) {
    		logger.logError("sendQueryCancel: cancelPid <= 0 (pid={0})", cancelPid);
      }
      return;
    }

    RedshiftStream cancelStream = null;

    // Now we need to construct and send a cancel packet
    try {
      if(RedshiftLogger.isEnable()) {
    		logger.logDebug(" FE=> CancelRequest(pid={0},ckey={1})", new Object[]{cancelPid, cancelKey});
      }

      cancelStream =
          new RedshiftStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), cancelSignalTimeout, logger, false, this.properties);
      if (cancelSignalTimeout > 0) {
        cancelStream.setNetworkTimeout(cancelSignalTimeout);
      }
      // CancelRequest packet: length 16, then the magic request code
      // 80877102 encoded as two 16-bit halves (1234, 5678), then pid and key.
      cancelStream.sendInteger4(16);
      cancelStream.sendInteger2(1234);
      cancelStream.sendInteger2(5678);
      cancelStream.sendInteger4(cancelPid);
      cancelStream.sendInteger4(cancelKey);
      cancelStream.flush();
      cancelStream.receiveEOF();
    } catch (IOException e) {
      // Safe to ignore.
      if(RedshiftLogger.isEnable()) {
        logger.log(LogLevel.DEBUG, "Ignoring exception on cancel request:", e);
      	logger.logError(e);
      }
    } finally {
      if (cancelStream != null) {
        try {
          cancelStream.close();
        } catch (IOException e) {
          // Ignored.
        }
      }
    }
  }

  // Appends to the warning chain; SQLWarning#setNextWarning walks to the tail.
  public synchronized void addWarning(SQLWarning newWarning) {
    if (warnings == null) {
      warnings = newWarning;
    } else {
      warnings.setNextWarning(newWarning);
    }
  }

  public synchronized void addNotification(RedshiftNotification notification) {
    notifications.add(notification);
  }

  /** Drains and returns all accumulated async notifications. */
  @Override
  public synchronized RedshiftNotification[] getNotifications() throws SQLException {
    RedshiftNotification[] array = notifications.toArray(new RedshiftNotification[0]);
    notifications.clear();
    return array;
  }

  /** Returns the accumulated warning chain and resets it (one-shot read). */
  @Override
  public synchronized SQLWarning getWarnings() {
    SQLWarning chain = warnings;
    warnings = null;
    return chain;
  }

  @Override
  public String getServerVersion() {
    return serverVersion;
  }

  @Override
  public int getServerProtocolVersion() {
    return serverProtocolVersion;
  }

  @Override
  public boolean isDatashareEnabled() {
    return datashareEnabled;
  }

  // Lazily parsed from the version string and memoized.
  @Override
  public int getServerVersionNum() {
    if (serverVersionNum != 0) {
      return serverVersionNum;
    }
    return serverVersionNum = Utils.parseServerVersionStr(serverVersion);
  }

  public void setServerVersion(String serverVersion) {
    this.serverVersion = serverVersion;
  }

  // Null/empty parameter status is treated as "unknown" (0).
  protected void setServerProtocolVersion(String serverProtocolVersion) {
    this.serverProtocolVersion = (serverProtocolVersion != null && serverProtocolVersion.length() != 0)
    																? Integer.parseInt(serverProtocolVersion)
    																: 0;
  }

  protected void setDatashareEnabled(boolean datashareEnabled) {
    this.datashareEnabled = datashareEnabled;
  }

  public void setServerVersionNum(int serverVersionNum) {
    this.serverVersionNum = serverVersionNum;
  }

  public synchronized void setTransactionState(TransactionState state) {
    transactionState = state;
  }

  public synchronized void setStandardConformingStrings(boolean value) {
    standardConformingStrings = value;
  }

  @Override
  public synchronized boolean getStandardConformingStrings() {
    return standardConformingStrings;
  }

  @Override
  public synchronized TransactionState getTransactionState() {
    return transactionState;
  }

  public void setEncoding(Encoding encoding) throws IOException {
    pgStream.setEncoding(encoding);
  }

  @Override
  public Encoding getEncoding() {
    return pgStream.getEncoding();
  }

  @Override
  public boolean isReWriteBatchedInsertsEnabled() {
    return this.reWriteBatchedInserts;
  }

  @Override
  public boolean isMultiSqlSupport() {
  	return enableMultiSqlSupport;
  }

  @Override
  public final CachedQuery borrowQuery(String sql) throws SQLException {
    return statementCache.borrow(sql);
  }

  @Override
  public final CachedQuery borrowCallableQuery(String sql) throws SQLException {
    return statementCache.borrow(new CallableQueryKey(sql));
  }

  @Override
  public final CachedQuery borrowReturningQuery(String sql, String[] columnNames) throws SQLException {
    return statementCache.borrow(new QueryWithReturningColumnsKey(sql, true, true,
        columnNames
    ));
  }

  @Override
  public CachedQuery borrowQueryByKey(Object key) throws SQLException {
    return statementCache.borrow(key);
  }

  @Override
  public void releaseQuery(CachedQuery cachedQuery) {
  	if(enableStatementCache)
  		statementCache.put(cachedQuery.key, cachedQuery);
  	else {
  		// Disabled the statement cache.
  		// So the driver is not putting the cachedquery in the LRUCache.
  		// Enabled statement cache causes the issue when objects used in same statement, drop/create,
  		// it can change the OID. Then parsed statement is no more useful.
  		if(cachedQuery != null
  				&& cachedQuery.query != null) {
  			cachedQuery.query.close();
  		}
  	}
  }

  /**
   * Builds the cache key for a SQL string: a returning-columns key when
   * generated columns are requested (or columnNames is null), the raw SQL for
   * plain parameterized statements, or a BaseQueryKey otherwise.
   */
  @Override
  public final Object createQueryKey(String sql, boolean escapeProcessing,
      boolean isParameterized, String... columnNames) {
    Object key;
    if (columnNames == null || columnNames.length != 0) {
      // Null means "return whatever sensible columns are" (e.g. primary key, or serial, or something like that)
      key = new QueryWithReturningColumnsKey(sql, isParameterized, escapeProcessing, columnNames);
    } else if (isParameterized) {
      // If no generated columns requested, just use the SQL as a cache key
      key = sql;
    } else {
      key = new BaseQueryKey(sql, false, escapeProcessing);
    }
    return key;
  }

  @Override
  public CachedQuery createQueryByKey(Object key) throws SQLException {
    return cachedQueryCreateAction.create(key);
  }

  @Override
  public final CachedQuery createQuery(String sql, boolean escapeProcessing,
      boolean isParameterized, String... columnNames)
      throws SQLException {
    Object key = createQueryKey(sql, escapeProcessing, isParameterized, columnNames);
    // Note: cache is not reused here for two reasons:
    //   1) Simplify initial implementation for simple statements
    //   2) Non-prepared statements are likely to have literals, thus query reuse would not be often
    return createQueryByKey(key);
  }

  @Override
  public boolean isColumnSanitiserDisabled() {
    return columnSanitiserDisabled;
  }

  @Override
  public EscapeSyntaxCallMode getEscapeSyntaxCallMode() {
    return escapeSyntaxCallMode;
  }

  @Override
  public PreferQueryMode getPreferQueryMode() {
    return preferQueryMode;
  }

  public AutoSave getAutoSave() {
    return autoSave;
  }

  public void setAutoSave(AutoSave autoSave) {
    this.autoSave = autoSave;
  }

  /**
   * Returns true when the given error would be cured by re-parsing the
   * statement (stale prepared statement or changed result type), i.e. the
   * failure is an artifact of server-side caching rather than of the SQL.
   */
  protected boolean willHealViaReparse(SQLException e) {
    if (e == null || e.getSQLState() == null) {
      return false;
    }

    // "prepared statement \"S_2\" does not exist"
    if (RedshiftState.INVALID_SQL_STATEMENT_NAME.getState().equals(e.getSQLState())) {
      return true;
    }
    if (!RedshiftState.NOT_IMPLEMENTED.getState().equals(e.getSQLState())) {
      return false;
    }

    if (!(e instanceof RedshiftException)) {
      return false;
    }

    RedshiftException pe = (RedshiftException) e;

    ServerErrorMessage serverErrorMessage = pe.getServerErrorMessage();
    if (serverErrorMessage == null) {
      return false;
    }
    // "cached plan must not change result type"
    String routine = pe.getServerErrorMessage().getRoutine();
    return "RevalidateCachedQuery".equals(routine) // 9.2+
        || "RevalidateCachedPlan".equals(routine); // <= 9.1
  }

  @Override
  public boolean willHealOnRetry(SQLException e) {
    if (autoSave == AutoSave.NEVER && getTransactionState() == TransactionState.FAILED) {
      // If autorollback is not activated, then every statement will fail with
      // 'transaction is aborted', etc, etc
      return false;
    }
    return willHealViaReparse(e);
  }

  public boolean isFlushCacheOnDeallocate() {
    return flushCacheOnDeallocate;
  }

  public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
    this.flushCacheOnDeallocate = flushCacheOnDeallocate;
  }

  @Override
  public boolean isRaiseExceptionOnSilentRollback() {
    return raiseExceptionOnSilentRollback;
  }

  @Override
  public void setRaiseExceptionOnSilentRollback(boolean raiseExceptionOnSilentRollback) {
    this.raiseExceptionOnSilentRollback = raiseExceptionOnSilentRollback;
  }

  protected boolean hasNotifications() {
    return notifications.size() > 0;
  }

  @Override
  public final Map<String,String> getParameterStatuses() {
    return Collections.unmodifiableMap(parameterStatuses);
  }

  @Override
  public final String getParameterStatus(String parameterName) {
    return parameterStatuses.get(parameterName);
  }

  /**
   * Update the parameter status map in response to a new ParameterStatus
   * wire protocol message.
   *
   * <p>The server sends ParameterStatus messages when GUC_REPORT settings are
   * initially assigned and whenever they change.</p>
   *
   * <p>A future version may invoke a client-defined listener class at this point,
   * so this should be the only access path.</p>
   *
   * <p>Keys are case-insensitive and case-preserving.</p>
   *
   * <p>The server doesn't provide a way to report deletion of a reportable
   * parameter so we don't expose one here.</p>
   *
   * @param parameterName case-insensitive case-preserving name of parameter to create or update
   * @param parameterStatus new value of parameter
   * @see com.amazon.redshift.RedshiftConnection#getParameterStatuses
   * @see com.amazon.redshift.RedshiftConnection#getParameterStatus
   */
  protected void onParameterStatus(String parameterName, String parameterStatus) {
    if (parameterName == null || parameterName.equals("")) {
      throw new IllegalStateException("attempt to set GUC_REPORT parameter with null or empty-string name");
    }

    parameterStatuses.put(parameterName, parameterStatus);
  }

  /**
   * Close the last active ring buffer thread.
   */
  @Override
  public void closeRingBufferThread(RedshiftRowsBlockingQueue<Tuple> queueRows, Thread ringBufferThread) {
  	// Does nothing
  }

  /**
   * Check for a running ring buffer thread.
   *
   * @return returns true if Ring buffer thread is running, otherwise false.
   */
  @Override
  public boolean isRingBufferThreadRunning() {
  	return false;
  }

  @Override
  public void closeStatementAndPortal() {
  	// Do nothing
  }
}
| 8,369 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/CommandCompleteParser.java | /*
* Copyright (c) 2018, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
/**
* Parses {@code oid} and {@code rows} from a {@code CommandComplete (B)} message (end of Execute).
*/
public final class CommandCompleteParser {
  // Result fields of the most recent parse(); 0 when absent from the tag.
  private long oid;
  private long rows;

  public CommandCompleteParser() {
  }

  public long getOid() {
    return oid;
  }

  public long getRows() {
    return rows;
  }

  void set(long oid, long rows) {
    this.oid = oid;
    this.rows = rows;
  }

  /**
   * Parses {@code CommandComplete (B)} message.
   * Status is in the format of "COMMAND OID ROWS" where both 'OID' and 'ROWS' are optional
   * and COMMAND can have spaces within it, like CREATE TABLE.
   *
   * @param status COMMAND OID ROWS message
   * @throws RedshiftException in case the status cannot be parsed
   */
  public void parse(String status) throws RedshiftException {
    // Assumption: command neither starts nor ends with a digit
    if (!Parser.isDigitAt(status, status.length() - 1)) {
      // Tag ends with the command word itself (e.g. "CREATE TABLE"): no counts.
      set(0, 0);
      return;
    }

    // Scan backwards, while searching for a maximum of two number groups
    //   COMMAND OID ROWS
    //   COMMAND ROWS
    long oid = 0;
    long rows = 0;
    try {
      int lastSpace = status.lastIndexOf(' ');
      // Status ends with a digit => it is ROWS
      if (Parser.isDigitAt(status, lastSpace + 1)) {
        rows = Parser.parseLong(status, lastSpace + 1, status.length());

        // Is there a digit just before the last space? Then a second
        // (penultimate) number group exists and it is the OID.
        if (Parser.isDigitAt(status, lastSpace - 1)) {
          int penultimateSpace = status.lastIndexOf(' ', lastSpace - 1);
          if (Parser.isDigitAt(status, penultimateSpace + 1)) {
            oid = Parser.parseLong(status, penultimateSpace + 1, lastSpace);
          }
        }
      }
    } catch (NumberFormatException e) {
      // This should only occur if the oid or rows are out of 0..Long.MAX_VALUE range
      throw new RedshiftException(
          GT.tr("Unable to parse the count in command completion tag: {0}.", status),
          RedshiftState.CONNECTION_FAILURE, e);
    }
    set(oid, rows);
  }

  @Override
  public String toString() {
    return "CommandStatus{"
        + "oid=" + oid
        + ", rows=" + rows
        + '}';
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    CommandCompleteParser that = (CommandCompleteParser) o;

    if (oid != that.oid) {
      return false;
    }
    return rows == that.rows;
  }

  @Override
  public int hashCode() {
    // Standard 31-based combination of both longs, consistent with equals().
    int result = (int) (oid ^ (oid >>> 32));
    result = 31 * result + (int) (rows ^ (rows >>> 32));
    return result;
  }
}
| 8,370 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/QueryWithReturningColumnsKey.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.util.Arrays;
/**
 * Cache key for a query that has returning columns.
 * {@code columnNames} holds non-quoted column names; the parser quotes them
 * automatically.
 * <p>The special value {@code columnNames == new String[]{"*"}} means all columns
 * should be returned. {@link Parser} knows about that case and does not quote {@code *}.</p>
 */
class QueryWithReturningColumnsKey extends BaseQueryKey {
  public final String[] columnNames;
  private int size; // lazily computed estimate; query length cannot exceed MAX_INT

  QueryWithReturningColumnsKey(String sql, boolean isParameterized, boolean escapeProcessing,
      String[] columnNames) {
    super(sql, isParameterized, escapeProcessing);
    // TODO: teach parser to fetch key columns somehow when no column names were given
    this.columnNames = (columnNames == null) ? new String[]{"*"} : columnNames;
  }

  @Override
  public long getSize() {
    int cached = this.size;
    if (cached != 0) {
      return cached;
    }
    int total = (int) super.getSize();
    if (columnNames != null) {
      total += 16L; // the array object itself
      for (int i = 0; i < columnNames.length; i++) {
        // 2 bytes per char; revise with Java 9's compact strings
        total += columnNames[i].length() * 2L;
      }
    }
    this.size = total;
    return total;
  }

  @Override
  public String toString() {
    return "QueryWithReturningColumnsKey{"
        + "sql='" + sql + '\''
        + ", isParameterized=" + isParameterized
        + ", escapeProcessing=" + escapeProcessing
        + ", columnNames=" + Arrays.toString(columnNames)
        + '}';
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass() || !super.equals(o)) {
      return false;
    }
    QueryWithReturningColumnsKey that = (QueryWithReturningColumnsKey) o;
    // String[] contents compared element-wise; Arrays.equals is correct here.
    return Arrays.equals(columnNames, that.columnNames);
  }

  @Override
  public int hashCode() {
    return 31 * super.hashCode() + Arrays.hashCode(columnNames);
  }
}
| 8,371 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/EncodingPredictor.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
import java.io.IOException;
import com.amazon.redshift.logger.RedshiftLogger;
/**
 * <p>Predicts encoding for error messages based on some heuristics.</p>
 *
 * <ol>
 * <li>For certain languages, it is known how "FATAL" is translated</li>
 * <li>For Japanese, several common words are hardcoded</li>
 * <li>Then try various LATIN encodings</li>
 * </ol>
 */
public class EncodingPredictor {
  /**
   * In certain cases the encoding is not known for sure (e.g. before authentication).
   * In such cases, backend might send messages in "native to database" encoding,
   * thus the driver has to guess the encoding and decode with it.
   */
  public static class DecodeResult {
    public final String result;   // decoded message text
    public final String encoding; // JVM name

    DecodeResult(String result, String encoding) {
      this.result = result;
      this.encoding = encoding;
    }
  }

  /**
   * One language's heuristics: the translated "FATAL" text, optional extra key
   * words, and the encodings typically used for that language.
   */
  static class Translation {
    public final String fatalText;
    private final String[] texts;
    public final String language;
    public final String[] encodings;

    Translation(String fatalText, String[] texts, String language, String... encodings) {
      this.fatalText = fatalText;
      this.texts = texts;
      this.language = language;
      this.encodings = encodings;
    }
  }

  private static final Translation[] FATAL_TRANSLATIONS =
      new Translation[]{
          new Translation("ВАЖНО", null, "ru", "WIN", "ALT", "KOI8"),
          new Translation("致命错误", null, "zh_CN", "EUC_CN", "GBK", "BIG5"),
          new Translation("KATASTROFALNY", null, "pl", "LATIN2"),
          new Translation("FATALE", null, "it", "LATIN1", "LATIN9"),
          new Translation("FATAL", new String[]{"は存在しません" /* ~ does not exist */,
              "ロール" /* ~ role */, "ユーザ" /* ~ user */}, "ja", "EUC_JP", "SJIS"),
          new Translation(null, null, "fr/de/es/pt_BR", "LATIN1", "LATIN3", "LATIN4", "LATIN5",
              "LATIN7", "LATIN9"),
      };

  /**
   * Tries to decode an error-message byte range by guessing its encoding.
   *
   * @param bytes raw message bytes
   * @param offset start of the message within {@code bytes}
   * @param length number of bytes to decode
   * @param logger driver logger
   * @return decoded text and the JVM encoding name that worked, or {@code null}
   *         when no candidate encoding produced a clean decode
   */
  public static DecodeResult decode(byte[] bytes, int offset, int length, RedshiftLogger logger) {
    Encoding defaultEncoding = Encoding.defaultEncoding();
    for (Translation tr : FATAL_TRANSLATIONS) {
      for (String encoding : tr.encodings) {
        Encoding encoder = Encoding.getDatabaseEncoding(encoding, logger);
        if (encoder == defaultEncoding) {
          continue;
        }

        // If there is a translation for "FATAL", then try typical encodings for that language
        if (tr.fatalText != null) {
          byte[] encoded;
          try {
            // Wrap the text as a severity field: 'S' + text + NUL terminator,
            // matching the wire layout of an ErrorResponse field.
            byte[] tmp = encoder.encode(tr.fatalText);
            encoded = new byte[tmp.length + 2];
            encoded[0] = 'S';
            encoded[encoded.length - 1] = 0;
            System.arraycopy(tmp, 0, encoded, 1, tmp.length);
          } catch (IOException e) {
            continue;// should not happen
          }

          if (!arrayContains(bytes, offset, length, encoded, 0, encoded.length)) {
            continue;
          }
        }

        // No idea how to tell Japanese from Latin languages, thus just hard-code certain Japanese words
        if (tr.texts != null) {
          boolean foundOne = false;
          for (String text : tr.texts) {
            try {
              byte[] textBytes = encoder.encode(text);
              if (arrayContains(bytes, offset, length, textBytes, 0, textBytes.length)) {
                foundOne = true;
                break;
              }
            } catch (IOException e) {
              // do not care, will try other encodings
            }
          }
          if (!foundOne) {
            // Error message does not have key parts, will try other encodings
            continue;
          }
        }

        try {
          String decoded = encoder.decode(bytes, offset, length);
          if (decoded.indexOf(65533) != -1) {
            // U+FFFD replacement character => bad byte sequence, try another encoding
            continue;
          }
          return new DecodeResult(decoded, encoder.name());
        } catch (IOException e) {
          // do not care
        }
      }
    }
    return null;
  }

  /**
   * Returns true when {@code second[secondOffset..secondOffset+secondLength)}
   * occurs inside the window {@code first[firstOffset..firstOffset+firstLength)}.
   *
   * <p>Fix: the previous implementation compared bytes at {@code firstOffset + i + j}
   * without bounding {@code i + j} by {@code firstLength}, so a partial match
   * starting near the end of the window could read past the window (and, when the
   * window ends at the array boundary, throw ArrayIndexOutOfBoundsException).
   * Start positions are now limited to those where the needle still fits.</p>
   */
  private static boolean arrayContains(
      byte[] first, int firstOffset, int firstLength,
      byte[] second, int secondOffset, int secondLength
  ) {
    if (firstLength < secondLength) {
      return false;
    }

    for (int i = 0; i <= firstLength - secondLength; i++) {
      if (first[firstOffset + i] != second[secondOffset]) {
        continue; // find the first matching byte
      }
      int j = 1;
      while (j < secondLength && first[firstOffset + i + j] == second[secondOffset + j]) {
        j++; // compare the remainder of the needle
      }
      if (j == secondLength) {
        return true;
      }
    }
    return false;
  }
}
| 8,372 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/Version.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
/**
 * A version that can be represented as a single comparable integer.
 */
public interface Version {

  /**
   * Get a machine-readable version number.
   *
   * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
   */
  int getVersionNum();

}
| 8,373 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/CallableQueryKey.java | /*
* Copyright (c) 2015, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
/**
 * Serves as a cache key for {@link java.sql.CallableStatement}.
 * Callable statements need special parsing before use (the JDBC {@code {?= call...}}
 * syntax), so a dedicated key class triggers the proper parsing path for them.
 */
class CallableQueryKey extends BaseQueryKey {

  CallableQueryKey(String sql) {
    super(sql, true, true);
  }

  @Override
  public boolean equals(Object o) {
    // Overridden only so equals and hashCode stay paired; semantics are inherited.
    return super.equals(o);
  }

  @Override
  public int hashCode() {
    return super.hashCode() * 31;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("CallableQueryKey{");
    sb.append("sql='").append(sql).append('\'');
    sb.append(", isParameterized=").append(isParameterized);
    sb.append(", escapeProcessing=").append(escapeProcessing);
    sb.append('}');
    return sb.toString();
  }
}
| 8,374 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/SqlCommandType.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core;
/**
 * Type information inspection support.
 * Identifies the SQL command category detected by the driver's query parser.
 *
 * @author Jeremy Whiting jwhiting@redhat.com
 */
public enum SqlCommandType {

  /**
   * Use BLANK for empty sql queries or when parsing the sql string is not
   * necessary.
   */
  BLANK,
  /** An INSERT statement. */
  INSERT,
  /** An UPDATE statement. */
  UPDATE,
  /** A DELETE statement. */
  DELETE,
  /** A MOVE cursor statement. */
  MOVE,
  /** A SELECT statement. */
  SELECT,
  /** A WITH (common table expression) statement. */
  WITH,
  /** A PREPARE statement. */
  PREPARE;
}
| 8,375 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/CopyOperationImpl.java | /*
* Copyright (c) 2009, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
import com.amazon.redshift.copy.CopyOperation;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.sql.SQLException;
/**
 * Base implementation shared by the COPY sub-protocol operations; holds the
 * copy metadata received from the server and the final row count.
 */
public abstract class CopyOperationImpl implements CopyOperation {
  // Executor owning the connection this COPY runs on.
  QueryExecutorImpl queryExecutor;
  // Overall row format code from the CopyIn/CopyOutResponse message.
  int rowFormat;
  // Per-field format codes from the same message.
  int[] fieldFormats;
  // Row count from the final CommandComplete tag; -1 until known.
  long handledRowCount = -1;

  /** Wires this operation to its executor and records the server's copy metadata. */
  void init(QueryExecutorImpl q, int fmt, int[] fmts) {
    queryExecutor = q;
    rowFormat = fmt;
    fieldFormats = fmts;
  }

  public void cancelCopy() throws SQLException {
    queryExecutor.cancelCopy(this);
  }

  public int getFieldCount() {
    return fieldFormats.length;
  }

  public int getFieldFormat(int field) {
    return fieldFormats[field];
  }

  public int getFormat() {
    return rowFormat;
  }

  public boolean isActive() {
    synchronized (queryExecutor) {
      return queryExecutor.hasLock(this);
    }
  }

  /**
   * Handles the CommandComplete tag that ends a COPY, extracting the row count
   * from tags of the form {@code "COPY <rows>"}.
   *
   * @param status the CommandComplete tag text
   * @throws RedshiftException if the tag is not a COPY tag
   */
  public void handleCommandStatus(String status) throws RedshiftException {
    if (status.startsWith("COPY")) {
      int i = status.lastIndexOf(' ');
      // "COPY" is 4 chars, so a space past index 3 means a count token follows.
      handledRowCount = i > 3 ? Long.parseLong(status.substring(i + 1)) : -1;
    } else {
      // Pass status as a {0} MessageFormat argument instead of concatenating it
      // into the translation pattern, matching GT.tr usage elsewhere in the
      // driver and keeping literal braces in status out of the pattern.
      throw new RedshiftException(GT.tr("CommandComplete expected COPY but got: {0}", status),
          RedshiftState.COMMUNICATION_ERROR);
    }
  }

  /**
   * Consume received copy data.
   *
   * @param data data that was receive by copy protocol
   * @throws RedshiftException if some internal problem occurs
   */
  protected abstract void handleCopydata(byte[] data) throws RedshiftException;

  public long getHandledRowCount() {
    return handledRowCount;
  }
}
| 8,376 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/CopyInImpl.java | /*
* Copyright (c) 2009, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
import com.amazon.redshift.copy.CopyIn;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.sql.SQLException;
/**
 * <p>COPY FROM STDIN operation.</p>
 *
 * <p>Anticipated flow:</p>
 *
 * <p>CopyManager.copyIn() -&gt;QueryExecutor.startCopy() - sends given query to server
 * -&gt;processCopyResults(): - receives CopyInResponse from Server - creates new CopyInImpl
 * -&gt;initCopy(): - receives copy metadata from server -&gt;CopyInImpl.init() -&gt;lock()
 * connection for this operation - if query fails an exception is thrown - if query returns wrong
 * CopyOperation, copyIn() cancels it before throwing exception &lt;-return: new CopyInImpl holding
 * lock on connection. Repeat CopyIn.writeToCopy() for all data -&gt;CopyInImpl.writeToCopy()
 * -&gt;QueryExecutorImpl.writeToCopy() - sends given data -&gt;processCopyResults() - parameterized
 * not to block, just peek for new messages from server - on ErrorResponse, waits until protocol is
 * restored and unlocks connection. CopyIn.endCopy() -&gt;CopyInImpl.endCopy()
 * -&gt;QueryExecutorImpl.endCopy() - sends CopyDone - processCopyResults() - on CommandComplete
 * -&gt;CopyOperationImpl.handleCommandComplete() - sets updatedRowCount when applicable - on
 * ReadyForQuery unlock() connection for use by other operations &lt;-return:
 * CopyInImpl.getUpdatedRowCount()</p>
 */
public class CopyInImpl extends CopyOperationImpl implements CopyIn {

  /** Sends the given byte range to the server as CopyData. */
  public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
    queryExecutor.writeToCopy(this, data, off, siz);
  }

  /** Streams CopyData to the server from the given writer. */
  public void writeToCopy(ByteStreamWriter from) throws SQLException {
    queryExecutor.writeToCopy(this, from);
  }

  /** Flushes any buffered copy data to the server. */
  public void flushCopy() throws SQLException {
    queryExecutor.flushCopy(this);
  }

  /**
   * Finishes the operation by sending CopyDone.
   *
   * @return the row count reported by the server's CommandComplete tag
   */
  public long endCopy() throws SQLException {
    return queryExecutor.endCopy(this);
  }

  /** A COPY IN never receives data; the server sending any is a protocol violation. */
  protected void handleCopydata(byte[] data) throws RedshiftException {
    throw new RedshiftException(GT.tr("CopyIn copy direction can't receive data"),
        RedshiftState.PROTOCOL_VIOLATION);
  }
}
| 8,377 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/CompositeQuery.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.Query;
import com.amazon.redshift.core.SqlCommand;
import java.util.Map;
/**
 * V3 Query implementation for queries that involve multiple statements. Each
 * statement becomes its own SimpleQuery, and the per-statement
 * SimpleParameterList objects are wrapped in one CompositeParameterList.
 *
 * @author Oliver Jowett (oliver@opencloud.com)
 */
class CompositeQuery implements Query {

  private final SimpleQuery[] subqueries;
  // Cumulative bind-parameter offset of each subquery within the whole statement.
  private final int[] offsets;

  CompositeQuery(SimpleQuery[] subqueries, int[] offsets) {
    this.subqueries = subqueries;
    this.offsets = offsets;
  }

  public ParameterList createParameterList() {
    int n = subqueries.length;
    SimpleParameterList[] lists = new SimpleParameterList[n];
    for (int i = 0; i < n; i++) {
      lists[i] = (SimpleParameterList) subqueries[i].createParameterList();
    }
    return new CompositeParameterList(lists, offsets);
  }

  public String toString(ParameterList parameters) {
    StringBuilder out = new StringBuilder(subqueries[0].toString());
    for (int i = 1; i < subqueries.length; i++) {
      out.append(';').append(subqueries[i]);
    }
    return out.toString();
  }

  @Override
  public String getNativeSql() {
    StringBuilder out = new StringBuilder(subqueries[0].getNativeSql());
    for (int i = 1; i < subqueries.length; i++) {
      out.append(';').append(subqueries[i].getNativeSql());
    }
    return out.toString();
  }

  @Override
  public SqlCommand getSqlCommand() {
    return null;
  }

  public String toString() {
    return toString(null);
  }

  public void close() {
    for (int i = 0; i < subqueries.length; i++) {
      subqueries[i].close();
    }
  }

  public Query[] getSubqueries() {
    return subqueries;
  }

  public boolean isStatementDescribed() {
    // Described only when every statement has been described.
    for (int i = 0; i < subqueries.length; i++) {
      if (!subqueries[i].isStatementDescribed()) {
        return false;
      }
    }
    return true;
  }

  public boolean isEmpty() {
    // Empty only when every statement is empty.
    for (int i = 0; i < subqueries.length; i++) {
      if (!subqueries[i].isEmpty()) {
        return false;
      }
    }
    return true;
  }

  public int getBatchSize() {
    return 0; // no-op, unsupported
  }

  @Override
  public Map<String, Integer> getResultSetColumnNameIndexMap() {
    return null; // unsupported
  }
}
| 8,378 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/V3ParameterList.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.ParameterList;
import java.sql.SQLException;
/**
 * Common interface for all V3 parameter list implementations.
 *
 * @author Oliver Jowett (oliver@opencloud.com)
 */
interface V3ParameterList extends ParameterList {
  /**
   * Ensure that all parameters in this list have been assigned values. Return silently if all is
   * well, otherwise throw an appropriate exception.
   *
   * @throws SQLException if not all parameters are set.
   */
  void checkAllParametersSet() throws SQLException;

  /**
   * Convert any function output parameters to the correct type (void) and set an ignorable value
   * for it.
   */
  void convertFunctionOutParameters();

  /**
   * Return a list of the SimpleParameterList objects that make up this parameter list. If this
   * object is already a SimpleParameterList, returns null (avoids an extra array construction in
   * the common case).
   *
   * @return an array of single-statement parameter lists, or <code>null</code> if this object is
   *         already a single-statement parameter list.
   */
  SimpleParameterList[] getSubparams();

  /**
   * Return the parameter type information.
   *
   * @return an array of {@link com.amazon.redshift.core.Oid} type information, one per parameter
   */
  int[] getParamTypes();

  /**
   * Return the flags for each parameter.
   *
   * @return an array of bytes used to store flags, one per parameter
   */
  byte[] getFlags();

  /**
   * Return the encoding for each parameter.
   *
   * @return nested byte array of bytes with encoding information, one inner array per parameter
   */
  byte[][] getEncoding();
}
| 8,379 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/TypeTransferModeRegistry.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
/**
 * Decides, per type oid, whether values should be transferred in binary or text
 * format between the driver and the server.
 */
public interface TypeTransferModeRegistry {
  /**
   * Returns if given oid should be sent in binary format.
   *
   * @param oid type oid
   * @return true if given oid should be sent in binary format
   */
  boolean useBinaryForSend(int oid);

  /**
   * Returns if given oid should be received in binary format.
   *
   * @param oid type oid
   * @return true if given oid should be received in binary format
   */
  boolean useBinaryForReceive(int oid);
}
| 8,380 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/MessageLoopState.java | package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.Tuple;
/**
 * Keeps the state of the message loop so the ring buffer can work on a separate
 * thread. This is used in processResult(): it stores what would otherwise be
 * local variables of that method, so processing can resume across threads.
 *
 * @author igarish
 */
public class MessageLoopState
{
  // All vars are package-private, so no need to expose accessor methods.

  // Queue that fetched rows are pushed onto; null when no ring buffer is in use.
  RedshiftRowsBlockingQueue<Tuple> queueTuples;

  // At the end of a command execution we have the CommandComplete
  // message to tell us we're done, but with a describeOnly command
  // we have no real flag to let us know we're done. We've got to
  // look for the next RowDescription or NoData message and return
  // from there.
  boolean doneAfterRowDescNoData;

  /** Creates an empty state: no row queue, not in describe-only completion mode. */
  public MessageLoopState()
  {
    // Delegate to the main constructor instead of duplicating the init call.
    this(null, false);
  }

  public MessageLoopState(RedshiftRowsBlockingQueue<Tuple> queueTuples,
                          boolean doneAfterRowDescNoData)
  {
    initMessageLoopState(queueTuples, doneAfterRowDescNoData);
  }

  /**
   * (Re)initializes the object before starting a run.
   *
   * @param queueTuples row queue to fill, or null
   * @param doneAfterRowDescNoData whether the loop should finish after the next
   *        RowDescription/NoData message (describe-only mode)
   */
  void initMessageLoopState(RedshiftRowsBlockingQueue<Tuple> queueTuples,
                            boolean doneAfterRowDescNoData)
  {
    this.queueTuples = queueTuples;
    this.doneAfterRowDescNoData = doneAfterRowDescNoData;
  }
}
| 8,381 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/QueryExecutorImpl.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.copy.CopyIn;
import com.amazon.redshift.copy.CopyOperation;
import com.amazon.redshift.copy.CopyOut;
import com.amazon.redshift.core.CommandCompleteParser;
import com.amazon.redshift.core.Encoding;
import com.amazon.redshift.core.EncodingPredictor;
import com.amazon.redshift.core.Field;
import com.amazon.redshift.core.NativeQuery;
import com.amazon.redshift.core.Oid;
import com.amazon.redshift.core.RedshiftBindException;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.Parser;
import com.amazon.redshift.core.Query;
import com.amazon.redshift.core.QueryExecutor;
import com.amazon.redshift.core.QueryExecutorBase;
import com.amazon.redshift.core.ReplicationProtocol;
import com.amazon.redshift.core.ResultCursor;
import com.amazon.redshift.core.ResultHandler;
import com.amazon.redshift.core.ResultHandlerBase;
import com.amazon.redshift.core.ResultHandlerDelegate;
import com.amazon.redshift.core.SqlCommand;
import com.amazon.redshift.core.SqlCommandType;
import com.amazon.redshift.core.TransactionState;
import com.amazon.redshift.core.Tuple;
import com.amazon.redshift.core.Utils;
import com.amazon.redshift.core.v3.replication.V3ReplicationProtocol;
import com.amazon.redshift.jdbc.AutoSave;
import com.amazon.redshift.jdbc.BatchResultHandler;
import com.amazon.redshift.jdbc.FieldMetadata;
import com.amazon.redshift.jdbc.TimestampUtils;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.QuerySanitizer;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftPropertyMaxResultBufferParser;
import com.amazon.redshift.util.RedshiftState;
import com.amazon.redshift.util.RedshiftWarning;
import com.amazon.redshift.util.ServerErrorMessage;
import java.io.IOException;
import java.lang.ref.PhantomReference;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
/**
* QueryExecutor implementation for the V3 protocol.
*/
public class QueryExecutorImpl extends QueryExecutorBase {
private static final String COPY_ERROR_MESSAGE = "COPY commands are only supported using the CopyManager API.";
private static final Pattern ROLLBACK_PATTERN = Pattern.compile("\\brollback\\b", Pattern.CASE_INSENSITIVE);
private static final Pattern COMMIT_PATTERN = Pattern.compile("\\bcommit\\b", Pattern.CASE_INSENSITIVE);
private static final Pattern PREPARE_PATTERN = Pattern.compile("\\bprepare ++transaction\\b", Pattern.CASE_INSENSITIVE);
private static boolean looksLikeCommit(String sql) {
if ("COMMIT".equalsIgnoreCase(sql)) {
return true;
}
if ("ROLLBACK".equalsIgnoreCase(sql)) {
return false;
}
return COMMIT_PATTERN.matcher(sql).find() && !ROLLBACK_PATTERN.matcher(sql).find();
}
private static boolean looksLikePrepare(String sql) {
return sql.startsWith("PREPARE TRANSACTION") || PREPARE_PATTERN.matcher(sql).find();
}
  /**
   * TimeZone of the current connection (TimeZone backend parameter).
   */
  private TimeZone timeZone;

  /**
   * application_name connection property.
   */
  private String applicationName;

  /**
   * True if server uses integers for date and time fields. False if server uses double.
   */
  private boolean integerDateTimes;

  /**
   * Bit set that has a bit set for each oid which should be received using binary format.
   */
  private final Set<Integer> useBinaryReceiveForOids = new HashSet<Integer>();

  /**
   * Bit set that has a bit set for each oid which should be sent using binary format.
   */
  private final Set<Integer> useBinarySendForOids = new HashSet<Integer>();

  /**
   * This is a fake query object so processResults can distinguish "ReadyForQuery" messages
   * from Sync messages vs from simple execute (aka 'Q').
   */
  private final SimpleQuery sync = (SimpleQuery) createQuery("SYNC", false, true).query;

  // Epoch counter for bulk server-side statement deallocation; presumably bumped
  // so statements prepared before a mass deallocate can be recognized as stale
  // — TODO confirm against its usage elsewhere in this class.
  private short deallocateEpoch;

  /**
   * This caches the latest observed {@code set search_path} query so the reset of prepared
   * statement cache can be skipped if using repeated calls for the same {@code set search_path}
   * value.
   */
  private String lastSetSearchPathQuery;

  /**
   * The exception that caused the last transaction to fail.
   */
  private SQLException transactionFailCause;

  // V3 implementation of the streaming replication sub-protocol.
  private final ReplicationProtocol replicationProtocol;

  // Whether fetched rows go through a background ring-buffer thread
  // (enableFetchRingBuffer connection property).
  private boolean enableFetchRingBuffer;
  // Ring buffer size parsed from the fetchRingBufferSize property; 0 when unset.
  private long fetchRingBufferSize;

  // Last running ring buffer thread.
  private RingBufferThread m_ringBufferThread = null;
  // Flag asking the ring buffer thread to stop.
  private boolean m_ringBufferStopThread = false;
  // Monitor guarding ring-buffer thread start/stop.
  private Object m_ringBufferThreadLock = new Object();

  // Query or some execution on a socket in process
  private final Lock m_executingLock = new ReentrantLock();

  /**
   * {@code CommandComplete(B)} messages are quite common, so we reuse instance to parse those
   */
  private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser();

  // Delegate that handles COPY sub-protocol queries.
  private final CopyQueryExecutor copyQueryExecutor;
  /**
   * Creates a V3 protocol query executor: reads driver properties, sets up the
   * replication and COPY sub-protocol helpers, and consumes the server's
   * startup message sequence.
   *
   * @param pgStream already-connected stream to the backend
   * @param user user name for the connection
   * @param database database name for the connection
   * @param cancelSignalTimeout timeout for delivering a cancel signal
   * @param info connection properties
   * @param logger driver logger
   * @throws SQLException if a property cannot be parsed or startup fails
   * @throws IOException on communication errors during startup
   */
  public QueryExecutorImpl(RedshiftStream pgStream, String user, String database,
      int cancelSignalTimeout, Properties info, RedshiftLogger logger) throws SQLException, IOException {
    super(pgStream, user, database, cancelSignalTimeout, info, logger);

    this.allowEncodingChanges = RedshiftProperty.ALLOW_ENCODING_CHANGES.getBoolean(info);
    this.cleanupSavePoints = RedshiftProperty.CLEANUP_SAVEPOINTS.getBoolean(info);
    this.replicationProtocol = new V3ReplicationProtocol(this, pgStream);
    this.enableFetchRingBuffer = RedshiftProperty.ENABLE_FETCH_RING_BUFFER.getBoolean(info);
    // Ring buffer size is given as a human-readable string (e.g. "1G"); 0 means unset.
    String fetchRingBufferSizeStr = RedshiftProperty.FETCH_RING_BUFFER_SIZE.get(info);
    this.fetchRingBufferSize = (fetchRingBufferSizeStr != null )
        ? RedshiftPropertyMaxResultBufferParser.parseProperty(fetchRingBufferSizeStr, RedshiftProperty.FETCH_RING_BUFFER_SIZE.getName())
        : 0;
    this.enableStatementCache = RedshiftProperty.ENABLE_STATEMENT_CACHE.getBoolean(info);
    this.copyQueryExecutor = new CopyQueryExecutor(this, logger, pgStream);
    this.serverProtocolVersion = 0;
    readStartupMessages();
  }
  /**
   * @return always 3: this executor implements the v3 frontend/backend protocol
   */
  @Override
  public int getProtocolVersion() {
    return 3;
  }
  /**
   * @return total number of bytes read from the backend stream so far
   */
  public long getBytesReadFromStream()
  {
    return pgStream.getBytesFromStream();
  }
  /**
   * <p>Supplement to synchronization of public methods on current QueryExecutor.</p>
   *
   * <p>Necessary for keeping the connection intact between calls to public methods sharing a state
   * such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access
   * point.</p>
   *
   * <p>Public methods sharing that state must then be synchronized among themselves. Normal method
   * synchronization typically suffices for that.</p>
   *
   * <p>See notes on related methods as well as currentCopy() below.</p>
   *
   * <p>{@code null} when the connection is not reserved; otherwise the object
   * (e.g. a copy operation) currently holding it.</p>
   */
  private Object lockedFor = null;
  /**
   * Obtain lock over this connection for given object, blocking to wait if necessary.
   *
   * <p>Must be invoked while synchronized on this executor: the wait is done via
   * {@code this.wait()} inside waitOnLock().</p>
   *
   * @param obtainer object that gets the lock. Normally current thread.
   * @throws RedshiftException when already holding the lock or getting interrupted.
   */
  void lock(Object obtainer) throws RedshiftException {
    if (lockedFor == obtainer) {
      throw new RedshiftException(GT.tr("Tried to obtain lock while already holding it"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }

    waitOnLock();
    lockedFor = obtainer;
  }
  /**
   * Release lock on this connection presumably held by given object.
   *
   * <p>Must be invoked while synchronized on this executor; wakes one thread
   * blocked in waitOnLock().</p>
   *
   * @param holder object that holds the lock. Normally current thread.
   * @throws RedshiftException when this thread does not hold the lock
   */
  void unlock(Object holder) throws RedshiftException {
    if (lockedFor != holder) {
      throw new RedshiftException(GT.tr("Tried to break lock on database connection"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }
    lockedFor = null;
    this.notify();
  }
  /**
   * Wait until our lock is released. Execution of a single synchronized method can then continue
   * without further ado. Must be called at beginning of each synchronized public method.
   */
  void waitOnLock() throws RedshiftException {
    while (lockedFor != null) {
      try {
        this.wait();
      } catch (InterruptedException ie) {
        // Restore the interrupt flag so callers further up the stack can see it.
        Thread.currentThread().interrupt();
        throw new RedshiftException(
            GT.tr("Interrupted while waiting to obtain lock on database connection"),
            RedshiftState.OBJECT_NOT_IN_STATE, ie);
      }
    }
  }
  /**
   * Identity check against the current lock holder.
   *
   * @param holder object assumed to hold the lock
   * @return whether given object actually holds the lock
   */
  boolean hasLock(Object holder) {
    return lockedFor == holder;
  }
//
// Query parsing
//
public Query createSimpleQuery(String sql) throws SQLException {
List<NativeQuery> queries = Parser.parseJdbcSql(sql,
getStandardConformingStrings(), false, true, true,
isReWriteBatchedInsertsEnabled());
return wrap(queries);
}
@Override
public Query wrap(List<NativeQuery> queries) {
if (queries.isEmpty()) {
// Empty query
return emptyQuery;
}
if (queries.size() == 1) {
NativeQuery firstQuery = queries.get(0);
if (isReWriteBatchedInsertsEnabled()
&& firstQuery.getCommand().isBatchedReWriteCompatible()) {
int valuesBraceOpenPosition =
firstQuery.getCommand().getBatchRewriteValuesBraceOpenPosition();
int valuesBraceClosePosition =
firstQuery.getCommand().getBatchRewriteValuesBraceClosePosition();
return new BatchedQuery(firstQuery, this, valuesBraceOpenPosition,
valuesBraceClosePosition, isColumnSanitiserDisabled(), logger);
} else {
return new SimpleQuery(firstQuery, this, isColumnSanitiserDisabled(), logger);
}
}
// Multiple statements.
SimpleQuery[] subqueries = new SimpleQuery[queries.size()];
int[] offsets = new int[subqueries.length];
int offset = 0;
for (int i = 0; i < queries.size(); ++i) {
NativeQuery nativeQuery = queries.get(i);
offsets[i] = offset;
subqueries[i] = new SimpleQuery(nativeQuery, this, isColumnSanitiserDisabled(), logger);
offset += nativeQuery.bindPositions.length;
}
return new CompositeQuery(subqueries, offsets);
}
//
// Query execution
//
private int updateQueryMode(int flags) {
switch (getPreferQueryMode()) {
case SIMPLE:
return flags | QUERY_EXECUTE_AS_SIMPLE;
case EXTENDED:
return flags & ~QUERY_EXECUTE_AS_SIMPLE;
default:
return flags;
}
}
  /**
   * Executes a single query and dispatches every backend response to the
   * supplied handler.
   *
   * <p>Waits for any in-flight ring-buffer fetch thread to drain first (done
   * outside the monitor to avoid deadlock), then serializes on the connection
   * monitor and {@code m_executingLock} so only one statement talks to the
   * backend at a time.</p>
   *
   * @param query      the query to execute (simple or composite)
   * @param parameters bound parameter values; null means no parameters
   * @param handler    receives rows, command status, warnings and errors
   * @param maxRows    maximum rows to return; 0 means unlimited
   * @param fetchSize  rows per network round trip; 0 means fetch all
   * @param flags      QUERY_* bit flags controlling protocol behaviour
   * @throws SQLException if execution fails
   */
  public void execute(Query query, ParameterList parameters, ResultHandler handler,
      int maxRows, int fetchSize, int flags) throws SQLException {
    // Wait for current ring buffer thread to finish, if any.
    // Shouldn't call from synchronized method, which can cause dead-lock.
    waitForRingBufferThreadToFinish(false, false, false, null, null);

    synchronized(this) {
      waitOnLock();

      try {
        m_executingLock.lock();

        if (RedshiftLogger.isEnable()) {
          logger.log(LogLevel.DEBUG, "  simple execute, handler={0}, maxRows={1}, fetchSize={2}, flags={3}",
              new Object[]{handler, maxRows, fetchSize, flags});
        }

        if (handler != null) {
          handler.setStatementStateInQueryFromIdle();
        }

        if (parameters == null) {
          parameters = SimpleQuery.NO_PARAMETERS;
        }

        flags = updateQueryMode(flags);

        boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;

        ((V3ParameterList) parameters).convertFunctionOutParameters();

        // Check parameters are all set..
        if (!describeOnly) {
          ((V3ParameterList) parameters).checkAllParametersSet();
        }

        boolean autosave = false;
        try {
          try {
            handler = sendQueryPreamble(handler, flags);
            autosave = sendAutomaticSavepoint(query, flags);
            sendQuery(query, (V3ParameterList) parameters, maxRows, fetchSize, flags,
                handler, null);
            if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
              // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
              // on its own
            } else {
              sendFlush();
              sendSync(true);
            }
            processResults(handler, flags, fetchSize, (query.getSubqueries() != null), maxRows);
            estimatedReceiveBufferBytes = 0;
          } catch (RedshiftBindException se) {
            // There are three causes of this error, an
            // invalid total Bind message length, a
            // BinaryStream that cannot provide the amount
            // of data claimed by the length argument, and
            // a BinaryStream that throws an Exception
            // when reading.
            //
            // We simply do not send the Execute message
            // so we can just continue on as if nothing
            // has happened. Perhaps we need to
            // introduce an error here to force the
            // caller to rollback if there is a
            // transaction in progress?
            //
            sendSync(true);
            processResults(handler, flags, 0, (query.getSubqueries() != null), maxRows);
            estimatedReceiveBufferBytes = 0;
            handler
                .handleError(new RedshiftException(GT.tr("Unable to bind parameter values for statement."),
                    RedshiftState.INVALID_PARAMETER_VALUE, se.getIOException(), logger));
          }
        } catch (IOException e) {
          // Stream state is unknown after an I/O failure; abort the connection.
          abort();
          handler.handleError(
              new RedshiftException(GT.tr("An I/O error occurred while sending to the backend."),
                  RedshiftState.CONNECTION_FAILURE, e, logger));
        } catch (SQLException sqe) {
          if(RedshiftLogger.isEnable())
            logger.logError(sqe);

          throw sqe;
        }

        try {
          handler.handleCompletion();
          if (cleanupSavePoints) {
            releaseSavePoint(autosave, flags);
          }
        } catch (SQLException e) {
          rollbackIfRequired(autosave, e);
        }
      }
      finally {
        m_executingLock.unlock();
      }
    } // synchronized
  }
  /**
   * Sends the automatic SAVEPOINT query ahead of the main statement when the
   * connection's autosave policy requires one.
   *
   * @param query the statement about to be executed
   * @param flags QUERY_* flags for the upcoming execution
   * @return true if a savepoint was sent; caller must later release or roll
   *         back to it
   * @throws IOException on a protocol write failure
   */
  private boolean sendAutomaticSavepoint(Query query, int flags) throws IOException {
    if (((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) == 0
        || getTransactionState() == TransactionState.OPEN)
        && query != restoreToAutoSave
        && getAutoSave() != AutoSave.NEVER
        // If query has no resulting fields, it cannot fail with 'cached plan must not change result type'
        // thus no need to set a savepoint before such query
        && (getAutoSave() == AutoSave.ALWAYS
        // If CompositeQuery is observed, just assume it might fail and set the savepoint
        || !(query instanceof SimpleQuery)
        || ((SimpleQuery) query).getFields() != null)) {

      /*
      create a different SAVEPOINT the first time so that all subsequent SAVEPOINTS can be released
      easily. There have been reports of server resources running out if there are too many
      SAVEPOINTS.
      */
      sendOneQuery(autoSaveQuery, SimpleQuery.NO_PARAMETERS, 1, 0,
          QUERY_NO_RESULTS | QUERY_NO_METADATA
              // Redshift does not support bind, exec, simple, sync message flow,
              // so we force autosavepoint to use simple if the main query is using simple
              | QUERY_EXECUTE_AS_SIMPLE);
      return true;
    }
    return false;
  }
private void releaseSavePoint(boolean autosave, int flags) throws SQLException {
if ( autosave
&& getAutoSave() == AutoSave.ALWAYS
&& getTransactionState() == TransactionState.OPEN) {
try {
sendOneQuery(releaseAutoSave, SimpleQuery.NO_PARAMETERS, 1, 0,
QUERY_NO_RESULTS | QUERY_NO_METADATA
| QUERY_EXECUTE_AS_SIMPLE);
} catch (IOException ex) {
throw new RedshiftException(GT.tr("Error releasing savepoint"), RedshiftState.IO_ERROR);
}
}
}
  /**
   * Rolls back to the automatic savepoint when the transaction has failed and
   * the failure is one the autosave machinery is meant to heal, then rethrows
   * the original exception (with any rollback failure chained onto it).
   *
   * @param autosave whether a savepoint was sent for the failing statement
   * @param e        the exception that triggered the rollback decision
   * @throws SQLException always rethrows {@code e}
   */
  private void rollbackIfRequired(boolean autosave, SQLException e) throws SQLException {
    if (autosave
        && getTransactionState() == TransactionState.FAILED
        && (getAutoSave() == AutoSave.ALWAYS || willHealOnRetry(e))) {
      try {
        // ROLLBACK and AUTOSAVE are executed as simple always to overcome "statement no longer exists S_xx"
        execute(restoreToAutoSave, SimpleQuery.NO_PARAMETERS, new ResultHandlerDelegate(null),
            1, 0, QUERY_NO_RESULTS | QUERY_NO_METADATA | QUERY_EXECUTE_AS_SIMPLE);
      } catch (SQLException e2) {
        // That's O(N), sorry
        e.setNextException(e2);
      }
    }

    if(RedshiftLogger.isEnable())
      logger.logError(e);

    throw e;
  }
// Deadlock avoidance:
//
// It's possible for the send and receive streams to get "deadlocked" against each other since
// we do not have a separate thread. The scenario is this: we have two streams:
//
// driver -> TCP buffering -> server
// server -> TCP buffering -> driver
//
// The server behaviour is roughly:
// while true:
// read message
// execute message
// write results
//
// If the server -> driver stream has a full buffer, the write will block.
// If the driver is still writing when this happens, and the driver -> server
// stream also fills up, we deadlock: the driver is blocked on write() waiting
// for the server to read some more data, and the server is blocked on write()
// waiting for the driver to read some more data.
//
// To avoid this, we guess at how much response data we can request from the
// server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES).
// This is the point where the server blocks on write and stops reading data. If we
// reach this point, we force a Sync message and read pending data from the server
// until ReadyForQuery, then go back to writing more queries unless we saw an error.
//
// This is not 100% reliable -- it's only done in the batch-query case and only
// at a reasonably high level (per query, not per message), and it's only an estimate
// -- so it might break. To do it correctly in all cases would seem to require a
// separate send or receive thread as we can only do the Sync-and-read-results
// operation at particular points, and also as we don't really know how much data
// the server is sending.
//
// Our message size estimation is coarse, and disregards asynchronous
// notifications, warnings/info/debug messages, etc, so the response size may be
// quite different from the 250 bytes assumed here even for queries that don't
// return data.
//
// See github issue #194 and #195 .
//
// Assume 64k server->client buffering, which is extremely conservative. A typical
// system will have 200kb or more of buffers for its receive buffers, and the sending
// system will typically have the same on the send side, giving us 400kb or to work
// with. (We could check Java's receive buffer size, but prefer to assume a very
// conservative buffer instead, and we don't know how big the server's send
// buffer is.)
//
  private static final int MAX_BUFFERED_RECV_BYTES = 64000; // conservative estimate of server->driver TCP buffering (see comment above)
  private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250; // assumed reply size for a query returning no rows
  /**
   * Executes a batch of queries, sharing one Sync/flush cycle where possible,
   * and reports each statement's outcome through {@code batchHandler}.
   *
   * <p>Stops sending further queries as soon as the handler records an
   * exception. Locking mirrors the single-query {@code execute}: ring-buffer
   * drain first (outside the monitor), then connection monitor plus
   * {@code m_executingLock}.</p>
   *
   * @param queries        statements to execute, in order
   * @param parameterLists per-statement parameters; entries may be null
   * @param batchHandler   receives per-statement results and errors
   * @param maxRows        maximum rows per statement; 0 means unlimited
   * @param fetchSize      rows per network round trip; 0 means fetch all
   * @param flags          QUERY_* bit flags controlling protocol behaviour
   * @throws SQLException if the batch fails
   */
  public void execute(Query[] queries, ParameterList[] parameterLists,
      BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags) throws SQLException {
    // Wait for current ring buffer thread to finish, if any.
    // Shouldn't call from synchronized method, which can cause dead-lock.
    waitForRingBufferThreadToFinish(false, false, false, null, null);

    synchronized(this) {
      waitOnLock();

      try {
        m_executingLock.lock();

        if (RedshiftLogger.isEnable()) {
          logger.log(LogLevel.DEBUG, "  batch execute {0} queries, handler={1}, maxRows={2}, fetchSize={3}, flags={4}",
              new Object[]{queries.length, batchHandler, maxRows, fetchSize, flags});
        }

        if (batchHandler != null) {
          batchHandler.setStatementStateInQueryFromIdle();
        }

        flags = updateQueryMode(flags);

        boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
        // Check parameters and resolve OIDs.
        if (!describeOnly) {
          for (ParameterList parameterList : parameterLists) {
            if (parameterList != null) {
              ((V3ParameterList) parameterList).checkAllParametersSet();
            }
          }
        }

        boolean autosave = false;
        ResultHandler handler = batchHandler;
        try {
          handler = sendQueryPreamble(batchHandler, flags);
          autosave = sendAutomaticSavepoint(queries[0], flags);
          estimatedReceiveBufferBytes = 0;

          for (int i = 0; i < queries.length; ++i) {
            Query query = queries[i];
            V3ParameterList parameters = (V3ParameterList) parameterLists[i];
            if (parameters == null) {
              parameters = SimpleQuery.NO_PARAMETERS;
            }

            sendQuery(query, parameters, maxRows, fetchSize, flags, handler, batchHandler);

            // Abort the batch on the first recorded failure.
            if (handler.getException() != null) {
              break;
            }
          }

          if (handler.getException() == null) {
            if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
              // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
              // on its own
            } else {
              sendFlush();
              sendSync(true);
            }
            processResults(handler, flags, fetchSize, true, maxRows);
            estimatedReceiveBufferBytes = 0;
          }
        } catch (IOException e) {
          // Stream state is unknown after an I/O failure; abort the connection.
          abort();
          handler.handleError(
              new RedshiftException(GT.tr("An I/O error occurred while sending to the backend."),
                  RedshiftState.CONNECTION_FAILURE, e, logger));
        } catch (SQLException sqe) {
          if(RedshiftLogger.isEnable())
            logger.logError(sqe);

          throw sqe;
        }

        try {
          handler.handleCompletion();
          if (cleanupSavePoints) {
            releaseSavePoint(autosave, flags);
          }
        } catch (SQLException e) {
          rollbackIfRequired(autosave, e);
        }
      }
      finally {
        m_executingLock.unlock();
      }
    } // synchronized
  }
  /**
   * Performs per-execution housekeeping before a query is sent: closes dead
   * statements/portals and, when the connection is idle and BEGIN is not
   * suppressed, sends an implicit BEGIN.
   *
   * @param delegateHandler the caller's handler for the real query
   * @param flags           QUERY_* flags of the upcoming execution
   * @return {@code delegateHandler} itself when no BEGIN was sent, otherwise
   *         a wrapper that swallows the BEGIN's status and validates it
   * @throws IOException on a protocol write failure
   */
  private ResultHandler sendQueryPreamble(final ResultHandler delegateHandler, int flags)
      throws IOException {
    // First, send CloseStatements for finalized SimpleQueries that had statement names assigned.
    processDeadParsedQueries();
    processDeadPortals();

    // Send BEGIN on first statement in transaction.
    if ((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) != 0
        || getTransactionState() != TransactionState.IDLE) {
      return delegateHandler;
    }

    int beginFlags = QueryExecutor.QUERY_NO_METADATA;
    if ((flags & QueryExecutor.QUERY_ONESHOT) != 0) {
      beginFlags |= QueryExecutor.QUERY_ONESHOT;
    }

    beginFlags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;

    beginFlags = updateQueryMode(beginFlags);

    final SimpleQuery beginQuery = ((flags & QueryExecutor.QUERY_READ_ONLY_HINT) == 0) ? beginTransactionQuery : beginReadOnlyTransactionQuery;

    sendOneQuery(beginQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);

    // Insert a handler that intercepts the BEGIN.
    return new ResultHandlerDelegate(delegateHandler) {
      private boolean sawBegin = false;

      public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
          ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples,
          int[] rowCount, Thread ringBufferThread) {
        // Drop any rows belonging to the implicit BEGIN; forward the rest.
        if (sawBegin) {
          super.handleResultRows(fromQuery, fields, tuples, cursor, queueTuples, rowCount, ringBufferThread);
        }
      }

      @Override
      public void handleCommandStatus(String status, long updateCount, long insertOID) {
        if (!sawBegin) {
          sawBegin = true;
          // First status must be the BEGIN acknowledgement; anything else is a protocol violation.
          if (!status.equals("BEGIN")) {
            handleError(new RedshiftException(GT.tr("Expected command status BEGIN, got {0}.", status),
                RedshiftState.PROTOCOL_VIOLATION));
          }
        } else {
          super.handleCommandStatus(status, updateCount, insertOID);
        }
      }
    };
  }
//
// Fastpath
//
  /**
   * Fastpath function call entry point. Currently disabled for Redshift and
   * always returns null; the original implementation is preserved below in
   * the comment for reference.
   *
   * @param fnid          backend function OID (unused)
   * @param parameters    call parameters (unused)
   * @param suppressBegin whether to skip the implicit BEGIN (unused)
   * @return always null
   * @throws SQLException never thrown by the current stub
   */
  public byte[] fastpathCall(int fnid, ParameterList parameters, boolean suppressBegin)
      throws SQLException {
  	
  	return null;
  	
  	/* Not in use. TODO: Comment all references used in LargeObject.
    // Wait for current ring buffer thread to finish, if any.
  	// Shouldn't call from synchronized method, which can cause dead-lock.
    waitForRingBufferThreadToFinish(false, false, null, null);
  	
    synchronized(this) {
	    waitOnLock();
	    if (!suppressBegin) {
	      doSubprotocolBegin();
	    }
	    try {
	      sendFastpathCall(fnid, (SimpleParameterList) parameters);
	      return receiveFastpathResult();
	    } catch (IOException ioe) {
	      abort();
	      throw new RedshiftException(GT.tr("An I/O error occurred while sending to the backend."),
	          RedshiftState.CONNECTION_FAILURE, ioe);
	    }
    }
    */
  }
  /**
   * Issues an explicit BEGIN (simple protocol) when the connection is idle,
   * used before fastpath and COPY sub-protocol operations that bypass the
   * normal query preamble. Warnings during the BEGIN are treated as errors.
   *
   * @throws SQLException if the BEGIN fails or the backend responds
   *                      unexpectedly
   */
  public void doSubprotocolBegin() throws SQLException {
    if (getTransactionState() == TransactionState.IDLE) {

    	if(RedshiftLogger.isEnable())
    		logger.log(LogLevel.DEBUG, "Issuing BEGIN before fastpath or copy call.");

      ResultHandler handler = new ResultHandlerBase() {
        private boolean sawBegin = false;

        @Override
        public void handleCommandStatus(String status, long updateCount, long insertOID) {
          if (!sawBegin) {
            // First (and only) status must be the BEGIN acknowledgement.
            if (!status.equals("BEGIN")) {
              handleError(
                  new RedshiftException(GT.tr("Expected command status BEGIN, got {0}.", status),
                      RedshiftState.PROTOCOL_VIOLATION));
            }
            sawBegin = true;
          } else {
            handleError(new RedshiftException(GT.tr("Unexpected command status: {0}.", status),
                RedshiftState.PROTOCOL_VIOLATION));
          }
        }

        @Override
        public void handleWarning(SQLWarning warning) {
          // we don't want to ignore warnings and it would be tricky
          // to chain them back to the connection, so since we don't
          // expect to get them in the first place, we just consider
          // them errors.
          handleError(warning);
        }
      };

      try {
        /* Send BEGIN with simple protocol preferred */
        int beginFlags = QueryExecutor.QUERY_NO_METADATA
                         | QueryExecutor.QUERY_ONESHOT
                         | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
        beginFlags = updateQueryMode(beginFlags);
        sendOneQuery(beginTransactionQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
        sendSync(true);
        processResults(handler, 0, 0, false, 0);
        estimatedReceiveBufferBytes = 0;
      } catch (IOException ioe) {
        throw new RedshiftException(GT.tr("An I/O error occurred while sending to the backend."),
            RedshiftState.CONNECTION_FAILURE, ioe);
      }
    }
  }
public ParameterList createFastpathParameters(int count) {
return new SimpleParameterList(count, this);
}
/* Not in use.
private void sendFastpathCall(int fnid, SimpleParameterList params)
throws SQLException, IOException {
if (RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " FE=> FunctionCall({0}, {1} params)", new Object[]{fnid, params.getParameterCount()});
}
//
// Total size = 4 (length)
// + 4 (function OID)
// + 2 (format code count) + N * 2 (format codes)
// + 2 (parameter count) + encodedSize (parameters)
// + 2 (result format)
int paramCount = params.getParameterCount();
int encodedSize = 0;
for (int i = 1; i <= paramCount; ++i) {
if (params.isNull(i)) {
encodedSize += 4;
} else {
encodedSize += 4 + params.getV3Length(i);
}
}
pgStream.sendChar('F');
pgStream.sendInteger4(4 + 4 + 2 + 2 * paramCount + 2 + encodedSize + 2);
pgStream.sendInteger4(fnid);
pgStream.sendInteger2(paramCount);
for (int i = 1; i <= paramCount; ++i) {
pgStream.sendInteger2(params.isBinary(i) ? 1 : 0);
}
pgStream.sendInteger2(paramCount);
for (int i = 1; i <= paramCount; i++) {
if (params.isNull(i)) {
pgStream.sendInteger4(-1);
} else {
pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
params.writeV3Value(i, pgStream);
}
}
pgStream.sendInteger2(1); // Binary result format
pgStream.flush();
}
*/
  // Just for API compatibility with previous versions.
  /**
   * Non-blocking variant kept for API compatibility: drains any pending
   * asynchronous notifications without waiting.
   *
   * @throws SQLException if reading pending messages fails
   */
  public synchronized void processNotifies() throws SQLException {
    processNotifies(-1);
  }
  /**
   * Reads asynchronous notifications from the backend, optionally blocking.
   * Only runs when no transaction is open (async notifies arrive outside of
   * transactions). Notice responses received while waiting are turned into
   * connection warnings; the remaining timeout is recomputed after each one.
   *
   * @param timeoutMillis when &gt; 0, block for this time
   *                      when =0, block forever
   *                      when &lt; 0, don't block
   * @throws SQLException on protocol errors or socket failures
   */
  public synchronized void processNotifies(int timeoutMillis) throws SQLException {
    waitOnLock();
    // Asynchronous notifies only arrive when we are not in a transaction
    if (getTransactionState() != TransactionState.IDLE) {
      return;
    }

    if (hasNotifications()) {
      // No need to timeout when there are already notifications. We just check for more in this case.
      timeoutMillis = -1;
    }

    boolean useTimeout = timeoutMillis > 0;
    long startTime = 0;
    int oldTimeout = 0;
    if (useTimeout) {
      startTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
      try {
        // Remember the socket's current SO_TIMEOUT so it can be restored in the finally block.
        oldTimeout = pgStream.getSocket().getSoTimeout();
      } catch (SocketException e) {
        throw new RedshiftException(GT.tr("An error occurred while trying to get the socket "
            + "timeout."), RedshiftState.CONNECTION_FAILURE, e);
      }
    }

    try {
      while (timeoutMillis >= 0 || pgStream.hasMessagePending()) {
        if (useTimeout && timeoutMillis >= 0) {
          setSocketTimeout(timeoutMillis);
        }
        int c = pgStream.receiveChar();
        if (useTimeout && timeoutMillis >= 0) {
          setSocketTimeout(0); // Don't timeout after first char
        }
        switch (c) {
          case 'A': // Asynchronous Notify
            receiveAsyncNotify();
            timeoutMillis = -1;
            continue;
          case 'E':
            // Error Response (response to pretty much everything; backend then skips until Sync)
            throw receiveErrorResponse(false);
          case 'N': // Notice Response (warnings / info)
            SQLWarning warning = receiveNoticeResponse();
            addWarning(warning);
            if (useTimeout) {
              long newTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
              timeoutMillis += startTime - newTimeMillis; // Overflows after 49 days, ignore that
              startTime = newTimeMillis;
              if (timeoutMillis == 0) {
                timeoutMillis = -1; // Don't accidentially wait forever
              }
            }
            break;
          default:
            throw new RedshiftException(GT.tr("Unknown Response Type {0}.", (char) c),
                RedshiftState.CONNECTION_FAILURE);
        }
      }
    } catch (SocketTimeoutException ioe) {
      // No notifications this time...
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("An I/O error occurred while sending to the backend."),
          RedshiftState.CONNECTION_FAILURE, ioe);
    } finally {
      if (useTimeout) {
        setSocketTimeout(oldTimeout);
      }
    }
  }
private void setSocketTimeout(int millis) throws RedshiftException {
try {
Socket s = pgStream.getSocket();
if (!s.isClosed()) { // Is this check required?
pgStream.setNetworkTimeout(millis);
}
} catch (IOException e) {
throw new RedshiftException(GT.tr("An error occurred while trying to reset the socket timeout."),
RedshiftState.CONNECTION_FAILURE, e);
}
}
/* Not in use.
private byte[] receiveFastpathResult() throws IOException, SQLException {
boolean endQuery = false;
SQLException error = null;
byte[] returnValue = null;
while (!endQuery) {
int c = pgStream.receiveChar();
switch (c) {
case 'A': // Asynchronous Notify
receiveAsyncNotify();
break;
case 'E':
// Error Response (response to pretty much everything; backend then skips until Sync)
SQLException newError = receiveErrorResponse(false);
if (error == null) {
error = newError;
} else {
error.setNextException(newError);
}
// keep processing
break;
case 'N': // Notice Response (warnings / info)
SQLWarning warning = receiveNoticeResponse();
addWarning(warning);
break;
case 'Z': // Ready For Query (eventual response to Sync)
receiveRFQ();
endQuery = true;
break;
case 'V': // FunctionCallResponse
int msgLen = pgStream.receiveInteger4();
int valueLen = pgStream.receiveInteger4();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE FunctionCallResponse({0} bytes)", valueLen);
if (valueLen != -1) {
byte[] buf = new byte[valueLen];
pgStream.receive(buf, 0, valueLen);
returnValue = buf;
}
break;
default:
throw new RedshiftException(GT.tr("Unknown Response Type {0}.", (char) c),
RedshiftState.CONNECTION_FAILURE);
}
}
// did we get an error during this query?
if (error != null) {
throw error;
}
return returnValue;
}
*/
//
// Copy subprotocol implementation
//
  /**
   * Sends given query to BE to start, initialize and lock connection for a CopyOperation.
   * Delegates to the dedicated copy-protocol executor.
   *
   * @param sql COPY FROM STDIN / COPY TO STDOUT statement
   * @param suppressBegin if true, skip the implicit BEGIN before the copy
   * @return CopyIn or CopyOut operation object
   * @throws SQLException on failure
   */
  public CopyOperation startCopy(String sql, boolean suppressBegin)
      throws SQLException {
  	return copyQueryExecutor.startCopy(sql, suppressBegin);
  }
  /**
   * Finishes a copy operation and unlocks connection discarding any exchanged data.
   * Delegates to the dedicated copy-protocol executor.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @throws SQLException on any additional failure
   */
  public void cancelCopy(CopyOperationImpl op) throws SQLException {
  	copyQueryExecutor.cancelCopy(op);
  }
  /**
   * Finishes writing to copy and unlocks connection.
   * Delegates to the dedicated copy-protocol executor.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @return number of rows updated for server versions 8.2 or newer
   * @throws SQLException on failure
   */
  public synchronized long endCopy(CopyOperationImpl op) throws SQLException {
  	return copyQueryExecutor.endCopy(op);
  }
  /**
   * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
   * returns CommandComplete, which should not happen
   * Delegates to the dedicated copy-protocol executor.
   *
   * @param op the CopyIn operation presumably currently holding lock on this connection
   * @param data bytes to send
   * @param off index of first byte to send (usually 0)
   * @param siz number of bytes to send (usually data.length)
   * @throws SQLException on failure
   */
  public synchronized void writeToCopy(CopyOperationImpl op, byte[] data, int off, int siz)
      throws SQLException {
  	copyQueryExecutor.writeToCopy(op, data, off, siz);
  }
  /**
   * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
   * returns CommandComplete, which should not happen
   * Delegates to the dedicated copy-protocol executor.
   *
   * @param op the CopyIn operation presumably currently holding lock on this connection
   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
   * @throws SQLException on failure
   */
  public synchronized void writeToCopy(CopyOperationImpl op, ByteStreamWriter from)
      throws SQLException {
  	copyQueryExecutor.writeToCopy(op, from);
  }
  /**
   * Flushes buffered COPY data to the backend for an active copy operation.
   * Delegates to the dedicated copy-protocol executor.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @throws SQLException on failure
   */
  public synchronized void flushCopy(CopyOperationImpl op) throws SQLException {
  	copyQueryExecutor.flushCopy(op);
  }
  /**
   * Wait for a row of data to be received from server on an active copy operation
   * Connection gets unlocked by processCopyResults() at end of operation.
   * Delegates to the dedicated copy-protocol executor.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @param block whether to block waiting for input
   * @throws SQLException on any failure
   */
  synchronized void readFromCopy(CopyOperationImpl op, boolean block) throws SQLException {
  	copyQueryExecutor.readFromCopy(op, block);
  }
/*
* To prevent client/server protocol deadlocks, we try to manage the estimated recv buffer size
* and force a sync +flush and process results if we think it might be getting too full.
*
* See the comments above MAX_BUFFERED_RECV_BYTES's declaration for details.
*/
  /**
   * Tracks the estimated size of pending backend responses and, when the
   * estimate would overflow the assumed receive buffer (or batching is
   * disallowed), forces a Sync and drains results to avoid a client/server
   * write-write deadlock. See the MAX_BUFFERED_RECV_BYTES commentary above.
   *
   * @param query            the query about to be sent
   * @param disallowBatching if true, always flush before sending
   * @param resultHandler    handler used when draining results
   * @param batchHandler     batch handler notified of secured progress; may be null
   * @param flags            QUERY_* flags of the current execution
   * @throws IOException on a protocol write failure
   */
  private void flushIfDeadlockRisk(Query query, boolean disallowBatching,
      ResultHandler resultHandler,
      BatchResultHandler batchHandler,
      final int flags) throws IOException {
    // Assume all statements need at least this much reply buffer space,
    // plus params
    estimatedReceiveBufferBytes += NODATA_QUERY_RESPONSE_SIZE_BYTES;

    SimpleQuery sq = (SimpleQuery) query;
    if (sq.isStatementDescribed()) {
      /*
       * Estimate the response size of the fields and add it to the expected response size.
       *
       * It's impossible for us to estimate the rowcount. We'll assume one row, as that's the common
       * case for batches and we're leaving plenty of breathing room in this approach. It's still
       * not deadlock-proof though; see pgjdbc github issues #194 and #195.
       */
      int maxResultRowSize = sq.getMaxResultRowSize();
      if (maxResultRowSize >= 0) {
        estimatedReceiveBufferBytes += maxResultRowSize;
      } else {
      	if(RedshiftLogger.isEnable())
      		logger.log(LogLevel.DEBUG, "Couldn't estimate result size or result size unbounded, "
            + "disabling batching for this query.");
        disallowBatching = true;
      }
    } else {
      /*
       * We only describe a statement if we're expecting results from it, so it's legal to batch
       * unprepared statements. We'll abort later if we get any uresults from them where none are
       * expected. For now all we can do is hope the user told us the truth and assume that
       * NODATA_QUERY_RESPONSE_SIZE_BYTES is enough to cover it.
       */
    }

    if (disallowBatching || estimatedReceiveBufferBytes >= MAX_BUFFERED_RECV_BYTES) {
    	if(RedshiftLogger.isEnable())
    		logger.log(LogLevel.DEBUG, "Forcing Sync, receive buffer full or batching disallowed");
      sendSync(true);
      processResults(resultHandler, flags, 0, (query.getSubqueries() != null), 0);
      estimatedReceiveBufferBytes = 0;
      if (batchHandler != null) {
        batchHandler.secureProgress();
      }
    }
  }
/*
* Send a query to the backend.
*/
  /**
   * Sends a query (and, for composite queries, each of its subqueries) to the
   * backend, flushing first whenever the deadlock-avoidance estimate says the
   * receive buffer may be getting full. Stops early if the handler has
   * already recorded an error.
   *
   * @param query         the query to send
   * @param parameters    parameter values, possibly split per subquery
   * @param maxRows       maximum rows to return; 0 means unlimited
   * @param fetchSize     rows per network round trip; 0 means fetch all
   * @param flags         QUERY_* flags of the current execution
   * @param resultHandler handler consulted for earlier errors
   * @param batchHandler  batch handler for deadlock-risk flushes; may be null
   * @throws IOException  on a protocol write failure
   * @throws SQLException on a query-level failure
   */
  private void sendQuery(Query query, V3ParameterList parameters, int maxRows, int fetchSize,
      int flags, ResultHandler resultHandler,
      BatchResultHandler batchHandler) throws IOException, SQLException {
    // Now the query itself.
    Query[] subqueries = query.getSubqueries();
    SimpleParameterList[] subparams = parameters.getSubparams();

    // We know this is deprecated, but still respect it in case anyone's using it.
    // PgJDBC its self no longer does.
    @SuppressWarnings("deprecation")
    boolean disallowBatching = (flags & QueryExecutor.QUERY_DISALLOW_BATCHING) != 0;

    if (subqueries == null) {
      flushIfDeadlockRisk(query, disallowBatching, resultHandler, batchHandler, flags);

      // If we saw errors, don't send anything more.
      if (resultHandler.getException() == null) {
        sendOneQuery((SimpleQuery) query, (SimpleParameterList) parameters, maxRows, fetchSize,
            flags);
      }
    } else {
      for (int i = 0; i < subqueries.length; ++i) {
        final Query subquery = subqueries[i];
        flushIfDeadlockRisk(subquery, disallowBatching, resultHandler, batchHandler, flags);

        // If we saw errors, don't send anything more.
        if (resultHandler.getException() != null) {
          break;
        }

        // In the situation where parameters is already
        // NO_PARAMETERS it cannot know the correct
        // number of array elements to return in the
        // above call to getSubparams(), so it must
        // return null which we check for here.
        //
        SimpleParameterList subparam = SimpleQuery.NO_PARAMETERS;
        if (subparams != null) {
          subparam = subparams[i];
        }
        sendOneQuery((SimpleQuery) subquery, subparam, maxRows, fetchSize, flags);
      }
    }
  }
//
// Message sending
//
  /**
   * Writes a protocol Sync ('S') message and flushes the stream. Sync asks
   * the backend to finish the current command batch and reply ReadyForQuery.
   *
   * @param addInQueue if true, record the sync in the pending execute/portal
   *                   queues so response processing stays aligned
   * @throws IOException on a write failure
   */
  private void sendSync(boolean addInQueue) throws IOException {
  	if(RedshiftLogger.isEnable())
  		logger.log(LogLevel.DEBUG, " FE=> Sync");

    pgStream.sendChar('S'); // Sync
    pgStream.sendInteger4(4); // Length
    pgStream.flush();

    if (addInQueue) {
	    // Below "add queues" are likely not required at all
	    pendingExecuteQueue.add(new ExecuteRequest(sync, null, true));
	    pendingDescribePortalQueue.add(sync);
    }
  }
  /**
   * Writes a protocol Flush ('H') message and flushes the stream, asking the
   * backend to emit any pending responses without ending the command batch.
   *
   * @throws IOException on a write failure
   */
  private void sendFlush() throws IOException {
  	if(RedshiftLogger.isEnable())
  		logger.log(LogLevel.DEBUG, " FE=> Flush");

    pgStream.sendChar('H'); // Flush
    pgStream.sendInteger4(4); // Length
    pgStream.flush();

    // Below "add queues" are likely not required at all
    // pendingExecuteQueue.add(new ExecuteRequest(sync, null, true));
    // pendingDescribePortalQueue.add(sync);
  }
  /**
   * Sends a protocol Parse ('P') message for the query unless an equivalent
   * prepared statement already exists. One-shot queries parse under the
   * unnamed statement; otherwise a unique server-side statement name is
   * generated and registered for later cleanup.
   *
   * @param query   the query to parse
   * @param params  parameters whose type OIDs are sent with the Parse
   * @param oneShot if true, use the unnamed statement (no reuse)
   * @throws IOException on a write failure
   */
  private void sendParse(SimpleQuery query, SimpleParameterList params, boolean oneShot)
      throws IOException {
    // Already parsed, or we have a Parse pending and the types are right?
    int[] typeOIDs = params.getTypeOIDs();
    if (query.isPreparedFor(typeOIDs, deallocateEpoch)) {
      return;
    }

    // Clean up any existing statement, as we can't use it.
    query.unprepare();
    processDeadParsedQueries();

    // Remove any cached Field values. The re-parsed query might report different
    // fields because input parameter types may result in different type inferences
    // for unspecified types.
    query.setFields(null);

    String statementName = null;
    if (!oneShot) {
      // Generate a statement name to use.
      statementName = "S_" + (nextUniqueID++) + "-" + System.nanoTime();

      // And prepare the new statement.
      // NB: Must clone the OID array, as it's a direct reference to
      // the SimpleParameterList's internal array that might be modified
      // under us.
      query.setStatementName(statementName, deallocateEpoch);
      query.setPrepareTypes(typeOIDs);
      registerParsedQuery(query, statementName);
    }

    byte[] encodedStatementName = query.getEncodedStatementName();
    String nativeSql = query.getNativeSql();

    if (RedshiftLogger.isEnable()) {
      StringBuilder sbuf = new StringBuilder(" FE=> Parse(stmt=" + statementName + ",query=\"");
      sbuf.append(QuerySanitizer.filterCredentials(nativeSql));
      sbuf.append("\",oids={");
      for (int i = 1; i <= params.getParameterCount(); ++i) {
        if (i != 1) {
          sbuf.append(",");
        }
        sbuf.append(params.getTypeOID(i));
      }
      sbuf.append("})");

      if(RedshiftLogger.isEnable())
      	logger.log(LogLevel.DEBUG, sbuf.toString());
    }

    //
    // Send Parse.
    //

    byte[] queryUtf8 = Utils.encodeUTF8(nativeSql);

    // Total size = 4 (size field)
    // + N + 1 (statement name, zero-terminated)
    // + N + 1 (query, zero terminated)
    // + 2 (parameter count) + N * 4 (parameter types)
    int encodedSize = 4
        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
        + queryUtf8.length + 1
        + 2 + 4 * params.getParameterCount();

    pgStream.sendChar('P'); // Parse
    pgStream.sendInteger4(encodedSize);
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName);
    }
    pgStream.sendChar(0); // End of statement name
    pgStream.send(queryUtf8); // Query string
    pgStream.sendChar(0); // End of query string.
    pgStream.sendInteger2(params.getParameterCount()); // # of parameter types specified
    for (int i = 1; i <= params.getParameterCount(); ++i) {
      pgStream.sendInteger4(params.getTypeOID(i));
    }

    pendingParseQueue.add(query);
  }
  /**
   * Sends a protocol Bind ('B') message attaching parameter values to a
   * prepared statement and naming the destination portal. Also decides the
   * per-field result format (binary vs text) and rejects Bind messages whose
   * encoded size exceeds the backend's maximum allocation.
   *
   * @param query            the prepared query being bound
   * @param params           parameter values and their formats
   * @param portal           destination portal; null means the unnamed portal
   * @param noBinaryTransfer if true, force all result fields to text format
   * @throws IOException on a write failure, including RedshiftBindException
   *                     when a parameter stream fails mid-write
   */
  private void sendBind(SimpleQuery query, SimpleParameterList params, Portal portal,
      boolean noBinaryTransfer) throws IOException {
    //
    // Send Bind.
    //

    String statementName = query.getStatementName();
    byte[] encodedStatementName = query.getEncodedStatementName();
    byte[] encodedPortalName = (portal == null ? null : portal.getEncodedPortalName());

    if (RedshiftLogger.isEnable()) {
      StringBuilder sbuf = new StringBuilder(" FE=> Bind(stmt=" + statementName + ",portal=" + portal);
      for (int i = 1; i <= params.getParameterCount(); ++i) {
        sbuf.append(",$").append(i).append("=<")
            .append(params.toString(i,true))
            .append(">,type=").append(Oid.toString(params.getTypeOID(i)));
      }
      sbuf.append(")");

      if(RedshiftLogger.isEnable())
      	logger.log(LogLevel.DEBUG, sbuf.toString());
    }

    // Total size = 4 (size field) + N + 1 (destination portal)
    // + N + 1 (statement name)
    // + 2 (param format code count) + N * 2 (format codes)
    // + 2 (param value count) + N (encoded param value size)
    // + 2 (result format code count, 0)
    long encodedSize = 0;
    for (int i = 1; i <= params.getParameterCount(); ++i) {
      if (params.isNull(i)) {
        encodedSize += 4;
      } else {
        encodedSize += (long) 4 + params.getV3Length(i);
      }
    }

    Field[] fields = query.getFields();
    if (!noBinaryTransfer && query.needUpdateFieldFormats()) {
      // Upgrade eligible fields to binary format on first use.
      for (Field field : fields) {
        if (useBinary(field)) {
          field.setFormat(Field.BINARY_FORMAT);
          query.setHasBinaryFields(true);
        }
      }
    }
    // If text-only results are required (e.g. updateable resultset), and the query has binary columns,
    // flip to text format.
    if (noBinaryTransfer && query.hasBinaryFields()) {
      for (Field field : fields) {
        if (field.getFormat() != Field.TEXT_FORMAT) {
          field.setFormat(Field.TEXT_FORMAT);
        }
      }
      query.resetNeedUpdateFieldFormats();
      query.setHasBinaryFields(false);
    }

    // This is not the number of binary fields, but the total number
    // of fields if any of them are binary or zero if all of them
    // are text.
    int numBinaryFields = !noBinaryTransfer && query.hasBinaryFields() ? fields.length : 0;

    encodedSize = 4
        + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1
        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
        + 2 + params.getParameterCount() * 2
        + 2 + encodedSize
        + 2 + numBinaryFields * 2;

    // backend's MaxAllocSize is the largest message that can
    // be received from a client. If we have a bigger value
    // from either very large parameters or incorrect length
    // descriptions of setXXXStream we do not send the bind
    // messsage.
    //
    if (encodedSize > 0x3fffffff) {
      throw new RedshiftBindException(new IOException(GT.tr(
          "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters.",
          encodedSize)));
    }

    pgStream.sendChar('B'); // Bind
    pgStream.sendInteger4((int) encodedSize); // Message size
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // Destination portal name.
    }
    pgStream.sendChar(0); // End of portal name.
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName); // Source statement name.
    }
    pgStream.sendChar(0); // End of statement name.

    pgStream.sendInteger2(params.getParameterCount()); // # of parameter format codes
    for (int i = 1; i <= params.getParameterCount(); ++i) {
      pgStream.sendInteger2(params.isBinary(i) ? 1 : 0); // Parameter format code
    }

    pgStream.sendInteger2(params.getParameterCount()); // # of parameter values

    // If an error occurs when reading a stream we have to
    // continue pumping out data to match the length we
    // said we would. Once we've done that we throw
    // this exception. Multiple exceptions can occur and
    // it really doesn't matter which one is reported back
    // to the caller.
    //
    RedshiftBindException bindException = null;

    for (int i = 1; i <= params.getParameterCount(); ++i) {
      if (params.isNull(i)) {
        pgStream.sendInteger4(-1); // Magic size of -1 means NULL
      } else {
        pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
        try {
          params.writeV3Value(i, pgStream); // Parameter value
        } catch (RedshiftBindException be) {
          bindException = be;
        }
      }
    }

    pgStream.sendInteger2(numBinaryFields); // # of result format codes
    for (int i = 0; i < numBinaryFields; ++i) {
      pgStream.sendInteger2(fields[i].getFormat());
    }

    pendingBindQueue.add(portal == null ? UNNAMED_PORTAL : portal);

    if (bindException != null) {
      throw bindException;
    }
  }
  /**
   * Returns true if the specified field should be retrieved using binary encoding.
   *
   * @param field The field whose Oid type to analyse.
   * @return True if {@link Field#BINARY_FORMAT} should be used, false if
   *         {@link Field#TEXT_FORMAT} should be used.
   */
  private boolean useBinary(Field field) {
    int oid = field.getOID();
    return useBinaryForReceive(oid);
  }
  /**
   * Sends a Describe (portal) message ('D' with subtype 'P') so the backend replies
   * with a RowDescription for the portal's result columns. The query is appended to
   * pendingDescribePortalQueue and marked as portal-described.
   *
   * @param query the query whose result metadata is requested
   * @param portal the portal to describe, or null for the unnamed portal
   * @throws IOException if an I/O error occurs while writing to the socket
   */
  private void sendDescribePortal(SimpleQuery query, Portal portal) throws IOException {
    //
    // Send Describe.
    //
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " FE=> Describe(portal={0})", portal);
    byte[] encodedPortalName = (portal == null ? null : portal.getEncodedPortalName());
    // Total size = 4 (size field) + 1 (describe type, 'P') + N + 1 (portal name)
    int encodedSize = 4 + 1 + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1;
    pgStream.sendChar('D'); // Describe
    pgStream.sendInteger4(encodedSize); // message size
    pgStream.sendChar('P'); // Describe (Portal)
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // portal name to describe
    }
    pgStream.sendChar(0); // end of portal name
    pendingDescribePortalQueue.add(query);
    query.setPortalDescribed(true);
  }
  /**
   * Sends a Describe (statement) message ('D' with subtype 'S') so the backend replies
   * with a ParameterDescription followed by a RowDescription (or NoData). The request
   * is recorded on both pendingDescribeStatementQueue and pendingDescribePortalQueue,
   * and the query is marked described for both.
   *
   * @param query the query whose prepared statement is being described
   * @param params parameter list whose unresolved types will be filled in from the reply
   * @param describeOnly true when only metadata is wanted and no Execute will follow
   * @throws IOException if an I/O error occurs while writing to the socket
   */
  private void sendDescribeStatement(SimpleQuery query, SimpleParameterList params,
      boolean describeOnly) throws IOException {
    // Send Statement Describe
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " FE=> Describe(statement={0})", query.getStatementName());
    byte[] encodedStatementName = query.getEncodedStatementName();
    // Total size = 4 (size field) + 1 (describe type, 'S') + N + 1 (statement name)
    int encodedSize = 4 + 1 + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1;
    pgStream.sendChar('D'); // Describe
    pgStream.sendInteger4(encodedSize); // Message size
    pgStream.sendChar('S'); // Describe (Statement);
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName); // Statement name
    }
    pgStream.sendChar(0); // end message
    // Note: statement name can change over time for the same query object
    // Thus we take a snapshot of the query name
    pendingDescribeStatementQueue.add(
        new DescribeRequest(query, params, describeOnly, query.getStatementName()));
    pendingDescribePortalQueue.add(query);
    query.setStatementDescribed(true);
    query.setPortalDescribed(true);
  }
  /**
   * Sends an Execute message ('E') running the given portal with a row limit, and
   * queues a matching ExecuteRequest so the response messages can be correlated.
   *
   * @param query the query being executed
   * @param portal the portal to execute, or null for the unnamed portal
   * @param limit maximum number of rows the backend should return; 0 means no limit
   * @throws IOException if an I/O error occurs while writing to the socket
   */
  private void sendExecute(SimpleQuery query, Portal portal, int limit) throws IOException {
    //
    // Send Execute.
    //
    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, " FE=> Execute(portal={0},limit={1})", new Object[]{portal, limit});
    }
    byte[] encodedPortalName = (portal == null ? null : portal.getEncodedPortalName());
    int encodedSize = (encodedPortalName == null ? 0 : encodedPortalName.length);
    // Total size = 4 (size field) + 1 + N (source portal) + 4 (max rows)
    pgStream.sendChar('E'); // Execute
    pgStream.sendInteger4(4 + 1 + encodedSize + 4); // message size
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // portal name
    }
    pgStream.sendChar(0); // portal name terminator
    pgStream.sendInteger4(limit); // row limit
    pendingExecuteQueue.add(new ExecuteRequest(query, portal, false));
  }
  /**
   * Sends a Close (portal) message ('C' with subtype 'P') instructing the backend to
   * release the named portal's resources.
   *
   * @param portalName name of the portal to close, or null for the unnamed portal
   * @throws IOException if an I/O error occurs while writing to the socket
   */
  private void sendClosePortal(String portalName) throws IOException {
    //
    // Send Close.
    //
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " FE=> ClosePortal({0})", portalName);
    byte[] encodedPortalName = (portalName == null ? null : Utils.encodeUTF8(portalName));
    int encodedSize = (encodedPortalName == null ? 0 : encodedPortalName.length);
    // Total size = 4 (size field) + 1 (close type, 'P') + 1 + N (portal name)
    pgStream.sendChar('C'); // Close
    pgStream.sendInteger4(4 + 1 + 1 + encodedSize); // message size
    pgStream.sendChar('P'); // Close (Portal)
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName);
    }
    pgStream.sendChar(0); // portal name terminator (empty name = unnamed portal)
  }
  /**
   * Sends a Close (statement) message ('C' with subtype 'S') instructing the backend
   * to deallocate the named prepared statement.
   *
   * @param statementName name of the prepared statement to close, or null for the
   *        unnamed statement
   * @throws IOException if an I/O error occurs while writing to the socket
   */
  private void sendCloseStatement(String statementName) throws IOException {
    //
    // Send Close.
    //
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " FE=> CloseStatement({0})", statementName);
    byte[] encodedStatementName = (statementName == null)
        ? null
        : Utils.encodeUTF8(statementName);
    int encodedSize = (encodedStatementName == null ? 0 : encodedStatementName.length);
    // Total size = 4 (size field) + 1 (close type, 'S') + N + 1 (statement name)
    pgStream.sendChar('C'); // Close
    pgStream.sendInteger4(4 + 1 + encodedSize + 1); // message size
    pgStream.sendChar('S'); // Close (Statement)
    if (encodedStatementName != null)
      pgStream.send(encodedStatementName); // statement to close
    pgStream.sendChar(0); // statement name terminator or unnamed statement
  }
// sendOneQuery sends a single statement via the extended query protocol.
// Per the FE/BE docs this is essentially the same as how a simple query runs
// (except that it generates some extra acknowledgement messages, and we
// can send several queries before doing the Sync)
//
// Parse S_n from "query string with parameter placeholders"; skipped if already done previously
// or if oneshot
// Bind C_n from S_n plus parameters (or from unnamed statement for oneshot queries)
// Describe C_n; skipped if caller doesn't want metadata
// Execute C_n with maxRows limit; maxRows = 1 if caller doesn't want results
// (above repeats once per call to sendOneQuery)
// Sync (sent by caller)
//
  /**
   * Sends a single statement via the extended query protocol
   * (Parse / Bind / [Describe] / Execute); the caller is responsible for the trailing
   * Sync. In simple mode (QUERY_EXECUTE_AS_SIMPLE) the statement is sent as a single
   * 'Q' message instead and this method returns immediately after that.
   *
   * @param query the parsed query to execute
   * @param params the parameter values to bind
   * @param maxRows maximum number of rows the statement may return; 0 means unlimited
   * @param fetchSize rows to fetch per Execute when a named portal is used
   * @param flags bitmask of QueryExecutor.QUERY_* flags controlling protocol behavior
   * @throws IOException if an I/O error occurs while writing to the socket
   */
  private void sendOneQuery(SimpleQuery query, SimpleParameterList params, int maxRows,
      int fetchSize, int flags) throws IOException {
    boolean asSimple = (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0;
    if (asSimple) {
      assert (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) == 0
          : "Simple mode does not support describe requests. sql = " + query.getNativeSql()
          + ", flags = " + flags;
      sendSimpleQuery(query, params);
      return;
    }
    assert !query.getNativeQuery().multiStatement
        : "Queries that might contain ; must be executed with QueryExecutor.QUERY_EXECUTE_AS_SIMPLE mode. "
        + "Given query is " + query.getNativeSql();
    // Per https://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY
    // A Bind message can use the unnamed prepared statement to create a named portal.
    // If the Bind is successful, an Execute message can reference that named portal until either
    // the end of the current transaction
    // or the named portal is explicitly destroyed
    boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
    boolean noMeta = (flags & QueryExecutor.QUERY_NO_METADATA) != 0;
    boolean describeOnly = (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) != 0;
    boolean oneShot = (flags & QueryExecutor.QUERY_ONESHOT) != 0;
    // extended queries always use a portal
    // the usePortal flag controls whether or not we use a *named* portal
    boolean usePortal = !oneShot ||
        (
            (flags & QueryExecutor.QUERY_FORWARD_CURSOR) != 0
            && !noResults
            && !noMeta
            && fetchSize > 0
            && !describeOnly
        );
    boolean noBinaryTransfer = (flags & QUERY_NO_BINARY_TRANSFER) != 0;
    boolean forceDescribePortal = (flags & QUERY_FORCE_DESCRIBE_PORTAL) != 0;
    boolean autoCommit = (flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) != 0;
    // Work out how many rows to fetch in this pass.
    int rows;
    if (noResults) {
      rows = 1; // We're discarding any results anyway, so limit data transfer to a minimum
    } else if (!usePortal || autoCommit) {
      rows = maxRows; // Not using a portal or auto-committing -- fetchSize is irrelevant
    } else if (maxRows != 0 && (enableFetchRingBuffer || fetchSize > maxRows)) {
      // fetchSize > maxRows, use maxRows (nb: fetchSize cannot be 0 if usePortal == true)
      rows = maxRows;
    } else {
      rows = (enableFetchRingBuffer)
          ? maxRows // Disable server cursor, when client cursor is enabled.
          : fetchSize; // maxRows > fetchSize
    }
    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, " FE=> OneQuery(rows=\"{0}\")", rows);
    }
    sendParse(query, params, oneShot);
    // Must do this after sendParse to pick up any changes to the
    // query's state.
    //
    boolean queryHasUnknown = query.hasUnresolvedTypes();
    boolean paramsHasUnknown = params.hasUnresolvedTypes();
    boolean describeStatement = describeOnly
        || (!oneShot && paramsHasUnknown && queryHasUnknown && !query.isStatementDescribed());
    if (!describeStatement && paramsHasUnknown && !queryHasUnknown) {
      int[] queryOIDs = query.getPrepareTypes();
      int[] paramOIDs = params.getTypeOIDs();
      for (int i = 0; i < paramOIDs.length; i++) {
        // Only supply type information when there isn't any
        // already, don't arbitrarily overwrite user supplied
        // type information.
        if (paramOIDs[i] == Oid.UNSPECIFIED) {
          params.setResolvedType(i + 1, queryOIDs[i]);
        }
      }
    }
    if (describeStatement) {
      sendDescribeStatement(query, params, describeOnly);
      if (describeOnly) {
        return;
      }
    }
    // Construct a new portal if needed.
    Portal portal = null;
    if (usePortal)
    {
      // Portal names are made unique per connection via a counter plus nanoTime.
      String portalName = "C_" + (nextUniqueID++) + "-" + System.nanoTime();
      portal = new Portal(query, portalName);
    }
    sendBind(query, params, portal, noBinaryTransfer);
    // A statement describe will also output a RowDescription,
    // so don't reissue it here if we've already done so.
    //
    if (!noMeta && !describeStatement) {
      /*
       * don't send describe if we already have cached the row description from previous executions
       *
       * XXX Clearing the fields / unpreparing the query (in sendParse) is incorrect, see bug #267.
       * We might clear the cached fields in a later execution of this query if the bind parameter
       * types change, but we're assuming here that they'll still be valid when we come to process
       * the results of this query, so we don't send a new describe here. We re-describe after the
       * fields are cleared, but the result of that gets processed after processing the results from
       * earlier executions that we didn't describe because we didn't think we had to.
       *
       * To work around this, force a Describe at each execution in batches where this can be a
       * problem. It won't cause more round trips so the performance impact is low, and it'll ensure
       * that the field information available when we decoded the results. This is undeniably a
       * hack, but there aren't many good alternatives.
       */
      if (!query.isPortalDescribed() || forceDescribePortal) {
        sendDescribePortal(query, portal);
      }
    }
    sendExecute(query, portal, rows);
  }
  /**
   * Sends the query using the simple protocol as a single Query message ('Q') containing
   * the full query text with parameter values substituted inline, then flushes the
   * stream. The pending execute/describe queues are primed so the response messages
   * can be correlated.
   *
   * @param query the query to execute
   * @param params parameter values substituted into the query text
   * @throws IOException if an I/O error occurs while writing to the socket
   */
  private void sendSimpleQuery(SimpleQuery query, SimpleParameterList params) throws IOException {
    String nativeSql = query.toString(params);
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " FE=> SimpleQuery(query=\"{0}\")", QuerySanitizer.filterCredentials(nativeSql));
    Encoding encoding = pgStream.getEncoding();
    byte[] encoded = encoding.encode(nativeSql);
    pgStream.sendChar('Q');
    pgStream.sendInteger4(encoded.length + 4 + 1); // message size = length field + text + NUL
    pgStream.send(encoded);
    pgStream.sendChar(0); // query text terminator
    pgStream.flush();
    pendingExecuteQueue.add(new ExecuteRequest(query, null, true));
    pendingDescribePortalQueue.add(query);
  }
//
// Garbage collection of parsed statements.
//
// When a statement is successfully parsed, registerParsedQuery is called.
// This creates a PhantomReference referring to the "owner" of the statement
// (the originating Query object) and inserts that reference as a key in
// parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
// statement names. The originating Query object also holds a reference to the
// PhantomReference.
//
// When the owning Query object is closed, it enqueues and clears the associated
// PhantomReference.
//
// If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
// being closed, the corresponding PhantomReference is enqueued on
// parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
// when a GC occurs, so this is not necessarily prompt but should eventually happen.
//
// Periodically (currently, just before query execution), the parsedQueryCleanupQueue
// is polled. For each enqueued PhantomReference we find, we remove the corresponding
// entry from parsedQueryMap, obtaining the name of the underlying statement in the
// process. Then we send a message to the backend to deallocate that statement.
//
  // Maps each registered statement's cleanup PhantomReference to the backend
  // statement name allocated at Parse time.
  private final HashMap<PhantomReference<SimpleQuery>, String> parsedQueryMap =
      new HashMap<PhantomReference<SimpleQuery>, String>();
  // PhantomReferences are enqueued here once their owning SimpleQuery becomes
  // unreachable (or is explicitly closed), triggering statement deallocation.
  private final ReferenceQueue<SimpleQuery> parsedQueryCleanupQueue =
      new ReferenceQueue<SimpleQuery>();
private void registerParsedQuery(SimpleQuery query, String statementName) {
if (statementName == null) {
return;
}
PhantomReference<SimpleQuery> cleanupRef =
new PhantomReference<SimpleQuery>(query, parsedQueryCleanupQueue);
parsedQueryMap.put(cleanupRef, statementName);
query.setCleanupRef(cleanupRef);
}
public void closeStatementAndPortal() {
synchronized(this) {
// First, send CloseStatements for finalized SimpleQueries that had statement names assigned.
try {
processDeadParsedQueries();
processDeadPortals();
// sendCloseStatement(null);
// sendClosePortal("unnamed");
sendFlush();
sendSync(false);
// Read SYNC response
processSyncOnClose();
} catch (IOException e) {
// Ignore the error
if (RedshiftLogger.isEnable()) {
logger.logError(e);
}
} catch (SQLException sqe) {
// Ignore the error
if (RedshiftLogger.isEnable()) {
logger.logError(sqe);
}
}
} // synchronized
}
private void processDeadParsedQueries() throws IOException {
Reference<? extends SimpleQuery> deadQuery;
while ((deadQuery = parsedQueryCleanupQueue.poll()) != null) {
String statementName = parsedQueryMap.remove(deadQuery);
sendCloseStatement(statementName);
deadQuery.clear();
}
}
//
// Essentially the same strategy is used for the cleanup of portals.
// Note that each Portal holds a reference to the corresponding Query
// that generated it, so the Query won't be collected (and the statement
// closed) until all the Portals are, too. This is required by the mechanics
// of the backend protocol: when a statement is closed, all dependent portals
// are also closed.
//
  // Maps each open portal's cleanup PhantomReference to its portal name so the
  // portal can be closed on the backend after its owner is garbage collected.
  private final HashMap<PhantomReference<Portal>, String> openPortalMap =
      new HashMap<PhantomReference<Portal>, String>();
  // PhantomReferences are enqueued here once their owning Portal becomes unreachable.
  private final ReferenceQueue<Portal> openPortalCleanupQueue = new ReferenceQueue<Portal>();
  // Shared sentinel for the protocol's unnamed portal; never registered for cleanup.
  private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed");
private void registerOpenPortal(Portal portal) {
if (portal == UNNAMED_PORTAL) {
return; // Using the unnamed portal.
}
String portalName = portal.getPortalName();
PhantomReference<Portal> cleanupRef =
new PhantomReference<Portal>(portal, openPortalCleanupQueue);
openPortalMap.put(cleanupRef, portalName);
portal.setCleanupRef(cleanupRef);
}
private void processDeadPortals() throws IOException {
Reference<? extends Portal> deadPortal;
while ((deadPortal = openPortalCleanupQueue.poll()) != null) {
String portalName = openPortalMap.remove(deadPortal);
sendClosePortal(portalName);
deadPortal.clear();
}
}
/**
* Check for a running ring buffer thread.
*
* @return returns true if Ring buffer thread is running, otherwise false.
*/
@Override
public boolean isRingBufferThreadRunning() {
return (m_ringBufferThread != null);
}
  /**
   * Close the last active ring buffer thread.
   *
   * @param queueRows the blocking queue of fetched rows associated with the thread
   * @param ringBufferThread the ring buffer thread to stop and wait for
   */
  @Override
  public void closeRingBufferThread(RedshiftRowsBlockingQueue<Tuple> queueRows, Thread ringBufferThread) {
    // Abort current ring buffer thread, if any.
    waitForRingBufferThreadToFinish(false, true, false, queueRows, ringBufferThread);
  }
  /**
   * Sends a cancel request for the currently executing query; delegates entirely
   * to the superclass implementation.
   *
   * @throws SQLException if the cancel request cannot be delivered
   */
  @Override
  public void sendQueryCancel() throws SQLException {
    super.sendQueryCancel();
  }
  /**
   * Consumes backend messages after a Sync issued during statement/portal cleanup,
   * until ReadyForQuery is received. ErrorResponses are accumulated (chained via
   * setNextException) and thrown once the loop completes; notices are recorded as
   * warnings.
   *
   * @throws IOException if an I/O error occurs or an unexpected message type arrives
   * @throws SQLException if the backend reported one or more errors
   */
  protected void processSyncOnClose() throws IOException, SQLException {
    int c;
    boolean endQuery = false;
    SQLException error = null;
    while (!endQuery) {
      c = pgStream.receiveChar();
      switch (c) {
        case 'A': // Asynchronous Notify
          receiveAsyncNotify();
          break;
        case '3': // Close Complete (response to Close)
          pgStream.receiveInteger4(); // len, discarded
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE CloseComplete");
          break;
        case 'E':
          // Error Response (response to pretty much everything; backend then skips until Sync)
          SQLException newError = receiveErrorResponse(true);
          if (error == null) {
            error = newError;
          } else {
            error.setNextException(newError);
          }
          // keep processing
          break;
        case 'N': // Notice Response (warnings / info)
          SQLWarning warning = receiveNoticeResponse();
          addWarning(warning);
          break;
        case 'Z': // Ready For Query (eventual response to Sync)
          receiveRFQ();
          pendingExecuteQueue.clear(); // No more query executions expected.
          endQuery = true;
          break;
        default:
          throw new IOException("Unexpected packet type: " + c);
      } // switch
    }// while loop
    // did we get an error during this query?
    if (error != null) {
      throw error;
    }
  }
  /**
   * Processes backend result messages with the row counter starting at zero.
   * Convenience overload delegating to the six-argument variant.
   *
   * @param handler receiver for result rows, statuses, warnings and errors
   * @param flags bitmask of QueryExecutor.QUERY_* flags
   * @param fetchSize rows per fetch batch
   * @param subQueries true when multiple result sets are expected
   * @param maxRows maximum rows to deliver; 0 means unlimited
   * @throws IOException if an I/O error occurs while reading from the socket
   */
  protected void processResults(ResultHandler handler, int flags, int fetchSize, boolean subQueries, int maxRows) throws IOException {
    processResults(handler, flags, fetchSize, subQueries, 0, maxRows);
  }
protected void processResults(ResultHandler handler, int flags, int fetchSize, boolean subQueries, int initRowCount, int maxRows) throws IOException {
MessageLoopState msgLoopState = new MessageLoopState();
int[] rowCount = new int[1];
rowCount[0] = initRowCount;
// Process messages on the same application main thread.
processResultsOnThread(handler, flags, fetchSize, msgLoopState, subQueries, rowCount, maxRows);
}
private void processResultsOnThread(ResultHandler handler,
int flags, int fetchSize,
MessageLoopState msgLoopState,
boolean subQueries,
int[] rowCount,
int maxRows) throws IOException {
boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
boolean bothRowsAndStatus = (flags & QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS) != 0;
boolean useRingBuffer = enableFetchRingBuffer
&& (!handler.wantsScrollableResultSet()) // Scrollable cursor
&& (!subQueries) // Multiple results
&& (!bothRowsAndStatus); // RETURNING clause
List<Tuple> tuples = null;
int c;
boolean endQuery = false;
if (RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " useRingBuffer={0}, handler.wantsScrollableResultSet()={1}, subQueries={2}, bothRowsAndStatus={3}",
new Object[]{useRingBuffer, handler.wantsScrollableResultSet(), subQueries, bothRowsAndStatus});
}
while (!endQuery) {
c = pgStream.receiveChar();
switch (c) {
case 'A': // Asynchronous Notify
receiveAsyncNotify();
break;
case '1': // Parse Complete (response to Parse)
pgStream.receiveInteger4(); // len, discarded
SimpleQuery parsedQuery = pendingParseQueue.removeFirst();
String parsedStatementName = parsedQuery.getStatementName();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE ParseComplete [{0}]", parsedStatementName);
break;
case 't': { // ParameterDescription
pgStream.receiveInteger4(); // len, discarded
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE ParameterDescription");
DescribeRequest describeData = pendingDescribeStatementQueue.getFirst();
SimpleQuery query = describeData.query;
SimpleParameterList params = describeData.parameterList;
boolean describeOnly = describeData.describeOnly;
// This might differ from query.getStatementName if the query was re-prepared
String origStatementName = describeData.statementName;
int numParams = pgStream.receiveInteger2();
for (int i = 1; i <= numParams; i++) {
int typeOid = pgStream.receiveInteger4();
params.setResolvedType(i, typeOid);
}
// Since we can issue multiple Parse and DescribeStatement
// messages in a single network trip, we need to make
// sure the describe results we requested are still
// applicable to the latest parsed query.
//
if ((origStatementName == null && query.getStatementName() == null)
|| (origStatementName != null
&& origStatementName.equals(query.getStatementName()))) {
query.setPrepareTypes(params.getTypeOIDs());
}
if (describeOnly) {
msgLoopState.doneAfterRowDescNoData = true;
} else {
pendingDescribeStatementQueue.removeFirst();
}
break;
}
case '2': // Bind Complete (response to Bind)
pgStream.receiveInteger4(); // len, discarded
Portal boundPortal = pendingBindQueue.removeFirst();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE BindComplete [{0}]", boundPortal);
registerOpenPortal(boundPortal);
break;
case '3': // Close Complete (response to Close)
pgStream.receiveInteger4(); // len, discarded
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE CloseComplete");
break;
case 'n': // No Data (response to Describe)
pgStream.receiveInteger4(); // len, discarded
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE NoData");
pendingDescribePortalQueue.removeFirst();
if (msgLoopState.doneAfterRowDescNoData) {
DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
SimpleQuery currentQuery = describeData.query;
Field[] fields = currentQuery.getFields();
if (fields != null) { // There was a resultset.
tuples = new ArrayList<Tuple>();
handler.handleResultRows(currentQuery, fields, tuples, null, null, rowCount, null);
tuples = null;
msgLoopState.queueTuples = null;
}
}
break;
case 's': { // Portal Suspended (end of Execute)
// nb: this appears *instead* of CommandStatus.
// Must be a SELECT if we suspended, so don't worry about it.
pgStream.receiveInteger4(); // len, discarded
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE PortalSuspended");
ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
SimpleQuery currentQuery = executeData.query;
Portal currentPortal = executeData.portal;
Field[] fields = currentQuery.getFields();
if (fields != null
&& (tuples == null
&& msgLoopState.queueTuples == null)) {
// When no results expected, pretend an empty resultset was returned
// Not sure if new ArrayList can be always replaced with emptyList
tuples = noResults ? Collections.<Tuple>emptyList() : new ArrayList<Tuple>();
}
if (msgLoopState.queueTuples != null) {
// Mark end of result
try {
msgLoopState.queueTuples.checkAndAddEndOfRowsIndicator(currentPortal);
}
catch (InterruptedException ie) {
// Handle interrupted exception
handler.handleError(
new RedshiftException(GT.tr("Interrupted exception retrieving query results."),
RedshiftState.UNEXPECTED_ERROR, ie));
}
}
else
handler.handleResultRows(currentQuery, fields, tuples, currentPortal, null, rowCount, null);
tuples = null;
msgLoopState.queueTuples = null;
break;
}
case 'C': { // Command Status (end of Execute)
// Handle status.
String status = receiveCommandStatus();
if (isFlushCacheOnDeallocate()
&& (status.startsWith("DEALLOCATE ALL") || status.startsWith("DISCARD ALL"))) {
deallocateEpoch++;
}
msgLoopState.doneAfterRowDescNoData = false;
ExecuteRequest executeData = pendingExecuteQueue.peekFirst();
SimpleQuery currentQuery = executeData.query;
Portal currentPortal = executeData.portal;
String nativeSql = currentQuery.getNativeQuery().nativeSql;
// Certain backend versions (e.g. 12.2, 11.7, 10.12, 9.6.17, 9.5.21, etc)
// silently rollback the transaction in the response to COMMIT statement
// in case the transaction has failed.
// See discussion in pgsql-hackers: https://www.postgresql.org/message-id/b9fb50dc-0f6e-15fb-6555-8ddb86f4aa71%40postgresfriends.org
if (isRaiseExceptionOnSilentRollback()
&& handler.getException() == null
&& status.startsWith("ROLLBACK")) {
String message = null;
if (looksLikeCommit(nativeSql)) {
if (transactionFailCause == null) {
message = GT.tr("The database returned ROLLBACK, so the transaction cannot be committed. Transaction failure is not known (check server logs?)");
} else {
message = GT.tr("The database returned ROLLBACK, so the transaction cannot be committed. Transaction failure cause is <<{0}>>", transactionFailCause.getMessage());
}
} else if (looksLikePrepare(nativeSql)) {
if (transactionFailCause == null) {
message = GT.tr("The database returned ROLLBACK, so the transaction cannot be prepared. Transaction failure is not known (check server logs?)");
} else {
message = GT.tr("The database returned ROLLBACK, so the transaction cannot be prepared. Transaction failure cause is <<{0}>>", transactionFailCause.getMessage());
}
}
if (message != null) {
handler.handleError(
new RedshiftException(
message, RedshiftState.IN_FAILED_SQL_TRANSACTION, transactionFailCause));
}
}
if (status.startsWith("SET")) {
// Scan only the first 1024 characters to
// avoid big overhead for long queries.
if (nativeSql.lastIndexOf("search_path", 1024) != -1
&& !nativeSql.equals(lastSetSearchPathQuery)) {
// Search path was changed, invalidate prepared statement cache
lastSetSearchPathQuery = nativeSql;
deallocateEpoch++;
}
}
if (!executeData.asSimple) {
pendingExecuteQueue.removeFirst();
} else {
// For simple 'Q' queries, executeQueue is cleared via ReadyForQuery message
}
// we want to make sure we do not add any results from these queries to the result set
if (currentQuery == autoSaveQuery
|| currentQuery == releaseAutoSave) {
// ignore "SAVEPOINT" or RELEASE SAVEPOINT status from autosave query
break;
}
Field[] fields = currentQuery.getFields();
if (fields != null
&& (tuples == null
&& msgLoopState.queueTuples == null)) {
// When no results expected, pretend an empty resultset was returned
// Not sure if new ArrayList can be always replaced with emptyList
tuples = noResults ? Collections.<Tuple>emptyList() : new ArrayList<Tuple>();
}
// If we received tuples we must know the structure of the
// resultset, otherwise we won't be able to fetch columns
// from it, etc, later.
if (fields == null
&& (tuples != null
|| msgLoopState.queueTuples != null)) {
throw new IllegalStateException(
"Received resultset tuples, but no field structure for them");
}
if (fields != null
|| (tuples != null
|| msgLoopState.queueTuples != null)) {
// There was a resultset.
if (msgLoopState.queueTuples == null)
handler.handleResultRows(currentQuery, fields, tuples, null, null, rowCount, null);
else {
// Mark end of result
try {
msgLoopState.queueTuples.checkAndAddEndOfRowsIndicator();
} catch (InterruptedException ie) {
// Handle interrupted exception
handler.handleError(
new RedshiftException(GT.tr("Interrupted exception retrieving query results."),
RedshiftState.UNEXPECTED_ERROR, ie));
}
}
tuples = null;
msgLoopState.queueTuples = null;
rowCount = new int[1]; // Allocate for the next resultset
if (bothRowsAndStatus) {
interpretCommandStatus(status, handler);
}
} else {
interpretCommandStatus(status, handler);
}
if (executeData.asSimple) {
// Simple queries might return several resultsets, thus we clear
// fields, so queries like "select 1;update; select2" will properly
// identify that "update" did not return any results
currentQuery.setFields(null);
}
if (currentPortal != null) {
currentPortal.close();
}
break;
}
case 'D': // Data Transfer (ongoing Execute response)
boolean skipRow = false;
Tuple tuple = null;
try {
tuple = pgStream.receiveTupleV3();
} catch (OutOfMemoryError oome) {
if (!noResults) {
handler.handleError(
new RedshiftException(GT.tr("Ran out of memory retrieving query results."),
RedshiftState.OUT_OF_MEMORY, oome));
}
} catch (SQLException e) {
handler.handleError(e);
}
if (!noResults) {
if(rowCount != null) {
if(maxRows > 0 && rowCount[0] >= maxRows) {
// Ignore any more rows until server fix not to send more rows than max rows.
skipRow = true;
}
else
rowCount[0] += 1;
}
if (useRingBuffer) {
boolean firstRow = false;
if (msgLoopState.queueTuples == null) {
// i.e. First row
firstRow = true;
msgLoopState.queueTuples = new RedshiftRowsBlockingQueue<Tuple>(fetchSize, fetchRingBufferSize, logger);
}
// Add row in the queue
if(!skipRow) {
try {
msgLoopState.queueTuples.put(tuple);
} catch (InterruptedException ie) {
// Handle interrupted exception
handler.handleError(
new RedshiftException(GT.tr("Interrupted exception retrieving query results."),
RedshiftState.UNEXPECTED_ERROR, ie));
}
}
if(firstRow) {
// There was a resultset.
ExecuteRequest executeData = pendingExecuteQueue.peekFirst();
SimpleQuery currentQuery = executeData.query;
Field[] fields = currentQuery.getFields();
// Create a new ring buffer thread to process rows
m_ringBufferThread = new RingBufferThread(handler, flags, fetchSize, msgLoopState, subQueries, rowCount, maxRows);
handler.handleResultRows(currentQuery, fields, null, null, msgLoopState.queueTuples, rowCount, m_ringBufferThread);
if (RedshiftLogger.isEnable()) {
int length;
if (tuple == null) {
length = -1;
} else {
length = tuple.length();
}
logger.log(LogLevel.DEBUG, " <=BE DataRow(len={0})", length);
}
// Start the ring buffer thread
m_ringBufferThread.start();
// Return to break the message loop on the application thread
return;
}
else
if(m_ringBufferStopThread)
return; // Break the ring buffer thread loop
}
else {
if (tuples == null) {
tuples = new ArrayList<Tuple>();
}
if(!skipRow)
tuples.add(tuple);
}
}
if (RedshiftLogger.isEnable()) {
int length;
if (tuple == null) {
length = -1;
} else {
length = tuple.length();
}
logger.log(LogLevel.DEBUG, " <=BE DataRow(len={0})", length);
if (skipRow) {
logger.log(LogLevel.DEBUG, " skipRow={0}, rowCount = {1}, maxRows = {2}"
, skipRow, (rowCount!= null) ? rowCount[0] : 0, maxRows);
}
}
break;
case 'E':
// Error Response (response to pretty much everything; backend then skips until Sync)
SQLException error = receiveErrorResponse(false);
handler.handleError(error);
if (willHealViaReparse(error)) {
// prepared statement ... is not valid kind of error
// Technically speaking, the error is unexpected, thus we invalidate other
// server-prepared statements just in case.
deallocateEpoch++;
if (RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " FE: received {0}, will invalidate statements. deallocateEpoch is now {1}",
new Object[]{error.getSQLState(), deallocateEpoch});
}
}
// keep processing
break;
case 'I': { // Empty Query (end of Execute)
pgStream.receiveInteger4();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE EmptyQuery");
ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
Portal currentPortal = executeData.portal;
handler.handleCommandStatus("EMPTY", 0, 0);
if (currentPortal != null) {
currentPortal.close();
}
break;
}
case 'N': // Notice Response
SQLWarning warning = receiveNoticeResponse();
handler.handleWarning(warning);
break;
case 'S': // Parameter Status
try {
receiveParameterStatus();
} catch (SQLException e) {
handler.handleError(e);
endQuery = true;
}
break;
case 'T': // Row Description (response to Describe)
Field[] fields = receiveFields(serverProtocolVersion);
tuples = new ArrayList<Tuple>();
SimpleQuery query = pendingDescribePortalQueue.peekFirst();
if (!pendingExecuteQueue.isEmpty() && !pendingExecuteQueue.peekFirst().asSimple) {
pendingDescribePortalQueue.removeFirst();
}
query.setFields(fields);
if (msgLoopState.doneAfterRowDescNoData) {
DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
SimpleQuery currentQuery = describeData.query;
currentQuery.setFields(fields);
if (msgLoopState.queueTuples != null) {
// TODO: is this possible?
}
handler.handleResultRows(currentQuery, fields, tuples, null, null, rowCount, null);
tuples = null;
msgLoopState.queueTuples = null;
}
break;
case 'Z': // Ready For Query (eventual response to Sync)
receiveRFQ();
if (!pendingExecuteQueue.isEmpty() && pendingExecuteQueue.peekFirst().asSimple) {
if (msgLoopState.queueTuples != null) {
try {
msgLoopState.queueTuples.checkAndAddEndOfRowsIndicator();
} catch (InterruptedException ie) {
// Handle interrupted exception
handler.handleError(
new RedshiftException(GT.tr("Interrupted exception retrieving query results."),
RedshiftState.UNEXPECTED_ERROR, ie));
}
}
tuples = null;
msgLoopState.queueTuples = null;
pgStream.clearResultBufferCount();
ExecuteRequest executeRequest = pendingExecuteQueue.removeFirst();
// Simple queries might return several resultsets, thus we clear
// fields, so queries like "select 1;update; select2" will properly
// identify that "update" did not return any results
executeRequest.query.setFields(null);
pendingDescribePortalQueue.removeFirst();
if (!pendingExecuteQueue.isEmpty()) {
if (getTransactionState() == TransactionState.IDLE) {
handler.secureProgress();
}
// process subsequent results (e.g. for cases like batched execution of simple 'Q' queries)
break;
}
}
endQuery = true;
// Reset the statement name of Parses that failed.
while (!pendingParseQueue.isEmpty()) {
SimpleQuery failedQuery = pendingParseQueue.removeFirst();
failedQuery.unprepare();
}
pendingParseQueue.clear(); // No more ParseComplete messages expected.
// Pending "describe" requests might be there in case of error
// If that is the case, reset "described" status, so the statement is properly
// described on next execution
while (!pendingDescribeStatementQueue.isEmpty()) {
DescribeRequest request = pendingDescribeStatementQueue.removeFirst();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " FE marking setStatementDescribed(false) for query {0}", QuerySanitizer.filterCredentials(request.query.toString()));
request.query.setStatementDescribed(false);
}
while (!pendingDescribePortalQueue.isEmpty()) {
SimpleQuery describePortalQuery = pendingDescribePortalQueue.removeFirst();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " FE marking setPortalDescribed(false) for query {0}", QuerySanitizer.filterCredentials(describePortalQuery.toString()));
describePortalQuery.setPortalDescribed(false);
}
pendingBindQueue.clear(); // No more BindComplete messages expected.
pendingExecuteQueue.clear(); // No more query executions expected.
break;
case 'G': // CopyInResponse
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " <=BE CopyInResponse");
logger.log(LogLevel.DEBUG, " FE=> CopyFail");
}
// COPY sub-protocol is not implemented yet
// We'll send a CopyFail message for COPY FROM STDIN so that
// server does not wait for the data.
byte[] buf = Utils.encodeUTF8(COPY_ERROR_MESSAGE);
pgStream.sendChar('f');
pgStream.sendInteger4(buf.length + 4 + 1);
pgStream.send(buf);
pgStream.sendChar(0);
pgStream.flush();
sendSync(true); // send sync message
skipMessage(); // skip the response message
break;
case 'H': // CopyOutResponse
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE CopyOutResponse");
skipMessage();
// In case of CopyOutResponse, we cannot abort data transfer,
// so just throw an error and ignore CopyData messages
handler.handleError(
new RedshiftException(GT.tr(COPY_ERROR_MESSAGE),
RedshiftState.NOT_IMPLEMENTED));
break;
case 'c': // CopyDone
skipMessage();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE CopyDone");
break;
case 'd': // CopyData
skipMessage();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE CopyData");
break;
default:
throw new IOException("Unexpected packet type: " + c);
}
}
}
/**
* Ignore the response message by reading the message length and skipping over those bytes in the
* communication stream.
*/
void skipMessage() throws IOException {
int len = pgStream.receiveInteger4();
assert len >= 4 : "Length from skip message must be at least 4 ";
// skip len-4 (length includes the 4 bytes for message length itself
pgStream.skip(len - 4);
}
  /**
   * Fetch more rows from an open portal (cursor) left by a previous Execute
   * that stopped at its row limit.
   *
   * @param cursor the portal to resume; must be a {@code Portal}
   * @param handler sink for rows, statuses, and errors
   * @param fetchSize maximum number of rows to request in this round trip
   * @param initRowCount initial row count passed through to result processing
   * @throws SQLException if result processing reports an error
   */
  public void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize, int initRowCount)
      throws SQLException {
    // Wait for current ring buffer thread to finish, if any.
    // Shouldn't call from synchronized method, which can cause dead-lock.
    waitForRingBufferThreadToFinish(false, false, false, null, null);

    synchronized(this) {
      waitOnLock();
      try {
        // Serialize against any other execution on this connection.
        m_executingLock.lock();
        final Portal portal = (Portal) cursor;

        // Insert a ResultHandler that turns bare command statuses into empty datasets
        // (if the fetch returns no rows, we see just a CommandStatus..)
        final ResultHandler delegateHandler = handler;
        handler = new ResultHandlerDelegate(delegateHandler) {
          @Override
          public void handleCommandStatus(String status, long updateCount, long insertOID) {
            handleResultRows(portal.getQuery(), null, new ArrayList<Tuple>(), null, null, null, null);
          }
        };

        // Now actually run it.
        try {
          // Release server resources for queries/portals closed on the client.
          processDeadParsedQueries();
          processDeadPortals();

          sendExecute(portal.getQuery(), portal, fetchSize);
          sendFlush();
          sendSync(true);

          processResults(handler, 0, fetchSize, (portal.getQuery().getSubqueries() != null), initRowCount);
          // Sync consumed everything buffered so far; reset the estimate.
          estimatedReceiveBufferBytes = 0;
        } catch (IOException e) {
          // The connection is unusable after an I/O failure mid-protocol.
          abort();
          handler.handleError(
              new RedshiftException(GT.tr("An I/O error occurred while sending to the backend."),
                  RedshiftState.CONNECTION_FAILURE, e));
        }

        handler.handleCompletion();
      }
      finally {
        m_executingLock.unlock();
      }
    } // synchronized
  }
  /**
   * Receive a RowDescription ('T') message and build the {@link Field}
   * metadata for each result column.
   *
   * <p>On servers at or above EXTENDED_RESULT_METADATA_SERVER_PROTOCOL_VERSION
   * each column additionally carries schema/table/column/catalog names plus a
   * packed flag word: bit 0 = nullability, bit 4 = autoincrement,
   * bit 8 = read-only, bit 12 = searchable, and (on EXTENDED2-capable
   * servers) bit 1 = case sensitivity.</p>
   *
   * @param serverProtocolVersion negotiated protocol version; decides whether
   *        extended metadata follows each column descriptor
   * @return one Field per result column, in wire order
   * @throws IOException if reading from the connection stream fails
   */
  private Field[] receiveFields(int serverProtocolVersion) throws IOException {
    pgStream.receiveInteger4(); // MESSAGE SIZE (not used)
    int size = pgStream.receiveInteger2(); // number of columns
    Field[] fields = new Field[size];

    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, " <=BE RowDescription({0})", size);
    }

    for (int i = 0; i < fields.length; i++) {
      // Fixed-order per-column descriptor; the read order must match the wire.
      String columnLabel = pgStream.receiveString();
      int tableOid = pgStream.receiveInteger4();
      short positionInTable = (short) pgStream.receiveInteger2();
      int typeOid = pgStream.receiveInteger4();
      int typeLength = pgStream.receiveInteger2();
      int typeModifier = pgStream.receiveInteger4();
      int formatType = pgStream.receiveInteger2();
      fields[i] = new Field(columnLabel,
          typeOid, typeLength, typeModifier, tableOid, positionInTable);
      fields[i].setFormat(formatType);

      if (serverProtocolVersion >= ConnectionFactoryImpl.EXTENDED_RESULT_METADATA_SERVER_PROTOCOL_VERSION) {
        // Read extended resultset metadata
        String schemaName = pgStream.receiveString();
        String tableName = pgStream.receiveString();
        String columnName = pgStream.receiveString();
        String catalogName = pgStream.receiveString();
        int temp = pgStream.receiveInteger2();
        // Unpack the flag word (see method javadoc for bit positions).
        int nullable = temp & 0x1;
        int autoincrement = (temp >> 4) & 0x1;
        int readOnly = (temp >> 8) & 0x1;
        int searchable = (temp >> 12) & 0x1;
        int caseSensitive = 0;
        if (serverProtocolVersion >= ConnectionFactoryImpl.EXTENDED2_RESULT_METADATA_SERVER_PROTOCOL_VERSION) {
          caseSensitive = (temp >> 1) & 0x1;
        }

        // NOTE(review): a set nullability bit maps to columnNoNulls here,
        // i.e. the wire bit appears to mean NOT NULL — confirm against the
        // server protocol documentation.
        fields[i].setMetadata(new FieldMetadata(columnName,
            tableName,
            schemaName,
            (nullable == 1) ? ResultSetMetaData.columnNoNulls
                : ResultSetMetaData.columnNullable,
            (autoincrement != 0),
            catalogName,
            (readOnly != 0),
            (searchable != 0),
            (caseSensitive != 0)
        ));
      }

      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, " {0}", fields[i]);
    }

    return fields;
  }
void receiveAsyncNotify() throws IOException {
int len = pgStream.receiveInteger4(); // MESSAGE SIZE
assert len > 4 : "Length for AsyncNotify must be at least 4";
int pid = pgStream.receiveInteger4();
String msg = pgStream.receiveString();
String param = pgStream.receiveString();
addNotification(new com.amazon.redshift.core.Notification(msg, pid, param));
if (RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " <=BE AsyncNotify({0},{1},{2})", new Object[]{pid, msg, param});
}
}
  /**
   * Consume an ErrorResponse ('E') message and convert it into a
   * {@link RedshiftException}. The exception is returned, not thrown, so the
   * caller decides how to surface it.
   *
   * @param calledFromClose true when invoked while closing the connection; in
   *        that case the error is not recorded as the transaction failure cause
   * @return the server error as an exception, with any earlier transaction
   *         failure chained as its cause
   * @throws IOException if reading from the connection stream fails
   */
  SQLException receiveErrorResponse(boolean calledFromClose) throws IOException {
    // it's possible to get more than one error message for a query
    // see libpq comments wrt backend closing a connection
    // so, append messages to a string buffer and keep processing
    // check at the bottom to see if we need to throw an exception

    int elen = pgStream.receiveInteger4();
    assert elen > 4 : "Error response length must be greater than 4";

    EncodingPredictor.DecodeResult totalMessage = pgStream.receiveErrorString(elen - 4);
    ServerErrorMessage errorMsg = new ServerErrorMessage(totalMessage);

    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, " <=BE ErrorMessage({0})", errorMsg.toString());
    }

    RedshiftException error = new RedshiftException(errorMsg, this.logServerErrorDetail);
    if(!calledFromClose) {
      // Remember the first error of the transaction as the failure cause and
      // chain subsequent errors onto it.
      if (transactionFailCause == null) {
        transactionFailCause = error;
      } else {
        error.initCause(transactionFailCause);
      }
    }
    return error;
  }
SQLWarning receiveNoticeResponse() throws IOException {
int nlen = pgStream.receiveInteger4();
assert nlen > 4 : "Notice Response length must be greater than 4";
ServerErrorMessage warnMsg = new ServerErrorMessage(pgStream.receiveString(nlen - 4));
if (RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " <=BE NoticeResponse({0})", warnMsg.toString());
}
return new RedshiftWarning(warnMsg);
}
String receiveCommandStatus() throws IOException {
// TODO: better handle the msg len
int len = pgStream.receiveInteger4();
// read len -5 bytes (-4 for len and -1 for trailing \0)
String status = pgStream.receiveString(len - 5);
// now read and discard the trailing \0
pgStream.receiveChar(); // Receive(1) would allocate new byte[1], so avoid it
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE CommandStatus({0})", status);
return status;
}
private void interpretCommandStatus(String status, ResultHandler handler) {
try {
commandCompleteParser.parse(status);
} catch (SQLException e) {
handler.handleError(e);
return;
}
long oid = commandCompleteParser.getOid();
long count = commandCompleteParser.getRows();
handler.handleCommandStatus(status, count, oid);
}
void receiveRFQ() throws IOException {
if (pgStream.receiveInteger4() != 5) {
throw new IOException("unexpected length of ReadyForQuery message");
}
char tStatus = (char) pgStream.receiveChar();
if (RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " <=BE ReadyForQuery({0})", tStatus);
}
// Update connection state.
switch (tStatus) {
case 'I':
transactionFailCause = null;
setTransactionState(TransactionState.IDLE);
break;
case 'T':
transactionFailCause = null;
setTransactionState(TransactionState.OPEN);
break;
case 'E':
setTransactionState(TransactionState.FAILED);
break;
default:
throw new IOException(
"unexpected transaction state in ReadyForQuery message: " + (int) tStatus);
}
}
  /**
   * Send the protocol Terminate ('X') message, after stopping any active
   * ring buffer thread so it cannot race with connection shutdown.
   */
  @Override
  protected void sendCloseMessage() throws IOException {
    // Wait for current ring buffer thread to finish, if any.
    waitForRingBufferThreadToFinish(true, false, false, null, null);

    pgStream.sendChar('X');
    pgStream.sendInteger4(4); // message length: just the length word itself
  }
  /**
   * Drain the startup-phase message stream until the backend reports
   * ReadyForQuery ('Z').
   *
   * <p>Handles BackendKeyData ('K'), errors ('E'), notices ('N') and
   * ParameterStatus ('S'). The loop is capped at 1000 messages so a
   * misbehaving server cannot stall connection setup forever.</p>
   *
   * @throws IOException if reading from the socket fails
   * @throws SQLException on a server error or protocol violation
   */
  public void readStartupMessages() throws IOException, SQLException {
    for (int i = 0; i < 1000; i++) {
      int beresp = pgStream.receiveChar();
      switch (beresp) {
        case 'Z':
          receiveRFQ();
          // Ready For Query; we're done.
          return;

        case 'K':
          // BackendKeyData: pid + cancellation key used for cancel requests.
          int msgLen = pgStream.receiveInteger4();
          if (msgLen != 12) {
            throw new RedshiftException(GT.tr("Protocol error. Session setup failed."),
                RedshiftState.PROTOCOL_VIOLATION);
          }

          int pid = pgStream.receiveInteger4();
          int ckey = pgStream.receiveInteger4();

          if (RedshiftLogger.isEnable()) {
            logger.log(LogLevel.DEBUG, " <=BE BackendKeyData(pid={0},ckey={1})", new Object[]{pid, ckey});
          }

          setBackendKeyData(pid, ckey);
          break;

        case 'E':
          // Error during startup is fatal for the session.
          throw receiveErrorResponse(false);

        case 'N':
          // Warning: collect, keep going.
          addWarning(receiveNoticeResponse());
          break;

        case 'S':
          // ParameterStatus
          receiveParameterStatus();

          break;

        default:
          if (RedshiftLogger.isEnable()) {
            logger.log(LogLevel.DEBUG, " invalid message type={0}", (char) beresp);
          }
          throw new RedshiftException(GT.tr("Protocol error. Session setup failed."),
              RedshiftState.PROTOCOL_VIOLATION);
      }
    }
    // Too many messages without ReadyForQuery: give up.
    throw new RedshiftException(GT.tr("Protocol error. Session setup failed."),
        RedshiftState.PROTOCOL_VIOLATION);
  }
  /**
   * Receive a ParameterStatus ('S') message and apply the reported session
   * setting.
   *
   * <p>Most parameters are simply recorded; a few are load-bearing for the
   * driver and cause the connection to be closed with an error when they take
   * a value the driver cannot operate with (client_encoding other than UTF8,
   * non-ISO DateStyle, unrecognized standard_conforming_strings).</p>
   *
   * @throws IOException if reading from the connection stream fails
   * @throws SQLException if a reported setting makes the session unusable
   */
  public void receiveParameterStatus() throws IOException, SQLException {
    // ParameterStatus
    pgStream.receiveInteger4(); // MESSAGE SIZE (not used)
    String name = pgStream.receiveString();
    String value = pgStream.receiveString();

    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, " <=BE ParameterStatus({0} = {1})", new Object[]{name, value});
    }

    /* Update client-visible parameter status map for getParameterStatuses() */
    if (name != null && !name.equals("")) {
      onParameterStatus(name, value);
    }
    // NOTE(review): the null guard above is not carried through — name is
    // dereferenced unconditionally below. receiveString() presumably never
    // returns null; confirm, or hoist the guard around the rest of the method.

    if (name.equals("client_encoding")) {
      if (allowEncodingChanges) {
        // Accept the change, but warn when it is not UTF8.
        if (!value.equalsIgnoreCase("UTF8") && !value.equalsIgnoreCase("UTF-8")) {
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG,
              "Redshift jdbc expects client_encoding to be UTF8 for proper operation. Actual encoding is {0}",
              value);
        }
        pgStream.setEncoding(Encoding.getDatabaseEncoding(value, logger));
      } else if (!value.equalsIgnoreCase("UTF8") && !value.equalsIgnoreCase("UTF-8")) {
        close(); // we're screwed now; we can't trust any subsequent string.
        throw new RedshiftException(GT.tr(
            "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.",
            value), RedshiftState.CONNECTION_FAILURE);
      }
    }

    // The first startsWith("ISO") is subsumed by the case-insensitive check
    // that follows; kept as-is since this is a doc-only pass.
    if (name.equals("DateStyle") && !value.startsWith("ISO")
        && !value.toUpperCase().startsWith("ISO")) {
      close(); // we're screwed now; we can't trust any subsequent date.
      throw new RedshiftException(GT.tr(
          "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.",
          value), RedshiftState.CONNECTION_FAILURE);
    }

    if (name.equals("standard_conforming_strings")) {
      if (value.equals("on")) {
        setStandardConformingStrings(true);
      } else if (value.equals("off")) {
        setStandardConformingStrings(false);
      } else {
        close();
        // we're screwed now; we don't know how to escape string literals
        throw new RedshiftException(GT.tr(
            "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.",
            value), RedshiftState.CONNECTION_FAILURE);
      }
      return;
    }

    // Remaining parameters are simply recorded on the connection.
    if ("TimeZone".equals(name)) {
      setTimeZone(TimestampUtils.parseBackendTimeZone(value));
    } else if ("application_name".equals(name)) {
      setApplicationName(value);
    } else if ("server_version_num".equals(name)) {
      setServerVersionNum(Integer.parseInt(value));
    } else if ("server_version".equals(name)) {
      setServerVersion(value);
    } else if ("server_protocol_version".equals(name)) {
      setServerProtocolVersion(value);
    } else if ("integer_datetimes".equals(name)) {
      if ("on".equals(value)) {
        setIntegerDateTimes(true);
      } else if ("off".equals(value)) {
        setIntegerDateTimes(false);
      } else {
        throw new RedshiftException(GT.tr("Protocol error. Session setup failed."),
            RedshiftState.PROTOCOL_VIOLATION);
      }
    }
    else if ("datashare_enabled".equals(name)) {
      if ("on".equals(value)) {
        setDatashareEnabled(true);
      }
      else if ("off".equals(value)) {
        setDatashareEnabled(false);
      }
      else {
        throw new RedshiftException(GT.tr("Protocol error. Session setup failed. Invalid value of datashare_enabled parameter. Only on/off are valid values"),
            RedshiftState.PROTOCOL_VIOLATION);
      }
    } // enable_redshift_federation
  }
  /** Remember the session time zone reported by the server. */
  public void setTimeZone(TimeZone timeZone) {
    this.timeZone = timeZone;
  }

  /** Session time zone last reported by the server, or null if none yet. */
  public TimeZone getTimeZone() {
    return timeZone;
  }

  /** Remember the application_name reported by the server. */
  public void setApplicationName(String applicationName) {
    this.applicationName = applicationName;
  }

  /** Application name for this session; never null (empty string if unset). */
  public String getApplicationName() {
    if (applicationName == null) {
      return "";
    }
    return applicationName;
  }
  /** The replication sub-protocol implementation for this connection. */
  @Override
  public ReplicationProtocol getReplicationProtocol() {
    return replicationProtocol;
  }

  /** True if values of the given type oid should be received in binary format. */
  @Override
  public boolean useBinaryForReceive(int oid) {
    return useBinaryReceiveForOids.contains(oid);
  }

  /** Replace the set of type oids that are received in binary format. */
  @Override
  public void setBinaryReceiveOids(Set<Integer> oids) {
    useBinaryReceiveForOids.clear();
    useBinaryReceiveForOids.addAll(oids);
  }

  /** True if values of the given type oid should be sent in binary format. */
  @Override
  public boolean useBinaryForSend(int oid) {
    return useBinarySendForOids.contains(oid);
  }

  /** Replace the set of type oids that are sent in binary format. */
  @Override
  public void setBinarySendOids(Set<Integer> oids) {
    useBinarySendForOids.clear();
    useBinarySendForOids.addAll(oids);
  }
  /** Record the integer_datetimes setting reported by the server at startup. */
  private void setIntegerDateTimes(boolean state) {
    integerDateTimes = state;
  }

  /** Whether the server reported integer_datetimes = on. */
  public boolean getIntegerDateTimes() {
    return integerDateTimes;
  }
  /**
   * Wait for (or stop) the ring buffer thread that streams rows into the
   * blocking queue, depending on where the wait originates.
   *
   * @param calledFromConnectionClose true when called from Connection.close();
   *        the thread is flagged to stop and interrupted instead of joined
   * @param calledFromResultsetClose true when called from ResultSet.close();
   *        remaining rows are drained, the per-result thread is joined, and
   *        its queue is closed
   * @param calledFromStatementClose true when called from Statement.close();
   *        remaining rows are drained and the active ring buffer thread joined
   * @param queueRows the blocking queue backing the result set; may be null
   * @param ringBufferThread the thread managing the blocking queue; may be null
   */
  public void waitForRingBufferThreadToFinish(boolean calledFromConnectionClose,
                                              boolean calledFromResultsetClose,
                                              boolean calledFromStatementClose,
                                              RedshiftRowsBlockingQueue<Tuple> queueRows,
                                              Thread ringBufferThread)
  {
    synchronized(m_ringBufferThreadLock) {
      try {
        m_executingLock.lock();

        // Wait for full read of any executing command
        if(m_ringBufferThread != null)
        {
          try
          {
            if(calledFromConnectionClose)
            {
              // Flag the thread to stop and interrupt it; no join — the
              // connection is going away anyway.
              m_ringBufferStopThread = true;
              m_ringBufferThread.interrupt();
              return;
            }
            else
            if (calledFromResultsetClose)
            {
              // Drain results from the socket
              if (queueRows != null)
                queueRows.setSkipRows();

              // Wait for thread associated with result to terminate.
              if (ringBufferThread != null) {
                ringBufferThread.join();
              }

              if (queueRows != null)
                queueRows.close();
            }
            else if(calledFromStatementClose)
            {
              // Drain results from the socket
              if (queueRows != null)
                queueRows.setSkipRows();

              m_ringBufferThread.join();
            }
            else {
              // Application is trying to execute another SQL on same connection.
              // Wait for current thread to terminate.
              m_ringBufferThread.join(); // joinWaitTime
            }
          }
          catch(Throwable th)
          {
            // Best effort: join/interrupt failures must not mask the caller's
            // own close/execute path, so they are deliberately ignored.
          }
        }
        else {
          // Buffer thread already terminated; just close the queue if the
          // caller is tearing down a result set.
          if (queueRows != null && calledFromResultsetClose)
            queueRows.close();
        }
      }
      finally {
        m_executingLock.unlock();
      }
    }
  }
  // Pipelined-protocol bookkeeping: requests already sent to the backend whose
  // responses have not yet been consumed, in send order.
  private final Deque<SimpleQuery> pendingParseQueue = new ArrayDeque<SimpleQuery>();
  private final Deque<Portal> pendingBindQueue = new ArrayDeque<Portal>();
  private final Deque<ExecuteRequest> pendingExecuteQueue = new ArrayDeque<ExecuteRequest>();
  private final Deque<DescribeRequest> pendingDescribeStatementQueue =
      new ArrayDeque<DescribeRequest>();
  private final Deque<SimpleQuery> pendingDescribePortalQueue = new ArrayDeque<SimpleQuery>();

  // Monotonic counter; presumably used to generate unique statement/portal
  // names (consumers are outside this chunk — verify before relying on it).
  private long nextUniqueID = 1;
  // Whether server-driven client_encoding changes are honored
  // (see receiveParameterStatus).
  private final boolean allowEncodingChanges;
  // Autosave savepoint cleanup toggle; used by code outside this chunk.
  private final boolean cleanupSavePoints;

  /**
   * <p>The estimated server response size since we last consumed the input stream from the server, in
   * bytes.</p>
   *
   * <p>Starts at zero, reset by every Sync message. Mainly used for batches.</p>
   *
   * <p>Used to avoid deadlocks, see MAX_BUFFERED_RECV_BYTES.</p>
   */
  private int estimatedReceiveBufferBytes = 0;

  // Canned, reusable queries for transaction control.
  private final SimpleQuery beginTransactionQuery =
      new SimpleQuery(
          new NativeQuery("BEGIN", new int[0], false, SqlCommand.BLANK),
          null, false, logger);

  private final SimpleQuery beginReadOnlyTransactionQuery =
      new SimpleQuery(
          new NativeQuery("BEGIN READ ONLY", new int[0], false, SqlCommand.BLANK),
          null, false, logger);

  // Placeholder used when the application executes an empty SQL string.
  private final SimpleQuery emptyQuery =
      new SimpleQuery(
          new NativeQuery("", new int[0], false,
              SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)
          ), null, false, logger);

  // Canned queries implementing the driver's autosave machinery.
  private final SimpleQuery autoSaveQuery =
      new SimpleQuery(
          new NativeQuery("SAVEPOINT RSJDBC_AUTOSAVE", new int[0], false, SqlCommand.BLANK),
          null, false, logger);

  private final SimpleQuery releaseAutoSave =
      new SimpleQuery(
          new NativeQuery("RELEASE SAVEPOINT RSJDBC_AUTOSAVE", new int[0], false, SqlCommand.BLANK),
          null, false, logger);

  /*
   In autosave mode we use this query to roll back errored transactions
   */
  private final SimpleQuery restoreToAutoSave =
      new SimpleQuery(
          new NativeQuery("ROLLBACK TO SAVEPOINT RSJDBC_AUTOSAVE", new int[0], false, SqlCommand.BLANK),
          null, false, logger);
  /**
   * Ring Buffer thread: runs the protocol message loop in the background so
   * rows can be streamed into the blocking queue while the application reads.
   * It's an inner class because it needs access to outer class member vars.
   *
   * @author igarish
   */
  private class RingBufferThread extends Thread
  {
    // Message loop state captured at construction; released (nulled) in
    // run()'s finally block.
    ResultHandler handler;
    int flags;
    int fetchSize;
    MessageLoopState msgLoopState;
    boolean subQueries;
    int[] rowCount;
    int maxRows;

    /**
     * Capture everything processResultsOnThread needs so the message loop can
     * continue on this background thread.
     *
     * @param handler sink for rows, statuses, and errors
     * @param flags query execution flags
     * @param fetchSize rows per fetch batch
     * @param msgLoopState resumable message-loop state (owns the row queue)
     * @param subQueries whether the query has subqueries
     * @param rowCount in/out row counter
     * @param maxRows row limit for the result
     */
    public RingBufferThread(ResultHandler handler,
                            int flags, int fetchSize,
                            MessageLoopState msgLoopState,
                            boolean subQueries,
                            int[] rowCount,
                            int maxRows)
    {
      super("RingBufferThread");
      this.handler = handler;
      this.flags = flags;
      this.fetchSize = fetchSize;
      this.msgLoopState = msgLoopState;
      this.subQueries = subQueries;
      this.rowCount = rowCount;
      this.maxRows = maxRows;
    }

    /**
     * Run the thread
     */
    @Override
    public void run()
    {
      // TODO: Do we have to synchronize on this?
      try
      {
        // Process result
        processResultsOnThread(handler, flags, fetchSize, msgLoopState, subQueries, rowCount, maxRows);
      }
      catch(Exception ex)
      {
        if(m_ringBufferStopThread) {
          // Stop was requested (connection close): clear the interrupt status
          // and discard the queue. Thread.interrupted() is static; calling it
          // through currentThread() still clears this thread's flag.
          Thread.currentThread().interrupted();

          // Close the queue
          if (this.msgLoopState.queueTuples != null)
            this.msgLoopState.queueTuples.close();
        }
        else {
          // Unexpected failure: unblock any reader with an end-of-rows marker
          // before surfacing the error.
          if (this.msgLoopState.queueTuples != null) {
            try {
              this.msgLoopState.queueTuples.checkAndAddEndOfRowsIndicator();
            } catch (Exception e) {
              // Best effort; the error below is what matters.
            }
          }

          // Handle exception
          handler.handleError(
              new RedshiftException(GT.tr("Exception retrieving query results."),
                  RedshiftState.UNEXPECTED_ERROR, ex));
        }
      }
      finally
      {
        // Always publish the handler's exception (if any) and an end-of-rows
        // marker so readers blocked on the queue wake up.
        if (this.msgLoopState.queueTuples != null) {
          try {
            this.msgLoopState.queueTuples.setHandlerException(handler.getException());
            this.msgLoopState.queueTuples.checkAndAddEndOfRowsIndicator();
          } catch (Exception e) {
            // Best effort during teardown.
          }
        }

        handler.setStatementStateIdleFromInQuery();

        // Reset shared state so the executor can start a new ring buffer thread.
        this.msgLoopState.queueTuples = null;
        this.msgLoopState = null;
        this.handler = null;
        m_ringBufferStopThread = false;
        m_ringBufferThread = null;
      }
    }
  } // RingBufferThread
}
| 8,382 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/SimpleParameterList.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.Oid;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.Utils;
import com.amazon.redshift.geometric.RedshiftBox;
import com.amazon.redshift.geometric.RedshiftPoint;
import com.amazon.redshift.jdbc.UUIDArrayAssistant;
import com.amazon.redshift.util.ByteConverter;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import com.amazon.redshift.util.StreamWrapper;
import java.io.IOException;
import java.io.InputStream;
import java.sql.SQLException;
import java.util.Arrays;
/**
* Parameter list for a single-statement V3 query.
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
class SimpleParameterList implements V3ParameterList {
  // Per-slot flag bits: direction (IN/OUT) in the two low bits, transfer
  // format (TEXT/BINARY) in bit 2.
  private static final byte IN = 1;
  private static final byte OUT = 2;
  private static final byte INOUT = IN | OUT;

  private static final byte TEXT = 0;
  private static final byte BINARY = 4;

  /** Build a parameter list without server-reported parameter markers. */
  SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) {
    this(paramCount, transferModeRegistry, null);
  }

  /**
   * Build a parameter list.
   *
   * @param paramCount number of parameter slots
   * @param transferModeRegistry decides text vs. binary transfer per type oid
   * @param redshiftParamMarkers server-reported 1-based marker positions used
   *        by checkAllParametersSet(), or null when none were reported
   */
  SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry,
      int[] redshiftParamMarkers) {
    this.paramValues = new Object[paramCount];
    this.paramTypes = new int[paramCount];
    this.encoded = new byte[paramCount][];
    this.flags = new byte[paramCount];
    this.transferModeRegistry = transferModeRegistry;
    this.redshiftParamMarkers = redshiftParamMarkers;
  }
@Override
public void registerOutParameter(int index, int sqlType) throws SQLException {
if (index < 1 || index > paramValues.length) {
throw new RedshiftException(
GT.tr("The column index is out of range: {0}, number of columns: {1}.",
index, paramValues.length),
RedshiftState.INVALID_PARAMETER_VALUE);
}
flags[index - 1] |= OUT;
}
  /**
   * Core binding routine shared by all setters: store the value, mark the
   * slot IN with the requested transfer format, and record the type oid.
   *
   * @param index 1-based parameter position
   * @param value the value object, or NULL_OBJECT for SQL NULL
   * @param oid declared type oid, possibly Oid.UNSPECIFIED
   * @param binary TEXT or BINARY transfer-format flag bit
   * @throws SQLException if the index is outside [1, parameter count]
   */
  private void bind(int index, Object value, int oid, byte binary) throws SQLException {
    if (index < 1 || index > paramValues.length) {
      throw new RedshiftException(
          GT.tr("The column index is out of range: {0}, number of columns: {1}.",
              index, paramValues.length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }

    --index; // switch to 0-based slot

    // Any cached UTF-8 encoding of a previous value is now stale.
    encoded[index] = null;
    paramValues[index] = value;
    flags[index] = (byte) (direction(index) | IN | binary);

    // If we are setting something to an UNSPECIFIED NULL, don't overwrite
    // our existing type for it. We don't need the correct type info to
    // send this value, and we don't want to overwrite and require a
    // reparse.
    if (oid == Oid.UNSPECIFIED && paramTypes[index] != Oid.UNSPECIFIED && value == NULL_OBJECT) {
      return;
    }

    paramTypes[index] = oid;

    // Record the most recently bound 1-based position (consumers of pos are
    // outside this chunk).
    pos = index + 1;
  }
public int getParameterCount() {
return paramValues.length;
}
public int getOutParameterCount() {
int count = 0;
for (int i = 0; i < paramTypes.length; i++) {
if ((direction(i) & OUT) == OUT) {
count++;
}
}
// Every function has at least one output.
if (count == 0) {
count = 1;
}
return count;
}
public int getInParameterCount() {
int count = 0;
for (int i = 0; i < paramTypes.length; i++) {
if (direction(i) != OUT) {
count++;
}
}
return count;
}
public void setIntParameter(int index, int value) throws SQLException {
byte[] data = new byte[4];
ByteConverter.int4(data, 0, value);
bind(index, data, Oid.INT4, BINARY);
}
  /**
   * Bind a value that is already SQL-literal text; sent in text format.
   * Identical to setStringParameter in this implementation.
   */
  public void setLiteralParameter(int index, String value, int oid) throws SQLException {
    bind(index, value, oid, TEXT);
  }

  /** Bind a string value in text transfer format with the given type oid. */
  public void setStringParameter(int index, String value, int oid) throws SQLException {
    bind(index, value, oid, TEXT);
  }

  /** Bind pre-encoded binary data with the given type oid. */
  public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
    bind(index, value, oid, BINARY);
  }
  /** Bind a byte-array slice as a BYTEA value (binary transfer). */
  @Override
  public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
    bind(index, new StreamWrapper(data, offset, length), Oid.BYTEA, BINARY);
  }

  /** Bind a fixed-length stream as a BYTEA value (binary transfer). */
  @Override
  public void setBytea(int index, InputStream stream, int length) throws SQLException {
    bind(index, new StreamWrapper(stream, length), Oid.BYTEA, BINARY);
  }

  /** Bind a stream of unknown length as a BYTEA value (binary transfer). */
  @Override
  public void setBytea(int index, InputStream stream) throws SQLException {
    bind(index, new StreamWrapper(stream), Oid.BYTEA, BINARY);
  }

  /** Bind a writer-produced value as BYTEA (binary transfer). */
  @Override
  public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
    bind(index, writer, Oid.BYTEA, BINARY);
  }
  /** Bind a byte-array slice as a VARBYTE value (binary transfer). */
  @Override
  public void setVarbyte(int index, byte[] data, int offset, int length) throws SQLException {
    bind(index, new StreamWrapper(data, offset, length), Oid.VARBYTE, BINARY);
  }

  /** Bind a byte-array slice as a GEOGRAPHY value (binary transfer). */
  @Override
  public void setGeography(int index, byte[] data, int offset, int length) throws SQLException {
    bind(index, new StreamWrapper(data, offset, length), Oid.GEOGRAPHY, BINARY);
  }

  /** Bind a stream as a TEXT value (text transfer). */
  @Override
  public void setText(int index, InputStream stream) throws SQLException {
    bind(index, new StreamWrapper(stream), Oid.TEXT, TEXT);
  }
@Override
public void setNull(int index, int oid) throws SQLException {
byte binaryTransfer = TEXT;
if (transferModeRegistry.useBinaryForReceive(oid)) {
binaryTransfer = BINARY;
}
bind(index, NULL_OBJECT, oid, binaryTransfer);
}
  /**
   * Render the 1-based parameter as SQL text for logging/toString purposes:
   * "?" when unbound, "NULL" for SQL NULL.
   *
   * <p>Binary-bound values are decoded for a handful of well-known types and
   * rendered as literals (with a cast suffix where needed); any other binary
   * value renders as "?". Text-bound values are quoted/escaped and suffixed
   * with a cast for date/time, interval, and numeric types.</p>
   *
   * <p>The output is best-effort and not guaranteed to be server-executable
   * (see the embedded-null caveat below).</p>
   */
  @Override
  public String toString(int index, boolean standardConformingStrings) {
    --index;
    if (paramValues[index] == null) {
      return "?";
    } else if (paramValues[index] == NULL_OBJECT) {
      return "NULL";
    } else if ((flags[index] & BINARY) == BINARY) {
      // handle some of the numeric types

      switch (paramTypes[index]) {
        case Oid.INT2:
          short s = ByteConverter.int2((byte[]) paramValues[index], 0);
          return Short.toString(s);

        case Oid.INT4:
          int i = ByteConverter.int4((byte[]) paramValues[index], 0);
          return Integer.toString(i);

        case Oid.INT8:
          long l = ByteConverter.int8((byte[]) paramValues[index], 0);
          return Long.toString(l);

        case Oid.FLOAT4:
          float f = ByteConverter.float4((byte[]) paramValues[index], 0);
          if (Float.isNaN(f)) {
            return "'NaN'::real";
          }
          return Float.toString(f);

        case Oid.FLOAT8:
          double d = ByteConverter.float8((byte[]) paramValues[index], 0);
          if (Double.isNaN(d)) {
            return "'NaN'::double precision";
          }
          return Double.toString(d);

        case Oid.UUID:
          String uuid =
              new UUIDArrayAssistant().buildElement((byte[]) paramValues[index], 0, 16).toString();
          return "'" + uuid + "'::uuid";

        case Oid.POINT:
          RedshiftPoint pgPoint = new RedshiftPoint();
          pgPoint.setByteValue((byte[]) paramValues[index], 0);
          return "'" + pgPoint.toString() + "'::point";

        case Oid.BOX:
          RedshiftBox pgBox = new RedshiftBox();
          pgBox.setByteValue((byte[]) paramValues[index], 0);
          return "'" + pgBox.toString() + "'::box";
      }
      // Binary value of a type we don't decode for display.
      return "?";
    } else {
      String param = paramValues[index].toString();

      // add room for quotes + potential escaping.
      StringBuilder p = new StringBuilder(3 + (param.length() + 10) / 10 * 11);

      // No E'..' here since escapeLiteral escapes all things and it does not use \123 kind of
      // escape codes
      p.append('\'');
      try {
        p = Utils.escapeLiteral(p, param, standardConformingStrings);
      } catch (SQLException sqle) {
        // This should only happen if we have an embedded null
        // and there's not much we can do if we do hit one.
        //
        // The goal of toString isn't to be sent to the server,
        // so we aren't 100% accurate (see StreamWrapper), put
        // the unescaped version of the data.
        //
        p.append(param);
      }
      p.append('\'');
      int paramType = paramTypes[index];
      // Append an explicit cast so the literal round-trips with the right type.
      if (paramType == Oid.TIMESTAMP) {
        p.append("::timestamp");
      } else if (paramType == Oid.TIMESTAMPTZ) {
        p.append("::timestamp with time zone");
      } else if (paramType == Oid.TIME) {
        p.append("::time");
      } else if (paramType == Oid.TIMETZ) {
        p.append("::time with time zone");
      } else if (paramType == Oid.DATE) {
        p.append("::date");
      } else if (paramType == Oid.INTERVAL) {
        p.append("::interval");
      } else if (paramType == Oid.INTERVALY2M) {
        p.append("::interval year to month");
      } else if (paramType == Oid.INTERVALD2S) {
        p.append("::interval day to second");
      } else if (paramType == Oid.NUMERIC) {
        p.append("::numeric");
      }
      return p.toString();
    }
  }
@Override
public void checkAllParametersSet() throws SQLException {
for (int i = 0; i < paramTypes.length; ++i) {
if (direction(i) != OUT && paramValues[i] == null) {
throw new RedshiftException(GT.tr("No value specified for parameter {0}.", i + 1),
RedshiftState.INVALID_PARAMETER_VALUE);
}
}
// Check for server parameter marker binding positions
if (redshiftParamMarkers != null
&& redshiftParamMarkers.length > 0) {
for(int i = 0; i < redshiftParamMarkers.length; i++) {
int paramIndex = redshiftParamMarkers[i] - 1;
if (
paramIndex >= paramTypes.length
||
(direction(paramIndex) != OUT
&& paramIndex < paramValues.length
&& paramValues[paramIndex] == null)) {
throw new RedshiftException(GT.tr("Not all parameters have been populated. No value specified for parameter {0}.", paramIndex + 1),
RedshiftState.INVALID_PARAMETER_VALUE);
}
}
}
}
@Override
public void convertFunctionOutParameters() {
for (int i = 0; i < paramTypes.length; ++i) {
if (direction(i) == OUT) {
paramTypes[i] = Oid.VOID;
paramValues[i] = "null";
}
}
}
  //
  // bytea helper
  //

  /**
   * Send a StreamWrapper's contents: directly from its byte array when the
   * data is materialized, otherwise by streaming its InputStream.
   */
  private static void streamBytea(RedshiftStream pgStream, StreamWrapper wrapper) throws IOException {
    byte[] rawData = wrapper.getBytes();
    if (rawData != null) {
      pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength());
      return;
    }

    pgStream.sendStream(wrapper.getStream(), wrapper.getLength());
  }

  //
  // byte stream writer support
  //

  /** Send a writer-backed value by letting the stream drive the writer. */
  private static void streamBytea(RedshiftStream pgStream, ByteStreamWriter writer) throws IOException {
    pgStream.send(writer);
  }
  /**
   * Type oids for all slots. NOTE: returns the live internal array, not a
   * copy — callers must not mutate it.
   */
  public int[] getTypeOIDs() {
    return paramTypes;
  }

  //
  // Package-private V3 accessors
  //

  /** Type oid of the 1-based parameter. */
  int getTypeOID(int index) {
    return paramTypes[index - 1];
  }
boolean hasUnresolvedTypes() {
for (int paramType : paramTypes) {
if (paramType == Oid.UNSPECIFIED) {
return true;
}
}
return false;
}
void setResolvedType(int index, int oid) {
// only allow overwriting an unknown value
if (paramTypes[index - 1] == Oid.UNSPECIFIED) {
paramTypes[index - 1] = oid;
} else if (paramTypes[index - 1] != oid) {
throw new IllegalArgumentException("Can't change resolved type for param: " + index + " from "
+ paramTypes[index - 1] + " to " + oid);
}
}
  /** True if the 1-based parameter is bound to SQL NULL. */
  boolean isNull(int index) {
    return (paramValues[index - 1] == NULL_OBJECT);
  }

  /** True if the 1-based parameter uses binary transfer format. */
  boolean isBinary(int index) {
    return (flags[index - 1] & BINARY) != 0;
  }

  // NOTE: unlike the accessors above, this takes a 0-based slot index.
  private byte direction(int index) {
    return (byte) (flags[index] & INOUT);
  }
  /**
   * Byte length of the 1-based parameter's wire representation. Text values
   * are encoded to UTF-8 here and the encoding cached for writeV3Value.
   *
   * @throws IllegalArgumentException if the parameter is bound to SQL NULL
   */
  int getV3Length(int index) {
    --index;

    // Null?
    if (paramValues[index] == NULL_OBJECT) {
      throw new IllegalArgumentException("can't getV3Length() on a null parameter");
    }

    // Directly encoded?
    if (paramValues[index] instanceof byte[]) {
      return ((byte[]) paramValues[index]).length;
    }

    // Binary-format bytea?
    if (paramValues[index] instanceof StreamWrapper) {
      return ((StreamWrapper) paramValues[index]).getLength();
    }

    // Streamed value produced by a writer?
    if (paramValues[index] instanceof ByteStreamWriter) {
      return ((ByteStreamWriter) paramValues[index]).getLength();
    }

    // Text value: encode to UTF-8 once and cache it for writeV3Value.
    if (encoded[index] == null) {
      // Encode value and compute actual length using UTF-8.
      encoded[index] = Utils.encodeUTF8(paramValues[index].toString());
    }
    return encoded[index].length;
  }
void writeV3Value(int index, RedshiftStream pgStream) throws IOException {
--index;
// Null?
if (paramValues[index] == NULL_OBJECT) {
throw new IllegalArgumentException("can't writeV3Value() on a null parameter");
}
// Directly encoded?
if (paramValues[index] instanceof byte[]) {
pgStream.send((byte[]) paramValues[index]);
return;
}
// Binary-format bytea?
if (paramValues[index] instanceof StreamWrapper) {
streamBytea(pgStream, (StreamWrapper) paramValues[index]);
return;
}
// Streamed bytea?
if (paramValues[index] instanceof ByteStreamWriter) {
streamBytea(pgStream, (ByteStreamWriter) paramValues[index]);
return;
}
// Encoded string.
if (encoded[index] == null) {
encoded[index] = Utils.encodeUTF8((String) paramValues[index]);
}
pgStream.send(encoded[index]);
}
  /**
   * Returns a shallow copy of this list: values, type OIDs, flags and the
   * append position are copied. The UTF-8 encoding cache is not copied; it is
   * rebuilt lazily in the copy when needed.
   */
  public ParameterList copy() {
    SimpleParameterList newCopy = new SimpleParameterList(paramValues.length, transferModeRegistry);
    System.arraycopy(paramValues, 0, newCopy.paramValues, 0, paramValues.length);
    System.arraycopy(paramTypes, 0, newCopy.paramTypes, 0, paramTypes.length);
    System.arraycopy(flags, 0, newCopy.flags, 0, flags.length);
    newCopy.pos = pos;
    return newCopy;
  }
public void clear() {
Arrays.fill(paramValues, null);
Arrays.fill(paramTypes, 0);
Arrays.fill(encoded, null);
Arrays.fill(flags, (byte) 0);
pos = 0;
}
  /** Always null for a single-statement list; see CompositeParameterList for the multi-statement case. */
  public SimpleParameterList[] getSubparams() {
    return null;
  }
  /** Returns the live array of parameter values (not a copy). */
  public Object[] getValues() {
    return paramValues;
  }
  /** Returns the live array of parameter type OIDs (not a copy). */
  public int[] getParamTypes() {
    return paramTypes;
  }
  /** Returns the live array of per-parameter flag bits (not a copy). */
  public byte[] getFlags() {
    return flags;
  }
  /** Returns the live UTF-8 encoding cache (entries may be null until first use). */
  public byte[][] getEncoding() {
    return encoded;
  }
  /**
   * Appends the in-parameters of {@code list} after the parameters already
   * staged in this list (used when rewriting batches into a single statement).
   * Lists that are not v3 {@link SimpleParameterList} instances are silently
   * ignored.
   *
   * @throws SQLException if the combined parameter count exceeds this list's capacity
   */
  @Override
  public void appendAll(ParameterList list) throws SQLException {
    if (list instanceof com.amazon.redshift.core.v3.SimpleParameterList ) {
      /* only v3.SimpleParameterList is compatible with this type.
         NOTE(review): the arraycopy calls below copy element references, not
         deep copies -- both lists share value objects afterwards. Confirm
         callers never mutate the source list after appending. */
      SimpleParameterList spl = (SimpleParameterList) list;
      int inParamCount = spl.getInParameterCount();
      if ((pos + inParamCount) > paramValues.length) {
        throw new RedshiftException(
          GT.tr("Added parameters index out of range: {0}, number of columns: {1}.",
              (pos + inParamCount), paramValues.length),
            RedshiftState.INVALID_PARAMETER_VALUE);
      }
      System.arraycopy(spl.getValues(), 0, this.paramValues, pos, inParamCount);
      System.arraycopy(spl.getParamTypes(), 0, this.paramTypes, pos, inParamCount);
      System.arraycopy(spl.getFlags(), 0, this.flags, pos, inParamCount);
      System.arraycopy(spl.getEncoding(), 0, this.encoded, pos, inParamCount);
      pos += inParamCount;
    }
  }
/**
* Useful implementation of toString.
* @return String representation of the list values
*/
@Override
public String toString() {
StringBuilder ts = new StringBuilder("<[");
if (paramValues.length > 0) {
ts.append(toString(1, true));
for (int c = 2; c <= paramValues.length; c++) {
ts.append(" ,").append(toString(c, true));
}
}
ts.append("]>");
return ts.toString();
}
  // Parameter values; NULL_OBJECT marks an explicit SQL NULL, Java null means "never set".
  private final Object[] paramValues;
  // Parameter type OIDs; Oid.UNSPECIFIED until resolved by the backend.
  private final int[] paramTypes;
  // Per-parameter flag bits (IN/OUT direction and binary transfer format).
  private final byte[] flags;
  // Lazily-populated cache of UTF-8 encodings for text parameter values.
  private final byte[][] encoded;
  // Decides text vs. binary transfer per type OID.
  private final TypeTransferModeRegistry transferModeRegistry;
  // Positions of Redshift-style parameter markers. NOTE(review): not referenced
  // in this portion of the class -- confirm usage elsewhere before removing.
  private final int[] redshiftParamMarkers;

  /**
   * Marker object representing NULL; this distinguishes "parameter never set" from "parameter set
   * to null".
   */
  private static final Object NULL_OBJECT = new Object();

  // Next append position used by appendAll().
  private int pos = 0;
}
| 8,383 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/Portal.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.ResultCursor;
import com.amazon.redshift.core.Utils;
import java.lang.ref.PhantomReference;
/**
 * V3 ResultCursor implementation in terms of backend Portals. Holds the state
 * of a single Portal; a PhantomReference managed by the caller performs the
 * actual resource cleanup.
 *
 * @author Oliver Jowett (oliver@opencloud.com)
 */
class Portal implements ResultCursor {

  // Holding the generating query has the useful side effect that while this
  // Portal is referenced, so is the SimpleQuery; the underlying statement
  // therefore stays open while the portal is open (the backend closes all
  // open portals when their statement is closed).
  private final SimpleQuery query;
  private final String portalName;
  private final byte[] encodedName;
  private PhantomReference<?> cleanupRef;

  Portal(SimpleQuery query, String portalName) {
    this.query = query;
    this.portalName = portalName;
    this.encodedName = Utils.encodeUTF8(portalName);
  }

  /** Releases the cleanup reference, triggering backend-side portal cleanup. */
  public void close() {
    PhantomReference<?> ref = cleanupRef;
    if (ref != null) {
      cleanupRef = null;
      ref.clear();
      ref.enqueue();
    }
  }

  String getPortalName() {
    return portalName;
  }

  byte[] getEncodedPortalName() {
    return encodedName;
  }

  SimpleQuery getQuery() {
    return query;
  }

  void setCleanupRef(PhantomReference<?> cleanupRef) {
    this.cleanupRef = cleanupRef;
  }

  public String toString() {
    return portalName;
  }
}
| 8,384 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/RedshiftRowsBlockingQueue.java | package com.amazon.redshift.core.v3;
import java.sql.SQLException;
// import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import com.amazon.redshift.core.Tuple;
import com.amazon.redshift.jdbc.RedshiftConnectionImpl;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
/**
 * Bounded queue of fetched result rows shared between the protocol reader
 * thread (producer) and the application (consumer). Capacity is either a row
 * count ({@code fetchSize}) or, when {@code fetchRingBufferSize != 0}, an
 * estimated total byte size; in the size-limited mode {@code put} blocks
 * until the consumer frees enough space.
 */
public class RedshiftRowsBlockingQueue<E> extends LinkedBlockingQueue<E> {
  private static final long serialVersionUID = -7903933977591709194L;

  // Row-count capacity, used when byte-size limiting is disabled.
  private int fetchSize;
  // Byte-size capacity of the ring buffer; 0 disables size-based limiting.
  private long fetchRingBufferSizeCapacity;
  // True when capacity is enforced by estimated byte size rather than row count.
  private boolean limitByBufSize;
  // Current estimated total byte size of all buffered rows.
  private AtomicLong totalFetchRingBufferSize;

  /** Lock held by put, offer, etc */
  private final ReentrantLock putLock = new ReentrantLock();

  /** Wait queue for waiting puts */
  private final Condition notFull = putLock.newCondition();

  // Set by close(). NOTE(review): written but never read within this class --
  // confirm external use before removing.
  private boolean closed = false;
  // True once the end-of-result marker row has been queued.
  private boolean endOfResultAdded = false;
  // Error raised by the producer, handed to the consumer via getHandlerException().
  private SQLException handlerException = null;
  // When set, put() silently drops rows (result set abandoned by the consumer).
  private boolean skipRows = false;
  // Index of the row most recently taken by the consumer.
  private int currentRow = -1;

  // This can be null for default constructor.
  private RedshiftLogger logger;

  // Portal suspended mid-fetch, if any; closed when the queue is closed.
  private Portal currentSuspendedPortal;

  // Safety margin applied to per-row memory estimates to avoid OOM errors.
  private final double MEMORY_ESTIMATE_SCALING_FACTOR = 1.2;

  /** Row-count-limited queue (no byte-size limiting, no logger). */
  public RedshiftRowsBlockingQueue(int capacity) {
    super(capacity);
    init(capacity, 0, null);
  }

  /**
   * Queue limited either by row count (when {@code fetchRingBufferSize == 0})
   * or by byte size; in the latter case the superclass capacity is unbounded
   * and size enforcement happens in {@link #put}.
   */
  public RedshiftRowsBlockingQueue(int fetchSize, long fetchRingBufferSize, RedshiftLogger logger) {
    super(
        (fetchSize != 0
          && fetchRingBufferSize == 0)
        ? fetchSize
        : Integer.MAX_VALUE);
    init(fetchSize, fetchRingBufferSize, logger);
  }

  // Shared constructor initialization.
  private void init(int fetchSize, long fetchRingBufferSize, RedshiftLogger logger) {
    this.fetchSize = fetchSize;
    this.fetchRingBufferSizeCapacity = fetchRingBufferSize;
    this.logger = logger;
    limitByBufSize = (fetchRingBufferSize != 0);
    totalFetchRingBufferSize = new AtomicLong();

    if (RedshiftLogger.isEnable()
        && logger != null) {
      logger.log(LogLevel.DEBUG, "init(): limitByBufSize={0} , totalFetchRingBufferSize={1}, fetchRingBufferSizeCapacity = {2}, fetchSize = {3}",
          limitByBufSize, totalFetchRingBufferSize.get(), fetchRingBufferSizeCapacity, fetchSize);
    }
  }

  /**
   * Adds a row, blocking while the buffer is at byte-size capacity. Rows are
   * silently dropped when {@code skipRows} is set.
   */
  @Override
  public void put(E e) throws InterruptedException {
    if (skipRows) return;
    if (limitByBufSize) {
      if (e != null) {
/*      if (RedshiftLogger.isEnable()
          && logger != null) {
        logger.log(LogLevel.DEBUG, "put(): limitByBufSize={0} , totalFetchRingBufferSize={1}, fetchRingBufferSizeCapacity = {2}, fetchSize = {3}",
            limitByBufSize, totalFetchRingBufferSize.get(), fetchRingBufferSizeCapacity, fetchSize);
      } */

        // Is buffer at full capacity?
        if(totalFetchRingBufferSize.get() >= fetchRingBufferSizeCapacity) {
          final ReentrantLock putLock = this.putLock;
          putLock.lockInterruptibly();

          try {
            Tuple row = (Tuple)e;
            long currentBufSize;

            if (RedshiftLogger.isEnable()
                && logger != null) {
              logger.log(LogLevel.DEBUG, "put(): Buffer full. Waiting for application to read rows and make space");
            }

            // Wait (with a 1s poll so skipRows is noticed) while at capacity.
            while (totalFetchRingBufferSize.get() >= fetchRingBufferSizeCapacity) {
              if(skipRows) {
                return;
              }
              notFull.await(1, TimeUnit.SECONDS);
            }

            if (RedshiftLogger.isEnable() && logger != null)
              logger.log(LogLevel.DEBUG, "put(): Buffer state change from full to having some space. Now adding a new row.");

            super.put(e);
            currentBufSize = totalFetchRingBufferSize.addAndGet(getNodeSize(row));
            if (currentBufSize < fetchRingBufferSizeCapacity)
              notFull.signal();
          } finally {
            putLock.unlock();
          }
        }
        else {
          // Space available: add without taking the put lock.
          super.put(e);
          totalFetchRingBufferSize.addAndGet(getNodeSize((Tuple)e));
        }
      }
    } // By size
    else
      super.put(e);
  }

  /**
   * Removes the next row, updating the byte-size accounting and waking a
   * blocked producer when space frees up.
   */
  @Override
  public E take() throws InterruptedException {
    currentRow++;
    E e = super.take();

    if (limitByBufSize) {
      // Reduce the total buf size
      Tuple row = (Tuple)e;
      long currentBufSize;
      boolean bufWasFull = (totalFetchRingBufferSize.get() >= fetchRingBufferSizeCapacity);

      currentBufSize = totalFetchRingBufferSize.addAndGet(-getNodeSize(row));

      // Signal the waiters
      if (bufWasFull) {
        if (currentBufSize < fetchRingBufferSizeCapacity)
          signalNotFull();
      }
    }

    return e;
  }

  /** Returns the index of the row most recently taken (-1 before the first take). */
  public int getCurrentRowIndex(){
    return currentRow;
  }

  /** True once the end-of-result marker has been queued. */
  public boolean endOfResult() {
    return endOfResultAdded;
  }

  /** Records an error raised by the producer for later retrieval by the consumer. */
  public void setHandlerException(SQLException ex) {
    handlerException = ex;
  }

  /** Returns and clears the stored producer error, if any. */
  public SQLException getHandlerException() {
    SQLException ex = handlerException;
    handlerException = null;
    return ex;
  }

  /**
   * Close the queue.
   */
  public void close() {
    closed = true;
    super.clear();
    try {
      // This will unblock the row reader, if row produce
      // goes away before end of result.
      addEndOfRowsIndicator();
      closeSuspendedPortal();
    } catch (InterruptedException e) {
      // Ignore.
      // NOTE(review): interrupt status is not restored here -- consider
      // Thread.currentThread().interrupt() per usual practice; confirm callers.
    }
    totalFetchRingBufferSize.set(0);
  }

  /** Tells put() to silently drop all subsequent rows. */
  public void setSkipRows(){
    skipRows = true;
  }

  /**
   * Add end-of-rows indicator
   *
   * @throws InterruptedException throws when the thread gets interrupted.
   */
  public void addEndOfRowsIndicator() throws InterruptedException {
    put((E)new Tuple(0));
  }

  /**
   * Add end-of-rows indicator, if not added.
   *
   * @throws InterruptedException throws when the thread gets interrupted.
   */
  public void checkAndAddEndOfRowsIndicator() throws InterruptedException {
    if (!endOfResultAdded) {
      addEndOfRowsIndicator();
      endOfResultAdded = true;
    }
  }

  /** As {@link #checkAndAddEndOfRowsIndicator()}, also recording the suspended portal. */
  public void checkAndAddEndOfRowsIndicator(Portal currentSuspendedPortal) throws InterruptedException {
    this.currentSuspendedPortal = currentSuspendedPortal;
    checkAndAddEndOfRowsIndicator();
  }

  /** Returns the portal suspended mid-fetch, or null. */
  public Portal getSuspendedPortal() {
    return currentSuspendedPortal;
  }

  /** True when a suspended portal is associated with this queue. */
  public boolean isSuspendedPortal() {
    return (currentSuspendedPortal != null);
  }

  /** Closes the suspended portal, if one was recorded. */
  public void closeSuspendedPortal() {
    if (currentSuspendedPortal != null) {
      currentSuspendedPortal.close();
    }
  }

  /**
   * Signals a waiting put. Called only from take/poll.
   */
  private void signalNotFull() {
    final ReentrantLock putLock = this.putLock;
    putLock.lock();
    try {
      notFull.signal();
    } finally {
      putLock.unlock();
    }
  }

  /**
   * Returns the size in bytes of an individual node of Ring buffer queue/linked list
   */
  private int getNodeSize(Tuple row) {
    /**
     * Node overheads are 32 bytes for 64-bit JVM and 16 bytes for 32-bit JVM
     * For 64-bit JVM: (8 + 8 + 16) => 8 byte reference for Tuple object + 8 byte reference for next
     *                  + 16 byte Node object header overhead
     * Each of these are reduced to half in case of 32-bit JVM.
     */
    int estimatedNodeSize = row.getTupleSize() + (RedshiftConnectionImpl.IS_64_BIT_JVM ? 32 : 16);
    return (int) (estimatedNodeSize * MEMORY_ESTIMATE_SCALING_FACTOR); // using a scaling factor for avoiding OOM errors
  }
}
| 8,385 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/DescribeRequest.java | /*
* Copyright (c) 2015, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
/**
 * Immutable entry in the "pending describe queue": a statement-describe
 * request whose backend response has not yet been processed.
 *
 * @see QueryExecutorImpl#pendingDescribeStatementQueue
 */
class DescribeRequest {
  // Query whose statement is being described.
  public final SimpleQuery query;
  // Parameters bound when the describe was issued.
  public final SimpleParameterList parameterList;
  // True when only a describe (no execute) was requested.
  public final boolean describeOnly;
  // Backend statement name the describe refers to.
  public final String statementName;

  DescribeRequest(SimpleQuery query, SimpleParameterList parameterList,
      boolean describeOnly, String statementName) {
    this.query = query;
    this.parameterList = parameterList;
    this.describeOnly = describeOnly;
    this.statementName = statementName;
  }
}
| 8,386 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/ExecuteRequest.java | /*
* Copyright (c) 2015, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
/**
 * Immutable entry in the "pending execute queue": an Execute message whose
 * backend response has not yet been processed.
 *
 * @see QueryExecutorImpl#pendingExecuteQueue
 */
class ExecuteRequest {
  // Query being executed.
  public final SimpleQuery query;
  // Portal the execute runs against (may be the unnamed portal).
  public final Portal portal;
  // True when the query was sent via the simple (Q) protocol.
  public final boolean asSimple;

  ExecuteRequest(SimpleQuery query, Portal portal, boolean asSimple) {
    this.query = query;
    this.portal = portal;
    this.asSimple = asSimple;
  }
}
| 8,387 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/CompositeParameterList.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.InputStream;
import java.sql.SQLException;
/**
 * Parameter list for V3 query strings that contain multiple statements. We delegate to one
 * SimpleParameterList per statement, and translate parameter indexes as needed.
 *
 * <p>All public indexes are 1-based and global across the whole composite
 * statement; {@link #findSubParam(int)} maps them onto the owning sub-list.</p>
 *
 * @author Oliver Jowett (oliver@opencloud.com)
 */
class CompositeParameterList implements V3ParameterList {
  CompositeParameterList(SimpleParameterList[] subparams, int[] offsets) {
    this.subparams = subparams;
    this.offsets = offsets;
    // Total count = global offset of the last sub-list plus its own parameter count.
    this.total = offsets[offsets.length - 1] + subparams[offsets.length - 1].getInParameterCount();
  }

  /**
   * Maps a global 1-based parameter index to the position of the sub-list that
   * owns it.
   *
   * @throws SQLException if the index is out of range
   */
  private int findSubParam(int index) throws SQLException {
    if (index < 1 || index > total) {
      throw new RedshiftException(
          GT.tr("The column index is out of range: {0}, number of columns: {1}.", index, total),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }

    for (int i = offsets.length - 1; i >= 0; --i) {
      if (offsets[i] < index) {
        return i;
      }
    }

    throw new IllegalArgumentException("I am confused; can't find a subparam for index " + index);
  }

  /** No-op: OUT parameters are not supported on composite (multi-statement) lists. */
  public void registerOutParameter(int index, int sqlType) {

  }

  /** Always 0 (IN): composite lists carry no OUT parameters. */
  public int getDirection(int i) {
    return 0;
  }

  public int getParameterCount() {
    return total;
  }

  public int getInParameterCount() {
    return total;
  }

  public int getOutParameterCount() {
    return 0;
  }

  /** Collects the type OIDs of every sub-list into one global array. */
  public int[] getTypeOIDs() {
    int[] oids = new int[total];
    for (int i = 0; i < offsets.length; i++) {
      int[] subOids = subparams[i].getTypeOIDs();
      System.arraycopy(subOids, 0, oids, offsets[i], subOids.length);
    }
    return oids;
  }

  public void setIntParameter(int index, int value) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setIntParameter(index - offsets[sub], value);
  }

  // NOTE(review): delegates to setStringParameter rather than a literal-specific
  // setter -- confirm this is intended for literal parameters.
  public void setLiteralParameter(int index, String value, int oid) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setStringParameter(index - offsets[sub], value, oid);
  }

  public void setStringParameter(int index, String value, int oid) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setStringParameter(index - offsets[sub], value, oid);
  }

  public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setBinaryParameter(index - offsets[sub], value, oid);
  }

  public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setBytea(index - offsets[sub], data, offset, length);
  }

  public void setBytea(int index, InputStream stream, int length) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setBytea(index - offsets[sub], stream, length);
  }

  public void setBytea(int index, InputStream stream) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setBytea(index - offsets[sub], stream);
  }

  public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setBytea(index - offsets[sub], writer);
  }

  public void setVarbyte(int index, byte[] data, int offset, int length) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setVarbyte(index - offsets[sub], data, offset, length);
  }

  public void setGeography(int index, byte[] data, int offset, int length) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setGeography(index - offsets[sub], data, offset, length);
  }

  public void setText(int index, InputStream stream) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setText(index - offsets[sub], stream);
  }

  public void setNull(int index, int oid) throws SQLException {
    int sub = findSubParam(index);
    subparams[sub].setNull(index - offsets[sub], oid);
  }

  /**
   * Renders one parameter; the interface does not allow SQLException here, so
   * an out-of-range index surfaces as IllegalStateException (cause preserved).
   */
  public String toString(int index, boolean standardConformingStrings) {
    try {
      int sub = findSubParam(index);
      return subparams[sub].toString(index - offsets[sub], standardConformingStrings);
    } catch (SQLException e) {
      // Preserve the original exception as the cause instead of discarding it.
      throw new IllegalStateException(e.getMessage(), e);
    }
  }

  /** Deep-copies each sub-list; the offsets array is shared (it is never mutated). */
  public ParameterList copy() {
    SimpleParameterList[] copySub = new SimpleParameterList[subparams.length];
    for (int sub = 0; sub < subparams.length; ++sub) {
      copySub[sub] = (SimpleParameterList) subparams[sub].copy();
    }

    return new CompositeParameterList(copySub, offsets);
  }

  public void clear() {
    for (SimpleParameterList subparam : subparams) {
      subparam.clear();
    }
  }

  public SimpleParameterList[] getSubparams() {
    return subparams;
  }

  /** Verifies every sub-list has all of its parameters populated. */
  public void checkAllParametersSet() throws SQLException {
    for (SimpleParameterList subparam : subparams) {
      subparam.checkAllParametersSet();
    }
  }

  public byte[][] getEncoding() {
    return null; // unsupported
  }

  public byte[] getFlags() {
    return null; // unsupported
  }

  public int[] getParamTypes() {
    return null; // unsupported
  }

  public Object[] getValues() {
    return null; // unsupported
  }

  public void appendAll(ParameterList list) throws SQLException {
    // no-op, unsupported
  }

  public void convertFunctionOutParameters() {
    for (SimpleParameterList subparam : subparams) {
      subparam.convertFunctionOutParameters();
    }
  }

  // Total number of parameters across all sub-lists.
  private final int total;
  private final SimpleParameterList[] subparams;
  // Global 0-based parameter offset of each sub-list.
  private final int[] offsets;
}
| 8,388 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/BatchedQuery.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.NativeQuery;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.logger.RedshiftLogger;
/**
 * Purpose of this object is to support batched query re write behaviour. Responsibility for
 * tracking the batch size and implement the clean up of the query fragments after the batch execute
 * is complete. Intended to be used to wrap a Query that is present in the batchStatements
 * collection.
 *
 * @author Jeremy Whiting jwhiting@redhat.com
 * @author Christopher Deckers (chrriis@gmail.com)
 *
 */
public class BatchedQuery extends SimpleQuery {
  // Cached rewritten SQL for this batch size; built lazily by getNativeSql().
  private String sql;
  // Index of '(' opening the VALUES group in the native SQL.
  private final int valuesBraceOpenPosition;
  // Index of ')' closing the VALUES group in the native SQL.
  private final int valuesBraceClosePosition;
  // Number of VALUES groups this derived query carries (1 for the original).
  private final int batchSize;
  // Cache of derived queries, one slot per power-of-two batch size.
  private BatchedQuery[] blocks;

  public BatchedQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
      int valuesBraceOpenPosition,
      int valuesBraceClosePosition, boolean sanitiserDisabled,
      RedshiftLogger logger) {
    super(query, transferModeRegistry, sanitiserDisabled, logger);
    this.valuesBraceOpenPosition = valuesBraceOpenPosition;
    this.valuesBraceClosePosition = valuesBraceClosePosition;
    this.batchSize = 1;
  }

  // Derivation constructor: same query text/positions, larger batch size.
  private BatchedQuery(BatchedQuery src, int batchSize, RedshiftLogger logger) {
    super(src, logger);
    this.valuesBraceOpenPosition = src.valuesBraceOpenPosition;
    this.valuesBraceClosePosition = src.valuesBraceClosePosition;
    this.batchSize = batchSize;
  }

  /**
   * Returns (and caches) the variant of this query carrying {@code valueBlock}
   * VALUES groups. Only callable on the original (batchSize == 1) query, and
   * only for power-of-two block sizes up to {@code maxBlockCount}.
   */
  public BatchedQuery deriveForMultiBatch(int valueBlock, int maxBlockCount, RedshiftLogger logger) {
    if (getBatchSize() != 1) {
      throw new IllegalStateException("Only the original decorator can be derived.");
    }
    if (valueBlock == 1) {
      return this;
    }
    // index = log2(valueBlock) - 1; also used as the cache slot below.
    int index = Integer.numberOfTrailingZeros(valueBlock) - 1;
    if (valueBlock > maxBlockCount || valueBlock != (1 << (index + 1))) {
      throw new IllegalArgumentException(
          "Expected value block should be a power of 2 smaller or equal to " + maxBlockCount + ". Actual block is "
              + valueBlock);
    }
    if (blocks == null) {
      int maxBase2Exponent = (int) (Math.log(maxBlockCount)/Math.log(2));
      // NOTE(review): logger is dereferenced unconditionally here -- confirm
      // callers never pass null (other code paths in this package null-check it).
      logger.logDebug("maxBase2Exponent=" + maxBase2Exponent);

      blocks = new BatchedQuery[maxBase2Exponent]; // 7
    }
    BatchedQuery bq = blocks[index];
    if (bq == null) {
      bq = new BatchedQuery(this, valueBlock, logger);
      blocks[index] = bq;
    }
    return bq;
  }

  @Override
  public int getBatchSize() {
    return batchSize;
  }

  /**
   * Method to return the sql based on number of batches. Skipping the initial
   * batch.
   */
  @Override
  public String getNativeSql() {
    if (sql != null) {
      return sql;
    }
    sql = buildNativeSql(null);
    return sql;
  }

  /**
   * Builds the rewritten multi-VALUES SQL. With {@code params == null} the
   * output uses positional bind markers ($1, $2, ...); otherwise the literal
   * parameter values are inlined. The required string length is predicted up
   * front and asserted against the actual result in the marker case.
   */
  private String buildNativeSql(ParameterList params) {
    String sql = null;
    // dynamically build sql with parameters for batches
    String nativeSql = super.getNativeSql();
    int batchSize = getBatchSize();
    if (batchSize < 2) {
      sql = nativeSql;
      return sql;
    }
    if (nativeSql == null) {
      sql = "";
      return sql;
    }
    int valuesBlockCharCount = 0;
    // Split the values section around every dynamic parameter.
    int[] bindPositions = getNativeQuery().bindPositions;
    int[] chunkStart = new int[1 + bindPositions.length];
    int[] chunkEnd = new int[1 + bindPositions.length];
    chunkStart[0] = valuesBraceOpenPosition;

    if (bindPositions.length == 0) {
      valuesBlockCharCount = valuesBraceClosePosition - valuesBraceOpenPosition + 1;
      chunkEnd[0] = valuesBraceClosePosition + 1;
    } else {
      chunkEnd[0] = bindPositions[0];
      // valuesBlockCharCount += chunks[0].length;
      valuesBlockCharCount += chunkEnd[0] - chunkStart[0];
      for (int i = 0; i < bindPositions.length; i++) {
        // Skip past the digits of the $n marker to find the next chunk start.
        int startIndex = bindPositions[i] + 2;
        int endIndex =
            i < bindPositions.length - 1 ? bindPositions[i + 1] : valuesBraceClosePosition + 1;
        for (; startIndex < endIndex; startIndex++) {
          if (!Character.isDigit(nativeSql.charAt(startIndex))) {
            break;
          }
        }
        chunkStart[i + 1] = startIndex;
        chunkEnd[i + 1] = endIndex;
        // valuesBlockCharCount += chunks[i + 1].length;
        valuesBlockCharCount += chunkEnd[i + 1] - chunkStart[i + 1];
      }
    }
    // Predict the final length: original text, adjusted bind-marker cost,
    // plus one VALUES block and comma per extra batch.
    int length = nativeSql.length();
    //valuesBraceOpenPosition + valuesBlockCharCount;
    length += NativeQuery.calculateBindLength(bindPositions.length * batchSize);
    length -= NativeQuery.calculateBindLength(bindPositions.length);
    length += (valuesBlockCharCount + 1 /*comma*/) * (batchSize - 1 /* initial sql */);

    StringBuilder s = new StringBuilder(length);
    // Add query until end of values parameter block.
    int pos;
    if (bindPositions.length > 0 && params == null) {
      // Add the first values (...) clause, it would be values($1,..., $n), and it matches with
      // the values clause of a simple non-rewritten SQL
      s.append(nativeSql, 0, valuesBraceClosePosition + 1);
      pos = bindPositions.length + 1;
    } else {
      pos = 1;
      batchSize++; // do not use super.toString(params) as it does not work if query ends with --
      // We need to carefully add (...),(...), and we do not want to get (...) --, (...)
      // s.append(super.toString(params));
      s.append(nativeSql, 0, valuesBraceOpenPosition);
    }
    for (int i = 2; i <= batchSize; i++) {
      if (i > 2 || pos != 1) {
        // For "has binds" the first valuds
        s.append(',');
      }
      s.append(nativeSql, chunkStart[0], chunkEnd[0]);
      for (int j = 1; j < chunkStart.length; j++) {
        if (params == null) {
          NativeQuery.appendBindName(s, pos++);
        } else {
          s.append(params.toString(pos++, true));
        }
        s.append(nativeSql, chunkStart[j], chunkEnd[j]);
      }
    }
    // Add trailing content: final query is like original with multi values.
    // This could contain "--" comments, so it is important to add them at end.
    s.append(nativeSql, valuesBraceClosePosition + 1, nativeSql.length());
    sql = s.toString();
    // Predict length only when building sql with $1, $2, ... (that is no specific params given)
    assert params != null || s.length() == length
        : "Predicted length != actual: " + length + " !=" + s.length();
    return sql;
  }

  @Override
  public String toString(ParameterList params) {
    if (getBatchSize() < 2) {
      return super.toString(params);
    }
    return buildNativeSql(params);
  }
}
| 8,389 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/CopyQueryExecutor.java | package com.amazon.redshift.core.v3;
import java.io.IOException;
import java.sql.SQLException;
import java.util.concurrent.atomic.AtomicBoolean;
import com.amazon.redshift.copy.CopyIn;
import com.amazon.redshift.copy.CopyOperation;
import com.amazon.redshift.copy.CopyOut;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.core.Utils;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
//
// Copy subprotocol implementation
//
class CopyQueryExecutor {
  // Owning executor: provides locking, subprotocol BEGIN and message receive helpers.
  private QueryExecutorImpl queryExecutor;
  // May be null-checked before use elsewhere; logging helper.
  RedshiftLogger logger;
  // Raw protocol stream shared with the executor.
  final RedshiftStream pgStream;

  CopyQueryExecutor(QueryExecutorImpl queryExecutor,
      RedshiftLogger logger,
      RedshiftStream pgStream) {
    this.queryExecutor = queryExecutor;
    this.logger = logger;
    this.pgStream = pgStream;
  }
  /**
   * Sends given query to BE to start, initialize and lock connection for a CopyOperation.
   *
   * @param sql COPY FROM STDIN / COPY TO STDOUT statement
   * @param suppressBegin when false, an implicit BEGIN is issued first if needed
   * @return CopyIn or CopyOut operation object
   * @throws SQLException on failure
   */
  CopyOperation startCopy(String sql, boolean suppressBegin)
      throws SQLException {
    // Wait for current ring buffer thread to finish, if any.
    // Shouldn't call from synchronized method, which can cause dead-lock.
    queryExecutor.waitForRingBufferThreadToFinish(false, false, false, null, null);

    synchronized(queryExecutor) {
      queryExecutor.waitOnLock();
      if (!suppressBegin) {
        queryExecutor.doSubprotocolBegin();
      }
      byte[] buf = Utils.encodeUTF8(sql);

      try {
        if(RedshiftLogger.isEnable())
          logger.log(LogLevel.DEBUG, " FE=> Query(CopyStart)");

        // Simple-protocol Query message: 'Q', int32 length, SQL text, NUL.
        pgStream.sendChar('Q');
        pgStream.sendInteger4(buf.length + 4 + 1);
        pgStream.send(buf);
        pgStream.sendChar(0);
        pgStream.flush();

        return processCopyResults(null, true);
        // expect a CopyInResponse or CopyOutResponse to our query above
      } catch (IOException ioe) {
        throw new RedshiftException(GT.tr("Database connection failed when starting copy"),
            RedshiftState.CONNECTION_FAILURE, ioe);
      }
    }
  }
  /**
   * Cancels an active copy operation. For CopyIn this sends a CopyFail message
   * and drains server responses (collecting the expected error chain) until
   * the executor lock is released; for CopyOut a query cancel is issued
   * instead. The executor lock is always released, even on failure.
   *
   * @throws SQLException if the operation is not active, the connection fails,
   *         or the server's responses to the cancel are not as expected
   */
  void cancelCopy(CopyOperationImpl op) throws SQLException {
    if (!queryExecutor.hasLock(op)) {
      throw new RedshiftException(GT.tr("Tried to cancel an inactive copy operation"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }

    SQLException error = null;
    int errors = 0;

    try {
      if (op instanceof CopyIn) {
        synchronized (queryExecutor) {
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, "FE => CopyFail");

          final byte[] msg = Utils.encodeUTF8("Copy cancel requested");
          pgStream.sendChar('f'); // CopyFail
          pgStream.sendInteger4(5 + msg.length);
          pgStream.send(msg);
          pgStream.sendChar(0);
          pgStream.flush();

          do {
            try {
              processCopyResults(op, true); // discard rest of input
            } catch (SQLException se) { // expected error response to failing copy
              errors++;
              if (error != null) {
                // Chain the newest error in front, preserving earlier ones
                // at the tail of its nextException chain.
                SQLException e = se;
                SQLException next;
                while ((next = e.getNextException()) != null) {
                  e = next;
                }
                e.setNextException(error);
              }
              error = se;
            }
          } while (queryExecutor.hasLock(op));
        }
      } else if (op instanceof CopyOut) {
        queryExecutor.sendQueryCancel();
      }

    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Database connection failed when canceling copy operation"),
          RedshiftState.CONNECTION_FAILURE, ioe);
    } finally {
      // Need to ensure the lock isn't held anymore, or else
      // future operations, rather than failing due to the
      // broken connection, will simply hang waiting for this
      // lock.
      synchronized (queryExecutor) {
        if (queryExecutor.hasLock(op)) {
          queryExecutor.unlock(op);
        }
      }
    }

    // A CopyIn cancel must produce exactly one error response from the server.
    if (op instanceof CopyIn) {
      if (errors < 1) {
        throw new RedshiftException(GT.tr("Missing expected error response to copy cancel request"),
            RedshiftState.COMMUNICATION_ERROR);
      } else if (errors > 1) {
        throw new RedshiftException(
            GT.tr("Got {0} error responses to single copy cancel request", String.valueOf(errors)),
            RedshiftState.COMMUNICATION_ERROR, error);
      }
    }
  }
  // Guards against re-entrant/concurrent processing of copy results (see the
  // issue reference in processCopyResults()).
  AtomicBoolean processingCopyResults = new AtomicBoolean(false);
/**
 * Handles copy sub protocol responses from server. Unlocks at end of sub protocol, so operations
 * on pgStream or QueryExecutor are not allowed in a method after calling this!
 *
 * <p>This is a single-threaded protocol state machine: message order on the wire is
 * significant, and the {@code processingCopyResults} flag rejects re-entrant calls.</p>
 *
 * @param block whether to block waiting for input
 * @return CopyIn when COPY FROM STDIN starts; CopyOut when COPY TO STDOUT starts; null when copy
 *         ends; otherwise, the operation given as parameter.
 * @throws SQLException in case of misuse
 * @throws IOException from the underlying connection
 */
CopyOperationImpl processCopyResults(CopyOperationImpl op, boolean block)
    throws SQLException, IOException {

  /*
   * fixes issue #1592 where one thread closes the stream and another is reading it
   */
  if (pgStream.isClosed()) {
    throw new RedshiftException(GT.tr("RedshiftStream is closed",
        op.getClass().getName()), RedshiftState.CONNECTION_DOES_NOT_EXIST);
  }
  /*
   * This is a hack as we should not end up here, but sometimes do with large copy operations.
   */
  if ( processingCopyResults.compareAndSet(false,true) == false ) {
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.INFO, "Ignoring request to process copy results, already processing");
    return null;
  }

  // put this all in a try, finally block and reset the processingCopyResults in the finally clause
  try {
    boolean endReceiving = false;
    SQLException error = null;
    SQLException errors = null;
    int len;

    while (!endReceiving && (block || pgStream.hasMessagePending())) {

      // There is a bug in the server's implementation of the copy
      // protocol. It returns command complete immediately upon
      // receiving the EOF marker in the binary protocol,
      // potentially before we've issued CopyDone. When we are not
      // blocking, we don't think we are done, so we hold off on
      // processing command complete and any subsequent messages
      // until we actually are done with the copy.
      //
      if (!block) {
        int c = pgStream.peekChar();
        if (c == 'C') {
          // CommandComplete
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE CommandStatus, Ignored until CopyDone");
          break;
        }
      }

      // Dispatch on the backend message type byte.
      int c = pgStream.receiveChar();
      switch (c) {

        case 'A': // Asynchronous Notify
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE Asynchronous Notification while copying");

          queryExecutor.receiveAsyncNotify();
          break;

        case 'N': // Notice Response
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE Notification while copying");

          queryExecutor.addWarning(queryExecutor.receiveNoticeResponse());
          break;

        case 'C': // Command Complete
          String status = queryExecutor.receiveCommandStatus();

          try {
            if (op == null) {
              throw new RedshiftException(GT
                  .tr("Received CommandComplete ''{0}'' without an active copy operation", status),
                  RedshiftState.OBJECT_NOT_IN_STATE);
            }
            op.handleCommandStatus(status);
          } catch (SQLException se) {
            error = se;
          }

          // After CommandComplete a ReadyForQuery must follow; block to read it.
          block = true;
          break;

        case 'E': // ErrorMessage (expected response to CopyFail)
          error = queryExecutor.receiveErrorResponse(false);
          // We've received the error and we now expect to receive
          // Ready for query, but we must block because it might still be
          // on the wire and not here yet.
          block = true;
          break;

        case 'G': // CopyInResponse
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE CopyInResponse");

          if (op != null) {
            error = new RedshiftException(GT.tr("Got CopyInResponse from server during an active {0}",
                op.getClass().getName()), RedshiftState.OBJECT_NOT_IN_STATE);
          }

          // Start a new COPY FROM STDIN operation and hand control back to the caller.
          op = new CopyInImpl();
          initCopy(op);
          endReceiving = true;
          break;

        case 'H': // CopyOutResponse
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE CopyOutResponse");

          if (op != null) {
            error = new RedshiftException(GT.tr("Got CopyOutResponse from server during an active {0}",
                op.getClass().getName()), RedshiftState.OBJECT_NOT_IN_STATE);
          }

          // Start a new COPY TO STDOUT operation and hand control back to the caller.
          op = new CopyOutImpl();
          initCopy(op);
          endReceiving = true;
          break;

        case 'W': // CopyBothResponse
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE CopyBothResponse");

          if (op != null) {
            error = new RedshiftException(GT.tr("Got CopyBothResponse from server during an active {0}",
                op.getClass().getName()), RedshiftState.OBJECT_NOT_IN_STATE);
          }

          // Start a new bidirectional copy operation and hand control back to the caller.
          op = new CopyDualImpl();
          initCopy(op);
          endReceiving = true;
          break;

        case 'd': // CopyData
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE CopyData");

          // Message length includes its own 4-byte length field.
          len = pgStream.receiveInteger4() - 4;

          assert len > 0 : "Copy Data length must be greater than 4";

          byte[] buf = pgStream.receive(len);
          if (op == null) {
            error = new RedshiftException(GT.tr("Got CopyData without an active copy operation"),
                RedshiftState.OBJECT_NOT_IN_STATE);
          } else if (!(op instanceof CopyOut)) {
            error = new RedshiftException(
                GT.tr("Unexpected copydata from server for {0}", op.getClass().getName()),
                RedshiftState.COMMUNICATION_ERROR);
          } else {
            op.handleCopydata(buf);
          }
          // Return after each data row so the caller can consume it incrementally.
          endReceiving = true;
          break;

        case 'c': // CopyDone (expected after all copydata received)
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE CopyDone");

          len = pgStream.receiveInteger4() - 4;
          if (len > 0) {
            pgStream.receive(len); // not in specification; should never appear
          }

          if (!(op instanceof CopyOut)) {
            error = new RedshiftException("Got CopyDone while not copying from server",
                RedshiftState.OBJECT_NOT_IN_STATE);
          }

          // keep receiving since we expect a CommandComplete
          block = true;
          break;
        case 'S': // Parameter Status
          try {
            queryExecutor.receiveParameterStatus();
          } catch (SQLException e) {
            error = e;
            endReceiving = true;
          }
          break;

        case 'Z': // ReadyForQuery: After FE:CopyDone => BE:CommandComplete
          queryExecutor.receiveRFQ();
          if (queryExecutor.hasLock(op)) {
            queryExecutor.unlock(op);
          }
          // Copy sub-protocol is over; connection is back in normal query mode.
          op = null;
          endReceiving = true;
          break;

        // If the user sends a non-copy query, we've got to handle some additional things.
        //
        case 'T': // Row Description (response to Describe)
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE RowDescription (during copy ignored)");

          queryExecutor.skipMessage();
          break;

        case 'D': // DataRow
          if(RedshiftLogger.isEnable())
            logger.log(LogLevel.DEBUG, " <=BE DataRow (during copy ignored)");

          queryExecutor.skipMessage();
          break;

        default:
          throw new IOException(
              GT.tr("Unexpected packet type during copy: {0}", Integer.toString(c)));
      }

      // Collect errors into a neat chain for completeness
      if (error != null) {
        if (errors != null) {
          error.setNextException(errors);
        }
        errors = error;
        error = null;
      }
    }

    if (errors != null) {
      throw errors;
    }
    return op;
  } finally {
    /*
    reset here in the finally block to make sure it really is cleared
    */
    processingCopyResults.set(false);
  }
}
/**
 * Locks connection and calls initializer for a new CopyOperation Called via startCopy ->
 * processCopyResults.
 *
 * <p>Reads the remainder of the CopyInResponse/CopyOutResponse/CopyBothResponse message
 * (overall row format byte plus one 2-byte format code per column), then takes the
 * executor lock before initializing the operation. The read order must match the wire
 * layout exactly.</p>
 *
 * @param op an uninitialized CopyOperation
 * @throws SQLException on locking failure
 * @throws IOException on database connection failure
 */
void initCopy(CopyOperationImpl op) throws SQLException, IOException {
  synchronized(queryExecutor) {
    pgStream.receiveInteger4(); // length not used
    int rowFormat = pgStream.receiveChar();
    int numFields = pgStream.receiveInteger2();
    int[] fieldFormats = new int[numFields];

    for (int i = 0; i < numFields; i++) {
      fieldFormats[i] = pgStream.receiveInteger2();
    }

    queryExecutor.lock(op);
    op.init(queryExecutor, rowFormat, fieldFormats);
  }
}
/**
 * Finishes writing to copy and unlocks connection.
 *
 * <p>Sends a CopyDone ('c') message, then drains copy results until the server releases
 * the lock on this operation (i.e. ReadyForQuery has been processed).</p>
 *
 * @param op the copy operation presumably currently holding lock on this connection
 * @return number of rows updated for server versions 8.2 or newer
 * @throws SQLException on failure
 */
long endCopy(CopyOperationImpl op) throws SQLException {
  synchronized(queryExecutor) {
    if (!queryExecutor.hasLock(op)) {
      throw new RedshiftException(GT.tr("Tried to end inactive copy"), RedshiftState.OBJECT_NOT_IN_STATE);
    }

    try {
      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, " FE=> CopyDone");

      pgStream.sendChar('c'); // CopyDone
      pgStream.sendInteger4(4); // message length: just the length field itself
      pgStream.flush();

      // Keep processing until processCopyResults() unlocks the operation on ReadyForQuery.
      do {
        processCopyResults(op, true);
      } while (queryExecutor.hasLock(op));
      return op.getHandledRowCount();
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Database connection failed when ending copy"),
          RedshiftState.CONNECTION_FAILURE, ioe);
    }
  }
}
/**
 * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
 * returns CommandComplete, which should not happen
 *
 * <p>Writes a single CopyData ('d') message framing {@code siz} bytes from {@code data}.
 * Data is buffered by the stream; see {@link #flushCopy} for flushing.</p>
 *
 * @param op the CopyIn operation presumably currently holding lock on this connection
 * @param data bytes to send
 * @param off index of first byte to send (usually 0)
 * @param siz number of bytes to send (usually data.length)
 * @throws SQLException on failure
 */
void writeToCopy(CopyOperationImpl op, byte[] data, int off, int siz)
    throws SQLException {
  synchronized(queryExecutor) {
    if (!queryExecutor.hasLock(op)) {
      throw new RedshiftException(GT.tr("Tried to write to an inactive copy operation"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }

    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " FE=> CopyData({0})", siz);

    try {
      pgStream.sendChar('d');
      pgStream.sendInteger4(siz + 4); // payload length + 4-byte length field
      pgStream.send(data, off, siz);
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Database connection failed when writing to copy"),
          RedshiftState.CONNECTION_FAILURE, ioe);
    }
  }
}
/**
 * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
 * returns CommandComplete, which should not happen
 *
 * <p>Variant of {@code writeToCopy(CopyOperationImpl, byte[], int, int)} that frames the
 * contents of a ByteStreamWriter in one CopyData ('d') message.</p>
 *
 * @param op the CopyIn operation presumably currently holding lock on this connection
 * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
 * @throws SQLException on failure
 */
public void writeToCopy(CopyOperationImpl op, ByteStreamWriter from)
    throws SQLException {
  synchronized(queryExecutor) {
    if (!queryExecutor.hasLock(op)) {
      throw new RedshiftException(GT.tr("Tried to write to an inactive copy operation"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }

    int siz = from.getLength();
    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " FE=> CopyData({0})", siz);

    try {
      pgStream.sendChar('d');
      pgStream.sendInteger4(siz + 4); // payload length + 4-byte length field
      pgStream.send(from);
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Database connection failed when writing to copy"),
          RedshiftState.CONNECTION_FAILURE, ioe);
    }
  }
}
/**
 * Flushes buffered CopyData messages to the server for an active copy operation.
 *
 * @param op the copy operation presumably currently holding lock on this connection
 * @throws SQLException if the operation does not hold the lock or the flush fails
 */
public void flushCopy(CopyOperationImpl op) throws SQLException {
  synchronized(queryExecutor) {
    if (!queryExecutor.hasLock(op)) {
      throw new RedshiftException(GT.tr("Tried to write to an inactive copy operation"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }

    try {
      pgStream.flush();
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Database connection failed when writing to copy"),
          RedshiftState.CONNECTION_FAILURE, ioe);
    }
  }
}
/**
 * Wait for a row of data to be received from server on an active copy operation
 * Connection gets unlocked by processCopyResults() at end of operation.
 *
 * @param op the copy operation presumably currently holding lock on this connection
 * @param block whether to block waiting for input
 * @throws SQLException on any failure
 */
void readFromCopy(CopyOperationImpl op, boolean block) throws SQLException {
  synchronized(queryExecutor) {
    if (!queryExecutor.hasLock(op)) {
      throw new RedshiftException(GT.tr("Tried to read from inactive copy"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }

    try {
      processCopyResults(op, block); // expect a call to handleCopydata() to store the data
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Database connection failed when reading from copy"),
          RedshiftState.CONNECTION_FAILURE, ioe);
    }
  }
}
}
| 8,390 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/ConnectionFactoryImpl.java | /*
* Copyright (c) 2003, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.core.ConnectionFactory;
import com.amazon.redshift.core.QueryExecutor;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.core.ServerVersion;
import com.amazon.redshift.core.SetupQueryRunner;
import com.amazon.redshift.core.SocketFactoryFactory;
import com.amazon.redshift.core.Tuple;
import com.amazon.redshift.core.Utils;
import com.amazon.redshift.core.Version;
import com.amazon.redshift.hostchooser.CandidateHost;
import com.amazon.redshift.hostchooser.GlobalHostStatusTracker;
import com.amazon.redshift.hostchooser.HostChooser;
import com.amazon.redshift.hostchooser.HostChooserFactory;
import com.amazon.redshift.hostchooser.HostRequirement;
import com.amazon.redshift.hostchooser.HostStatus;
import com.amazon.redshift.jdbc.SslMode;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.sspi.ISSPIClient;
import com.amazon.redshift.util.DriverInfo;
import com.amazon.redshift.util.ExtensibleDigest;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.HostSpec;
import com.amazon.redshift.util.MD5Digest;
import com.amazon.redshift.util.RedshiftConstants;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import com.amazon.redshift.util.ServerErrorMessage;
import com.amazonaws.util.StringUtils;

import java.io.IOException;
import java.net.ConnectException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TimeZone;

import javax.net.SocketFactory;
/**
* ConnectionFactory implementation for version 3 (7.4+) connections.
*
* @author Oliver Jowett (oliver@opencloud.com), based on the previous implementation
*/
public class ConnectionFactoryImpl extends ConnectionFactory {
// Connection-scoped logger, assigned in openConnectionImpl().
private RedshiftLogger logger;

// Authentication request codes carried in the backend's 'R' message.
// Values 0-12 mirror the PostgreSQL v3 protocol AuthenticationRequest codes;
// 13 and 14 are Redshift-specific extensions.
private static final int AUTH_REQ_OK = 0;
private static final int AUTH_REQ_KRB4 = 1;
private static final int AUTH_REQ_KRB5 = 2;
private static final int AUTH_REQ_PASSWORD = 3;
private static final int AUTH_REQ_CRYPT = 4;
private static final int AUTH_REQ_MD5 = 5;
private static final int AUTH_REQ_SCM = 6;
private static final int AUTH_REQ_GSS = 7;
private static final int AUTH_REQ_GSS_CONTINUE = 8;
private static final int AUTH_REQ_SSPI = 9;
private static final int AUTH_REQ_SASL = 10;
private static final int AUTH_REQ_SASL_CONTINUE = 11;
private static final int AUTH_REQ_SASL_FINAL = 12;
private static final int AUTH_REQ_DIGEST = 13;
private static final int AUTH_REQ_IDP = 14; /* Redshift Native IDP Integration */

// Digest algorithm selectors for AUTH_REQ_DIGEST.
private static final int AUTH_DIGEST_SHA256 = 0;
private static final int AUTH_DIGEST_SCRYPT = 1;
private static final int AUTH_DIGEST_ARGON2 = 2;

// Server protocol versions negotiated via the "client_protocol_version"
// startup parameter (see getParametersForStartup()).
public static int BASE_SERVER_PROTOCOL_VERSION = 0;
public static int EXTENDED_RESULT_METADATA_SERVER_PROTOCOL_VERSION = 1;
public static int BINARY_PROTOCOL_VERSION = 2;
public static int EXTENDED2_RESULT_METADATA_SERVER_PROTOCOL_VERSION = 3; // Case sensitivity via COLLATION_INFORMATION
public static int DEFAULT_SERVER_PROTOCOL_VERSION = EXTENDED2_RESULT_METADATA_SERVER_PROTOCOL_VERSION;

// Identity-provider type names sent in the "idp_type" startup parameter.
private static final String IDP_TYPE_AWS_IDC = "AwsIdc";
private static final String IDP_TYPE_OKTA = "Okta";
private static final String IDP_TYPE_AZUREAD = "AzureAD";
// Token type sent in the "token_type" startup parameter for IdC flows.
private static final String TOKEN_TYPE_ACCESS_TOKEN = "ACCESS_TOKEN";
/**
 * Reflectively instantiates the Windows-only SSPI client so that this class has no
 * compile-time dependency on platform-specific code.
 *
 * @param pgStream the connection stream the SSPI client will negotiate over
 * @param spnServiceClass service class used to build the SPN
 * @param enableNegotiate whether SPNEGO negotiation is allowed
 * @return a freshly constructed SSPI client
 * @throws IllegalStateException if the implementation class cannot be loaded or constructed
 */
private ISSPIClient createSSPI(RedshiftStream pgStream,
    String spnServiceClass,
    boolean enableNegotiate) {
  try {
    @SuppressWarnings("unchecked")
    Class<ISSPIClient> implClass =
        (Class<ISSPIClient>) Class.forName("com.amazon.redshift.sspi.SSPIClient");
    java.lang.reflect.Constructor<ISSPIClient> ctor =
        implClass.getDeclaredConstructor(RedshiftStream.class, String.class, boolean.class);
    return ctor.newInstance(pgStream, spnServiceClass, enableNegotiate);
  } catch (Exception e) {
    // Until Java 7 there is no ReflectiveOperationException, so catch broadly here.
    throw new IllegalStateException("Unable to load com.amazon.redshift.sspi.SSPIClient."
        + " Please check that SSPIClient is included in your pgjdbc distribution.", e);
  }
}
/**
 * Attempts a full connection handshake against a single host: open the socket,
 * optionally upgrade to SSL, send the startup packet, and authenticate.
 * On any failure the partially opened stream is closed before rethrowing.
 *
 * @param user database user name
 * @param database database name
 * @param info connection properties
 * @param socketFactory factory used to create the underlying socket
 * @param hostSpec host/port to connect to
 * @param sslMode effective SSL negotiation mode for this attempt
 * @return an authenticated stream ready for final startup
 * @throws SQLException on authentication/protocol failure
 * @throws IOException on network failure
 */
private RedshiftStream tryConnect(String user, String database,
    Properties info, SocketFactory socketFactory, HostSpec hostSpec,
    SslMode sslMode)
    throws SQLException, IOException {
  int connectTimeout = RedshiftProperty.CONNECT_TIMEOUT.getInt(info) * 1000;

  RedshiftStream newStream = null;
  try
  {
    newStream = constructNewStream(socketFactory, hostSpec, connectTimeout, logger, true, info);

    // Construct and send an ssl startup packet if requested.
    newStream = enableSSL(newStream, sslMode, info, connectTimeout);

    List<String[]> paramList = getParametersForStartup(user, database, info, true);
    sendStartupPacket(newStream, paramList);
    newStream.changeStream(false, info);

    // Do authentication (until AuthenticationOk).
    doAuthentication(newStream, hostSpec.getHost(), user, info);
  }
  catch(Exception ex) {
    closeStream(newStream);
    throw ex;
  }

  return newStream;
}
/**
 * Creates a new RedshiftStream to the given host and applies socket-level tuning
 * from the connection properties: socket timeout, max result buffer, TCP keep-alive,
 * and optional SO_RCVBUF/SO_SNDBUF sizes.
 *
 * @param socketFactory factory used to create the socket
 * @param hostSpec host/port to connect to
 * @param connectTimeout connect timeout in milliseconds
 * @param logger logger for diagnostics
 * @param disableCompressionForSSL whether compression is disabled pending SSL negotiation
 * @param info connection properties
 * @return the configured stream
 * @throws SQLException on invalid property values
 * @throws IOException on connect/socket configuration failure
 */
public RedshiftStream constructNewStream(SocketFactory socketFactory, HostSpec hostSpec, int connectTimeout, RedshiftLogger logger, Boolean disableCompressionForSSL, Properties info) throws SQLException, IOException
{
  RedshiftStream newStream = new RedshiftStream(socketFactory, hostSpec, connectTimeout, logger, disableCompressionForSSL, info);

  // Set the socket timeout if the "socketTimeout" property has been set.
  int socketTimeout = RedshiftProperty.SOCKET_TIMEOUT.getInt(info);
  if (socketTimeout > 0) {
    newStream.getSocket().setSoTimeout(socketTimeout * 1000);
  }

  String maxResultBuffer = RedshiftProperty.MAX_RESULT_BUFFER.get(info);
  newStream.setMaxResultBuffer(maxResultBuffer);

  // Enable TCP keep-alive probe if required.
  boolean requireTCPKeepAlive = RedshiftProperty.TCP_KEEP_ALIVE.getBoolean(info);
  newStream.getSocket().setKeepAlive(requireTCPKeepAlive);

  // Try to set SO_SNDBUF and SO_RECVBUF socket options, if requested.
  // If receiveBufferSize and send_buffer_size are set to a value greater
  // than 0, adjust. -1 means use the system default, 0 is ignored since not
  // supported.

  // Set SO_RECVBUF read buffer size
  int receiveBufferSize = RedshiftProperty.RECEIVE_BUFFER_SIZE.getInt(info);
  if (receiveBufferSize > -1) {
    // value of 0 not a valid buffer size value
    if (receiveBufferSize > 0) {
      newStream.getSocket().setReceiveBufferSize(receiveBufferSize);
    } else {
      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.INFO, "Ignore invalid value for receiveBufferSize: {0}", receiveBufferSize);
    }
  }

  // Set SO_SNDBUF write buffer size
  int sendBufferSize = RedshiftProperty.SEND_BUFFER_SIZE.getInt(info);
  if (sendBufferSize > -1) {
    if (sendBufferSize > 0) {
      newStream.getSocket().setSendBufferSize(sendBufferSize);
    } else {
      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.INFO, "Ignore invalid value for sendBufferSize: {0}", sendBufferSize);
    }
  }

  if(RedshiftLogger.isEnable()) {
    logger.log(LogLevel.DEBUG, "Receive Buffer Size is {0}", newStream.getSocket().getReceiveBufferSize());
    logger.log(LogLevel.DEBUG, "Send Buffer Size is {0}", newStream.getSocket().getSendBufferSize());
  }

  return newStream;
}
/**
 * Opens a connection by iterating over candidate hosts (per targetServerType) and
 * attempting a handshake against each. Handles SSL mode fallbacks (PREFER downgrades
 * to plaintext, ALLOW upgrades to SSL on auth failure), tracks per-host status for
 * failover, and runs initial queries before returning a ready QueryExecutor.
 *
 * @param hostSpecs candidate hosts, in preference order
 * @param user database user name
 * @param database database name
 * @param info connection properties
 * @param logger logger to use for this connection
 * @return an initialized QueryExecutor for the first acceptable host
 * @throws SQLException if no host can satisfy the requested targetServerType
 */
@Override
public QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, String user, String database,
    Properties info, RedshiftLogger logger) throws SQLException {
  this.logger = logger;
  SslMode sslMode = SslMode.of(info);

  HostRequirement targetServerType;
  String targetServerTypeStr = RedshiftProperty.TARGET_SERVER_TYPE.get(info);
  try {
    targetServerType = HostRequirement.getTargetServerType(targetServerTypeStr);
  } catch (IllegalArgumentException ex) {
    throw new RedshiftException(
        GT.tr("Invalid targetServerType value: {0}", targetServerTypeStr),
        RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
  }

  SocketFactory socketFactory = SocketFactoryFactory.getSocketFactory(info);

  HostChooser hostChooser =
      HostChooserFactory.createHostChooser(hostSpecs, targetServerType, info);
  Iterator<CandidateHost> hostIter = hostChooser.iterator();
  Map<HostSpec, HostStatus> knownStates = new HashMap<HostSpec, HostStatus>();
  while (hostIter.hasNext()) {
    CandidateHost candidateHost = hostIter.next();
    HostSpec hostSpec = candidateHost.hostSpec;

    if(RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, "Trying to establish a protocol version 3 connection to {0}", hostSpec);

    // Note: per-connect-attempt status map is used here instead of GlobalHostStatusTracker
    // for the case when "no good hosts" match (e.g. all the hosts are known as "connectfail")
    // In that case, the system tries to connect to each host in order, thus it should not look into
    // GlobalHostStatusTracker
    HostStatus knownStatus = knownStates.get(hostSpec);
    if (knownStatus != null && !candidateHost.targetServerType.allowConnectingTo(knownStatus)) {
      if(RedshiftLogger.isEnable()) {
        logger.log(LogLevel.DEBUG, "Known status of host {0} is {1}, and required status was {2}. Will try next host",
                    new Object[]{hostSpec, knownStatus, candidateHost.targetServerType});
      }

      continue;
    }

    //
    // Establish a connection.
    //
    RedshiftStream newStream = null;
    try {
      try {
        newStream = tryConnect(user, database, info, socketFactory, hostSpec, sslMode);
      } catch (SQLException e) {
        if (sslMode == SslMode.PREFER
            && RedshiftState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
          // Try non-SSL connection to cover case like "non-ssl only db"
          // Note: PREFER allows loss of encryption, so no significant harm is made
          Throwable ex = null;
          try {
            newStream =
                tryConnect(user, database, info, socketFactory, hostSpec, SslMode.DISABLE);

            if(RedshiftLogger.isEnable())
              logger.log(LogLevel.DEBUG, "Downgraded to non-encrypted connection for host {0}",
                  hostSpec);
          } catch (SQLException ee) {
            ex = ee;
          } catch (IOException ee) {
            ex = ee; // Can't use multi-catch in Java 6 :(
          }

          if (ex != null) {
            if(RedshiftLogger.isEnable())
              logger.log(LogLevel.DEBUG, ex, "sslMode==PREFER, however non-SSL connection failed as well");

            // non-SSL failed as well, so re-throw original exception
            //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
            // Add non-SSL exception as suppressed
            e.addSuppressed(ex);
            //JCP! endif
            throw e;
          }
        } else if (sslMode == SslMode.ALLOW
            && RedshiftState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
          // Try using SSL
          Throwable ex = null;
          try {
            newStream =
                tryConnect(user, database, info, socketFactory, hostSpec, SslMode.REQUIRE);

            if(RedshiftLogger.isEnable())
              logger.log(LogLevel.DEBUG, "Upgraded to encrypted connection for host {0}",
                  hostSpec);
          } catch (SQLException ee) {
            ex = ee;
          } catch (IOException ee) {
            ex = ee; // Can't use multi-catch in Java 6 :(
          }

          if (ex != null) {
            if(RedshiftLogger.isEnable())
              logger.log(LogLevel.DEBUG, ex, "sslMode==ALLOW, however SSL connection failed as well");

            // non-SSL failed as well, so re-throw original exception
            //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
            // Add SSL exception as suppressed
            e.addSuppressed(ex);
            //JCP! endif
            throw e;
          }
        } else {
          throw e;
        }
      }

      int cancelSignalTimeout = RedshiftProperty.CANCEL_SIGNAL_TIMEOUT.getInt(info) * 1000;

      // Do final startup.
      QueryExecutor queryExecutor = new QueryExecutorImpl(newStream, user, database,
          cancelSignalTimeout, info, logger);

      // Check Primary or Secondary
      HostStatus hostStatus = HostStatus.ConnectOK;
      if (candidateHost.targetServerType != HostRequirement.any) {
        hostStatus = isPrimary(queryExecutor) ? HostStatus.Primary : HostStatus.Secondary;
      }
      GlobalHostStatusTracker.reportHostStatus(hostSpec, hostStatus);
      knownStates.put(hostSpec, hostStatus);
      if (!candidateHost.targetServerType.allowConnectingTo(hostStatus)) {
        queryExecutor.close();
        continue;
      }

      runInitialQueries(queryExecutor, info);

      // And we're done.
      return queryExecutor;
    } catch (ConnectException cex) {
      // Added by Peter Mount <peter@retep.org.uk>
      // ConnectException is thrown when the connection cannot be made.
      // we trap this an return a more meaningful message for the end user
      GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
      knownStates.put(hostSpec, HostStatus.ConnectFail);
      if (hostIter.hasNext()) {
        if(RedshiftLogger.isEnable())
          logger.log(LogLevel.DEBUG, cex, "ConnectException occurred while connecting to {0}", hostSpec);
        // still more addresses to try
        continue;
      }
      throw new RedshiftException(GT.tr(
          "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.",
          hostSpec), RedshiftState.CONNECTION_UNABLE_TO_CONNECT, cex);
    } catch (IOException ioe) {
      closeStream(newStream);
      GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
      knownStates.put(hostSpec, HostStatus.ConnectFail);
      if (hostIter.hasNext()) {
        if(RedshiftLogger.isEnable())
          logger.log(LogLevel.DEBUG, ioe, "IOException occurred while connecting to {0}", hostSpec);
        // still more addresses to try
        continue;
      }
      throw new RedshiftException(GT.tr("The connection attempt failed."),
          RedshiftState.CONNECTION_UNABLE_TO_CONNECT, ioe);
    } catch (SQLException se) {
      closeStream(newStream);
      GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
      knownStates.put(hostSpec, HostStatus.ConnectFail);
      if (hostIter.hasNext()) {
        if(RedshiftLogger.isEnable())
          logger.log(LogLevel.DEBUG, se, "SQLException occurred while connecting to {0}", hostSpec);
        // still more addresses to try
        continue;
      }
      throw se;
    }
  }
  throw new RedshiftException(GT
      .tr("Could not find a server with specified targetServerType: {0}", targetServerType),
      RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
}
/**
 * Builds the key/value parameter list for the startup packet: user/database,
 * session defaults (encoding, DateStyle, optional TimeZone), compression and
 * protocol-version negotiation, driver/OS identification, and — for Redshift
 * native-auth plugins — IdP-related parameters.
 *
 * @param user database user name (may be omitted for native-auth flows)
 * @param database database name
 * @param info connection properties
 * @param driverOsVersionParams whether to include driver/OS/plugin/protocol parameters
 * @return ordered list of {key, value} pairs for sendStartupPacket()
 */
private List<String[]> getParametersForStartup(String user, String database, Properties info, boolean driverOsVersionParams) {
  List<String[]> paramList = new ArrayList<String[]>();
  boolean redshiftNativeAuth = false;
  String idpType = "";
  String tokenType = "";
  String identityNamepsace = ""; // NOTE(review): "Namepsace" is a typo for "Namespace" (local name only)
  String idcClientDisplayName = "";
  String pluginName = RedshiftProperty.CREDENTIALS_PROVIDER.get(info);
  if(RedshiftLogger.isEnable())
    logger.log(LogLevel.INFO, "using plugin: " + pluginName);

  // Map the configured credentials-provider plugin to an IdP type / token type.
  if(pluginName != null)
  {
    if(pluginName.equalsIgnoreCase(RedshiftConstants.BASIC_JWT_PLUGIN)
        || pluginName.equalsIgnoreCase(RedshiftConstants.NATIVE_IDP_AZUREAD_BROWSER_PLUGIN))
    {
      idpType = IDP_TYPE_AZUREAD;
      redshiftNativeAuth = true;
    }
    else if(pluginName.equalsIgnoreCase(RedshiftConstants.NATIVE_IDP_OKTA_BROWSER_PLUGIN)||
            pluginName.equalsIgnoreCase(RedshiftConstants.NATIVE_IDP_OKTA_NON_BROWSER_PLUGIN))
    {
      idpType = IDP_TYPE_OKTA;
      redshiftNativeAuth = true;
    }
    else if(pluginName.equalsIgnoreCase(RedshiftConstants.IDC_BROWSER_PLUGIN))
    {
      idpType = IDP_TYPE_AWS_IDC;
      tokenType = TOKEN_TYPE_ACCESS_TOKEN;
      identityNamepsace = RedshiftProperty.IDC_IDENTITY_NAMESPACE.get(info);
      idcClientDisplayName = RedshiftProperty.IDC_CLIENT_DISPLAY_NAME.get(info);
      redshiftNativeAuth = true;
    } else if(pluginName.equalsIgnoreCase(RedshiftConstants.IDP_TOKEN_PLUGIN))
    {
      idpType = IDP_TYPE_AWS_IDC;
      identityNamepsace = RedshiftProperty.IDC_IDENTITY_NAMESPACE.get(info);
      tokenType = RedshiftProperty.TOKEN_TYPE.get(info);
      redshiftNativeAuth = true;
    }
  }

  // For native auth the user may be supplied by the IdP, so it is optional here.
  if(!redshiftNativeAuth)
    paramList.add(new String[]{"user", user});
  else {
    if(user != null && user.length() > 0)
      paramList.add(new String[]{"user", user});
  }

  paramList.add(new String[]{"database", database});
  paramList.add(new String[]{"client_encoding", "UTF8"});
  paramList.add(new String[]{"DateStyle", "ISO"});

  if(RedshiftProperty.CONNECTION_TIMEZONE.get(info).equalsIgnoreCase("LOCAL"))
  {
    // Sets session level timezone to JVM timezone.
    paramList.add(new String[]{"TimeZone", createRedshiftTimeZone()});
  }

  paramList.add(new String[]{"_pq_.compression", info.getProperty("compression", RedshiftProperty.COMPRESSION.get(info))});

  Version assumeVersion = ServerVersion.from(RedshiftProperty.ASSUME_MIN_SERVER_VERSION.get(info));

  if (assumeVersion.getVersionNum() >= ServerVersion.v9_0.getVersionNum()) {
    // User is explicitly telling us this is a 9.0+ server so set properties here:
    paramList.add(new String[]{"extra_float_digits", "3"});
  } else {
    // User has not explicitly told us that this is a 9.0+ server so stick to old default:
    paramList.add(new String[]{"extra_float_digits", "2"});
  }

  String appName = RedshiftProperty.APPLICATION_NAME.get(info);
  if(appName == null)
  {
    // Default the application name to the calling thread and the bottom stack frame.
    StackTraceElement[] stacktrace = Thread.currentThread().getStackTrace();
    appName = "[" + Thread.currentThread().getName() + "]"
                + stacktrace[stacktrace.length-1].toString();
  }

  // NOTE(review): appName is always non-null at this point, so this check is
  // redundant but harmless.
  if (appName != null) {
    paramList.add(new String[]{"application_name", appName});
  }

  if(driverOsVersionParams) {
    String driver_version = DriverInfo.DRIVER_FULL_NAME;
    paramList.add(new String[]{"driver_version",driver_version});
    String os_version = "";

    try {
      os_version = System.getProperty("os.name") + " " + System.getProperty("os.version") + " " + System.getProperty("os.arch");
    }
    catch (Exception e) {
      os_version = "Unknown";
    }

    paramList.add(new String[]{"os_version",os_version});
    if (pluginName != null && pluginName.length() != 0) {
      paramList.add(new String[]{"plugin_name",pluginName});
    }

    // Send protocol version as 2, so server can support Binary protocol (v2), send optimized extended RSMD (v1).
    String clientProtocolVersion = info.getProperty("client_protocol_version", Integer.toString(DEFAULT_SERVER_PROTOCOL_VERSION)); // Undocumented property to lower the protocol version.
    paramList.add(new String[]{"client_protocol_version",clientProtocolVersion});
  } // New parameters

  // Redshift Native Auth values
  if(redshiftNativeAuth) {
    if(!StringUtils.isNullOrEmpty(idpType)) {
      paramList.add(new String[]{"idp_type",idpType});
    }
    if(RedshiftLogger.isEnable())
      logger.logDebug("Using idp_type=" + idpType);

    String providerName = RedshiftProperty.PROVIDER_NAME.get(info);
    if (!StringUtils.isNullOrEmpty(providerName)) {
      paramList.add(new String[]{"provider_name",providerName});
    }
    if(RedshiftLogger.isEnable())
      logger.logDebug("Using provider_name=" + providerName);

    if(!StringUtils.isNullOrEmpty(tokenType)) {
      paramList.add(new String[]{"token_type",tokenType});
    }
    if(RedshiftLogger.isEnable())
      logger.logDebug("Using token_type=" + tokenType);

    if(!StringUtils.isNullOrEmpty(identityNamepsace)) {
      paramList.add(new String[]{"identity_namespace",identityNamepsace});
    }
    if(RedshiftLogger.isEnable())
      logger.logDebug("Using identity_namespace=" + identityNamepsace);

    if(!StringUtils.isNullOrEmpty(idcClientDisplayName)) {
      paramList.add(new String[]{"idc_client_display_name", idcClientDisplayName});
    }
    if(RedshiftLogger.isEnable())
      logger.logDebug("Using idc_client_display_name=" + idcClientDisplayName);
  }

  String replication = RedshiftProperty.REPLICATION.get(info);
  if (replication != null && assumeVersion.getVersionNum() >= ServerVersion.v9_4.getVersionNum()) {
    paramList.add(new String[]{"replication", replication});
  }

  String currentSchema = RedshiftProperty.CURRENT_SCHEMA.get(info);
  if (currentSchema != null) {
    paramList.add(new String[]{"search_path", currentSchema});
  }

  String options = RedshiftProperty.OPTIONS.get(info);
  if (options != null) {
    paramList.add(new String[]{"options", options});
  }

  return paramList;
}
/**
 * Convert Java time zone to Redshift time zone. All others stay the same except that GMT+nn
 * changes to GMT-nn and vise versa.
 *
 * @return The current JVM time zone in Redshift format.
 */
private static String createRedshiftTimeZone() {
  final String id = TimeZone.getDefault().getID();

  // Only IDs of the form "GMT+..." / "GMT-..." need their sign flipped;
  // everything else (including plain "GMT") passes through untouched.
  if (!id.startsWith("GMT") || id.length() <= 3) {
    return id;
  }

  final char sign = id.charAt(3);
  if (sign == '+') {
    return "GMT-" + id.substring(4);
  }
  if (sign == '-') {
    return "GMT+" + id.substring(4);
  }

  // Unknown character after "GMT": leave the ID as-is.
  return id;
}
/**
 * Performs the SSLRequest handshake when the SSL mode calls for it.
 * DISABLE/ALLOW return the plaintext stream unchanged; otherwise an 8-byte
 * SSLRequest (magic 1234/5679) is sent and the single-byte response handled:
 * 'S' upgrades the stream via MakeSSL, 'N' refuses (fatal if encryption is
 * required), 'E' means the server predates the handshake (reconnect plaintext
 * unless encryption is required).
 *
 * @param pgStream the freshly opened plaintext stream
 * @param sslMode effective SSL mode
 * @param info connection properties
 * @param connectTimeout connect timeout in ms, used when reconnecting after 'E'
 * @return the (possibly SSL-wrapped or reconnected) stream
 * @throws IOException on network failure
 * @throws RedshiftException on refusal when encryption is required, or protocol violation
 */
private RedshiftStream enableSSL(RedshiftStream pgStream, SslMode sslMode, Properties info,
    int connectTimeout)
    throws IOException, RedshiftException {
  if (sslMode == SslMode.DISABLE) {
    return pgStream;
  }
  if (sslMode == SslMode.ALLOW) {
    // Allow ==> start with plaintext, use encryption if required by server
    return pgStream;
  }

  if(RedshiftLogger.isEnable())
    logger.log(LogLevel.DEBUG, " FE=> SSLRequest");

  // Send SSL request packet
  pgStream.sendInteger4(8);
  pgStream.sendInteger2(1234);
  pgStream.sendInteger2(5679);
  pgStream.flush();

  // Now get the response from the backend, one of N, E, S.
  int beresp = pgStream.receiveChar();
  switch (beresp) {
    case 'E':
      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, " <=BE SSLError");

      // An error occurred, so pass the error message to the
      // user.
      //
      int elen = pgStream.receiveInteger4();

      ServerErrorMessage errorMsg =
          new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));

      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, " <=BE ErrorMessage({0})", errorMsg);

      // Server doesn't even know about the SSL handshake protocol
      if (sslMode.requireEncryption()) {
        throw new RedshiftException(errorMsg, RedshiftProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
//        throw new RedshiftException(GT.tr("The server does not support SSL."),
//            RedshiftState.CONNECTION_REJECTED);
      }

      // We have to reconnect to continue.
      pgStream.close();
      return new RedshiftStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), connectTimeout, logger, true, info);

    case 'N':
      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, " <=BE SSLRefused");

      // Server does not support ssl
      if (sslMode.requireEncryption()) {
        throw new RedshiftException(GT.tr("The server does not support SSL."),
            RedshiftState.CONNECTION_REJECTED);
      }

      return pgStream;

    case 'S':
      if(RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, " <=BE SSLOk");

      // Server supports ssl
      com.amazon.redshift.ssl.MakeSSL.convert(pgStream, info);
      return pgStream;

    default:
      throw new RedshiftException(GT.tr("An error occurred while setting up the SSL connection."),
          RedshiftState.PROTOCOL_VIOLATION);
  }
}
/**
 * Encodes and sends the v3 startup packet: a 4-byte total length, the protocol
 * version (major 3, minor 0), then NUL-terminated UTF-8 key/value pairs followed
 * by a terminating NUL byte.
 *
 * @param pgStream stream to write the packet to
 * @param params ordered {key, value} pairs from getParametersForStartup()
 * @throws IOException on network failure
 */
private void sendStartupPacket(RedshiftStream pgStream, List<String[]> params)
    throws IOException {
  if(RedshiftLogger.isEnable()) {
    StringBuilder details = new StringBuilder();
    for (int i = 0; i < params.size(); ++i) {
      if (i != 0) {
        details.append(", ");
      }
      details.append(params.get(i)[0]);
      details.append("=");
      details.append(params.get(i)[1]);
    }
    logger.log(LogLevel.DEBUG, " FE=> StartupPacket({0})", details);
  }

  // Precalculate message length and encode params.
  int length = 4 + 4; // length field + protocol version field
  byte[][] encodedParams = new byte[params.size() * 2][];
  for (int i = 0; i < params.size(); ++i) {
    // StandardCharsets.UTF_8 avoids the checked UnsupportedEncodingException
    // and the per-call charset-name lookup of getBytes("UTF-8").
    encodedParams[i * 2] = params.get(i)[0].getBytes(StandardCharsets.UTF_8);
    encodedParams[i * 2 + 1] = params.get(i)[1].getBytes(StandardCharsets.UTF_8);
    length += encodedParams[i * 2].length + 1 + encodedParams[i * 2 + 1].length + 1;
  }

  length += 1; // Terminating \0

  // Send the startup message.
  pgStream.sendInteger4(length);
  pgStream.sendInteger2(3); // protocol major
  pgStream.sendInteger2(0); // protocol minor
  for (byte[] encodedParam : encodedParams) {
    pgStream.send(encodedParam);
    pgStream.sendChar(0);
  }

  pgStream.sendChar(0);
  pgStream.flush();
}
private void doAuthentication(RedshiftStream pgStream, String host, String user, Properties info) throws IOException, SQLException {
// Now get the response from the backend, either an error message
// or an authentication request
String password = RedshiftProperty.PASSWORD.get(info);
/* SSPI negotiation state, if used */
ISSPIClient sspiClient = null;
//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
/* SCRAM authentication state, if used */
//com.amazon.redshift.jre7.sasl.ScramAuthenticator scramAuthenticator =
// null;
//JCP! endif
try {
authloop: while (true) {
int beresp = pgStream.receiveChar();
switch (beresp) {
case 'E':
// An error occurred, so pass the error message to the
// user.
//
// The most common one to be thrown here is:
// "User authentication failed"
//
int elen = pgStream.receiveInteger4();
ServerErrorMessage errorMsg =
new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE ErrorMessage({0})", errorMsg);
throw new RedshiftException(errorMsg, RedshiftProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
case 'R':
// Authentication request.
// Get the message length
int msgLen = pgStream.receiveInteger4();
// Get the type of request
int areq = pgStream.receiveInteger4();
// Process the request.
switch (areq) {
case AUTH_REQ_MD5: {
byte[] md5Salt = pgStream.receive(4);
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " <=BE AuthenticationReqMD5");
}
if (password == null) {
throw new RedshiftException(
GT.tr(
"The server requested password-based authentication, but no password was provided."),
RedshiftState.CONNECTION_REJECTED);
}
byte[] digest =
MD5Digest.encode(user.getBytes("UTF-8"), password.getBytes("UTF-8"), md5Salt);
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " FE=> Password(md5digest)");
}
pgStream.sendChar('p');
pgStream.sendInteger4(4 + digest.length + 1);
pgStream.send(digest);
pgStream.sendChar(0);
pgStream.flush();
break;
}
case AUTH_REQ_DIGEST: {
// Extensible user password hashing algorithm constant value
int algo = pgStream.receiveInteger4();
String[] algoNames = { "SHA-256" };
int saltLen = pgStream.receiveInteger4();
byte[] salt = pgStream.receive(saltLen);
int serverNonceLen = pgStream.receiveInteger4();
byte[] serverNonce = pgStream.receive(serverNonceLen);
String dateTimeString = Long.toString(new Date().getTime());
byte[] clientNonce = dateTimeString.getBytes();
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " <=BE AuthenticationReqDigest: Algo:" + algo);
}
if (password == null) {
throw new RedshiftException(
GT.tr(
"The server requested password-based authentication, but no password was provided."),
RedshiftState.CONNECTION_REJECTED);
}
if (algo > algoNames.length) {
throw new RedshiftException(
GT.tr(
"The server requested password-based authentication, but requested algorithm " + algo + " is not supported."),
RedshiftState.CONNECTION_REJECTED);
}
byte[] digest =
ExtensibleDigest.encode(clientNonce,
password.getBytes("UTF-8"),
salt,
algoNames[algo],
serverNonce);
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " FE=> Password(extensible digest)");
}
pgStream.sendChar('d');
pgStream.sendInteger4(4 + 4 + digest.length + 4 + clientNonce.length);
pgStream.sendInteger4(digest.length);
pgStream.send(digest);
pgStream.sendInteger4(clientNonce.length);
pgStream.send(clientNonce);
pgStream.flush();
break;
}
case AUTH_REQ_IDP: {
String idpToken = RedshiftProperty.WEB_IDENTITY_TOKEN.get(info);
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " <=BE AuthenticationReqIDP");
}
if (idpToken == null || idpToken.length() == 0) {
throw new RedshiftException(
GT.tr(
"The server requested IDP token-based authentication, but no token was provided."),
RedshiftState.CONNECTION_REJECTED);
}
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, " FE=> IDP(IDP Token)");
}
byte[] token = idpToken.getBytes("UTF-8");
pgStream.sendChar('i');
pgStream.sendInteger4(4 + token.length + 1);
pgStream.send(token);
pgStream.sendChar(0);
pgStream.flush();
break;
}
case AUTH_REQ_PASSWORD: {
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, "<=BE AuthenticationReqPassword");
logger.log(LogLevel.DEBUG, " FE=> Password(password=<not shown>)");
}
if (password == null) {
throw new RedshiftException(
GT.tr(
"The server requested password-based authentication, but no password was provided."),
RedshiftState.CONNECTION_REJECTED);
}
byte[] encodedPassword = password.getBytes("UTF-8");
pgStream.sendChar('p');
pgStream.sendInteger4(4 + encodedPassword.length + 1);
pgStream.send(encodedPassword);
pgStream.sendChar(0);
pgStream.flush();
break;
}
case AUTH_REQ_GSS:
case AUTH_REQ_SSPI:
/*
* Use GSSAPI if requested on all platforms, via JSSE.
*
* For SSPI auth requests, if we're on Windows attempt native SSPI authentication if
* available, and if not disabled by setting a kerberosServerName. On other
* platforms, attempt JSSE GSSAPI negotiation with the SSPI server.
*
* Note that this is slightly different to libpq, which uses SSPI for GSSAPI where
* supported. We prefer to use the existing Java JSSE Kerberos support rather than
* going to native (via JNA) calls where possible, so that JSSE system properties
* etc continue to work normally.
*
* Note that while SSPI is often Kerberos-based there's no guarantee it will be; it
* may be NTLM or anything else. If the client responds to an SSPI request via
* GSSAPI and the other end isn't using Kerberos for SSPI then authentication will
* fail.
*/
final String gsslib = RedshiftProperty.GSS_LIB.get(info);
final boolean usespnego = RedshiftProperty.USE_SPNEGO.getBoolean(info);
boolean useSSPI = false;
/*
* Use SSPI if we're in auto mode on windows and have a request for SSPI auth, or if
* it's forced. Otherwise use gssapi. If the user has specified a Kerberos server
* name we'll always use JSSE GSSAPI.
*/
if (gsslib.equals("gssapi")) {
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, "Using JSSE GSSAPI, param gsslib=gssapi");
} else if (areq == AUTH_REQ_GSS && !gsslib.equals("sspi")) {
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG,
"Using JSSE GSSAPI, gssapi requested by server and gsslib=sspi not forced");
} else {
/* Determine if SSPI is supported by the client */
sspiClient = createSSPI(pgStream, RedshiftProperty.SSPI_SERVICE_CLASS.get(info),
/* Use negotiation for SSPI, or if explicitly requested for GSS */
areq == AUTH_REQ_SSPI || (areq == AUTH_REQ_GSS && usespnego));
useSSPI = sspiClient.isSSPISupported();
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, "SSPI support detected: {0}", useSSPI);
if (!useSSPI) {
/* No need to dispose() if no SSPI used */
sspiClient = null;
if (gsslib.equals("sspi")) {
throw new RedshiftException(
"SSPI forced with gsslib=sspi, but SSPI not available; set loglevel=2 for details",
RedshiftState.CONNECTION_UNABLE_TO_CONNECT);
}
}
if(RedshiftLogger.isEnable()) {
logger.log(LogLevel.DEBUG, "Using SSPI: {0}, gsslib={1} and SSPI support detected", new Object[]{useSSPI, gsslib});
}
}
if (useSSPI) {
/* SSPI requested and detected as available */
sspiClient.startSSPI();
} else {
/* Use JGSS's GSSAPI for this request */
com.amazon.redshift.gss.MakeGSS.authenticate(pgStream, host, user, password,
RedshiftProperty.JAAS_APPLICATION_NAME.get(info),
RedshiftProperty.KERBEROS_SERVER_NAME.get(info), usespnego,
RedshiftProperty.JAAS_LOGIN.getBoolean(info),
RedshiftProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info),
logger);
}
break;
case AUTH_REQ_GSS_CONTINUE:
/*
* Only called for SSPI, as GSS is handled by an inner loop in MakeGSS.
*/
sspiClient.continueSSPI(msgLen - 8);
break;
case AUTH_REQ_SASL:
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE AuthenticationSASL");
//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
// scramAuthenticator = new com.amazon.redshift.jre7.sasl.ScramAuthenticator(user, password, pgStream);
// scramAuthenticator.processServerMechanismsAndInit();
// scramAuthenticator.sendScramClientFirstMessage();
// This works as follows:
// 1. When tests is run from IDE, it is assumed SCRAM library is on the classpath
// 2. In regular build for Java < 8 this `if` is deactivated and the code always throws
if (false) {
//JCP! else
//JCP> throw new RedshiftException(GT.tr(
//JCP> "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)",
//JCP> areq), RedshiftState.CONNECTION_REJECTED);
//JCP! endif
//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
}
break;
//JCP! endif
//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
// case AUTH_REQ_SASL_CONTINUE:
// scramAuthenticator.processServerFirstMessage(msgLen - 4 - 4);
// break;
//
// case AUTH_REQ_SASL_FINAL:
// scramAuthenticator.verifyServerSignature(msgLen - 4 - 4);
// break;
//JCP! endif
case AUTH_REQ_OK:
/* Cleanup after successful authentication */
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE AuthenticationOk");
break authloop; // We're done.
default:
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, " <=BE AuthenticationReq (unsupported type {0})", areq);
throw new RedshiftException(GT.tr(
"The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.",
areq), RedshiftState.CONNECTION_REJECTED);
}
break;
default:
throw new RedshiftException(GT.tr("Protocol error. Session setup failed."),
RedshiftState.PROTOCOL_VIOLATION);
}
}
} finally {
/* Cleanup after successful or failed authentication attempts */
if (sspiClient != null) {
try {
sspiClient.dispose();
} catch (RuntimeException ex) {
if(RedshiftLogger.isEnable())
logger.log(LogLevel.DEBUG, ex, "Unexpected error during SSPI context disposal");
}
}
}
}
private void runInitialQueries(QueryExecutor queryExecutor, Properties info)
throws SQLException {
String assumeMinServerVersion = RedshiftProperty.ASSUME_MIN_SERVER_VERSION.get(info);
if (Utils.parseServerVersionStr(assumeMinServerVersion) >= ServerVersion.v9_0.getVersionNum()) {
// We already sent the parameter values in the StartupMessage so skip this
return;
}
final int dbVersion = queryExecutor.getServerVersionNum();
if (dbVersion >= ServerVersion.v9_0.getVersionNum()) {
SetupQueryRunner.run(queryExecutor, "SET extra_float_digits = 3", false);
}
String appName = RedshiftProperty.APPLICATION_NAME.get(info);
if (appName != null && appName.length() != 0) { // && dbVersion >= ServerVersion.v9_0.getVersionNum()
StringBuilder sql = new StringBuilder();
sql.append("SET application_name = '");
Utils.escapeLiteral(sql, appName, queryExecutor.getStandardConformingStrings());
sql.append("'");
SetupQueryRunner.run(queryExecutor, sql.toString(), false);
}
String queryGroup = RedshiftProperty.QUERY_GROUP.get(info);
if (queryGroup != null && queryGroup.length() != 0) {
StringBuilder sql = new StringBuilder();
sql.append("SET query_group TO '");
Utils.escapeLiteral(sql, queryGroup, queryExecutor.getStandardConformingStrings());
sql.append("'");
SetupQueryRunner.run(queryExecutor, sql.toString(), false);
}
}
private boolean isPrimary(QueryExecutor queryExecutor) throws SQLException, IOException {
Tuple results = SetupQueryRunner.run(queryExecutor, "show transaction_read_only", true);
String value = queryExecutor.getEncoding().decode(results.get(0));
return value.equalsIgnoreCase("off");
}
}
| 8,391 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/CopyOutImpl.java | /*
* Copyright (c) 2009, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
import com.amazon.redshift.copy.CopyOut;
import java.sql.SQLException;
/**
* <p>Anticipated flow of a COPY TO STDOUT operation:</p>
*
* <p>CopyManager.copyOut() ->QueryExecutor.startCopy() - sends given query to server
* ->processCopyResults(): - receives CopyOutResponse from Server - creates new CopyOutImpl
* ->initCopy(): - receives copy metadata from server ->CopyOutImpl.init() ->lock()
* connection for this operation - if query fails an exception is thrown - if query returns wrong
* CopyOperation, copyOut() cancels it before throwing exception <-returned: new CopyOutImpl
* holding lock on connection repeat CopyOut.readFromCopy() until null
* ->CopyOutImpl.readFromCopy() ->QueryExecutorImpl.readFromCopy() ->processCopyResults() -
* on copydata row from server ->CopyOutImpl.handleCopydata() stores reference to byte array - on
* CopyDone, CommandComplete, ReadyForQuery ->unlock() connection for use by other operations
* <-returned: byte array of data received from server or null at end.</p>
*/
public class CopyOutImpl extends CopyOperationImpl implements CopyOut {
  // Row delivered by the most recent handleCopydata() callback. Reset to null
  // before each read, so a null return from readFromCopy() means either the
  // copy has ended or (in non-blocking mode) no row was available.
  private byte[] currentDataRow;

  /** Blocking read; equivalent to {@code readFromCopy(true)}. */
  public byte[] readFromCopy() throws SQLException {
    return readFromCopy(true);
  }

  /**
   * Reads one data row from the COPY OUT stream. The query executor processes
   * incoming protocol messages and calls back into {@link #handleCopydata(byte[])},
   * which stores the row returned here.
   *
   * @param block true to wait for a row, false to return immediately
   * @return the next row, or null when none is available / the copy is done
   */
  @Override
  public byte[] readFromCopy(boolean block) throws SQLException {
    currentDataRow = null;
    queryExecutor.readFromCopy(this, block);
    return currentDataRow;
  }

  // Callback from the query executor with one CopyData payload.
  protected void handleCopydata(byte[] data) {
    currentDataRow = data;
  }
}
| 8,392 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/CopyDualImpl.java | /*
* Copyright (c) 2016, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3;
import com.amazon.redshift.copy.CopyDual;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.RedshiftException;
import java.sql.SQLException;
import java.util.LinkedList;
import java.util.Queue;
public class CopyDualImpl extends CopyOperationImpl implements CopyDual {
  // Rows delivered by handleCopydata() that have not yet been handed to the
  // caller; poll() returns null when the queue is empty.
  private Queue<byte[]> received = new LinkedList<byte[]>();

  /** Sends a slice of {@code data} to the server side of the copy. */
  public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
    queryExecutor.writeToCopy(this, data, off, siz);
  }

  /** Streams data to the server from the given writer. */
  public void writeToCopy(ByteStreamWriter from) throws SQLException {
    queryExecutor.writeToCopy(this, from);
  }

  /** Forces any buffered copy data out to the server. */
  public void flushCopy() throws SQLException {
    queryExecutor.flushCopy(this);
  }

  /**
   * Finishes the copy operation.
   *
   * @return the row count reported by the server's command status
   */
  public long endCopy() throws SQLException {
    return queryExecutor.endCopy(this);
  }

  /** Blocking read; equivalent to {@code readFromCopy(true)}. */
  public byte[] readFromCopy() throws SQLException {
    return readFromCopy(true);
  }

  /**
   * Returns the next queued row, asking the executor for more data only when
   * the queue is empty. May return null in non-blocking mode when nothing
   * arrived.
   */
  @Override
  public byte[] readFromCopy(boolean block) throws SQLException {
    if (received.isEmpty()) {
      queryExecutor.readFromCopy(this, block);
    }

    return received.poll();
  }

  // Intentionally a no-op: command status is not surfaced for dual copies.
  @Override
  public void handleCommandStatus(String status) throws RedshiftException {
  }

  // Callback from the query executor with one CopyData payload; queued until
  // the caller reads it.
  protected void handleCopydata(byte[] data) {
    received.add(data);
  }
}
| 8,393 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/SimpleQuery.java | /*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
// Copyright (c) 2004, Open Cloud Limited.
package com.amazon.redshift.core.v3;
import com.amazon.redshift.core.Field;
import com.amazon.redshift.core.NativeQuery;
import com.amazon.redshift.core.Oid;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.Query;
import com.amazon.redshift.core.SqlCommand;
import com.amazon.redshift.core.Utils;
import com.amazon.redshift.jdbc.RedshiftResultSet;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import java.lang.ref.PhantomReference;
import java.util.BitSet;
import java.util.Map;
/**
* V3 Query implementation for a single-statement query. This also holds the state of any associated
* server-side named statement. We use a PhantomReference managed by the QueryExecutor to handle
* statement cleanup.
*
* @author Oliver Jowett (oliver@opencloud.com)
*/
class SimpleQuery implements Query {
  protected RedshiftLogger logger;

  // Copy constructor: shares the immutable native query and settings of src.
  SimpleQuery(SimpleQuery src, RedshiftLogger logger) {
    this(src.nativeQuery, src.transferModeRegistry, src.sanitiserDisabled, logger);
  }

  SimpleQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
      boolean sanitiserDisabled, RedshiftLogger logger) {
    this.logger = logger;
    this.nativeQuery = query;
    this.transferModeRegistry = transferModeRegistry;
    this.sanitiserDisabled = sanitiserDisabled;
  }

  // Returns a parameter list sized for this query's bind positions, or the
  // shared empty list when the query has no parameters.
  public ParameterList createParameterList() {
    if (nativeQuery.bindPositions.length == 0) {
      return NO_PARAMETERS;
    }

    return new SimpleParameterList(getBindCount(), transferModeRegistry, nativeQuery.redshiftParamMarkers);
  }

  public String toString(ParameterList parameters) {
    return nativeQuery.toString(parameters);
  }

  public String toString() {
    return toString(null);
  }

  // Closing a query releases its server-side prepared statement, if any.
  public void close() {
    unprepare();
  }

  // A simple (single-statement) query has no subqueries.
  public SimpleQuery[] getSubqueries() {
    return null;
  }

  /**
   * <p>Return maximum size in bytes that each result row from this query may return. Mainly used for
   * batches that return results.</p>
   *
   * <p>Results are cached until/unless the query is re-described.</p>
   *
   * @return Max size of result data in bytes according to returned fields, 0 if no results, -1 if
   *         result is unbounded.
   * @throws IllegalStateException if the query is not described
   */
  public int getMaxResultRowSize() {
    if (cachedMaxResultRowSize != null) {
      return cachedMaxResultRowSize;
    }

    if (!this.statementDescribed) {
      throw new IllegalStateException(
          "Cannot estimate result row size on a statement that is not described");
    }

    int maxResultRowSize = 0;
    if (fields != null) {
      for (Field f : fields) {
        final int fieldLength = f.getLength();
        if (fieldLength < 1 || fieldLength >= 65535) {
          /*
           * Field length unknown or large; we can't make any safe estimates about the result size,
           * so we have to fall back to sending queries individually.
           */
          maxResultRowSize = -1;
          break;
        }
        maxResultRowSize += fieldLength;
      }
    }

    cachedMaxResultRowSize = maxResultRowSize;
    return maxResultRowSize;
  }

  //
  // Implementation guts
  //

  public String getNativeSql() {
    return nativeQuery.nativeSql;
  }

  // Records the server-side statement name assigned when this query was
  // prepared, along with the deallocation epoch at prepare time.
  void setStatementName(String statementName, short deallocateEpoch) {
    assert statementName != null : "statement name should not be null";
    this.statementName = statementName;
    this.encodedStatementName = Utils.encodeUTF8(statementName);
    this.deallocateEpoch = deallocateEpoch;
  }

  // Records the parameter OIDs this query was prepared with, remembering which
  // of them were UNSPECIFIED so later bind-compatibility checks can allow them.
  void setPrepareTypes(int[] paramTypes) {
    // Remember which parameters were unspecified since the parameters will be overridden later by
    // ParameterDescription message
    for (int i = 0; i < paramTypes.length; i++) {
      int paramType = paramTypes[i];
      if (paramType == Oid.UNSPECIFIED) {
        if (this.unspecifiedParams == null) {
          this.unspecifiedParams = new BitSet();
        }
        this.unspecifiedParams.set(i);
      }
    }

    // paramTypes is changed by "describe statement" response, so we clone the array
    // However, we can reuse array if there is one
    if (this.preparedTypes == null) {
      this.preparedTypes = paramTypes.clone();
      return;
    }
    System.arraycopy(paramTypes, 0, this.preparedTypes, 0, paramTypes.length);
  }

  int[] getPrepareTypes() {
    return preparedTypes;
  }

  String getStatementName() {
    return statementName;
  }

  // True when this query is already server-prepared with types compatible with
  // paramTypes and the prepared statement has not been invalidated by a
  // DEALLOCATE ALL (epoch mismatch).
  boolean isPreparedFor(int[] paramTypes, short deallocateEpoch) {
    if (statementName == null || preparedTypes == null) {
      return false; // Not prepared.
    }

    if (this.deallocateEpoch != deallocateEpoch) {
      return false;
    }

    assert paramTypes.length == preparedTypes.length
        : String.format("paramTypes:%1$d preparedTypes:%2$d", paramTypes.length,
        preparedTypes.length);

    // Check for compatible types.
    BitSet unspecified = this.unspecifiedParams;
    for (int i = 0; i < paramTypes.length; ++i) {
      int paramType = paramTypes[i];
      // Either paramType should match prepared type
      // Or paramType==UNSPECIFIED and the prepare type was UNSPECIFIED
      // Note: preparedTypes can be updated by "statement describe"
      // 1) parse(name="S_01", sql="select ?::timestamp", types={UNSPECIFIED})
      // 2) statement describe: bind 1 type is TIMESTAMP
      // 3) SimpleQuery.preparedTypes is updated to TIMESTAMP
      // ...
      // 4.1) bind(name="S_01", ..., types={TIMESTAMP}) -> OK (since preparedTypes is equal to TIMESTAMP)
      // 4.2) bind(name="S_01", ..., types={UNSPECIFIED}) -> OK (since the query was initially parsed with UNSPECIFIED)
      // 4.3) bind(name="S_01", ..., types={DATE}) -> KO, unprepare and parse required
      int preparedType = preparedTypes[i];
      if (paramType != preparedType
          && (paramType != Oid.UNSPECIFIED
          || unspecified == null
          || !unspecified.get(i))) {
        if (RedshiftLogger.isEnable()) {
          logger.log(LogLevel.DEBUG,
              "Statement {0} does not match new parameter types. Will have to un-prepare it and parse once again."
                  + " To avoid performance issues, use the same data type for the same bind position. Bind index (1-based) is {1},"
                  + " preparedType was {2} (after describe {3}), current bind type is {4}",
              new Object[]{statementName, i + 1,
                  Oid.toString(unspecified != null && unspecified.get(i) ? 0 : preparedType),
                  Oid.toString(preparedType), Oid.toString(paramType)});
        }
        return false;
      }
    }
    return true;
  }

  // True when some parameter types are still unknown (never prepared, or
  // prepared with UNSPECIFIED types that a describe has not yet resolved).
  boolean hasUnresolvedTypes() {
    if (preparedTypes == null) {
      return true;
    }

    return this.unspecifiedParams != null && !this.unspecifiedParams.isEmpty();
  }

  byte[] getEncodedStatementName() {
    return encodedStatementName;
  }

  /**
   * Sets the fields that this query will return.
   *
   * @param fields The fields that this query will return.
   */
  void setFields(Field[] fields) {
    this.fields = fields;
    this.resultSetColumnNameIndexMap = null;
    this.cachedMaxResultRowSize = null;
    this.needUpdateFieldFormats = fields != null;
    this.hasBinaryFields = false; // just in case
  }

  /**
   * Returns the fields that this query will return. If the result set fields are not known returns
   * null.
   *
   * @return the fields that this query will return.
   */
  Field[] getFields() {
    return fields;
  }

  /**
   * Returns true if current query needs field formats be adjusted as per connection configuration.
   * Subsequent invocations would return {@code false}. The idea is to perform adjustments only
   * once, not for each
   * {@link QueryExecutorImpl#sendBind(SimpleQuery, SimpleParameterList, Portal, boolean)}.
   *
   * @return true if current query needs field formats be adjusted as per connection configuration
   */
  boolean needUpdateFieldFormats() {
    if (needUpdateFieldFormats) {
      needUpdateFieldFormats = false;
      return true;
    }
    return false;
  }

  // Re-arms needUpdateFieldFormats() so the next call returns true again
  // (only meaningful when fields are known).
  public void resetNeedUpdateFieldFormats() {
    needUpdateFieldFormats = fields != null;
  }

  public boolean hasBinaryFields() {
    return hasBinaryFields;
  }

  public void setHasBinaryFields(boolean hasBinaryFields) {
    this.hasBinaryFields = hasBinaryFields;
  }

  // Have we sent a Describe Portal message for this query yet?
  boolean isPortalDescribed() {
    return portalDescribed;
  }

  void setPortalDescribed(boolean portalDescribed) {
    this.portalDescribed = portalDescribed;
    this.cachedMaxResultRowSize = null;
  }

  // Have we sent a Describe Statement message for this query yet?
  // Note that we might not have need to, so this may always be false.
  public boolean isStatementDescribed() {
    return statementDescribed;
  }

  void setStatementDescribed(boolean statementDescribed) {
    this.statementDescribed = statementDescribed;
    this.cachedMaxResultRowSize = null;
  }

  public boolean isEmpty() {
    return getNativeSql().isEmpty();
  }

  // Replaces the phantom reference used for automatic server-side statement
  // cleanup, releasing any previous one first.
  void setCleanupRef(PhantomReference<?> cleanupRef) {
    if (this.cleanupRef != null) {
      this.cleanupRef.clear();
      this.cleanupRef.enqueue();
    }
    this.cleanupRef = cleanupRef;
  }

  // Forgets all server-side prepared-statement state (name, types, fields,
  // cached metadata) so the query will be re-parsed on next use.
  void unprepare() {
    if (cleanupRef != null) {
      cleanupRef.clear();
      cleanupRef.enqueue();
      cleanupRef = null;
    }
    if (this.unspecifiedParams != null) {
      this.unspecifiedParams.clear();
    }

    statementName = null;
    encodedStatementName = null;
    fields = null;
    this.resultSetColumnNameIndexMap = null;
    portalDescribed = false;
    statementDescribed = false;
    cachedMaxResultRowSize = null;
  }

  // A simple query always represents exactly one statement execution.
  public int getBatchSize() {
    return 1;
  }

  NativeQuery getNativeQuery() {
    return nativeQuery;
  }

  public final int getBindCount() {
    return nativeQuery.bindPositions.length * getBatchSize();
  }

  // Lazily built column-name -> index map; cached only for server-prepared
  // statements (see getResultSetColumnNameIndexMap).
  private Map<String, Integer> resultSetColumnNameIndexMap;

  @Override
  public Map<String, Integer> getResultSetColumnNameIndexMap() {
    Map<String, Integer> columnPositions = this.resultSetColumnNameIndexMap;
    if (columnPositions == null && fields != null) {
      columnPositions =
          RedshiftResultSet.createColumnNameIndexMap(fields, sanitiserDisabled);
      if (statementName != null) {
        // Cache column positions for server-prepared statements only
        this.resultSetColumnNameIndexMap = columnPositions;
      }
    }
    return columnPositions;
  }

  @Override
  public SqlCommand getSqlCommand() {
    return nativeQuery.getCommand();
  }

  private final NativeQuery nativeQuery;

  private final TypeTransferModeRegistry transferModeRegistry;
  // Server-side statement name, null when not prepared.
  private String statementName;
  private byte[] encodedStatementName;
  /**
   * The stored fields from previous execution or describe of a prepared statement. Always null for
   * non-prepared statements.
   */
  private Field[] fields;
  private boolean needUpdateFieldFormats;
  private boolean hasBinaryFields;
  private boolean portalDescribed;
  private boolean statementDescribed;
  private final boolean sanitiserDisabled;
  private PhantomReference<?> cleanupRef;
  // Parameter OIDs from the last prepare; updated by statement describe.
  private int[] preparedTypes;
  // Bit i set => parameter i was UNSPECIFIED at prepare time.
  private BitSet unspecifiedParams;
  private short deallocateEpoch;

  private Integer cachedMaxResultRowSize;

  // Shared immutable parameter list for queries without bind parameters.
  static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null);
}
| 8,394 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/replication/V3RedshiftReplicationStream.java | /*
* Copyright (c) 2016, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3.replication;
import com.amazon.redshift.copy.CopyDual;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.replication.LogSequenceNumber;
import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.ReplicationType;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.util.Date;
import java.util.concurrent.TimeUnit;
public class V3RedshiftReplicationStream implements RedshiftReplicationStream {
private RedshiftLogger logger;
public static final long REDSHIFT_EPOCH_2000_01_01 = 946684800000L;
private static final long NANOS_PER_MILLISECOND = 1000000L;
private final CopyDual copyDual;
private final long updateInterval;
private final ReplicationType replicationType;
private long lastStatusUpdate;
private boolean closeFlag = false;
private LogSequenceNumber lastServerLSN = LogSequenceNumber.INVALID_LSN;
/**
* Last receive LSN + payload size.
*/
private volatile LogSequenceNumber lastReceiveLSN = LogSequenceNumber.INVALID_LSN;
private volatile LogSequenceNumber lastAppliedLSN = LogSequenceNumber.INVALID_LSN;
private volatile LogSequenceNumber lastFlushedLSN = LogSequenceNumber.INVALID_LSN;
/**
* @param copyDual bidirectional copy protocol
* @param startLSN the position in the WAL that we want to initiate replication from
* usually the currentLSN returned by calling pg_current_wal_lsn()for v10
* above or pg_current_xlog_location() depending on the version of the
* server
* @param updateIntervalMs the number of millisecond between status packets sent back to the
* server. A value of zero disables the periodic status updates
* completely, although an update will still be sent when requested by the
* server, to avoid timeout disconnect.
* @param replicationType LOGICAL or PHYSICAL
* @param logger the logger to log the entry for debugging.
*/
  public V3RedshiftReplicationStream(CopyDual copyDual, LogSequenceNumber startLSN, long updateIntervalMs,
      ReplicationType replicationType, RedshiftLogger logger
  ) {
    this.logger = logger;
    this.copyDual = copyDual;
    // Interval is tracked internally in nanoseconds (compared against nanoTime()).
    this.updateInterval = updateIntervalMs * NANOS_PER_MILLISECOND;
    // Backdate the last update so the very first status packet is sent immediately.
    this.lastStatusUpdate = System.nanoTime() - (updateIntervalMs * NANOS_PER_MILLISECOND);
    this.lastReceiveLSN = startLSN;
    this.replicationType = replicationType;
  }
  /**
   * Blocking read: loops until an XLogData payload arrives or the underlying
   * copy operation becomes inactive (stream finished). Keep-alive processing
   * and periodic status updates happen inside readInternal().
   */
  @Override
  public ByteBuffer read() throws SQLException {
    checkClose();

    ByteBuffer payload = null;
    while (payload == null && copyDual.isActive()) {
      payload = readInternal(true);
    }

    return payload;
  }

  /** Non-blocking read: returns null immediately when no payload is pending. */
  public ByteBuffer readPending() throws SQLException {
    checkClose();
    return readInternal(false);
  }

  @Override
  public LogSequenceNumber getLastReceiveLSN() {
    return lastReceiveLSN;
  }

  @Override
  public LogSequenceNumber getLastFlushedLSN() {
    return lastFlushedLSN;
  }

  @Override
  public LogSequenceNumber getLastAppliedLSN() {
    return lastAppliedLSN;
  }

  @Override
  public void setFlushedLSN(LogSequenceNumber flushed) {
    this.lastFlushedLSN = flushed;
  }

  @Override
  public void setAppliedLSN(LogSequenceNumber applied) {
    this.lastAppliedLSN = applied;
  }

  /** Sends a status update to the server now, requesting a reply. */
  @Override
  public void forceUpdateStatus() throws SQLException {
    checkClose();
    updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, true);
  }

  // Closed either explicitly (closeFlag) or because the copy ended server-side.
  @Override
  public boolean isClosed() {
    return closeFlag || !copyDual.isActive();
  }
  /**
   * Core receive loop: pulls the next copy message, sends a periodic or
   * server-requested status update, and dispatches on the message tag —
   * 'k' keep-alive (may demand a reply) or 'w' XLogData (returned to caller).
   *
   * @param block whether receiveNextData should wait for data
   * @return the XLogData payload positioned after its header, or null when no
   *         payload is available / the stream ended
   */
  private ByteBuffer readInternal(boolean block) throws SQLException {
    boolean updateStatusRequired = false;
    while (copyDual.isActive()) {
      ByteBuffer buffer = receiveNextData(block);

      if (updateStatusRequired || isTimeUpdate()) {
        timeUpdateStatus();
      }

      if (buffer == null) {
        return null;
      }

      int code = buffer.get();

      switch (code) {
        case 'k': //KeepAlive message
          updateStatusRequired = processKeepAliveMessage(buffer);
          // With automatic updates disabled (interval 0), reply to every keep-alive.
          updateStatusRequired |= updateInterval == 0;
          break;
        case 'w': //XLogData
          return processXLogData(buffer);
        default:
          throw new RedshiftException(
              GT.tr("Unexpected packet type during replication: {0}", Integer.toString(code)),
              RedshiftState.PROTOCOL_VIOLATION
          );
      }
    }

    return null;
  }

  /**
   * Reads one raw copy message. A socket read timeout is treated as "no data"
   * (null) so the caller's loop can emit a keep-alive status update.
   */
  private ByteBuffer receiveNextData(boolean block) throws SQLException {
    try {
      byte[] message = copyDual.readFromCopy(block);
      if (message != null) {
        return ByteBuffer.wrap(message);
      } else {
        return null;
      }
    } catch (RedshiftException e) { //todo maybe replace on thread sleep?
      if (e.getCause() instanceof SocketTimeoutException) {
        //signal for keep alive
        return null;
      }

      throw e;
    }
  }

  // True when the automatic status-update interval has elapsed.
  private boolean isTimeUpdate() {
    /* a value of 0 disables automatic updates */
    if ( updateInterval == 0 ) {
      return false;
    }

    long diff = System.nanoTime() - lastStatusUpdate;
    return diff >= updateInterval;
  }

  // Sends a periodic status update without demanding a server reply.
  private void timeUpdateStatus() throws SQLException {
    updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, false);
  }

  // Builds and writes a Standby Status Update message, then records when it
  // was sent (nanoTime clock, compared by isTimeUpdate()).
  private void updateStatusInternal(
      LogSequenceNumber received, LogSequenceNumber flushed, LogSequenceNumber applied,
      boolean replyRequired)
      throws SQLException {
    byte[] reply = prepareUpdateStatus(received, flushed, applied, replyRequired);
    copyDual.writeToCopy(reply, 0, reply.length);
    copyDual.flushCopy();

    lastStatusUpdate = System.nanoTime();
  }
/**
 * Encodes a Standby Status Update message ('r') for the backend.
 *
 * <p>Wire layout: 1-byte message type, three 8-byte LSNs (received, flushed,
 * applied), an 8-byte client clock in microseconds since 2000-01-01, and a
 * 1-byte reply-required flag.</p>
 *
 * @param received      last WAL position received locally
 * @param flushed       last WAL position flushed to durable storage
 * @param applied       last WAL position applied
 * @param replyRequired true to ask the server for an immediate keepalive reply
 * @return the encoded message, ready to be written to the COPY channel
 */
private byte[] prepareUpdateStatus(LogSequenceNumber received, LogSequenceNumber flushed,
    LogSequenceNumber applied, boolean replyRequired) {
  ByteBuffer byteBuffer = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1);

  // NOTE(review): System.nanoTime() is not wall-clock time, so this clock value is
  // only self-consistent between updates -- confirm whether the server uses it.
  long now = System.nanoTime() / NANOS_PER_MILLISECOND;
  // The protocol clock field is MICROseconds since 2000-01-01 while `now` is in
  // milliseconds. The previous code converted microseconds to microseconds (a
  // no-op), sending a value 1000x too small; convert from milliseconds instead.
  long systemClock = TimeUnit.MICROSECONDS.convert(now - REDSHIFT_EPOCH_2000_01_01,
      TimeUnit.MILLISECONDS);

  if (RedshiftLogger.isEnable()) {
    logger.log(LogLevel.DEBUG, " FE=> StandbyStatusUpdate(received: {0}, flushed: {1}, applied: {2}, clock: {3})",
        new Object[]{received.asString(), flushed.asString(), applied.asString(), new Date(now)});
  }

  byteBuffer.put((byte) 'r');
  byteBuffer.putLong(received.asLong());
  byteBuffer.putLong(flushed.asLong());
  byteBuffer.putLong(applied.asLong());
  byteBuffer.putLong(systemClock);

  if (replyRequired) {
    byteBuffer.put((byte) 1);
  } else {
    // An invalid receive position also forces a reply so we can learn the real one.
    byteBuffer.put(received == LogSequenceNumber.INVALID_LSN ? (byte) 1 : (byte) 0);
  }

  // Deliberately do NOT touch lastStatusUpdate here: it is nanosecond-based and is
  // set by updateStatusInternal() after a successful flush. The old assignment
  // stored a millisecond value, which corrupted the timer if the flush then failed.
  return byteBuffer.array();
}
/**
 * Parses a keepalive ('k') message: the server's current WAL end, the server
 * clock, and a flag asking the client to reply immediately.
 *
 * @param buffer buffer positioned just past the message-type byte
 * @return {@code true} if the server requested an immediate status update
 */
private boolean processKeepAliveMessage(ByteBuffer buffer) {
  lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
  // A keepalive can advance our received position even though it carries no payload.
  if (lastServerLSN.asLong() > lastReceiveLSN.asLong()) {
    lastReceiveLSN = lastServerLSN;
  }

  long serverClockMicros = buffer.getLong();
  boolean replyRequested = buffer.get() != 0;

  if (RedshiftLogger.isEnable()) {
    // Server clock arrives as microseconds since 2000-01-01; convert for display.
    Date clockTime = new Date(
        TimeUnit.MILLISECONDS.convert(serverClockMicros, TimeUnit.MICROSECONDS)
        + REDSHIFT_EPOCH_2000_01_01);
    logger.log(LogLevel.DEBUG, " <=BE Keepalive(lastServerWal: {0}, clock: {1} needReply: {2})",
        new Object[]{lastServerLSN.asString(), clockTime, replyRequested});
  }

  return replyRequested;
}
/**
 * Parses an XLogData ('w') message header, advances the receive position, and
 * returns a buffer view positioned at the WAL payload.
 *
 * @param xlogData buffer positioned just past the message-type byte
 * @return a slice of the buffer containing only the WAL payload
 */
private ByteBuffer processXLogData(ByteBuffer xlogData) {
  long startingLsn = xlogData.getLong();
  lastServerLSN = LogSequenceNumber.valueOf(xlogData.getLong());
  long serverClockMicros = xlogData.getLong();

  switch (replicationType) {
    case LOGICAL:
      // Logical streams report the start of the message as the received position.
      lastReceiveLSN = LogSequenceNumber.valueOf(startingLsn);
      break;
    case PHYSICAL:
      // Physical streams count the payload bytes as consumed WAL.
      int bytesReceived = xlogData.limit() - xlogData.position();
      lastReceiveLSN = LogSequenceNumber.valueOf(startingLsn + bytesReceived);
      break;
  }

  if (RedshiftLogger.isEnable()) {
    logger.log(LogLevel.DEBUG, " <=BE XLogData(currWal: {0}, lastServerWal: {1}, clock: {2})",
        new Object[]{lastReceiveLSN.asString(), lastServerLSN.asString(), serverClockMicros});
  }

  return xlogData.slice();
}
// Guard used by the public operations: fail fast once the stream has been closed
// (explicitly or because the COPY channel ended).
private void checkClose() throws RedshiftException {
if (isClosed()) {
throw new RedshiftException(GT.tr("This replication stream has been closed."),
RedshiftState.CONNECTION_DOES_NOT_EXIST);
}
}
/**
 * Stops replication by ending the COPY-both operation and marking the stream
 * closed. Idempotent: calling it on an already-closed stream is a no-op.
 *
 * @throws SQLException if ending the COPY operation fails
 */
public void close() throws SQLException {
  if (isClosed()) {
    return;
  }

  if (RedshiftLogger.isEnable()) {
    logger.log(LogLevel.DEBUG, " FE=> StopReplication");
  }

  // End the channel first; the flag is only set once that succeeds.
  copyDual.endCopy();
  closeFlag = true;
}
}
| 8,395 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/core/v3/replication/V3ReplicationProtocol.java | /*
* Copyright (c) 2016, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package com.amazon.redshift.core.v3.replication;
import com.amazon.redshift.copy.CopyDual;
import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.core.QueryExecutor;
import com.amazon.redshift.core.ReplicationProtocol;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.ReplicationType;
import com.amazon.redshift.replication.fluent.CommonOptions;
import com.amazon.redshift.replication.fluent.logical.LogicalReplicationOptions;
import com.amazon.redshift.replication.fluent.physical.PhysicalReplicationOptions;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Properties;
/**
 * V3 wire-protocol implementation of {@link ReplicationProtocol}: builds the
 * {@code START_REPLICATION} command for logical or physical replication and
 * opens the COPY-both channel that the replication stream runs over.
 */
public class V3ReplicationProtocol implements ReplicationProtocol {

  private RedshiftLogger logger;
  private final QueryExecutor queryExecutor;
  private final RedshiftStream pgStream;

  public V3ReplicationProtocol(QueryExecutor queryExecutor, RedshiftStream pgStream) {
    this.queryExecutor = queryExecutor;
    this.pgStream = pgStream;
  }

  public RedshiftReplicationStream startLogical(LogicalReplicationOptions options, RedshiftLogger logger)
      throws SQLException {
    String query = createStartLogicalQuery(options);
    return initializeReplication(query, options, ReplicationType.LOGICAL, logger);
  }

  public RedshiftReplicationStream startPhysical(PhysicalReplicationOptions options, RedshiftLogger logger)
      throws SQLException {
    String query = createStartPhysicalQuery(options);
    return initializeReplication(query, options, ReplicationType.PHYSICAL, logger);
  }

  /**
   * Issues the START_REPLICATION command and wraps the resulting COPY-both
   * channel in a {@code V3RedshiftReplicationStream}.
   */
  private RedshiftReplicationStream initializeReplication(String query, CommonOptions options,
      ReplicationType replicationType, RedshiftLogger logger)
      throws SQLException {
    this.logger = logger;

    if (RedshiftLogger.isEnable())
      this.logger.log(LogLevel.DEBUG, " FE=> StartReplication(query: {0})", query);

    configureSocketTimeout(options);
    CopyDual copyDual = (CopyDual) queryExecutor.startCopy(query, true);

    return new V3RedshiftReplicationStream(
        copyDual,
        options.getStartLSNPosition(),
        options.getStatusInterval(),
        replicationType,
        logger
    );
  }

  /**
   * START_REPLICATION [SLOT slot_name] [PHYSICAL] XXX/XXX.
   */
  private String createStartPhysicalQuery(PhysicalReplicationOptions options) {
    StringBuilder builder = new StringBuilder();
    builder.append("START_REPLICATION");

    if (options.getSlotName() != null) {
      builder.append(" SLOT ").append(options.getSlotName());
    }

    builder.append(" PHYSICAL ").append(options.getStartLSNPosition().asString());

    return builder.toString();
  }

  /**
   * START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [option_value] [, ... ] ) ]
   *
   * <p>Option names are emitted double-quoted and option values single-quoted.
   * Embedded quote characters are doubled per SQL quoting rules, so a value
   * containing {@code '} (or a name containing {@code "}) can no longer break
   * out of its literal -- the previous code interpolated them verbatim.</p>
   */
  private String createStartLogicalQuery(LogicalReplicationOptions options) {
    StringBuilder builder = new StringBuilder();
    builder.append("START_REPLICATION SLOT ")
        .append(options.getSlotName())
        .append(" LOGICAL ")
        .append(options.getStartLSNPosition().asString());

    Properties slotOptions = options.getSlotOptions();
    if (slotOptions.isEmpty()) {
      return builder.toString();
    }

    builder.append(" (");
    boolean isFirst = true;
    for (String name : slotOptions.stringPropertyNames()) {
      if (isFirst) {
        isFirst = false;
      } else {
        builder.append(", ");
      }
      // Escape quoting characters by doubling them (SQL identifier/string rules).
      String escapedName = name.replace("\"", "\"\"");
      String escapedValue = slotOptions.getProperty(name).replace("'", "''");
      builder.append('\"').append(escapedName).append('\"').append(" ")
          .append('\'').append(escapedValue).append('\'');
    }
    builder.append(")");

    return builder.toString();
  }

  /**
   * Lowers the socket read timeout to the status interval (when smaller) so the
   * stream's read loop wakes up often enough to send standby status updates on
   * time. A status interval of zero leaves the socket timeout untouched.
   *
   * @throws RedshiftException if the socket timeout cannot be adjusted
   */
  private void configureSocketTimeout(CommonOptions options) throws RedshiftException {
    if (options.getStatusInterval() == 0) {
      return;
    }

    try {
      int previousTimeOut = pgStream.getSocket().getSoTimeout();

      int minimalTimeOut;
      if (previousTimeOut > 0) {
        minimalTimeOut = Math.min(previousTimeOut, options.getStatusInterval());
      } else {
        minimalTimeOut = options.getStatusInterval();
      }

      pgStream.getSocket().setSoTimeout(minimalTimeOut);
      // Remove the artificial delay before available() checks so blocking reads
      // return promptly enough for keepalive handling.
      pgStream.setMinStreamAvailableCheckDelay(0);
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("The connection attempt failed."),
          RedshiftState.CONNECTION_UNABLE_TO_CONNECT, ioe);
    }
  }
}
| 8,396 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc42/Driver.java | package com.amazon.redshift.jdbc42;
/*
* Class retained for backwards compatibility
*/
/**
 * Backwards-compatibility alias so code that references
 * {@code com.amazon.redshift.jdbc42.Driver} keeps working; all behavior lives
 * in {@link com.amazon.redshift.jdbc.Driver}.
 */
public class Driver extends com.amazon.redshift.jdbc.Driver
{
}
| 8,397 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc42/DataSource.java | package com.amazon.redshift.jdbc42;
/*
* Class retained for backwards compatibility
*/
/**
 * Backwards-compatibility alias so code that references
 * {@code com.amazon.redshift.jdbc42.DataSource} keeps working; all behavior
 * lives in {@link com.amazon.redshift.jdbc.DataSource}.
 */
public class DataSource extends com.amazon.redshift.jdbc.DataSource
{
}
| 8,398 |
0 | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift | Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/logger/LogWriterHandler.java | package com.amazon.redshift.logger;
import java.io.IOException;
import java.io.Writer;
/**
 * {@code LogHandler} that forwards log records to a caller-supplied
 * {@link Writer}. The writer is owned by the caller, which is why
 * {@link #close()} deliberately leaves it open.
 *
 * <p>All operations are synchronized so concurrent loggers cannot interleave
 * partial messages.</p>
 */
public class LogWriterHandler implements LogHandler {

  // Destination supplied by the application; never closed by this handler.
  private final Writer writer;

  public LogWriterHandler(Writer inWriter) throws Exception {
    writer = inWriter;
  }

  /** Writes one message and flushes immediately so it is visible right away. */
  @Override
  public synchronized void write(String message) throws Exception {
    writer.write(message);
    writer.flush();
  }

  /** No-op: the underlying writer belongs to the caller, so it stays open. */
  @Override
  public synchronized void close() throws Exception {
    // Intentionally empty -- see class comment.
  }

  /** Best-effort flush; I/O failures are swallowed because logging must not throw. */
  @Override
  public synchronized void flush() {
    if (writer == null) {
      return;
    }
    try {
      writer.flush();
    } catch (IOException ignored) {
      // Deliberately ignored: flushing the log is best-effort.
    }
  }
}
| 8,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.