index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/httpserver/Server.java
package com.amazon.redshift.plugin.httpserver; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.plugin.InternalPluginException; import org.apache.http.HttpException; import org.apache.http.HttpServerConnection; import org.apache.http.config.SocketConfig; import org.apache.http.impl.DefaultBHttpServerConnectionFactory; import org.apache.http.protocol.*; import javax.net.ServerSocketFactory; import java.io.IOException; import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; import java.net.SocketTimeoutException; import java.time.Duration; import java.util.concurrent.CountDownLatch; /** * Ad-hoc http server. Listen for one incoming connection or stop after timeout. */ public class Server { /** * Instance of connection factory. */ private final DefaultBHttpServerConnectionFactory m_connectionFactory; /** * Instance of http service. */ private final HttpService m_httpService; /** * IP address. */ private final InetAddress m_ipAddress; /** * Port number. */ private final int m_port; /** * Port on witch socket listen. * if used random port */ private int local_port; /** * Instance of Request Handler. */ private final RequestHandler m_handler; /** * Instance of ServerSocketFactory. */ private final ServerSocketFactory m_socketFactory; /** * Instance of SocketConfig. */ private final SocketConfig m_defaultSocketConfig; /** * Instance of ListenerThread. */ private ListenerThread m_listener; private RedshiftLogger m_log; private CountDownLatch m_startSignal = null; /** * Ad-hoc http server. * * @param port to listen * @param handler functional callback. put all necessary functionality here * @param waitTime how long does server wait for interaction. 
* @param log Redshift logger */ public Server(int port, RequestHandler handler, Duration waitTime, RedshiftLogger log) { this.m_log = log; this.m_port = port; this.m_handler = handler; this.m_ipAddress = InetAddress.getLoopbackAddress(); this.m_socketFactory = ServerSocketFactory.getDefault(); this.m_defaultSocketConfig = SocketConfig.custom() .setBacklogSize(2) .setSoKeepAlive(false) .setSoTimeout((int) waitTime.toMillis()) .build(); this.m_httpService = new HttpService( HttpProcessorBuilder.create() .add(new ResponseDate()) .add(new ResponseContent()) .add(new ResponseConnControl()) .build(), prepareRequestMapper(handler)); m_connectionFactory = DefaultBHttpServerConnectionFactory.INSTANCE; } public int getLocalPort() { return local_port; } /** * Actual start server to work. * * @throws IOException all exceptions wrapped in {@link IOException} */ public void listen() throws IOException { ServerSocket serverSocket = null; try { serverSocket = m_socketFactory.createServerSocket( this.m_port, this.m_defaultSocketConfig.getBacklogSize(), this.m_ipAddress); serverSocket.setSoTimeout(m_defaultSocketConfig.getSoTimeout()); this.local_port = serverSocket.getLocalPort(); m_listener = new ListenerThread(serverSocket); m_startSignal = new CountDownLatch(1); m_listener.start(); // Wait for listener thread to start try { m_startSignal.await(); m_startSignal = null; } catch(InterruptedException ie) { // Ignore } } catch (Throwable ex) { if (RedshiftLogger.isEnable()) m_log.logError(ex.getMessage()); if (serverSocket != null) { serverSocket.close(); } throw ex; } } /** * Wait for http callback result. * Block execution till result or timeout exited. */ public void waitForResult() { try { m_listener.join(); } catch (InterruptedException e) { // do nothing. // resources would be closed by listener thread. if (RedshiftLogger.isEnable()) m_log.logError(e); } } /** * Stops the server. 
*/ public void stop() { m_listener.interrupt(); } private UriHttpRequestHandlerMapper prepareRequestMapper(HttpRequestHandler handler) { UriHttpRequestHandlerMapper mapper = new UriHttpRequestHandlerMapper(); mapper.register("*", handler); return mapper; } // Copy of apache-core {@link org.apache.http.impl.bootstrap.Worker} // and {@link org.apache.http.impl.bootstrap.RequestListener} /** * Http worker thread. */ public class ListenerThread extends Thread { private final ServerSocket serverSocket; ListenerThread(ServerSocket serverSocket) { super("http-listener"); this.serverSocket = serverSocket; } @Override public void run() { HttpServerConnection conn = null; try { // Signal listener thread started m_startSignal.countDown(); final Socket socket = serverSocket.accept(); socket.setKeepAlive(m_defaultSocketConfig.isSoKeepAlive()); socket.setTcpNoDelay(m_defaultSocketConfig.isTcpNoDelay()); if (m_defaultSocketConfig.getRcvBufSize() > 0) { socket.setReceiveBufferSize(m_defaultSocketConfig.getRcvBufSize()); } if (m_defaultSocketConfig.getSndBufSize() > 0) { socket.setSendBufferSize(m_defaultSocketConfig.getSndBufSize()); } // false by default if (m_defaultSocketConfig.getSoLinger() >= 0) { socket.setSoLinger(true, m_defaultSocketConfig.getSoLinger()); } conn = m_connectionFactory.createConnection(socket); final BasicHttpContext localContext = new BasicHttpContext(); final HttpCoreContext context = HttpCoreContext.adapt(localContext); m_httpService.handleRequest(conn, context); localContext.clear(); conn.close(); conn = null; } catch (SocketTimeoutException ex) { // do nothing. There was no connection during timeout if (RedshiftLogger.isEnable()) m_log.logError(ex); } catch (HttpException | IOException e) { // Thread can`t throw any checked exceptions from run(), so it needs to be wrapped // into RuntimeException. 
if (RedshiftLogger.isEnable()) m_log.logError(e); throw InternalServerException.wrap(e); } finally { try { if (conn != null) { conn.shutdown(); } } catch (IOException e) { // do nothing if (RedshiftLogger.isEnable()) m_log.logError(e); } try { if (!serverSocket.isClosed()) { serverSocket.close(); } } catch (IOException e) { // do nothing if (RedshiftLogger.isEnable()) m_log.logError(e); } } } } }
8,500
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/geometric/RedshiftBox.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.geometric; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftBinaryObject; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftTokenizer; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.Serializable; import java.sql.SQLException; /** * This represents the box datatype within com.amazon.redshift. */ public class RedshiftBox extends RedshiftObject implements RedshiftBinaryObject, Serializable, Cloneable { /** * These are the two points. */ public RedshiftPoint[] point = new RedshiftPoint[2]; /** * @param x1 first x coordinate * @param y1 first y coordinate * @param x2 second x coordinate * @param y2 second y coordinate */ public RedshiftBox(double x1, double y1, double x2, double y2) { this(); this.point[0] = new RedshiftPoint(x1, y1); this.point[1] = new RedshiftPoint(x2, y2); } /** * @param p1 first point * @param p2 second point */ public RedshiftBox(RedshiftPoint p1, RedshiftPoint p2) { this(); this.point[0] = p1; this.point[1] = p2; } /** * @param s Box definition in Redshift syntax * @throws SQLException if definition is invalid */ public RedshiftBox(String s) throws SQLException { this(); setValue(s); } /** * Required constructor. */ public RedshiftBox() { setType("box"); } /** * This method sets the value of this object. It should be overidden, but still called by * subclasses. 
* * @param value a string representation of the value of the object * @throws SQLException thrown if value is invalid for this type */ @Override public void setValue(String value) throws SQLException { RedshiftTokenizer t = new RedshiftTokenizer(value, ','); if (t.getSize() != 2) { throw new RedshiftException( GT.tr("Conversion to type {0} failed: {1}.", type, value), RedshiftState.DATA_TYPE_MISMATCH); } point[0] = new RedshiftPoint(t.getToken(0)); point[1] = new RedshiftPoint(t.getToken(1)); } /** * @param b Definition of this point in Redshift's binary syntax */ public void setByteValue(byte[] b, int offset) { point[0] = new RedshiftPoint(); point[0].setByteValue(b, offset); point[1] = new RedshiftPoint(); point[1].setByteValue(b, offset + point[0].lengthInBytes()); } /** * @param obj Object to compare with * @return true if the two boxes are identical */ public boolean equals(Object obj) { if (obj instanceof RedshiftBox) { RedshiftBox p = (RedshiftBox) obj; // Same points. if (p.point[0].equals(point[0]) && p.point[1].equals(point[1])) { return true; } // Points swapped. if (p.point[0].equals(point[1]) && p.point[1].equals(point[0])) { return true; } // Using the opposite two points of the box: // (x1,y1),(x2,y2) -> (x1,y2),(x2,y1) if (p.point[0].x == point[0].x && p.point[0].y == point[1].y && p.point[1].x == point[1].x && p.point[1].y == point[0].y) { return true; } // Using the opposite two points of the box, and the points are swapped // (x1,y1),(x2,y2) -> (x2,y1),(x1,y2) if (p.point[0].x == point[1].x && p.point[0].y == point[0].y && p.point[1].x == point[0].x && p.point[1].y == point[1].y) { return true; } } return false; } public int hashCode() { // This relies on the behaviour of point's hashcode being an exclusive-OR of // its X and Y components; we end up with an exclusive-OR of the two X and // two Y components, which is equal whenever equals() would return true // since xor is commutative. 
return point[0].hashCode() ^ point[1].hashCode(); } public Object clone() throws CloneNotSupportedException { RedshiftBox newRSbox = (RedshiftBox) super.clone(); if (newRSbox.point != null) { newRSbox.point = newRSbox.point.clone(); for (int i = 0; i < newRSbox.point.length; ++i) { if (newRSbox.point[i] != null) { newRSbox.point[i] = (RedshiftPoint) newRSbox.point[i].clone(); } } } return newRSbox; } /** * @return the RedshiftBox in the syntax expected by com.amazon.redshift */ public String getValue() { return point[0].toString() + "," + point[1].toString(); } public int lengthInBytes() { return point[0].lengthInBytes() + point[1].lengthInBytes(); } public void toBytes(byte[] bytes, int offset) { point[0].toBytes(bytes, offset); point[1].toBytes(bytes, offset + point[0].lengthInBytes()); } }
8,501
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/geometric/RedshiftCircle.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.geometric; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftTokenizer; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.Serializable; import java.sql.SQLException; /** * This represents com.amazon.redshift's circle datatype, consisting of a point and a radius. */ public class RedshiftCircle extends RedshiftObject implements Serializable, Cloneable { /** * This is the center point. */ public RedshiftPoint center; /** * This is the radius. */ public double radius; /** * @param x coordinate of center * @param y coordinate of center * @param r radius of circle */ public RedshiftCircle(double x, double y, double r) { this(new RedshiftPoint(x, y), r); } /** * @param c RedshiftPoint describing the circle's center * @param r radius of circle */ public RedshiftCircle(RedshiftPoint c, double r) { this(); this.center = c; this.radius = r; } /** * @param s definition of the circle in Redshift's syntax. * @throws SQLException on conversion failure */ public RedshiftCircle(String s) throws SQLException { this(); setValue(s); } /** * This constructor is used by the driver. */ public RedshiftCircle() { setType("circle"); } /** * @param s definition of the circle in Redshift's syntax. 
* @throws SQLException on conversion failure */ @Override public void setValue(String s) throws SQLException { RedshiftTokenizer t = new RedshiftTokenizer(RedshiftTokenizer.removeAngle(s), ','); if (t.getSize() != 2) { throw new RedshiftException(GT.tr("Conversion to type {0} failed: {1}.", type, s), RedshiftState.DATA_TYPE_MISMATCH); } try { center = new RedshiftPoint(t.getToken(0)); radius = Double.parseDouble(t.getToken(1)); } catch (NumberFormatException e) { throw new RedshiftException(GT.tr("Conversion to type {0} failed: {1}.", type, s), RedshiftState.DATA_TYPE_MISMATCH, e); } } /** * @param obj Object to compare with * @return true if the two circles are identical */ public boolean equals(Object obj) { if (obj instanceof RedshiftCircle) { RedshiftCircle p = (RedshiftCircle) obj; return p.center.equals(center) && p.radius == radius; } return false; } public int hashCode() { long v = Double.doubleToLongBits(radius); return (int) (center.hashCode() ^ v ^ (v >>> 32)); } public Object clone() throws CloneNotSupportedException { RedshiftCircle newRScircle = (RedshiftCircle) super.clone(); if (newRScircle.center != null) { newRScircle.center = (RedshiftPoint) newRScircle.center.clone(); } return newRScircle; } /** * @return the RedshiftCircle in the syntax expected by com.amazon.redshift */ public String getValue() { return "<" + center + "," + radius + ">"; } }
8,502
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/geometric/RedshiftPoint.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.geometric; import com.amazon.redshift.util.ByteConverter; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftBinaryObject; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftTokenizer; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.awt.Point; import java.io.Serializable; import java.sql.SQLException; /** * <p>It maps to the point datatype in com.amazon.redshift.</p> * * <p>This implements a version of java.awt.Point, except it uses double to represent the coordinates.</p> */ public class RedshiftPoint extends RedshiftObject implements RedshiftBinaryObject, Serializable, Cloneable { /** * The X coordinate of the point. */ public double x; /** * The Y coordinate of the point. */ public double y; /** * @param x coordinate * @param y coordinate */ public RedshiftPoint(double x, double y) { this(); this.x = x; this.y = y; } /** * This is called mainly from the other geometric types, when a point is embedded within their * definition. * * @param value Definition of this point in Redshift's syntax * @throws SQLException if something goes wrong */ public RedshiftPoint(String value) throws SQLException { this(); setValue(value); } /** * Required by the driver. 
*/ public RedshiftPoint() { setType("point"); } /** * @param s Definition of this point in Redshift's syntax * @throws SQLException on conversion failure */ @Override public void setValue(String s) throws SQLException { RedshiftTokenizer t = new RedshiftTokenizer(RedshiftTokenizer.removePara(s), ','); try { x = Double.parseDouble(t.getToken(0)); y = Double.parseDouble(t.getToken(1)); } catch (NumberFormatException e) { throw new RedshiftException(GT.tr("Conversion to type {0} failed: {1}.", type, s), RedshiftState.DATA_TYPE_MISMATCH, e); } } /** * @param b Definition of this point in Redshift's binary syntax */ public void setByteValue(byte[] b, int offset) { x = ByteConverter.float8(b, offset); y = ByteConverter.float8(b, offset + 8); } /** * @param obj Object to compare with * @return true if the two points are identical */ public boolean equals(Object obj) { if (obj instanceof RedshiftPoint) { RedshiftPoint p = (RedshiftPoint) obj; return x == p.x && y == p.y; } return false; } public int hashCode() { long v1 = Double.doubleToLongBits(x); long v2 = Double.doubleToLongBits(y); return (int) (v1 ^ v2 ^ (v1 >>> 32) ^ (v2 >>> 32)); } /** * @return the RedshiftPoint in the syntax expected by com.amazon.redshift */ public String getValue() { return "(" + x + "," + y + ")"; } public int lengthInBytes() { return 16; } /** * Populate the byte array with RedshiftPoint in the binary syntax expected by com.amazon.redshift. */ public void toBytes(byte[] b, int offset) { ByteConverter.float8(b, offset, x); ByteConverter.float8(b, offset + 8, y); } /** * Translate the point by the supplied amount. * * @param x integer amount to add on the x axis * @param y integer amount to add on the y axis */ public void translate(int x, int y) { translate((double) x, (double) y); } /** * Translate the point by the supplied amount. 
* * @param x double amount to add on the x axis * @param y double amount to add on the y axis */ public void translate(double x, double y) { this.x += x; this.y += y; } /** * Moves the point to the supplied coordinates. * * @param x integer coordinate * @param y integer coordinate */ public void move(int x, int y) { setLocation(x, y); } /** * Moves the point to the supplied coordinates. * * @param x double coordinate * @param y double coordinate */ public void move(double x, double y) { this.x = x; this.y = y; } /** * Moves the point to the supplied coordinates. refer to java.awt.Point for description of this. * * @param x integer coordinate * @param y integer coordinate * @see java.awt.Point */ public void setLocation(int x, int y) { move((double) x, (double) y); } /** * Moves the point to the supplied java.awt.Point refer to java.awt.Point for description of this. * * @param p Point to move to * @see java.awt.Point */ public void setLocation(Point p) { setLocation(p.x, p.y); } @Override public Object clone() throws CloneNotSupportedException { // squid:S2157 "Cloneables" should implement "clone return super.clone(); } }
8,503
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/geometric/RedshiftPath.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.geometric; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftTokenizer; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.Serializable; import java.sql.SQLException; /** * This implements a path (a multiple segmented line, which may be closed). */ public class RedshiftPath extends RedshiftObject implements Serializable, Cloneable { /** * True if the path is open, false if closed. */ public boolean open; /** * The points defining this path. */ public RedshiftPoint[] points; /** * @param points the RedshiftPoints that define the path * @param open True if the path is open, false if closed */ public RedshiftPath(RedshiftPoint[] points, boolean open) { this(); this.points = points; this.open = open; } /** * Required by the driver. */ public RedshiftPath() { setType("path"); } /** * @param s definition of the path in Redshift's syntax. 
* @throws SQLException on conversion failure */ public RedshiftPath(String s) throws SQLException { this(); setValue(s); } /** * @param s Definition of the path in Redshift's syntax * @throws SQLException on conversion failure */ public void setValue(String s) throws SQLException { // First test to see if were open if (s.startsWith("[") && s.endsWith("]")) { open = true; s = RedshiftTokenizer.removeBox(s); } else if (s.startsWith("(") && s.endsWith(")")) { open = false; s = RedshiftTokenizer.removePara(s); } else { throw new RedshiftException(GT.tr("Cannot tell if path is open or closed: {0}.", s), RedshiftState.DATA_TYPE_MISMATCH); } RedshiftTokenizer t = new RedshiftTokenizer(s, ','); int npoints = t.getSize(); points = new RedshiftPoint[npoints]; for (int p = 0; p < npoints; p++) { points[p] = new RedshiftPoint(t.getToken(p)); } } /** * @param obj Object to compare with * @return true if the two paths are identical */ public boolean equals(Object obj) { if (obj instanceof RedshiftPath) { RedshiftPath p = (RedshiftPath) obj; if (p.points.length != points.length) { return false; } if (p.open != open) { return false; } for (int i = 0; i < points.length; i++) { if (!points[i].equals(p.points[i])) { return false; } } return true; } return false; } public int hashCode() { // XXX not very good.. int hash = 0; for (int i = 0; i < points.length && i < 5; ++i) { hash = hash ^ points[i].hashCode(); } return hash; } public Object clone() throws CloneNotSupportedException { RedshiftPath newRSpath = (RedshiftPath) super.clone(); if (newRSpath.points != null) { newRSpath.points = (RedshiftPoint[]) newRSpath.points.clone(); for (int i = 0; i < newRSpath.points.length; ++i) { newRSpath.points[i] = (RedshiftPoint) newRSpath.points[i].clone(); } } return newRSpath; } /** * This returns the path in the syntax expected by com.amazon.redshift. */ public String getValue() { StringBuilder b = new StringBuilder(open ? 
"[" : "("); for (int p = 0; p < points.length; p++) { if (p > 0) { b.append(","); } b.append(points[p].toString()); } b.append(open ? "]" : ")"); return b.toString(); } public boolean isOpen() { return open; } public boolean isClosed() { return !open; } public void closePath() { open = false; } public void openPath() { open = true; } }
8,504
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/geometric/RedshiftLine.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.geometric; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftTokenizer; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.Serializable; import java.sql.SQLException; /** * This implements a line represented by the linear equation Ax + By + C = 0. **/ public class RedshiftLine extends RedshiftObject implements Serializable, Cloneable { /** * Coefficient of x. */ public double a; /** * Coefficient of y. */ public double b; /** * Constant. */ public double c; /** * @param a coefficient of x * @param b coefficient of y * @param c constant */ public RedshiftLine(double a, double b, double c) { this(); this.a = a; this.b = b; this.c = c; } /** * @param x1 coordinate for first point on the line * @param y1 coordinate for first point on the line * @param x2 coordinate for second point on the line * @param y2 coordinate for second point on the line */ public RedshiftLine(double x1, double y1, double x2, double y2) { this(); if (x1 == x2) { a = -1; b = 0; } else { a = (y2 - y1) / (x2 - x1); b = -1; } c = y1 - a * x1; } /** * @param p1 first point on the line * @param p2 second point on the line */ public RedshiftLine(RedshiftPoint p1, RedshiftPoint p2) { this(p1.x, p1.y, p2.x, p2.y); } /** * @param lseg Line segment which calls on this line. */ public RedshiftLine(RedshiftLseg lseg) { this(lseg.point[0], lseg.point[1]); } /** * @param s definition of the line in Redshift's syntax. * @throws SQLException on conversion failure */ public RedshiftLine(String s) throws SQLException { this(); setValue(s); } /** * required by the driver. 
*/ public RedshiftLine() { setType("line"); } /** * @param s Definition of the line in Redshift's syntax * @throws SQLException on conversion failure */ @Override public void setValue(String s) throws SQLException { if (s.trim().startsWith("{")) { RedshiftTokenizer t = new RedshiftTokenizer(RedshiftTokenizer.removeCurlyBrace(s), ','); if (t.getSize() != 3) { throw new RedshiftException(GT.tr("Conversion to type {0} failed: {1}.", type, s), RedshiftState.DATA_TYPE_MISMATCH); } a = Double.parseDouble(t.getToken(0)); b = Double.parseDouble(t.getToken(1)); c = Double.parseDouble(t.getToken(2)); } else if (s.trim().startsWith("[")) { RedshiftTokenizer t = new RedshiftTokenizer(RedshiftTokenizer.removeBox(s), ','); if (t.getSize() != 2) { throw new RedshiftException(GT.tr("Conversion to type {0} failed: {1}.", type, s), RedshiftState.DATA_TYPE_MISMATCH); } RedshiftPoint point1 = new RedshiftPoint(t.getToken(0)); RedshiftPoint point2 = new RedshiftPoint(t.getToken(1)); a = point2.x - point1.x; b = point2.y - point1.y; c = point1.y; } } /** * @param obj Object to compare with * @return true if the two lines are identical */ public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } if (!super.equals(obj)) { return false; } RedshiftLine pGline = (RedshiftLine) obj; return Double.compare(pGline.a, a) == 0 && Double.compare(pGline.b, b) == 0 && Double.compare(pGline.c, c) == 0; } public int hashCode() { int result = super.hashCode(); long temp; temp = Double.doubleToLongBits(a); result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = Double.doubleToLongBits(b); result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = Double.doubleToLongBits(c); result = 31 * result + (int) (temp ^ (temp >>> 32)); return result; } /** * @return the RedshiftLine in the syntax expected by com.amazon.redshift */ public String getValue() { return "{" + a + "," + b + "," + c + "}"; } @Override public Object clone() 
throws CloneNotSupportedException { // squid:S2157 "Cloneables" should implement "clone return super.clone(); } }
8,505
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/geometric/RedshiftPolygon.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.geometric;

import com.amazon.redshift.util.RedshiftObject;
import com.amazon.redshift.util.RedshiftTokenizer;

import java.io.Serializable;
import java.sql.SQLException;

/**
 * This implements the polygon datatype within Redshift.
 */
public class RedshiftPolygon extends RedshiftObject implements Serializable, Cloneable {
  /**
   * The points defining the polygon.
   */
  public RedshiftPoint[] points;

  /**
   * Creates a polygon using an array of RedshiftPoints.
   *
   * @param points the points defining the polygon
   */
  public RedshiftPolygon(RedshiftPoint[] points) {
    this();
    this.points = points;
  }

  /**
   * @param s definition of the polygon in Redshift's syntax.
   * @throws SQLException on conversion failure
   */
  public RedshiftPolygon(String s) throws SQLException {
    this();
    setValue(s);
  }

  /**
   * Required by the driver; registers the type name only.
   */
  public RedshiftPolygon() {
    setType("polygon");
  }

  /**
   * Parses a polygon from its textual form: a parenthesized, comma-separated
   * list of points.
   *
   * @param s Definition of the polygon in Redshift's syntax
   * @throws SQLException on conversion failure
   */
  public void setValue(String s) throws SQLException {
    RedshiftTokenizer tokenizer =
        new RedshiftTokenizer(RedshiftTokenizer.removePara(s), ',');
    int count = tokenizer.getSize();
    points = new RedshiftPoint[count];
    for (int i = 0; i < count; i++) {
      points[i] = new RedshiftPoint(tokenizer.getToken(i));
    }
  }

  /**
   * Two polygons are equal when they contain the same points in the same order.
   *
   * @param obj Object to compare with
   * @return true if the two polygons are identical
   */
  public boolean equals(Object obj) {
    if (!(obj instanceof RedshiftPolygon)) {
      return false;
    }
    RedshiftPolygon other = (RedshiftPolygon) obj;
    if (other.points.length != points.length) {
      return false;
    }
    for (int i = 0; i < points.length; i++) {
      if (!points[i].equals(other.points[i])) {
        return false;
      }
    }
    return true;
  }

  public int hashCode() {
    // XXX not very good: only folds in the first five points.
    int hash = 0;
    for (int i = 0; i < points.length && i < 5; ++i) {
      hash = hash ^ points[i].hashCode();
    }
    return hash;
  }

  public Object clone() throws CloneNotSupportedException {
    RedshiftPolygon copy = (RedshiftPolygon) super.clone();
    if (copy.points != null) {
      // Deep-copy the point array so the clone is fully independent.
      copy.points = (RedshiftPoint[]) copy.points.clone();
      for (int i = 0; i < copy.points.length; ++i) {
        if (copy.points[i] != null) {
          copy.points[i] = (RedshiftPoint) copy.points[i].clone();
        }
      }
    }
    return copy;
  }

  /**
   * @return the RedshiftPolygon in the syntax expected by com.amazon.redshift
   */
  public String getValue() {
    StringBuilder sb = new StringBuilder();
    sb.append("(");
    for (int i = 0; i < points.length; i++) {
      if (i > 0) {
        sb.append(",");
      }
      sb.append(points[i].toString());
    }
    sb.append(")");
    return sb.toString();
  }
}
8,506
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/geometric/RedshiftLseg.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.geometric; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftTokenizer; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.Serializable; import java.sql.SQLException; /** * This implements a lseg (line segment) consisting of two points. */ public class RedshiftLseg extends RedshiftObject implements Serializable, Cloneable { /** * These are the two points. */ public RedshiftPoint[] point = new RedshiftPoint[2]; /** * @param x1 coordinate for first point * @param y1 coordinate for first point * @param x2 coordinate for second point * @param y2 coordinate for second point */ public RedshiftLseg(double x1, double y1, double x2, double y2) { this(new RedshiftPoint(x1, y1), new RedshiftPoint(x2, y2)); } /** * @param p1 first point * @param p2 second point */ public RedshiftLseg(RedshiftPoint p1, RedshiftPoint p2) { this(); this.point[0] = p1; this.point[1] = p2; } /** * @param s definition of the line segment in Redshift's syntax. * @throws SQLException on conversion failure */ public RedshiftLseg(String s) throws SQLException { this(); setValue(s); } /** * required by the driver. 
*/ public RedshiftLseg() { setType("lseg"); } /** * @param s Definition of the line segment in Redshift's syntax * @throws SQLException on conversion failure */ @Override public void setValue(String s) throws SQLException { RedshiftTokenizer t = new RedshiftTokenizer(RedshiftTokenizer.removeBox(s), ','); if (t.getSize() != 2) { throw new RedshiftException(GT.tr("Conversion to type {0} failed: {1}.", type, s), RedshiftState.DATA_TYPE_MISMATCH); } point[0] = new RedshiftPoint(t.getToken(0)); point[1] = new RedshiftPoint(t.getToken(1)); } /** * @param obj Object to compare with * @return true if the two line segments are identical */ public boolean equals(Object obj) { if (obj instanceof RedshiftLseg) { RedshiftLseg p = (RedshiftLseg) obj; return (p.point[0].equals(point[0]) && p.point[1].equals(point[1])) || (p.point[0].equals(point[1]) && p.point[1].equals(point[0])); } return false; } public int hashCode() { return point[0].hashCode() ^ point[1].hashCode(); } public Object clone() throws CloneNotSupportedException { RedshiftLseg newRSlseg = (RedshiftLseg) super.clone(); if (newRSlseg.point != null) { newRSlseg.point = (RedshiftPoint[]) newRSlseg.point.clone(); for (int i = 0; i < newRSlseg.point.length; ++i) { if (newRSlseg.point[i] != null) { newRSlseg.point[i] = (RedshiftPoint) newRSlseg.point[i].clone(); } } } return newRSlseg; } /** * @return the RedshiftLseg in the syntax expected by com.amazon.redshift */ public String getValue() { return "[" + point[0] + "," + point[1] + "]"; } }
8,507
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/xa/RedshiftXAException.java
/* * Copyright (c) 2009, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.xa; import javax.transaction.xa.XAException; /** * A convenience subclass of <code>XAException</code> which makes it easy to create an instance of * <code>XAException</code> with a human-readable message, a <code>Throwable</code> cause, and an XA * error code. * * @author Michael S. Allman */ public class RedshiftXAException extends XAException { RedshiftXAException(String message, int errorCode) { super(message); this.errorCode = errorCode; } RedshiftXAException(String message, Throwable cause, int errorCode) { super(message); initCause(cause); this.errorCode = errorCode; } RedshiftXAException(Throwable cause, int errorCode) { super(errorCode); initCause(cause); } }
8,508
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/xa/RedshiftXAConnection.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.xa;

import com.amazon.redshift.RedshiftConnection;
import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.core.TransactionState;
import com.amazon.redshift.ds.RedshiftPooledConnection;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.LinkedList;

import javax.sql.XAConnection;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;

/**
 * <p>The Redshift implementation of {@link XAResource}.</p>
 *
 * <p>This implementation doesn't support transaction interleaving (see JTA specification, section
 * 3.4.4) and suspend/resume.</p>
 *
 * <p>Two-phase commit requires PostgreSQL server version 8.1 or higher.</p>
 *
 * @author Heikki Linnakangas (heikki.linnakangas@iki.fi)
 */
public class RedshiftXAConnection extends RedshiftPooledConnection implements XAConnection, XAResource {
  /**
   * Underlying physical database connection. It's used for issuing PREPARE TRANSACTION/ COMMIT
   * PREPARED/ROLLBACK PREPARED commands.
   */
  private final BaseConnection conn;

  // Xid of the XA transaction currently associated with this connection, or null.
  private Xid currentXid;

  // Position in the start/end/prepare life cycle; see the State enum below.
  private State state;

  // Xid of the transaction last prepared through this connection, or null.
  private Xid preparedXid;

  // True once the transaction on this connection has been committed or rolled back.
  private boolean committedOrRolledBack;

  /*
   * When an XA transaction is started, we put the underlying connection into non-autocommit mode.
   * The old setting is saved in localAutoCommitMode, so that we can restore it when the XA
   * transaction ends and the connection returns into local transaction mode.
   */
  private boolean localAutoCommitMode = true;

  // Debug trace prefixed with this resource's identity hash, emitted only when logging is enabled.
  private void debug(String s) {
    if (RedshiftLogger.isEnable()) {
      conn.getLogger().log(LogLevel.DEBUG, "XAResource {0}: {1}",
          new Object[]{Integer.toHexString(this.hashCode()), s});
    }
  }

  public RedshiftXAConnection(BaseConnection conn) throws SQLException {
    super(conn, true, true);
    this.conn = conn;
    this.state = State.IDLE;
  }

  /**
   * XAConnection interface.
   */
  @Override
  public Connection getConnection() throws SQLException {
    if (RedshiftLogger.isEnable())
      this.conn.getLogger().logFunction(true);

    Connection connection = super.getConnection();

    // When we're outside an XA transaction, autocommit
    // is supposed to be true, per usual JDBC convention.
    // When an XA transaction is in progress, it should be
    // false.
    if (state == State.IDLE) {
      connection.setAutoCommit(true);
    }

    /*
     * Wrap the connection in a proxy to forbid application from fiddling with transaction state
     * directly during an XA transaction
     */
    ConnectionHandler handler = new ConnectionHandler(connection);
    Connection rc = (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
        new Class[]{Connection.class, RedshiftConnection.class}, handler);

    if (RedshiftLogger.isEnable())
      this.conn.getLogger().logFunction(false, rc);

    return rc;
  }

  @Override
  public XAResource getXAResource() {
    if (RedshiftLogger.isEnable())
      this.conn.getLogger().logFunction(true);

    return this;
  }

  /*
   * A java.sql.Connection proxy class to forbid calls to transaction control methods while the
   * connection is used for an XA transaction.
   */
  private class ConnectionHandler implements InvocationHandler {
    private final Connection con;

    ConnectionHandler(Connection con) {
      this.con = con;
    }

    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      // While an XA transaction is active, local transaction control is forbidden.
      if (state != State.IDLE) {
        String methodName = method.getName();
        if (methodName.equals("commit")
            || methodName.equals("rollback")
            || methodName.equals("setSavePoint")
            || (methodName.equals("setAutoCommit") && (Boolean) args[0])) {
          throw new RedshiftException(
              GT.tr(
                  "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active."),
              RedshiftState.OBJECT_NOT_IN_STATE);
        }
      }
      try {
        /*
         * If the argument to equals-method is also a wrapper, present the original unwrapped
         * connection to the underlying equals method.
         */
        if (method.getName().equals("equals") && args.length == 1) {
          Object arg = args[0];
          if (arg != null && Proxy.isProxyClass(arg.getClass())) {
            InvocationHandler h = Proxy.getInvocationHandler(arg);
            if (h instanceof ConnectionHandler) {
              // unwrap argument
              args = new Object[]{((ConnectionHandler) h).con};
            }
          }
        }

        return method.invoke(con, args);
      } catch (InvocationTargetException ex) {
        // Re-throw the underlying exception rather than the reflective wrapper.
        throw ex.getTargetException();
      }
    }
  }

  /**
   * <p>Preconditions:</p>
   * <ol>
   * <li>Flags must be one of TMNOFLAGS, TMRESUME or TMJOIN</li>
   * <li>xid != null</li>
   * <li>Connection must not be associated with a transaction</li>
   * <li>The TM hasn't seen the xid before</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   * <li>TMRESUME not supported.</li>
   * <li>If flags is TMJOIN, we must be in ended state, and xid must be the current transaction</li>
   * <li>Unless flags is TMJOIN, previous transaction using the connection must be committed or prepared or rolled
   * back</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   * <li>Connection is associated with the transaction</li>
   * </ol>
   */
  @Override
  public void start(Xid xid, int flags) throws XAException {
    if (RedshiftLogger.isEnable()) {
      debug("starting transaction xid = " + xid);
    }

    // Check preconditions
    if (flags != XAResource.TMNOFLAGS && flags != XAResource.TMRESUME
        && flags != XAResource.TMJOIN) {
      throw new RedshiftXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL);
    }

    if (xid == null) {
      throw new RedshiftXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
    }

    if (state == State.ACTIVE) {
      throw new RedshiftXAException(GT.tr("Connection is busy with another transaction"),
          XAException.XAER_PROTO);
    }

    // We can't check precondition 4 easily, so we don't. Duplicate xid will be caught in prepare
    // phase.

    // Check implementation deficiency preconditions
    if (flags == TMRESUME) {
      throw new RedshiftXAException(GT.tr("suspend/resume not implemented"),
          XAException.XAER_RMERR);
    }

    // It's ok to join an ended transaction. WebLogic does that.
    if (flags == TMJOIN) {
      if (state != State.ENDED) {
        throw new RedshiftXAException(
            GT.tr(
                "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}",
                xid, currentXid, state, flags),
            XAException.XAER_RMERR);
      }

      if (!xid.equals(currentXid)) {
        throw new RedshiftXAException(
            GT.tr(
                "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}",
                xid, currentXid, state, flags),
            XAException.XAER_RMERR);
      }
    } else if (state == State.ENDED) {
      throw new RedshiftXAException(GT.tr("Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}",
          xid, currentXid, state, flags), XAException.XAER_RMERR);
    }

    // Only need save localAutoCommitMode for NOFLAGS, TMRESUME and TMJOIN already saved old
    // localAutoCommitMode.
    if (flags == TMNOFLAGS) {
      try {
        localAutoCommitMode = conn.getAutoCommit();
        conn.setAutoCommit(false);
      } catch (SQLException ex) {
        throw new RedshiftXAException(GT.tr("Error disabling autocommit"), ex,
            XAException.XAER_RMERR);
      }
    }

    // Preconditions are met, Associate connection with the transaction
    state = State.ACTIVE;
    currentXid = xid;
    preparedXid = null;
    committedOrRolledBack = false;
  }

  /**
   * <p>Preconditions:</p>
   * <ol>
   * <li>Flags is one of TMSUCCESS, TMFAIL, TMSUSPEND</li>
   * <li>xid != null</li>
   * <li>Connection is associated with transaction xid</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   * <li>Flags is not TMSUSPEND</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   * <li>Connection is disassociated from the transaction.</li>
   * </ol>
   */
  @Override
  public void end(Xid xid, int flags) throws XAException {
    if (RedshiftLogger.isEnable()) {
      debug("ending transaction xid = " + xid);
    }

    // Check preconditions
    if (flags != XAResource.TMSUSPEND && flags != XAResource.TMFAIL
        && flags != XAResource.TMSUCCESS) {
      throw new RedshiftXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL);
    }

    if (xid == null) {
      throw new RedshiftXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
    }

    if (state != State.ACTIVE || !currentXid.equals(xid)) {
      throw new RedshiftXAException(GT.tr("tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}",
          state, xid, currentXid, preparedXid), XAException.XAER_PROTO);
    }

    // Check implementation deficiency preconditions
    if (flags == XAResource.TMSUSPEND) {
      throw new RedshiftXAException(GT.tr("suspend/resume not implemented"),
          XAException.XAER_RMERR);
    }

    // We ignore TMFAIL. It's just a hint to the RM. We could roll back immediately
    // if TMFAIL was given.

    // All clear. We don't have any real work to do.
    state = State.ENDED;
  }

  /**
   * <p>Prepares transaction. Preconditions:</p>
   * <ol>
   * <li>xid != null</li>
   * <li>xid is in ended state</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   * <li>xid was associated with this connection</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   * <li>Transaction is prepared</li>
   * </ol>
   */
  @Override
  public int prepare(Xid xid) throws XAException {
    if (RedshiftLogger.isEnable()) {
      debug("preparing transaction xid = " + xid);
    }

    // Check preconditions
    if (currentXid == null && preparedXid != null) {
      if (RedshiftLogger.isEnable()) {
        debug("Prepare xid " + xid + " but current connection is not attached to a transaction"
            + " while it was prepared in past with prepared xid " + preparedXid);
      }
      throw new RedshiftXAException(GT.tr(
          "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}",
          preparedXid, xid), XAException.XAER_PROTO);
    } else if (currentXid == null) {
      throw new RedshiftXAException(GT.tr(
          "Current connection does not have an associated xid. prepare xid={0}", xid),
          XAException.XAER_NOTA);
    }

    if (!currentXid.equals(xid)) {
      if (RedshiftLogger.isEnable()) {
        debug("Error to prepare xid " + xid + ", the current connection already bound with xid "
            + currentXid);
      }
      throw new RedshiftXAException(GT.tr(
          "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}",
          currentXid, xid), XAException.XAER_RMERR);
    }
    if (state != State.ENDED) {
      // BUGFIX: the message has a {1} placeholder for the state, but the state argument
      // was previously missing, leaving a literal "{1}" in the rendered message.
      throw new RedshiftXAException(GT.tr("Prepare called before end. prepare xid={0}, state={1}", xid, state),
          XAException.XAER_INVAL);
    }

    state = State.IDLE;
    preparedXid = currentXid;
    currentXid = null;

    try {
      String s = RecoveredXid.xidToString(xid);

      Statement stmt = conn.createStatement();
      try {
        stmt.executeUpdate("PREPARE TRANSACTION '" + s + "'");
      } finally {
        stmt.close();
      }
      conn.setAutoCommit(localAutoCommitMode);

      return XA_OK;
    } catch (SQLException ex) {
      throw new RedshiftXAException(GT.tr("Error preparing transaction. prepare xid={0}", xid),
          ex, mapSQLStateToXAErrorCode(ex));
    }
  }

  /**
   * <p>Recovers transaction. Preconditions:</p>
   * <ol>
   * <li>flag must be one of TMSTARTRSCAN, TMENDRSCAN, TMNOFLAGS or TMSTARTRSCAN | TMENDRSCAN</li>
   * <li>If flag isn't TMSTARTRSCAN or TMSTARTRSCAN | TMENDRSCAN, a recovery scan must be in progress</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   * <li>list of prepared xids is returned</li>
   * </ol>
   */
  @Override
  public Xid[] recover(int flag) throws XAException {
    if (RedshiftLogger.isEnable())
      this.conn.getLogger().logFunction(true, flag);

    // Check preconditions
    if (flag != TMSTARTRSCAN && flag != TMENDRSCAN && flag != TMNOFLAGS
        && flag != (TMSTARTRSCAN | TMENDRSCAN)) {
      throw new RedshiftXAException(GT.tr("Invalid flags {0}", flag), XAException.XAER_INVAL);
    }

    // We don't check for precondition 2, because we would have to add some additional state in
    // this object to keep track of recovery scans.

    // All clear. We return all the xids in the first TMSTARTRSCAN call, and always return
    // an empty array otherwise.
    if ((flag & TMSTARTRSCAN) == 0) {
      return new Xid[0];
    } else {
      try {
        Statement stmt = conn.createStatement();
        try {
          // If this connection is simultaneously used for a transaction,
          // this query gets executed inside that transaction. It's OK,
          // except if the transaction is in abort-only state and the
          // backed refuses to process new queries. Hopefully not a problem
          // in practice.
          ResultSet rs = stmt.executeQuery(
              "SELECT gid FROM pg_prepared_xacts where database = current_database()");
          LinkedList<Xid> l = new LinkedList<Xid>();
          while (rs.next()) {
            Xid recoveredXid = RecoveredXid.stringToXid(rs.getString(1));
            if (recoveredXid != null) {
              l.add(recoveredXid);
            }
          }
          rs.close();

          return l.toArray(new Xid[0]);
        } finally {
          stmt.close();
        }
      } catch (SQLException ex) {
        throw new RedshiftXAException(GT.tr("Error during recover"), ex, XAException.XAER_RMERR);
      }
    }
  }

  /**
   * <p>Preconditions:</p>
   * <ol>
   * <li>xid is known to the RM or it's in prepared state</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   * <li>xid must be associated with this connection if it's not in prepared state.</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   * <li>Transaction is rolled back and disassociated from connection</li>
   * </ol>
   */
  @Override
  public void rollback(Xid xid) throws XAException {
    if (RedshiftLogger.isEnable()) {
      debug("rolling back xid = " + xid);
    }

    // We don't explicitly check precondition 1.

    try {
      if (currentXid != null && currentXid.equals(xid)) {
        // Rolling back the transaction currently associated with this connection.
        state = State.IDLE;
        currentXid = null;
        conn.rollback();
        conn.setAutoCommit(localAutoCommitMode);
      } else {
        // Rolling back a transaction that was prepared (possibly on another connection).
        String s = RecoveredXid.xidToString(xid);

        conn.setAutoCommit(true);
        Statement stmt = conn.createStatement();
        try {
          stmt.executeUpdate("ROLLBACK PREPARED '" + s + "'");
        } finally {
          stmt.close();
        }
      }
      committedOrRolledBack = true;
    } catch (SQLException ex) {
      int errorCode = XAException.XAER_RMERR;
      if (RedshiftState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) {
        if (committedOrRolledBack || !xid.equals(preparedXid)) {
          if (RedshiftLogger.isEnable()) {
            debug("rolling back xid " + xid + " while the connection prepared xid is " + preparedXid
                + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : ""));
          }
          errorCode = XAException.XAER_NOTA;
        }
      }
      if (RedshiftState.isConnectionError(ex.getSQLState())) {
        if (RedshiftLogger.isEnable()) {
          debug("rollback connection failure (sql error code " + ex.getSQLState()
              + "), reconnection could be expected");
        }
        errorCode = XAException.XAER_RMFAIL;
      }
      // BUGFIX: the message has placeholders {0},{1},{2} but only two arguments were
      // previously supplied, leaving a literal "{2}" in the rendered message.
      throw new RedshiftXAException(GT.tr("Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}",
          xid, preparedXid, currentXid), ex, errorCode);
    }
  }

  @Override
  public void commit(Xid xid, boolean onePhase) throws XAException {
    if (RedshiftLogger.isEnable()) {
      debug("committing xid = " + xid + (onePhase ? " (one phase) " : " (two phase)"));
    }

    if (xid == null) {
      throw new RedshiftXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
    }

    if (onePhase) {
      commitOnePhase(xid);
    } else {
      commitPrepared(xid);
    }
  }

  /**
   * <p>Preconditions:</p>
   * <ol>
   * <li>xid must in ended state.</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   * <li>this connection must have been used to run the transaction</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   * <li>Transaction is committed</li>
   * </ol>
   */
  private void commitOnePhase(Xid xid) throws XAException {
    try {
      // Check preconditions
      if (xid.equals(preparedXid)) { // TODO: check if the condition should be negated
        throw new RedshiftXAException(GT.tr("One-phase commit called for xid {0} but connection was prepared with xid {1}",
            xid, preparedXid), XAException.XAER_PROTO);
      }
      if (currentXid == null && !committedOrRolledBack) {
        // In fact, we don't know if xid is bogus, or if it just wasn't associated with this connection.
        // Assume it's our fault.
        // TODO: pick proper error message. Current one does not clarify what went wrong
        throw new RedshiftXAException(GT.tr(
            "Not implemented: one-phase commit must be issued using the same connection that was used to start it"),
            XAException.XAER_RMERR);
      }
      if (!xid.equals(currentXid) || committedOrRolledBack) {
        throw new RedshiftXAException(GT.tr("One-phase commit with unknown xid. commit xid={0}, currentXid={1}",
            xid, currentXid), XAException.XAER_NOTA);
      }
      if (state != State.ENDED) {
        throw new RedshiftXAException(GT.tr("commit called before end. commit xid={0}, state={1}", xid, state),
            XAException.XAER_PROTO);
      }

      // Preconditions are met. Commit
      state = State.IDLE;
      currentXid = null;
      committedOrRolledBack = true;

      conn.commit();
      conn.setAutoCommit(localAutoCommitMode);
    } catch (SQLException ex) {
      throw new RedshiftXAException(GT.tr("Error during one-phase commit. commit xid={0}", xid),
          ex, mapSQLStateToXAErrorCode(ex));
    }
  }

  /**
   * <p>Commits prepared transaction. Preconditions:</p>
   * <ol>
   * <li>xid must be in prepared state in the server</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   * <li>Connection must be in idle state</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   * <li>Transaction is committed</li>
   * </ol>
   */
  private void commitPrepared(Xid xid) throws XAException {
    try {
      // Check preconditions. The connection mustn't be used for another
      // other XA or local transaction, or the COMMIT PREPARED command
      // would mess it up.
      if (state != State.IDLE || conn.getTransactionState() != TransactionState.IDLE) {
        throw new RedshiftXAException(
            GT.tr("Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}",
                xid, currentXid, state, conn.getTransactionState()),
            XAException.XAER_RMERR);
      }

      String s = RecoveredXid.xidToString(xid);

      localAutoCommitMode = conn.getAutoCommit();
      conn.setAutoCommit(true);
      Statement stmt = conn.createStatement();
      try {
        stmt.executeUpdate("COMMIT PREPARED '" + s + "'");
      } finally {
        stmt.close();
        conn.setAutoCommit(localAutoCommitMode);
      }
      committedOrRolledBack = true;
    } catch (SQLException ex) {
      int errorCode = XAException.XAER_RMERR;
      if (RedshiftState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) {
        if (committedOrRolledBack || !xid.equals(preparedXid)) {
          if (RedshiftLogger.isEnable()) {
            debug("committing xid " + xid + " while the connection prepared xid is " + preparedXid
                + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : ""));
          }
          errorCode = XAException.XAER_NOTA;
        }
      }
      if (RedshiftState.isConnectionError(ex.getSQLState())) {
        if (RedshiftLogger.isEnable()) {
          debug("commit connection failure (sql error code " + ex.getSQLState()
              + "), reconnection could be expected");
        }
        errorCode = XAException.XAER_RMFAIL;
      }
      throw new RedshiftXAException(GT.tr("Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}",
          xid, preparedXid, currentXid), ex, errorCode);
    }
  }

  @Override
  public boolean isSameRM(XAResource xares) throws XAException {
    if (RedshiftLogger.isEnable())
      this.conn.getLogger().logFunction(true, xares);

    // This trivial implementation makes sure that the
    // application server doesn't try to use another connection
    // for prepare, commit and rollback commands.
    return xares == this;
  }

  /**
   * Does nothing, since we don't do heuristics.
   */
  @Override
  public void forget(Xid xid) throws XAException {
    throw new RedshiftXAException(GT.tr("Heuristic commit/rollback not supported. forget xid={0}", xid),
        XAException.XAER_NOTA);
  }

  /**
   * We don't do transaction timeouts. Just returns 0.
   */
  @Override
  public int getTransactionTimeout() {
    return 0;
  }

  /**
   * We don't do transaction timeouts. Returns false.
   */
  @Override
  public boolean setTransactionTimeout(int seconds) {
    if (RedshiftLogger.isEnable())
      this.conn.getLogger().logFunction(true, seconds);

    return false;
  }

  // Maps a SQLState from the server to the closest matching XA error code.
  private int mapSQLStateToXAErrorCode(SQLException sqlException) {
    if (isRedshiftIntegrityConstraintViolation(sqlException)) {
      return XAException.XA_RBINTEGRITY;
    }

    if (RedshiftState.IN_FAILED_SQL_TRANSACTION.getState().equals(sqlException.getSQLState())) {
      return XAException.XA_RBOTHER;
    }

    return XAException.XAER_RMFAIL;
  }

  private boolean isRedshiftIntegrityConstraintViolation(SQLException sqlException) {
    return sqlException instanceof RedshiftException
        && sqlException.getSQLState().length() == 5
        && sqlException.getSQLState().startsWith("23"); // Class 23 - Integrity Constraint Violation
  }

  private enum State {
    /**
     * {@code RedshiftXAConnection} not associated with a XA-transaction. You can still call {@link #getConnection()} and
     * use the connection outside XA. {@code currentXid} is {@code null}. autoCommit is {@code true} on a connection
     * by getConnection, per normal JDBC rules, though the caller can change it to {@code false} and manage
     * transactions itself using Connection.commit and rollback.
     */
    IDLE,
    /**
     * {@link #start(Xid, int)} has been called, and we're associated with an XA transaction. {@code currentXid}
     * is valid. autoCommit is false on a connection returned by getConnection, and should not be messed with by
     * the caller or the XA transaction will be broken.
     */
    ACTIVE,
    /**
     * {@link #end(Xid, int)} has been called, but the transaction has not yet been prepared. {@code currentXid}
     * is still valid. You shouldn't use the connection for anything else than issuing a {@link XAResource#commit(Xid, boolean)} or
     * rollback.
     */
    ENDED
  }
}
8,509
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/xa/RedshiftXADataSourceFactory.java
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.xa;

import com.amazon.redshift.ds.common.RedshiftObjectFactory;

import java.util.Hashtable;

import javax.naming.Context;
import javax.naming.Name;
import javax.naming.Reference;

/**
 * An ObjectFactory implementation for RedshiftXADataSource-objects.
 */
public class RedshiftXADataSourceFactory extends RedshiftObjectFactory {
  /*
   * All the other Redshift DataSource use RedshiftObjectFactory directly, but we can't do that with
   * RedshiftXADataSource because referencing RedshiftXADataSource from RedshiftObjectFactory would break
   * "JDBC2 Enterprise" edition build which doesn't include RedshiftXADataSource.
   */
  public Object getObjectInstance(Object obj, Name name, Context nameCtx,
      Hashtable<?, ?> environment) throws Exception {
    Reference ref = (Reference) obj;
    String className = ref.getClassName();
    // Only references that name the XA data source class are handled here;
    // anything else is delegated to other factories by returning null.
    if (className.equals("com.amazon.redshift.xa.RedshiftXADataSource")) {
      return loadXADataSource(ref);
    }
    return null;
  }

  // Instantiates the XA data source and populates it from the JNDI reference.
  private Object loadXADataSource(Reference ref) {
    return loadBaseDataSource(new RedshiftXADataSource(), ref);
  }
}
8,510
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/xa/RedshiftXADataSource.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.xa;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.ds.common.BaseDataSource;

import java.sql.Connection;
import java.sql.SQLException;

import javax.naming.Reference;
import javax.sql.XAConnection;
import javax.sql.XADataSource;

/**
 * XA-enabled DataSource implementation.
 *
 * @author Heikki Linnakangas (heikki.linnakangas@iki.fi)
 */
public class RedshiftXADataSource extends BaseDataSource implements XADataSource {
  /**
   * Gets a connection to the Redshift database. The database is identified by the DataSource
   * properties serverName, databaseName, and portNumber. The user to connect as is identified by
   * the DataSource properties user and password.
   *
   * @return A valid database connection.
   * @throws SQLException Occurs when the database connection cannot be established.
   */
  public XAConnection getXAConnection() throws SQLException {
    // Delegate to the credentialed variant with the configured defaults.
    return getXAConnection(getUser(), getPassword());
  }

  /**
   * Gets a XA-enabled connection to the Redshift database. The database is identified by the
   * DataSource properties serverName, databaseName, and portNumber. The user to connect as is
   * identified by the arguments user and password, which override the DataSource properties by the
   * same name.
   *
   * @return A valid database connection.
   * @throws SQLException Occurs when the database connection cannot be established.
   */
  public XAConnection getXAConnection(String user, String password) throws SQLException {
    Connection physical = super.getConnection(user, password);
    return new RedshiftXAConnection((BaseConnection) physical);
  }

  public String getDescription() {
    return "XA-enabled DataSource from " + com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME;
  }

  /**
   * Generates a reference using the appropriate object factory.
   */
  protected Reference createReference() {
    return new Reference(getClass().getName(), RedshiftXADataSourceFactory.class.getName(), null);
  }
}
8,511
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/xa/RecoveredXid.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.xa;

import com.amazon.redshift.util.Base64;

import java.util.Arrays;

import javax.transaction.xa.Xid;

/**
 * An {@link Xid} reconstructed from the string form the driver stores in the server's
 * prepared-transaction GID (see {@link #xidToString(Xid)} / {@link #stringToXid(String)}).
 */
class RecoveredXid implements Xid {
  int formatId;
  byte[] globalTransactionId;
  byte[] branchQualifier;

  public int getFormatId() {
    return formatId;
  }

  public byte[] getGlobalTransactionId() {
    return globalTransactionId;
  }

  public byte[] getBranchQualifier() {
    return branchQualifier;
  }

  @Override
  public int hashCode() {
    // Same value as the conventional 31-based accumulation over
    // (branchQualifier, formatId, globalTransactionId).
    int result = 31 + Arrays.hashCode(branchQualifier);
    result = 31 * result + formatId;
    result = 31 * result + Arrays.hashCode(globalTransactionId);
    return result;
  }

  // Equal to any Xid (not just RecoveredXid) with the same format id and byte arrays.
  public boolean equals(Object o) {
    if (o == this) {
      // optimization for the common case.
      return true;
    }
    if (!(o instanceof Xid)) {
      return false;
    }
    Xid other = (Xid) o;
    return other.getFormatId() == formatId
        && Arrays.equals(globalTransactionId, other.getGlobalTransactionId())
        && Arrays.equals(branchQualifier, other.getBranchQualifier());
  }

  /**
   * This is for debugging purposes only.
   */
  public String toString() {
    return xidToString(this);
  }

  // --- Routines for converting xid to string and back.

  // Encodes as "<formatId>_<base64(gtrid)>_<base64(bqual)>".
  static String xidToString(Xid xid) {
    return xid.getFormatId()
        + "_" + Base64.encodeBytes(xid.getGlobalTransactionId(), Base64.DONT_BREAK_LINES)
        + "_" + Base64.encodeBytes(xid.getBranchQualifier(), Base64.DONT_BREAK_LINES);
  }

  /**
   * @return recovered xid, or null if s does not represent a valid xid encoded by the driver.
   */
  static Xid stringToXid(String s) {
    int firstSep = s.indexOf("_");
    int lastSep = s.lastIndexOf("_");
    if (firstSep == lastSep) {
      // Fewer than two separators; this also catches firstSep == lastSep == -1.
      return null;
    }

    RecoveredXid xid = new RecoveredXid();
    try {
      xid.formatId = Integer.parseInt(s.substring(0, firstSep));
      xid.globalTransactionId = Base64.decode(s.substring(firstSep + 1, lastSep));
      xid.branchQualifier = Base64.decode(s.substring(lastSep + 1));
    } catch (Exception ex) {
      return null; // Doesn't seem to be an xid generated by this driver.
    }

    if (xid.globalTransactionId == null || xid.branchQualifier == null) {
      return null;
    }
    return xid;
  }
}
8,512
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftConnectionImpl.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.AuthMech; import com.amazon.redshift.Driver; import com.amazon.redshift.RedshiftNotification; import com.amazon.redshift.RedshiftProperty; import com.amazon.redshift.copy.CopyManager; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.core.BaseStatement; import com.amazon.redshift.core.CachedQuery; import com.amazon.redshift.core.ConnectionFactory; import com.amazon.redshift.core.Encoding; import com.amazon.redshift.core.IamHelper; import com.amazon.redshift.core.NativeAuthPluginHelper; import com.amazon.redshift.core.Oid; import com.amazon.redshift.core.Provider; import com.amazon.redshift.core.Query; import com.amazon.redshift.core.QueryExecutor; import com.amazon.redshift.core.RedshiftJDBCSettings; import com.amazon.redshift.core.ReplicationProtocol; import com.amazon.redshift.core.ResultHandlerBase; import com.amazon.redshift.core.ServerVersion; import com.amazon.redshift.core.SqlCommand; import com.amazon.redshift.core.TransactionState; import com.amazon.redshift.core.TypeInfo; import com.amazon.redshift.core.Utils; import com.amazon.redshift.core.Version; import com.amazon.redshift.fastpath.Fastpath; import com.amazon.redshift.largeobject.LargeObjectManager; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.replication.RedshiftReplicationConnection; import com.amazon.redshift.replication.RedshiftReplicationConnectionImpl; import com.amazon.redshift.ssl.NonValidatingFactory; import com.amazon.redshift.core.v3.QueryExecutorImpl; import com.amazon.redshift.util.QuerySanitizer; import com.amazon.redshift.util.ByteConverter; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.HostSpec; import com.amazon.redshift.util.LruCache; import 
com.amazon.redshift.util.RedshiftBinaryObject; import com.amazon.redshift.util.RedshiftConstants; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftInterval; import com.amazon.redshift.util.RedshiftIntervalYearToMonth; import com.amazon.redshift.util.RedshiftIntervalDayToSecond; import com.amazon.redshift.util.RedshiftState; import com.amazon.redshift.util.RedshiftProperties; import java.io.IOException; import java.sql.Array; import java.sql.Blob; import java.sql.CallableStatement; import java.sql.ClientInfoStatus; import java.sql.Clob; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLClientInfoException; import java.sql.SQLException; import java.sql.SQLPermission; import java.sql.SQLWarning; import java.sql.SQLXML; import java.sql.Savepoint; import java.sql.Statement; import java.sql.Struct; // import java.sql.Types; import java.util.Arrays; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.NoSuchElementException; import java.util.Properties; import java.util.Set; import java.util.StringTokenizer; import java.util.TimeZone; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.Executor; public class RedshiftConnectionImpl implements BaseConnection { private RedshiftLogger logger; private static final Set<Integer> SUPPORTED_BINARY_OIDS = getSupportedBinaryOids(); private static final SQLPermission SQL_PERMISSION_ABORT = new SQLPermission("callAbort"); private static final SQLPermission SQL_PERMISSION_NETWORK_TIMEOUT = new SQLPermission("setNetworkTimeout"); // Internal properties private enum ReadOnlyBehavior { ignore, transaction, always; } // // Data initialized on construction: // private 
final Properties clientInfo;

  /* URL we were created via */
  private final String creatingURL;

  // How the readOnly flag is propagated to the session (ignore/transaction/always).
  private final ReadOnlyBehavior readOnlyBehavior;

  // Captures the creation site when LOG_UNCLOSED_CONNECTIONS is enabled.
  private Throwable openStackTrace;

  /* Actual network handler */
  private final QueryExecutor queryExecutor;

  /* Query that runs COMMIT */
  private final Query commitQuery;

  /* Query that runs ROLLBACK */
  private final Query rollbackQuery;

  // Cached "SET readonly=1" / "SET readonly=0" session queries.
  private final CachedQuery setSessionReadOnly;
  private final CachedQuery setSessionNotReadOnly;

  // Per-connection type information cache.
  private final TypeInfo typeCache;

  private boolean disableColumnSanitiser = false;
  private boolean disableIsValidQuery = false;

  // Default statement prepare threshold.
  protected int prepareThreshold;

  // Default enable generated name for statement and portal.
  protected boolean enableGeneratedName;

  /**
   * Default fetch size for statement.
   *
   * @see RedshiftProperty#DEFAULT_ROW_FETCH_SIZE
   */
  protected int defaultFetchSize;

  // Default forcebinary option.
  protected boolean forcebinary = false;

  private int rsHoldability = ResultSet.CLOSE_CURSORS_AT_COMMIT;
  private int savepointId = 0;

  // Connection's autocommit state.
  private boolean autoCommit = true;

  // Connection's readonly state.
  private boolean readOnly = false;

  // Override getTables metadata type
  private Integer overrideSchemaPatternType;

  // Filter out database objects for which the current user has no privileges granted from the DatabaseMetaData
  private boolean hideUnprivilegedObjects;

  // Bind String to UNSPECIFIED or VARCHAR?
  private final boolean bindStringAsVarchar;

  // Current warnings; there might be more on queryExecutor too.
  private SQLWarning firstWarning = null;

  // Timer for scheduling TimerTasks for this connection.
  // Only instantiated if a task is actually scheduled.
  private volatile Timer cancelTimer = null;

  /**
   * Replication protocol in current version postgresql(10devel) supports a limited number of
   * commands.
*/
  private final boolean replicationConnection;

  // Bounded LRU cache of field metadata, keyed by FieldMetadata.Key.
  private final LruCache<FieldMetadata.Key, FieldMetadata> fieldMetadataCache;

  /**
   * The connection settings.
   */
  private RedshiftJDBCSettings m_settings;

  // Batch size used when rewriting batched INSERT statements.
  private int reWriteBatchedInsertsSize;

  // When true, DatabaseMetaData results are restricted to the current database.
  private boolean databaseMetadataCurrentDbOnly;

  // NOTE(review): still names the org.postgresql class — presumably kept for
  // backward compatibility with existing configurations; confirm.
  public static String NON_VALIDATING_SSL_FACTORY = "org.postgresql.ssl.NonValidatingFactory";

  // Whether this driver runs on a 64-bit JVM (computed once at class load).
  public static final boolean IS_64_BIT_JVM = checkIs64bitJVM();

  // Credential-provider plugins that must NOT be combined with IAM authentication.
  public static final List<String> NON_IAM_PLUGINS_LIST = Collections.unmodifiableList(Arrays.asList(
      RedshiftConstants.NATIVE_IDP_AZUREAD_BROWSER_PLUGIN,
      RedshiftConstants.NATIVE_IDP_OKTA_BROWSER_PLUGIN,
      RedshiftConstants.IDC_BROWSER_PLUGIN,
      RedshiftConstants.IDP_TOKEN_PLUGIN));

  // Borrow a cached parsed query for the given SQL from the executor's cache.
  final CachedQuery borrowQuery(String sql) throws SQLException {
    return queryExecutor.borrowQuery(sql);
  }

  // Borrow a cached callable-statement ({call ...}) query for the given SQL.
  final CachedQuery borrowCallableQuery(String sql) throws SQLException {
    return queryExecutor.borrowCallableQuery(sql);
  }

  // Borrow a cached query that returns the given generated-key columns.
  private CachedQuery borrowReturningQuery(String sql, String[] columnNames) throws SQLException {
    return queryExecutor.borrowReturningQuery(sql, columnNames);
  }

  @Override
  public CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
      String... columnNames)
      throws SQLException {
    return queryExecutor.createQuery(sql, escapeProcessing, isParameterized, columnNames);
  }

  // Return a previously borrowed query to the executor's cache.
  void releaseQuery(CachedQuery cachedQuery) {
    queryExecutor.releaseQuery(cachedQuery);
  }

  @Override
  public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
    queryExecutor.setFlushCacheOnDeallocate(flushCacheOnDeallocate);
    if (RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, " setFlushCacheOnDeallocate = {0}", flushCacheOnDeallocate);
  }

  //
  // Ctor.
//
  /**
   * Opens a new database connection: resolves the authentication scheme
   * (IAM or native-IdP plugins), opens the wire connection through
   * {@code ConnectionFactory}, and initializes per-connection state
   * (fetch size, binary-transfer OIDs, type cache, client info, and the
   * common COMMIT/ROLLBACK queries).
   *
   * @param hostSpecs candidate hosts; may be null, in which case they are
   *        derived from the (possibly IAM-updated) properties
   * @param user user name; may be replaced by IAM-resolved credentials
   * @param database database name
   * @param info connection properties
   * @param url the JDBC URL this connection was created from
   * @param logger driver logger
   * @throws SQLException on authentication or connection failure
   */
  public RedshiftConnectionImpl(HostSpec[] hostSpecs, String user, String database,
      RedshiftProperties info, String url, RedshiftLogger logger) throws SQLException {
    this.logger = logger;

    // Print out the driver version number and whether its 32-bit or 64-bit JVM
    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME);
      logger.log(LogLevel.DEBUG, "JVM architecture is "
          + (RedshiftConnectionImpl.IS_64_BIT_JVM ? "64-bit" : "32-bit"));
    }

    RedshiftProperties.evaluateProperties(info);

    m_settings = new RedshiftJDBCSettings();

    // IAM
    boolean sslExplicitlyDisabled = setAuthMech(info);
    boolean redshiftNativeAuth = false;

    // This need to be called after setAuthMech() and before checking some required settings.
    // host, port, username and password may be set in setIAMProperties().
    String iamAuth = getOptionalSetting(RedshiftProperty.IAM_AUTH.getName(), info);
    m_settings.m_iamAuth = (iamAuth == null) ? false : Boolean.parseBoolean(iamAuth);

    if (m_settings.m_iamAuth) {
      String iamCredentialProvider = RedshiftConnectionImpl.getOptionalConnSetting(
          RedshiftProperty.CREDENTIALS_PROVIDER.getName(), info);

      // IdC/token plugins are not valid together with IAM authentication.
      if (iamCredentialProvider != null
          && (iamCredentialProvider.equalsIgnoreCase(RedshiftConstants.IDC_BROWSER_PLUGIN)
              || iamCredentialProvider.equalsIgnoreCase(RedshiftConstants.IDP_TOKEN_PLUGIN))) {
        throw new RedshiftException(
            GT.tr("You can not use this authentication plugin with IAM enabled."),
            RedshiftState.UNEXPECTED_ERROR);
      }

      // IAM credentials travel over the wire; refuse an explicit ssl=false.
      if (sslExplicitlyDisabled) {
        throw new RedshiftException(GT.tr("SSL should be enable in IAM authentication."),
            RedshiftState.UNEXPECTED_ERROR);
      }

      if (RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, "Start IAM authentication");

      // Check for JWT and convert into Redshift Native Auth
      if (iamCredentialProvider != null
          && (iamCredentialProvider.equalsIgnoreCase("com.amazon.redshift.plugin.BasicJwtCredentialsProvider")
              || iamCredentialProvider.equalsIgnoreCase("com.amazon.redshift.plugin.BasicNativeSAMLCredentialsProvider"))) {
        redshiftNativeAuth = true;
      }

      if (!redshiftNativeAuth) {
        info = IamHelper.setIAMProperties(info, m_settings, logger);

        // if (RedshiftLogger.isEnable())
        //   logger.log(LogLevel.DEBUG, "info after setIAMProperties" + info);

        // Set the user name and temporary password in the property
        RedshiftProperties updatedInfo = new RedshiftProperties();
        updatedInfo.putAll(info);

        if (m_settings.m_username != null) {
          updatedInfo.put(RedshiftProperty.USER.getName(), m_settings.m_username);
          user = m_settings.m_username;
        }

        if (m_settings.m_password != null)
          updatedInfo.put(RedshiftProperty.PASSWORD.getName(), m_settings.m_password);

        if (m_settings.m_host != null) {
          updatedInfo.putIfAbsent(RedshiftProperty.HOST.getName(), m_settings.m_host);
        }

        if (m_settings.m_port != 0) {
          updatedInfo.putIfAbsent(RedshiftProperty.PORT.getName(), String.valueOf(m_settings.m_port));
        }

        // IAM cluster discovery may have supplied host/port above.
        if (hostSpecs == null) {
          hostSpecs = Driver.hostSpecs(updatedInfo);
        }

        info = updatedInfo;
      } // !Redshift Native Auth
    } // IAM auth
    else {
      // Check for non IAM authentication plugins
      String nonIamCredentialProvider = RedshiftConnectionImpl.getOptionalConnSetting(
          RedshiftProperty.CREDENTIALS_PROVIDER.getName(), info);

      if (nonIamCredentialProvider != null
          && NON_IAM_PLUGINS_LIST.stream().anyMatch(nonIamCredentialProvider::equalsIgnoreCase)) {
        redshiftNativeAuth = true;

        if (sslExplicitlyDisabled) {
          throw new RedshiftException(GT.tr("Authentication must use an SSL connection."),
              RedshiftState.UNEXPECTED_ERROR);
        }

        // Call OAuth2 plugin and get the access token
        info = NativeAuthPluginHelper.setNativeAuthPluginProperties(info, m_settings, logger);
      }
    }

    this.creatingURL = url;

    this.readOnlyBehavior = getReadOnlyBehavior(RedshiftProperty.READ_ONLY_MODE.get(info));

    // Explicit fetch size wins; otherwise fall back to the blocking-rows mode value.
    int dfltRowFetchSizeProp = RedshiftProperty.DEFAULT_ROW_FETCH_SIZE.getInt(info);
    int blockingRowsMode = RedshiftProperty.BLOCKING_ROWS_MODE.getInt(info);
    int dfltRowFetchSize = (dfltRowFetchSizeProp != 0) ? dfltRowFetchSizeProp : blockingRowsMode;
    setDefaultFetchSize(dfltRowFetchSize);

    setPrepareThreshold(RedshiftProperty.PREPARE_THRESHOLD.getInt(info));
    // A threshold of -1 means "always use binary transfer".
    if (prepareThreshold == -1) {
      setForceBinary(true);
    }

    setGeneratedName(RedshiftProperty.ENABLE_GENERATED_NAME_FOR_PREPARED_STATEMENT.getBoolean(info));

    // Now make the initial connection and set up local state
    this.queryExecutor = ConnectionFactory.openConnection(hostSpecs, user, database, info, logger);

    setSessionReadOnly = createQuery("SET readonly=1", false, true); // SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY
    setSessionNotReadOnly = createQuery("SET readonly=0", false, true); // SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE

    // Set read-only early if requested
    if (RedshiftProperty.READ_ONLY.getBoolean(info)) {
      setReadOnly(true);
    }

    this.databaseMetadataCurrentDbOnly = RedshiftProperty.DATABASE_METADATA_CURRENT_DB_ONLY.getBoolean(info);
    this.hideUnprivilegedObjects = RedshiftProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(info);
    this.overrideSchemaPatternType = RedshiftProperty.OVERRIDE_SCHEMA_PATTERN_TYPE.getInteger(info);
    this.reWriteBatchedInsertsSize = RedshiftProperty.REWRITE_BATCHED_INSERTS_SIZE.getInt(info);

    Set<Integer> binaryOids = getBinaryOids(info);

    // split for receive and send for better control
    Set<Integer> useBinarySendForOids = new HashSet<Integer>(binaryOids);
    Set<Integer> useBinaryReceiveForOids = new HashSet<Integer>(binaryOids);

    /*
     * Does not pass unit tests because unit tests expect setDate to have millisecond accuracy
     * whereas the binary transfer only supports date accuracy.
     */
    useBinarySendForOids.remove(Oid.DATE);

    queryExecutor.setBinaryReceiveOids(useBinaryReceiveForOids);
    queryExecutor.setBinarySendOids(useBinarySendForOids);

    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, " types using binary send = {0}", oidsToString(useBinarySendForOids));
      logger.log(LogLevel.DEBUG, " types using binary receive = {0}", oidsToString(useBinaryReceiveForOids));
      logger.log(LogLevel.DEBUG, " integer date/time = {0}", queryExecutor.getIntegerDateTimes());
    }

    queryExecutor.setRaiseExceptionOnSilentRollback(
        RedshiftProperty.RAISE_EXCEPTION_ON_SILENT_ROLLBACK.getBoolean(info)
    );

    //
    // String -> text or unknown?
    //
    String stringType = RedshiftProperty.STRING_TYPE.get(info);
    if (stringType != null) {
      if (stringType.equalsIgnoreCase("unspecified")) {
        bindStringAsVarchar = false;
      } else if (stringType.equalsIgnoreCase("varchar")) {
        bindStringAsVarchar = true;
      } else {
        throw new RedshiftException(
            GT.tr("Unsupported value for stringtype parameter: {0}", stringType),
            RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else {
      bindStringAsVarchar = true;
    }

    // Initialize timestamp stuff
    timestampUtils = new TimestampUtils(!queryExecutor.getIntegerDateTimes(), new Provider<TimeZone>() {
      @Override
      public TimeZone get() {
        return queryExecutor.getTimeZone();
      }
    });

    // Initialize common queries.
    // isParameterized==true so full parse is performed and the engine knows the query
    // is not a compound query with ; inside, so it could use parse/bind/exec messages
    commitQuery = createQuery("COMMIT", false, true).query;
    rollbackQuery = createQuery("ROLLBACK", false, true).query;

    int unknownLength = RedshiftProperty.UNKNOWN_LENGTH.getInt(info);

    // Initialize object handling
    typeCache = createTypeInfo(this, unknownLength);
    initObjectTypes(info);

    if (RedshiftProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(info)) {
      openStackTrace = new Throwable("Connection was created at this point:");
    }

    this.disableColumnSanitiser = RedshiftProperty.DISABLE_COLUMN_SANITISER.getBoolean(info);
    this.disableIsValidQuery = RedshiftProperty.DISABLE_ISVALID_QUERY.getBoolean(info);

    /*
    if (haveMinimumServerVersion(ServerVersion.v8_3)) {
      typeCache.addCoreType("uuid", Oid.UUID, Types.OTHER, "java.util.UUID", Oid.UUID_ARRAY);
      typeCache.addCoreType("xml", Oid.XML, Types.SQLXML, "java.sql.SQLXML", Oid.XML_ARRAY);
    }
    */

    this.clientInfo = new Properties();
    /* if (haveMinimumServerVersion(ServerVersion.v9_0)) */ {
      String appName = RedshiftProperty.APPLICATION_NAME.get(info);
      if (appName == null) {
        appName = "";
      }
      this.clientInfo.put("ApplicationName", appName);
    }

    fieldMetadataCache = new LruCache<FieldMetadata.Key, FieldMetadata>(
        Math.max(0, RedshiftProperty.DATABASE_METADATA_CACHE_FIELDS.getInt(info)),
        Math.max(0, RedshiftProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getInt(info) * 1024 * 1024),
        false);

    replicationConnection = RedshiftProperty.REPLICATION.get(info) != null;
  }

  // Parses the readOnlyMode property: tries the exact value, then lower-case,
  // then falls back to 'transaction'.
  // NOTE(review): a null property would throw NPE from valueOf rather than fall
  // back — presumably READ_ONLY_MODE always has a default value; confirm.
  private static ReadOnlyBehavior getReadOnlyBehavior(String property) {
    try {
      return ReadOnlyBehavior.valueOf(property);
    } catch (IllegalArgumentException e) {
      try {
        return ReadOnlyBehavior.valueOf(property.toLowerCase(Locale.US));
      } catch (IllegalArgumentException e2) {
        return ReadOnlyBehavior.transaction;
      }
    }
  }

  // The OIDs for which this driver implements binary transfer.
  private static Set<Integer> getSupportedBinaryOids() {
    return new HashSet<Integer>(Arrays.asList(
        Oid.BYTEA,
        Oid.INT2,
Oid.INT4,
        Oid.INT8,
        Oid.FLOAT4,
        Oid.FLOAT8,
        Oid.TIME,
        Oid.DATE,
        Oid.TIMETZ,
        Oid.TIMESTAMP,
        Oid.TIMESTAMPTZ,
        Oid.INTERVALY2M,
        Oid.INTERVALD2S,
        Oid.INT2_ARRAY,
        Oid.INT4_ARRAY,
        Oid.INT8_ARRAY,
        Oid.FLOAT4_ARRAY,
        Oid.FLOAT8_ARRAY,
        Oid.VARCHAR_ARRAY,
        Oid.TEXT_ARRAY,
        Oid.POINT,
        Oid.BOX,
        Oid.UUID));
  }

  // Effective binary-transfer OID set from properties: base set (when
  // binaryTransfer is on) + enable list - disable list, intersected with the
  // driver-supported set.
  private static Set<Integer> getBinaryOids(Properties info) throws RedshiftException {
    boolean binaryTransfer = RedshiftProperty.BINARY_TRANSFER.getBoolean(info);
    // Formats that currently have binary protocol support
    Set<Integer> binaryOids = new HashSet<Integer>(32);
    if (binaryTransfer) {
      binaryOids.addAll(SUPPORTED_BINARY_OIDS);
    }
    binaryOids.addAll(getOidSet(RedshiftProperty.BINARY_TRANSFER_ENABLE.get(info)));
    binaryOids.removeAll(getOidSet(RedshiftProperty.BINARY_TRANSFER_DISABLE.get(info)));
    binaryOids.retainAll(SUPPORTED_BINARY_OIDS);
    return binaryOids;
  }

  // Parses a comma-separated list of type names into their OIDs via Oid.valueOf.
  private static Set<Integer> getOidSet(String oidList) throws RedshiftException {
    Set<Integer> oids = new HashSet<Integer>();
    StringTokenizer tokenizer = new StringTokenizer(oidList, ",");
    while (tokenizer.hasMoreTokens()) {
      String oid = tokenizer.nextToken();
      oids.add(Oid.valueOf(oid));
    }
    return oids;
  }

  // Renders an OID set as a comma-separated string for debug logging.
  private String oidsToString(Set<Integer> oids) {
    StringBuilder sb = new StringBuilder();
    for (Integer oid : oids) {
      sb.append(Oid.toString(oid));
      sb.append(',');
    }
    if (sb.length() > 0) {
      sb.setLength(sb.length() - 1);
    } else {
      sb.append(" <none>");
    }
    return sb.toString();
  }

  // Converts between driver and server date/time representations.
  private final TimestampUtils timestampUtils;

  public TimestampUtils getTimestampUtils() {
    return timestampUtils;
  }

  /**
   * The current type mappings.
   */
  protected Map<String, Class<?>> typemap;

  @Override
  public Statement createStatement() throws SQLException {
    if (RedshiftLogger.isEnable())
      logger.logFunction(true);

    // We now follow the spec and default to TYPE_FORWARD_ONLY.
Statement stmt = createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);

    if (RedshiftLogger.isEnable())
      logger.logFunction(false, stmt);

    return stmt;
  }

  @Override
  public PreparedStatement prepareStatement(String sql) throws SQLException {
    // SQL is sanitized before logging so credentials never reach the log.
    if (RedshiftLogger.isEnable()) {
      logger.logFunction(true, QuerySanitizer.filterCredentials(sql));
    }

    PreparedStatement pstmt = prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);

    if (RedshiftLogger.isEnable())
      logger.logFunction(false, QuerySanitizer.filterCredentials(pstmt.toString()));

    return pstmt;
  }

  @Override
  public CallableStatement prepareCall(String sql) throws SQLException {
    if (RedshiftLogger.isEnable())
      logger.logFunction(true, QuerySanitizer.filterCredentials(sql));

    CallableStatement cstmt = prepareCall(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);

    if (RedshiftLogger.isEnable())
      logger.logFunction(false, QuerySanitizer.filterCredentials(cstmt.toString()));

    return cstmt;
  }

  @Override
  public Map<String, Class<?>> getTypeMap() throws SQLException {
    checkClosed();
    return typemap;
  }

  public QueryExecutor getQueryExecutor() {
    return queryExecutor;
  }

  public ReplicationProtocol getReplicationProtocol() {
    return queryExecutor.getReplicationProtocol();
  }

  /**
   * This adds a warning to the warning chain.
*
   * @param warn warning to add
   */
  public void addWarning(SQLWarning warn) {
    // Add the warning to the chain
    if (firstWarning != null) {
      firstWarning.setNextWarning(warn);
    } else {
      firstWarning = warn;
    }
  }

  @Override
  public ResultSet execSQLQuery(String s) throws SQLException {
    return execSQLQuery(s, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
  }

  @Override
  public Long getBytesReadFromStream() {
    RedshiftConnectionImpl redshiftConnectionImpl = this;
    if (null != redshiftConnectionImpl && null != redshiftConnectionImpl.getQueryExecutor()) {
      QueryExecutorImpl queryExecutorImpl = (QueryExecutorImpl) redshiftConnectionImpl.getQueryExecutor();
      long bytes = queryExecutorImpl.getBytesReadFromStream();
      return bytes;
    }
    return 0L;
  }

  // Executes internal SQL expected to produce a result set; throws if none.
  @Override
  public ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
      throws SQLException {
    BaseStatement stat = (BaseStatement) createStatement(resultSetType, resultSetConcurrency);
    boolean hasResultSet = stat.executeWithFlags(s, QueryExecutor.QUERY_SUPPRESS_BEGIN);

    // Skip over leading update counts until a result set shows up (or nothing is left).
    while (!hasResultSet && stat.getUpdateCount() != -1) {
      hasResultSet = stat.getMoreResults();
    }

    if (!hasResultSet) {
      throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA);
    }

    // Transfer warnings to the connection, since the user never
    // has a chance to see the statement itself.
    SQLWarning warnings = stat.getWarnings();
    if (warnings != null) {
      addWarning(warnings);
    }

    return stat.getResultSet();
  }

  // Executes internal SQL expected to produce NO result set; throws if one appears.
  @Override
  public void execSQLUpdate(String s) throws SQLException {
    BaseStatement stmt = (BaseStatement) createStatement();
    if (stmt.executeWithFlags(s, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
        | QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
      throw new RedshiftException(GT.tr("A result was returned when none was expected."),
          RedshiftState.TOO_MANY_RESULTS);
    }

    // Transfer warnings to the connection, since the user never
    // has a chance to see the statement itself.
SQLWarning warnings = stmt.getWarnings();
    if (warnings != null) {
      addWarning(warnings);
    }

    stmt.close();
  }

  // Same as execSQLUpdate(String) but runs an already-parsed cached query.
  void execSQLUpdate(CachedQuery query) throws SQLException {
    BaseStatement stmt = (BaseStatement) createStatement();
    if (stmt.executeWithFlags(query, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
        | QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
      throw new RedshiftException(GT.tr("A result was returned when none was expected."),
          RedshiftState.TOO_MANY_RESULTS);
    }

    // Transfer warnings to the connection, since the user never
    // has a chance to see the statement itself.
    SQLWarning warnings = stmt.getWarnings();
    if (warnings != null) {
      addWarning(warnings);
    }

    stmt.close();
  }

  /**
   * <p>In SQL, a result table can be retrieved through a cursor that is named. The current row of a
   * result can be updated or deleted using a positioned update/delete statement that references the
   * cursor name.</p>
   *
   * <p>We do not support positioned update/delete, so this is a no-op.</p>
   *
   * @param cursor the cursor name
   * @throws SQLException if a database access error occurs
   */
  public void setCursorName(String cursor) throws SQLException {
    checkClosed();
    // No-op.
  }

  /**
   * getCursorName gets the cursor name.
   *
   * @return the current cursor name (always null; positioned updates are unsupported)
   * @throws SQLException if a database access error occurs
   */
  public String getCursorName() throws SQLException {
    checkClosed();
    return null;
  }

  /**
   * <p>We are required to bring back certain information by the DatabaseMetaData class. These
   * functions do that.</p>
   *
   * <p>Method getURL() brings back the URL (good job we saved it)</p>
   *
   * @return the url
   * @throws SQLException just in case...
   */
  public String getURL() throws SQLException {
    return creatingURL;
  }

  /**
   * Method getUserName() brings back the User Name (again, we saved it).
   *
   * @return the user name
   * @throws SQLException just in case...
*/
  public String getUserName() throws SQLException {
    return queryExecutor.getUser();
  }

  // Lazily creates the Fastpath API wrapper for this connection.
  public Fastpath getFastpathAPI() throws SQLException {
    checkClosed();
    if (fastpath == null) {
      fastpath = new Fastpath(this);
    }
    return fastpath;
  }

  // This holds a reference to the Fastpath API if already open
  private Fastpath fastpath = null;

  // Lazily creates the LargeObject API wrapper for this connection.
  public LargeObjectManager getLargeObjectAPI() throws SQLException {
    checkClosed();
    if (largeobject == null) {
      largeobject = new LargeObjectManager(this);
    }
    return largeobject;
  }

  // This holds a reference to the LargeObject API if already open
  private LargeObjectManager largeobject = null;

  /*
   * This method is used internally to return an object based around com.amazon.redshift's more unique
   * data types.
   *
   * <p>It uses an internal HashMap to get the handling class. If the type is not supported, then an
   * instance of com.amazon.redshift.util.RedshiftObject is returned.
   *
   * You can use the getValue() or setValue() methods to handle the returned object. Custom objects
   * can have their own methods.
   *
   * @return RedshiftObject for this type, and set to value
   *
   * @exception SQLException if value is not correct for this type
   */
  @Override
  public Object getObject(String type, String value, byte[] byteValue) throws SQLException {
    if (typemap != null) {
      Class<?> c = typemap.get(type);
      if (c != null) {
        // Handle the type (requires SQLInput & SQLOutput classes to be implemented)
        throw new RedshiftException(GT.tr("Custom type maps are not supported."),
            RedshiftState.NOT_IMPLEMENTED);
      }
    }

    RedshiftObject obj = null;

    if (RedshiftLogger.isEnable()) {
      logger.log(LogLevel.DEBUG, "Constructing object from type={0} value=<{1}>", new Object[]{type, value});
    }

    try {
      Class<? extends RedshiftObject> klass = typeCache.getRSobject(type);

      // If className is not null, then try to instantiate it,
      // It must be basetype RedshiftObject

      // This is used to implement the com.amazon.redshift unique types (like lseg,
      // point, etc).
if (klass != null) {
        obj = klass.newInstance();
        obj.setType(type);
        if (byteValue != null && obj instanceof RedshiftBinaryObject) {
          RedshiftBinaryObject binObj = (RedshiftBinaryObject) obj;
          binObj.setByteValue(byteValue, 0);
        } else if (byteValue != null && obj instanceof RedshiftInterval) {
          RedshiftInterval intervalObj = (RedshiftInterval) obj;
          // Binary format is 8 bytes time and 4 byes months
          long time = ByteConverter.int8(byteValue, 0);
          int month = ByteConverter.int4(byteValue, 8);
          intervalObj.setValue(month, time);
          // intervalObj.setValue(new String(byteValue));
        } else {
          obj.setValue(value);
        }
      } else {
        // If className is null, then the type is unknown.
        // so return a RedshiftOject with the type set, and the value set
        obj = new RedshiftObject();
        obj.setType(type);
        obj.setValue(value);
      }

      return obj;
    } catch (SQLException sx) {
      // rethrow the exception. Done because we capture any others next
      throw sx;
    } catch (Exception ex) {
      throw new RedshiftException(GT.tr("Failed to create object for: {0}.", type),
          RedshiftState.CONNECTION_FAILURE, ex);
    }
  }

  // Factory hook so subclasses can supply a different TypeInfo implementation.
  protected TypeInfo createTypeInfo(BaseConnection conn, int unknownLength) {
    return new TypeInfoCache(conn, unknownLength);
  }

  public TypeInfo getTypeInfo() {
    return typeCache;
  }

  // Registers a custom type by class name (loaded reflectively).
  @Override
  public void addDataType(String type, String name) {
    try {
      addDataType(type, Class.forName(name).asSubclass(RedshiftObject.class));
    } catch (Exception e) {
      throw new RuntimeException("Cannot register new type: " + e);
    }
  }

  @Override
  public void addDataType(String type, Class<? extends RedshiftObject> klass) throws SQLException {
    checkClosed();
    typeCache.addDataType(type, klass);
  }

  // This initialises the objectTypes hash map
  private void initObjectTypes(Properties info) throws SQLException {
    // Add in the types that come packaged with the driver.
    // These can be overridden later if desired.
addDataType("box", com.amazon.redshift.geometric.RedshiftBox.class);
    addDataType("circle", com.amazon.redshift.geometric.RedshiftCircle.class);
    addDataType("line", com.amazon.redshift.geometric.RedshiftLine.class);
    addDataType("lseg", com.amazon.redshift.geometric.RedshiftLseg.class);
    addDataType("path", com.amazon.redshift.geometric.RedshiftPath.class);
    addDataType("point", com.amazon.redshift.geometric.RedshiftPoint.class);
    addDataType("polygon", com.amazon.redshift.geometric.RedshiftPolygon.class);
    addDataType("money", com.amazon.redshift.util.RedshiftMoney.class);
    addDataType("interval", com.amazon.redshift.util.RedshiftInterval.class);

    // intervaly2m and intervald2s are not object types rather they are
    // binary types native to Redshift, hence they are added in TypeInfoCache.

    // User-supplied "datatype.<name>=<class>" properties register extra types.
    Enumeration<?> e = info.propertyNames();
    while (e.hasMoreElements()) {
      String propertyName = (String) e.nextElement();
      if (propertyName.startsWith("datatype.")) {
        String typeName = propertyName.substring(9);
        String className = info.getProperty(propertyName);
        Class<?> klass;

        try {
          klass = Class.forName(className);
        } catch (ClassNotFoundException cnfe) {
          throw new RedshiftException(
              GT.tr("Unable to load the class {0} responsible for the datatype {1}",
                  className, typeName),
              RedshiftState.SYSTEM_ERROR, cnfe);
        }

        addDataType(typeName, klass.asSubclass(RedshiftObject.class));
      }
    }
  }

  /**
   * <B>Note:</B> even though {@code Statement} is automatically closed when it is garbage
   * collected, it is better to close it explicitly to lower resource consumption.
   *
   * {@inheritDoc}
   */
  @Override
  public void close() throws SQLException {
    if (RedshiftLogger.isEnable())
      logger.logFunction(true);

    if (queryExecutor == null) {
      // This might happen in case constructor throws an exception (e.g. host being not available).
// When that happens the connection is still registered in the finalizer queue, so it gets finalized if (RedshiftLogger.isEnable()) { logger.logFunction(false); logger.close(); } return; } releaseTimer(); queryExecutor.close(); openStackTrace = null; // Close the logger stream if(RedshiftLogger.isEnable()) { logger.logFunction(false); logger.close(); } } @Override public String nativeSQL(String sql) throws SQLException { checkClosed(); CachedQuery cachedQuery = queryExecutor.createQuery(sql, false, true); return cachedQuery.query.getNativeSql(); } @Override public synchronized SQLWarning getWarnings() throws SQLException { checkClosed(); SQLWarning newWarnings = queryExecutor.getWarnings(); // NB: also clears them. if (firstWarning == null) { firstWarning = newWarnings; } else { firstWarning.setNextWarning(newWarnings); // Chain them on. } return firstWarning; } @Override public synchronized void clearWarnings() throws SQLException { checkClosed(); queryExecutor.getWarnings(); // Clear and discard. firstWarning = null; } public void setDatabaseMetadataCurrentDbOnly(boolean databaseMetadataCurrentDbOnly) throws SQLException { this.databaseMetadataCurrentDbOnly = databaseMetadataCurrentDbOnly; } public boolean isDatabaseMetadataCurrentDbOnly() { return databaseMetadataCurrentDbOnly; } @Override public void setReadOnly(boolean readOnly) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, readOnly); checkClosed(); if (queryExecutor.getTransactionState() != TransactionState.IDLE) { throw new RedshiftException( GT.tr("Cannot change transaction read-only property in the middle of a transaction."), RedshiftState.ACTIVE_SQL_TRANSACTION); } if (readOnly != this.readOnly && autoCommit && this.readOnlyBehavior == ReadOnlyBehavior.always) { execSQLUpdate(readOnly ? 
setSessionReadOnly : setSessionNotReadOnly); } this.readOnly = readOnly; if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setReadOnly = {0}", readOnly); } @Override public boolean isReadOnly() throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true); checkClosed(); if (RedshiftLogger.isEnable()) logger.logFunction(false, readOnly); return readOnly; } @Override public boolean hintReadOnly() { return readOnly && readOnlyBehavior != ReadOnlyBehavior.ignore; } @Override public void setAutoCommit(boolean autoCommit) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, autoCommit); checkClosed(); if (this.autoCommit == autoCommit) { return; } if (!this.autoCommit) { commit(); } // if the connection is read only, we need to make sure session settings are // correct when autocommit status changed if (this.readOnly && readOnlyBehavior == ReadOnlyBehavior.always) { // if we are turning on autocommit, we need to set session // to read only if (autoCommit) { this.autoCommit = true; execSQLUpdate(setSessionReadOnly); } else { // if we are turning auto commit off, we need to // disable session execSQLUpdate(setSessionNotReadOnly); } } this.autoCommit = autoCommit; if(RedshiftLogger.isEnable()) { logger.log(LogLevel.DEBUG, " setAutoCommit = {0}", autoCommit); logger.logFunction(false); } } @Override public boolean getAutoCommit() throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true); checkClosed(); boolean rc = this.autoCommit; if (RedshiftLogger.isEnable()) logger.logFunction(false, rc); return rc; } private void executeTransactionCommand(Query query) throws SQLException { int flags = QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_SUPPRESS_BEGIN; if (prepareThreshold == 0) { flags |= QueryExecutor.QUERY_ONESHOT; } try { getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags); } catch (SQLException e) { // Don't retry composite 
queries as it might get partially executed if (query.getSubqueries() != null || !queryExecutor.willHealOnRetry(e)) { throw e; } query.close(); // retry getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags); } } @Override public void commit() throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true); checkClosed(); if (autoCommit) { throw new RedshiftException(GT.tr("Cannot commit when autoCommit is enabled."), RedshiftState.NO_ACTIVE_SQL_TRANSACTION); } if (queryExecutor.getTransactionState() != TransactionState.IDLE) { executeTransactionCommand(commitQuery); } if (RedshiftLogger.isEnable()) logger.logFunction(false); } protected void checkClosed() throws SQLException { if (isClosed()) { throw new RedshiftException(GT.tr("This connection has been closed."), RedshiftState.CONNECTION_DOES_NOT_EXIST); } } @Override public void rollback() throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true); checkClosed(); if (autoCommit) { throw new RedshiftException(GT.tr("Cannot rollback when autoCommit is enabled."), RedshiftState.NO_ACTIVE_SQL_TRANSACTION); } if (queryExecutor.getTransactionState() != TransactionState.IDLE) { executeTransactionCommand(rollbackQuery); } else { // just log for debugging if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, "Rollback requested but no transaction in progress"); } if (RedshiftLogger.isEnable()) logger.logFunction(false); } public TransactionState getTransactionState() { return queryExecutor.getTransactionState(); } public int getTransactionIsolation() throws SQLException { return Connection.TRANSACTION_SERIALIZABLE; } public void setTransactionIsolation(int level) throws SQLException { if(RedshiftLogger.isEnable()) logger.logFunction(true, level); checkClosed(); if (queryExecutor.getTransactionState() != TransactionState.IDLE) { throw new RedshiftException( GT.tr("Cannot change transaction isolation level in the middle of a transaction."), 
RedshiftState.ACTIVE_SQL_TRANSACTION); } String isolationLevelName = getIsolationLevelName(level); if (isolationLevelName == null) { throw new RedshiftException(GT.tr("Transaction isolation level {0} not supported.", level), RedshiftState.NOT_IMPLEMENTED); } String isolationLevelSQL = "SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL " + isolationLevelName; execSQLUpdate(isolationLevelSQL); // nb: no BEGIN triggered if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setTransactionIsolation = {0}", isolationLevelName); } protected String getIsolationLevelName(int level) { switch (level) { case Connection.TRANSACTION_READ_COMMITTED: return "READ COMMITTED"; case Connection.TRANSACTION_SERIALIZABLE: return "SERIALIZABLE"; case Connection.TRANSACTION_READ_UNCOMMITTED: return "READ UNCOMMITTED"; case Connection.TRANSACTION_REPEATABLE_READ: return "REPEATABLE READ"; default: return null; } } public void setCatalog(String catalog) throws SQLException { checkClosed(); // no-op } public String getCatalog() throws SQLException { checkClosed(); return queryExecutor.getDatabase(); } public boolean getHideUnprivilegedObjects() { return hideUnprivilegedObjects; } /** * <p>Overrides finalize(). If called, it closes the connection.</p> * * <p>This was done at the request of <a href="mailto:rachel@enlarion.demon.co.uk">Rachel * Greenham</a> who hit a problem where multiple clients didn't close the connection, and once a * fortnight enough clients were open to kill the postgres server.</p> */ protected void finalize() throws Throwable { try { if (openStackTrace != null) { if(RedshiftLogger.isEnable()) logger.log(LogLevel.INFO, GT.tr("Finalizing a Connection that was never closed:"), openStackTrace); } close(); } finally { super.finalize(); } } /** * Get server version number. * * @return server version number */ public String getDBVersionNumber() { return queryExecutor.getServerVersion(); } /** * Get server major version. 
* * @return server major version */ public int getServerMajorVersion() { try { StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd return integerPart(versionTokens.nextToken()); // return X } catch (NoSuchElementException e) { return 0; } } /** * Get server minor version. * * @return server minor version */ public int getServerMinorVersion() { try { StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd versionTokens.nextToken(); // Skip aaXbb return integerPart(versionTokens.nextToken()); // return Y } catch (NoSuchElementException e) { return 0; } } @Override public boolean haveMinimumServerVersion(int ver) { return queryExecutor.getServerVersionNum() >= ver; } @Override public boolean haveMinimumServerVersion(Version ver) { return haveMinimumServerVersion(ver.getVersionNum()); } @Override public Encoding getEncoding() { return queryExecutor.getEncoding(); } @Override public byte[] encodeString(String str) throws SQLException { try { return getEncoding().encode(str); } catch (IOException ioe) { throw new RedshiftException(GT.tr("Unable to translate data into the desired encoding."), RedshiftState.DATA_ERROR, ioe); } } @Override public String escapeString(String str) throws SQLException { return Utils.escapeLiteral(null, str, queryExecutor.getStandardConformingStrings()) .toString(); } @Override public String escapeOnlyQuotesString(String str) throws SQLException { return Utils.escapeLiteral(null, str, queryExecutor.getStandardConformingStrings(),true) .toString(); } @Override public boolean getStandardConformingStrings() { return queryExecutor.getStandardConformingStrings(); } // This is a cache of the DatabaseMetaData instance for this connection protected java.sql.DatabaseMetaData metadata; @Override public boolean isClosed() throws SQLException { return queryExecutor.isClosed(); } @Override public void cancelQuery() throws SQLException { checkClosed(); 
queryExecutor.sendQueryCancel(); if (RedshiftLogger.isEnable()) logger.logError("Send query cancel to server"); } @Override public RedshiftNotification[] getNotifications() throws SQLException { return getNotifications(-1); } @Override public RedshiftNotification[] getNotifications(int timeoutMillis) throws SQLException { checkClosed(); getQueryExecutor().processNotifies(timeoutMillis); // Backwards-compatibility hand-holding. RedshiftNotification[] notifications = queryExecutor.getNotifications(); return (notifications.length == 0 ? null : notifications); } /** * Handler for transaction queries. */ private class TransactionCommandHandler extends ResultHandlerBase { public void handleCompletion() throws SQLException { SQLWarning warning = getWarning(); if (warning != null) { RedshiftConnectionImpl.this.addWarning(warning); } super.handleCompletion(); } } public int getPrepareThreshold() { return prepareThreshold; } public void setDefaultFetchSize(int fetchSize) throws SQLException { if (fetchSize < 0) { throw new RedshiftException(GT.tr("Fetch size must be a value greater to or equal to 0."), RedshiftState.INVALID_PARAMETER_VALUE); } this.defaultFetchSize = fetchSize; if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setDefaultFetchSize = {0}", fetchSize); } public int getDefaultFetchSize() { return defaultFetchSize; } public int getReWriteBatchedInsertsSize() { return this.reWriteBatchedInsertsSize; } public Integer getOverrideSchemaPatternType() { return this.overrideSchemaPatternType; } public void setPrepareThreshold(int newThreshold) { this.prepareThreshold = newThreshold; if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setPrepareThreshold = {0}", newThreshold); } public void setGeneratedName(boolean enable) { enableGeneratedName = enable; } public boolean getGeneratedName() { return enableGeneratedName; } public boolean getForceBinary() { return forcebinary; } public void setForceBinary(boolean newValue) { this.forcebinary = newValue; 
if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setForceBinary = {0}", newValue); } public void setTypeMapImpl(Map<String, Class<?>> map) throws SQLException { typemap = map; } public RedshiftLogger getLogger() { return logger; } public int getProtocolVersion() { return queryExecutor.getProtocolVersion(); } public boolean getStringVarcharFlag() { return bindStringAsVarchar; } private CopyManager copyManager = null; public CopyManager getCopyAPI() throws SQLException { checkClosed(); if (copyManager == null) { copyManager = new CopyManager(this); } return copyManager; } public boolean binaryTransferSend(int oid) { return queryExecutor.useBinaryForSend(oid); } public int getBackendPID() { return queryExecutor.getBackendPID(); } public boolean isColumnSanitiserDisabled() { return this.disableColumnSanitiser; } public void setDisableColumnSanitiser(boolean disableColumnSanitiser) { this.disableColumnSanitiser = disableColumnSanitiser; if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setDisableColumnSanitiser = {0}", disableColumnSanitiser); } @Override public PreferQueryMode getPreferQueryMode() { return queryExecutor.getPreferQueryMode(); } @Override public AutoSave getAutosave() { return queryExecutor.getAutoSave(); } @Override public void setAutosave(AutoSave autoSave) { queryExecutor.setAutoSave(autoSave); if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setAutosave = {0}", autoSave.value()); } protected void abort() { queryExecutor.abort(); } private synchronized Timer getTimer() { if (cancelTimer == null) { cancelTimer = Driver.getSharedTimer().getTimer(); } return cancelTimer; } private synchronized void releaseTimer() { if (cancelTimer != null) { cancelTimer = null; Driver.getSharedTimer().releaseTimer(); } } @Override public void addTimerTask(TimerTask timerTask, long milliSeconds) { Timer timer = getTimer(); timer.schedule(timerTask, milliSeconds); } @Override public void purgeTimerTasks() { Timer timer = cancelTimer; if (timer 
!= null) { timer.purge(); } } @Override public String escapeIdentifier(String identifier) throws SQLException { return Utils.escapeIdentifier(null, identifier).toString(); } @Override public String escapeLiteral(String literal) throws SQLException { return Utils.escapeLiteral(null, literal, queryExecutor.getStandardConformingStrings()) .toString(); } @Override public LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache() { return fieldMetadataCache; } @Override public RedshiftReplicationConnection getReplicationAPI() { return new RedshiftReplicationConnectionImpl(this); } private static void appendArray(StringBuilder sb, Object elements, char delim) { sb.append('{'); int nElements = java.lang.reflect.Array.getLength(elements); for (int i = 0; i < nElements; i++) { if (i > 0) { sb.append(delim); } Object o = java.lang.reflect.Array.get(elements, i); if (o == null) { sb.append("NULL"); } else if (o.getClass().isArray()) { final PrimitiveArraySupport arraySupport = PrimitiveArraySupport.getArraySupport(o); if (arraySupport != null) { arraySupport.appendArray(sb, delim, o); } else { appendArray(sb, o, delim); } } else { String s = o.toString(); RedshiftArray.escapeArrayElement(sb, s); } } sb.append('}'); } // Parse a "dirty" integer surrounded by non-numeric characters private static int integerPart(String dirtyString) { int start = 0; while (start < dirtyString.length() && !Character.isDigit(dirtyString.charAt(start))) { ++start; } int end = start; while (end < dirtyString.length() && Character.isDigit(dirtyString.charAt(end))) { ++end; } if (start == end) { return 0; } return Integer.parseInt(dirtyString.substring(start, end)); } @Override public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, resultSetType, resultSetConcurrency, resultSetHoldability); checkClosed(); Statement stmt = new RedshiftStatementImpl(this, resultSetType, 
resultSetConcurrency, resultSetHoldability); if (RedshiftLogger.isEnable()) logger.logFunction(false, stmt); return stmt; } @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, QuerySanitizer.filterCredentials(sql), resultSetType, resultSetConcurrency, resultSetHoldability); checkClosed(); PreparedStatement pstmt = new RedshiftPreparedStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability); if (RedshiftLogger.isEnable()) logger.logFunction(false, QuerySanitizer.filterCredentials(pstmt.toString())); return pstmt; } @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, QuerySanitizer.filterCredentials(sql), resultSetType, resultSetConcurrency, resultSetHoldability); checkClosed(); CallableStatement cstmt= new RedshiftCallableStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability); if (RedshiftLogger.isEnable()) logger.logFunction(false, QuerySanitizer.filterCredentials(cstmt.toString())); return cstmt; } @Override public DatabaseMetaData getMetaData() throws SQLException { checkClosed(); if (metadata == null) { metadata = new RedshiftDatabaseMetaData(this); } return metadata; } @Override public void setTypeMap(Map<String, Class<?>> map) throws SQLException { setTypeMapImpl(map); if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setTypeMap = {0}", map); } protected Array makeArray(int oid, String fieldString) throws SQLException { return new RedshiftArray(this, oid, fieldString); } protected Blob makeBlob(long oid) throws SQLException { return new RedshiftBlob(this, oid); } protected Clob makeClob(long oid) throws SQLException { return new RedshiftClob(this, oid); } protected SQLXML makeSQLXML() throws 
SQLException { return new RedshiftSQLXML(this); } @Override public Clob createClob() throws SQLException { checkClosed(); throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "createClob()"); } @Override public Blob createBlob() throws SQLException { checkClosed(); throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "createBlob()"); } @Override public NClob createNClob() throws SQLException { checkClosed(); throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "createNClob()"); } @Override public SQLXML createSQLXML() throws SQLException { checkClosed(); return makeSQLXML(); } @Override public Struct createStruct(String typeName, Object[] attributes) throws SQLException { checkClosed(); throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "createStruct(String, Object[])"); } @Override public Array createArrayOf(String typeName, Object elements) throws SQLException { checkClosed(); final TypeInfo typeInfo = getTypeInfo(); final int oid = typeInfo.getRSArrayType(typeName); final char delim = typeInfo.getArrayDelimiter(oid); if (oid == Oid.UNSPECIFIED) { throw new RedshiftException(GT.tr("Unable to find server array type for provided name {0}.", typeName), RedshiftState.INVALID_NAME); } if (elements == null) { return makeArray(oid, null); } final String arrayString; final PrimitiveArraySupport arraySupport = PrimitiveArraySupport.getArraySupport(elements); if (arraySupport != null) { // if the oid for the given type matches the default type, we might be // able to go straight to binary representation if (oid == arraySupport.getDefaultArrayTypeOid(typeInfo) && arraySupport.supportBinaryRepresentation() && getPreferQueryMode() != PreferQueryMode.SIMPLE) { return new RedshiftArray(this, oid, arraySupport.toBinaryRepresentation(this, elements)); } arrayString = arraySupport.toArrayString(delim, elements); } else { final Class<?> clazz = elements.getClass(); if (!clazz.isArray()) { throw new RedshiftException(GT.tr("Invalid 
elements {0}", elements), RedshiftState.INVALID_PARAMETER_TYPE); } StringBuilder sb = new StringBuilder(); appendArray(sb, elements, delim); arrayString = sb.toString(); } return makeArray(oid, arrayString); } @Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { checkClosed(); int oid = getTypeInfo().getRSArrayType(typeName); if (oid == Oid.UNSPECIFIED) { throw new RedshiftException( GT.tr("Unable to find server array type for provided name {0}.", typeName), RedshiftState.INVALID_NAME); } if (elements == null) { return makeArray(oid, null); } char delim = getTypeInfo().getArrayDelimiter(oid); StringBuilder sb = new StringBuilder(); appendArray(sb, elements, delim); return makeArray(oid, sb.toString()); } @Override public boolean isValid(int timeout) throws SQLException { if (timeout < 0) { throw new RedshiftException(GT.tr("Invalid timeout ({0}<0).", timeout), RedshiftState.INVALID_PARAMETER_VALUE); } if (isClosed()) { return false; } try { if (!disableIsValidQuery) { int savedNetworkTimeOut = getNetworkTimeout(); try { setNetworkTimeout(null, timeout * 1000); if (replicationConnection) { Statement statement = createStatement(); statement.execute("IDENTIFY_SYSTEM"); statement.close(); } else { PreparedStatement checkConnectionQuery; synchronized (this) { checkConnectionQuery = prepareStatement(""); } checkConnectionQuery.setQueryTimeout(timeout); checkConnectionQuery.executeUpdate(); checkConnectionQuery.close(); } return true; } finally { setNetworkTimeout(null, savedNetworkTimeOut); } } else return true; } catch (SQLException e) { if (RedshiftState.IN_FAILED_SQL_TRANSACTION.getState().equals(e.getSQLState())) { // "current transaction aborted", assume the connection is up and running return true; } if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, GT.tr("Validating connection."), e); } return false; } @Override public void setClientInfo(String name, String value) throws SQLClientInfoException { try { checkClosed(); 
} catch (final SQLException cause) { Map<String, ClientInfoStatus> failures = new HashMap<String, ClientInfoStatus>(); failures.put(name, ClientInfoStatus.REASON_UNKNOWN); throw new SQLClientInfoException(GT.tr("This connection has been closed."), failures, cause); } if ("ApplicationName".equals(name)) // haveMinimumServerVersion(ServerVersion.v9_0) && { if (value == null) { value = ""; } final String oldValue = queryExecutor.getApplicationName(); if (value.equals(oldValue)) { return; } try { StringBuilder sql = new StringBuilder("SET application_name = '"); Utils.escapeLiteral(sql, value, getStandardConformingStrings()); sql.append("'"); execSQLUpdate(sql.toString()); } catch (SQLException sqle) { Map<String, ClientInfoStatus> failures = new HashMap<String, ClientInfoStatus>(); failures.put(name, ClientInfoStatus.REASON_UNKNOWN); throw new SQLClientInfoException( GT.tr("Failed to set ClientInfo property: {0}", "ApplicationName"), sqle.getSQLState(), failures, sqle); } clientInfo.put(name, value); return; } addWarning(new SQLWarning(GT.tr("ClientInfo property not supported."), RedshiftState.NOT_IMPLEMENTED.getState())); } @Override public void setClientInfo(Properties properties) throws SQLClientInfoException { try { checkClosed(); } catch (final SQLException cause) { Map<String, ClientInfoStatus> failures = new HashMap<String, ClientInfoStatus>(); for (Map.Entry<Object, Object> e : properties.entrySet()) { failures.put((String) e.getKey(), ClientInfoStatus.REASON_UNKNOWN); } throw new SQLClientInfoException(GT.tr("This connection has been closed."), failures, cause); } Map<String, ClientInfoStatus> failures = new HashMap<String, ClientInfoStatus>(); for (String name : new String[]{"ApplicationName"}) { try { setClientInfo(name, properties.getProperty(name, null)); } catch (SQLClientInfoException e) { failures.putAll(e.getFailedProperties()); } } if (!failures.isEmpty()) { throw new SQLClientInfoException(GT.tr("One or more ClientInfo failed."), 
RedshiftState.NOT_IMPLEMENTED.getState(), failures); } } @Override public String getClientInfo(String name) throws SQLException { checkClosed(); clientInfo.put("ApplicationName", queryExecutor.getApplicationName()); return clientInfo.getProperty(name); } @Override public Properties getClientInfo() throws SQLException { checkClosed(); clientInfo.put("ApplicationName", queryExecutor.getApplicationName()); return clientInfo; } public <T> T createQueryObject(Class<T> ifc) throws SQLException { checkClosed(); throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "createQueryObject(Class<T>)"); } @Override public boolean isWrapperFor(Class<?> iface) throws SQLException { checkClosed(); return iface.isAssignableFrom(getClass()); } @Override public <T> T unwrap(Class<T> iface) throws SQLException { checkClosed(); if (iface.isAssignableFrom(getClass())) { return iface.cast(this); } throw new SQLException("Cannot unwrap to " + iface.getName()); } public String getSchema() throws SQLException { checkClosed(); Statement stmt = createStatement(); try { ResultSet rs = stmt.executeQuery("select current_schema()"); try { if (!rs.next()) { return null; // Is it ever possible? 
} return rs.getString(1); } finally { rs.close(); } } finally { stmt.close(); } } public void setSchema(String schema) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, schema); checkClosed(); Statement stmt = createStatement(); try { if (schema == null) { stmt.executeUpdate("SET SESSION search_path TO DEFAULT"); } else { StringBuilder sb = new StringBuilder(); sb.append("SET SESSION search_path TO '"); Utils.escapeLiteral(sb, schema, getStandardConformingStrings()); sb.append("'"); stmt.executeUpdate(sb.toString()); if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setSchema = {0}", schema); } } finally { stmt.close(); } } public class AbortCommand implements Runnable { public void run() { abort(); } } public void abort(Executor executor) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, executor); if (executor == null) { throw new SQLException("executor is null"); } if (isClosed()) { if (RedshiftLogger.isEnable()) logger.logFunction(false); return; } SQL_PERMISSION_ABORT.checkGuard(this); AbortCommand command = new AbortCommand(); executor.execute(command); if (RedshiftLogger.isEnable()) logger.logFunction(false); } public void setNetworkTimeout(Executor executor /*not used*/, int milliseconds) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, executor, milliseconds); checkClosed(); if (milliseconds < 0) { throw new RedshiftException(GT.tr("Network timeout must be a value greater than or equal to 0."), RedshiftState.INVALID_PARAMETER_VALUE); } SecurityManager securityManager = System.getSecurityManager(); if (securityManager != null) { securityManager.checkPermission(SQL_PERMISSION_NETWORK_TIMEOUT); } try { queryExecutor.setNetworkTimeout(milliseconds); } catch (IOException ioe) { throw new RedshiftException(GT.tr("Unable to set network timeout."), RedshiftState.COMMUNICATION_ERROR, ioe); } if (RedshiftLogger.isEnable()) logger.logFunction(false); } public int 
getNetworkTimeout() throws SQLException { checkClosed(); try { return queryExecutor.getNetworkTimeout(); } catch (IOException ioe) { throw new RedshiftException(GT.tr("Unable to get network timeout."), RedshiftState.COMMUNICATION_ERROR, ioe); } } @Override public void setHoldability(int holdability) throws SQLException { checkClosed(); switch (holdability) { case ResultSet.CLOSE_CURSORS_AT_COMMIT: rsHoldability = holdability; break; case ResultSet.HOLD_CURSORS_OVER_COMMIT: rsHoldability = holdability; break; default: throw new RedshiftException(GT.tr("Unknown ResultSet holdability setting: {0}.", holdability), RedshiftState.INVALID_PARAMETER_VALUE); } if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, " setHoldability = {0}", holdability); } @Override public int getHoldability() throws SQLException { checkClosed(); return rsHoldability; } @Override public Savepoint setSavepoint() throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true); checkClosed(); String pgName; if (getAutoCommit()) { throw new RedshiftException(GT.tr("Cannot establish a savepoint in auto-commit mode."), RedshiftState.NO_ACTIVE_SQL_TRANSACTION); } RedshiftSavepoint savepoint = new RedshiftSavepoint(savepointId++); pgName = savepoint.getRSName(); // Note we can't use execSQLUpdate because we don't want // to suppress BEGIN. Statement stmt = createStatement(); stmt.executeUpdate("SAVEPOINT " + pgName); stmt.close(); return savepoint; } @Override public Savepoint setSavepoint(String name) throws SQLException { if (RedshiftLogger.isEnable()) logger.logFunction(true, name); checkClosed(); if (getAutoCommit()) { throw new RedshiftException(GT.tr("Cannot establish a savepoint in auto-commit mode."), RedshiftState.NO_ACTIVE_SQL_TRANSACTION); } RedshiftSavepoint savepoint = new RedshiftSavepoint(name); // Note we can't use execSQLUpdate because we don't want // to suppress BEGIN. 
Statement stmt = createStatement();
stmt.executeUpdate("SAVEPOINT " + savepoint.getRSName());
stmt.close();
return savepoint;
}

/**
 * Rolls the current transaction back to the given savepoint.
 * The savepoint itself remains valid and can be rolled back to again.
 *
 * @param savepoint the savepoint to roll back to
 * @throws SQLException if the connection is closed or the server rejects the command
 */
@Override
public void rollback(Savepoint savepoint) throws SQLException {
  if (RedshiftLogger.isEnable())
    logger.logFunction(true, savepoint);

  checkClosed();
  RedshiftSavepoint pgSavepoint = (RedshiftSavepoint) savepoint;
  execSQLUpdate("ROLLBACK TO SAVEPOINT " + pgSavepoint.getRSName());

  if (RedshiftLogger.isEnable())
    logger.logFunction(false);
}

/**
 * Releases the given savepoint and invalidates the savepoint object so that
 * later attempts to use it fail fast.
 *
 * @param savepoint the savepoint to release
 * @throws SQLException if the connection is closed or the release fails
 */
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
  checkClosed();
  RedshiftSavepoint pgSavepoint = (RedshiftSavepoint) savepoint;
  execSQLUpdate("RELEASE SAVEPOINT " + pgSavepoint.getRSName());
  // Mark the savepoint object unusable after release.
  pgSavepoint.invalidate();
}

/**
 * Creates a Statement with the given result set type and concurrency,
 * delegating to the three-argument overload with the connection's
 * current holdability.
 */
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency)
    throws SQLException {
  if (RedshiftLogger.isEnable())
    logger.logFunction(true, resultSetType, resultSetConcurrency);

  checkClosed();
  Statement stmt = createStatement(resultSetType, resultSetConcurrency, getHoldability());

  if (RedshiftLogger.isEnable())
    logger.logFunction(false, stmt);

  return stmt;
}

/**
 * Prepares a statement with the given result set type and concurrency,
 * delegating to the four-argument overload with the connection's current
 * holdability. SQL text is sanitized before logging.
 */
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType,
    int resultSetConcurrency) throws SQLException {
  if (RedshiftLogger.isEnable())
    logger.logFunction(true, QuerySanitizer.filterCredentials(sql), resultSetType,
        resultSetConcurrency);

  checkClosed();
  PreparedStatement pstmt =
      prepareStatement(sql, resultSetType, resultSetConcurrency, getHoldability());

  if (RedshiftLogger.isEnable())
    logger.logFunction(false, QuerySanitizer.filterCredentials(pstmt.toString()));

  return pstmt;
}

/**
 * Prepares a callable statement with the given result set type and
 * concurrency, delegating to the four-argument overload with the
 * connection's current holdability.
 */
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency)
    throws SQLException {
  if (RedshiftLogger.isEnable())
    logger.logFunction(true, QuerySanitizer.filterCredentials(sql), resultSetType,
        resultSetConcurrency);

  checkClosed();
  CallableStatement cstmt =
      prepareCall(sql, resultSetType, resultSetConcurrency, getHoldability());

  if (RedshiftLogger.isEnable())
    logger.logFunction(false, QuerySanitizer.filterCredentials(cstmt.toString()));

  return cstmt;
}

/**
 * Prepares a statement, optionally requesting generated keys. Generated-key
 * support is routed through the String[] overload with a null column list.
 */
@Override
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
    throws SQLException {
  PreparedStatement pstmt;

  if (RedshiftLogger.isEnable())
    logger.logFunction(true, QuerySanitizer.filterCredentials(sql), autoGeneratedKeys);

  if (autoGeneratedKeys != Statement.RETURN_GENERATED_KEYS) {
    pstmt = prepareStatement(sql);
  } else {
    // Delegate to the column-name overload; a null list means "all columns".
    pstmt = prepareStatement(sql, (String[]) null);
    ((RedshiftPreparedStatement)pstmt).setAutoGeneratedKeys(autoGeneratedKeys);
  }

  if (RedshiftLogger.isEnable())
    logger.logFunction(false, QuerySanitizer.filterCredentials(pstmt.toString()));

  return pstmt;
}

/**
 * Prepares a statement with generated-key columns given by index.
 * Only an empty/null index list is accepted; returning autogenerated
 * keys by column index is not supported.
 */
@Override
public PreparedStatement prepareStatement(String sql, int[] columnIndexes)
    throws SQLException {
  if (RedshiftLogger.isEnable())
    logger.logFunction(true, QuerySanitizer.filterCredentials(sql), columnIndexes);

  if (columnIndexes == null || columnIndexes.length == 0) {
    PreparedStatement pstmt = prepareStatement(sql);

    if (RedshiftLogger.isEnable())
      logger.logFunction(false, pstmt);

    return pstmt;
  }

  checkClosed();
  throw new RedshiftException(GT.tr("Returning autogenerated keys is not supported."),
      RedshiftState.NOT_IMPLEMENTED);
}

/**
 * Prepares a statement with generated-key columns given by name. When names
 * are supplied, a RETURNING-style cached query is borrowed and the statement
 * is flagged to always fetch generated keys if the SQL supports it.
 */
@Override
public PreparedStatement prepareStatement(String sql, String[] columnNames)
    throws SQLException {
  PreparedStatement pstmt;

  if (RedshiftLogger.isEnable())
    logger.logFunction(true, QuerySanitizer.filterCredentials(sql), columnNames);

  if (columnNames == null || columnNames.length == 0) {
    pstmt = prepareStatement(sql);
  } else {
    // throw new RedshiftException(GT.tr("Returning autogenerated keys by column name is not supported."),
    // RedshiftState.NOT_IMPLEMENTED);
    CachedQuery cachedQuery = borrowReturningQuery(sql, columnNames);
    RedshiftPreparedStatement ps =
        new RedshiftPreparedStatement(this, cachedQuery, ResultSet.TYPE_FORWARD_ONLY,
            ResultSet.CONCUR_READ_ONLY, getHoldability());
    Query query = cachedQuery.query;
    SqlCommand sqlCommand = query.getSqlCommand();
    if (sqlCommand != null) {
      ps.wantsGeneratedKeysAlways = sqlCommand.isReturningKeywordPresent();
    } else {
      // If composite query is given, just ignore "generated keys" arguments
    }
    pstmt = ps;
  }

  if (RedshiftLogger.isEnable())
    logger.logFunction(false, QuerySanitizer.filterCredentials(pstmt.toString()));

  return pstmt;
}

/** Returns the server parameter statuses reported on this connection. */
@Override
public final Map<String,String> getParameterStatuses() {
  return queryExecutor.getParameterStatuses();
}

/** Returns a single server parameter status value, or null if not reported. */
@Override
public final String getParameterStatus(String parameterName) {
  return queryExecutor.getParameterStatus(parameterName);
}

/**
 * Get the optional setting.
 *
 * @param key The name of the setting to retrieve.
 * @param info The connection settings generated by a call to
 *        UpdateConnectionSettings().
 *
 * @return The String representing the specified setting, or null if the setting isn't present.
 */
public static String getOptionalSetting(String key, Properties info) {
  return info.getProperty(key);
}

/** Alias for {@link #getOptionalSetting(String, Properties)}. */
public static String getOptionalConnSetting(String key, Properties info) {
  return getOptionalSetting(key, info);
}

/**
 * Get the required setting, and throw an exception if it isn't present.
 *
 * @param key The name of the setting to retrieve.
 * @param info The connection settings generated by a call to
 *        UpdateConnectionSettings().
 *
 * @return The Variant representing the specified setting.
 *
 * @throws RedshiftException If the required setting isn't present.
*/ public static String getRequiredSetting(String key, Properties info) throws RedshiftException { String setting = info.getProperty(key); if (null == setting) { throw new RedshiftException( GT.tr("The required connection property does not found {0}", key), RedshiftState.UNEXPECTED_ERROR); } return setting; } public static String getRequiredConnSetting(String key, Properties info) throws RedshiftException { return getRequiredSetting(key, info); } /** * Helper function to break out AuthMech setting logic which is overly complicated in order to * remain backwards compatible with earlier releases, and add the "sslmode" feature. * * @param info Redshift settings used to authenticate if connection * should be granted. * * @throws RedshiftException If an unspecified error occurs. */ private boolean setAuthMech(Properties info) throws RedshiftException { //If key word ssl is specified in connection string either with nothing or true, //SSL is set to be required. boolean sslExplicitlyEnabled = false; boolean sslExplicitlyDisabled = false; String ssl = getOptionalSetting(RedshiftProperty.SSL.getName(), info); if (null != ssl) { if (Boolean.parseBoolean(ssl) || ssl.equals("")) { sslExplicitlyEnabled = true; m_settings.m_authMech = AuthMech.VERIFY_CA; } else if (!Boolean.parseBoolean(ssl)) { sslExplicitlyDisabled = true; } } String sslFactory = getOptionalSetting(RedshiftProperty.SSL_FACTORY.getName(), info); boolean sslFactorySet = false; // older releases would take sslfactory setting as a trigger to enable SSL. if ((null != sslFactory) && (isNonValidationFactory(sslFactory))) { // decrease authmech from "VERIFY_CA" to "REQUIRE" sslFactorySet = true; m_settings.m_authMech = AuthMech.REQUIRE; } String sslModeProp = getOptionalSetting(RedshiftProperty.SSL_MODE.getName(), info); String authMechProp = getOptionalSetting(RedshiftProperty.AUTH_MECH.getName(), info); String sslMode = (sslModeProp != null) ? 
sslModeProp : authMechProp; boolean sslModeSet = false; if (null != sslMode) { sslModeSet = true; } if (sslModeSet) { // SSL is now set to true by default. This should only fail if someone has explicitly // disabled SSL. if (sslExplicitlyDisabled) { throw new RedshiftException(GT.tr("Conflict in connection property setting {0} and {1}", RedshiftProperty.SSL_MODE.getName(), RedshiftProperty.SSL.getName()), RedshiftState.UNEXPECTED_ERROR); } if (sslFactorySet) { throw new RedshiftException(GT.tr("Conflict in connection property setting {0} and {1}", RedshiftProperty.SSL_MODE.getName(), RedshiftProperty.SSL_FACTORY.getName()), RedshiftState.UNEXPECTED_ERROR); } if (sslMode.equalsIgnoreCase(SslMode.VERIFY_FULL.value)) { // The user specifically asked for hostname validation m_settings.m_authMech = AuthMech.VERIFY_FULL; } else if (sslMode.equalsIgnoreCase(SslMode.VERIFY_CA.value)) { // By default, if is ssl is enabled, the server hostname validation // is not enabled. m_settings.m_authMech = AuthMech.VERIFY_CA; } else { RedshiftException err = new RedshiftException(GT.tr("Invalid connection property value {0} : {1}", RedshiftProperty.SSL_MODE.getName(), sslMode), RedshiftState.UNEXPECTED_ERROR); if(RedshiftLogger.isEnable()) logger.log(LogLevel.ERROR, err.toString()); throw err; } } // If none of above is set, default to enable SSL if (!sslExplicitlyEnabled && !sslExplicitlyDisabled && !sslFactorySet && !sslModeSet) { m_settings.m_authMech = AuthMech.VERIFY_CA; } return sslExplicitlyDisabled; } /** * Returns true if the given factory is non validating. False otherwise. * * @param factory The factory. * * @return true if the given factory is non validating. False otherwise. 
*/ private boolean isNonValidationFactory(String factory) { boolean result = false; // The valid non validating factory names are the one in the driver or the legacy one if (factory.equals(NON_VALIDATING_SSL_FACTORY) || factory.equals(NonValidatingFactory.class.getName())) { result = true; } return result; } /** * Tries to find whether the JVM is 64bit or not. * If it returns true, the JVM can be assumed to be 64-bit. * If it returns false, the JVM can be assumed to be 32-bit. * Returns true (i.e. 64-bit) by default, if it is not able to find the bitness of the JVM. * * @return true if it is 64-bit JVM, false if it is 32-bit JVM */ private static boolean checkIs64bitJVM() { String bitness = System.getProperty("sun.arch.data.model"); if (bitness != null && bitness.contains("32")) { return false; } // in other cases we can't conclude if its 32-bit JVM, hence assume 64-bit return true; } }
8,513
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftCallableStatement.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.Driver; import com.amazon.redshift.core.ParameterList; import com.amazon.redshift.core.Query; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.sql.Array; import java.sql.Blob; import java.sql.CallableStatement; import java.sql.Clob; import java.sql.NClob; import java.sql.Ref; import java.sql.ResultSet; import java.sql.RowId; import java.sql.SQLException; import java.sql.SQLXML; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; import java.util.Calendar; import java.util.Map; public class RedshiftCallableStatement extends RedshiftPreparedStatement implements CallableStatement { // Used by the callablestatement style methods private boolean isFunction; // functionReturnType contains the user supplied value to check // testReturn contains a modified version to make it easier to // check the getXXX methods.. 
// User-registered SQL type (java.sql.Types) for each OUT parameter;
// slot j corresponds to parameter j+1, 0 (Types.NULL) marks "not an OUT param".
private int[] functionReturnType;
// Normalized copy of functionReturnType used by the getXXX() type checks
// (CHAR/LONGVARCHAR widened to VARCHAR, FLOAT to REAL below).
private int[] testReturn;

// returnTypeSet is true when a proper call to registerOutParameter has been made
private boolean returnTypeSet;

// OUT parameter values captured by executeWithFlags(); 1-based via index-1 access.
protected Object[] callResult;
// 1-based index of the last OUT parameter fetched; consulted by wasNull().
private int lastIndex = 0;

/**
 * Creates a callable statement wrapping the given call SQL. When the parsed
 * query is a function call, OUT-parameter tracking arrays are sized to the
 * number of IN parameters plus one (for the return value slot).
 */
RedshiftCallableStatement(RedshiftConnectionImpl connection, String sql, int rsType,
    int rsConcurrency, int rsHoldability) throws SQLException {
  super(connection, connection.borrowCallableQuery(sql), rsType, rsConcurrency, rsHoldability);
  this.isFunction = preparedQuery.isFunction;

  if (this.isFunction) {
    int inParamCount = this.preparedParameters.getInParameterCount() + 1;
    this.testReturn = new int[inParamCount];
    this.functionReturnType = new int[inParamCount];
  }
}

/**
 * Executes the call. For a function call the update count is always 0;
 * otherwise delegates to the prepared-statement implementation.
 */
public int executeUpdate() throws SQLException {
  if (isFunction) {
    executeWithFlags(0);
    return 0;
  }
  return super.executeUpdate();
}

public Object getObject(int i, Map<String, Class<?>> map) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, i, map);
  return getObjectImpl(i, map);
}

public Object getObject(String s, Map<String, Class<?>> map) throws SQLException {
  return getObjectImpl(s, map);
}

/**
 * Executes the statement and, for a function with registered OUT parameters,
 * drains the single result row into {@link #callResult}, coercing column
 * types to the registered types where a documented mismatch is tolerated.
 * Returns false in that case since the result set has been consumed.
 */
@Override
public boolean executeWithFlags(int flags) throws SQLException {
  boolean hasResultSet = super.executeWithFlags(flags);
  if (!isFunction || !returnTypeSet) {
    return hasResultSet;
  }

  // If we are executing and there are out parameters
  // callable statement function set the return data
  if (!hasResultSet) {
    throw new RedshiftException(GT.tr("A CallableStatement was executed with nothing returned."),
        RedshiftState.NO_DATA);
  }

  ResultSet rs;
  synchronized (this) {
    checkClosed();
    rs = result.getResultSet();
  }
  if (!rs.next()) {
    throw new RedshiftException(GT.tr("A CallableStatement was executed with nothing returned."),
        RedshiftState.NO_DATA);
  }

  // figure out how many columns
  int cols = rs.getMetaData().getColumnCount();

  int outParameterCount = preparedParameters.getOutParameterCount();

  if (cols != outParameterCount) {
    throw new RedshiftException(
        GT.tr("A CallableStatement was executed with an invalid number of parameters"),
        RedshiftState.SYNTAX_ERROR);
  }

  // reset last result fetched (for wasNull)
  lastIndex = 0;

  // allocate enough space for all possible parameters without regard to in/out
  callResult = new Object[preparedParameters.getParameterCount() + 1];

  // move them into the result set
  for (int i = 0, j = 0; i < cols; i++, j++) {
    // find the next out parameter, the assumption is that the functionReturnType
    // array will be initialized with 0 and only out parameters will have values
    // other than 0. 0 is the value for java.sql.Types.NULL, which should not
    // conflict
    while (j < functionReturnType.length && functionReturnType[j] == 0) {
      j++;
    }

    callResult[j] = rs.getObject(i + 1);
    int columnType = rs.getMetaData().getColumnType(i + 1);

    if (columnType != functionReturnType[j]) {
      // this is here for the sole purpose of passing the cts
      if (columnType == Types.DOUBLE && functionReturnType[j] == Types.REAL) {
        // return it as a float
        if (callResult[j] != null) {
          callResult[j] = ((Double) callResult[j]).floatValue();
        }
        //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
      } else if (columnType == Types.REF_CURSOR && functionReturnType[j] == Types.OTHER) {
        // For backwards compatibility reasons we support that ref cursors can be
        // registered with both Types.OTHER and Types.REF_CURSOR so we allow
        // this specific mismatch
        //JCP! endif
      } else {
        throw new RedshiftException(GT.tr(
            "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.",
            i + 1, "java.sql.Types=" + columnType, "java.sql.Types=" + functionReturnType[j]),
            RedshiftState.DATA_TYPE_MISMATCH);
      }
    }
  }
  rs.close();
  synchronized (this) {
    result = null;
  }
  return false;
}

/**
 * {@inheritDoc}
 *
 * <p>Before executing a stored procedure call you must explicitly call registerOutParameter to
 * register the java.sql.Type of each out parameter.</p>
 *
 * <p>Note: When reading the value of an out parameter, you must use the getXXX method whose Java
 * type XXX corresponds to the parameter's registered SQL type.</p>
 *
 * <p>ONLY 1 RETURN PARAMETER if {?= call ..} syntax is used</p>
 *
 * @param parameterIndex the first parameter is 1, the second is 2,...
 * @param sqlType SQL type code defined by java.sql.Types; for parameters of type Numeric or
 *        Decimal use the version of registerOutParameter that accepts a scale value
 * @throws SQLException if a database-access error occurs.
 */
@Override
public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex, sqlType);

  checkClosed();
  // Map unsupported java.sql.Types onto the closest type the server returns.
  switch (sqlType) {
    case Types.TINYINT:
      // we don't have a TINYINT type use SMALLINT
      sqlType = Types.SMALLINT;
      break;
    case Types.LONGVARCHAR:
      sqlType = Types.VARCHAR;
      break;
    case Types.DECIMAL:
      sqlType = Types.NUMERIC;
      break;
    case Types.FLOAT:
      // float is the same as double
      sqlType = Types.DOUBLE;
      break;
    case Types.VARBINARY:
    case Types.LONGVARBINARY:
      sqlType = Types.BINARY;
      break;
    case Types.BOOLEAN:
      sqlType = Types.BIT;
      break;
    default:
      break;
  }
  if (!isFunction) {
    throw new RedshiftException(
        GT.tr(
            "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."),
        RedshiftState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
  }
  checkIndex(parameterIndex, false);

  preparedParameters.registerOutParameter(parameterIndex, sqlType);
  // functionReturnType contains the user supplied value to check
  // testReturn contains a modified version to make it easier to
  // check the getXXX methods..
  functionReturnType[parameterIndex - 1] = sqlType;
  testReturn[parameterIndex - 1] = sqlType;

  if (functionReturnType[parameterIndex - 1] == Types.CHAR
      || functionReturnType[parameterIndex - 1] == Types.LONGVARCHAR) {
    testReturn[parameterIndex - 1] = Types.VARCHAR;
  } else if (functionReturnType[parameterIndex - 1] == Types.FLOAT) {
    testReturn[parameterIndex - 1] = Types.REAL; // changes to streamline later error checking
  }
  returnTypeSet = true;
}

/**
 * Reports whether the last OUT parameter read was SQL NULL. Must be called
 * after a getXXX method has fetched a value.
 */
public boolean wasNull() throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true);

  if (lastIndex == 0) {
    throw new RedshiftException(GT.tr("wasNull cannot be call before fetching a result."),
        RedshiftState.OBJECT_NOT_IN_STATE);
  }

  // check to see if the last access threw an exception
  return (callResult[lastIndex - 1] == null);
}

public String getString(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.VARCHAR, "String");
  return (String) callResult[parameterIndex - 1];
}

public boolean getBoolean(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.BIT, "Boolean");
  if (callResult[parameterIndex - 1] == null) {
    return false;
  }

  return (Boolean) callResult[parameterIndex - 1];
}

public byte getByte(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  // fake tiny int with smallint
  checkIndex(parameterIndex,
    Types.SMALLINT, "Byte");

if (callResult[parameterIndex - 1] == null) {
  return 0;
}

// TINYINT was registered as SMALLINT, which arrives as an Integer.
return ((Integer) callResult[parameterIndex - 1]).byteValue();
}

public short getShort(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.SMALLINT, "Short");
  if (callResult[parameterIndex - 1] == null) {
    return 0;
  }
  // SMALLINT values are delivered as Integer.
  return ((Integer) callResult[parameterIndex - 1]).shortValue();
}

public int getInt(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.INTEGER, "Int");
  if (callResult[parameterIndex - 1] == null) {
    return 0;
  }

  return (Integer) callResult[parameterIndex - 1];
}

public long getLong(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.BIGINT, "Long");
  if (callResult[parameterIndex - 1] == null) {
    return 0;
  }

  return (Long) callResult[parameterIndex - 1];
}

public float getFloat(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.REAL, "Float");
  if (callResult[parameterIndex - 1] == null) {
    return 0;
  }

  return (Float) callResult[parameterIndex - 1];
}

public double getDouble(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.DOUBLE, "Double");
  if (callResult[parameterIndex - 1] == null) {
    return 0;
  }

  return (Double) callResult[parameterIndex - 1];
}

public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex, scale);

  checkClosed();
  // NOTE(review): the scale argument is ignored here — TODO confirm intended.
  checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal");
  return ((BigDecimal) callResult[parameterIndex - 1]);
}

public byte[] getBytes(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.VARBINARY, Types.BINARY, "Bytes");
  return ((byte[]) callResult[parameterIndex - 1]);
}

public java.sql.Date getDate(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.DATE, "Date");
  return (java.sql.Date) callResult[parameterIndex - 1];
}

public java.sql.Time getTime(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.TIME, "Time");
  return (java.sql.Time) callResult[parameterIndex - 1];
}

public java.sql.Timestamp getTimestamp(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.TIMESTAMP, "Timestamp");
  return (java.sql.Timestamp) callResult[parameterIndex - 1];
}

public Object getObject(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex);
  return callResult[parameterIndex - 1];
}

/**
 * helperfunction for the getXXX calls to check isFunction and index == 1 Compare BOTH type fields
 * against the return type.
 *
 * @param parameterIndex parameter index (1-based)
 * @param type1 type 1
 * @param type2 type 2
 * @param getName getter name
 * @throws SQLException if something goes wrong
 */
protected void checkIndex(int parameterIndex, int type1, int type2, String getName)
    throws SQLException {
  checkIndex(parameterIndex);
  if (type1 != this.testReturn[parameterIndex - 1]
      && type2 != this.testReturn[parameterIndex - 1]) {
    throw new RedshiftException(
        GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.",
            "java.sql.Types=" + testReturn[parameterIndex - 1], getName,
            "java.sql.Types=" + type1),
        RedshiftState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH);
  }
}

/**
 * Helper function for the getXXX calls to check isFunction and index == 1.
 *
 * @param parameterIndex parameter index (1-based)
 * @param type type
 * @param getName getter name
 * @throws SQLException if given index is not valid
 */
protected void checkIndex(int parameterIndex, int type, String getName) throws SQLException {
  checkIndex(parameterIndex);
  if (type != this.testReturn[parameterIndex - 1]) {
    throw new RedshiftException(
        GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.",
            "java.sql.Types=" + testReturn[parameterIndex - 1], getName,
            "java.sql.Types=" + type),
        RedshiftState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH);
  }
}

private void checkIndex(int parameterIndex) throws SQLException {
  checkIndex(parameterIndex, true);
}

/**
 * Helper function for the getXXX calls to check isFunction and index == 1.
 *
 * @param parameterIndex index of getXXX (index) check to make sure is a function and index == 1
 * @param fetchingData fetching data
 */
private void checkIndex(int parameterIndex, boolean fetchingData) throws SQLException {
  if (!isFunction) {
    throw new RedshiftException(
        GT.tr(
            "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."),
        RedshiftState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
  }

  if (fetchingData) {
    if (!returnTypeSet) {
      throw new RedshiftException(GT.tr("No function outputs were registered."),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }

    if (callResult == null) {
      throw new RedshiftException(
          GT.tr("Results cannot be retrieved from a CallableStatement before it is executed."),
          RedshiftState.NO_DATA);
    }

    // Remember which parameter was read so wasNull() can inspect it.
    lastIndex = parameterIndex;
  }
}

@Override
protected BatchResultHandler createBatchHandler(Query[] queries,
    ParameterList[] parameterLists) {
  return new CallableBatchResultHandler(this, queries, parameterLists);
}

public java.sql.Array getArray(int i) throws SQLException {
  checkClosed();
  checkIndex(i, Types.ARRAY, "Array");
  return (Array) callResult[i - 1];
}

public java.math.BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, parameterIndex);

  checkClosed();
  checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal");
  return ((BigDecimal) callResult[parameterIndex - 1]);
}

public Blob getBlob(int i) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "getBlob(int)");
}

public Clob getClob(int i) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "getClob(int)");
}

public Object getObjectImpl(int i, Map<String, Class<?>> map) throws SQLException {
  // Only the trivial (empty) type map is supported.
  if (map == null || map.isEmpty()) {
    return getObject(i);
  }
  throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
}

public Ref getRef(int i) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "getRef(int)");
}

public java.sql.Date
getDate(int i, java.util.Calendar cal) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, i, cal);

  checkClosed();
  checkIndex(i, Types.DATE, "Date");

  if (callResult[i - 1] == null) {
    return null;
  }

  // Re-parse the stored value's string form with the caller's calendar.
  String value = callResult[i - 1].toString();
  return connection.getTimestampUtils().toDate(cal, value);
}

public Time getTime(int i, java.util.Calendar cal) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, i, cal);

  checkClosed();
  checkIndex(i, Types.TIME, "Time");

  if (callResult[i - 1] == null) {
    return null;
  }

  String value = callResult[i - 1].toString();
  return connection.getTimestampUtils().toTime(cal, value);
}

public Timestamp getTimestamp(int i, java.util.Calendar cal) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, i, cal);

  checkClosed();
  checkIndex(i, Types.TIMESTAMP, "Timestamp");

  if (callResult[i - 1] == null) {
    return null;
  }

  String value = callResult[i - 1].toString();
  return connection.getTimestampUtils().toTimestamp(cal, value);
}

public void registerOutParameter(int parameterIndex, int sqlType, String typeName)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "registerOutParameter(int,int,String)");
}

// The following JDBC 4.2 SQLType-based overloads and named-parameter setters
// are not supported by this driver; each delegates to Driver.notImplemented.
//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
public void setObject(String parameterName, Object x, java.sql.SQLType targetSqlType,
    int scaleOrLength) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setObject");
}

public void setObject(String parameterName, Object x, java.sql.SQLType targetSqlType)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setObject");
}

public void registerOutParameter(int parameterIndex, java.sql.SQLType sqlType)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "registerOutParameter");
}

public void registerOutParameter(int parameterIndex, java.sql.SQLType sqlType, int scale)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "registerOutParameter");
}

public void registerOutParameter(int parameterIndex, java.sql.SQLType sqlType, String typeName)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "registerOutParameter");
}

public void registerOutParameter(String parameterName, java.sql.SQLType sqlType)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "registerOutParameter");
}

public void registerOutParameter(String parameterName, java.sql.SQLType sqlType, int scale)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "registerOutParameter");
}

public void registerOutParameter(String parameterName, java.sql.SQLType sqlType, String typeName)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "registerOutParameter");
}
//JCP! endif

public RowId getRowId(int parameterIndex) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "getRowId(int)");
}

public RowId getRowId(String parameterName) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "getRowId(String)");
}

public void setRowId(String parameterName, RowId x) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setRowId(String, RowId)");
}

public void setNString(String parameterName, String value) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setNString(String, String)");
}

public void setNCharacterStream(String parameterName, Reader value, long length)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader, long)");
}

public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader)");
}

public void setCharacterStream(String parameterName, Reader value, long length)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader, long)");
}

public void setCharacterStream(String parameterName, Reader value) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader)");
}

public void setBinaryStream(String parameterName, InputStream value, long length)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream, long)");
}

public void setBinaryStream(String parameterName, InputStream value) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream)");
}

public void setAsciiStream(String parameterName, InputStream value, long length)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream, long)");
}

public void setAsciiStream(String parameterName, InputStream value) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream)");
}

public void setNClob(String parameterName, NClob value) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setNClob(String, NClob)");
}

public void setClob(String parameterName, Reader reader, long length) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setClob(String, Reader, long)");
}

public void setClob(String parameterName, Reader reader) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setClob(String, Reader)");
}

public void setBlob(String parameterName, InputStream inputStream, long length)
    throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream, long)");
}

public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream)");
}

public void setBlob(String parameterName, Blob x) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setBlob(String, Blob)");
}

public void setClob(String parameterName, Clob x) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setClob(String, Clob)");
}

public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader, long)");
}

public void setNClob(String parameterName, Reader reader) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader)");
}

public NClob getNClob(int parameterIndex) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "getNClob(int)");
}

public NClob getNClob(String parameterName) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "getNClob(String)");
}

public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
  throw Driver.notImplemented(this.getClass(), "setSQLXML(String, SQLXML)");
}

public SQLXML getSQLXML(int
parameterIndex) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex); checkClosed(); checkIndex(parameterIndex, Types.SQLXML, "SQLXML"); return (SQLXML) callResult[parameterIndex - 1]; } public SQLXML getSQLXML(String parameterIndex) throws SQLException { throw Driver.notImplemented(this.getClass(), "getSQLXML(String)"); } public String getNString(int parameterIndex) throws SQLException { throw Driver.notImplemented(this.getClass(), "getNString(int)"); } public String getNString(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getNString(String)"); } public Reader getNCharacterStream(int parameterIndex) throws SQLException { throw Driver.notImplemented(this.getClass(), "getNCharacterStream(int)"); } public Reader getNCharacterStream(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getNCharacterStream(String)"); } public Reader getCharacterStream(int parameterIndex) throws SQLException { throw Driver.notImplemented(this.getClass(), "getCharacterStream(int)"); } public Reader getCharacterStream(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getCharacterStream(String)"); } public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, type); if (type == ResultSet.class) { return type.cast(getObject(parameterIndex)); } throw new RedshiftException(GT.tr("Unsupported type conversion to {1}.", type), RedshiftState.INVALID_PARAMETER_VALUE); } public <T> T getObject(String parameterName, Class<T> type) throws SQLException { throw Driver.notImplemented(this.getClass(), "getObject(String, Class<T>)"); } public void registerOutParameter(String parameterName, int sqlType) throws SQLException { throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int)"); } public void 
registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException { throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,int)"); } public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException { throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,String)"); } public java.net.URL getURL(int parameterIndex) throws SQLException { throw Driver.notImplemented(this.getClass(), "getURL(String)"); } public void setURL(String parameterName, java.net.URL val) throws SQLException { throw Driver.notImplemented(this.getClass(), "setURL(String,URL)"); } public void setNull(String parameterName, int sqlType) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNull(String,int)"); } public void setBoolean(String parameterName, boolean x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setBoolean(String,boolean)"); } public void setByte(String parameterName, byte x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setByte(String,byte)"); } public void setShort(String parameterName, short x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setShort(String,short)"); } public void setInt(String parameterName, int x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setInt(String,int)"); } public void setLong(String parameterName, long x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setLong(String,long)"); } public void setFloat(String parameterName, float x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setFloat(String,float)"); } public void setDouble(String parameterName, double x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setDouble(String,double)"); } public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException { throw Driver.notImplemented(this.getClass(), 
"setBigDecimal(String,BigDecimal)"); } public void setString(String parameterName, String x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setString(String,String)"); } public void setBytes(String parameterName, byte[] x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setBytes(String,byte)"); } public void setDate(String parameterName, java.sql.Date x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setDate(String,Date)"); } public void setTime(String parameterName, Time x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setTime(String,Time)"); } public void setTimestamp(String parameterName, Timestamp x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp)"); } public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setAsciiStream(String,InputStream,int)"); } public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setBinaryStream(String,InputStream,int)"); } public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException { throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int,int)"); } public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException { throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int)"); } public void setObject(String parameterName, Object x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setObject(String,Object)"); } public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setCharacterStream(String,Reader,int)"); } public void setDate(String parameterName, java.sql.Date x, Calendar cal) throws SQLException { throw 
Driver.notImplemented(this.getClass(), "setDate(String,Date,Calendar)"); } public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { throw Driver.notImplemented(this.getClass(), "setTime(String,Time,Calendar)"); } public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp,Calendar)"); } public void setNull(String parameterName, int sqlType, String typeName) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNull(String,int,String)"); } public String getString(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getString(String)"); } public boolean getBoolean(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getBoolean(String)"); } public byte getByte(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getByte(String)"); } public short getShort(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getShort(String)"); } public int getInt(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getInt(String)"); } public long getLong(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getLong(String)"); } public float getFloat(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getFloat(String)"); } public double getDouble(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getDouble(String)"); } public byte[] getBytes(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getBytes(String)"); } public java.sql.Date getDate(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getDate(String)"); } public Time getTime(String parameterName) throws 
SQLException { throw Driver.notImplemented(this.getClass(), "getTime(String)"); } public Timestamp getTimestamp(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getTimestamp(String)"); } public Object getObject(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getObject(String)"); } public BigDecimal getBigDecimal(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getBigDecimal(String)"); } public Object getObjectImpl(String parameterName, Map<String, Class<?>> map) throws SQLException { throw Driver.notImplemented(this.getClass(), "getObject(String,Map)"); } public Ref getRef(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getRef(String)"); } public Blob getBlob(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getBlob(String)"); } public Clob getClob(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getClob(String)"); } public Array getArray(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getArray(String)"); } public java.sql.Date getDate(String parameterName, Calendar cal) throws SQLException { throw Driver.notImplemented(this.getClass(), "getDate(String,Calendar)"); } public Time getTime(String parameterName, Calendar cal) throws SQLException { throw Driver.notImplemented(this.getClass(), "getTime(String,Calendar)"); } public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { throw Driver.notImplemented(this.getClass(), "getTimestamp(String,Calendar)"); } public java.net.URL getURL(String parameterName) throws SQLException { throw Driver.notImplemented(this.getClass(), "getURL(String)"); } public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException { // ignore scale for now registerOutParameter(parameterIndex, 
sqlType); } }
8,514
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/CallableBatchResultHandler.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.core.Field;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.Query;
import com.amazon.redshift.core.ResultCursor;
import com.amazon.redshift.core.Tuple;
import com.amazon.redshift.core.v3.MessageLoopState;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;

import java.util.List;

/**
 * Result handler used when a batch of callable statements is executed.
 *
 * <p>Identical to {@link BatchResultHandler} except that
 * {@link #handleResultRows} is overridden to discard any row data produced by
 * the batched queries.
 */
class CallableBatchResultHandler extends BatchResultHandler {

  CallableBatchResultHandler(RedshiftStatementImpl stmt, Query[] batchQueries,
      ParameterList[] batchParameterLists) {
    super(stmt, batchQueries, batchParameterLists, false);
  }

  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
      ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples, int[] rowCount,
      Thread ringBufferThread) {
    // Intentionally a no-op: rows returned by callable-statement batches are
    // deliberately ignored rather than collected.
  }
}
8,515
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftPreparedStatement.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.Driver; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.core.CachedQuery; import com.amazon.redshift.core.Oid; import com.amazon.redshift.core.ParameterList; import com.amazon.redshift.core.Query; import com.amazon.redshift.core.QueryExecutor; import com.amazon.redshift.core.TypeInfo; import com.amazon.redshift.core.v3.BatchedQuery; import com.amazon.redshift.largeobject.LargeObject; import com.amazon.redshift.largeobject.LargeObjectManager; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.ByteConverter; import com.amazon.redshift.util.ByteStreamWriter; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.HStoreConverter; import com.amazon.redshift.util.RedshiftBinaryObject; import com.amazon.redshift.util.RedshiftTime; import com.amazon.redshift.util.RedshiftTimestamp; import com.amazon.redshift.util.RedshiftIntervalYearToMonth; import com.amazon.redshift.util.RedshiftIntervalDayToSecond; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import com.amazon.redshift.util.ReaderInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Reader; import java.io.UnsupportedEncodingException; import java.io.Writer; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; import java.nio.charset.Charset; import java.sql.Array; import java.sql.Blob; import java.sql.Clob; import java.sql.NClob; import java.sql.ParameterMetaData; import java.sql.PreparedStatement; import java.sql.Ref; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.RowId; 
import java.sql.SQLException; import java.sql.SQLXML; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.OffsetDateTime; //JCP! endif import java.util.ArrayList; import java.util.Calendar; import java.util.Map; import java.util.TimeZone; // import java.util.UUID; public class RedshiftPreparedStatement extends RedshiftStatementImpl implements PreparedStatement { protected final CachedQuery preparedQuery; // Query fragments for prepared statement. protected final ParameterList preparedParameters; // Parameter values for prepared statement. private TimeZone defaultTimeZone; protected boolean enableGeneratedName; RedshiftPreparedStatement(RedshiftConnectionImpl connection, String sql, int rsType, int rsConcurrency, int rsHoldability) throws SQLException { this(connection, connection.borrowQuery(sql), rsType, rsConcurrency, rsHoldability); } RedshiftPreparedStatement(RedshiftConnectionImpl connection, CachedQuery query, int rsType, int rsConcurrency, int rsHoldability) throws SQLException { super(connection, rsType, rsConcurrency, rsHoldability); this.preparedQuery = query; this.preparedParameters = this.preparedQuery.query.createParameterList(); // TODO: this.wantsGeneratedKeysAlways = true; setPoolable(true); // As per JDBC spec: prepared and callable statements are poolable by enableGeneratedName = connection.getGeneratedName(); } @Override public ResultSet executeQuery(String sql) throws SQLException { throw new RedshiftException( GT.tr("Can''t use query methods that take a query string on a PreparedStatement."), RedshiftState.WRONG_OBJECT_TYPE); } /* * A Prepared SQL query is executed and its ResultSet is returned * * @return a ResultSet that contains the data produced by the * query - never null * * @exception SQLException if a database access error 
occurs */ @Override public ResultSet executeQuery() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); if (!executeWithFlags(0)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } ResultSet rs = getSingleResultSet(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rs); return rs; } @Override public int executeUpdate(String sql) throws SQLException { throw new RedshiftException( GT.tr("Can''t use query methods that take a query string on a PreparedStatement."), RedshiftState.WRONG_OBJECT_TYPE); } @Override public int executeUpdate() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); if(this.autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS) wantsGeneratedKeysOnce = true; executeWithFlags(QueryExecutor.QUERY_NO_RESULTS); checkNoResultUpdate(); int rc = getUpdateCount(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" @Override public long executeLargeUpdate() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); if(this.autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS) wantsGeneratedKeysOnce = true; executeWithFlags(QueryExecutor.QUERY_NO_RESULTS); checkNoResultUpdate(); long rc = getLargeUpdateCount(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } //JCP! 
endif @Override public boolean execute(String sql) throws SQLException { throw new RedshiftException( GT.tr("Can''t use query methods that take a query string on a PreparedStatement."), RedshiftState.WRONG_OBJECT_TYPE); } @Override public boolean execute() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); boolean rc = executeWithFlags(0); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } public boolean executeWithFlags(int flags) throws SQLException { try { checkClosed(); if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) { flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; } execute(preparedQuery, preparedParameters, flags); synchronized (this) { checkClosed(); return (result != null && result.getResultSet() != null); } } finally { defaultTimeZone = null; } } protected boolean isOneShotQuery(CachedQuery cachedQuery) { if (cachedQuery == null) { cachedQuery = preparedQuery; } boolean rc = super.isOneShotQuery(cachedQuery); // Prepare query can return as !OneShot based on enableGeneratedName setting, // So the driver can user server side statement using // generated statement name and same way portal. return enableGeneratedName ? 
false : rc; } @Override public void closeImpl() throws SQLException { if (preparedQuery != null) { ((RedshiftConnectionImpl) connection).releaseQuery(preparedQuery); super.closeImpl(); } } public void setNull(int parameterIndex, int sqlType) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, sqlType); checkClosed(); if (parameterIndex < 1 || parameterIndex > preparedParameters.getParameterCount()) { throw new RedshiftException( GT.tr("The column index is out of range: {0}, number of columns: {1}.", parameterIndex, preparedParameters.getParameterCount()), RedshiftState.INVALID_PARAMETER_VALUE); } int oid; switch (sqlType) { case Types.SQLXML: oid = Oid.XML; break; case Types.INTEGER: oid = Oid.INT4; break; case Types.TINYINT: case Types.SMALLINT: oid = Oid.INT2; break; case Types.BIGINT: oid = Oid.INT8; break; case Types.REAL: oid = Oid.FLOAT4; break; case Types.DOUBLE: case Types.FLOAT: oid = Oid.FLOAT8; break; case Types.DECIMAL: case Types.NUMERIC: oid = Oid.NUMERIC; break; case Types.CHAR: oid = Oid.BPCHAR; break; case Types.VARCHAR: case Types.LONGVARCHAR: oid = connection.getStringVarcharFlag() ? Oid.VARCHAR : Oid.UNSPECIFIED; break; case Types.DATE: oid = Oid.DATE; break; case Types.TIME: //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" case Types.TIME_WITH_TIMEZONE: case Types.TIMESTAMP_WITH_TIMEZONE: //JCP! endif case Types.TIMESTAMP: oid = Oid.UNSPECIFIED; break; case Types.BOOLEAN: case Types.BIT: oid = Oid.BOOL; break; case Types.BINARY: case Types.VARBINARY: case Types.BLOB: oid = Oid.BYTEA; break; case Types.LONGVARBINARY: oid = Oid.VARBYTE; // For NULL it's ambiguity which one to use as both (Oid.VARBYTE & Oid.GEOGRAPHY) map to same SQL type. 
break; case Types.CLOB: { // In case of NULL, CLOB can be seen as VARCHAR // This is useful in application like Spark dataframe which generates // code to setNull as CLOB without seeing data source support it or not // as dataframe read must have happen using a CLOB supported database like MySQL or SQL Server. oid = Oid.VARCHAR; break; } case Types.ARRAY: case Types.DISTINCT: case Types.STRUCT: case Types.NULL: case Types.OTHER: oid = Oid.UNSPECIFIED; break; default: // Bad Types value. throw new RedshiftException(GT.tr("Unknown Types value."), RedshiftState.INVALID_PARAMETER_TYPE); } preparedParameters.setNull(parameterIndex, oid); } public void setBoolean(int parameterIndex, boolean x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); // The key words TRUE and FALSE are the preferred (SQL-compliant) usage. bindLiteral(parameterIndex, x ? "TRUE" : "FALSE", Oid.BOOL); } public void setByte(int parameterIndex, byte x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); setShort(parameterIndex, x); } public void setShort(int parameterIndex, short x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (connection.binaryTransferSend(Oid.INT2)) { byte[] val = new byte[2]; ByteConverter.int2(val, 0, x); bindBytes(parameterIndex, val, Oid.INT2); return; } bindLiteral(parameterIndex, Integer.toString(x), Oid.INT2); } public void setInt(int parameterIndex, int x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (connection.binaryTransferSend(Oid.INT4)) { byte[] val = new byte[4]; ByteConverter.int4(val, 0, x); bindBytes(parameterIndex, val, Oid.INT4); return; } bindLiteral(parameterIndex, Integer.toString(x), Oid.INT4); } public void setLong(int parameterIndex, long x) throws 
SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (connection.binaryTransferSend(Oid.INT8)) { byte[] val = new byte[8]; ByteConverter.int8(val, 0, x); bindBytes(parameterIndex, val, Oid.INT8); return; } bindLiteral(parameterIndex, Long.toString(x), Oid.INT8); } public void setFloat(int parameterIndex, float x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (connection.binaryTransferSend(Oid.FLOAT4)) { byte[] val = new byte[4]; ByteConverter.float4(val, 0, x); bindBytes(parameterIndex, val, Oid.FLOAT4); return; } bindLiteral(parameterIndex, Float.toString(x), Oid.FLOAT8); } public void setDouble(int parameterIndex, double x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (connection.binaryTransferSend(Oid.FLOAT8)) { byte[] val = new byte[8]; ByteConverter.float8(val, 0, x); bindBytes(parameterIndex, val, Oid.FLOAT8); return; } bindLiteral(parameterIndex, Double.toString(x), Oid.FLOAT8); } public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); setNumber(parameterIndex, x); } public void setString(int parameterIndex, String x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); setString(parameterIndex, x, getStringType()); } private int getStringType() { return (connection.getStringVarcharFlag() ? 
Oid.VARCHAR : Oid.UNSPECIFIED); } protected void setString(int parameterIndex, String x, int oid) throws SQLException { // if the passed string is null, then set this column to null checkClosed(); if (x == null) { preparedParameters.setNull(parameterIndex, oid); } else { bindString(parameterIndex, x, oid); } } public void setBytes(int parameterIndex, byte[] x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (null == x) { setNull(parameterIndex, Types.VARBINARY); return; } // Version 7.2 supports the bytea datatype for byte arrays byte[] copy = new byte[x.length]; System.arraycopy(x, 0, copy, 0, x.length); preparedParameters.setBytea(parameterIndex, copy, 0, x.length); } private void setByteStreamWriter(int parameterIndex, ByteStreamWriter x) throws SQLException { preparedParameters.setBytea(parameterIndex, x); } public void setVarbyte(int parameterIndex, byte[] x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (null == x) { setNull(parameterIndex, Types.VARBINARY); return; } byte[] copy = new byte[x.length]; System.arraycopy(x, 0, copy, 0, x.length); preparedParameters.setVarbyte(parameterIndex, copy, 0, x.length); } public void setGeography(int parameterIndex, byte[] x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (null == x) { setNull(parameterIndex, Types.VARBINARY); return; } byte[] copy = new byte[x.length]; System.arraycopy(x, 0, copy, 0, x.length); preparedParameters.setGeography(parameterIndex, copy, 0, x.length); } public void setDate(int parameterIndex, java.sql.Date x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); setDate(parameterIndex, x, null); } public void setTime(int parameterIndex, Time x) throws SQLException { if 
(RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); setTime(parameterIndex, x, null); } public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); setTimestamp(parameterIndex, x, null); } public void setIntervalYearToMonth(int parameterIndex, RedshiftIntervalYearToMonth x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); if (x == null) { setNull(parameterIndex, Types.OTHER); return; } if (connection.binaryTransferSend(Oid.INTERVALY2M)) { byte[] bytes = new byte[4]; ByteConverter.int4(bytes, 0, (int) x.totalMonths()); preparedParameters.setBinaryParameter(parameterIndex, bytes, Oid.INTERVALY2M); return; } bindString(parameterIndex, x.getValue(), Oid.UNSPECIFIED); } public void setIntervalDayToSecond(int parameterIndex, RedshiftIntervalDayToSecond x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); if (x == null) { setNull(parameterIndex, Types.OTHER); return; } if (connection.binaryTransferSend(Oid.INTERVALD2S)) { byte[] bytes = new byte[8]; ByteConverter.int8(bytes, 0, (long) x.totalMicroseconds()); preparedParameters.setBinaryParameter(parameterIndex, bytes, Oid.INTERVALD2S); return; } bindString(parameterIndex, x.getValue(), Oid.UNSPECIFIED); } private void setCharacterStreamPost71(int parameterIndex, InputStream x, int length, String encoding) throws SQLException { if (x == null) { setNull(parameterIndex, Types.VARCHAR); return; } if (length < 0) { throw new RedshiftException(GT.tr("Invalid stream length {0}.", length), RedshiftState.INVALID_PARAMETER_VALUE); } // Version 7.2 supports AsciiStream for all RS text types (char, varchar, text) // As the spec/javadoc for this method indicate this is to be used for // large String values (i.e. 
LONGVARCHAR) RS doesn't have a separate // long varchar datatype, but with toast all text datatypes are capable of // handling very large values. Thus the implementation ends up calling // setString() since there is no current way to stream the value to the server try { InputStreamReader inStream = new InputStreamReader(x, encoding); char[] chars = new char[length]; int charsRead = 0; while (true) { int n = inStream.read(chars, charsRead, length - charsRead); if (n == -1) { break; } charsRead += n; if (charsRead == length) { break; } } setString(parameterIndex, new String(chars, 0, charsRead), Oid.VARCHAR); } catch (UnsupportedEncodingException uee) { throw new RedshiftException(GT.tr("The JVM claims not to support the {0} encoding.", encoding), RedshiftState.UNEXPECTED_ERROR, uee); } catch (IOException ioe) { throw new RedshiftException(GT.tr("Provided InputStream failed."), RedshiftState.UNEXPECTED_ERROR, ioe); } } public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { checkClosed(); setCharacterStreamPost71(parameterIndex, x, length, "ASCII"); } public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x, length); checkClosed(); setCharacterStreamPost71(parameterIndex, x, length, "UTF-8"); } public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { checkClosed(); if (x == null) { setNull(parameterIndex, Types.VARBINARY); return; } if (length < 0) { throw new RedshiftException(GT.tr("Invalid stream length {0}.", length), RedshiftState.INVALID_PARAMETER_VALUE); } // Version 7.2 supports BinaryStream for for the RS bytea type // As the spec/javadoc for this method indicate this is to be used for // large binary values (i.e. 
LONGVARBINARY) RS doesn't have a separate // long binary datatype, but with toast the bytea datatype is capable of // handling very large values. preparedParameters.setBytea(parameterIndex, x, length); } public void clearParameters() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); preparedParameters.clear(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } // Helper method for setting parameters to RedshiftObject subclasses. private void setRedshiftObject(int parameterIndex, RedshiftObject x) throws SQLException { String typename = x.getType(); int oid = connection.getTypeInfo().getRSType(typename); if (oid == Oid.UNSPECIFIED) { throw new RedshiftException(GT.tr("Unknown type {0}.", typename), RedshiftState.INVALID_PARAMETER_TYPE); } if ((x instanceof RedshiftBinaryObject) && connection.binaryTransferSend(oid)) { RedshiftBinaryObject binObj = (RedshiftBinaryObject) x; byte[] data = new byte[binObj.lengthInBytes()]; binObj.toBytes(data, 0); bindBytes(parameterIndex, data, oid); } else { setString(parameterIndex, x.getValue(), oid); } } private void setMap(int parameterIndex, Map<?, ?> x) throws SQLException { int oid = connection.getTypeInfo().getRSType("hstore"); if (oid == Oid.UNSPECIFIED) { throw new RedshiftException(GT.tr("No hstore extension installed."), RedshiftState.INVALID_PARAMETER_TYPE); } if (connection.binaryTransferSend(oid)) { byte[] data = HStoreConverter.toBytes(x, connection.getEncoding()); bindBytes(parameterIndex, data, oid); } else { setString(parameterIndex, HStoreConverter.toString(x), oid); } } private void setNumber(int parameterIndex, Number x) throws SQLException { checkClosed(); if (x == null) { setNull(parameterIndex, Types.DECIMAL); } else { if(x instanceof BigInteger) setString(parameterIndex, x.toString()); else bindLiteral(parameterIndex, x.toString(), Oid.NUMERIC); } } @Override public void setObject(int parameterIndex, Object in, int targetSqlType, int 
scale) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, in, targetSqlType, scale); checkClosed(); if (in == null) { setNull(parameterIndex, targetSqlType); return; } /* if (targetSqlType == Types.OTHER && in instanceof UUID && connection.haveMinimumServerVersion(ServerVersion.v8_3)) { setUuid(parameterIndex, (UUID) in); return; } */ switch (targetSqlType) { case Types.SQLXML: if (in instanceof SQLXML) { setSQLXML(parameterIndex, (SQLXML) in); } else { setSQLXML(parameterIndex, new RedshiftSQLXML(connection, in.toString())); } break; case Types.INTEGER: setInt(parameterIndex, castToInt(in)); break; case Types.TINYINT: case Types.SMALLINT: setShort(parameterIndex, castToShort(in)); break; case Types.BIGINT: setLong(parameterIndex, castToLong(in)); break; case Types.REAL: setFloat(parameterIndex, castToFloat(in)); break; case Types.DOUBLE: case Types.FLOAT: setDouble(parameterIndex, castToDouble(in)); break; case Types.DECIMAL: case Types.NUMERIC: setBigDecimal(parameterIndex, castToBigDecimal(in, scale)); break; case Types.CHAR: setString(parameterIndex, castToString(in), Oid.BPCHAR); break; case Types.VARCHAR: setString(parameterIndex, castToString(in), getStringType()); break; case Types.LONGVARCHAR: if (in instanceof InputStream) { preparedParameters.setText(parameterIndex, (InputStream)in); } else { setString(parameterIndex, castToString(in), getStringType()); } break; case Types.DATE: if (in instanceof java.sql.Date) { setDate(parameterIndex, (java.sql.Date) in); } else { java.sql.Date tmpd; if (in instanceof java.util.Date) { tmpd = new java.sql.Date(((java.util.Date) in).getTime()); //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" } else if (in instanceof LocalDate) { setDate(parameterIndex, (LocalDate) in); break; //JCP! 
endif } else { tmpd = connection.getTimestampUtils().toDate(getDefaultCalendar(), in.toString()); } setDate(parameterIndex, tmpd); } break; case Types.TIME: if (in instanceof java.sql.Time) { setTime(parameterIndex, (java.sql.Time) in); } else { java.sql.Time tmpt; if (in instanceof java.util.Date) { tmpt = new java.sql.Time(((java.util.Date) in).getTime()); //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" } else if (in instanceof LocalTime) { setTime(parameterIndex, (LocalTime) in); break; //JCP! endif } else { tmpt = connection.getTimestampUtils().toTime(getDefaultCalendar(), in.toString()); } setTime(parameterIndex, tmpt); } break; case Types.TIMESTAMP: if (in instanceof RedshiftTimestamp) { setObject(parameterIndex, in); } else if (in instanceof java.sql.Timestamp) { setTimestamp(parameterIndex, (java.sql.Timestamp) in); } else { java.sql.Timestamp tmpts; if (in instanceof java.util.Date) { tmpts = new java.sql.Timestamp(((java.util.Date) in).getTime()); //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" } else if (in instanceof LocalDateTime) { setTimestamp(parameterIndex, (LocalDateTime) in); break; //JCP! endif } else { tmpts = connection.getTimestampUtils().toTimestamp(getDefaultCalendar(), in.toString()); } setTimestamp(parameterIndex, tmpts); } break; //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" case Types.TIMESTAMP_WITH_TIMEZONE: if (in instanceof OffsetDateTime) { setTimestamp(parameterIndex, (OffsetDateTime) in); } else if (in instanceof RedshiftTimestamp) { setObject(parameterIndex, in); } else { throw new RedshiftException( GT.tr("Cannot cast an instance of {0} to type {1}", in.getClass().getName(), "Types.TIMESTAMP_WITH_TIMEZONE"), RedshiftState.INVALID_PARAMETER_TYPE); } break; //JCP! 
endif case Types.BOOLEAN: case Types.BIT: setBoolean(parameterIndex, BooleanTypeUtil.castToBoolean(in)); break; case Types.BINARY: case Types.VARBINARY: case Types.LONGVARBINARY: setObject(parameterIndex, in); break; case Types.BLOB: if (in instanceof Blob) { setBlob(parameterIndex, (Blob) in); } else if (in instanceof InputStream) { long oid = createBlob(parameterIndex, (InputStream) in, -1); setLong(parameterIndex, oid); } else { throw new RedshiftException( GT.tr("Cannot cast an instance of {0} to type {1}", in.getClass().getName(), "Types.BLOB"), RedshiftState.INVALID_PARAMETER_TYPE); } break; case Types.CLOB: if (in instanceof Clob) { setClob(parameterIndex, (Clob) in); } else { throw new RedshiftException( GT.tr("Cannot cast an instance of {0} to type {1}", in.getClass().getName(), "Types.CLOB"), RedshiftState.INVALID_PARAMETER_TYPE); } break; case Types.ARRAY: if (in instanceof Array) { setArray(parameterIndex, (Array) in); } else if (PrimitiveArraySupport.isSupportedPrimitiveArray(in)) { setPrimitiveArray(parameterIndex, in); } else { throw new RedshiftException( GT.tr("Cannot cast an instance of {0} to type {1}", in.getClass().getName(), "Types.ARRAY"), RedshiftState.INVALID_PARAMETER_TYPE); } break; case Types.DISTINCT: bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED); break; case Types.OTHER: if (in instanceof RedshiftObject) { setRedshiftObject(parameterIndex, (RedshiftObject) in); } else if (in instanceof Map) { setMap(parameterIndex, (Map<?, ?>) in); } else { bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED); } break; default: throw new RedshiftException(GT.tr("Unsupported Types value: {0}", targetSqlType), RedshiftState.INVALID_PARAMETER_TYPE); } } private <A> void setPrimitiveArray(int parameterIndex, A in) throws SQLException { final PrimitiveArraySupport<A> arrayToString = PrimitiveArraySupport.getArraySupport(in); final TypeInfo typeInfo = connection.getTypeInfo(); final int oid = arrayToString.getDefaultArrayTypeOid(typeInfo); 
if (arrayToString.supportBinaryRepresentation() && connection.getPreferQueryMode() != PreferQueryMode.SIMPLE) { bindBytes(parameterIndex, arrayToString.toBinaryRepresentation(connection, in), oid); } else { final char delim = typeInfo.getArrayDelimiter(oid); setString(parameterIndex, arrayToString.toArrayString(delim, in), oid); } } private static String asString(final Clob in) throws SQLException { return in.getSubString(1, (int) in.length()); } private static int castToInt(final Object in) throws SQLException { try { if (in instanceof String) { return Integer.parseInt((String) in); } if (in instanceof Number) { return ((Number) in).intValue(); } if (in instanceof java.util.Date) { return (int) ((java.util.Date) in).getTime(); } if (in instanceof Boolean) { return (Boolean) in ? 1 : 0; } if (in instanceof Clob) { return Integer.parseInt(asString((Clob) in)); } if (in instanceof Character) { return Integer.parseInt(in.toString()); } } catch (final Exception e) { throw cannotCastException(in.getClass().getName(), "int", e); } throw cannotCastException(in.getClass().getName(), "int"); } private static short castToShort(final Object in) throws SQLException { try { if (in instanceof String) { return Short.parseShort((String) in); } if (in instanceof Number) { return ((Number) in).shortValue(); } if (in instanceof java.util.Date) { return (short) ((java.util.Date) in).getTime(); } if (in instanceof Boolean) { return (Boolean) in ? 
(short) 1 : (short) 0; } if (in instanceof Clob) { return Short.parseShort(asString((Clob) in)); } if (in instanceof Character) { return Short.parseShort(in.toString()); } } catch (final Exception e) { throw cannotCastException(in.getClass().getName(), "short", e); } throw cannotCastException(in.getClass().getName(), "short"); } private static long castToLong(final Object in) throws SQLException { try { if (in instanceof String) { return Long.parseLong((String) in); } if (in instanceof Number) { return ((Number) in).longValue(); } if (in instanceof java.util.Date) { return ((java.util.Date) in).getTime(); } if (in instanceof Boolean) { return (Boolean) in ? 1L : 0L; } if (in instanceof Clob) { return Long.parseLong(asString((Clob) in)); } if (in instanceof Character) { return Long.parseLong(in.toString()); } } catch (final Exception e) { throw cannotCastException(in.getClass().getName(), "long", e); } throw cannotCastException(in.getClass().getName(), "long"); } private static float castToFloat(final Object in) throws SQLException { try { if (in instanceof String) { return Float.parseFloat((String) in); } if (in instanceof Number) { return ((Number) in).floatValue(); } if (in instanceof java.util.Date) { return ((java.util.Date) in).getTime(); } if (in instanceof Boolean) { return (Boolean) in ? 1f : 0f; } if (in instanceof Clob) { return Float.parseFloat(asString((Clob) in)); } if (in instanceof Character) { return Float.parseFloat(in.toString()); } } catch (final Exception e) { throw cannotCastException(in.getClass().getName(), "float", e); } throw cannotCastException(in.getClass().getName(), "float"); } private static double castToDouble(final Object in) throws SQLException { try { if (in instanceof String) { return Double.parseDouble((String) in); } if (in instanceof Number) { return ((Number) in).doubleValue(); } if (in instanceof java.util.Date) { return ((java.util.Date) in).getTime(); } if (in instanceof Boolean) { return (Boolean) in ? 
1d : 0d; } if (in instanceof Clob) { return Double.parseDouble(asString((Clob) in)); } if (in instanceof Character) { return Double.parseDouble(in.toString()); } } catch (final Exception e) { throw cannotCastException(in.getClass().getName(), "double", e); } throw cannotCastException(in.getClass().getName(), "double"); } private static BigDecimal castToBigDecimal(final Object in, final int scale) throws SQLException { try { BigDecimal rc = null; if (in instanceof String) { rc = new BigDecimal((String) in); } else if (in instanceof BigDecimal) { rc = ((BigDecimal) in); } else if (in instanceof BigInteger) { rc = new BigDecimal((BigInteger) in); } else if (in instanceof Long || in instanceof Integer || in instanceof Short || in instanceof Byte) { rc = BigDecimal.valueOf(((Number) in).longValue()); } else if (in instanceof Double || in instanceof Float) { rc = BigDecimal.valueOf(((Number) in).doubleValue()); } else if (in instanceof java.util.Date) { rc = BigDecimal.valueOf(((java.util.Date) in).getTime()); } else if (in instanceof Boolean) { rc = (Boolean) in ? BigDecimal.ONE : BigDecimal.ZERO; } else if (in instanceof Clob) { rc = new BigDecimal(asString((Clob) in)); } else if (in instanceof Character) { rc = new BigDecimal(new char[]{(Character) in}); } if (rc != null) { if (scale >= 0) { rc = rc.setScale(scale, RoundingMode.HALF_UP); } return rc; } } catch (final Exception e) { throw cannotCastException(in.getClass().getName(), "BigDecimal", e); } throw cannotCastException(in.getClass().getName(), "BigDecimal"); } private static String castToString(final Object in) throws SQLException { try { if (in instanceof String) { return (String) in; } if (in instanceof Clob) { return asString((Clob) in); } // convert any unknown objects to string. 
return in.toString(); } catch (final Exception e) { throw cannotCastException(in.getClass().getName(), "String", e); } } private static RedshiftException cannotCastException(final String fromType, final String toType) { return cannotCastException(fromType, toType, null); } private static RedshiftException cannotCastException(final String fromType, final String toType, final Exception cause) { return new RedshiftException( GT.tr("Cannot convert an instance of {0} to type {1}", fromType, toType), RedshiftState.INVALID_PARAMETER_TYPE, cause); } public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x, targetSqlType); setObject(parameterIndex, x, targetSqlType, -1); } /* * This stores an Object into a parameter. */ public void setObject(int parameterIndex, Object x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, x); checkClosed(); if (x == null) { setNull(parameterIndex, Types.OTHER); /* } else if (x instanceof UUID && connection.haveMinimumServerVersion(ServerVersion.v8_3)) { setUuid(parameterIndex, (UUID) x); */ } else if (x instanceof SQLXML) { setSQLXML(parameterIndex, (SQLXML) x); } else if (x instanceof String) { setString(parameterIndex, (String) x); } else if (x instanceof BigDecimal) { setBigDecimal(parameterIndex, (BigDecimal) x); } else if (x instanceof Short) { setShort(parameterIndex, (Short) x); } else if (x instanceof Integer) { setInt(parameterIndex, (Integer) x); } else if (x instanceof Long) { setLong(parameterIndex, (Long) x); } else if (x instanceof Float) { setFloat(parameterIndex, (Float) x); } else if (x instanceof Double) { setDouble(parameterIndex, (Double) x); } else if (x instanceof byte[]) { setBytes(parameterIndex, (byte[]) x); } else if (x instanceof ByteStreamWriter) { setByteStreamWriter(parameterIndex, (ByteStreamWriter) x); } else if (x instanceof 
java.sql.Date) { setDate(parameterIndex, (java.sql.Date) x); } else if (x instanceof Time) { setTime(parameterIndex, (Time) x); } else if (x instanceof Timestamp) { setTimestamp(parameterIndex, (Timestamp) x); } else if (x instanceof RedshiftIntervalYearToMonth) { setIntervalYearToMonth(parameterIndex, (RedshiftIntervalYearToMonth) x); } else if (x instanceof RedshiftIntervalDayToSecond) { setIntervalDayToSecond(parameterIndex, (RedshiftIntervalDayToSecond) x); } else if (x instanceof Boolean) { setBoolean(parameterIndex, (Boolean) x); } else if (x instanceof Byte) { setByte(parameterIndex, (Byte) x); } else if (x instanceof Blob) { setBlob(parameterIndex, (Blob) x); } else if (x instanceof Clob) { setClob(parameterIndex, (Clob) x); } else if (x instanceof Array) { setArray(parameterIndex, (Array) x); } else if (x instanceof RedshiftObject) { setRedshiftObject(parameterIndex, (RedshiftObject) x); } else if (x instanceof Character) { setString(parameterIndex, ((Character) x).toString()); //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" } else if (x instanceof LocalDate) { setDate(parameterIndex, (LocalDate) x); } else if (x instanceof LocalTime) { setTime(parameterIndex, (LocalTime) x); } else if (x instanceof LocalDateTime) { setTimestamp(parameterIndex, (LocalDateTime) x); } else if (x instanceof OffsetDateTime) { setTimestamp(parameterIndex, (OffsetDateTime) x); //JCP! endif } else if (x instanceof Map) { setMap(parameterIndex, (Map<?, ?>) x); } else if (x instanceof Number) { setNumber(parameterIndex, (Number) x); } else if (PrimitiveArraySupport.isSupportedPrimitiveArray(x)) { setPrimitiveArray(parameterIndex, x); } else { // Can't infer a type. throw new RedshiftException(GT.tr( "Can''t infer the SQL type to use for an instance of {0}. 
Use setObject() with an explicit Types value to specify the type to use.", x.getClass().getName()), RedshiftState.INVALID_PARAMETER_TYPE); } } /** * Returns the SQL statement with the current template values substituted. * * @return SQL statement with the current template values substituted */ public String toString() { if (preparedQuery == null) { return super.toString(); } return preparedQuery.query.toString(preparedParameters); } /** * Note if s is a String it should be escaped by the caller to avoid SQL injection attacks. It is * not done here for efficiency reasons as most calls to this method do not require escaping as * the source of the string is known safe (i.e. {@code Integer.toString()}) * * @param paramIndex parameter index * @param s value (the value should already be escaped) * @param oid type oid * @throws SQLException if something goes wrong */ protected void bindLiteral(int paramIndex, String s, int oid) throws SQLException { preparedParameters.setLiteralParameter(paramIndex, s, oid); } protected void bindBytes(int paramIndex, byte[] b, int oid) throws SQLException { preparedParameters.setBinaryParameter(paramIndex, b, oid); } /** * This version is for values that should turn into strings e.g. setString directly calls * bindString with no escaping; the per-protocol ParameterList does escaping as needed. 
* * @param paramIndex parameter index * @param s value * @param oid type oid * @throws SQLException if something goes wrong */ private void bindString(int paramIndex, String s, int oid) throws SQLException { preparedParameters.setStringParameter(paramIndex, s, oid); } @Override public boolean isUseServerPrepare() { return (preparedQuery != null && mPrepareThreshold != 0 && preparedQuery.getExecuteCount() + 1 >= mPrepareThreshold); } @Override public void addBatch(String sql) throws SQLException { checkClosed(); throw new RedshiftException( GT.tr("Can''t use query methods that take a query string on a PreparedStatement."), RedshiftState.WRONG_OBJECT_TYPE); } @Override public void addBatch() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); checkClosed(); if (batchStatements == null) { batchStatements = new ArrayList<Query>(); batchParameters = new ArrayList<ParameterList>(); } // we need to create copies of our parameters, otherwise the values can be changed batchParameters.add(preparedParameters.copy()); Query query = preparedQuery.query; if (!(query instanceof BatchedQuery) || batchStatements.isEmpty()) { batchStatements.add(query); } if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } public ResultSetMetaData getMetaData() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); checkClosed(); ResultSet rs = getResultSet(); if (rs == null || ((RedshiftResultSet) rs).isResultSetClosed()) { // OK, we haven't executed it yet, or it was closed // we've got to go to the backend // for more info. We send the full query, but just don't // execute it. 
int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY | QueryExecutor.QUERY_SUPPRESS_BEGIN; StatementResultHandler handler = new StatementResultHandler(this); connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0, flags); ResultWrapper wrapper = handler.getResults(); if (wrapper != null) { // Keep reference to close the result firstUnclosedResult = wrapper; rs = wrapper.getResultSet(); } // Describe only execution is done. handler.setStatementStateIdleFromInQuery(); } ResultSetMetaData rc; if (rs != null) { rc = rs.getMetaData(); } else rc = null; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } public void setArray(int i, java.sql.Array x) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, i, x); checkClosed(); if (null == x) { setNull(i, Types.ARRAY); return; } // This only works for Array implementations that return a valid array // literal from Array.toString(), such as the implementation we return // from ResultSet.getArray(). Eventually we need a proper implementation // here that works for any Array implementation. 
String typename = x.getBaseTypeName(); int oid = connection.getTypeInfo().getRSArrayType(typename); if (oid == Oid.UNSPECIFIED) { throw new RedshiftException(GT.tr("Unknown type {0}.", typename), RedshiftState.INVALID_PARAMETER_TYPE); } if (x instanceof RedshiftArray) { RedshiftArray arr = (RedshiftArray) x; if (arr.isBinary()) { bindBytes(i, arr.toBytes(), oid); return; } } setString(i, x.toString(), oid); } protected long createBlob(int i, InputStream inputStream, long length) throws SQLException { LargeObjectManager lom = connection.getLargeObjectAPI(); long oid = lom.createLO(); LargeObject lob = lom.open(oid); OutputStream outputStream = lob.getOutputStream(); byte[] buf = new byte[4096]; try { long remaining; if (length > 0) { remaining = length; } else { remaining = Long.MAX_VALUE; } int numRead = inputStream.read(buf, 0, (length > 0 && remaining < buf.length ? (int) remaining : buf.length)); while (numRead != -1 && remaining > 0) { remaining -= numRead; outputStream.write(buf, 0, numRead); numRead = inputStream.read(buf, 0, (length > 0 && remaining < buf.length ? 
(int) remaining : buf.length)); } } catch (IOException se) { throw new RedshiftException(GT.tr("Unexpected error writing large object to database."), RedshiftState.UNEXPECTED_ERROR, se); } finally { try { outputStream.close(); } catch (Exception e) { } } return oid; } public void setBlob(int i, Blob x) throws SQLException { checkClosed(); if (x == null) { setNull(i, Types.BLOB); return; } InputStream inStream = x.getBinaryStream(); try { long oid = createBlob(i, inStream, x.length()); setLong(i, oid); } finally { try { inStream.close(); } catch (Exception e) { } } } private String readerToString(Reader value, int maxLength) throws SQLException { try { int bufferSize = Math.min(maxLength, 1024); StringBuilder v = new StringBuilder(bufferSize); char[] buf = new char[bufferSize]; int nRead = 0; while (nRead > -1 && v.length() < maxLength) { nRead = value.read(buf, 0, Math.min(bufferSize, maxLength - v.length())); if (nRead > 0) { v.append(buf, 0, nRead); } } return v.toString(); } catch (IOException ioe) { throw new RedshiftException(GT.tr("Provided Reader failed."), RedshiftState.UNEXPECTED_ERROR, ioe); } } public void setCharacterStream(int i, java.io.Reader x, int length) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, i, x, length); checkClosed(); if (x == null) { setNull(i, Types.VARCHAR); return; } if (length < 0) { throw new RedshiftException(GT.tr("Invalid stream length {0}.", length), RedshiftState.INVALID_PARAMETER_VALUE); } // Version 7.2 supports CharacterStream for for the RS text types // As the spec/javadoc for this method indicate this is to be used for // large text values (i.e. LONGVARCHAR) RS doesn't have a separate // long varchar datatype, but with toast all the text datatypes are capable of // handling very large values. 
Thus the implementation ends up calling // setString() since there is no current way to stream the value to the server setString(i, readerToString(x, length)); } @Override public void setClob(int i, Clob x) throws SQLException { checkClosed(); if (x == null) { setNull(i, Types.CLOB); return; } Reader inStream = x.getCharacterStream(); int length = (int) x.length(); LargeObjectManager lom = connection.getLargeObjectAPI(); long oid = lom.createLO(); LargeObject lob = lom.open(oid); Charset connectionCharset = Charset.forName(connection.getEncoding().name()); OutputStream los = lob.getOutputStream(); Writer lw = new OutputStreamWriter(los, connectionCharset); try { // could be buffered, but then the OutputStream returned by LargeObject // is buffered internally anyhow, so there would be no performance // boost gained, if anything it would be worse! int c = inStream.read(); int p = 0; while (c > -1 && p < length) { lw.write(c); c = inStream.read(); p++; } lw.close(); } catch (IOException se) { throw new RedshiftException(GT.tr("Unexpected error writing large object to database."), RedshiftState.UNEXPECTED_ERROR, se); } // lob is closed by the stream so don't call lob.close() setLong(i, oid); } public void setNull(int parameterIndex, int t, String typeName) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, t, typeName); if (typeName == null) { setNull(parameterIndex, t); return; } checkClosed(); TypeInfo typeInfo = connection.getTypeInfo(); int oid = typeInfo.getRSType(typeName); preparedParameters.setNull(parameterIndex, oid); } public void setRef(int i, Ref x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setRef(int,Ref)"); } public void setDate(int i, java.sql.Date d, java.util.Calendar cal) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, i, d, cal); checkClosed(); if (d == null) { setNull(i, Types.DATE); return; } if 
(connection.binaryTransferSend(Oid.DATE)) { byte[] val = new byte[4]; TimeZone tz = cal != null ? cal.getTimeZone() : null; connection.getTimestampUtils().toBinDate(tz, val, d); preparedParameters.setBinaryParameter(i, val, Oid.DATE); return; } // We must use UNSPECIFIED here, or inserting a Date-with-timezone into a // timestamptz field does an unexpected rotation by the server's TimeZone: // // We want to interpret 2005/01/01 with calendar +0100 as // "local midnight in +0100", but if we go via date it interprets it // as local midnight in the server's timezone: // template1=# select '2005-01-01+0100'::timestamptz; // timestamptz // ------------------------ // 2005-01-01 02:00:00+03 // (1 row) // template1=# select '2005-01-01+0100'::date::timestamptz; // timestamptz // ------------------------ // 2005-01-01 00:00:00+03 // (1 row) if (cal == null) { cal = getDefaultCalendar(); } bindString(i, connection.getTimestampUtils().toString(cal, d), Oid.UNSPECIFIED); } public void setTime(int i, Time t, java.util.Calendar cal) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, i, t, cal); checkClosed(); if (t == null) { setNull(i, Types.TIME); return; } int oid = Oid.UNSPECIFIED; // If a RedshiftTime is used, we can define the OID explicitly. if (t instanceof RedshiftTime) { RedshiftTime rsTime = (RedshiftTime) t; if (rsTime.getCalendar() == null) { oid = Oid.TIME; } else { oid = Oid.TIMETZ; cal = rsTime.getCalendar(); } } if (cal == null) { cal = getDefaultCalendar(); } bindString(i, connection.getTimestampUtils().toString(cal, t), oid); } public void setTimestamp(int i, Timestamp t, java.util.Calendar cal) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, i, t, cal); checkClosed(); if (t == null) { setNull(i, Types.TIMESTAMP); return; } int oid = Oid.UNSPECIFIED; // Use UNSPECIFIED as a compromise to get both TIMESTAMP and TIMESTAMPTZ working. 
// This is because you get this in a +1300 timezone: // // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz; // timestamptz // ------------------------ // 2005-01-01 18:00:00+13 // (1 row) // template1=# select '2005-01-01 15:00:00 +1000'::timestamp; // timestamp // --------------------- // 2005-01-01 15:00:00 // (1 row) // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz::timestamp; // timestamp // --------------------- // 2005-01-01 18:00:00 // (1 row) // So we want to avoid doing a timestamptz -> timestamp conversion, as that // will first convert the timestamptz to an equivalent time in the server's // timezone (+1300, above), then turn it into a timestamp with the "wrong" // time compared to the string we originally provided. But going straight // to timestamp is OK as the input parser for timestamp just throws away // the timezone part entirely. Since we don't know ahead of time what type // we're actually dealing with, UNSPECIFIED seems the lesser evil, even if it // does give more scope for type-mismatch errors being silently hidden. // If a RedshiftTimestamp is used, we can define the OID explicitly. if (t instanceof RedshiftTimestamp) { RedshiftTimestamp rsTimestamp = (RedshiftTimestamp) t; if (rsTimestamp.getCalendar() == null) { oid = Oid.TIMESTAMP; } else { oid = Oid.TIMESTAMPTZ; cal = rsTimestamp.getCalendar(); } } if (cal == null) { cal = getDefaultCalendar(); } bindString(i, connection.getTimestampUtils().toString(cal, t), oid); } //JCP! 
if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" private void setDate(int i, LocalDate localDate) throws SQLException { int oid = Oid.DATE; bindString(i, connection.getTimestampUtils().toString(localDate), oid); } private void setTime(int i, LocalTime localTime) throws SQLException { int oid = Oid.TIME; bindString(i, connection.getTimestampUtils().toString(localTime), oid); } private void setTimestamp(int i, LocalDateTime localDateTime) throws SQLException { int oid = Oid.TIMESTAMP; bindString(i, connection.getTimestampUtils().toString(localDateTime), oid); } private void setTimestamp(int i, OffsetDateTime offsetDateTime) throws SQLException { int oid = Oid.TIMESTAMPTZ; bindString(i, connection.getTimestampUtils().toString(offsetDateTime), oid); } //JCP! endif public ParameterMetaData createParameterMetaData(BaseConnection conn, int[] oids) throws SQLException { return new RedshiftParameterMetaData(conn, oids); } //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" public void setObject(int parameterIndex, Object x, java.sql.SQLType targetSqlType, int scaleOrLength) throws SQLException { throw Driver.notImplemented(this.getClass(), "setObject"); } public void setObject(int parameterIndex, Object x, java.sql.SQLType targetSqlType) throws SQLException { throw Driver.notImplemented(this.getClass(), "setObject"); } //JCP! 
endif public void setRowId(int parameterIndex, RowId x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setRowId(int, RowId)"); } public void setNString(int parameterIndex, String value) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNString(int, String)"); } public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader, long)"); } public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader)"); } public void setCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setCharacterStream(int, Reader, long)"); } public void setCharacterStream(int parameterIndex, Reader value) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, value); if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) { String s = (value != null) ? readerToString(value, Integer.MAX_VALUE) : null; setString(parameterIndex, s); return; } InputStream is = (value != null) ? 
new ReaderInputStream(value) : null; setObject(parameterIndex, is, Types.LONGVARCHAR); } public void setBinaryStream(int parameterIndex, InputStream value, long length) throws SQLException { if (length > Integer.MAX_VALUE) { throw new RedshiftException(GT.tr("Object is too large to send over the protocol."), RedshiftState.NUMERIC_CONSTANT_OUT_OF_RANGE); } preparedParameters.setBytea(parameterIndex, value, (int) length); } public void setBinaryStream(int parameterIndex, InputStream value) throws SQLException { preparedParameters.setBytea(parameterIndex, value); } public void setAsciiStream(int parameterIndex, InputStream value, long length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream, long)"); } public void setAsciiStream(int parameterIndex, InputStream value) throws SQLException { throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream)"); } public void setNClob(int parameterIndex, NClob value) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNClob(int, NClob)"); } public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setClob(int, Reader, long)"); } public void setClob(int parameterIndex, Reader reader) throws SQLException { throw Driver.notImplemented(this.getClass(), "setClob(int, Reader)"); } public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { checkClosed(); if (inputStream == null) { setNull(parameterIndex, Types.BLOB); return; } if (length < 0) { throw new RedshiftException(GT.tr("Invalid stream length {0}.", length), RedshiftState.INVALID_PARAMETER_VALUE); } long oid = createBlob(parameterIndex, inputStream, length); setLong(parameterIndex, oid); } public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { checkClosed(); if (inputStream == null) { setNull(parameterIndex, Types.BLOB); return; } long oid = 
createBlob(parameterIndex, inputStream, -1); setLong(parameterIndex, oid); } public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader, long)"); } public void setNClob(int parameterIndex, Reader reader) throws SQLException { throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader)"); } public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, parameterIndex, xmlObject); checkClosed(); String stringValue = xmlObject == null ? null : xmlObject.getString(); if (stringValue == null) { setNull(parameterIndex, Types.SQLXML); } else { setString(parameterIndex, stringValue, Oid.XML); } } /* private void setUuid(int parameterIndex, UUID uuid) throws SQLException { if (connection.binaryTransferSend(Oid.UUID)) { byte[] val = new byte[16]; ByteConverter.int8(val, 0, uuid.getMostSignificantBits()); ByteConverter.int8(val, 8, uuid.getLeastSignificantBits()); bindBytes(parameterIndex, val, Oid.UUID); } else { bindLiteral(parameterIndex, uuid.toString(), Oid.UUID); } } */ public void setURL(int parameterIndex, java.net.URL x) throws SQLException { throw Driver.notImplemented(this.getClass(), "setURL(int,URL)"); } @Override public int[] executeBatch() throws SQLException { try { // Note: in batch prepared statements batchStatements == 1, and batchParameters is equal // to the number of addBatch calls // batchParameters might be empty in case of empty batch if (batchParameters != null && batchParameters.size() > 1 && mPrepareThreshold > 0) { // Use server-prepared statements when there's more than one statement in a batch // Technically speaking, it might cause to create a server-prepared statement // just for 2 executions even for prepareThreshold=5. That however should be // acceptable since prepareThreshold is a optimization kind of parameter. 
this.preparedQuery.increaseExecuteCount(mPrepareThreshold); } return super.executeBatch(); } finally { defaultTimeZone = null; } } private Calendar getDefaultCalendar() { TimestampUtils timestampUtils = connection.getTimestampUtils(); if (timestampUtils.hasFastDefaultTimeZone()) { return timestampUtils.getSharedCalendar(null); } Calendar sharedCalendar = timestampUtils.getSharedCalendar(defaultTimeZone); if (defaultTimeZone == null) { defaultTimeZone = sharedCalendar.getTimeZone(); } return sharedCalendar; } public ParameterMetaData getParameterMetaData() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY | QueryExecutor.QUERY_SUPPRESS_BEGIN; StatementResultHandler handler = new StatementResultHandler(this); connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0, flags); int[] oids = preparedParameters.getTypeOIDs(); ParameterMetaData rc; if (oids != null) { rc = createParameterMetaData(connection, oids); } else rc = null; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } @Override protected void transformQueriesAndParameters() throws SQLException { if (batchParameters.size() <= 1 || !(preparedQuery.query instanceof BatchedQuery)) { return; } BatchedQuery originalQuery = (BatchedQuery) preparedQuery.query; // Single query cannot have more than {@link Short#MAX_VALUE} binds, thus // the number of multi-values blocks should be capped. // Typically, it does not make much sense to batch more than 128 rows: performance // does not improve much after updating 128 statements with 1 multi-valued one, thus // we cap maximum batch size and split there. final int bindCount = originalQuery.getBindCount(); final int highestBlockCount = ((RedshiftConnectionImpl)connection).getReWriteBatchedInsertsSize(); // 128; final int maxValueBlocks = bindCount == 0 ? 
1024 /* if no binds, use 1024 rows */ : Integer.highestOneBit( // deriveForMultiBatch supports powers of two only Math.min(Math.max(1, (Short.MAX_VALUE - 1) / bindCount), highestBlockCount)); int unprocessedBatchCount = batchParameters.size(); final int fullValueBlocksCount = unprocessedBatchCount / maxValueBlocks; final int partialValueBlocksCount = Integer.bitCount(unprocessedBatchCount % maxValueBlocks); final int count = fullValueBlocksCount + partialValueBlocksCount; ArrayList<Query> newBatchStatements = new ArrayList<Query>(count); ArrayList<ParameterList> newBatchParameters = new ArrayList<ParameterList>(count); int offset = 0; for (int i = 0; i < count; i++) { int valueBlock; if (unprocessedBatchCount >= maxValueBlocks) { valueBlock = maxValueBlocks; } else { valueBlock = Integer.highestOneBit(unprocessedBatchCount); } // Find appropriate batch for block count. BatchedQuery bq = originalQuery.deriveForMultiBatch(valueBlock, highestBlockCount, connection.getLogger()); ParameterList newPl = bq.createParameterList(); for (int j = 0; j < valueBlock; j++) { ParameterList pl = batchParameters.get(offset++); newPl.appendAll(pl); } newBatchStatements.add(bq); newBatchParameters.add(newPl); unprocessedBatchCount -= valueBlock; } batchStatements = newBatchStatements; batchParameters = newBatchParameters; } }
8,516
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/Driver.java
package com.amazon.redshift.jdbc;

/**
 * Backward compatible Driver class.
 *
 * <p>This subclass exists only so that applications and connection pools that
 * are still configured with the legacy class name
 * {@code com.amazon.redshift.jdbc.Driver} keep working. It adds no behavior of
 * its own; all functionality lives in the parent
 * {@link com.amazon.redshift.Driver}.</p>
 *
 * @author iggarish
 *
 */
public class Driver extends com.amazon.redshift.Driver {

}
8,517
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/PreferQueryMode.java
/* * Copyright (c) 2016, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; /** * <p>Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only.</p> * * <p>Note: this is for debugging purposes only.</p> * * @see com.amazon.redshift.RedshiftProperty#PREFER_QUERY_MODE */ public enum PreferQueryMode { SIMPLE("simple"), EXTENDED_FOR_PREPARED("extendedForPrepared"), EXTENDED("extended"), EXTENDED_CACHE_EVERYTHING("extendedCacheEverything"); private final String value; PreferQueryMode(String value) { this.value = value; } public static PreferQueryMode of(String mode) { for (PreferQueryMode preferQueryMode : values()) { if (preferQueryMode.value.equals(mode)) { return preferQueryMode; } } return EXTENDED; } public String value() { return value; } }
8,518
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/BatchResultHandler.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.core.Field;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.Query;
import com.amazon.redshift.core.ResultCursor;
import com.amazon.redshift.core.ResultHandlerBase;
import com.amazon.redshift.core.Tuple;
import com.amazon.redshift.core.v3.BatchedQuery;
import com.amazon.redshift.core.v3.MessageLoopState;
import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.sql.BatchUpdateException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Internal class, it is not a part of public API.
 *
 * <p>Collects the per-statement results of a JDBC batch execution: update
 * counts, optional generated-key rows, warnings, and the first error (wrapped
 * as a {@link BatchUpdateException}). Results arrive through the
 * {@code handle*} callbacks in protocol order; {@code resultIndex} tracks which
 * batch entry the next callback belongs to.</p>
 */
public class BatchResultHandler extends ResultHandlerBase {
  private final RedshiftStatementImpl rsStatement;
  // Index of the batch entry the next result callback belongs to.
  private int resultIndex = 0;

  private final Query[] queries;
  // Per-entry update counts, parallel to queries.
  private final long[] longUpdateCounts;
  private final ParameterList[] parameterLists;
  // True when the caller asked for generated keys (RETURNING-style rows).
  private final boolean expectGeneratedKeys;
  private RedshiftResultSet generatedKeys;
  private int committedRows; // 0 means no rows committed. 1 means row 0 was committed, and so on
  // Generated-key rows accumulated per successful DML entry; null when keys are not expected.
  private final List<List<Tuple>> allGeneratedRows;
  // Rows seen in the most recent handleResultRows call, pending confirmation by handleCommandStatus.
  private List<Tuple> latestGeneratedRows;
  private RedshiftResultSet latestGeneratedKeysRs;

  /**
   * @param rsStatement statement the batch is executed on
   * @param queries one query per batch entry
   * @param parameterLists parameters for each entry, parallel to {@code queries}
   * @param expectGeneratedKeys whether generated-key rows should be collected
   */
  BatchResultHandler(RedshiftStatementImpl rsStatement, Query[] queries,
      ParameterList[] parameterLists, boolean expectGeneratedKeys) {
    this.rsStatement = rsStatement;
    this.queries = queries;
    this.parameterLists = parameterLists;
    this.longUpdateCounts = new long[queries.length];
    this.expectGeneratedKeys = expectGeneratedKeys;
    this.allGeneratedRows = !expectGeneratedKeys ? null : new ArrayList<List<Tuple>>();
  }

  @Override
  public void setStatementStateIdleFromInQuery() {
    rsStatement.updateStatementCancleState(StatementCancelState.IN_QUERY, StatementCancelState.IDLE);
  }

  @Override
  public void setStatementStateInQueryFromIdle() {
    rsStatement.updateStatementCancleState(StatementCancelState.IDLE, StatementCancelState.IN_QUERY);
  }

  /**
   * Receives a row block for the current batch entry. For DML with generated
   * keys the rows are parked in {@code latestGeneratedRows} until
   * {@link #handleCommandStatus} confirms the entry succeeded.
   */
  @Override
  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples, ResultCursor cursor,
      RedshiftRowsBlockingQueue<Tuple> queueTuples,
      int[] rowCount, Thread ringBufferThread) {
    // If SELECT, then handleCommandStatus call would just be missing
    resultIndex++;
    if (!expectGeneratedKeys) {
      // No rows expected -> just ignore rows
      return;
    }
    if (generatedKeys == null) {
      try {
        // If SELECT, the resulting ResultSet is not valid
        // Thus it is up to handleCommandStatus to decide if resultSet is good enough
        latestGeneratedKeysRs = (RedshiftResultSet) rsStatement.createResultSet(fromQuery, fields,
            new ArrayList<Tuple>(), cursor, queueTuples, rowCount, ringBufferThread);
      } catch (SQLException e) {
        handleError(e);
      }
    }
    latestGeneratedRows = tuples;
  }

  /**
   * Records the update count for the current entry and, for DML, commits the
   * pending generated-key rows collected by {@link #handleResultRows}.
   */
  @Override
  public void handleCommandStatus(String status, long updateCount, long insertOID) {
    if (latestGeneratedRows != null) {
      // We have DML. Decrease resultIndex that was just increased in handleResultRows
      resultIndex--;
      // If exception thrown, no need to collect generated keys
      // Note: some generated keys might be secured in generatedKeys
      if (updateCount > 0 && (getException() == null || isAutoCommit())) {
        allGeneratedRows.add(latestGeneratedRows);
        if (generatedKeys == null) {
          generatedKeys = latestGeneratedKeysRs;
        }
      }
      latestGeneratedRows = null;
    }

    if (resultIndex >= queries.length) {
      handleError(new RedshiftException(GT.tr("Too many update results were returned."),
          RedshiftState.TOO_MANY_RESULTS));
      return;
    }
    latestGeneratedKeysRs = null;

    longUpdateCounts[resultIndex++] = updateCount;
  }

  // Reads auto-commit off the owning connection; getAutoCommit is not expected to throw here.
  private boolean isAutoCommit() {
    try {
      return rsStatement.getConnection().getAutoCommit();
    } catch (SQLException e) {
      assert false : "pgStatement.getConnection().getAutoCommit() should not throw";
      return false;
    }
  }

  /**
   * In auto-commit mode, marks everything processed so far as durable so a
   * later error does not overwrite already-committed update counts.
   */
  @Override
  public void secureProgress() {
    if (isAutoCommit()) {
      committedRows = resultIndex;
      updateGeneratedKeys();
    }
  }

  // Flushes accumulated generated-key rows into the shared result set.
  private void updateGeneratedKeys() {
    if (allGeneratedRows == null || allGeneratedRows.isEmpty()) {
      return;
    }
    for (List<Tuple> rows : allGeneratedRows) {
      generatedKeys.addRows(rows);
    }
    allGeneratedRows.clear();
  }

  @Override
  public void handleWarning(SQLWarning warning) {
    rsStatement.addWarning(warning);
  }

  /**
   * On the first error: marks all not-yet-committed entries as
   * {@link Statement#EXECUTE_FAILED}, drops uncommitted generated keys, and
   * wraps the error into a {@link BatchUpdateException} naming the failing
   * entry. Subsequent errors are chained by the superclass.
   */
  @Override
  public void handleError(SQLException newError) {
    if (getException() == null) {
      Arrays.fill(longUpdateCounts, committedRows, longUpdateCounts.length, Statement.EXECUTE_FAILED);
      if (allGeneratedRows != null) {
        allGeneratedRows.clear();
      }

      String queryString = "<unknown>";
      if (resultIndex < queries.length) {
        queryString = queries[resultIndex].toString(parameterLists[resultIndex]);
      }

      BatchUpdateException batchException;
      //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
      batchException = new BatchUpdateException(
          GT.tr("Batch entry {0} {1} was aborted: {2} Call getNextException to see other errors in the batch.",
              resultIndex, queryString, newError.getMessage()),
          newError.getSQLState(), 0, uncompressLongUpdateCount(), newError);
      //JCP! else
      //JCP> batchException = new BatchUpdateException(
      //JCP> GT.tr("Batch entry {0} {1} was aborted: {2} Call getNextException to see other errors in the batch.",
      //JCP> resultIndex, queryString, newError.getMessage()),
      //JCP> newError.getSQLState(), 0, uncompressUpdateCount(), newError);
      //JCP! endif

      super.handleError(batchException);
    }
    resultIndex++;

    super.handleError(newError);
  }

  /**
   * Finalizes the batch: publishes any remaining generated keys and, if an
   * error occurred, throws the accumulated {@link BatchUpdateException}.
   *
   * @throws SQLException the batch exception collected during execution
   */
  @Override
  public void handleCompletion() throws SQLException {
    updateGeneratedKeys();
    SQLException batchException = getException();
    if (batchException != null) {
      if (isAutoCommit()) {
        // Re-create batch exception since rows after exception might indeed succeed.
        BatchUpdateException newException;
        //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
        newException = new BatchUpdateException(
            batchException.getMessage(),
            batchException.getSQLState(), 0,
            uncompressLongUpdateCount(),
            batchException.getCause()
        );
        //JCP! else
        //JCP> newException = new BatchUpdateException(
        //JCP> batchException.getMessage(),
        //JCP> batchException.getSQLState(), 0,
        //JCP> uncompressUpdateCount(),
        //JCP> batchException.getCause()
        //JCP> );
        //JCP! endif

        SQLException next = batchException.getNextException();
        if (next != null) {
          newException.setNextException(next);
        }
        batchException = newException;
      }
      throw batchException;
    }
  }

  /**
   * @return the generated-key rows collected so far, or null when none were requested/produced
   */
  public ResultSet getGeneratedKeys() {
    return generatedKeys;
  }

  // int[] view of the long update counts; values above Integer.MAX_VALUE become SUCCESS_NO_INFO.
  private int[] uncompressUpdateCount() {
    long[] original = uncompressLongUpdateCount();
    int[] copy = new int[original.length];
    for (int i = 0; i < original.length; i++) {
      copy[i] = original[i] > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) original[i];
    }
    return copy;
  }

  public int[] getUpdateCount() {
    return uncompressUpdateCount();
  }

  /**
   * Expands the per-query update counts back to one count per client-visible
   * batch row when multi-value insert rewriting collapsed several rows into a
   * single server-side statement.
   */
  private long[] uncompressLongUpdateCount() {
    if (!(queries[0] instanceof BatchedQuery)) {
      return longUpdateCounts;
    }
    int totalRows = 0;
    boolean hasRewrites = false;
    for (Query query : queries) {
      int batchSize = query.getBatchSize();
      totalRows += batchSize;
      hasRewrites |= batchSize > 1;
    }
    if (!hasRewrites) {
      return longUpdateCounts;
    }

    /* In this situation there is a batch that has been rewritten. Substitute
     * the running total returned by the database with a status code to
     * indicate successful completion for each row the driver client added
     * to the batch.
     */
    long[] newUpdateCounts = new long[totalRows];
    int offset = 0;
    for (int i = 0; i < queries.length; i++) {
      Query query = queries[i];
      int batchSize = query.getBatchSize();
      long superBatchResult = longUpdateCounts[i];
      if (batchSize == 1) {
        newUpdateCounts[offset++] = superBatchResult;
        continue;
      }
      if (superBatchResult > 0) {
        // If some rows inserted, we do not really know how did they spread over individual
        // statements
        superBatchResult = Statement.SUCCESS_NO_INFO;
      }
      Arrays.fill(newUpdateCounts, offset, offset + batchSize, superBatchResult);
      offset += batchSize;
    }
    return newUpdateCounts;
  }

  public long[] getLargeUpdateCount() {
    return uncompressLongUpdateCount();
  }
}
8,519
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftSavepoint.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.core.Utils; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.sql.SQLException; import java.sql.Savepoint; public class RedshiftSavepoint implements Savepoint { private boolean isValid; private final boolean isNamed; private int id; private String name; public RedshiftSavepoint(int id) { this.isValid = true; this.isNamed = false; this.id = id; } public RedshiftSavepoint(String name) { this.isValid = true; this.isNamed = true; this.name = name; } @Override public int getSavepointId() throws SQLException { if (!isValid) { throw new RedshiftException(GT.tr("Cannot reference a savepoint after it has been released."), RedshiftState.INVALID_SAVEPOINT_SPECIFICATION); } if (isNamed) { throw new RedshiftException(GT.tr("Cannot retrieve the id of a named savepoint."), RedshiftState.WRONG_OBJECT_TYPE); } return id; } @Override public String getSavepointName() throws SQLException { if (!isValid) { throw new RedshiftException(GT.tr("Cannot reference a savepoint after it has been released."), RedshiftState.INVALID_SAVEPOINT_SPECIFICATION); } if (!isNamed) { throw new RedshiftException(GT.tr("Cannot retrieve the name of an unnamed savepoint."), RedshiftState.WRONG_OBJECT_TYPE); } return name; } public void invalidate() { isValid = false; } public String getRSName() throws SQLException { if (!isValid) { throw new RedshiftException(GT.tr("Cannot reference a savepoint after it has been released."), RedshiftState.INVALID_SAVEPOINT_SPECIFICATION); } if (isNamed) { // We need to quote and escape the name in case it // contains spaces/quotes/etc. // return Utils.escapeIdentifier(null, name).toString(); } return "JDBC_SAVEPOINT_" + id; } }
8,520
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/AutoSave.java
/* * Copyright (c) 2005, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; public enum AutoSave { NEVER, ALWAYS, CONSERVATIVE; private final String value; AutoSave() { value = this.name().toLowerCase(); } public String value() { return value; } public static AutoSave of(String value) { return valueOf(value.toUpperCase()); } }
8,521
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/EscapeSyntaxCallMode.java
/* * Copyright (c) 2019, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; /** * <p>Specifies whether a SELECT/CALL statement is used for the underlying SQL for JDBC escape call syntax: 'select' means to * always use SELECT, 'callIfNoReturn' means to use CALL if there is no return parameter (otherwise use SELECT), and 'call' means * to always use CALL.</p> * * @see com.amazon.redshift.RedshiftProperty#ESCAPE_SYNTAX_CALL_MODE */ public enum EscapeSyntaxCallMode { SELECT("select"), CALL_IF_NO_RETURN("callIfNoReturn"), CALL("call"); private final String value; EscapeSyntaxCallMode(String value) { this.value = value; } public static EscapeSyntaxCallMode of(String mode) { for (EscapeSyntaxCallMode escapeSyntaxCallMode : values()) { if (escapeSyntaxCallMode.value.equals(mode)) { return escapeSyntaxCallMode; } } return SELECT; } public String value() { return value; } }
8,522
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/ResultWrapper.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ // Copyright (c) 2004, Open Cloud Limited. package com.amazon.redshift.jdbc; import java.sql.ResultSet; /** * Helper class that storing result info. This handles both the ResultSet and no-ResultSet result * cases with a single interface for inspecting and stepping through them. * * @author Oliver Jowett (oliver@opencloud.com) */ public class ResultWrapper { public ResultWrapper(ResultSet rs) { this.rs = rs; this.updateCount = -1; this.insertOID = -1; } public ResultWrapper(long updateCount, long insertOID) { this.rs = null; this.updateCount = updateCount; this.insertOID = insertOID; } public ResultSet getResultSet() { return rs; } public long getUpdateCount() { return updateCount; } public long getInsertOID() { return insertOID; } public ResultWrapper getNext() { return next; } public void append(ResultWrapper newResult) { ResultWrapper tail = this; while (tail.next != null) { tail = tail.next; } tail.next = newResult; } private final ResultSet rs; private final long updateCount; private final long insertOID; private ResultWrapper next; }
8,523
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftArray.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.core.BaseStatement; import com.amazon.redshift.core.Encoding; import com.amazon.redshift.core.Field; import com.amazon.redshift.core.Oid; import com.amazon.redshift.core.Tuple; import com.amazon.redshift.jdbc2.ArrayAssistant; import com.amazon.redshift.jdbc2.ArrayAssistantRegistry; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.ByteConverter; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.IOException; import java.math.BigDecimal; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.ArrayList; import java.util.List; import java.util.Map; /** * <p>Array is used collect one column of query result data.</p> * * <p>Read a field of type Array into either a natively-typed Java array object or a ResultSet. * Accessor methods provide the ability to capture array slices.</p> * * <p>Other than the constructor all methods are direct implementations of those specified for * java.sql.Array. Please refer to the javadoc for java.sql.Array for detailed descriptions of the * functionality and parameters of the methods of this class.</p> * * @see ResultSet#getArray */ public class RedshiftArray implements java.sql.Array { static { ArrayAssistantRegistry.register(Oid.UUID, new UUIDArrayAssistant()); ArrayAssistantRegistry.register(Oid.UUID_ARRAY, new UUIDArrayAssistant()); } /** * Array list implementation specific for storing RS array elements. */ private static class RsArrayList extends ArrayList<Object> { private static final long serialVersionUID = 2052783752654562677L; /** * How many dimensions. 
*/ int dimensionsCount = 1; } /** * A database connection. */ protected BaseConnection connection = null; /** * The OID of this field. */ private int oid; /** * Field value as String. */ protected String fieldString = null; /** * Whether Object[] should be used instead primitive arrays. Object[] can contain null elements. * It should be set to <Code>true</Code> if * {@link BaseConnection#haveMinimumCompatibleVersion(String)} returns <Code>true</Code> for * argument "8.3". */ private final boolean useObjects; /** * Value of field as {@link RsArrayList}. Will be initialized only once within * {@link #buildArrayList()}. */ protected RsArrayList arrayList; protected byte[] fieldBytes; private RedshiftArray(BaseConnection connection, int oid) throws SQLException { this.connection = connection; this.oid = oid; this.useObjects = true; } /** * Create a new Array. * * @param connection a database connection * @param oid the oid of the array datatype * @param fieldString the array data in string form * @throws SQLException if something wrong happens */ public RedshiftArray(BaseConnection connection, int oid, String fieldString) throws SQLException { this(connection, oid); this.fieldString = fieldString; } /** * Create a new Array. 
* * @param connection a database connection * @param oid the oid of the array datatype * @param fieldBytes the array data in byte form * @throws SQLException if something wrong happens */ public RedshiftArray(BaseConnection connection, int oid, byte[] fieldBytes) throws SQLException { this(connection, oid); this.fieldBytes = fieldBytes; } public Object getArray() throws SQLException { return getArrayImpl(1, 0, null); } public Object getArray(long index, int count) throws SQLException { return getArrayImpl(index, count, null); } public Object getArrayImpl(Map<String, Class<?>> map) throws SQLException { return getArrayImpl(1, 0, map); } public Object getArray(Map<String, Class<?>> map) throws SQLException { return getArrayImpl(map); } public Object getArray(long index, int count, Map<String, Class<?>> map) throws SQLException { return getArrayImpl(index, count, map); } public Object getArrayImpl(long index, int count, Map<String, Class<?>> map) throws SQLException { // for now maps aren't supported. 
if (map != null && !map.isEmpty()) { throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getArrayImpl(long,int,Map)"); } // array index is out of range if (index < 1) { throw new RedshiftException(GT.tr("The array index is out of range: {0}", index), RedshiftState.DATA_ERROR); } if (fieldBytes != null) { return readBinaryArray((int) index, count); } if (fieldString == null) { return null; } buildArrayList(); if (count == 0) { count = arrayList.size(); } // array index out of range if ((--index) + count > arrayList.size()) { throw new RedshiftException( GT.tr("The array index is out of range: {0}, number of elements: {1}.", index + count, (long) arrayList.size()), RedshiftState.DATA_ERROR); } return buildArray(arrayList, (int) index, count); } private Object readBinaryArray(int index, int count) throws SQLException { int dimensions = ByteConverter.int4(fieldBytes, 0); // int flags = ByteConverter.int4(fieldBytes, 4); // bit 0: 0=no-nulls, 1=has-nulls int elementOid = ByteConverter.int4(fieldBytes, 8); int pos = 12; int[] dims = new int[dimensions]; for (int d = 0; d < dimensions; ++d) { dims[d] = ByteConverter.int4(fieldBytes, pos); pos += 4; /* int lbound = ByteConverter.int4(fieldBytes, pos); */ pos += 4; } if (dimensions == 0) { return java.lang.reflect.Array.newInstance(elementOidToClass(elementOid), 0); } if (count > 0) { dims[0] = Math.min(count, dims[0]); } Object arr = java.lang.reflect.Array.newInstance(elementOidToClass(elementOid), dims); try { storeValues((Object[]) arr, elementOid, dims, pos, 0, index); } catch (IOException ioe) { throw new RedshiftException( GT.tr( "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."), RedshiftState.DATA_ERROR, ioe); } return arr; } private int storeValues(final Object[] arr, int elementOid, final int[] dims, int pos, final int thisDimension, int index) throws SQLException, IOException { if (thisDimension == dims.length - 1) { for (int i = 1; i < index; ++i) { int len = ByteConverter.int4(fieldBytes, pos); pos += 4; if (len != -1) { pos += len; } } for (int i = 0; i < dims[thisDimension]; ++i) { int len = ByteConverter.int4(fieldBytes, pos); pos += 4; if (len == -1) { continue; } switch (elementOid) { case Oid.INT2: arr[i] = ByteConverter.int2(fieldBytes, pos); break; case Oid.INT4: arr[i] = ByteConverter.int4(fieldBytes, pos); break; case Oid.INT8: arr[i] = ByteConverter.int8(fieldBytes, pos); break; case Oid.FLOAT4: arr[i] = ByteConverter.float4(fieldBytes, pos); break; case Oid.FLOAT8: arr[i] = ByteConverter.float8(fieldBytes, pos); break; case Oid.NUMERIC: arr[i] = ByteConverter.numeric(fieldBytes, pos, len); break; case Oid.TEXT: case Oid.VARCHAR: case Oid.CHAR: case Oid.ACLITEM: Encoding encoding = connection.getEncoding(); arr[i] = encoding.decode(fieldBytes, pos, len); break; case Oid.BOOL: arr[i] = ByteConverter.bool(fieldBytes, pos); break; default: ArrayAssistant arrAssistant = ArrayAssistantRegistry.getAssistant(elementOid); if (arrAssistant != null) { arr[i] = arrAssistant.buildElement(fieldBytes, pos, len); } } pos += len; } } else { for (int i = 0; i < dims[thisDimension]; ++i) { pos = storeValues((Object[]) arr[i], elementOid, dims, pos, thisDimension + 1, 0); } } return pos; } private ResultSet readBinaryResultSet(int index, int count) throws SQLException { int dimensions = ByteConverter.int4(fieldBytes, 0); // int flags = ByteConverter.int4(fieldBytes, 4); // bit 0: 0=no-nulls, 1=has-nulls int elementOid = ByteConverter.int4(fieldBytes, 8); int pos = 12; int[] dims = new int[dimensions]; for (int d = 0; d < dimensions; ++d) { dims[d] = 
ByteConverter.int4(fieldBytes, pos); pos += 4; /* int lbound = ByteConverter.int4(fieldBytes, pos); */ pos += 4; } if (count > 0 && dimensions > 0) { dims[0] = Math.min(count, dims[0]); } List<Tuple> rows = new ArrayList<Tuple>(); Field[] fields = new Field[2]; storeValues(rows, fields, elementOid, dims, pos, 0, index); BaseStatement stat = (BaseStatement) connection .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); return stat.createDriverResultSet(fields, rows); } private int storeValues(List<Tuple> rows, Field[] fields, int elementOid, final int[] dims, int pos, final int thisDimension, int index) throws SQLException { // handle an empty array if (dims.length == 0) { fields[0] = new Field("INDEX", Oid.INT4); fields[0].setFormat(Field.BINARY_FORMAT); fields[1] = new Field("VALUE", elementOid); fields[1].setFormat(Field.BINARY_FORMAT); for (int i = 1; i < index; ++i) { int len = ByteConverter.int4(fieldBytes, pos); pos += 4; if (len != -1) { pos += len; } } } else if (thisDimension == dims.length - 1) { fields[0] = new Field("INDEX", Oid.INT4); fields[0].setFormat(Field.BINARY_FORMAT); fields[1] = new Field("VALUE", elementOid); fields[1].setFormat(Field.BINARY_FORMAT); for (int i = 1; i < index; ++i) { int len = ByteConverter.int4(fieldBytes, pos); pos += 4; if (len != -1) { pos += len; } } for (int i = 0; i < dims[thisDimension]; ++i) { byte[][] rowData = new byte[2][]; rowData[0] = new byte[4]; ByteConverter.int4(rowData[0], 0, i + index); rows.add(new Tuple(rowData)); int len = ByteConverter.int4(fieldBytes, pos); pos += 4; if (len == -1) { continue; } rowData[1] = new byte[len]; System.arraycopy(fieldBytes, pos, rowData[1], 0, rowData[1].length); pos += len; } } else { fields[0] = new Field("INDEX", Oid.INT4); fields[0].setFormat(Field.BINARY_FORMAT); fields[1] = new Field("VALUE", oid); fields[1].setFormat(Field.BINARY_FORMAT); int nextDimension = thisDimension + 1; int dimensionsLeft = dims.length - nextDimension; for (int i = 
1; i < index; ++i) { pos = calcRemainingDataLength(dims, pos, elementOid, nextDimension); } for (int i = 0; i < dims[thisDimension]; ++i) { byte[][] rowData = new byte[2][]; rowData[0] = new byte[4]; ByteConverter.int4(rowData[0], 0, i + index); rows.add(new Tuple(rowData)); int dataEndPos = calcRemainingDataLength(dims, pos, elementOid, nextDimension); int dataLength = dataEndPos - pos; rowData[1] = new byte[12 + 8 * dimensionsLeft + dataLength]; ByteConverter.int4(rowData[1], 0, dimensionsLeft); System.arraycopy(fieldBytes, 4, rowData[1], 4, 8); System.arraycopy(fieldBytes, 12 + nextDimension * 8, rowData[1], 12, dimensionsLeft * 8); System.arraycopy(fieldBytes, pos, rowData[1], 12 + dimensionsLeft * 8, dataLength); pos = dataEndPos; } } return pos; } private int calcRemainingDataLength(int[] dims, int pos, int elementOid, int thisDimension) { if (thisDimension == dims.length - 1) { for (int i = 0; i < dims[thisDimension]; ++i) { int len = ByteConverter.int4(fieldBytes, pos); pos += 4; if (len == -1) { continue; } pos += len; } } else { pos = calcRemainingDataLength(dims, elementOid, pos, thisDimension + 1); } return pos; } private Class<?> elementOidToClass(int oid) throws SQLException { switch (oid) { case Oid.INT2: return Short.class; case Oid.INT4: return Integer.class; case Oid.INT8: return Long.class; case Oid.FLOAT4: return Float.class; case Oid.FLOAT8: return Double.class; case Oid.NUMERIC: return BigDecimal.class; case Oid.TEXT: case Oid.VARCHAR: case Oid.CHAR: case Oid.ACLITEM: return String.class; case Oid.BOOL: return Boolean.class; default: ArrayAssistant arrElemBuilder = ArrayAssistantRegistry.getAssistant(oid); if (arrElemBuilder != null) { return arrElemBuilder.baseType(); } throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "readBinaryArray(data,oid)"); } } /** * Build {@link ArrayList} from field's string input. As a result of this method * {@link #arrayList} is build. 
Method can be called many times in order to make sure that array
 * list is ready to use, however {@link #arrayList} will be set only once during first call.
 */
private synchronized void buildArrayList() throws SQLException {
  if (arrayList != null) {
    return; // already parsed on an earlier call
  }
  arrayList = new RsArrayList();
  // Delimiter between elements of this array type (',' for almost everything).
  char delim = connection.getTypeInfo().getArrayDelimiter(oid);
  if (fieldString != null) {
    char[] chars = fieldString.toCharArray();
    StringBuilder buffer = null;      // accumulates the current element's text; null = not in an element
    boolean insideString = false;     // true while between double quotes
    boolean wasInsideString = false;  // needed for checking if NULL
                                      // value occurred
    List<RsArrayList> dims = new ArrayList<RsArrayList>(); // array dimension arrays (stack of open sub-arrays)
    RsArrayList curArray = arrayList; // currently processed array

    // Starting with 8.0 non-standard (beginning index
    // isn't 1) bounds the dimensions are returned in the
    // data formatted like so "[0:3]={0,1,2,3,4}".
    // Older versions simply do not return the bounds.
    //
    // Right now we ignore these bounds, but we could
    // consider allowing these index values to be used
    // even though the JDBC spec says 1 is the first
    // index. I'm not sure what a client would like
    // to see, so we just retain the old behavior.
    int startOffset = 0;
    {
      if (chars[0] == '[') {
        // Skip the "[lo:hi]=" bounds prefix entirely.
        while (chars[startOffset] != '=') {
          startOffset++;
        }
        startOffset++; // skip =
      }
    }

    for (int i = startOffset; i < chars.length; i++) {
      // escape character that we need to skip
      if (chars[i] == '\\') {
        i++;
      } else if (!insideString && chars[i] == '{') {
        // subarray start
        if (dims.isEmpty()) {
          dims.add(arrayList);
        } else {
          // Nested '{' opens a child array; attach it to its parent and push it.
          RsArrayList a = new RsArrayList();
          RsArrayList p = dims.get(dims.size() - 1);
          p.add(a);
          dims.add(a);
        }
        curArray = dims.get(dims.size() - 1);

        // number of dimensions
        {
          // Count consecutive '{' (ignoring whitespace) to record how deeply
          // nested this sub-array is.
          for (int t = i + 1; t < chars.length; t++) {
            if (Character.isWhitespace(chars[t])) {
              continue;
            } else if (chars[t] == '{') {
              curArray.dimensionsCount++;
            } else {
              break;
            }
          }
        }

        buffer = new StringBuilder();
        continue;
      } else if (chars[i] == '"') {
        // quoted element
        insideString = !insideString;
        wasInsideString = true;
        continue;
      } else if (!insideString && Character.isWhitespace(chars[i])) {
        // white space
        continue;
      } else if ((!insideString && (chars[i] == delim || chars[i] == '}'))
          || i == chars.length - 1) {
        // array end or element end
        // when character that is a part of array element
        if (chars[i] != '"' && chars[i] != '}' && chars[i] != delim && buffer != null) {
          buffer.append(chars[i]);
        }

        String b = buffer == null ? null : buffer.toString();

        // add element to current array
        if (b != null && (!b.isEmpty() || wasInsideString)) {
          // Unquoted literal NULL means SQL NULL; a quoted "NULL" is the string.
          curArray.add(!wasInsideString && b.equals("NULL") ? null : b);
        }

        wasInsideString = false;
        buffer = new StringBuilder();

        // when end of an array
        if (chars[i] == '}') {
          dims.remove(dims.size() - 1);

          // when multi-dimension
          if (!dims.isEmpty()) {
            curArray = dims.get(dims.size() - 1);
          }

          buffer = null;
        }

        continue;
      }

      if (buffer != null) {
        buffer.append(chars[i]);
      }
    }
  }
}

/**
 * Convert {@link ArrayList} to array.
 *
 * @param input list to be converted into array
 */
private Object buildArray(RsArrayList input, int index, int count) throws SQLException {
  // count < 0 means "everything from index to the end".
  if (count < 0) {
    count = input.size();
  }

  // array to be returned
  Object ret = null;

  // how many dimensions
  int dims = input.dimensionsCount;

  // dimensions length array (to be used with java.lang.reflect.Array.newInstance(Class<?>,
  // int[]))
  int[] dimsLength = dims > 1 ? new int[dims] : null;
  if (dims > 1) {
    for (int i = 0; i < dims; i++) {
      // Only the outermost length is known up front; inner lengths are filled
      // by the recursive buildArray calls, so they start at 0 here.
      dimsLength[i] = (i == 0 ? count : 0);
    }
  }

  // array elements counter
  int length = 0;

  // array elements type
  final int type =
      connection.getTypeInfo().getSQLType(connection.getTypeInfo().getRSArrayElement(oid));

  // Each branch below allocates either a primitive array (pa) or an object
  // array (oa) depending on useObjects/dimensionality, then fills it; NULL
  // elements become null (object arrays) or the type's zero value (primitives).
  if (type == Types.BIT) {
    boolean[] pa = null; // primitive array
    Object[] oa = null; // objects array

    if (dims > 1 || useObjects) {
      ret = oa = (dims > 1
          ? (Object[]) java.lang.reflect.Array
              .newInstance(useObjects ? Boolean.class : boolean.class, dimsLength)
          : new Boolean[count]);
    } else {
      ret = pa = new boolean[count];
    }

    // add elements
    for (; count > 0; count--) {
      Object o = input.get(index++);

      if (dims > 1 || useObjects) {
        oa[length++] = o == null ? null
            : (dims > 1 ? buildArray((RsArrayList) o, 0, -1)
                : BooleanTypeUtil.castToBoolean((String) o));
      } else {
        pa[length++] = o == null ? false : BooleanTypeUtil.castToBoolean((String) o);
      }
    }
  } else if (type == Types.SMALLINT) {
    short[] pa = null;
    Object[] oa = null;

    if (dims > 1 || useObjects) {
      ret = oa = (dims > 1
          ? (Object[]) java.lang.reflect.Array
              .newInstance(useObjects ? Short.class : short.class, dimsLength)
          : new Short[count]);
    } else {
      ret = pa = new short[count];
    }

    for (; count > 0; count--) {
      Object o = input.get(index++);

      if (dims > 1 || useObjects) {
        oa[length++] = o == null ? null
            : (dims > 1 ? buildArray((RsArrayList) o, 0, -1)
                : RedshiftResultSet.toShort((String) o));
      } else {
        pa[length++] = o == null ? 0 : RedshiftResultSet.toShort((String) o);
      }
    }
  } else if (type == Types.INTEGER) {
    int[] pa = null;
    Object[] oa = null;

    if (dims > 1 || useObjects) {
      ret = oa = (dims > 1
          ? (Object[]) java.lang.reflect.Array
              .newInstance(useObjects ? Integer.class : int.class, dimsLength)
          : new Integer[count]);
    } else {
      ret = pa = new int[count];
    }

    for (; count > 0; count--) {
      Object o = input.get(index++);

      if (dims > 1 || useObjects) {
        oa[length++] = o == null ? null
            : (dims > 1 ? buildArray((RsArrayList) o, 0, -1)
                : RedshiftResultSet.toInt((String) o));
      } else {
        pa[length++] = o == null ? 0 : RedshiftResultSet.toInt((String) o);
      }
    }
  } else if (type == Types.BIGINT) {
    long[] pa = null;
    Object[] oa = null;

    if (dims > 1 || useObjects) {
      ret = oa = (dims > 1
          ? (Object[]) java.lang.reflect.Array
              .newInstance(useObjects ? Long.class : long.class, dimsLength)
          : new Long[count]);
    } else {
      ret = pa = new long[count];
    }

    for (; count > 0; count--) {
      Object o = input.get(index++);

      if (dims > 1 || useObjects) {
        oa[length++] = o == null ? null
            : (dims > 1 ? buildArray((RsArrayList) o, 0, -1)
                : RedshiftResultSet.toLong((String) o));
      } else {
        pa[length++] = o == null ? 0L : RedshiftResultSet.toLong((String) o);
      }
    }
  } else if (type == Types.NUMERIC) {
    Object[] oa = null;
    ret = oa = (dims > 1
        ? (Object[]) java.lang.reflect.Array.newInstance(BigDecimal.class, dimsLength)
        : new BigDecimal[count]);

    for (; count > 0; count--) {
      Object v = input.get(index++);
      oa[length++] = dims > 1 && v != null ? buildArray((RsArrayList) v, 0, -1)
          : (v == null ? null : RedshiftResultSet.toBigDecimal((String) v));
    }
  } else if (type == Types.REAL) {
    float[] pa = null;
    Object[] oa = null;

    if (dims > 1 || useObjects) {
      ret = oa = (dims > 1
          ? (Object[]) java.lang.reflect.Array
              .newInstance(useObjects ? Float.class : float.class, dimsLength)
          : new Float[count]);
    } else {
      ret = pa = new float[count];
    }

    for (; count > 0; count--) {
      Object o = input.get(index++);

      if (dims > 1 || useObjects) {
        oa[length++] = o == null ? null
            : (dims > 1 ? buildArray((RsArrayList) o, 0, -1)
                : RedshiftResultSet.toFloat((String) o));
      } else {
        pa[length++] = o == null ? 0f : RedshiftResultSet.toFloat((String) o);
      }
    }
  } else if (type == Types.DOUBLE) {
    double[] pa = null;
    Object[] oa = null;

    if (dims > 1 || useObjects) {
      ret = oa = (dims > 1
          ? (Object[]) java.lang.reflect.Array
              .newInstance(useObjects ? Double.class : double.class, dimsLength)
          : new Double[count]);
    } else {
      ret = pa = new double[count];
    }

    for (; count > 0; count--) {
      Object o = input.get(index++);

      if (dims > 1 || useObjects) {
        oa[length++] = o == null ? null
            : (dims > 1 ? buildArray((RsArrayList) o, 0, -1)
                : RedshiftResultSet.toDouble((String) o));
      } else {
        pa[length++] = o == null ? 0d : RedshiftResultSet.toDouble((String) o);
      }
    }
  } else if (type == Types.CHAR || type == Types.VARCHAR || oid == Oid.JSONB_ARRAY) {
    Object[] oa = null;
    ret = oa = (dims > 1
        ? (Object[]) java.lang.reflect.Array.newInstance(String.class, dimsLength)
        : new String[count]);

    for (; count > 0; count--) {
      Object v = input.get(index++);
      oa[length++] = dims > 1 && v != null ? buildArray((RsArrayList) v, 0, -1) : v;
    }
  } else if (type == Types.DATE) {
    Object[] oa = null;
    ret = oa = (dims > 1
        ? (Object[]) java.lang.reflect.Array.newInstance(java.sql.Date.class, dimsLength)
        : new java.sql.Date[count]);

    for (; count > 0; count--) {
      Object v = input.get(index++);
      oa[length++] = dims > 1 && v != null ? buildArray((RsArrayList) v, 0, -1)
          : (v == null ? null : connection.getTimestampUtils().toDate(null, (String) v));
    }
  } else if (type == Types.TIME) {
    Object[] oa = null;
    ret = oa = (dims > 1
        ? (Object[]) java.lang.reflect.Array.newInstance(java.sql.Time.class, dimsLength)
        : new java.sql.Time[count]);

    for (; count > 0; count--) {
      Object v = input.get(index++);
      oa[length++] = dims > 1 && v != null ? buildArray((RsArrayList) v, 0, -1)
          : (v == null ? null : connection.getTimestampUtils().toTime(null, (String) v));
    }
  } else if (type == Types.TIMESTAMP) {
    Object[] oa = null;
    ret = oa = (dims > 1
        ? (Object[]) java.lang.reflect.Array.newInstance(java.sql.Timestamp.class, dimsLength)
        : new java.sql.Timestamp[count]);

    for (; count > 0; count--) {
      Object v = input.get(index++);
      oa[length++] = dims > 1 && v != null ? buildArray((RsArrayList) v, 0, -1)
          : (v == null ? null : connection.getTimestampUtils().toTimestamp(null, (String) v));
    }
  } else if (ArrayAssistantRegistry.getAssistant(oid) != null) {
    // User-registered assistant decodes element values for OIDs we don't know.
    ArrayAssistant arrAssistant = ArrayAssistantRegistry.getAssistant(oid);
    Object[] oa = null;
    ret = oa = (dims > 1)
        ? (Object[]) java.lang.reflect.Array.newInstance(arrAssistant.baseType(), dimsLength)
        : (Object[]) java.lang.reflect.Array.newInstance(arrAssistant.baseType(), count);

    for (; count > 0; count--) {
      Object v = input.get(index++);
      oa[length++] = (dims > 1 && v != null) ? buildArray((RsArrayList) v, 0, -1)
          : (v == null ? null : arrAssistant.buildElement((String) v));
    }
  } else if (dims == 1) {
    // Fallback: delegate single-dimension decoding to the connection's
    // generic object mapping for the base type.
    Object[] oa = new Object[count];
    String typeName = getBaseTypeName();
    for (; count > 0; count--) {
      Object v = input.get(index++);
      if (v instanceof String) {
        oa[length++] = connection.getObject(typeName, (String) v, null);
      } else if (v instanceof byte[]) {
        oa[length++] = connection.getObject(typeName, null, (byte[]) v);
      } else if (v == null) {
        oa[length++] = null;
      } else {
        throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getArrayImpl(long,int,Map)");
      }
    }
    ret = oa;
  } else {
    // other datatypes not currently supported
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "getArrayImpl(long,int,Map) with {0}", getBaseTypeName());

    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getArrayImpl(long,int,Map)");
  }

  return ret;
}

/**
 * Returns the JDBC {@link Types} code of this array's element type.
 */
public int getBaseType() throws SQLException {
  return connection.getTypeInfo().getSQLType(getBaseTypeName());
}

public String getBaseTypeName() throws
SQLException {
  buildArrayList();
  int elementOID = connection.getTypeInfo().getRSArrayElement(oid);
  return connection.getTypeInfo().getRSType(elementOID);
}

// getResultSet overloads all funnel into getResultSetImpl(long, int, Map).
public java.sql.ResultSet getResultSet() throws SQLException {
  return getResultSetImpl(1, 0, null);
}

public java.sql.ResultSet getResultSet(long index, int count) throws SQLException {
  return getResultSetImpl(index, count, null);
}

public ResultSet getResultSet(Map<String, Class<?>> map) throws SQLException {
  return getResultSetImpl(map);
}

public ResultSet getResultSet(long index, int count, Map<String, Class<?>> map)
    throws SQLException {
  return getResultSetImpl(index, count, map);
}

public ResultSet getResultSetImpl(Map<String, Class<?>> map) throws SQLException {
  return getResultSetImpl(1, 0, map);
}

/**
 * Materializes a slice of this array as a two-column (INDEX, VALUE) result set.
 *
 * @param index 1-based start index into the array
 * @param count number of elements to return; 0 means "to the end"
 * @param map   type map — not supported, must be null or empty
 */
public ResultSet getResultSetImpl(long index, int count, Map<String, Class<?>> map)
    throws SQLException {

  // for now maps aren't supported.
  if (map != null && !map.isEmpty()) {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getResultSetImpl(long,int,Map)");
  }

  // array index is out of range
  if (index < 1) {
    throw new RedshiftException(GT.tr("The array index is out of range: {0}", index),
        RedshiftState.DATA_ERROR);
  }

  if (fieldBytes != null) {
    return readBinaryResultSet((int) index, count);
  }

  buildArrayList();

  if (count == 0) {
    count = arrayList.size();
  }

  // array index out of range
  // NOTE: --index converts to a 0-based offset as a side effect.
  if ((--index) + count > arrayList.size()) {
    throw new RedshiftException(
        GT.tr("The array index is out of range: {0}, number of elements: {1}.",
            index + count, (long) arrayList.size()),
        RedshiftState.DATA_ERROR);
  }

  List<Tuple> rows = new ArrayList<Tuple>();

  Field[] fields = new Field[2];

  // one dimensional array
  if (arrayList.dimensionsCount <= 1) {
    // array element type
    final int baseOid = connection.getTypeInfo().getRSArrayElement(oid);

    fields[0] = new Field("INDEX", Oid.INT4);
    fields[1] = new Field("VALUE", baseOid);

    for (int i = 0; i < count; i++) {
      int offset = (int) index + i;
      byte[][] t = new byte[2][0];
      String v = (String) arrayList.get(offset);
      // 1-based index in column 0, element text (or NULL) in column 1.
      t[0] = connection.encodeString(Integer.toString(offset + 1));
      t[1] = v == null ? null : connection.encodeString(v);
      rows.add(new Tuple(t));
    }
  } else {
    // when multi-dimensional
    fields[0] = new Field("INDEX", Oid.INT4);
    fields[1] = new Field("VALUE", oid);

    for (int i = 0; i < count; i++) {
      int offset = (int) index + i;
      byte[][] t = new byte[2][0];
      Object v = arrayList.get(offset);

      t[0] = connection.encodeString(Integer.toString(offset + 1));
      // Sub-arrays are re-serialized to their text form (e.g. "{1,2}").
      t[1] = v == null ? null : connection.encodeString(toString((RsArrayList) v));
      rows.add(new Tuple(t));
    }
  }

  BaseStatement stat = (BaseStatement) connection
      .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
  return stat.createDriverResultSet(fields, rows);
}

/**
 * Lazily renders the text form of a binary-only value; cached in fieldString.
 */
public String toString() {
  if (fieldString == null && fieldBytes != null) {
    try {
      Object array = readBinaryArray(1, 0);

      final PrimitiveArraySupport arraySupport = PrimitiveArraySupport.getArraySupport(array);
      if (arraySupport != null) {
        fieldString =
            arraySupport.toArrayString(connection.getTypeInfo().getArrayDelimiter(oid), array);
      } else {
        java.sql.Array tmpArray = connection.createArrayOf(getBaseTypeName(), (Object[]) array);
        fieldString = tmpArray.toString();
      }
    } catch (SQLException e) {
      fieldString = "NULL"; // punt
    }
  }
  return fieldString;
}

/**
 * Convert array list to RS String representation (e.g. {0,1,2}).
*/ private String toString(RsArrayList list) throws SQLException { if (list == null) { return "NULL"; } StringBuilder b = new StringBuilder().append('{'); char delim = connection.getTypeInfo().getArrayDelimiter(oid); for (int i = 0; i < list.size(); i++) { Object v = list.get(i); if (i > 0) { b.append(delim); } if (v == null) { b.append("NULL"); } else if (v instanceof RsArrayList) { b.append(toString((RsArrayList) v)); } else { escapeArrayElement(b, (String) v); } } b.append('}'); return b.toString(); } public static void escapeArrayElement(StringBuilder b, String s) { b.append('"'); for (int j = 0; j < s.length(); j++) { char c = s.charAt(j); if (c == '"' || c == '\\') { b.append('\\'); } b.append(c); } b.append('"'); } public boolean isBinary() { return fieldBytes != null; } public byte[] toBytes() { return fieldBytes; } public void free() throws SQLException { connection = null; fieldString = null; fieldBytes = null; arrayList = null; } }
8,524
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/TypeInfoCache.java
/*
 * Copyright (c) 2005, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.core.BaseStatement;
import com.amazon.redshift.core.Oid;
import com.amazon.redshift.core.QueryExecutor;
import com.amazon.redshift.core.ServerVersion;
import com.amazon.redshift.core.TypeInfo;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftObject;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

/**
 * Per-connection cache of Redshift type metadata: maps between type names,
 * OIDs, JDBC {@link Types} codes and Java class names, seeded from the static
 * {@code types} table and extended lazily from pg_catalog queries.
 */
public class TypeInfoCache implements TypeInfo {

  // rsname (String) -> java.sql.Types (Integer)
  private Map<String, Integer> rsNameToSQLType;

  // rsname (String) -> java class name (String)
  // ie "text" -> "java.lang.String"
  private Map<String, String> rsNameToJavaClass;

  // oid (Integer) -> rsname (String)
  private Map<Integer, String> oidToRsName;

  // rsname (String) -> oid (Integer)
  private Map<String, Integer> rsNameToOid;

  // rsname (String) -> extension rsobject (Class)
  private Map<String, Class<? extends RedshiftObject>> rsNameToRsObject;

  // type array oid -> base type's oid
  private Map<Integer, Integer> rsArrayToRsType;

  // array type oid -> base type array element delimiter
  private Map<Integer, Character> arrayOidToDelimiter;

  private BaseConnection conn;
  private final int unknownLength;

  // Lazily-prepared catalog lookup statements, reused across calls.
  private PreparedStatement getOidStatementSimple;
  private PreparedStatement getOidStatementComplexNonArray;
  private PreparedStatement getOidStatementComplexArray;
  private PreparedStatement getNameStatement;
  private PreparedStatement getArrayElementOidStatement;
  private PreparedStatement getArrayDelimiterStatement;
  private PreparedStatement getTypeInfoStatement;
  private PreparedStatement getAllTypeInfoStatement;

  // Geometry
  public static final String GEOMETRY_NAME = "geometry";
  public static final int GEOMETRYOID = Oid.GEOMETRY;
  public static final int GEOMETRYHEXOID = Oid.GEOMETRYHEX;

  // super (previous name Omni)
  public static final String SUPER_NAME = "super";
  public static final int SUPEROID = Oid.SUPER;

  public static final String VARBYTE_NAME = "varbyte";
  public static final int VARBYTEOID = Oid.VARBYTE;

  public static final String GEOGRAPHY_NAME = "geography";
  public static final int GEOGRAPHYOID = Oid.GEOGRAPHY;

  public static final String TID_NAME = "tid";
  public static final String TID_ARRAY_NAME = "_tid";
  public static final String XID_NAME = "xid";
  public static final String XID_ARRAY_NAME = "_xid";

  // basic rs types info:
  // 0 - type name
  // 1 - type oid
  // 2 - sql type
  // 3 - java class
  // 4 - array type oid
  private static final Object[][] types = {
      // Aliases of sql types first, so map has actual type later in the array
      {"oid", Oid.OID, Types.BIGINT, "java.lang.Long", Oid.OID_ARRAY},
      {"money", Oid.MONEY, Types.DOUBLE, "java.lang.Double", Oid.MONEY_ARRAY},
      {"double precision", Oid.FLOAT8, Types.DOUBLE, "java.lang.Double", Oid.FLOAT8_ARRAY},
      {"bpchar", Oid.BPCHAR, Types.CHAR, "java.lang.String", Oid.BPCHAR_ARRAY},
      {"text", Oid.TEXT, Types.VARCHAR, "java.lang.String", Oid.TEXT_ARRAY},
      {"name", Oid.NAME, Types.VARCHAR, "java.lang.String", Oid.NAME_ARRAY},
      {"character varying", Oid.VARCHAR, Types.VARCHAR, "java.lang.String", Oid.VARCHAR_ARRAY},
      {"bit", Oid.BIT, Types.BIT, "java.lang.Boolean", Oid.BIT_ARRAY},
      {"time without time zone", Oid.TIME, Types.TIME, "java.sql.Time", Oid.TIME_ARRAY},
      {"timestamp without time zone", Oid.TIMESTAMP, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMP_ARRAY},
      {"timestamp with time zone", Oid.TIMESTAMPTZ, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMPTZ_ARRAY},
      {GEOMETRY_NAME, Oid.GEOMETRYHEX, Types.LONGVARBINARY, "[B", Oid.GEOMETRYHEX_ARRAY},
      {XID_NAME, Oid.XIDOID, Types.BIGINT, "java.lang.Long", Oid.XIDARRAYOID},
      {TID_NAME, Oid.TIDOID, Types.VARCHAR, "java.lang.String", Oid.TIDARRAYOID},
      {"abstime", Oid.ABSTIMEOID, Types.TIMESTAMP, "java.sql.Timestamp", Oid.ABSTIMEARRAYOID},

      // Actual types
      {"int2", Oid.INT2, Types.SMALLINT, "java.lang.Integer", Oid.INT2_ARRAY},
      {"int4", Oid.INT4, Types.INTEGER, "java.lang.Integer", Oid.INT4_ARRAY},
      {"int8", Oid.INT8, Types.BIGINT, "java.lang.Long", Oid.INT8_ARRAY},
      {"numeric", Oid.NUMERIC, Types.NUMERIC, "java.math.BigDecimal", Oid.NUMERIC_ARRAY},
      {"float4", Oid.FLOAT4, Types.REAL, "java.lang.Float", Oid.FLOAT4_ARRAY},
      {"float8", Oid.FLOAT8, Types.DOUBLE, "java.lang.Double", Oid.FLOAT8_ARRAY},
      {"char", Oid.CHAR, Types.CHAR, "java.lang.String", Oid.CHAR_ARRAY},
      {"varchar", Oid.VARCHAR, Types.VARCHAR, "java.lang.String", Oid.VARCHAR_ARRAY},
      {"bytea", Oid.BYTEA, Types.BINARY, "[B", Oid.BYTEA_ARRAY},
      {"bool", Oid.BOOL, Types.BIT, "java.lang.Boolean", Oid.BOOL_ARRAY},
      {"date", Oid.DATE, Types.DATE, "java.sql.Date", Oid.DATE_ARRAY},
      {"time", Oid.TIME, Types.TIME, "java.sql.Time", Oid.TIME_ARRAY},
      {"time with time zone", Oid.TIMETZ, Types.TIME, "java.sql.Time", Oid.TIMETZ_ARRAY},
      {"timetz", Oid.TIMETZ, Types.TIME, "java.sql.Time", Oid.TIMETZ_ARRAY},
      {"timestamp", Oid.TIMESTAMP, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMP_ARRAY},
      {"timestamptz", Oid.TIMESTAMPTZ, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMPTZ_ARRAY},
      {"intervaly2m", Oid.INTERVALY2M, Types.OTHER, "com.amazon.redshift.util.RedshiftIntervalYearToMonth", Oid.INTERVALY2M_ARRAY},
      {"intervald2s", Oid.INTERVALD2S, Types.OTHER, "com.amazon.redshift.util.RedshiftIntervalDayToSecond", Oid.INTERVALD2S_ARRAY},
      //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
      {"refcursor", Oid.REF_CURSOR, Types.REF_CURSOR, "java.sql.ResultSet", Oid.REF_CURSOR_ARRAY},
      //JCP! endif
      {"aclitem", Oid.ACLITEM, Types.OTHER, "java.lang.Object", Oid.ACLITEM_ARRAY},
      {"regproc", Oid.REGPROC, Types.OTHER, "java.lang.Object", Oid.REGPROC_ARRAY},
      {"oidvector", Oid.OIDVECTOR, Types.VARCHAR, "java.lang.Object", Oid.OIDVECTOR_ARRAY},
      {"json", Oid.JSON, Types.OTHER, "com.amazon.redshift.util.RedshiftObject", Oid.JSON_ARRAY},
      {"point", Oid.POINT, Types.OTHER, "com.amazon.redshift.geometric.RedshiftPoint", Oid.POINT_ARRAY},
      {GEOMETRY_NAME, Oid.GEOMETRY, Types.LONGVARBINARY, "[B", Oid.GEOMETRY_ARRAY},
      {SUPER_NAME, Oid.SUPER, Types.LONGVARCHAR, "java.lang.String", Oid.SUPER_ARRAY},
      {VARBYTE_NAME, Oid.VARBYTE, Types.LONGVARBINARY, "[B", Oid.VARBYTE_ARRAY},
      {GEOGRAPHY_NAME, Oid.GEOGRAPHY, Types.LONGVARBINARY, "[B", Oid.GEOGRAPHY_ARRAY}
  };

  /**
   * RS maps several alias to real type names. When we do queries against pg_catalog, we must use
   * the real type, not an alias, so use this mapping.
 */
private static final HashMap<String, String> typeAliases;

static {
  typeAliases = new HashMap<String, String>();
  typeAliases.put("smallint", "int2");
  typeAliases.put("integer", "int4");
  typeAliases.put("int", "int4");
  typeAliases.put("bigint", "int8");
  typeAliases.put("float", "float8");
  typeAliases.put("boolean", "bool");
  typeAliases.put("decimal", "numeric");
}

/**
 * Builds the cache for one connection, pre-populating every map from the
 * static {@code types} table.
 *
 * @param conn          owning connection (used for catalog lookups)
 * @param unknownLength length reported for types of unknown size
 */
public TypeInfoCache(BaseConnection conn, int unknownLength) {
  this.conn = conn;
  this.unknownLength = unknownLength;
  oidToRsName = new HashMap<Integer, String>((int) Math.round(types.length * 1.5));
  rsNameToOid = new HashMap<String, Integer>((int) Math.round(types.length * 1.5));
  rsNameToJavaClass = new HashMap<String, String>((int) Math.round(types.length * 1.5));
  rsNameToRsObject = new HashMap<String, Class<? extends RedshiftObject>>((int) Math.round(types.length * 1.5));
  rsArrayToRsType = new HashMap<Integer, Integer>((int) Math.round(types.length * 1.5));
  arrayOidToDelimiter = new HashMap<Integer, Character>((int) Math.round(types.length * 2.5));

  // needs to be synchronized because the iterator is returned
  // from getRSTypeNamesWithSQLTypes()
  rsNameToSQLType = Collections.synchronizedMap(new HashMap<String, Integer>((int) Math.round(types.length * 1.5)));

  for (Object[] type : types) {
    String pgTypeName = (String) type[0];
    Integer oid = (Integer) type[1];
    Integer sqlType = (Integer) type[2];
    String javaClass = (String) type[3];
    Integer arrayOid = (Integer) type[4];

    addCoreType(pgTypeName, oid, sqlType, javaClass, arrayOid);
  }

  rsNameToJavaClass.put("hstore", Map.class.getName());
}

/**
 * Registers one built-in type plus its array type ("name[]" and "_name"
 * spellings) in every lookup map.
 */
public synchronized void addCoreType(String rsTypeName, Integer oid, Integer sqlType,
    String javaClass, Integer arrayOid) {
  rsNameToJavaClass.put(rsTypeName, javaClass);
  rsNameToOid.put(rsTypeName, oid);
  oidToRsName.put(oid, rsTypeName);
  rsArrayToRsType.put(arrayOid, oid);
  rsNameToSQLType.put(rsTypeName, sqlType);

  // Currently we hardcode all core types array delimiter
  // to a comma. In a stock install the only exception is
  // the box datatype and it's not a JDBC core type.
  //
  Character delim = ',';
  arrayOidToDelimiter.put(oid, delim);
  arrayOidToDelimiter.put(arrayOid, delim);

  String pgArrayTypeName = rsTypeName + "[]";
  rsNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
  rsNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
  rsNameToOid.put(pgArrayTypeName, arrayOid);
  pgArrayTypeName = "_" + rsTypeName;
  if (!rsNameToJavaClass.containsKey(pgArrayTypeName)) {
    rsNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
    rsNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
    rsNameToOid.put(pgArrayTypeName, arrayOid);
    oidToRsName.put(arrayOid, pgArrayTypeName);
  }
}

/**
 * Registers a user extension type backed by a {@link RedshiftObject} subclass.
 */
public synchronized void addDataType(String type, Class<? extends RedshiftObject> klass)
    throws SQLException {
  rsNameToRsObject.put(type, klass);
  rsNameToJavaClass.put(type, klass.getName());
}

public Iterator<String> getRSTypeNamesWithSQLTypes() {
  return rsNameToSQLType.keySet().iterator();
}

/**
 * Builds the pg_catalog query used to classify a type (array flag, typtype,
 * name); with {@code typnameParam} a WHERE clause for a single name is added.
 */
private String getSQLTypeQuery(boolean typnameParam) {
  // There's no great way of telling what's an array type.
  // People can name their own types starting with _.
  // Other types use typelem that aren't actually arrays, like box.
// // in case of multiple records (in different schemas) choose the one from the current // schema, // otherwise take the last version of a type that is at least more deterministic then before // (keeping old behaviour of finding types, that should not be found without correct search // path) StringBuilder sql = new StringBuilder(); sql.append("SELECT typinput='array_in'::regproc as is_array, typtype, typname "); sql.append(" FROM pg_catalog.pg_type "); sql.append(" LEFT JOIN (select ns.oid as nspoid, ns.nspname, r.r "); sql.append(" from pg_namespace as ns "); // -- go with older way of unnesting array to be compatible with 8.0 sql.append(" join ( select s.r, (current_schemas(false))[s.r] as nspname "); sql.append(" from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r "); sql.append(" using ( nspname ) "); sql.append(" ) as sp "); sql.append(" ON sp.nspoid = typnamespace "); if (typnameParam) { sql.append(" WHERE typname = ? "); } sql.append(" ORDER BY sp.r, pg_type.oid DESC;"); return sql.toString(); } private int getSQLTypeFromQueryResult(ResultSet rs, RedshiftLogger logger) throws SQLException { Integer type = null; boolean isArray = rs.getBoolean("is_array"); String typtype = rs.getString("typtype"); String typname = rs.getString("typname"); if (isArray) { type = Types.ARRAY; } else if ("c".equals(typtype)) { type = Types.STRUCT; } else if ("d".equals(typtype)) { type = Types.DISTINCT; } else if ("e".equals(typtype)) { type = Types.VARCHAR; } else if ("p".equals(typtype)) { type = Types.VARCHAR; } else if ("b".equals(typtype) && typname.equals("oidvector")) { type = Types.VARCHAR; } if (type == null) { if(RedshiftLogger.isEnable() && logger != null) logger.log(LogLevel.DEBUG, " isArray=" + isArray + " typname= " + typname + " typtype=" + typtype); type = Types.OTHER; } return type; } public void cacheSQLTypes(RedshiftLogger logger) throws SQLException { if(RedshiftLogger.isEnable()) logger.log(LogLevel.DEBUG, "caching all SQL 
typecodes"); if (getAllTypeInfoStatement == null) { getAllTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(false)); } // Go through BaseStatement to avoid transaction start. if (!((BaseStatement) getAllTypeInfoStatement) .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } ResultSet rs = getAllTypeInfoStatement.getResultSet(); while (rs.next()) { String typeName = rs.getString("typname"); Integer type = getSQLTypeFromQueryResult(rs, logger); if (!rsNameToSQLType.containsKey(typeName)) { rsNameToSQLType.put(typeName, type); } } rs.close(); } public int getSQLType(int oid) throws SQLException { return getSQLType(getRSType(oid)); } public synchronized int getSQLType(String pgTypeName) throws SQLException { if (pgTypeName.endsWith("[]")) { return Types.ARRAY; } Integer i = rsNameToSQLType.get(pgTypeName); if (i != null) { return i; } if(RedshiftLogger.isEnable() && conn.getLogger()!=null){ conn.getLogger().log(LogLevel.INFO, "Unknown pgTypeName found when retrieving the SQL Type --" + pgTypeName); } if (getTypeInfoStatement == null) { getTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(true)); } getTypeInfoStatement.setString(1, pgTypeName); // Go through BaseStatement to avoid transaction start. 
if (!((BaseStatement) getTypeInfoStatement) .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } ResultSet rs = getTypeInfoStatement.getResultSet(); Integer type = Types.OTHER; if (rs.next()) { type = getSQLTypeFromQueryResult(rs, conn.getLogger()); } rs.close(); rsNameToSQLType.put(pgTypeName, type); return type; } private PreparedStatement getOidStatement(String pgTypeName) throws SQLException { boolean isArray = pgTypeName.endsWith("[]"); boolean hasQuote = pgTypeName.contains("\""); int dotIndex = pgTypeName.indexOf('.'); if (dotIndex == -1 && !hasQuote && !isArray) { if (getOidStatementSimple == null) { String sql; // see comments in @getSQLType() // -- go with older way of unnesting array to be compatible with 8.0 sql = "SELECT pg_type.oid, typname " + " FROM pg_catalog.pg_type " + " LEFT " + " JOIN (select ns.oid as nspoid, ns.nspname, r.r " + " from pg_namespace as ns " + " join ( select s.r, (current_schemas(false))[s.r] as nspname " + " from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r " + " using ( nspname ) " + " ) as sp " + " ON sp.nspoid = typnamespace " + " WHERE typname = ? " + " ORDER BY sp.r, pg_type.oid DESC LIMIT 1;"; getOidStatementSimple = conn.prepareStatement(sql); } // coerce to lower case to handle upper case type names String lcName = pgTypeName.toLowerCase(); // default arrays are represented with _ as prefix ... 
this dont even work for public schema // fully getOidStatementSimple.setString(1, lcName); return getOidStatementSimple; } PreparedStatement oidStatementComplex; if (isArray) { if (getOidStatementComplexArray == null) { String sql; /* if (conn.haveMinimumServerVersion(ServerVersion.v8_3)) { sql = "SELECT t.typarray, arr.typname " + " FROM pg_catalog.pg_type t" + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid" + " JOIN pg_catalog.pg_type arr ON arr.oid = t.typarray" + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))" + " ORDER BY t.oid DESC LIMIT 1"; } else */ { sql = "SELECT t.oid, t.typname " + " FROM pg_catalog.pg_type t" + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid" + " WHERE t.typelem = (SELECT oid FROM pg_catalog.pg_type WHERE typname = ?)" + " AND substring(t.typname, 1, 1) = '_' AND t.typlen = -1" + " AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))" + " ORDER BY t.typelem DESC LIMIT 1"; } getOidStatementComplexArray = conn.prepareStatement(sql); } oidStatementComplex = getOidStatementComplexArray; } else { if (getOidStatementComplexNonArray == null) { String sql = "SELECT t.oid, t.typname " + " FROM pg_catalog.pg_type t" + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid" + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))" + " ORDER BY t.oid DESC LIMIT 1"; getOidStatementComplexNonArray = conn.prepareStatement(sql); } oidStatementComplex = getOidStatementComplexNonArray; } //type name requested may be schema specific, of the form "{schema}"."typeName", //or may check across all schemas where a schema is not specified. String fullName = isArray ? 
pgTypeName.substring(0, pgTypeName.length() - 2) : pgTypeName; String schema; String name; // simple use case if (dotIndex == -1) { schema = null; name = fullName; } else { if (fullName.startsWith("\"")) { if (fullName.endsWith("\"")) { String[] parts = fullName.split("\"\\.\""); schema = parts.length == 2 ? parts[0] + "\"" : null; name = parts.length == 2 ? "\"" + parts[1] : parts[0]; } else { int lastDotIndex = fullName.lastIndexOf('.'); name = fullName.substring(lastDotIndex + 1); schema = fullName.substring(0, lastDotIndex); } } else { schema = fullName.substring(0, dotIndex); name = fullName.substring(dotIndex + 1); } } if (schema != null && schema.startsWith("\"") && schema.endsWith("\"")) { schema = schema.substring(1, schema.length() - 1); } else if (schema != null) { schema = schema.toLowerCase(); } if (name.startsWith("\"") && name.endsWith("\"")) { name = name.substring(1, name.length() - 1); } else { name = name.toLowerCase(); } oidStatementComplex.setString(1, name); oidStatementComplex.setString(2, schema); oidStatementComplex.setBoolean(3, schema == null); return oidStatementComplex; } public synchronized int getRSType(String pgTypeName) throws SQLException { Integer oid = rsNameToOid.get(pgTypeName); if (oid != null) { return oid; } if(RedshiftLogger.isEnable() && conn.getLogger()!=null){ conn.getLogger().log(LogLevel.INFO, "Unknown pgTypeName found when retrieving the RedShift Type -- " + pgTypeName); } PreparedStatement oidStatement = getOidStatement(pgTypeName); // Go through BaseStatement to avoid transaction start. 
if (!((BaseStatement) oidStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } oid = Oid.UNSPECIFIED; ResultSet rs = oidStatement.getResultSet(); if (rs.next()) { oid = (int) rs.getLong(1); String internalName = rs.getString(2); oidToRsName.put(oid, internalName); rsNameToOid.put(internalName, oid); } rsNameToOid.put(pgTypeName, oid); rs.close(); return oid; } public synchronized String getRSType(int oid) throws SQLException { if (oid == Oid.UNSPECIFIED) { return null; } String rsTypeName = oidToRsName.get(oid); if (rsTypeName != null) { return rsTypeName; } if(RedshiftLogger.isEnable() && conn.getLogger()!=null) conn.getLogger().log(LogLevel.INFO, "Unknown oid found when retrieving the RedShift Type --" + oid); if (getNameStatement == null) { String sql; sql = "SELECT n.nspname = ANY(current_schemas(true)), n.nspname, t.typname " + "FROM pg_catalog.pg_type t " + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?"; getNameStatement = conn.prepareStatement(sql); } getNameStatement.setInt(1, oid); // Go through BaseStatement to avoid transaction start. if (!((BaseStatement) getNameStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } ResultSet rs = getNameStatement.getResultSet(); if (rs.next()) { boolean onPath = rs.getBoolean(1); String schema = rs.getString(2); String name = rs.getString(3); if (onPath) { rsTypeName = name; rsNameToOid.put(schema + "." + name, oid); } else { // TODO: escaping !? rsTypeName = "\"" + schema + "\".\"" + name + "\""; // if all is lowercase add special type info // TODO: should probably check for all special chars if (schema.equals(schema.toLowerCase()) && schema.indexOf('.') == -1 && name.equals(name.toLowerCase()) && name.indexOf('.') == -1) { rsNameToOid.put(schema + "." 
+ name, oid); } } rsNameToOid.put(rsTypeName, oid); oidToRsName.put(oid, rsTypeName); } rs.close(); return rsTypeName; } public int getRSArrayType(String elementTypeName) throws SQLException { elementTypeName = getTypeForAlias(elementTypeName); return getRSType(elementTypeName + "[]"); } /** * Return the oid of the array's base element if it's an array, if not return the provided oid. * This doesn't do any database lookups, so it's only useful for the originally provided type * mappings. This is fine for it's intended uses where we only have intimate knowledge of types * that are already known to the driver. * * @param oid input oid * @return oid of the array's base element or the provided oid (if not array) */ protected synchronized int convertArrayToBaseOid(int oid) { Integer i = rsArrayToRsType.get(oid); if (i == null) { return oid; } return i; } public synchronized char getArrayDelimiter(int oid) throws SQLException { if (oid == Oid.UNSPECIFIED) { return ','; } Character delim = arrayOidToDelimiter.get(oid); if (delim != null) { return delim; } if(RedshiftLogger.isEnable() && conn.getLogger()!=null) conn.getLogger().log(LogLevel.INFO, "Unknown oid found when retrieving the Array Delimiter Type --" + oid); if (getArrayDelimiterStatement == null) { String sql; sql = "SELECT e.typdelim FROM pg_catalog.pg_type t, pg_catalog.pg_type e " + "WHERE t.oid = ? and t.typelem = e.oid"; getArrayDelimiterStatement = conn.prepareStatement(sql); } getArrayDelimiterStatement.setInt(1, oid); // Go through BaseStatement to avoid transaction start. 
if (!((BaseStatement) getArrayDelimiterStatement) .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } ResultSet rs = getArrayDelimiterStatement.getResultSet(); if (!rs.next()) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } String s = rs.getString(1); delim = s.charAt(0); arrayOidToDelimiter.put(oid, delim); rs.close(); return delim; } public synchronized int getRSArrayElement(int oid) throws SQLException { if (oid == Oid.UNSPECIFIED) { return Oid.UNSPECIFIED; } Integer rsType = rsArrayToRsType.get(oid); if (rsType != null) { return rsType; } if(RedshiftLogger.isEnable() && conn.getLogger()!=null) conn.getLogger().log(LogLevel.INFO, "Unknown oid found when retrieving the RS Array Element --" + oid); if (getArrayElementOidStatement == null) { String sql; sql = "SELECT e.oid, n.nspname = ANY(current_schemas(true)), n.nspname, e.typname " + "FROM pg_catalog.pg_type t JOIN pg_catalog.pg_type e ON t.typelem = e.oid " + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?"; getArrayElementOidStatement = conn.prepareStatement(sql); } getArrayElementOidStatement.setInt(1, oid); // Go through BaseStatement to avoid transaction start. if (!((BaseStatement) getArrayElementOidStatement) .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } ResultSet rs = getArrayElementOidStatement.getResultSet(); if (!rs.next()) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } rsType = (int) rs.getLong(1); boolean onPath = rs.getBoolean(2); String schema = rs.getString(3); String name = rs.getString(4); rsArrayToRsType.put(oid, rsType); rsNameToOid.put(schema + "." 
+ name, rsType); String fullName = "\"" + schema + "\".\"" + name + "\""; rsNameToOid.put(fullName, rsType); if (onPath && name.equals(name.toLowerCase())) { oidToRsName.put(rsType, name); rsNameToOid.put(name, rsType); } else { oidToRsName.put(rsType, fullName); } rs.close(); return rsType; } public synchronized Class<? extends RedshiftObject> getRSobject(String type) { return rsNameToRsObject.get(type); } public synchronized String getJavaClass(int oid) throws SQLException { String pgTypeName = getRSType(oid); String result = rsNameToJavaClass.get(pgTypeName); if (result != null) { return result; } if(RedshiftLogger.isEnable() && conn.getLogger()!=null) conn.getLogger().log(LogLevel.INFO, "Unknown oid found when retrieving the java class Type --" + oid); if (getSQLType(pgTypeName) == Types.ARRAY) { result = "java.sql.Array"; rsNameToJavaClass.put(pgTypeName, result); } return result; } public String getTypeForAlias(String alias) { String type = typeAliases.get(alias); if (type != null) { return type; } if (alias.indexOf('"') == -1) { type = typeAliases.get(alias.toLowerCase()); if (type != null) { return type; } } return alias; } public int getPrecision(int oid, int typmod) { oid = convertArrayToBaseOid(oid); switch (oid) { case Oid.INT2: return 5; case Oid.OID: case Oid.INT4: return 10; case Oid.INT8: case Oid.XIDOID: return 19; case Oid.FLOAT4: // For float4 and float8, we can normally only get 6 and 15 // significant digits out, but extra_float_digits may raise // that number by up to two digits. 
return 8; case Oid.FLOAT8: return 17; case Oid.NUMERIC: if (typmod == -1) { return 0; } return ((typmod - 4) & 0xFFFF0000) >> 16; case Oid.CHAR: case Oid.BOOL: return 1; case Oid.BPCHAR: case Oid.VARCHAR: case Oid.SUPER: case Oid.VARBYTE: case Oid.GEOGRAPHY: case Oid.TIDOID: case Oid.ABSTIMEOID: if (typmod == -1) { return unknownLength; } return typmod - 4; // datetime types get the // "length in characters of the String representation" case Oid.DATE: case Oid.TIME: case Oid.TIMETZ: case Oid.INTERVAL: case Oid.INTERVALY2M: case Oid.INTERVALD2S: case Oid.TIMESTAMP: case Oid.TIMESTAMPTZ: return getDisplaySize(oid, typmod); case Oid.BIT: return typmod; case Oid.VARBIT: if (typmod == -1) { return unknownLength; } return typmod; case Oid.NAME: return 64; case Oid.TEXT: case Oid.BYTEA: default: return unknownLength; } } public int getScale(int oid, int typmod) { oid = convertArrayToBaseOid(oid); switch (oid) { case Oid.FLOAT4: return 8; case Oid.FLOAT8: return 17; case Oid.NUMERIC: if (typmod == -1) { return 0; } return (typmod - 4) & 0xFFFF; case Oid.TIME: case Oid.TIMETZ: case Oid.TIMESTAMP: case Oid.TIMESTAMPTZ: if (typmod == -1) { return 6; } return typmod; case Oid.INTERVAL: if (typmod == -1) { return 6; } return typmod & 0xFFFF; default: return 0; } } public boolean isCaseSensitive(int oid) { oid = convertArrayToBaseOid(oid); switch (oid) { case Oid.OID: case Oid.INT2: case Oid.INT4: case Oid.INT8: case Oid.FLOAT4: case Oid.FLOAT8: case Oid.NUMERIC: case Oid.BOOL: case Oid.BIT: case Oid.VARBIT: case Oid.DATE: case Oid.TIME: case Oid.TIMETZ: case Oid.TIMESTAMP: case Oid.TIMESTAMPTZ: case Oid.INTERVAL: case Oid.INTERVALY2M: case Oid.INTERVALD2S: case Oid.GEOGRAPHY: return false; default: return true; } } public boolean isSigned(int oid) { oid = convertArrayToBaseOid(oid); switch (oid) { case Oid.INT2: case Oid.INT4: case Oid.INT8: case Oid.FLOAT4: case Oid.FLOAT8: case Oid.NUMERIC: return true; default: return false; } } public int getDisplaySize(int oid, int typmod) 
{ oid = convertArrayToBaseOid(oid); switch (oid) { case Oid.INT2: return 6; // -32768 to +32767 case Oid.INT4: return 11; // -2147483648 to +2147483647 case Oid.OID: return 10; // 0 to 4294967295 case Oid.INT8: case Oid.XIDOID: return 20; // -9223372036854775808 to +9223372036854775807 case Oid.FLOAT4: // varies based upon the extra_float_digits GUC. // These values are for the longest possible length. return 15; // sign + 9 digits + decimal point + e + sign + 2 digits case Oid.FLOAT8: return 25; // sign + 18 digits + decimal point + e + sign + 3 digits case Oid.CHAR: return 1; case Oid.BOOL: return 1; case Oid.DATE: return 13; // "4713-01-01 BC" to "01/01/4713 BC" - "31/12/32767" case Oid.TIME: case Oid.TIMETZ: case Oid.TIMESTAMP: case Oid.TIMESTAMPTZ: // Calculate the number of decimal digits + the decimal point. int secondSize; switch (typmod) { case -1: secondSize = 6 + 1; break; case 0: secondSize = 0; break; case 1: // Bizarrely SELECT '0:0:0.1'::time(1); returns 2 digits. secondSize = 2 + 1; break; default: secondSize = typmod + 1; break; } // We assume the worst case scenario for all of these. 
// time = '00:00:00' = 8 // date = '5874897-12-31' = 13 (although at large values second precision is lost) // date = '294276-11-20' = 12 --enable-integer-datetimes // zone = '+11:30' = 6; switch (oid) { case Oid.TIME: return 8 + secondSize; case Oid.TIMETZ: return 8 + secondSize + 6; case Oid.TIMESTAMP: return 13 + 1 + 8 + secondSize; case Oid.TIMESTAMPTZ: return 13 + 1 + 8 + secondSize + 6; } case Oid.INTERVAL: case Oid.INTERVALY2M: case Oid.INTERVALD2S: // SELECT LENGTH('-123456789 years 11 months 33 days 23 hours 10.123456 seconds'::interval); return 49; case Oid.VARCHAR: case Oid.BPCHAR: case Oid.SUPER: case Oid.VARBYTE: case Oid.GEOGRAPHY: case Oid.TIDOID: case Oid.ABSTIMEOID: if (typmod == -1) { return unknownLength; } return typmod - 4; case Oid.NUMERIC: if (typmod == -1) { return 131089; // SELECT LENGTH(pow(10::numeric,131071)); 131071 = 2^17-1 } int precision = (typmod - 4 >> 16) & 0xffff; int scale = (typmod - 4) & 0xffff; // sign + digits + decimal point (only if we have nonzero scale) return 1 + precision + (scale != 0 ? 1 : 0); case Oid.BIT: return typmod; case Oid.VARBIT: if (typmod == -1) { return unknownLength; } return typmod; case Oid.NAME: return 64; case Oid.TEXT: case Oid.BYTEA: return unknownLength; default: return unknownLength; } } public int getMaximumPrecision(int oid) { oid = convertArrayToBaseOid(oid); switch (oid) { case Oid.NUMERIC: return 1000; case Oid.TIME: case Oid.TIMETZ: // Technically this depends on the --enable-integer-datetimes // configure setting. It is 6 with integer and 10 with float. 
return 6; case Oid.TIMESTAMP: case Oid.TIMESTAMPTZ: case Oid.INTERVAL: case Oid.INTERVALD2S: return 6; case Oid.BPCHAR: case Oid.VARCHAR: return 10485760; case Oid.SUPER: return 4194304; case Oid.VARBYTE: case Oid.GEOGRAPHY: return 1000000; case Oid.BIT: case Oid.VARBIT: return 83886080; default: return 0; } } public boolean requiresQuoting(int oid) throws SQLException { int sqlType = getSQLType(oid); return requiresQuotingSqlType(sqlType); } /** * Returns true if particular sqlType requires quoting. * This method is used internally by the driver, so it might disappear without notice. * * @param sqlType sql type as in java.sql.Types * @return true if the type requires quoting * @throws SQLException if something goes wrong */ public boolean requiresQuotingSqlType(int sqlType) throws SQLException { switch (sqlType) { case Types.BIGINT: case Types.DOUBLE: case Types.FLOAT: case Types.INTEGER: case Types.REAL: case Types.SMALLINT: case Types.TINYINT: case Types.NUMERIC: case Types.DECIMAL: return false; } return true; } }
8,525
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/AbstractBlobClob.java
/*
 * Copyright (c) 2005, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.core.ServerVersion;
import com.amazon.redshift.largeobject.LargeObject;
import com.amazon.redshift.largeobject.LargeObjectManager;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.io.InputStream;
import java.io.OutputStream;
import java.sql.Blob;
import java.sql.SQLException;
import java.util.ArrayList;

/**
 * This class holds all of the methods common to both Blobs and Clobs.
 *
 * @author <a href="mailto:mike@middlesoft.co.uk">Michael Barker</a>
 */
public abstract class AbstractBlobClob {
  protected BaseConnection conn;

  // The primary large-object handle; lazily opened by getLo() and reopened
  // read-write on the first write request.
  private LargeObject currentLo;
  private boolean currentLoIsWriteable;
  // True when the server supports the 64-bit large-object API
  // (size64/truncate64), i.e. version >= 9.3.
  private boolean support64bit;

  /**
   * We create separate LargeObjects for methods that use streams so they won't interfere with each
   * other.
   */
  private ArrayList<LargeObject> subLOs;

  private final long oid;

  /**
   * Creates a LOB wrapper around the large object identified by {@code oid}.
   * The underlying LargeObject is opened lazily on first access.
   *
   * @param conn owning connection
   * @param oid large object identifier
   * @throws SQLException if the server version check fails
   */
  public AbstractBlobClob(BaseConnection conn, long oid) throws SQLException {
    this.conn = conn;
    this.oid = oid;
    this.currentLo = null;
    this.currentLoIsWriteable = false;
    support64bit = conn.haveMinimumServerVersion(90300);
    subLOs = new ArrayList<LargeObject>();
  }

  /**
   * Releases the primary handle and every stream sub-handle. After this call
   * the object is unusable; setting subLOs to null is what checkFreed()
   * detects.
   *
   * @throws SQLException if closing a handle fails
   */
  public synchronized void free() throws SQLException {
    if (currentLo != null) {
      currentLo.close();
      currentLo = null;
      currentLoIsWriteable = false;
    }
    for (LargeObject subLO : subLOs) {
      subLO.close();
    }
    subLOs = null;
  }

  /**
   * For Blobs this should be in bytes while for Clobs it should be in characters. Since we really
   * haven't figured out how to handle character sets for Clobs the current implementation uses
   * bytes for both Blobs and Clobs.
   *
   * @param len maximum length
   * @throws SQLException if operation fails
   */
  public synchronized void truncate(long len) throws SQLException {
    checkFreed();
    if (!conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
      throw new RedshiftException(
          GT.tr("Truncation of large objects is only implemented in 8.3 and later servers."),
          RedshiftState.NOT_IMPLEMENTED);
    }

    if (len < 0) {
      throw new RedshiftException(GT.tr("Cannot truncate LOB to a negative length."),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }
    if (len > Integer.MAX_VALUE) {
      if (support64bit) {
        getLo(true).truncate64(len);
      } else {
        throw new RedshiftException(GT.tr("Redshift LOBs can only index to: {0}", Integer.MAX_VALUE),
            RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else {
      getLo(true).truncate((int) len);
    }
  }

  /**
   * Returns the LOB length in bytes, using the 64-bit API when available.
   *
   * @throws SQLException if the LOB was freed or the server call fails
   */
  public synchronized long length() throws SQLException {
    checkFreed();
    if (support64bit) {
      return getLo(false).size64();
    } else {
      return getLo(false).size();
    }
  }

  /**
   * Reads up to {@code length} bytes starting at 1-based position {@code pos}.
   *
   * @throws SQLException if pos is invalid or the read fails
   */
  public synchronized byte[] getBytes(long pos, int length) throws SQLException {
    assertPosition(pos);
    getLo(false).seek((int) (pos - 1), LargeObject.SEEK_SET);
    return getLo(false).read(length);
  }

  /**
   * Opens a dedicated read stream over a copy of the handle so it does not
   * disturb the primary handle's position.
   */
  public synchronized InputStream getBinaryStream() throws SQLException {
    checkFreed();
    LargeObject subLO = getLo(false).copy();
    addSubLO(subLO);
    subLO.seek(0, LargeObject.SEEK_SET);
    return subLO.getInputStream();
  }

  /**
   * Opens a dedicated write stream positioned at 1-based position {@code pos},
   * again on a copy of the (writeable) handle.
   */
  public synchronized OutputStream setBinaryStream(long pos) throws SQLException {
    assertPosition(pos);
    LargeObject subLO = getLo(true).copy();
    addSubLO(subLO);
    subLO.seek((int) (pos - 1));
    return subLO.getOutputStream();
  }

  /**
   * Iterate over the buffer looking for the specified pattern.
   *
   * @param pattern A pattern of bytes to search the blob for
   * @param start The position to start reading from
   * @return position of the specified pattern, or -1 when not found
   * @throws SQLException if something wrong happens
   */
  public synchronized long position(byte[] pattern, long start) throws SQLException {
    assertPosition(start, pattern.length);

    int position = 1;
    int patternIdx = 0;
    long result = -1;
    int tmpPosition = 1;

    for (LOIterator i = new LOIterator(start - 1); i.hasNext(); position++) {
      byte b = i.next();
      if (b == pattern[patternIdx]) {
        if (patternIdx == 0) {
          // Remember where this candidate match began.
          tmpPosition = position;
        }
        patternIdx++;
        if (patternIdx == pattern.length) {
          result = tmpPosition;
          break;
        }
      } else {
        // NOTE(review): a mismatch resets to the pattern start without
        // rewinding the input, so overlapping matches (e.g. "aab" in "aaab")
        // can be missed — confirm whether this is acceptable for LOB search.
        patternIdx = 0;
      }
    }

    return result;
  }

  /**
   * Iterates over a large object returning byte values. Will buffer the data from the large object.
   */
  private class LOIterator {
    private static final int BUFFER_SIZE = 8096;
    private byte[] buffer = new byte[BUFFER_SIZE];
    // idx == numBytes means the buffer is exhausted and must be refilled.
    private int idx = BUFFER_SIZE;
    private int numBytes = BUFFER_SIZE;

    LOIterator(long start) throws SQLException {
      getLo(false).seek((int) start);
    }

    // Refills the buffer on demand; false once the large object is exhausted.
    public boolean hasNext() throws SQLException {
      boolean result;
      if (idx < numBytes) {
        result = true;
      } else {
        numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE);
        idx = 0;
        result = (numBytes > 0);
      }
      return result;
    }

    // Only valid after hasNext() returned true.
    private byte next() {
      return buffer[idx++];
    }
  }

  /**
   * This is simply passing the byte value of the pattern Blob.
   *
   * @param pattern search pattern
   * @param start start position
   * @return position of given pattern
   * @throws SQLException if something goes wrong
   */
  public synchronized long position(Blob pattern, long start) throws SQLException {
    return position(pattern.getBytes(1, (int) pattern.length()), start);
  }

  /**
   * Throws an exception if the pos value exceeds the max value by which the large object API can
   * index.
   *
   * @param pos Position to write at.
   * @throws SQLException if something goes wrong
   */
  protected void assertPosition(long pos) throws SQLException {
    assertPosition(pos, 0);
  }

  /**
   * Throws an exception if the pos value exceeds the max value by which the large object API can
   * index.
   *
   * @param pos Position to write at.
   * @param len number of bytes to write.
   * @throws SQLException if something goes wrong
   */
  protected void assertPosition(long pos, long len) throws SQLException {
    checkFreed();
    if (pos < 1) {
      throw new RedshiftException(GT.tr("LOB positioning offsets start at 1."),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }
    if (pos + len - 1 > Integer.MAX_VALUE) {
      throw new RedshiftException(GT.tr("Redshift LOBs can only index to: {0}", Integer.MAX_VALUE),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }
  }

  /**
   * Checks that this LOB hasn't been free()d already.
   *
   * @throws SQLException if LOB has been freed.
   */
  protected void checkFreed() throws SQLException {
    if (subLOs == null) {
      throw new RedshiftException(GT.tr("free() was called on this LOB previously"),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }
  }

  /**
   * Returns the primary LargeObject handle, opening it lazily. When a write
   * is requested on a handle that was opened read-only, the handle is
   * reopened read-write at the same position and the old handle is parked in
   * subLOs so free() still closes it.
   *
   * @param forWrite true when the caller intends to write
   * @throws SQLException if opening/seeking fails
   */
  protected synchronized LargeObject getLo(boolean forWrite) throws SQLException {
    if (this.currentLo != null) {
      if (forWrite && !currentLoIsWriteable) {
        // Reopen the stream in read-write, at the same pos.
        int currentPos = this.currentLo.tell();

        LargeObjectManager lom = conn.getLargeObjectAPI();
        LargeObject newLo = lom.open(oid, LargeObjectManager.READWRITE);
        this.subLOs.add(this.currentLo);
        this.currentLo = newLo;

        if (currentPos != 0) {
          this.currentLo.seek(currentPos);
        }
      }
      return this.currentLo;
    }
    LargeObjectManager lom = conn.getLargeObjectAPI();
    currentLo = lom.open(oid, forWrite ? LargeObjectManager.READWRITE : LargeObjectManager.READ);
    currentLoIsWriteable = forWrite;
    return currentLo;
  }

  // Tracks a stream handle so free() closes it along with the primary handle.
  protected void addSubLO(LargeObject subLO) {
    subLOs.add(subLO);
  }
}
8,526
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/BooleanTypeUtil.java
/* * Copyright (c) 2017, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; /** * <p>Helper class to handle boolean type of Redshift.</p> * * <p>Based on values accepted by the Redshift server: * https://www.postgresql.org/docs/current/static/datatype-boolean.html</p> */ class BooleanTypeUtil { private BooleanTypeUtil() { } /** * Cast an Object value to the corresponding boolean value. * * @param in Object to cast into boolean * @return boolean value corresponding to the cast of the object * @throws RedshiftException RedshiftState.CANNOT_COERCE */ static boolean castToBoolean(final Object in) throws RedshiftException { if (in instanceof Boolean) { return (Boolean) in; } if (in instanceof String) { return fromString((String) in); } if (in instanceof Character) { return fromCharacter((Character) in); } if (in instanceof Number) { return fromNumber((Number) in); } throw new RedshiftException("Cannot cast to boolean", RedshiftState.CANNOT_COERCE); } private static boolean fromString(final String strval) throws RedshiftException { // Leading or trailing whitespace is ignored, and case does not matter. 
final String val = strval.trim(); if ("1".equals(val) || "1.0".equals(val) || "true".equalsIgnoreCase(val) || "t".equalsIgnoreCase(val) || "yes".equalsIgnoreCase(val) || "y".equalsIgnoreCase(val) || "on".equalsIgnoreCase(val)) { return true; } if ("0".equals(val) || "0.0".equals(val) || "false".equalsIgnoreCase(val) || "f".equalsIgnoreCase(val) || "no".equalsIgnoreCase(val) || "n".equalsIgnoreCase(val) || "off".equalsIgnoreCase(val)) { return false; } try { return (!val.equalsIgnoreCase("false") && !val.equals("0") && !val.equals("0.0") && !val.equalsIgnoreCase("f")); }catch(Exception ex) { throw cannotCoerceException(strval); } } private static boolean fromCharacter(final Character charval) throws RedshiftException { if ('1' == charval || 't' == charval || 'T' == charval || 'y' == charval || 'Y' == charval) { return true; } if ('0' == charval || 'f' == charval || 'F' == charval || 'n' == charval || 'N' == charval) { return false; } throw cannotCoerceException(charval); } private static boolean fromNumber(final Number numval) throws RedshiftException { // Handles BigDecimal, Byte, Short, Integer, Long Float, Double // based on the widening primitive conversions. final double value = numval.doubleValue(); if (value == 1.0d) { return true; } if (value == 0.0d) { return false; } try { String str = String.valueOf(numval); return fromString(str); } catch(Exception ex) { throw cannotCoerceException(numval); } } private static RedshiftException cannotCoerceException(final Object value) { return new RedshiftException(GT.tr("Cannot cast to boolean: \"{0}\"", String.valueOf(value)), RedshiftState.CANNOT_COERCE); } }
8,527
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/PrimitiveArraySupport.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.core.Oid; import com.amazon.redshift.core.TypeInfo; import com.amazon.redshift.util.ByteConverter; import java.sql.Connection; import java.sql.SQLFeatureNotSupportedException; import java.util.HashMap; import java.util.Map; abstract class PrimitiveArraySupport<A> { public abstract int getDefaultArrayTypeOid(TypeInfo tiCache); public abstract String toArrayString(char delim, A array); public abstract void appendArray(StringBuilder sb, char delim, A array); public boolean supportBinaryRepresentation() { return true; } public abstract byte[] toBinaryRepresentation(Connection connection, A array) throws SQLFeatureNotSupportedException; private static final PrimitiveArraySupport<long[]> LONG_ARRAY = new PrimitiveArraySupport<long[]>() { /** * {@inheritDoc} */ @Override public int getDefaultArrayTypeOid(TypeInfo tiCache) { return Oid.INT8_ARRAY; } @Override public String toArrayString(char delim, long[] array) { final StringBuilder sb = new StringBuilder(Math.max(64, array.length * 8)); appendArray(sb, delim, array); return sb.toString(); } /** * {@inheritDoc} */ @Override public void appendArray(StringBuilder sb, char delim, long[] array) { sb.append('{'); for (int i = 0; i < array.length; ++i) { if (i > 0) { sb.append(delim); } sb.append(array[i]); } sb.append('}'); } /** * {@inheritDoc} */ @Override public byte[] toBinaryRepresentation(Connection connection, long[] array) { int length = 20 + (12 * array.length); final byte[] bytes = new byte[length]; // 1 dimension ByteConverter.int4(bytes, 0, 1); // no null ByteConverter.int4(bytes, 4, 0); // oid ByteConverter.int4(bytes, 8, Oid.INT8); // length ByteConverter.int4(bytes, 12, array.length); int idx = 20; for (int i = 0; i < array.length; ++i) { bytes[idx + 3] = 8; ByteConverter.int8(bytes, idx + 4, array[i]); idx += 12; } return 
bytes; } }; private static final PrimitiveArraySupport<int[]> INT_ARRAY = new PrimitiveArraySupport<int[]>() { /** * {@inheritDoc} */ @Override public int getDefaultArrayTypeOid(TypeInfo tiCache) { return Oid.INT4_ARRAY; } @Override public String toArrayString(char delim, int[] array) { final StringBuilder sb = new StringBuilder(Math.max(32, array.length * 6)); appendArray(sb, delim, array); return sb.toString(); } /** * {@inheritDoc} */ @Override public void appendArray(StringBuilder sb, char delim, int[] array) { sb.append('{'); for (int i = 0; i < array.length; ++i) { if (i > 0) { sb.append(delim); } sb.append(array[i]); } sb.append('}'); } /** * {@inheritDoc} */ @Override public byte[] toBinaryRepresentation(Connection connection, int[] array) { int length = 20 + (8 * array.length); final byte[] bytes = new byte[length]; // 1 dimension ByteConverter.int4(bytes, 0, 1); // no null ByteConverter.int4(bytes, 4, 0); // oid ByteConverter.int4(bytes, 8, Oid.INT4); // length ByteConverter.int4(bytes, 12, array.length); int idx = 20; for (int i = 0; i < array.length; ++i) { bytes[idx + 3] = 4; ByteConverter.int4(bytes, idx + 4, array[i]); idx += 8; } return bytes; } }; private static final PrimitiveArraySupport<short[]> SHORT_ARRAY = new PrimitiveArraySupport<short[]>() { /** * {@inheritDoc} */ @Override public int getDefaultArrayTypeOid(TypeInfo tiCache) { return Oid.INT2_ARRAY; } @Override public String toArrayString(char delim, short[] array) { final StringBuilder sb = new StringBuilder(Math.max(32, array.length * 4)); appendArray(sb, delim, array); return sb.toString(); } /** * {@inheritDoc} */ @Override public void appendArray(StringBuilder sb, char delim, short[] array) { sb.append('{'); for (int i = 0; i < array.length; ++i) { if (i > 0) { sb.append(delim); } sb.append(array[i]); } sb.append('}'); } /** * {@inheritDoc} */ @Override public byte[] toBinaryRepresentation(Connection connection, short[] array) { int length = 20 + (6 * array.length); final byte[] bytes 
= new byte[length]; // 1 dimension ByteConverter.int4(bytes, 0, 1); // no null ByteConverter.int4(bytes, 4, 0); // oid ByteConverter.int4(bytes, 8, Oid.INT2); // length ByteConverter.int4(bytes, 12, array.length); int idx = 20; for (int i = 0; i < array.length; ++i) { bytes[idx + 3] = 2; ByteConverter.int2(bytes, idx + 4, array[i]); idx += 6; } return bytes; } }; private static final PrimitiveArraySupport<double[]> DOUBLE_ARRAY = new PrimitiveArraySupport<double[]>() { /** * {@inheritDoc} */ @Override public int getDefaultArrayTypeOid(TypeInfo tiCache) { return Oid.FLOAT8_ARRAY; } @Override public String toArrayString(char delim, double[] array) { final StringBuilder sb = new StringBuilder(Math.max(64, array.length * 8)); appendArray(sb, delim, array); return sb.toString(); } /** * {@inheritDoc} */ @Override public void appendArray(StringBuilder sb, char delim, double[] array) { sb.append('{'); for (int i = 0; i < array.length; ++i) { if (i > 0) { sb.append(delim); } // use quotes to account for any issues with scientific notation sb.append('"'); sb.append(array[i]); sb.append('"'); } sb.append('}'); } /** * {@inheritDoc} */ @Override public byte[] toBinaryRepresentation(Connection connection, double[] array) { int length = 20 + (12 * array.length); final byte[] bytes = new byte[length]; // 1 dimension ByteConverter.int4(bytes, 0, 1); // no null ByteConverter.int4(bytes, 4, 0); // oid ByteConverter.int4(bytes, 8, Oid.FLOAT8); // length ByteConverter.int4(bytes, 12, array.length); int idx = 20; for (int i = 0; i < array.length; ++i) { bytes[idx + 3] = 8; ByteConverter.float8(bytes, idx + 4, array[i]); idx += 12; } return bytes; } }; private static final PrimitiveArraySupport<float[]> FLOAT_ARRAY = new PrimitiveArraySupport<float[]>() { /** * {@inheritDoc} */ @Override public int getDefaultArrayTypeOid(TypeInfo tiCache) { return Oid.FLOAT4_ARRAY; } @Override public String toArrayString(char delim, float[] array) { final StringBuilder sb = new 
StringBuilder(Math.max(64, array.length * 8)); appendArray(sb, delim, array); return sb.toString(); } /** * {@inheritDoc} */ @Override public void appendArray(StringBuilder sb, char delim, float[] array) { sb.append('{'); for (int i = 0; i < array.length; ++i) { if (i > 0) { sb.append(delim); } // use quotes to account for any issues with scientific notation sb.append('"'); sb.append(array[i]); sb.append('"'); } sb.append('}'); } /** * {@inheritDoc} */ @Override public byte[] toBinaryRepresentation(Connection connection, float[] array) { int length = 20 + (8 * array.length); final byte[] bytes = new byte[length]; // 1 dimension ByteConverter.int4(bytes, 0, 1); // no null ByteConverter.int4(bytes, 4, 0); // oid ByteConverter.int4(bytes, 8, Oid.FLOAT4); // length ByteConverter.int4(bytes, 12, array.length); int idx = 20; for (int i = 0; i < array.length; ++i) { bytes[idx + 3] = 4; ByteConverter.float4(bytes, idx + 4, array[i]); idx += 8; } return bytes; } }; private static final PrimitiveArraySupport<boolean[]> BOOLEAN_ARRAY = new PrimitiveArraySupport<boolean[]>() { /** * {@inheritDoc} */ @Override public int getDefaultArrayTypeOid(TypeInfo tiCache) { return Oid.BOOL_ARRAY; } @Override public String toArrayString(char delim, boolean[] array) { final StringBuilder sb = new StringBuilder(Math.max(64, array.length * 8)); appendArray(sb, delim, array); return sb.toString(); } /** * {@inheritDoc} */ @Override public void appendArray(StringBuilder sb, char delim, boolean[] array) { sb.append('{'); for (int i = 0; i < array.length; ++i) { if (i > 0) { sb.append(delim); } sb.append(array[i] ? '1' : '0'); } sb.append('}'); } /** * {@inheritDoc} * * @throws SQLFeatureNotSupportedException * Because this feature is not supported. 
*/ @Override public byte[] toBinaryRepresentation(Connection connection, boolean[] array) throws SQLFeatureNotSupportedException { int length = 20 + (5 * array.length); final byte[] bytes = new byte[length]; // 1 dimension ByteConverter.int4(bytes, 0, 1); // no null ByteConverter.int4(bytes, 4, 0); // oid ByteConverter.int4(bytes, 8, Oid.BOOL); // length ByteConverter.int4(bytes, 12, array.length); int idx = 20; for (int i = 0; i < array.length; ++i) { bytes[idx + 3] = 1; ByteConverter.bool(bytes, idx + 4, array[i]); idx += 5; } return bytes; } }; private static final PrimitiveArraySupport<String[]> STRING_ARRAY = new PrimitiveArraySupport<String[]>() { /** * {@inheritDoc} */ @Override public int getDefaultArrayTypeOid(TypeInfo tiCache) { return Oid.VARCHAR_ARRAY; } @Override public String toArrayString(char delim, String[] array) { final StringBuilder sb = new StringBuilder(Math.max(64, array.length * 8)); appendArray(sb, delim, array); return sb.toString(); } /** * {@inheritDoc} */ @Override public void appendArray(StringBuilder sb, char delim, String[] array) { sb.append('{'); for (int i = 0; i < array.length; ++i) { if (i > 0) { sb.append(delim); } if (array[i] == null) { sb.append('N'); sb.append('U'); sb.append('L'); sb.append('L'); } else { RedshiftArray.escapeArrayElement(sb, array[i]); } } sb.append('}'); } /** * {@inheritDoc} */ @Override public boolean supportBinaryRepresentation() { return false; } /** * {@inheritDoc} * * @throws SQLFeatureNotSupportedException * Because this feature is not supported. 
*/ @Override public byte[] toBinaryRepresentation(Connection connection, String[] array) throws SQLFeatureNotSupportedException { throw new SQLFeatureNotSupportedException(); } }; private static final Map<Class, PrimitiveArraySupport> ARRAY_CLASS_TO_SUPPORT = new HashMap<Class, PrimitiveArraySupport>((int) (7 / .75) + 1); static { ARRAY_CLASS_TO_SUPPORT.put(long[].class, LONG_ARRAY); ARRAY_CLASS_TO_SUPPORT.put(int[].class, INT_ARRAY); ARRAY_CLASS_TO_SUPPORT.put(short[].class, SHORT_ARRAY); ARRAY_CLASS_TO_SUPPORT.put(double[].class, DOUBLE_ARRAY); ARRAY_CLASS_TO_SUPPORT.put(float[].class, FLOAT_ARRAY); ARRAY_CLASS_TO_SUPPORT.put(boolean[].class, BOOLEAN_ARRAY); ARRAY_CLASS_TO_SUPPORT.put(String[].class, STRING_ARRAY); } public static boolean isSupportedPrimitiveArray(Object obj) { return obj != null && ARRAY_CLASS_TO_SUPPORT.containsKey(obj.getClass()); } public static <A> PrimitiveArraySupport<A> getArraySupport(A array) { return ARRAY_CLASS_TO_SUPPORT.get(array.getClass()); } }
8,528
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftStatementImpl.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.Driver; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.core.BaseStatement; import com.amazon.redshift.core.CachedQuery; import com.amazon.redshift.core.Field; import com.amazon.redshift.core.ParameterList; import com.amazon.redshift.core.Query; import com.amazon.redshift.core.QueryExecutor; import com.amazon.redshift.core.ResultCursor; import com.amazon.redshift.core.ResultHandlerBase; import com.amazon.redshift.core.SqlCommand; import com.amazon.redshift.core.Tuple; import com.amazon.redshift.core.v3.MessageLoopState; import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.QuerySanitizer; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLWarning; import java.sql.Statement; import java.util.ArrayList; import java.util.List; import java.util.TimerTask; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; public class RedshiftStatementImpl implements Statement, BaseStatement { private static final String[] NO_RETURNING_COLUMNS = new String[0]; /** * Default state for use or not binary transfers. Can use only for testing purposes */ private static final boolean DEFAULT_FORCE_BINARY_TRANSFERS = Boolean.getBoolean("com.amazon.redshift.forceBinary"); // only for testing purposes. 
even single shot statements will use binary transfers private boolean forceBinaryTransfers = DEFAULT_FORCE_BINARY_TRANSFERS; protected ArrayList<Query> batchStatements = null; protected ArrayList<ParameterList> batchParameters = null; protected final int resultsettype; // the resultset type to return (ResultSet.TYPE_xxx) protected final int concurrency; // is it updateable or not? (ResultSet.CONCUR_xxx) private final int rsHoldability; private boolean poolable; private boolean closeOnCompletion = false; protected int fetchdirection = ResultSet.FETCH_FORWARD; protected int autoGeneratedKeys = Statement.NO_GENERATED_KEYS; // fetch direction hint (currently ignored) /** * Protects current statement from cancelTask starting, waiting for a bit, and waking up exactly * on subsequent query execution. The idea is to atomically compare and swap the reference to the * task, so the task can detect that statement executes different query than the one the * cancelTask was created. Note: the field must be set/get/compareAndSet via * {@link #CANCEL_TIMER_UPDATER} as per {@link AtomicReferenceFieldUpdater} javadoc. */ private volatile TimerTask cancelTimerTask = null; private static final AtomicReferenceFieldUpdater<RedshiftStatementImpl, TimerTask> CANCEL_TIMER_UPDATER = AtomicReferenceFieldUpdater.newUpdater(RedshiftStatementImpl.class, TimerTask.class, "cancelTimerTask"); /** * Protects statement from out-of-order cancels. It protects from both * {@link #setQueryTimeout(int)} and {@link #cancel()} induced ones. * * {@link #execute(String)} and friends change the field to * {@link StatementCancelState#IN_QUERY} during execute. {@link #cancel()} * ignores cancel request if state is {@link StatementCancelState#IDLE}. * In case {@link #execute(String)} observes non-{@link StatementCancelState#IDLE} state as it * completes the query, it waits till {@link StatementCancelState#CANCELLED}. 
Note: the field must be * set/get/compareAndSet via {@link #STATE_UPDATER} as per {@link AtomicIntegerFieldUpdater} * javadoc. */ private volatile StatementCancelState statementState = StatementCancelState.IDLE; private static final AtomicReferenceFieldUpdater<RedshiftStatementImpl, StatementCancelState> STATE_UPDATER = AtomicReferenceFieldUpdater.newUpdater(RedshiftStatementImpl.class, StatementCancelState.class, "statementState"); /** * Does the caller of execute/executeUpdate want generated keys for this execution? This is set by * Statement methods that have generated keys arguments and cleared after execution is complete. */ protected boolean wantsGeneratedKeysOnce = false; /** * Was this PreparedStatement created to return generated keys for every execution? This is set at * creation time and never cleared by execution. */ public boolean wantsGeneratedKeysAlways = false; // The connection who created us protected final BaseConnection connection; /** * The warnings chain. */ protected volatile RedshiftWarningWrapper warnings = null; /** * Maximum number of rows to return, 0 = unlimited. */ protected int maxrows = 0; /** * Number of rows to get in a batch. */ protected int fetchSize = 0; /** * Timeout (in milliseconds) for a query. */ protected long timeout = 0; protected boolean replaceProcessingEnabled = true; /** * The current results. */ protected ResultWrapper result = null; /** * The first unclosed result. */ protected volatile ResultWrapper firstUnclosedResult = null; /** * Results returned by a statement that wants generated keys. 
*/ protected ResultWrapper generatedKeys = null; protected int mPrepareThreshold; // Reuse threshold to enable use of PREPARE protected int maxFieldSize = 0; RedshiftStatementImpl(RedshiftConnectionImpl c, int rsType, int rsConcurrency, int rsHoldability) throws SQLException { this.connection = c; forceBinaryTransfers |= c.getForceBinary(); resultsettype = rsType; concurrency = rsConcurrency; setFetchSize(c.getDefaultFetchSize()); setPrepareThreshold(c.getPrepareThreshold()); this.rsHoldability = rsHoldability; } public ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples, ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples, int[] rowCount, Thread ringBufferThread) throws SQLException { RedshiftResultSet newResult = new RedshiftResultSet(originalQuery, this, fields, tuples, cursor, getMaxRows(), getMaxFieldSize(), getResultSetType(), getResultSetConcurrency(), getResultSetHoldability(), queueTuples, rowCount, ringBufferThread); newResult.setFetchSize(getFetchSize()); newResult.setFetchDirection(getFetchDirection()); return newResult; } public BaseConnection getRedshiftConnection() { return connection; } public String getFetchingCursorName() { return null; } public int getFetchSize() { return fetchSize; } protected boolean wantsScrollableResultSet() { return resultsettype != ResultSet.TYPE_FORWARD_ONLY; } protected boolean wantsHoldableResultSet() { // FIXME: false if not supported return rsHoldability == ResultSet.HOLD_CURSORS_OVER_COMMIT; } /** * Internal use only. * * @param oldState old state of the statement * @param newState new state of the statement */ public void updateStatementCancleState(StatementCancelState oldState, StatementCancelState newState) { STATE_UPDATER.compareAndSet(this, oldState, newState); } /** * ResultHandler implementations for updates, queries, and either-or. 
*/ public class StatementResultHandler extends ResultHandlerBase { private ResultWrapper results; private ResultWrapper lastResult; private Statement stmt; public StatementResultHandler(Statement stmt) { this.stmt = stmt; } @Override public void setStatementStateIdleFromInQuery() { ((RedshiftStatementImpl)stmt).updateStatementCancleState(StatementCancelState.IN_QUERY, StatementCancelState.IDLE); } @Override public void setStatementStateInQueryFromIdle() { ((RedshiftStatementImpl)stmt).updateStatementCancleState(StatementCancelState.IDLE, StatementCancelState.IN_QUERY); } @Override public boolean wantsScrollableResultSet() { return RedshiftStatementImpl.this.wantsScrollableResultSet(); } ResultWrapper getResults() { return results; } private void append(ResultWrapper newResult) { if (results == null) { lastResult = results = newResult; } else { lastResult.append(newResult); } } @Override public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples, ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples, int[] rowCount, Thread ringBufferThread) { try { ResultSet rs = RedshiftStatementImpl.this.createResultSet(fromQuery, fields, tuples, cursor, queueTuples, rowCount, ringBufferThread); append(new ResultWrapper(rs)); } catch (SQLException e) { handleError(e); } } @Override public void handleCommandStatus(String status, long updateCount, long insertOID) { append(new ResultWrapper(updateCount, insertOID)); } @Override public void handleWarning(SQLWarning warning) { RedshiftStatementImpl.this.addWarning(warning); } } @Override public ResultSet executeQuery(String sql) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, sql); if (!executeWithFlags(sql, 0)) { throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA); } ResultSet rs = getSingleResultSet(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rs); return rs; } protected 
ResultSet getSingleResultSet() throws SQLException { synchronized (this) { checkClosed(); if (result.getNext() != null) { throw new RedshiftException(GT.tr("Multiple ResultSets were returned by the query."), RedshiftState.TOO_MANY_RESULTS); } return result.getResultSet(); } } @Override public int executeUpdate(String sql) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, sql); executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS); checkNoResultUpdate(); int rc = getUpdateCount(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } protected final void checkNoResultUpdate() throws SQLException { synchronized (this) { checkClosed(); if (this.autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { ResultWrapper iter = result; while (iter != null) { if (iter.getResultSet() != null) { throw new RedshiftException(GT.tr("A result was returned when none was expected."), RedshiftState.TOO_MANY_RESULTS); } iter = iter.getNext(); } } } // Synchronized } @Override public boolean execute(String sql) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, QuerySanitizer.filterCredentials(sql)); boolean rc = executeWithFlags(sql, 0); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); return rc; } @Override public boolean executeWithFlags(String sql, int flags) throws SQLException { return executeCachedSql(sql, flags, NO_RETURNING_COLUMNS); } private boolean executeCachedSql(String sql, int flags, String[] columnNames) throws SQLException { PreferQueryMode preferQueryMode = connection.getPreferQueryMode(); // Simple statements should not replace ?, ? 
with $1, $2 boolean shouldUseParameterized = false; QueryExecutor queryExecutor = connection.getQueryExecutor(); Object key = queryExecutor .createQueryKey(sql, replaceProcessingEnabled, shouldUseParameterized, columnNames); CachedQuery cachedQuery; boolean shouldCache = preferQueryMode == PreferQueryMode.EXTENDED_CACHE_EVERYTHING; if (shouldCache) { cachedQuery = queryExecutor.borrowQueryByKey(key); } else { cachedQuery = queryExecutor.createQueryByKey(key); } if (wantsGeneratedKeysOnce) { SqlCommand sqlCommand = cachedQuery.query.getSqlCommand(); wantsGeneratedKeysOnce = sqlCommand != null && sqlCommand.isReturningKeywordPresent(); } boolean res; try { res = executeWithFlags(cachedQuery, flags); } finally { if (shouldCache) { queryExecutor.releaseQuery(cachedQuery); } } return res; } public boolean executeWithFlags(CachedQuery simpleQuery, int flags) throws SQLException { checkClosed(); if (connection.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) < 0) { flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; } execute(simpleQuery, null, flags); synchronized (this) { checkClosed(); return (result != null && result.getResultSet() != null); } } public boolean executeWithFlags(int flags) throws SQLException { checkClosed(); throw new RedshiftException(GT.tr("Can''t use executeWithFlags(int) on a Statement."), RedshiftState.WRONG_OBJECT_TYPE); } protected void closeForNextExecution() throws SQLException { // Every statement execution clears any previous warnings. clearWarnings(); // Close any existing resultsets associated with this statement. 
synchronized (this) { while (firstUnclosedResult != null) { RedshiftResultSet rs = (RedshiftResultSet)firstUnclosedResult.getResultSet(); if (rs != null) { rs.closeInternally(); } firstUnclosedResult = firstUnclosedResult.getNext(); } result = null; if (generatedKeys != null) { if (generatedKeys.getResultSet() != null) { generatedKeys.getResultSet().close(); } generatedKeys = null; } } } /** * Returns true if query is unlikely to be reused. * * @param cachedQuery to check (null if current query) * @return true if query is unlikely to be reused */ protected boolean isOneShotQuery(CachedQuery cachedQuery) { if (cachedQuery == null) { return true; } cachedQuery.increaseExecuteCount(); if ((mPrepareThreshold == 0 || cachedQuery.getExecuteCount() < mPrepareThreshold) && !getForceBinaryTransfer()) { return true; } return false; } protected final void execute(CachedQuery cachedQuery, ParameterList queryParameters, int flags) throws SQLException { try { executeInternal(cachedQuery, queryParameters, flags); } catch (SQLException e) { // Don't retry composite queries as it might get partially executed if (cachedQuery.query.getSubqueries() != null || !connection.getQueryExecutor().willHealOnRetry(e)) { throw e; } cachedQuery.query.close(); // Execute the query one more time executeInternal(cachedQuery, queryParameters, flags); } } private void executeInternal(CachedQuery cachedQuery, ParameterList queryParameters, int flags) throws SQLException { closeForNextExecution(); // Enable cursor-based resultset if possible. if (fetchSize > 0 && !wantsScrollableResultSet() && !connection.getAutoCommit() && !wantsHoldableResultSet()) { flags |= QueryExecutor.QUERY_FORWARD_CURSOR; } if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) { flags |= QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS; // If the no results flag is set (from executeUpdate) // clear it so we get the generated keys results. 
// if ((flags & QueryExecutor.QUERY_NO_RESULTS) != 0) { flags &= ~(QueryExecutor.QUERY_NO_RESULTS); } } if (isOneShotQuery(cachedQuery)) { flags |= QueryExecutor.QUERY_ONESHOT; } // Only use named statements after we hit the threshold. Note that only // named statements can be transferred in binary format. if (connection.getAutoCommit()) { flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN; } if (connection.hintReadOnly()) { flags |= QueryExecutor.QUERY_READ_ONLY_HINT; } // updateable result sets do not yet support binary updates if (concurrency != ResultSet.CONCUR_READ_ONLY) { flags |= QueryExecutor.QUERY_NO_BINARY_TRANSFER; } Query queryToExecute = cachedQuery.query; if (queryToExecute.isEmpty()) { flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN; } if (!queryToExecute.isStatementDescribed() && forceBinaryTransfers && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) { // Simple 'Q' execution does not need to know parameter types // When binaryTransfer is forced, then we need to know resulting parameter and column types, // thus sending a describe request. 
int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY; StatementResultHandler handler2 = new StatementResultHandler(this); connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler2, 0, 0, flags2); ResultWrapper result2 = handler2.getResults(); if (result2 != null) { result2.getResultSet().close(); } } StatementResultHandler handler = new StatementResultHandler(this); synchronized (this) { result = null; } try { startTimer(); connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler, maxrows, fetchSize, flags); } finally { killTimerTask(connection.getQueryExecutor().isRingBufferThreadRunning()); } synchronized (this) { checkClosed(); result = firstUnclosedResult = handler.getResults(); if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) { if (result.getNext() != null) { generatedKeys = result; result = result.getNext(); } else { // PreparedStatement set Statement.RETURN_GENERATED_KEYS, but no RETURNING clause in sql generatedKeys = null; } if (wantsGeneratedKeysOnce) { wantsGeneratedKeysOnce = false; } } } } public void setCursorName(String name) throws SQLException { checkClosed(); // No-op. } private volatile boolean isClosed = false; @Override public int getUpdateCount() throws SQLException { synchronized (this) { checkClosed(); if (result == null || result.getResultSet() != null) { return -1; } long count = result.getUpdateCount(); return count > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) count; } } public boolean getMoreResults() throws SQLException { synchronized (this) { checkClosed(); if (result == null) { return false; } result = result.getNext(); // Close preceding resultsets. 
while (firstUnclosedResult != result) { if (firstUnclosedResult.getResultSet() != null) { firstUnclosedResult.getResultSet().close(); } firstUnclosedResult = firstUnclosedResult.getNext(); } return (result != null && result.getResultSet() != null); } } public int getMaxRows() throws SQLException { checkClosed(); return maxrows; } public void setMaxRows(int max) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, max); checkClosed(); if (max < 0) { throw new RedshiftException( GT.tr("Maximum number of rows must be a value grater than or equal to 0."), RedshiftState.INVALID_PARAMETER_VALUE); } maxrows = max; } public void setEscapeProcessing(boolean enable) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, enable); checkClosed(); replaceProcessingEnabled = enable; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } public int getQueryTimeout() throws SQLException { checkClosed(); long seconds = timeout / 1000; if (seconds >= Integer.MAX_VALUE) { return Integer.MAX_VALUE; } return (int) seconds; } public void setQueryTimeout(int seconds) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, seconds); setQueryTimeoutMs(seconds * 1000L); } /** * The queryTimeout limit is the number of milliseconds the driver will wait for a Statement to * execute. If the limit is exceeded, a SQLException is thrown. * * @return the current query timeout limit in milliseconds; 0 = unlimited * @throws SQLException if a database access error occurs */ public long getQueryTimeoutMs() throws SQLException { checkClosed(); return timeout; } /** * Sets the queryTimeout limit. 
* * @param millis - the new query timeout limit in milliseconds * @throws SQLException if a database access error occurs */ public void setQueryTimeoutMs(long millis) throws SQLException { checkClosed(); if (millis < 0) { throw new RedshiftException(GT.tr("Query timeout must be a value greater than or equals to 0."), RedshiftState.INVALID_PARAMETER_VALUE); } timeout = millis; } /** * <p>Either initializes new warning wrapper, or adds warning onto the chain.</p> * * <p>Although warnings are expected to be added sequentially, the warnings chain may be cleared * concurrently at any time via {@link #clearWarnings()}, therefore it is possible that a warning * added via this method is placed onto the end of the previous warning chain</p> * * @param warn warning to add */ public void addWarning(SQLWarning warn) { //copy reference to avoid NPE from concurrent modification of this.warnings final RedshiftWarningWrapper warnWrap = this.warnings; if (warnWrap == null) { this.warnings = new RedshiftWarningWrapper(warn); } else { warnWrap.addWarning(warn); } } public SQLWarning getWarnings() throws SQLException { checkClosed(); //copy reference to avoid NPE from concurrent modification of this.warnings final RedshiftWarningWrapper warnWrap = this.warnings; return warnWrap != null ? warnWrap.getFirstWarning() : null; } @Override public int getMaxFieldSize() throws SQLException { return maxFieldSize; } @Override public void setMaxFieldSize(int max) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, max); checkClosed(); if (max < 0) { throw new RedshiftException( GT.tr("The maximum field size must be a value greater than or equal to 0."), RedshiftState.INVALID_PARAMETER_VALUE); } maxFieldSize = max; } /** * <p>Clears the warning chain.</p> * <p>Note that while it is safe to clear warnings while the query is executing, warnings that are * added between calls to {@link #getWarnings()} and #clearWarnings() may be missed. 
* Therefore you should hold a reference to the tail of the previous warning chain * and verify if its {@link SQLWarning#getNextWarning()} value is holds any new value.</p> */ public void clearWarnings() throws SQLException { warnings = null; } public ResultSet getResultSet() throws SQLException { synchronized (this) { checkClosed(); if (result == null) { return null; } return result.getResultSet(); } } /** * <B>Note:</B> even though {@code Statement} is automatically closed when it is garbage * collected, it is better to close it explicitly to lower resource consumption. * * {@inheritDoc} */ public final void close() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); // closing an already closed Statement is a no-op. synchronized (this) { if (isClosed) { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); return; } isClosed = true; } // The user has closed the statement and does not want to read any more results. We send a cancellation request to // the server. It is possible to have a situation where if the driver's ring buffer is not // reading from the stream and the server's send buffer is full. The server treats this as a client side hang and // closes the connection. To avoid this, we start reading from the stream again, but we throw away the rows we read // because the user no longer needs them. Note, this is a temporary fix until a permanent server-side fix can be // provided. resumeReadAndDiscardResults(); cancel(); closeForNextExecution(); closeImpl(); if (RedshiftLogger.isEnable()) { connection.getLogger().logFunction(false); connection.getLogger().flush(); } } /** * This is guaranteed to be called exactly once even in case of concurrent {@link #close()} calls. 
* @throws SQLException in case of error */ protected void closeImpl() throws SQLException { connection.getQueryExecutor().closeStatementAndPortal(); } /* * * The following methods are postgres extensions and are defined in the interface BaseStatement * */ public long getLastOID() throws SQLException { synchronized (this) { checkClosed(); if (result == null) { return 0; } return result.getInsertOID(); } } @Override public void setPrepareThreshold(int newThreshold) throws SQLException { checkClosed(); if (newThreshold < 0) { forceBinaryTransfers = true; newThreshold = 1; } this.mPrepareThreshold = newThreshold; } @Override public int getPrepareThreshold() { return mPrepareThreshold; } @Override public void setUseServerPrepare(boolean flag) throws SQLException { setPrepareThreshold(flag ? 1 : 0); } void setAutoGeneratedKeys(int autoGeneratedKeys) { this.autoGeneratedKeys = autoGeneratedKeys; } int getAutoGeneratedKeys() { return this.autoGeneratedKeys; } @Override public boolean isUseServerPrepare() { return false; } protected void checkClosed() throws SQLException { if (isClosed()) { throw new RedshiftException(GT.tr("This statement has been closed."), RedshiftState.OBJECT_NOT_IN_STATE); } } // ** JDBC 2 Extensions ** @Override public void addBatch(String sql) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, sql); checkClosed(); if (batchStatements == null) { batchStatements = new ArrayList<Query>(); batchParameters = new ArrayList<ParameterList>(); } // Simple statements should not replace ?, ? 
with $1, $2 boolean shouldUseParameterized = false; CachedQuery cachedQuery = connection.createQuery(sql, replaceProcessingEnabled, shouldUseParameterized); batchStatements.add(cachedQuery.query); batchParameters.add(null); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } @Override public void clearBatch() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); checkClosed(); if (batchStatements != null) { batchStatements.clear(); batchParameters.clear(); } if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } protected BatchResultHandler createBatchHandler(Query[] queries, ParameterList[] parameterLists) { return new BatchResultHandler(this, queries, parameterLists, wantsGeneratedKeysAlways); } private BatchResultHandler internalExecuteBatch() throws SQLException { // Construct query/parameter arrays. transformQueriesAndParameters(); // Empty arrays should be passed to toArray // see http://shipilev.net/blog/2016/arrays-wisdom-ancients/ Query[] queries = batchStatements.toArray(new Query[0]); ParameterList[] parameterLists = batchParameters.toArray(new ParameterList[0]); batchStatements.clear(); batchParameters.clear(); int flags; // Force a Describe before any execution? We need to do this if we're going // to send anything dependent on the Describe results, e.g. binary parameters. boolean preDescribe = false; if (wantsGeneratedKeysAlways) { /* * This batch will return generated keys, tell the executor to expect result rows. We also * force a Describe later so we know the size of the results to expect. * * If the parameter type(s) change between batch entries and the default binary-mode changes * we might get mixed binary and text in a single result set column, which we cannot handle. * To prevent this, disable binary transfer mode in batches that return generated keys. 
See * GitHub issue #267 */ flags = QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS | QueryExecutor.QUERY_NO_BINARY_TRANSFER; } else { // If a batch hasn't specified that it wants generated keys, using the appropriate // Connection.createStatement(...) interfaces, disallow any result set. flags = QueryExecutor.QUERY_NO_RESULTS; } PreferQueryMode preferQueryMode = connection.getPreferQueryMode(); if (preferQueryMode == PreferQueryMode.SIMPLE || (preferQueryMode == PreferQueryMode.EXTENDED_FOR_PREPARED && parameterLists[0] == null)) { flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; } boolean sameQueryAhead = queries.length > 1 && queries[0] == queries[1]; if (!sameQueryAhead // If executing the same query twice in a batch, make sure the statement // is server-prepared. In other words, "oneshot" only if the query is one in the batch // or the queries are different || isOneShotQuery(null)) { flags |= QueryExecutor.QUERY_ONESHOT; } else { // If a batch requests generated keys and isn't already described, // force a Describe of the query before proceeding. That way we can // determine the appropriate size of each batch by estimating the // maximum data returned. Without that, we don't know how many queries // we'll be able to queue up before we risk a deadlock. // (see v3.QueryExecutorImpl's MAX_BUFFERED_RECV_BYTES) // SameQueryAhead is just a quick way to issue pre-describe for batch execution // TODO: It should be reworked into "pre-describe if query has unknown parameter // types and same query is ahead". preDescribe = (wantsGeneratedKeysAlways || sameQueryAhead) && !queries[0].isStatementDescribed(); /* * It's also necessary to force a Describe on the first execution of the new statement, even * though we already described it, to work around bug #267. 
*/ flags |= QueryExecutor.QUERY_FORCE_DESCRIBE_PORTAL; } if (connection.getAutoCommit()) { flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN; } if (connection.hintReadOnly()) { flags |= QueryExecutor.QUERY_READ_ONLY_HINT; } BatchResultHandler handler; handler = createBatchHandler(queries, parameterLists); if ((preDescribe || forceBinaryTransfers) && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) { // Do a client-server round trip, parsing and describing the query so we // can determine its result types for use in binary parameters, batch sizing, // etc. int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY; StatementResultHandler handler2 = new StatementResultHandler(this); try { connection.getQueryExecutor().execute(queries[0], parameterLists[0], handler2, 0, 0, flags2); } catch (SQLException e) { // Unable to parse the first statement -> throw BatchUpdateException handler.handleError(e); handler.handleCompletion(); // Will not reach here (see above) } ResultWrapper result2 = handler2.getResults(); if (result2 != null) { result2.getResultSet().close(); } } synchronized (this) { result = null; } try { startTimer(); connection.getQueryExecutor().execute(queries, parameterLists, handler, maxrows, fetchSize, flags); } finally { killTimerTask(connection.getQueryExecutor().isRingBufferThreadRunning()); // There might be some rows generated even in case of failures synchronized (this) { checkClosed(); if (wantsGeneratedKeysAlways) { generatedKeys = new ResultWrapper(handler.getGeneratedKeys()); } } } return handler; } public int[] executeBatch() throws SQLException { int[] rc; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); checkClosed(); closeForNextExecution(); if (batchStatements == null || batchStatements.isEmpty()) { rc = new int[0]; } else rc = internalExecuteBatch().getUpdateCount(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } public void cancel() throws SQLException { if 
(RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); if (statementState == StatementCancelState.IDLE) { if (RedshiftLogger.isEnable()) { connection.getLogger().logError("statementState is StatementCancelState.IDLE"); connection.getLogger().logFunction(false); } return; } if (!STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY, StatementCancelState.CANCELING)) { // Not in query, there's nothing to cancel if (RedshiftLogger.isEnable()) { connection.getLogger().logError("statementState is not StatementCancelState.IN_QUERY"); connection.getLogger().logFunction(false); } return; } // Synchronize on connection to avoid spinning in killTimerTask synchronized (connection) { try { connection.cancelQuery(); } finally { STATE_UPDATER.set(this, StatementCancelState.CANCELLED); connection.notifyAll(); // wake-up killTimerTask } } if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } public Connection getConnection() throws SQLException { checkClosed(); return connection; } public int getFetchDirection() throws SQLException { checkClosed(); return fetchdirection; } public int getResultSetConcurrency() throws SQLException { checkClosed(); return concurrency; } public int getResultSetType() throws SQLException { checkClosed(); return resultsettype; } public void setFetchDirection(int direction) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, direction); switch (direction) { case ResultSet.FETCH_FORWARD: case ResultSet.FETCH_REVERSE: case ResultSet.FETCH_UNKNOWN: fetchdirection = direction; break; default: throw new RedshiftException(GT.tr("Invalid fetch direction constant: {0}.", direction), RedshiftState.INVALID_PARAMETER_VALUE); } if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } public void setFetchSize(int rows) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, rows); checkClosed(); if (rows < 0) { throw new 
RedshiftException(GT.tr("Fetch size must be a value greater to or equal to 0."), RedshiftState.INVALID_PARAMETER_VALUE); } fetchSize = rows; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } private void startTimer() { /* * there shouldn't be any previous timer active, but better safe than sorry. */ cleanupTimer(); STATE_UPDATER.set(this, StatementCancelState.IN_QUERY); if (timeout == 0) { return; } TimerTask cancelTask = new TimerTask() { public void run() { try { if (!CANCEL_TIMER_UPDATER.compareAndSet(RedshiftStatementImpl.this, this, null)) { // Nothing to do here, statement has already finished and cleared // cancelTimerTask reference return; } RedshiftStatementImpl.this.cancel(); } catch (SQLException e) { } } }; CANCEL_TIMER_UPDATER.set(this, cancelTask); connection.addTimerTask(cancelTask, timeout); } /** * Clears {@link #cancelTimerTask} if any. Returns true if and only if "cancel" timer task would * never invoke {@link #cancel()}. */ private boolean cleanupTimer() { TimerTask timerTask = CANCEL_TIMER_UPDATER.get(this); if (timerTask == null) { // If timeout is zero, then timer task did not exist, so we safely report "all clear" return timeout == 0; } if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) { // Failed to update reference -> timer has just fired, so we must wait for the query state to // become "cancelling". return false; } timerTask.cancel(); connection.purgeTimerTasks(); // All clear return true; } private void killTimerTask(boolean isRingBufferThreadRunning) { boolean timerTaskIsClear = cleanupTimer(); // The order is important here: in case we need to wait for the cancel task, the state must be // kept StatementCancelState.IN_QUERY, so cancelTask would be able to cancel the query. // It is believed that this case is very rare, so "additional cancel and wait below" would not // harm it. 
if (timerTaskIsClear && isRingBufferThreadRunning) return; if (timerTaskIsClear && STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY, StatementCancelState.IDLE)) { return; } if (timerTaskIsClear && (STATE_UPDATER.get(this) == StatementCancelState.IDLE)) return; // Being here means someone managed to call .cancel() and our connection did not receive // "timeout error" // We wait till state becomes "cancelled" boolean interrupted = false; synchronized (connection) { // state check is performed under synchronized so it detects "cancelled" state faster // In other words, it prevents unnecessary ".wait()" call while (!STATE_UPDATER.compareAndSet(this, StatementCancelState.CANCELLED, StatementCancelState.IDLE)) { try { // Note: wait timeout here is irrelevant since synchronized(connection) would block until // .cancel finishes connection.wait(10); } catch (InterruptedException e) { // NOSONAR // Either re-interrupt this method or rethrow the "InterruptedException" interrupted = true; } } } if (interrupted) { Thread.currentThread().interrupt(); } } protected boolean getForceBinaryTransfer() { return forceBinaryTransfers; } //JCP! 
if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"

  /**
   * Returns the current result as a {@code long} update count.
   *
   * @return the update count, or -1 if the current result is a ResultSet or there is no result
   * @throws SQLException if the statement is closed
   */
  @Override
  public long getLargeUpdateCount() throws SQLException {
    synchronized (this) {
      checkClosed();
      if (result == null || result.getResultSet() != null) {
        return -1;
      }
      return result.getUpdateCount();
    }
  }

  /** Not implemented by this driver. ({@code @Override} added for consistency with siblings.) */
  @Override
  public void setLargeMaxRows(long max) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setLargeMaxRows");
  }

  /** Not implemented by this driver. */
  @Override
  public long getLargeMaxRows() throws SQLException {
    throw Driver.notImplemented(this.getClass(), "getLargeMaxRows");
  }

  /**
   * Executes the pending batch, returning per-statement update counts as longs.
   *
   * @return one update count per batched statement; empty array if the batch is empty
   * @throws SQLException on execution failure
   */
  @Override
  public long[] executeLargeBatch() throws SQLException {
    long[] rc;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true);

    checkClosed();
    closeForNextExecution();

    if (batchStatements == null || batchStatements.isEmpty()) {
      rc = new long[0];
    } else
      rc = internalExecuteBatch().getLargeUpdateCount();

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }

  @Override
  public long executeLargeUpdate(String sql) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, sql);

    // Suppress result rows: an update statement must not return a ResultSet.
    executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS);
    checkNoResultUpdate();
    long rc = getLargeUpdateCount();

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }

  @Override
  public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
    long rc;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, sql, autoGeneratedKeys);

    this.autoGeneratedKeys = autoGeneratedKeys;

    if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
      rc = executeLargeUpdate(sql);
    } else {
      // Delegate to the String[]-column variant with a null column list.
      rc = executeLargeUpdate(sql, (String[]) null);
    }

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }

  @Override
  public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, sql, columnIndexes);

    // Only a null or empty index array is accepted; returning generated keys
    // by column index is not supported by this driver.
    if (columnIndexes == null || columnIndexes.length == 0) {
      long rc = executeLargeUpdate(sql);

      if (RedshiftLogger.isEnable())
        connection.getLogger().logFunction(false, rc);

      return rc;
    }

    throw new RedshiftException(GT.tr("Returning autogenerated keys by column index is not supported."),
        RedshiftState.NOT_IMPLEMENTED);
  }

  @Override
  public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException {
    long rc;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, sql, columnNames);

    if (columnNames == null || columnNames.length == 0) {
      if (this.autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS)
        wantsGeneratedKeysOnce = true;
      rc = executeLargeUpdate(sql);
    } else {
//      throw new RedshiftException(GT.tr("Returning autogenerated keys by column name is not supported."),
//          RedshiftState.NOT_IMPLEMENTED);
      wantsGeneratedKeysOnce = true;
      if (!executeCachedSql(sql, 0, columnNames)) {
        // no resultset returned. What's a pity!
      }
      rc = getLargeUpdateCount();
    }

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }
//JCP!
endif public boolean isClosed() throws SQLException { return isClosed; } public void setPoolable(boolean poolable) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, poolable); checkClosed(); this.poolable = poolable; } public boolean isPoolable() throws SQLException { checkClosed(); return poolable; } public boolean isWrapperFor(Class<?> iface) throws SQLException { return iface.isAssignableFrom(getClass()); } public <T> T unwrap(Class<T> iface) throws SQLException { if (iface.isAssignableFrom(getClass())) { return iface.cast(this); } throw new SQLException("Cannot unwrap to " + iface.getName()); } public void closeOnCompletion() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); checkClosed(); closeOnCompletion = true; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false); } public boolean isCloseOnCompletion() throws SQLException { checkClosed(); return closeOnCompletion; } protected void checkCompletion() throws SQLException { if (!closeOnCompletion) { return; } synchronized (this) { ResultWrapper result = firstUnclosedResult; while (result != null) { if (result.getResultSet() != null && !result.getResultSet().isClosed()) { return; } result = result.getNext(); } } // prevent all ResultSet.close arising from Statement.close to loop here closeOnCompletion = false; try { close(); } finally { // restore the status if one rely on isCloseOnCompletion closeOnCompletion = true; } } public boolean getMoreResults(int current) throws SQLException { synchronized (this) { checkClosed(); // CLOSE_CURRENT_RESULT if (current == Statement.CLOSE_CURRENT_RESULT && result != null && result.getResultSet() != null) { result.getResultSet().close(); } // Advance resultset. if (result != null) { result = result.getNext(); } // CLOSE_ALL_RESULTS if (current == Statement.CLOSE_ALL_RESULTS) { // Close preceding resultsets. 
while (firstUnclosedResult != result) { if (firstUnclosedResult.getResultSet() != null) { firstUnclosedResult.getResultSet().close(); } firstUnclosedResult = firstUnclosedResult.getNext(); } } // Done. return (result != null && result.getResultSet() != null); } } public ResultSet getGeneratedKeys() throws SQLException { synchronized (this) { checkClosed(); if (generatedKeys == null || generatedKeys.getResultSet() == null) { return createDriverResultSet(new Field[0], new ArrayList<Tuple>()); } return generatedKeys.getResultSet(); } } public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { int rc; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, sql, autoGeneratedKeys); this.autoGeneratedKeys = autoGeneratedKeys; if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { rc = executeUpdate(sql); } else { rc = executeUpdate(sql, (String[]) null); } if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, sql, columnIndexes); if (columnIndexes == null || columnIndexes.length == 0) { int rc = executeUpdate(sql); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } throw new RedshiftException(GT.tr("Returning autogenerated keys by column index is not supported."), RedshiftState.NOT_IMPLEMENTED); } public int executeUpdate(String sql, String[] columnNames) throws SQLException { int rc; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, sql, columnNames); if (columnNames == null || columnNames.length == 0) { if(this.autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS) wantsGeneratedKeysOnce = true; rc = executeUpdate(sql); } else { // throw new RedshiftException(GT.tr("Returning autogenerated keys by column index is not supported."), // RedshiftState.NOT_IMPLEMENTED); 
      // Column-name path: request generated keys and run through the cached-SQL
      // execution so the RETURNING result is captured.
      wantsGeneratedKeysOnce = true;
      if (!executeCachedSql(sql, 0, columnNames)) {
        // no resultset returned. What's a pity!
      }
      rc = getUpdateCount();
    }

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }

  // Delegates to execute(sql) or to the String[]-column variant depending on
  // whether generated keys were requested.
  public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, QuerySanitizer.filterCredentials(sql), autoGeneratedKeys);

    if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
      return execute(sql);
    }

    return execute(sql, (String[]) null);
  }

  public boolean execute(String sql, int[] columnIndexes) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, QuerySanitizer.filterCredentials(sql), columnIndexes);

    // NOTE(review): only a non-null EMPTY array falls through to execute(sql) here,
    // so execute(sql, (int[]) null) throws NOT_IMPLEMENTED — the sibling
    // executeUpdate(String, int[]) treats null the same as empty. Looks
    // inconsistent; confirm intended behavior before changing.
    if (columnIndexes != null && columnIndexes.length == 0) {
      return execute(sql);
    }

    throw new RedshiftException(GT.tr("Returning autogenerated keys by column index is not supported."),
        RedshiftState.NOT_IMPLEMENTED);
  }

  public boolean execute(String sql, String[] columnNames) throws SQLException {
    boolean rc;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, QuerySanitizer.filterCredentials(sql), columnNames);

    if (columnNames == null || columnNames.length == 0) {
      rc = execute(sql);
    } else {
//      throw new RedshiftException(GT.tr("Returning autogenerated keys by column name is not supported."),
//          RedshiftState.NOT_IMPLEMENTED);
      // Column names supplied: request generated keys for this execution only.
      wantsGeneratedKeysOnce = true;
      rc = executeCachedSql(sql, 0, columnNames);
    }

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }

  public int getResultSetHoldability() throws SQLException {
    checkClosed();
    return rsHoldability;
  }

  // Builds an in-memory driver-side ResultSet (no server round trip).
  public ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples)
      throws SQLException {
    return createResultSet(null, fields, tuples, null, null, null, null);
  }

  // Hook for subclasses; intentionally a no-op here.
  protected void transformQueriesAndParameters() throws SQLException {
  }

  // Resumes the ring-buffer reader in "skip rows" mode so the server's send
  // buffer can drain after the user abandons the result set; see close().
  private void resumeReadAndDiscardResults() {
    if (connection.getQueryExecutor().isRingBufferThreadRunning()) {
      RedshiftResultSet rs = null;

      if (firstUnclosedResult != null) {
        rs = (RedshiftResultSet) firstUnclosedResult.getResultSet();
      }

      if (rs != null && rs.queueRows != null) {
        boolean endOfResult = rs.queueRows.endOfResult();

        if (!endOfResult) {
          rs.queueRows.setSkipRows();
          // We sleep here for 2 seconds to give the ring buffer adequate time to start reading results again once it wakes up,
          // and the server adequate time to detect that send buffer is no longer full
          try {
            Thread.sleep(2000);
          } catch (InterruptedException e) {
            // NOTE(review): interrupt is swallowed without re-interrupting the
            // thread; best-effort wait, but consider Thread.currentThread().interrupt().
          }
        }
      }
    }
  }
}
8,529
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftParameterMetaData.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.sql.ParameterMetaData;
import java.sql.SQLException;

/**
 * {@link ParameterMetaData} implementation backed by the parameter type OIDs
 * the server reported for a prepared statement. Type questions are answered by
 * translating an OID through the connection's {@code TypeInfo} cache.
 */
public class RedshiftParameterMetaData implements ParameterMetaData {

  // Connection used to translate type OIDs into JDBC/Java type information.
  private final BaseConnection connection;
  // Server-reported parameter type OIDs, one per parameter, in statement order.
  private final int[] oids;

  public RedshiftParameterMetaData(BaseConnection connection, int[] oids) {
    this.connection = connection;
    this.oids = oids;
  }

  @Override
  public String getParameterClassName(int param) throws SQLException {
    checkParamIndex(param);
    return connection.getTypeInfo().getJavaClass(oids[param - 1]);
  }

  @Override
  public int getParameterCount() {
    int rc = oids.length;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }

  /**
   * {@inheritDoc} For now report all parameters as inputs. CallableStatements may have one output,
   * but ignore that for now.
   */
  @Override
  public int getParameterMode(int param) throws SQLException {
    checkParamIndex(param);
    return ParameterMetaData.parameterModeIn;
  }

  @Override
  public int getParameterType(int param) throws SQLException {
    checkParamIndex(param);
    return connection.getTypeInfo().getSQLType(oids[param - 1]);
  }

  @Override
  public String getParameterTypeName(int param) throws SQLException {
    checkParamIndex(param);
    return connection.getTypeInfo().getRSType(oids[param - 1]);
  }

  // we don't know this
  @Override
  public int getPrecision(int param) throws SQLException {
    checkParamIndex(param);
    return 0;
  }

  // we don't know this
  @Override
  public int getScale(int param) throws SQLException {
    checkParamIndex(param);
    return 0;
  }

  // we can't tell anything about nullability
  @Override
  public int isNullable(int param) throws SQLException {
    checkParamIndex(param);
    return ParameterMetaData.parameterNullableUnknown;
  }

  /**
   * {@inheritDoc} Redshift doesn't have unsigned numbers
   */
  @Override
  public boolean isSigned(int param) throws SQLException {
    checkParamIndex(param);
    return connection.getTypeInfo().isSigned(oids[param - 1]);
  }

  /**
   * Validates a 1-based parameter index against the known parameter count.
   *
   * @param param 1-based parameter index
   * @throws RedshiftException if {@code param} is out of range
   */
  private void checkParamIndex(int param) throws RedshiftException {
    if (param < 1 || param > oids.length) {
      throw new RedshiftException(
          GT.tr("The parameter index is out of range: {0}, number of parameters: {1}.", param,
              oids.length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }
  }

  @Override
  public boolean isWrapperFor(Class<?> iface) throws SQLException {
    return iface.isAssignableFrom(getClass());
  }

  @Override
  public <T> T unwrap(Class<T> iface) throws SQLException {
    if (iface.isAssignableFrom(getClass())) {
      return iface.cast(this);
    }
    throw new SQLException("Cannot unwrap to " + iface.getName());
  }
}
8,530
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftResultSet.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.RedshiftResultSetMetaData; import com.amazon.redshift.RedshiftStatement; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.core.BaseStatement; import com.amazon.redshift.core.Encoding; import com.amazon.redshift.core.Field; import com.amazon.redshift.core.Oid; import com.amazon.redshift.core.Query; import com.amazon.redshift.core.ResultCursor; import com.amazon.redshift.core.ResultHandlerBase; import com.amazon.redshift.core.Tuple; import com.amazon.redshift.core.TypeInfo; import com.amazon.redshift.core.Utils; import com.amazon.redshift.core.v3.MessageLoopState; import com.amazon.redshift.core.v3.RedshiftRowsBlockingQueue; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.ByteConverter; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.HStoreConverter; import com.amazon.redshift.util.RedshiftBytea; import com.amazon.redshift.util.RedshiftObject; import com.amazon.redshift.util.RedshiftTokenizer; import com.amazon.redshift.util.RedshiftVarbyte; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftGeography; import com.amazon.redshift.util.RedshiftGeometry; import com.amazon.redshift.util.RedshiftIntervalYearToMonth; import com.amazon.redshift.util.RedshiftIntervalDayToSecond; import com.amazon.redshift.util.RedshiftState; import java.io.ByteArrayInputStream; import java.io.CharArrayReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; import java.net.InetAddress; import java.net.UnknownHostException; import java.sql.Array; 
import java.sql.Blob; import java.sql.Clob; import java.sql.Date; import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.Ref; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; import java.sql.SQLWarning; import java.sql.SQLXML; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.OffsetDateTime; //JCP! endif import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.StringTokenizer; import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.TimeUnit; public class RedshiftResultSet implements ResultSet, com.amazon.redshift.RedshiftRefCursorResultSet { // needed for updateable result set support private boolean updateable = false; private boolean doingUpdates = false; private HashMap<String, Object> updateValues = null; private boolean usingOID = false; // are we using the OID for the primary key? 
private List<PrimaryKey> primaryKeys; // list of primary keys private boolean singleTable = false; private String onlyTable = ""; private String tableName = null; private PreparedStatement updateStatement = null; private PreparedStatement insertStatement = null; private PreparedStatement deleteStatement = null; private PreparedStatement selectStatement = null; private final int resultsettype; private final int resultsetconcurrency; private int fetchdirection = ResultSet.FETCH_UNKNOWN; private TimeZone defaultTimeZone; protected final BaseConnection connection; // the connection we belong to protected final BaseStatement statement; // the statement we belong to protected final Field[] fields; // Field metadata for this resultset. protected final Query originalQuery; // Query we originated from protected final int maxRows; // Maximum rows in this resultset (might be 0). protected final int maxFieldSize; // Maximum field size in this resultset (might be 0). protected List<Tuple> rows; // Current page of results. protected RedshiftRowsBlockingQueue<Tuple> queueRows; // Results in a blocking queue. protected int[] rowCount; protected Thread ringBufferThread; protected int currentRow = -1; // Index into 'rows' of our currrent row (0-based) protected int rowOffset; // Offset of row 0 in the actual resultset protected Tuple thisRow; // copy of the current result row protected SQLWarning warnings = null; // The warning chain /** * True if the last obtained column value was SQL NULL as specified by {@link #wasNull}. The value * is always updated by the {@link #checkResultSet} method. */ protected boolean wasNullFlag = false; protected boolean onInsertRow = false; // are we on the insert row (for JDBC2 updatable resultsets)? private Tuple rowBuffer = null; // updateable rowbuffer protected int fetchSize; // Current fetch size (might be 0). protected ResultCursor cursor; // Cursor for fetching additional data. 
// Cache of column-name -> column-index lookups used by findColumn().
private Map<String, Integer> columnNameIndexMap; // Speed up findColumn by caching lookups

// Lazily created metadata describing this result set's columns; see getMetaData().
private ResultSetMetaData rsMetaData;

// Factory hook: subclasses may override to supply a custom ResultSetMetaData implementation.
protected ResultSetMetaData createMetaData() throws SQLException {
  return new RedshiftResultSetMetaDataImpl(connection, fields);
}

/**
 * Returns the metadata for this result set, creating it on first use.
 *
 * @throws SQLException if the result set is closed
 */
public ResultSetMetaData getMetaData() throws SQLException {
  checkClosed();
  if (rsMetaData == null) {
    rsMetaData = createMetaData();
  }
  return rsMetaData;
}

/**
 * Builds a result set backed either by an in-memory page of tuples ({@code tuples}) or by a
 * ring-buffer queue filled from a background fetch thread ({@code queueTuples}); exactly one
 * of the two must be non-null. {@code rsHoldability} is accepted for signature compatibility
 * but not retained here.
 */
RedshiftResultSet(Query originalQuery, BaseStatement statement, Field[] fields,
    List<Tuple> tuples, ResultCursor cursor, int maxRows, int maxFieldSize, int rsType,
    int rsConcurrency, int rsHoldability, RedshiftRowsBlockingQueue<Tuple> queueTuples,
    int[] rowCount, Thread ringBufferThread) throws SQLException {
  // Fail-fast on invalid null inputs
  if (tuples == null && queueTuples == null) {
    throw new NullPointerException("tuples or queueTuples must be non-null");
  }
  if (fields == null) {
    throw new NullPointerException("fields must be non-null");
  }
  this.originalQuery = originalQuery;
  this.connection = (BaseConnection) statement.getConnection();
  this.statement = statement;
  this.fields = fields;
  this.rows = tuples;
  this.cursor = cursor;
  this.maxRows = maxRows;
  this.maxFieldSize = maxFieldSize;
  this.resultsettype = rsType;
  this.resultsetconcurrency = rsConcurrency;
  this.queueRows = queueTuples;
  this.rowCount = rowCount;
  this.ringBufferThread = ringBufferThread;
}

/**
 * Returns the number of rows in the result set. The value returned is undefined if the row
 * count is unknown.
 * <p>
 * The value returned must fit into a 32-bit integer when targeting a 32-bit platform, or a
 * 64-bit integer for a 64-bit platform. The value must be non-negative, except if the row count
 * is unknown.
 * <p>
 *
 * This is not a JDBC specification method.
 * This is for backward compatibility only.
 *
 * @return Number of rows in the result set, or -1 when the count is unknown.
 * @throws SQLException
 *           If an error occurs.
 */
public long getRowCount() throws SQLException {
  checkClosed();
  // rowCount is a one-element array shared with the fetch machinery; null means "unknown".
  return (rowCount != null) ?
      rowCount[0] : -1;
}

// getURL is not implemented by this driver; the call is logged and then rejected.
public java.net.URL getURL(int columnIndex) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().log(LogLevel.DEBUG, " getURL columnIndex: {0}", columnIndex);
  checkClosed();
  throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getURL(int)");
}

public java.net.URL getURL(String columnName) throws SQLException {
  return getURL(findColumn(columnName));
}

/**
 * Converts the current row's value in the given column to the default Java object for its
 * SQL type (used by getObject()). Returns null from the default branch to let JDBC3
 * overrides decide for types not handled here.
 */
protected Object internalGetObject(int columnIndex, Field field) throws SQLException {
  switch (getSQLType(columnIndex)) {
    case Types.BOOLEAN:
    case Types.BIT:
      return getBoolean(columnIndex);
    case Types.SQLXML:
      return getSQLXML(columnIndex);
    case Types.TINYINT:
    case Types.SMALLINT:
    case Types.INTEGER:
      return getInt(columnIndex);
    case Types.BIGINT:
      return getLong(columnIndex);
    case Types.NUMERIC:
    case Types.DECIMAL:
      // The scale is packed into the type modifier as ((mod - 4) & 0xffff); -1 = unspecified.
      return getNumeric(columnIndex,
          (field.getMod() == -1) ? -1 : ((field.getMod() - 4) & 0xffff), true);
    case Types.REAL:
      return getFloat(columnIndex);
    case Types.FLOAT:
    case Types.DOUBLE:
      return getDouble(columnIndex);
    case Types.CHAR:
    case Types.VARCHAR:
    case Types.LONGVARCHAR:
      return getString(columnIndex);
    case Types.DATE:
      return getDate(columnIndex);
    case Types.TIME:
      return getTime(columnIndex);
    case Types.TIMESTAMP:
      return getTimestamp(columnIndex, null);
    case Types.BINARY:
    case Types.VARBINARY:
    case Types.LONGVARBINARY:
      return getBytes(columnIndex);
    case Types.ARRAY:
      return getArray(columnIndex);
    case Types.CLOB:
      return getClob(columnIndex);
    case Types.BLOB:
      return getBlob(columnIndex);
    case Types.OTHER:
      if (field.getOID() == Oid.INTERVALY2M)
        return getIntervalYearToMonth(columnIndex);
      if (field.getOID() == Oid.INTERVALD2S)
        return getIntervalDayToSecond(columnIndex);
      // NOTE: intentional fall-through into default for other OTHER-typed columns.
    default:
      String type = getRSType(columnIndex);

      // if the backend doesn't know the type then coerce to String
      if (type.equals("unknown")) {
        return getString(columnIndex);
      }

      if (type.equals("uuid")) {
        if (isBinary(columnIndex)) {
          return getUUID(thisRow.get(columnIndex - 1));
        }
        return
      getUUID(getString(columnIndex));
      }

      // Specialized support for ref cursors is neater.
      if (type.equals("refcursor")) {
        // Fetch all results.
        String cursorName = getString(columnIndex);

        StringBuilder sb = new StringBuilder("FETCH ALL IN ");
        Utils.escapeIdentifier(sb, cursorName);

        // nb: no BEGIN triggered here. This is fine. If someone
        // committed, and the cursor was not holdable (closing the
        // cursor), we avoid starting a new xact and promptly causing
        // it to fail. If the cursor *was* holdable, we don't want a
        // new xact anyway since holdable cursor state isn't affected
        // by xact boundaries. If our caller didn't commit at all, or
        // autocommit was on, then we wouldn't issue a BEGIN anyway.
        //
        // We take the scrollability from the statement, but until
        // we have updatable cursors it must be readonly.
        ResultSet rs =
            connection.execSQLQuery(sb.toString(), resultsettype, ResultSet.CONCUR_READ_ONLY);
        //
        // In long running transactions these backend cursors take up memory space
        // we could close in rs.close(), but if the transaction is closed before the result set,
        // then
        // the cursor no longer exists
        sb.setLength(0);
        sb.append("CLOSE ");
        Utils.escapeIdentifier(sb, cursorName);
        connection.execSQLUpdate(sb.toString());
        ((RedshiftResultSet) rs).setRefCursor(cursorName);
        return rs;
      }
      if ("hstore".equals(type)) {
        if (isBinary(columnIndex)) {
          return HStoreConverter.fromBytes(thisRow.get(columnIndex - 1), connection.getEncoding());
        }
        return HStoreConverter.fromString(getString(columnIndex));
      }

      // Caller determines what to do (JDBC3 overrides in this case)
      return null;
  }
}

// Throws if cursor-movement methods are invoked on a FORWARD_ONLY result set.
private void checkScrollable() throws SQLException {
  checkClosed();
  if (resultsettype == ResultSet.TYPE_FORWARD_ONLY) {
    throw new RedshiftException(
        GT.tr("Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."),
        RedshiftState.INVALID_CURSOR_STATE);
  }
}

/**
 * Moves to the given absolute row (1-based; negative counts from the end). Positions
 * before-first / after-last and returns false when the index is out of range.
 */
@Override
public boolean absolute(int index) throws SQLException {
  checkScrollable();

  // index is 1-based, but internally we use 0-based indices
  int internalIndex;

  if (index == 0) {
    beforeFirst();
    return false;
  }

  final int rows_size = rows.size();

  // if index<0, count from the end of the result set, but check
  // to be sure that it is not beyond the first index
  if (index < 0) {
    if (index >= -rows_size) {
      internalIndex = rows_size + index;
    } else {
      beforeFirst();
      return false;
    }
  } else {
    // must be the case that index>0,
    // find the correct place, assuming that
    // the index is not too large
    if (index <= rows_size) {
      internalIndex = index - 1;
    } else {
      afterLast();
      return false;
    }
  }

  currentRow = internalIndex;
  initRowBuffer();
  onInsertRow = false;

  return true;
}

// Positions the cursor just past the last row; clears the current-row buffers.
@Override
public void afterLast() throws SQLException {
  checkScrollable();

  final int rows_size = rows.size();
  if (rows_size > 0) {
    currentRow = rows_size;
  }

  onInsertRow = false;
  thisRow = null;
  rowBuffer = null;
}

// Positions the cursor just before the first row; clears the current-row buffers.
@Override
public void beforeFirst() throws SQLException {
  checkScrollable();

  if (!rows.isEmpty()) {
    currentRow = -1;
  }

  onInsertRow = false;
  thisRow = null;
  rowBuffer = null;
}

// Moves to the first row; returns false when the result set is empty.
@Override
public boolean first() throws SQLException {
  checkScrollable();

  if (rows.size() <= 0) {
    return false;
  }

  currentRow = 0;
  initRowBuffer();
  onInsertRow = false;

  return true;
}

@Override
public Array getArray(String colName) throws SQLException {
  return getArray(findColumn(colName));
}

// Factory hooks so subclasses can substitute their own Array implementation.
protected Array makeArray(int oid, byte[] value) throws SQLException {
  return new RedshiftArray(connection, oid, value);
}

protected Array makeArray(int oid, String value) throws SQLException {
  return new RedshiftArray(connection, oid, value);
}

// Returns the column as a SQL ARRAY, using the binary or text wire value as available.
@Override
public Array getArray(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  int oid = fields[i - 1].getOID();
  if (isBinary(i)) {
    return makeArray(oid, thisRow.get(i - 1));
  }
  return makeArray(oid, getFixedString(i));
}

// Scale of -1 means "use the value's own scale".
public java.math.BigDecimal getBigDecimal(int columnIndex) throws SQLException {
  return getBigDecimal(columnIndex, -1);
}

public java.math.BigDecimal getBigDecimal(String
    columnName) throws SQLException {
  return getBigDecimal(findColumn(columnName));
}

public Blob getBlob(String columnName) throws SQLException {
  return getBlob(findColumn(columnName));
}

// Factory hook so subclasses can substitute their own Blob implementation.
protected Blob makeBlob(long oid) throws SQLException {
  return new RedshiftBlob(connection, oid);
}

// The column value is interpreted as a large-object OID backing the Blob.
public Blob getBlob(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  return makeBlob(getLong(i));
}

public java.io.Reader getCharacterStream(String columnName) throws SQLException {
  return getCharacterStream(findColumn(columnName));
}

public java.io.Reader getCharacterStream(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  // Version 7.2 supports AsciiStream for all the RS text types
  // As the spec/javadoc for this method indicate this is to be used for
  // large text values (i.e. LONGVARCHAR) RS doesn't have a separate
  // long string datatype, but with toast the text datatype is capable of
  // handling very large values. Thus the implementation ends up calling
  // getString() since there is no current way to stream the value from the server
  return new CharArrayReader(getString(i).toCharArray());
}

public Clob getClob(String columnName) throws SQLException {
  return getClob(findColumn(columnName));
}

// Factory hook so subclasses can substitute their own Clob implementation.
protected Clob makeClob(long oid) throws SQLException {
  return new RedshiftClob(connection, oid);
}

// The column value is interpreted as a large-object OID backing the Clob.
public Clob getClob(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  return makeClob(getLong(i));
}

public int getConcurrency() throws SQLException {
  checkClosed();
  return resultsetconcurrency;
}

/**
 * Returns the column as a java.sql.Date, interpreting timezone-less values in the given
 * calendar's zone (driver default calendar when {@code cal} is null).
 */
@Override
public java.sql.Date getDate(int i, java.util.Calendar cal) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  if (cal == null) {
    cal = getDefaultCalendar();
  }
  if (isBinary(i)) {
    int col = i - 1;
    int oid = fields[col].getOID();
    TimeZone tz = cal.getTimeZone();
    if (oid == Oid.DATE) {
      return connection.getTimestampUtils().toDateBin(tz, thisRow.get(col));
    } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
      // If backend provides just TIMESTAMP, we use "cal" timezone
      // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
      Timestamp timestamp = getTimestamp(i, cal);
      // Here we just truncate date to 00:00 in a given time zone
      return connection.getTimestampUtils().convertToDate(timestamp.getTime(), tz);
    } else {
      return connection.getTimestampUtils().toDate(cal, getString(i));
    }
  }

  return connection.getTimestampUtils().toDate(cal, getString(i));
}

/**
 * Returns the column as a java.sql.Time. For TIMESTAMP/TIMESTAMPTZ columns the date part is
 * truncated; other non-time types raise DATA_TYPE_MISMATCH.
 */
@Override
public Time getTime(int i, java.util.Calendar cal) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  if (cal == null) {
    cal = getDefaultCalendar();
  }
  if (isBinary(i)) {
    int col = i - 1;
    int oid = fields[col].getOID();
    TimeZone tz = cal.getTimeZone();
    if (oid == Oid.TIME || oid == Oid.TIMETZ) {
      return connection.getTimestampUtils().toTimeBin(tz, thisRow.get(col));
    } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
      // If backend provides just TIMESTAMP, we use "cal" timezone
      // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
      Timestamp timestamp = getTimestamp(i, cal);
      long timeMillis = timestamp.getTime();
      if (oid == Oid.TIMESTAMPTZ) {
        // time zone == UTC since BINARY "timestamp with time zone" is always sent in UTC
        // So we truncate days
        return new Time(timeMillis % TimeUnit.DAYS.toMillis(1));
      }
      // Here we just truncate date part
      return connection.getTimestampUtils().convertToTime(timeMillis, tz);
    } else {
      throw new RedshiftException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "time"),
          RedshiftState.DATA_TYPE_MISMATCH);
    }
  }

  String string = getString(i);
  return connection.getTimestampUtils().toTime(cal, string);
}

//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
// Binary-only conversion to LocalTime; only the TIME oid is accepted.
private LocalTime getLocalTime(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  if (isBinary(i)) {
    int col = i - 1;
    int oid = fields[col].getOID();
    if (oid == Oid.TIME) {
      return connection.getTimestampUtils().toLocalTimeBin(thisRow.get(col));
    } else {
      throw new RedshiftException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "time"),
          RedshiftState.DATA_TYPE_MISMATCH);
    }
  }

  String string = getString(i);
  return connection.getTimestampUtils().toLocalTime(string);
}
//JCP! endif

/**
 * Returns the column as a Timestamp. TIME/TIMETZ and DATE values are widened per the JDBC
 * spec; binary TIMESTAMP(TZ)/ABSTIME values are decoded directly.
 */
@Override
public Timestamp getTimestamp(int i, java.util.Calendar cal) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  if (cal == null) {
    cal = getDefaultCalendar();
  }
  int col = i - 1;
  int oid = fields[col].getOID();

  if (isBinary(i)) {
    if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP || oid == Oid.ABSTIMEOID) {
      boolean hasTimeZone = oid == Oid.TIMESTAMPTZ;
      TimeZone tz = cal.getTimeZone();
      if(oid == Oid.ABSTIMEOID)
        return connection.getTimestampUtils().toTimestampAbsTimeBin(tz, thisRow.get(col), hasTimeZone, cal);
      else
        return connection.getTimestampUtils().toTimestampBin(tz, thisRow.get(col), hasTimeZone, cal);
    } else {
      // JDBC spec says getTimestamp of Time and Date must be supported
      long millis;
      if (oid == Oid.TIME || oid == Oid.TIMETZ) {
        millis = getTime(i, cal).getTime();
        return new Timestamp(millis);
      } else if (oid == Oid.DATE) {
        millis = getDate(i, cal).getTime();
        return new Timestamp(millis);
      }
    }
  }

  // If this is actually a timestamptz, the server-provided timezone will override
  // the one we pass in, which is the desired behaviour. Otherwise, we'll
  // interpret the timezone-less value in the provided timezone.
  String string = getString(i);
  if (oid == Oid.TIME || oid == Oid.TIMETZ) {
    // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
    return new Timestamp(connection.getTimestampUtils().toTime(cal, string).getTime());
  }
  return connection.getTimestampUtils().toTimestamp(cal, string);
}

//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
// Converts TIMESTAMPTZ/TIMESTAMP/TIMETZ columns to OffsetDateTime (JDBC 4.2 getObject path).
private OffsetDateTime getOffsetDateTime(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  int col = i - 1;
  int oid = fields[col].getOID();

  if (isBinary(i)) {
    if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
      return connection.getTimestampUtils().toOffsetDateTimeBin(thisRow.get(col));
    } else if (oid == Oid.TIMETZ) {
      // JDBC spec says timetz must be supported
      Time time = getTime(i);
      return connection.getTimestampUtils().toOffsetDateTime(time);
    } else {
      throw new RedshiftException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "timestamptz"),
          RedshiftState.DATA_TYPE_MISMATCH);
    }
  }

  // If this is actually a timestamptz, the server-provided timezone will override
  // the one we pass in, which is the desired behaviour. Otherwise, we'll
  // interpret the timezone-less value in the provided timezone.
  String string = getString(i);
  if (oid == Oid.TIMETZ) {
    // JDBC spec says timetz must be supported
    // If server sends us a TIMETZ, we ensure java counterpart has date of 1970-01-01
    Calendar cal = getDefaultCalendar();
    Time time = connection.getTimestampUtils().toTime(cal, string);
    return connection.getTimestampUtils().toOffsetDateTime(time);
  }
  return connection.getTimestampUtils().toOffsetDateTime(string);
}

// Converts a TIMESTAMP column (only) to LocalDateTime (JDBC 4.2 getObject path).
private LocalDateTime getLocalDateTime(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }

  int col = i - 1;
  int oid = fields[col].getOID();
  if (oid != Oid.TIMESTAMP) {
    throw new RedshiftException(
        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
            Oid.toString(oid), "timestamp"),
        RedshiftState.DATA_TYPE_MISMATCH);
  }
  if (isBinary(i)) {
    return connection.getTimestampUtils().toLocalDateTimeBin(thisRow.get(col));
  }

  String string = getString(i);
  return connection.getTimestampUtils().toLocalDateTime(string);
}
//JCP! endif

public java.sql.Date getDate(String c, java.util.Calendar cal) throws SQLException {
  return getDate(findColumn(c), cal);
}

public Time getTime(String c, java.util.Calendar cal) throws SQLException {
  return getTime(findColumn(c), cal);
}

public Timestamp getTimestamp(String c, java.util.Calendar cal) throws SQLException {
  return getTimestamp(findColumn(c), cal);
}

public int getFetchDirection() throws SQLException {
  checkClosed();
  return fetchdirection;
}

public Object getObjectImpl(String columnName, Map<String, Class<?>> map) throws SQLException {
  return getObjectImpl(findColumn(columnName), map);
}

/*
 * This checks against map for the type of column i, and if found returns an object based on that
 * mapping. The class must implement the SQLData interface.
 */
public Object getObjectImpl(int i, Map<String, Class<?>> map) throws SQLException {
  checkClosed();
  if (map == null || map.isEmpty()) {
    return getObject(i);
  }
  // Custom type maps are not supported.
  throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
}

public Ref getRef(String columnName) throws SQLException {
  return getRef(findColumn(columnName));
}

public Ref getRef(int i) throws SQLException {
  checkClosed();
  // The backend doesn't yet have SQL3 REF types
  throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getRef(int)");
}

/**
 * Returns the 1-based row number, or 0 when on the insert row or outside the result.
 * Ring-buffer mode tracks position via the queue; list mode via rowOffset + currentRow.
 */
@Override
public int getRow() throws SQLException {
  checkClosed();

  if (onInsertRow) {
    return 0;
  }

  if (queueRows != null) {
    int rowIndex = queueRows.getCurrentRowIndex();
    if (rowIndex < 0 || rowIndex >= getRowCount())
      return 0;
    else
      return rowIndex + 1;
  } else {
    final int rows_size = rows.size();

    if (currentRow < 0 || currentRow >= rows_size) {
      return 0;
    }

    return rowOffset + currentRow + 1;
  }
}

// This one needs some thought, as not all ResultSets come from a statement
public Statement getStatement() throws SQLException {
  checkClosed();
  return statement;
}

public int getType() throws SQLException {
  checkClosed();
  return resultsettype;
}

@Override
public boolean isAfterLast() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }

  if (queueRows != null) {
    if(getRowCount() == 0)
      return false;
    return(queueRows.endOfResult()
             && queueRows.getCurrentRowIndex() >= getRowCount());
  } else {
    final int rows_size = rows.size();
    if (rowOffset + rows_size == 0) {
      return false;
    }
    return (currentRow >= rows_size);
  }
}

@Override
public boolean isBeforeFirst() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }

  if (queueRows != null) {
    return (queueRows.getCurrentRowIndex() < 0
              && getRowCount() > 0);
  } else {
    return ((rowOffset + currentRow) < 0 && !rows.isEmpty());
  }
}

@Override
public boolean isFirst() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }

  if (queueRows != null) {
    if(getRowCount() == 0)
      return false;
    return(queueRows.getCurrentRowIndex() == 0);
  } else {
    final int rows_size = rows.size();
    if (rowOffset + rows_size == 0) {
      return false;
    }
    return ((rowOffset + currentRow) == 0);
  }
}

/**
 * Returns true when positioned on the very last row. In cursor mode this may need to fetch
 * the next block from the backend to find out whether more rows exist.
 */
@Override
public boolean isLast() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }

  if (queueRows != null) {
    if(getRowCount() == 0)
      return false;
    return(queueRows.endOfResult()
             && ((queueRows.getCurrentRowIndex() + 1) == getRowCount()));
  } else {
    final int rows_size = rows.size();

    if (rows_size == 0) {
      return false; // No rows.
    }

    if (currentRow != (rows_size - 1)) {
      return false; // Not on the last row of this block.
    }

    // We are on the last row of the current block.

    if (cursor == null) {
      // This is the last block and therefore the last row.
      return true;
    }

    if (maxRows > 0 && rowOffset + currentRow == maxRows) {
      // We are implicitly limited by maxRows.
      return true;
    }

    // Now the more painful case begins.
    // We are on the last row of the current block, but we don't know if the
    // current block is the last block; we must try to fetch some more data to
    // find out.

    // We do a fetch of the next block, then prepend the current row to that
    // block (so currentRow == 0). This works as the current row
    // must be the last row of the current block if we got this far.

    rowOffset += rows_size - 1; // Discarding all but one row.

    // Work out how many rows maxRows will let us fetch.
    int fetchRows = fetchSize;
    if (maxRows != 0) {
      if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
        // Fetch would exceed maxRows, limit it.
        fetchRows = maxRows - rowOffset;
      }
    }

    // Do the actual fetch.
    connection.getQueryExecutor().fetch(cursor, new CursorResultHandler(), fetchRows, 0);

    // Now prepend our one saved row and move to it.
    rows.add(0, thisRow);
    currentRow = 0;

    // Finally, now we can tell if we're the last row or not.
    return (rows.size() == 1);
  }
}

// Moves to the last row of the current in-memory block; false when empty.
@Override
public boolean last() throws SQLException {
  checkScrollable();

  final int rows_size = rows.size();
  if (rows_size <= 0) {
    return false;
  }

  currentRow = rows_size - 1;
  initRowBuffer();
  onInsertRow = false;

  return true;
}

// Steps the cursor back one row; false (and before-first) when already at the start.
@Override
public boolean previous() throws SQLException {
  checkScrollable();

  if (onInsertRow) {
    throw new RedshiftException(GT.tr("Can''t use relative move methods while on the insert row."),
        RedshiftState.INVALID_CURSOR_STATE);
  }

  if (currentRow - 1 < 0) {
    currentRow = -1;
    thisRow = null;
    rowBuffer = null;
    return false;
  } else {
    currentRow--;
  }
  initRowBuffer();
  return true;
}

// Moves the cursor by a relative number of rows, delegating to absolute().
@Override
public boolean relative(int rows) throws SQLException {
  checkScrollable();

  if (onInsertRow) {
    throw new RedshiftException(GT.tr("Can''t use relative move methods while on the insert row."),
        RedshiftState.INVALID_CURSOR_STATE);
  }

  // have to add 1 since absolute expects a 1-based index
  int index = currentRow + 1 + rows;
  if (index < 0) {
    beforeFirst();
    return false;
  }
  return absolute(index);
}

// REVERSE/UNKNOWN directions are only valid on scrollable result sets.
public void setFetchDirection(int direction) throws SQLException {
  if (RedshiftLogger.isEnable())
    connection.getLogger().logFunction(true, direction);

  checkClosed();
  switch (direction) {
    case ResultSet.FETCH_FORWARD:
      break;
    case ResultSet.FETCH_REVERSE:
    case ResultSet.FETCH_UNKNOWN:
      checkScrollable();
      break;
    default:
      throw new RedshiftException(GT.tr("Invalid fetch direction constant: {0}.", direction),
          RedshiftState.INVALID_PARAMETER_VALUE);
  }

  this.fetchdirection = direction;
}

// Discards pending updateXXX() values made since the last updateRow().
public synchronized void cancelRowUpdates() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    throw new RedshiftException(GT.tr("Cannot call cancelRowUpdates() when on the insert row."),
        RedshiftState.INVALID_CURSOR_STATE);
  }

  if (doingUpdates) {
    doingUpdates = false;

    clearRowBuffer(true);
  }
}

/**
 * Deletes the current row from the underlying table via a prepared DELETE keyed on the
 * primary keys, then removes it from the in-memory page. Not supported in ring-buffer mode.
 */
public synchronized void deleteRow() throws SQLException {
  checkUpdateable();

  if (onInsertRow) {
    throw new RedshiftException(GT.tr("Cannot call deleteRow() when on the insert row."),
        RedshiftState.INVALID_CURSOR_STATE);
  }

  if (isBeforeFirst()) {
    throw new RedshiftException(
        GT.tr(
            "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."),
        RedshiftState.INVALID_CURSOR_STATE);
  }
  if (isAfterLast()) {
    throw new RedshiftException(
        GT.tr(
            "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."),
        RedshiftState.INVALID_CURSOR_STATE);
  }

  if(queueRows != null) {
    throw new RedshiftException(GT.tr("Cannot call deleteRow() when enableFetchRingBuffer is true."),
        RedshiftState.INVALID_CURSOR_STATE);
  } else {
    if (rows.isEmpty()) {
      throw new RedshiftException(GT.tr("There are no rows in this ResultSet."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    int numKeys = primaryKeys.size();
    if (deleteStatement == null) {
      // Lazily build (and cache) the DELETE; the key list never changes for this result set.
      StringBuilder deleteSQL =
          new StringBuilder("DELETE FROM ").append(onlyTable).append(tableName).append(" where ");

      for (int i = 0; i < numKeys; i++) {
        Utils.escapeIdentifier(deleteSQL, primaryKeys.get(i).name);
        deleteSQL.append(" = ?");
        if (i < numKeys - 1) {
          deleteSQL.append(" and ");
        }
      }

      deleteStatement = connection.prepareStatement(deleteSQL.toString());
    }
    deleteStatement.clearParameters();

    for (int i = 0; i < numKeys; i++) {
      deleteStatement.setObject(i + 1, primaryKeys.get(i).getValue());
    }

    deleteStatement.executeUpdate();

    rows.remove(currentRow);
    currentRow--;
    moveToCurrentRow();
  }
}

/**
 * Inserts the pending insert-row values into the table. The INSERT text is rebuilt on every
 * call since the set of assigned columns may differ each time. Not supported in
 * ring-buffer mode.
 */
@Override
public synchronized void insertRow() throws SQLException {
  checkUpdateable();

  if (!onInsertRow) {
    throw new RedshiftException(GT.tr("Not on the insert row."),
        RedshiftState.INVALID_CURSOR_STATE);
  } else if (updateValues.isEmpty()) {
    throw new RedshiftException(GT.tr("You must specify at least one column value to insert a row."),
        RedshiftState.INVALID_PARAMETER_VALUE);
  } else if(queueRows != null) {
    throw new RedshiftException(GT.tr("Cannot call insertRow() when enableFetchRingBuffer is true."),
        RedshiftState.INVALID_CURSOR_STATE);
  } else {

    // loop through the keys in the insertTable and create the sql statement
    // we have to create the sql every time since the user could insert different
    // columns each time

    StringBuilder insertSQL = new StringBuilder("INSERT INTO ").append(tableName).append(" (");
    StringBuilder paramSQL = new StringBuilder(") values (");

    Iterator<String> columnNames = updateValues.keySet().iterator();
    int numColumns = updateValues.size();

    for (int i = 0; columnNames.hasNext(); i++) {
      String columnName = columnNames.next();

      Utils.escapeIdentifier(insertSQL, columnName);
      if (i < numColumns - 1) {
        insertSQL.append(", ");
        paramSQL.append("?,");
      } else {
        paramSQL.append("?)");
      }
    }

    insertSQL.append(paramSQL.toString());
    insertStatement = connection.prepareStatement(insertSQL.toString());

    Iterator<Object> values = updateValues.values().iterator();

    for (int i = 1; values.hasNext(); i++) {
      insertStatement.setObject(i, values.next());
    }

    insertStatement.executeUpdate();

    if (usingOID) {
      // we have to get the last inserted OID and put it in the resultset

      long insertedOID = ((RedshiftStatementImpl) insertStatement).getLastOID();

      updateValues.put("oid", insertedOID);
    }

    // update the underlying row to the new inserted data
    updateRowBuffer();

    rows.add(rowBuffer);

    // we should now reflect the current data in thisRow
    // that way getXXX will get the newly inserted data
    thisRow = rowBuffer;

    // need to clear this in case of another insert
    clearRowBuffer(false);
  }
}

// Re-syncs the row buffer with the current cursor position, leaving insert mode.
@Override
public synchronized void moveToCurrentRow() throws SQLException {
  checkUpdateable();

  if (currentRow < 0 || currentRow >= rows.size()) {
    thisRow = null;
    rowBuffer = null;
  } else {
    initRowBuffer();
  }

  onInsertRow = false;
  doingUpdates = false;
}

// Switches to the special insert row, starting from an empty buffer.
@Override
public synchronized void moveToInsertRow() throws SQLException {
  checkUpdateable();

  if (insertStatement != null) {
    insertStatement = null;
  }

  // make sure the underlying data is null
  clearRowBuffer(false);

  onInsertRow = true;
  doingUpdates = false;
}

// rowBuffer is the temporary storage for the row
private synchronized void
clearRowBuffer(boolean copyCurrentRow) throws SQLException { // inserts want an empty array while updates want a copy of the current row if (copyCurrentRow) { rowBuffer = thisRow.updateableCopy(); } else { rowBuffer = new Tuple(fields.length); } // clear the updateValues hash map for the next set of updates updateValues.clear(); } public boolean rowDeleted() throws SQLException { checkClosed(); return false; } public boolean rowInserted() throws SQLException { checkClosed(); return false; } public boolean rowUpdated() throws SQLException { checkClosed(); return false; } public synchronized void updateAsciiStream(int columnIndex, java.io.InputStream x, int length) throws SQLException { if (x == null) { updateNull(columnIndex); return; } try { InputStreamReader reader = new InputStreamReader(x, "ASCII"); char[] data = new char[length]; int numRead = 0; while (true) { int n = reader.read(data, numRead, length - numRead); if (n == -1) { break; } numRead += n; if (numRead == length) { break; } } updateString(columnIndex, new String(data, 0, numRead)); } catch (UnsupportedEncodingException uee) { throw new RedshiftException(GT.tr("The JVM claims not to support the encoding: {0}", "ASCII"), RedshiftState.UNEXPECTED_ERROR, uee); } catch (IOException ie) { throw new RedshiftException(GT.tr("Provided InputStream failed."), null, ie); } } public synchronized void updateBigDecimal(int columnIndex, java.math.BigDecimal x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateBinaryStream(int columnIndex, java.io.InputStream x, int length) throws SQLException { if (x == null) { updateNull(columnIndex); return; } byte[] data = new byte[length]; int numRead = 0; try { while (true) { int n = x.read(data, numRead, length - numRead); if (n == -1) { break; } numRead += n; if (numRead == length) { break; } } } catch (IOException ie) { throw new RedshiftException(GT.tr("Provided InputStream failed."), null, ie); } if (numRead == length) { 
updateBytes(columnIndex, data); } else { // the stream contained less data than they said // perhaps this is an error? byte[] data2 = new byte[numRead]; System.arraycopy(data, 0, data2, 0, numRead); updateBytes(columnIndex, data2); } } public synchronized void updateBoolean(int columnIndex, boolean x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateByte(int columnIndex, byte x) throws SQLException { updateValue(columnIndex, String.valueOf(x)); } public synchronized void updateBytes(int columnIndex, byte[] x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateCharacterStream(int columnIndex, java.io.Reader x, int length) throws SQLException { if (x == null) { updateNull(columnIndex); return; } try { char[] data = new char[length]; int numRead = 0; while (true) { int n = x.read(data, numRead, length - numRead); if (n == -1) { break; } numRead += n; if (numRead == length) { break; } } updateString(columnIndex, new String(data, 0, numRead)); } catch (IOException ie) { throw new RedshiftException(GT.tr("Provided Reader failed."), null, ie); } } public synchronized void updateDate(int columnIndex, java.sql.Date x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateDouble(int columnIndex, double x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateFloat(int columnIndex, float x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateInt(int columnIndex, int x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateLong(int columnIndex, long x) throws SQLException { updateValue(columnIndex, x); } public synchronized void updateNull(int columnIndex) throws SQLException { checkColumnIndex(columnIndex); String columnTypeName = getRSType(columnIndex); updateValue(columnIndex, new NullObject(columnTypeName)); } public synchronized void updateObject(int columnIndex, Object x) throws 
SQLException {
    updateValue(columnIndex, x);
  }

  // The scale argument is ignored; delegates to the scale-less overload.
  public synchronized void updateObject(int columnIndex, Object x, int scale) throws SQLException {
    this.updateObject(columnIndex, x);
  }

  /**
   * Re-reads the current row from the server by primary key and replaces the cached tuple.
   * Not supported while positioned on the insert row or when the ring-buffer fetch mode
   * ({@code enableFetchRingBuffer}) is active.
   */
  @Override
  public void refreshRow() throws SQLException {
    checkUpdateable();
    if (onInsertRow) {
      throw new RedshiftException(GT.tr("Can''t refresh the insert row."),
          RedshiftState.INVALID_CURSOR_STATE);
    } else if(queueRows != null) {
      throw new RedshiftException(GT.tr("Can''t refresh when enableFetchRingBuffer is true."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    // Nothing to refresh when outside the row range.
    if (isBeforeFirst() || isAfterLast() || rows.isEmpty()) {
      return;
    }

    // Build "select <cols> from <table> where <pk1>= ? and ..." using base column names.
    StringBuilder selectSQL = new StringBuilder("select ");

    ResultSetMetaData rsmd = getMetaData();
    RedshiftResultSetMetaData pgmd = (RedshiftResultSetMetaData) rsmd;
    for (int i = 1; i <= rsmd.getColumnCount(); i++) {
      if (i > 1) {
        selectSQL.append(", ");
      }
      selectSQL.append(pgmd.getBaseColumnName(i));
    }
    selectSQL.append(" from ").append(onlyTable).append(tableName).append(" where ");

    int numKeys = primaryKeys.size();

    for (int i = 0; i < numKeys; i++) {
      PrimaryKey primaryKey = primaryKeys.get(i);
      // NOTE(review): unlike updateRow(), the key name is not passed through
      // Utils.escapeIdentifier here — presumably safe for unquoted key names; verify.
      selectSQL.append(primaryKey.name).append("= ?");

      if (i < numKeys - 1) {
        selectSQL.append(" and ");
      }
    }
    String sqlText = selectSQL.toString();
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().log(LogLevel.DEBUG, "selecting {0}", sqlText);
    }
    // because updateable result sets do not yet support binary transfers we must request refresh
    // with updateable result set to get field data in correct format
    selectStatement = connection.prepareStatement(sqlText,
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);

    for (int j = 0, i = 1; j < numKeys; j++, i++) {
      selectStatement.setObject(i, primaryKeys.get(j).getValue());
    }

    RedshiftResultSet rs = (RedshiftResultSet) selectStatement.executeQuery();

    if (rs.next()) {
      rowBuffer = rs.thisRow;
    }

    rows.set(currentRow, rowBuffer);
    thisRow = rowBuffer;

    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "done updates");

    rs.close();
    selectStatement.close();
    selectStatement = null;
  }

  /**
   * Flushes the pending column updates for the current row to the server as an UPDATE
   * keyed on the primary key, then refreshes the cached row buffer. No-op when no
   * updates are pending. Not supported on the insert row or in ring-buffer fetch mode.
   */
  @Override
  public synchronized void updateRow() throws SQLException {
    checkUpdateable();

    if (onInsertRow) {
      throw new RedshiftException(GT.tr("Cannot call updateRow() when on the insert row."),
          RedshiftState.INVALID_CURSOR_STATE);
    } else if(queueRows != null) {
      throw new RedshiftException(GT.tr("Cannot call updateRow() when enableFetchRingBuffer is true."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    if (isBeforeFirst() || isAfterLast() || rows.isEmpty()) {
      throw new RedshiftException(
          GT.tr(
              "Cannot update the ResultSet because it is either before the start or after the end of the results."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    if (!doingUpdates) {
      return; // No work pending.
    }

    // Build "UPDATE <table> SET col = ?, ... WHERE pk = ? and ..." with escaped identifiers.
    StringBuilder updateSQL = new StringBuilder("UPDATE " + onlyTable + tableName + " SET  ");

    int numColumns = updateValues.size();
    Iterator<String> columns = updateValues.keySet().iterator();

    for (int i = 0; columns.hasNext(); i++) {
      String column = columns.next();
      Utils.escapeIdentifier(updateSQL, column);
      updateSQL.append(" = ?");

      if (i < numColumns - 1) {
        updateSQL.append(", ");
      }
    }

    updateSQL.append(" WHERE ");

    int numKeys = primaryKeys.size();

    for (int i = 0; i < numKeys; i++) {
      PrimaryKey primaryKey = primaryKeys.get(i);
      Utils.escapeIdentifier(updateSQL, primaryKey.name);
      updateSQL.append(" = ?");

      if (i < numKeys - 1) {
        updateSQL.append(" and ");
      }
    }

    String sqlText = updateSQL.toString();
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().log(LogLevel.DEBUG, "updating {0}", sqlText);
    }
    updateStatement = connection.prepareStatement(sqlText);

    // Bind SET values first, then the primary-key values (iteration order of
    // updateValues must match the order used when building the SQL above).
    int i = 0;
    Iterator<Object> iterator = updateValues.values().iterator();
    for (; iterator.hasNext(); i++) {
      Object o = iterator.next();
      updateStatement.setObject(i + 1, o);
    }

    for (int j = 0; j < numKeys; j++, i++) {
      updateStatement.setObject(i + 1, primaryKeys.get(j).getValue());
    }

    updateStatement.executeUpdate();
    updateStatement.close();
    updateStatement = null;

    // Re-encode the updated values into the locally cached row.
    updateRowBuffer();

    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "copying data");

    thisRow = rowBuffer.readOnlyCopy();
    rows.set(currentRow, rowBuffer);

    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "done updates");
    updateValues.clear();
    doingUpdates = false;
  }

  public synchronized void updateShort(int columnIndex, short x) throws SQLException {
    updateValue(columnIndex, x);
  }

  public synchronized void updateString(int columnIndex, String x) throws SQLException {
    updateValue(columnIndex, x);
  }

  public synchronized void updateTime(int columnIndex, Time x) throws SQLException {
    updateValue(columnIndex, x);
  }

  public synchronized void updateTimestamp(int columnIndex, Timestamp x) throws SQLException {
    updateValue(columnIndex, x);
  }

  // ---- by-name overloads: each resolves the label and delegates to the index form ----

  public synchronized void updateNull(String columnName) throws SQLException {
    updateNull(findColumn(columnName));
  }

  public synchronized void updateBoolean(String columnName, boolean x) throws SQLException {
    updateBoolean(findColumn(columnName), x);
  }

  public synchronized void updateByte(String columnName, byte x) throws SQLException {
    updateByte(findColumn(columnName), x);
  }

  public synchronized void updateShort(String columnName, short x) throws SQLException {
    updateShort(findColumn(columnName), x);
  }

  public synchronized void updateInt(String columnName, int x) throws SQLException {
    updateInt(findColumn(columnName), x);
  }

  public synchronized void updateLong(String columnName, long x) throws SQLException {
    updateLong(findColumn(columnName), x);
  }

  public synchronized void updateFloat(String columnName, float x) throws SQLException {
    updateFloat(findColumn(columnName), x);
  }

  public synchronized void updateDouble(String columnName, double x) throws SQLException {
    updateDouble(findColumn(columnName), x);
  }

  public synchronized void updateBigDecimal(String columnName, BigDecimal x) throws SQLException {
    updateBigDecimal(findColumn(columnName), x);
  }

  public synchronized void updateString(String
columnName, String x) throws SQLException {
    updateString(findColumn(columnName), x);
  }

  public synchronized void updateBytes(String columnName, byte[] x) throws SQLException {
    updateBytes(findColumn(columnName), x);
  }

  public synchronized void updateDate(String columnName, java.sql.Date x) throws SQLException {
    updateDate(findColumn(columnName), x);
  }

  public synchronized void updateTime(String columnName, java.sql.Time x) throws SQLException {
    updateTime(findColumn(columnName), x);
  }

  public synchronized void updateTimestamp(String columnName, java.sql.Timestamp x)
      throws SQLException {
    updateTimestamp(findColumn(columnName), x);
  }

  public synchronized void updateAsciiStream(String columnName, java.io.InputStream x, int length)
      throws SQLException {
    updateAsciiStream(findColumn(columnName), x, length);
  }

  public synchronized void updateBinaryStream(String columnName, java.io.InputStream x, int length)
      throws SQLException {
    updateBinaryStream(findColumn(columnName), x, length);
  }

  public synchronized void updateCharacterStream(String columnName, java.io.Reader reader,
      int length) throws SQLException {
    updateCharacterStream(findColumn(columnName), reader, length);
  }

  // The scale argument is ignored, matching the index-based overload.
  public synchronized void updateObject(String columnName, Object x, int scale)
      throws SQLException {
    updateObject(findColumn(columnName), x);
  }

  public synchronized void updateObject(String columnName, Object x) throws SQLException {
    updateObject(findColumn(columnName), x);
  }

  /**
   * Is this ResultSet updateable? True only for a single-table query whose select list
   * covers the table's full primary key (or an explicit {@code oid} column). Caches the
   * positive answer in {@code updateable}; populates {@code primaryKeys} as a side effect.
   *
   * @return true if every primary-key column is present in the select list
   * @throws SQLException if the ResultSet is CONCUR_READ_ONLY, closed, or no key exists
   */
  boolean isUpdateable() throws SQLException {
    checkClosed();

    if (resultsetconcurrency == ResultSet.CONCUR_READ_ONLY) {
      throw new RedshiftException(
          GT.tr("ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    if (updateable) {
      return true;
    }

    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "checking if rs is updateable");

    parseQuery();

    if (!singleTable) {
      if (RedshiftLogger.isEnable())
        connection.getLogger().log(LogLevel.DEBUG, "not a single table");
      return false;
    }

    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "getting primary keys");

    //
    // Contains the primary key?
    //

    primaryKeys = new ArrayList<PrimaryKey>();

    // this is not strictly jdbc spec, but it will make things much faster if used
    // the user has to select oid, * from table and then we will just use oid

    // with oids has been removed in version 12
    // FIXME: with oids does not automatically create an index, should check for primary keys first

    usingOID = false;
    int oidIndex = findColumnIndex("oid"); // 0 if not present

    int i = 0;
    int numPKcolumns = 0;

    // if we find the oid then just use it

    // oidIndex will be >0 if the oid was in the select list
    if (oidIndex > 0) {
      i++;
      numPKcolumns++;
      primaryKeys.add(new PrimaryKey(oidIndex, "oid"));
      usingOID = true;
    } else {
      // otherwise go and get the primary keys and create a list of keys
      String[] s = quotelessTableName(tableName);
      String quotelessTableName = s[0];
      String quotelessSchemaName = s[1];
      java.sql.ResultSet rs = connection.getMetaData().getPrimaryKeys("",
          quotelessSchemaName, quotelessTableName);
      while (rs.next()) {
        numPKcolumns++;
        String columnName = rs.getString(4); // get the columnName
        int index = findColumnIndex(columnName);

        // count only key columns that actually appear in this select list
        if (index > 0) {
          i++;
          primaryKeys.add(new PrimaryKey(index, columnName)); // get the primary key information
        }
      }

      rs.close();
    }

    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "no of keys={0}", i);

    if (i < 1) {
      throw new RedshiftException(GT.tr("No primary key found for table {0}.", tableName),
          RedshiftState.DATA_ERROR);
    }

    // updateable only if the select list contains the entire key
    updateable = (i == numPKcolumns);

    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, "checking primary key {0}", updateable);

    return updateable;
  }

  /**
   * Cracks out the table name and schema (if it exists) from a fully qualified table name,
   * removing double quotes and lower-casing unquoted identifiers. Examples:
   *
   * <pre>
   * Table            -&gt; table           ()
   * "Table"          -&gt; Table           ()
   * Schema.Table     -&gt; table           (schema)
   * "Schema"."Table" -&gt; Table           (Schema)
   * "Schema"."Dot.Table" -&gt; Dot.Table   (Schema)
   * Schema."Dot.Table"   -&gt; Dot.Table   (schema)
   * </pre>
   *
   * @param fullname possibly schema-qualified, possibly quoted table name
   * @return String array with element zero always being the tablename and element 1 the schema
   *         name, which may be a zero length string.
   */
  public static String[] quotelessTableName(String fullname) {

    String[] parts = new String[]{null, ""};
    StringBuilder acc = new StringBuilder();
    boolean betweenQuotes = false;
    for (int i = 0; i < fullname.length(); i++) {
      char c = fullname.charAt(i);
      switch (c) {
        case '"':
          if ((i < fullname.length() - 1) && (fullname.charAt(i + 1) == '"')) {
            // two consecutive quotes - keep one
            i++;
            acc.append(c); // keep the quote
          } else { // Discard it
            betweenQuotes = !betweenQuotes;
          }
          break;
        case '.':
          if (betweenQuotes) { // Keep it
            acc.append(c);
          } else { // Have schema name
            parts[1] = acc.toString();
            acc = new StringBuilder();
          }
          break;
        default:
          acc.append((betweenQuotes) ? c : Character.toLowerCase(c));
          break;
      }
    }
    // Always put table in slot 0
    parts[0] = acc.toString();
    return parts;
  }

  /**
   * Naive tokenizer scan of the original query: finds the first FROM and records the table
   * name (honoring an ONLY prefix). NOTE(review): tablesChecked is never set to true and
   * singleTable is never set to false here, so multi-table detection appears incomplete —
   * verify against isUpdateable()'s expectations.
   */
  private void parseQuery() {
    String sql = originalQuery.toString(null);
    StringTokenizer st = new StringTokenizer(sql, " \r\t\n");
    boolean tableFound = false;
    boolean tablesChecked = false;
    String name = "";

    singleTable = true;

    while (!tableFound && !tablesChecked && st.hasMoreTokens()) {
      name = st.nextToken();
      if ("from".equalsIgnoreCase(name)) {
        tableName = st.nextToken();
        if ("only".equalsIgnoreCase(tableName)) {
          tableName = st.nextToken();
          onlyTable = "ONLY ";
        }
        tableFound = true;
      }
    }
  }

  /**
   * Re-encodes the pending updateValues into the cached rowBuffer so the local copy of the
   * row matches what was just written to the server (binary or text form per column).
   */
  private void updateRowBuffer() throws SQLException {

    for (Map.Entry<String, Object> entry : updateValues.entrySet()) {
      int columnIndex = findColumn(entry.getKey()) - 1;

      Object valueObject = entry.getValue();
      if (valueObject instanceof RedshiftObject) {
        String value = ((RedshiftObject) valueObject).getValue();
        rowBuffer.set(columnIndex, (value == null) ? null : connection.encodeString(value));
      } else {
        switch (getSQLType(columnIndex + 1)) {

          // boolean needs to be formatted as t or f instead of true or false
          case Types.BIT:
          case Types.BOOLEAN:
            if (isBinary(columnIndex + 1) && valueObject != null) {
              byte[] val = new byte[1];
              ByteConverter.bool(val, 0, ((Boolean) valueObject).booleanValue());
              rowBuffer.set(columnIndex, val);
            } else {
              rowBuffer.set(columnIndex, connection
                  .encodeString(((Boolean) valueObject).booleanValue() ? "t" : "f"));
            }
            break;

          //
          // toString() isn't enough for date and time types; we must format it correctly
          // or we won't be able to re-parse it.
// case Types.DATE: if (isBinary(columnIndex + 1) && valueObject != null) { byte[] val = new byte[4]; TimeZone tz = null; connection.getTimestampUtils().toBinDate(tz, val, (Date) valueObject); rowBuffer.set(columnIndex, val); } else { rowBuffer.set(columnIndex, connection .encodeString( connection.getTimestampUtils().toString( getDefaultCalendar(), (Date) valueObject))); } break; case Types.TIME: rowBuffer.set(columnIndex, connection .encodeString( connection.getTimestampUtils().toString( getDefaultCalendar(), (Time) valueObject))); break; case Types.TIMESTAMP: if (isBinary(columnIndex + 1) && valueObject != null) { byte[] val = new byte[8]; connection.getTimestampUtils().toBinTimestamp(null, val, (Timestamp) valueObject); rowBuffer.set(columnIndex, val); } else { rowBuffer.set(columnIndex, connection.encodeString( connection.getTimestampUtils().toString( getDefaultCalendar(), (Timestamp) valueObject))); } break; case Types.NULL: // Should never happen? break; case Types.BINARY: case Types.LONGVARBINARY: case Types.VARBINARY: if (isBinary(columnIndex + 1)) { rowBuffer.set(columnIndex, (byte[]) valueObject); } else { try { rowBuffer.set(columnIndex, RedshiftBytea.toRSString((byte[]) valueObject).getBytes("ISO-8859-1")); } catch (UnsupportedEncodingException e) { throw new RedshiftException( GT.tr("The JVM claims not to support the encoding: {0}", "ISO-8859-1"), RedshiftState.UNEXPECTED_ERROR, e); } } break; case Types.SMALLINT: if (isBinary(columnIndex + 1) && valueObject != null) { byte[] val = new byte[2]; ByteConverter.int2(val, 0, Integer.valueOf(String.valueOf(valueObject))); rowBuffer.set(columnIndex, val); } else { // Does as default switch case rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); } break; case Types.INTEGER: if (isBinary(columnIndex + 1) && valueObject != null) { byte[] val = new byte[4]; ByteConverter.int4(val, 0, Integer.valueOf(String.valueOf(valueObject))); rowBuffer.set(columnIndex, val); } else { // Does as 
default switch case rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); } break; case Types.BIGINT: if (isBinary(columnIndex + 1) && valueObject != null) { byte[] val = new byte[8]; ByteConverter.int8(val, 0, Long.valueOf(String.valueOf(valueObject))); rowBuffer.set(columnIndex, val); } else { // Does as default switch case rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); } break; case Types.FLOAT: if (isBinary(columnIndex + 1) && valueObject != null) { byte[] val = new byte[4]; ByteConverter.float4(val, 0, Float.parseFloat(String.valueOf(valueObject))); rowBuffer.set(columnIndex, val); } else { // Does as default switch case rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); } case Types.DOUBLE: if (isBinary(columnIndex + 1) && valueObject != null) { byte[] val = new byte[8]; ByteConverter.float8(val, 0, Double.parseDouble(String.valueOf(valueObject))); rowBuffer.set(columnIndex, val); } else { // Does as default switch case rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); } break; case Types.DECIMAL: case Types.NUMERIC: if (isBinary(columnIndex + 1) && valueObject != null) { Field field = fields[columnIndex]; int mod = field.getMod(); int serverPrecision; int serverScale; serverPrecision = (mod == -1) ? 0 : ((mod - 4) & 0xFFFF0000) >> 16; serverScale = (mod == -1) ? 
0 : (mod - 4) & 0xFFFF; byte[] val = ByteConverter.redshiftNumeric(new BigDecimal(String.valueOf(valueObject)), serverPrecision, serverScale); rowBuffer.set(columnIndex, val); } else { // Does as default switch case rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); } break; default: rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); break; } } } } public class CursorResultHandler extends ResultHandlerBase { int resultsettype; public CursorResultHandler() { this(0); } public CursorResultHandler(int resultsettype) { this.resultsettype = resultsettype; } @Override public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples, ResultCursor cursor, RedshiftRowsBlockingQueue<Tuple> queueTuples, int[] rowCount, Thread ringBufferThread) { RedshiftResultSet.this.rows = tuples; RedshiftResultSet.this.cursor = cursor; RedshiftResultSet.this.queueRows = queueTuples; RedshiftResultSet.this.rowCount = rowCount; RedshiftResultSet.this.ringBufferThread = ringBufferThread; } @Override public void handleCommandStatus(String status, long updateCount, long insertOID) { handleError(new RedshiftException(GT.tr("Unexpected command status: {0}.", status), RedshiftState.PROTOCOL_VIOLATION)); } @Override public void handleCompletion() throws SQLException { SQLWarning warning = getWarning(); if (warning != null) { RedshiftResultSet.this.addWarning(warning); } super.handleCompletion(); } @Override public boolean wantsScrollableResultSet() { if(resultsettype !=0 ) return resultsettype != ResultSet.TYPE_FORWARD_ONLY; else return true; // Used in isLast() method. 
}
  }

  public BaseStatement getRedshiftStatement() {
    return statement;
  }

  //
  // Backwards compatibility with RedshiftRefCursorResultSet
  //

  // Name of the refcursor this result set was opened from, if any.
  private String refCursorName;

  public String getRefCursor() {
    // Can't check this because the RedshiftRefCursorResultSet
    // interface doesn't allow throwing a SQLException
    //
    // checkClosed();
    return refCursorName;
  }

  private void setRefCursor(String refCursorName) {
    this.refCursorName = refCursorName;
  }

  public void setFetchSize(int rows) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, rows);

    checkClosed();
    if (rows < 0) {
      throw new RedshiftException(GT.tr("Fetch size must be a value greater to or equal to 0."),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }
    fetchSize = rows;
  }

  public int getFetchSize() throws SQLException {
    checkClosed();
    return fetchSize;
  }

  /**
   * Advances to the next row. Two modes:
   * ring-buffer mode (queueRows != null) takes rows from the background-filled queue and
   * transparently resumes a suspended portal; otherwise rows come from the in-memory list,
   * refilled from the server cursor in fetchSize batches, bounded by maxRows.
   */
  @Override
  public boolean next() throws SQLException {
    checkClosed();

    if (onInsertRow) {
      throw new RedshiftException(GT.tr("Can''t use relative move methods while on the insert row."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    if (queueRows != null) {
      currentRow = 0;
      try {
        thisRow = queueRows.take();
        // An empty tuple is the queue's end-of-rows sentinel.
        if (thisRow == null || thisRow.fieldCount() == 0) {
          // End of result
          // Set suspended cursor, if any
          if(cursor == null)
            cursor = queueRows.getSuspendedPortal();

          // Check for any error
          resetBufAndCheckForAnyErrorInQueue();

          // Read more rows, if portal suspended
          if(cursor != null && queueRows.isSuspendedPortal()) {
            boolean moreRows = fetchMoreInQueueFromSuspendedPortal();
            if(!moreRows)
              return false;
          } // Suspended portal
          else
            return false; // End of the resultset.
        }
        else {
          // System.out.print("R");
        }
      } catch (InterruptedException ie) {
        throw new RedshiftException(GT.tr("Interrupted exception retrieving query results."),
            RedshiftState.UNEXPECTED_ERROR, ie);
      }
    }
    else {
      if (currentRow + 1 >= rows.size()) {
        // Stop when there is no cursor to continue from, or maxRows is reached.
        if (cursor == null || (maxRows > 0 && rowOffset + rows.size() >= maxRows)) {
          currentRow = rows.size();
          thisRow = null;
          rowBuffer = null;
          return false; // End of the resultset.
        }

        // Ask for some more data.
        rowOffset += rows.size(); // We are discarding some data.

        int fetchRows = fetchSize;
        if (maxRows != 0) {
          if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
            // Fetch would exceed maxRows, limit it.
            fetchRows = maxRows - rowOffset;
          }
        }

        // Execute the fetch and update this resultset.
        connection.getQueryExecutor().fetch(cursor, new CursorResultHandler(), fetchRows, 0);

        currentRow = 0;

        // Test the new rows array.
        if (rows.isEmpty()) {
          thisRow = null;
          rowBuffer = null;
          return false;
        }
      } else {
        currentRow++;
      }
    }// !queueRows

    initRowBuffer();
    return true;
  }

  /**
   * Resets the row buffers at end-of-queue, re-queues the end marker so repeated next()
   * calls keep returning false, and rethrows any error captured by the ring-buffer thread.
   */
  private void resetBufAndCheckForAnyErrorInQueue() throws SQLException, InterruptedException {
    SQLException ex = queueRows.getHandlerException();
    queueRows.addEndOfRowsIndicator(); // Keep End of result indicator for repeated next() call.
    rowBuffer = null;
    thisRow = null;

    if (ex != null)
      throw ex;
  }

  /**
   * Resumes a suspended portal: fetches the next batch into the ring buffer (bounded by
   * maxRows) and positions on the first row of the new batch.
   *
   * @return true if a row was obtained, false at end of the result set
   */
  private boolean fetchMoreInQueueFromSuspendedPortal() throws SQLException {
    long rowCount = getRowCount();

    if ((maxRows > 0 && rowCount >= maxRows)) {
      return false; // End of the resultset.
    }

    // Calculate fetch size based on max rows.
    int fetchRows = fetchSize;
    if (maxRows != 0) {
      if (fetchRows == 0 || rowCount + fetchRows > maxRows) {
        // Fetch would exceed maxRows, limit it.
        fetchRows = maxRows - (int)rowCount;
      }
    }

    // Update statement state, so one can cancel the result fetch.
    ((RedshiftStatementImpl)statement).updateStatementCancleState(StatementCancelState.IDLE, StatementCancelState.IN_QUERY);

    // Execute the fetch and update this resultset.
    connection.getQueryExecutor().fetch(cursor, new CursorResultHandler(resultsettype), fetchRows, (int)rowCount);

    // We should get a new queue
    if (queueRows != null) {
      currentRow = 0;
      try {
        thisRow = queueRows.take();
        if (thisRow == null || thisRow.fieldCount() == 0) {
          // End of result
          // Update statement state.
          ((RedshiftStatementImpl)statement).updateStatementCancleState(StatementCancelState.IN_QUERY, StatementCancelState.IDLE);

          // Check for any error
          resetBufAndCheckForAnyErrorInQueue();
          return false;
        }
        else
          return true;
      } catch (InterruptedException ie) {
        throw new RedshiftException(GT.tr("Interrupted exception retrieving query results."),
            RedshiftState.UNEXPECTED_ERROR, ie);
      }
    } // Do we have queue?
    else
      return false;
  }

  /**
   * Closes this result set and always notifies the owning statement, even if the
   * internal close fails.
   */
  public void close() throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true);

    try {
      closeInternally();
    } finally {
      ((RedshiftStatementImpl) statement).checkCompletion();
    }

    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(false);
      connection.getLogger().flush();
    }
  }

  /*
  used by PgStatement.closeForNextExecution to avoid
  closing the firstUnclosedResult twice.
  checkCompletion above modifies firstUnclosedResult
  fixes issue #684
   */
  protected void closeInternally() throws SQLException {
    // release resources held (memory for tuples)
    rows = null;
    // Close ring buffer thread associated with this result, if any.
connection.getQueryExecutor().closeRingBufferThread(queueRows, ringBufferThread);

    // release resources held (memory for queue)
    queueRows = null;
    rowCount = null;

    if (cursor != null) {
      cursor.close();
      cursor = null;
    }
  }

  public boolean wasNull() throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true);

    checkClosed();
    return wasNullFlag;
  }

  // True for character-like column types, including REF_CURSOR and OTHER columns that
  // are not one of the interval types.
  private boolean isCharType(int columnIndex) throws SQLException {
    int colType = getSQLType(columnIndex);

    return (colType == Types.VARCHAR
            || colType == Types.CHAR
            || colType == Types.LONGVARCHAR
            || colType == Types.NVARCHAR
            || colType == Types.NCHAR
            || colType == Types.LONGNVARCHAR
            || colType == Types.REF_CURSOR
            || (colType == Types.OTHER
                && !isInterval(columnIndex)
                && !isIntervalYearToMonth(columnIndex)
                && !isIntervalDayToSecond(columnIndex)));
  }

  /**
   * Returns the column as a String, converting binary-format columns (dates, intervals,
   * hstore, VARBYTE, GEOGRAPHY) to their text representations and rendering
   * GEOMETRY/GEOMETRY-HEX columns as WKB hex. Text-format values are decoded with the
   * connection encoding; FLOAT8 text values are round-tripped through Double to normalize
   * representations like 20.19999999 to 20.2.
   */
  @Override
  public String getString(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getString columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return null;
    }

    // varchar in binary is same as text, other binary fields are converted to their text format
    if (isBinary(columnIndex) && !isCharType(columnIndex) && !isGeometry(columnIndex) && !isGeometryHex(columnIndex)) {
      Field field = fields[columnIndex - 1];
      Object obj = internalGetObject(columnIndex, field);
      if (obj == null) {
        // internalGetObject() knows jdbc-types and some extra like hstore. It does not know of
        // RedshiftObject based types like geometric types but getObject does
        obj = getObject(columnIndex);
        if (obj == null) {
          return null;
        }
        return obj.toString();
      }
      // hack to be compatible with text protocol
      if (obj instanceof java.util.Date) {
        int oid = field.getOID();
        return connection.getTimestampUtils().timeToString((java.util.Date) obj,
            oid == Oid.TIMESTAMPTZ || oid == Oid.TIMETZ);
      }
      if (obj instanceof RedshiftIntervalYearToMonth) {
        RedshiftIntervalYearToMonth ym = (RedshiftIntervalYearToMonth) obj;
        return ym.getValue();
      }
      if (obj instanceof RedshiftIntervalDayToSecond) {
        RedshiftIntervalDayToSecond ds = (RedshiftIntervalDayToSecond) obj;
        return ds.getValue();
      }
      if ("hstore".equals(getRSType(columnIndex))) {
        return HStoreConverter.toString((Map<?, ?>) obj);
      }
      else if(isVarbyte(columnIndex)) {
        // Convert raw binary to HEX
        return trimString(columnIndex, RedshiftVarbyte.convertToString((byte[])obj));
      } // VARBYTE
      else if(isGeography(columnIndex)) {
        // Convert raw binary to HEX
        return trimString(columnIndex, RedshiftGeography.convertToString((byte[])obj));
      } // GEOGRAPHY

      return trimString(columnIndex, obj.toString());
    }

    if (isGeometry(columnIndex) || isGeometryHex(columnIndex)) {
      Field field = fields[columnIndex - 1];
      Object obj = internalGetObject(columnIndex, field);
      byte[] colData;

      if (obj == null) {
        // internalGetObject() knows jdbc-types and some extra like hstore. It does not know of
        // RedshiftObject based types like geometric types but getObject does
        obj = getObject(columnIndex);
        if (obj == null) {
          return null;
        }
      }

      // GEOMETRY is already WKB; GEOMETRY-HEX must be transformed from EWKT format first.
      if(isGeometry(columnIndex))
        colData = (byte[])obj;
      else {
        byte[] ewktfData = (byte[])obj;
        colData = RedshiftGeometry.transformEWKTFormat(ewktfData, 0, ewktfData.length);
      }

      return RedshiftGeometry.convertToString(colData);
    } // Geometry
    else {
      Encoding encoding = connection.getEncoding();
      try {
        String rc = trimString(columnIndex, encoding.decode(thisRow.get(columnIndex - 1)));
        if (fields[columnIndex - 1].getOID() == Oid.FLOAT8) {
          // Convert values like 20.19999999 to 20.2
          Double val = toDouble(rc);
          return val.toString();
        }
        else
          return rc;
      } catch (IOException ioe) {
        throw new RedshiftException(
            GT.tr(
                "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."),
            RedshiftState.DATA_ERROR, ioe);
      }
    }
  }

  /**
   * <p>Retrieves the value of the designated column in the current row of this <code>ResultSet</code>
   * object as a <code>boolean</code> in the Java programming language.</p>
   *
   * <p>If the designated column has a Character datatype and is one of the following values: "1",
   * "true", "t", "yes", "y" or "on", a value of <code>true</code> is returned. If the designated
   * column has a Character datatype and is one of the following values: "0", "false", "f", "no",
   * "n" or "off", a value of <code>false</code> is returned. Leading or trailing whitespace is
   * ignored, and case does not matter.</p>
   *
   * <p>If the designated column has a Numeric datatype and is a 1, a value of <code>true</code> is
   * returned. If the designated column has a Numeric datatype and is a 0, a value of
   * <code>false</code> is returned.</p>
   *
   * @param columnIndex the first column is 1, the second is 2, ...
   * @return the column value; if the value is SQL <code>NULL</code>, the value returned is
   *         <code>false</code>
   * @exception SQLException if the columnIndex is not valid; if a database access error occurs; if
   *            this method is called on a closed result set or is an invalid cast to boolean type.
   * @see <a href="https://www.postgresql.org/docs/current/static/datatype-boolean.html">PostgreSQL
   *      Boolean Type</a>
   */
  @Override
  public boolean getBoolean(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getBoolean columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return false; // SQL NULL
    }

    int col = columnIndex - 1;
    if (Oid.BOOL == fields[col].getOID()
        || Oid.BIT == fields[col].getOID()) {
      final byte[] v = thisRow.get(col);
      if (isBinary(columnIndex)) {
        return (1 == v.length) && (1 == v[0]);
      }
      else {
        // text format: accept 't', raw 1, or the character '1'
        return (1 == v.length) && (116 == v[0] // 116 = 't'
            || 1 == v[0]
            || '1' == v[0]);
      }
    }

    if (isBinary(columnIndex)) {
      try {
        return BooleanTypeUtil.castToBoolean(readDoubleValue(thisRow.get(col), fields[col].getOID(), "boolean", columnIndex));
      }
      catch (RedshiftException ex) {
        // Try using getObject. The readDoubleValue() call fails, when a column type is VARCHAR.
        return BooleanTypeUtil.castToBoolean(getObject(columnIndex));
      }
    }

    return BooleanTypeUtil.castToBoolean(getObject(columnIndex));
  }

  // Byte range bounds for the slow-path BigDecimal range check in getByte().
  private static final BigInteger BYTEMAX = new BigInteger(Byte.toString(Byte.MAX_VALUE));
  private static final BigInteger BYTEMIN = new BigInteger(Byte.toString(Byte.MIN_VALUE));

  @Override
  public byte getByte(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getByte columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      // there is no Oid for byte so must always do conversion from
      // some other numeric type
      return (byte) readLongValue(thisRow.get(col), fields[col].getOID(), Byte.MIN_VALUE,
          Byte.MAX_VALUE, "byte", columnIndex);
    }

    String s = getString(columnIndex);

    if (s != null) {
      s = s.trim();
      if (s.isEmpty()) {
        return 0;
      }
      try {
        // try the optimal parse
        return Byte.parseByte(s);
      } catch (NumberFormatException e) {
        // didn't work, assume the column is not a byte
        try {
          BigDecimal n = new BigDecimal(s);
          BigInteger i = n.toBigInteger();

          int gt = i.compareTo(BYTEMAX);
          int lt = i.compareTo(BYTEMIN);

          if (gt > 0 || lt < 0) {
            throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "byte", s),
                RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
          }
          return i.byteValue();
        } catch (NumberFormatException ex) {
          throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "byte", s),
              RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
      }
    }
    return 0; // SQL NULL
  }

  @Override
  public short getShort(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getShort columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      // direct decode for int2; otherwise range-checked conversion from the actual type
      if (oid == Oid.INT2) {
        return ByteConverter.int2(thisRow.get(col), 0);
      }
      return (short) readLongValue(thisRow.get(col), oid, Short.MIN_VALUE, Short.MAX_VALUE,
          "short", columnIndex);
    }

    return toShort(getFixedString(columnIndex));
  }

  @Override
  public int getInt(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getInt columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.INT4) {
        return ByteConverter.int4(thisRow.get(col), 0);
      }
      return (int) readLongValue(thisRow.get(col), oid, Integer.MIN_VALUE, Integer.MAX_VALUE, "int",
          columnIndex);
    }

    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        return getFastInt(columnIndex);
      } catch (NumberFormatException ex) {
        // fast path rejected the value; fall through to the full parser below
      }
    }
    return toInt(getFixedString(columnIndex));
  }

  @Override
  public long getLong(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getLong columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.INT8) {
        return ByteConverter.int8(thisRow.get(col), 0);
      }
      return readLongValue(thisRow.get(col), oid, Long.MIN_VALUE, Long.MAX_VALUE, "long",
          columnIndex);
    }

    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        return getFastLong(columnIndex);
      } catch (NumberFormatException ex) {
        // fast path rejected the value; fall through to the full parser below
      }
    }
    return toLong(getFixedString(columnIndex));
  }

  /**
   * A dummy exception thrown when fast byte[] to number parsing fails and no value can be returned.
   * The exact stack trace does not matter because the exception is always caught and is not visible
   * to users.
   */
  private static final NumberFormatException FAST_NUMBER_FAILED = new NumberFormatException() {
    // Override fillInStackTrace to prevent memory leak via Throwable.backtrace hidden field
    // The field is not observable via reflection, however when throwable contains stacktrace, it
    // does
    // hold strong references to user objects (e.g. classes -> classloaders), thus it might lead to
    // OutOfMemory conditions.
    @Override
    public synchronized Throwable fillInStackTrace() {
      return this;
    }
  };

  /**
   * Optimised byte[] to number parser. This code does not handle null values, so the caller must do
   * checkResultSet and handle null values prior to calling this function.
   *
   * @param columnIndex The column to parse.
   * @return The parsed number.
   * @throws SQLException If an error occurs while fetching column.
   * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
   *         The value must then be parsed by {@link #toLong(String)}.
   */
  private long getFastLong(int columnIndex) throws SQLException, NumberFormatException {
    byte[] bytes = thisRow.get(columnIndex - 1);

    if (bytes.length == 0) {
      throw FAST_NUMBER_FAILED;
    }

    long val = 0;
    int start;
    boolean neg;
    if (bytes[0] == '-') {
      neg = true;
      start = 1;
      // sign plus at most 18 digits keeps the accumulation below overflow
      if (bytes.length == 1 || bytes.length > 19) {
        throw FAST_NUMBER_FAILED;
      }
    } else {
      start = 0;
      neg = false;
      // at most 18 digits keeps the accumulation below overflow
      if (bytes.length > 18) {
        throw FAST_NUMBER_FAILED;
      }
    }

    while (start < bytes.length) {
      byte b = bytes[start++];
      if (b < '0' || b > '9') {
        throw FAST_NUMBER_FAILED;
      }

      val *= 10;
      val += b - '0';
    }

    if (neg) {
      val = -val;
    }

    return val;
  }

  /**
   * Optimised byte[] to number parser. This code does not handle null values, so the caller must do
   * checkResultSet and handle null values prior to calling this function.
   *
   * @param columnIndex The column to parse.
   * @return The parsed number.
   * @throws SQLException If an error occurs while fetching column.
   * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
   *         The value must then be parsed by {@link #toInt(String)}.
   */
  private int getFastInt(int columnIndex) throws SQLException, NumberFormatException {
    byte[] bytes = thisRow.get(columnIndex - 1);

    if (bytes.length == 0) {
      throw FAST_NUMBER_FAILED;
    }

    int val = 0;
    int start;
    boolean neg;
    if (bytes[0] == '-') {
      neg = true;
      start = 1;
      // sign plus at most 9 digits keeps the accumulation below overflow
      if (bytes.length == 1 || bytes.length > 10) {
        throw FAST_NUMBER_FAILED;
      }
    } else {
      start = 0;
      neg = false;
      // at most 9 digits keeps the accumulation below overflow
      if (bytes.length > 9) {
        throw FAST_NUMBER_FAILED;
      }
    }

    while (start < bytes.length) {
      byte b = bytes[start++];
      if (b < '0' || b > '9') {
        throw FAST_NUMBER_FAILED;
      }

      val *= 10;
      val += b - '0';
    }

    if (neg) {
      val = -val;
    }

    return val;
  }

  /**
   * Optimised byte[] to number parser. This code does not handle null values, so the caller must do
   * checkResultSet and handle null values prior to calling this function.
   *
   * @param columnIndex The column to parse.
   * @return The parsed number.
   * @throws SQLException If an error occurs while fetching column.
   * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
   *         The value must then be parsed by {@link #toBigDecimal(String, int)}.
   */
  private BigDecimal getFastBigDecimal(int columnIndex) throws SQLException, NumberFormatException {
    byte[] bytes = thisRow.get(columnIndex - 1);

    if (bytes.length == 0) {
      throw FAST_NUMBER_FAILED;
    }

    int scale = 0;
    long val = 0;
    int start;
    boolean neg;
    if (bytes[0] == '-') {
      neg = true;
      start = 1;
      if (bytes.length == 1 || bytes.length > 19) {
        throw FAST_NUMBER_FAILED;
      }
    } else {
      start = 0;
      neg = false;
      if (bytes.length > 18) {
        throw FAST_NUMBER_FAILED;
      }
    }

    int periodsSeen = 0;
    while (start < bytes.length) {
      byte b = bytes[start++];
      if (b < '0' || b > '9') {
        if (b == '.') {
          // remember how many digits follow the decimal point; bail out below on a 2nd '.'
          scale = bytes.length - start;
          periodsSeen++;
          continue;
        } else {
          throw FAST_NUMBER_FAILED;
        }
      }
      val *= 10;
      val += b - '0';
    }

    int numNonSignChars = neg ?
        bytes.length - 1 : bytes.length;
    // reject multiple decimal points or a bare "." / "-." with no digits
    if (periodsSeen > 1 || periodsSeen == numNonSignChars) {
      throw FAST_NUMBER_FAILED;
    }

    if (neg) {
      val = -val;
    }

    return BigDecimal.valueOf(val, scale);
  }

  /**
   * Retrieves the value of the designated column as a {@code float}.
   * Binary FLOAT4 columns are decoded directly; other binary numerics go through
   * {@link #readDoubleValue}; text columns use {@link #toFloat(String)}.
   *
   * @param columnIndex the first column is 1
   * @return the column value; 0 if the value is SQL NULL
   * @throws SQLException if the value cannot be represented as a float
   */
  @Override
  public float getFloat(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getFloat columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.FLOAT4) {
        return ByteConverter.float4(thisRow.get(col), 0);
      }
      return (float) readDoubleValue(thisRow.get(col), oid, "float", columnIndex);
    }

    return toFloat(getFixedString(columnIndex));
  }

  /**
   * Retrieves the value of the designated column as a {@code double}.
   * Binary FLOAT8 columns are decoded directly; other binary numerics go through
   * {@link #readDoubleValue}; text columns use {@link #toDouble(String)}.
   *
   * @param columnIndex the first column is 1
   * @return the column value; 0 if the value is SQL NULL
   * @throws SQLException if the value cannot be represented as a double
   */
  @Override
  public double getDouble(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getDouble columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.FLOAT8) {
        return ByteConverter.float8(thisRow.get(col), 0);
      }
      return readDoubleValue(thisRow.get(col), oid, "double", columnIndex);
    }

    return toDouble(getFixedString(columnIndex));
  }

  /**
   * Retrieves the value of the designated column as a {@link BigDecimal} with the given scale.
   * NaN is never produced here (allowNaN = false).
   *
   * @param columnIndex the first column is 1
   * @param scale desired scale, or -1 to keep the server-supplied scale
   * @return the column value; null if the value is SQL NULL
   * @throws SQLException if the value cannot be converted
   */
  public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getBigDecimal columnIndex: {0}", columnIndex);

    return (BigDecimal) getNumeric(columnIndex, scale, false);
  }

  /**
   * Decodes a binary Redshift NUMERIC column using the precision/scale packed in the
   * field's type modifier (same packing as a NUMERIC typmod: (mod - 4) high/low 16 bits).
   */
  private Number getRedshiftNumeric(int columnIndex) {
    Field field = fields[columnIndex - 1];
    int mod = field.getMod();
    int serverPrecision;
    int serverScale;

    // mod == -1 means "no typmod"; precision/scale then default to 0.
    serverPrecision = (mod == -1) ? 0 : ((mod - 4) & 0xFFFF0000) >> 16;
    serverScale = (mod == -1) ? 0 : (mod - 4) & 0xFFFF;

    return ByteConverter.redshiftNumeric(thisRow.get(columnIndex - 1), serverPrecision, serverScale);
  }

  /**
   * Common numeric conversion used by getBigDecimal and friends.
   *
   * @param columnIndex the first column is 1
   * @param scale desired scale, or -1 to keep the natural scale
   * @param allowNaN when true, "NaN" text or a NaN numeric yields {@link Double#NaN}
   * @return a Number (BigDecimal, or Double.NaN when allowed); null for SQL NULL
   * @throws SQLException if the value cannot be converted
   */
  private Number getNumeric(int columnIndex, int scale, boolean allowNaN) throws SQLException {
    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return null;
    }

    if (isBinary(columnIndex)) {
      int sqlType = getSQLType(columnIndex);
      if (sqlType != Types.NUMERIC && sqlType != Types.DECIMAL) {
        // Non-decimal binary column: convert whatever object it decodes to.
        Object obj = internalGetObject(columnIndex, fields[columnIndex - 1]);
        if (obj == null) {
          return null;
        }
        if (obj instanceof Long || obj instanceof Integer || obj instanceof Byte) {
          BigDecimal res = BigDecimal.valueOf(((Number) obj).longValue());
          res = scaleBigDecimal(res, scale);
          return res;
        }
        return toBigDecimal(trimMoney(String.valueOf(obj)), scale);
      } else {
//        Number num = ByteConverter.numeric(thisRow.get(columnIndex - 1));
        Number num = getRedshiftNumeric(columnIndex);
        if (allowNaN && Double.isNaN(num.doubleValue())) {
          return Double.NaN;
        }

        return num;
      }
    }

    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        BigDecimal res = getFastBigDecimal(columnIndex);
        res = scaleBigDecimal(res, scale);
        return res;
      } catch (NumberFormatException ignore) {
        // fast path failed; fall through to the general string parse
      }
    }

    String stringValue = getFixedString(columnIndex);
    if (allowNaN && "NaN".equalsIgnoreCase(stringValue)) {
      return Double.NaN;
    }
    return toBigDecimal(stringValue, scale);
  }

  /**
   * {@inheritDoc}
   *
   * <p>In normal use, the bytes represent the raw values returned by the backend.
However, if the * column is an OID, then it is assumed to refer to a Large Object, and that object is returned as * a byte array.</p> * * <p><b>Be warned</b> If the large object is huge, then you may run out of memory.</p> */ @Override public byte[] getBytes(int columnIndex) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().log(LogLevel.DEBUG, " getBytes columnIndex: {0}", columnIndex); checkResultSet(columnIndex); if (wasNullFlag) { return null; } if (isBinary(columnIndex)) { // If the data is already binary then just return it return trimBytes(columnIndex, thisRow.get(columnIndex - 1)); } if (fields[columnIndex - 1].getOID() == Oid.BYTEA) { return trimBytes(columnIndex, RedshiftBytea.toBytes(thisRow.get(columnIndex - 1))); } else if (fields[columnIndex - 1].getOID() == Oid.VARBYTE) { return trimBytes(columnIndex, RedshiftVarbyte.toBytes(thisRow.get(columnIndex - 1))); } else if (fields[columnIndex - 1].getOID() == Oid.GEOGRAPHY) { return trimBytes(columnIndex, RedshiftGeography.toBytes(thisRow.get(columnIndex - 1))); } else { return trimBytes(columnIndex, thisRow.get(columnIndex - 1)); } } public java.sql.Date getDate(int columnIndex) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().log(LogLevel.DEBUG, " getDate columnIndex: {0}", columnIndex); return getDate(columnIndex, null); } public Time getTime(int columnIndex) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().log(LogLevel.DEBUG, " getTime columnIndex: {0}", columnIndex); return getTime(columnIndex, null); } public Timestamp getTimestamp(int columnIndex) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().log(LogLevel.DEBUG, " getTimestamp columnIndex: {0}", columnIndex); return getTimestamp(columnIndex, null); } public RedshiftIntervalYearToMonth getIntervalYearToMonth(int columnIndex) throws SQLException{ if (RedshiftLogger.isEnable()) connection.getLogger().log(LogLevel.DEBUG, " getIntervalYearToMonth 
columnIndex: {0}", columnIndex); checkResultSet(columnIndex); if (wasNullFlag) { return null; } if (isBinary(columnIndex)) { return new RedshiftIntervalYearToMonth(ByteConverter.int4(thisRow.get(columnIndex - 1), 0)); } String str = getString(columnIndex); return new RedshiftIntervalYearToMonth(getString(columnIndex)); } public RedshiftIntervalDayToSecond getIntervalDayToSecond(int columnIndex) throws SQLException{ if (RedshiftLogger.isEnable()) connection.getLogger().log(LogLevel.DEBUG, " getIntervalDayToSecond columnIndex: {0}", columnIndex); checkResultSet(columnIndex); if (wasNullFlag) { return null; } if (isBinary(columnIndex)) { return new RedshiftIntervalDayToSecond(ByteConverter.int8(thisRow.get(columnIndex - 1), 0)); } String str = getString(columnIndex); return new RedshiftIntervalDayToSecond(getString(columnIndex)); } public InputStream getAsciiStream(int columnIndex) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().log(LogLevel.DEBUG, " getAsciiStream columnIndex: {0}", columnIndex); checkResultSet(columnIndex); if (wasNullFlag) { return null; } // Version 7.2 supports AsciiStream for all the RS text types // As the spec/javadoc for this method indicate this is to be used for // large text values (i.e. LONGVARCHAR) RS doesn't have a separate // long string datatype, but with toast the text datatype is capable of // handling very large values. 
    // Thus the implementation ends up calling
    // getString() since there is no current way to stream the value from the server
    try {
      return new ByteArrayInputStream(getString(columnIndex).getBytes("ASCII"));
    } catch (UnsupportedEncodingException l_uee) {
      throw new RedshiftException(GT.tr("The JVM claims not to support the encoding: {0}", "ASCII"),
          RedshiftState.UNEXPECTED_ERROR, l_uee);
    }
  }

  /**
   * Retrieves the column as a UTF-8 byte stream; null for SQL NULL.
   * The whole value is materialised in memory — there is no server-side streaming.
   */
  public InputStream getUnicodeStream(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getUnicodeStream columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return null;
    }

    // Version 7.2 supports AsciiStream for all the RS text types
    // As the spec/javadoc for this method indicate this is to be used for
    // large text values (i.e. LONGVARCHAR) RS doesn't have a separate
    // long string datatype, but with toast the text datatype is capable of
    // handling very large values. Thus the implementation ends up calling
    // getString() since there is no current way to stream the value from the server
    try {
      return new ByteArrayInputStream(getString(columnIndex).getBytes("UTF-8"));
    } catch (UnsupportedEncodingException l_uee) {
      throw new RedshiftException(GT.tr("The JVM claims not to support the encoding: {0}", "UTF-8"),
          RedshiftState.UNEXPECTED_ERROR, l_uee);
    }
  }

  /**
   * Retrieves the column as a raw byte stream; null for SQL NULL.
   * The whole value is materialised in memory — there is no server-side streaming.
   */
  public InputStream getBinaryStream(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getBinaryStream columnIndex: {0}", columnIndex);

    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return null;
    }

    // Version 7.2 supports BinaryStream for all RS bytea type
    // As the spec/javadoc for this method indicate this is to be used for
    // large binary values (i.e. LONGVARBINARY) RS doesn't have a separate
    // long binary datatype, but with toast the bytea datatype is capable of
    // handling very large values. Thus the implementation ends up calling
    // getBytes() since there is no current way to stream the value from the server
    byte[] b = getBytes(columnIndex);
    if (b != null) {
      return new ByteArrayInputStream(b);
    }
    return null;
  }

  // --- Column-name overloads: each resolves the label and delegates to the index form. ---

  public String getString(String columnName) throws SQLException {
    return getString(findColumn(columnName));
  }

  @Override
  public boolean getBoolean(String columnName) throws SQLException {
    return getBoolean(findColumn(columnName));
  }

  public byte getByte(String columnName) throws SQLException {
    return getByte(findColumn(columnName));
  }

  public short getShort(String columnName) throws SQLException {
    return getShort(findColumn(columnName));
  }

  public int getInt(String columnName) throws SQLException {
    return getInt(findColumn(columnName));
  }

  public long getLong(String columnName) throws SQLException {
    return getLong(findColumn(columnName));
  }

  public float getFloat(String columnName) throws SQLException {
    return getFloat(findColumn(columnName));
  }

  public double getDouble(String columnName) throws SQLException {
    return getDouble(findColumn(columnName));
  }

  public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException {
    return getBigDecimal(findColumn(columnName), scale);
  }

  public byte[] getBytes(String columnName) throws SQLException {
    return getBytes(findColumn(columnName));
  }

  public java.sql.Date getDate(String columnName) throws SQLException {
    return getDate(findColumn(columnName), null);
  }

  public Time getTime(String columnName) throws SQLException {
    return getTime(findColumn(columnName), null);
  }

  public Timestamp getTimestamp(String columnName) throws SQLException {
    return getTimestamp(findColumn(columnName), null);
  }

  public RedshiftIntervalYearToMonth getIntervalYearToMonth(String columnName) throws SQLException {
    return getIntervalYearToMonth(findColumn(columnName));
  }

  public RedshiftIntervalDayToSecond getIntervalDayToSecond(String columnName) throws SQLException {
    return getIntervalDayToSecond(findColumn(columnName));
  }

  public
  InputStream getAsciiStream(String columnName) throws SQLException {
    return getAsciiStream(findColumn(columnName));
  }

  public InputStream getUnicodeStream(String columnName) throws SQLException {
    return getUnicodeStream(findColumn(columnName));
  }

  public InputStream getBinaryStream(String columnName) throws SQLException {
    return getBinaryStream(findColumn(columnName));
  }

  /** Returns the warning chain for this ResultSet, or null when none have been reported. */
  public SQLWarning getWarnings() throws SQLException {
    checkClosed();
    return warnings;
  }

  public void clearWarnings() throws SQLException {
    checkClosed();
    warnings = null;
  }

  /** Appends a warning to the chain (or starts the chain if it is empty). */
  protected void addWarning(SQLWarning warnings) {
    if (this.warnings != null) {
      this.warnings.setNextWarning(warnings);
    } else {
      this.warnings = warnings;
    }
  }

  /** Cursor names are not supported by this driver; always returns null. */
  public String getCursorName() throws SQLException {
    checkClosed();
    return null;
  }

  /**
   * Retrieves the column value as a Java object, using the driver's type mapping for the
   * column's Redshift type.
   *
   * @param columnIndex the first column is 1
   * @return the mapped object; null for SQL NULL
   * @throws SQLException if the column cannot be read or converted
   */
  @Override
  public Object getObject(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getObject columnIndex: {0}", columnIndex);

    Field field;
    checkResultSet(columnIndex);
    if (wasNullFlag) {
      return null;
    }

    field = fields[columnIndex - 1];

    // some fields can be null, mainly from those returned by MetaData methods
    if (field == null) {
      wasNullFlag = true;
      return null;
    }

    Object result = internalGetObject(columnIndex, field);
    if (result != null) {
      return result;
    }

    if (isBinary(columnIndex)) {
      return connection.getObject(getRSType(columnIndex), null, thisRow.get(columnIndex - 1));
    }
    return connection.getObject(getRSType(columnIndex), getString(columnIndex), null);
  }

  public Object getObject(String columnName) throws SQLException {
    return getObject(findColumn(columnName));
  }

  /**
   * Maps a column label to its 1-based index.
   *
   * @param columnName the column label
   * @return the 1-based column index
   * @throws SQLException if no column with that label exists in this ResultSet
   */
  public int findColumn(String columnName) throws SQLException {
    checkClosed();

    int col = findColumnIndex(columnName);
    if (col == 0) {
      throw new RedshiftException(
          GT.tr("The column name {0} was not found in this ResultSet.", columnName),
          RedshiftState.UNDEFINED_COLUMN);
    }
    return col;
  }

  /**
   * Builds a label-to-index map for the given fields. Labels are lowercased unless the
   * column sanitiser is disabled.
   *
   * @param fields the result set's fields
   * @param isSanitiserDisabled when true, labels are stored verbatim (case-sensitive)
   * @return map from label to 1-based column index
   */
  public static Map<String, Integer> createColumnNameIndexMap(Field[] fields, boolean isSanitiserDisabled) {
    Map<String, Integer> columnNameIndexMap = new HashMap<String, Integer>(fields.length * 2);
    // The JDBC spec says when you have duplicate columns names,
    // the first one should be returned. So load the map in
    // reverse order so the first ones will overwrite later ones.
    for (int i = fields.length - 1; i >= 0; i--) {
      String columnLabel = fields[i].getColumnLabel();
      if (isSanitiserDisabled) {
        columnNameIndexMap.put(columnLabel, i + 1);
      } else {
        columnNameIndexMap.put(columnLabel.toLowerCase(Locale.US), i + 1);
      }
    }
    return columnNameIndexMap;
  }

  /**
   * Resolves a label to a 1-based index, trying the exact spelling first and then
   * lower/upper-cased variants (successful variants are cached). Returns 0 when not found.
   */
  private int findColumnIndex(String columnName) {
    if (columnNameIndexMap == null) {
      if (originalQuery != null) {
        columnNameIndexMap = originalQuery.getResultSetColumnNameIndexMap();
      }
      if (columnNameIndexMap == null) {
        columnNameIndexMap = createColumnNameIndexMap(fields, connection.isColumnSanitiserDisabled());
      }
    }

    Integer index = columnNameIndexMap.get(columnName);
    if (index != null) {
      return index;
    }

    index = columnNameIndexMap.get(columnName.toLowerCase(Locale.US));
    if (index != null) {
      columnNameIndexMap.put(columnName, index);
      return index;
    }

    index = columnNameIndexMap.get(columnName.toUpperCase(Locale.US));
    if (index != null) {
      columnNameIndexMap.put(columnName, index);
      return index;
    }

    return 0;
  }

  /**
   * Returns the OID of a field. It is used internally by the driver.
   *
   * @param field field index
   * @return OID of a field
   */
  public int getColumnOID(int field) {
    return fields[field - 1].getOID();
  }

  /**
   * <p>This is used to fix get*() methods on Money fields.
   * It should only be used by those methods!</p>
   *
   * <p>It converts ($##.##) to -##.## and $##.## to ##.##</p>
   *
   * @param col column position (1-based)
   * @return numeric-parsable representation of money string literal
   * @throws SQLException if something wrong happens
   */
  public String getFixedString(int col) throws SQLException {
    return trimMoney(getString(col));
  }

  /**
   * Strips money formatting from a string so it can be parsed as a number:
   * "($x)" and "-$x" become "-x", "$x" becomes "x"; anything else is returned unchanged.
   */
  private String trimMoney(String s) {
    if (s == null) {
      return null;
    }

    // if we don't have at least 2 characters it can't be money.
    if (s.length() < 2) {
      return s;
    }

    // Handle Money
    char ch = s.charAt(0);

    // optimise for non-money type: return immediately with one check
    // if the first char cannot be '(', '$' or '-'
    if (ch > '-') {
      return s;
    }

    if (ch == '(') {
      s = "-" + RedshiftTokenizer.removePara(s).substring(1);
    } else if (ch == '$') {
      s = s.substring(1);
    } else if (ch == '-' && s.charAt(1) == '$') {
      s = "-" + s.substring(2);
    }

    return s;
  }

  /** Returns the Redshift type name of a column, lazily resolving it via the TypeInfo cache. */
  protected String getRSType(int column) throws SQLException {
    Field field = fields[column - 1];
    initSqlType(field);
    return field.getRSType();
  }

  /** Returns the java.sql.Types code of a column, lazily resolving it via the TypeInfo cache. */
  protected int getSQLType(int column) throws SQLException {
    Field field = fields[column - 1];
    initSqlType(field);
    return field.getSQLType();
  }

  // Resolves and caches a field's SQL type and Redshift type name from its OID (idempotent).
  private void initSqlType(Field field) throws SQLException {
    if (field.isTypeInitialized()) {
      return;
    }
    TypeInfo typeInfo = connection.getTypeInfo();
    int oid = field.getOID();
    String pgType = typeInfo.getRSType(oid);
    int sqlType = typeInfo.getSQLType(pgType);
    field.setSQLType(sqlType);
    field.setRSType(pgType);
  }

  // Verifies the ResultSet is open and updateable, and lazily creates the update buffer.
  private void checkUpdateable() throws SQLException {
    checkClosed();

    if (!isUpdateable()) {
      throw new RedshiftException(
          GT.tr(
              "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    if (updateValues == null) {
      // allow every column to be updated without a rehash.
      updateValues = new HashMap<String, Object>((int) (fields.length / 0.75), 0.75f);
    }
  }

  // Throws when the ResultSet has been closed (both row stores released).
  protected void checkClosed() throws SQLException {
    if (rows == null && queueRows == null) {
      throw new RedshiftException(GT.tr("This ResultSet is closed."),
          RedshiftState.OBJECT_NOT_IN_STATE);
    }
  }

  /*
   * for jdbc3 to call internally
   */
  protected boolean isResultSetClosed() {
    return rows == null && queueRows == null;
  }

  // Validates that a 1-based column index is within this ResultSet's field range.
  protected void checkColumnIndex(int column) throws SQLException {
    if (column < 1 || column > fields.length) {
      throw new RedshiftException(
          GT.tr("The column index is out of range: {0}, number of columns: {1}.",
              column, fields.length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }
  }

  /**
   * Checks that the result set is not closed, it's positioned on a valid row and that the given
   * column number is valid. Also updates the {@link #wasNullFlag} to correct value.
   *
   * @param column The column number to check. Range starts from 1.
   * @throws SQLException If state or column is invalid.
   */
  protected void checkResultSet(int column) throws SQLException {
    checkClosed();
    if (thisRow == null) {
      throw new RedshiftException(
          GT.tr("ResultSet not positioned properly, perhaps you need to call next."),
          RedshiftState.INVALID_CURSOR_STATE);
    }
    checkColumnIndex(column);
    wasNullFlag = (thisRow.get(column - 1) == null);
  }

  /**
   * Returns true if the value of the given column is in binary format.
   *
   * @param column The column to check. Range starts from 1.
   * @return True if the column is in binary format.
   */
  protected boolean isBinary(int column) {
    return fields[column - 1].getFormat() == Field.BINARY_FORMAT;
  }

  // --- OID predicates for Redshift-specific column types (1-based column index). ---

  protected boolean isGeometry(int column) {
    return (fields[column - 1].getOID() == Oid.GEOMETRY);
  }

  protected boolean isGeometryHex(int column) {
    return (fields[column - 1].getOID() == Oid.GEOMETRYHEX);
  }

  protected boolean isSuper(int column) {
    return (fields[column - 1].getOID() == Oid.SUPER);
  }

  protected boolean isVarbyte(int column) {
    return (fields[column - 1].getOID() == Oid.VARBYTE);
  }

  protected boolean isGeography(int column) {
    return (fields[column - 1].getOID() == Oid.GEOGRAPHY);
  }

  protected boolean isInterval(int column) {
    return (fields[column - 1].getOID() == Oid.INTERVAL);
  }

  protected boolean isIntervalYearToMonth(int column) {
    return (fields[column - 1].getOID() == Oid.INTERVALY2M);
  }

  protected boolean isIntervalDayToSecond(int column) {
    return (fields[column - 1].getOID() == Oid.INTERVALD2S);
  }

  // ----------------- Formatting Methods -------------------

  private static final BigInteger SHORTMAX = new BigInteger(Short.toString(Short.MAX_VALUE));
  private static final BigInteger SHORTMIN = new BigInteger(Short.toString(Short.MIN_VALUE));

  /**
   * Parses a string as a {@code short}, falling back to a BigDecimal parse with a range
   * check when the direct parse fails (e.g. for decimal text like "1.0").
   *
   * @param s the text to parse; null yields 0 (SQL NULL convention)
   * @return the parsed value
   * @throws SQLException if the text is not numeric or does not fit in a short
   */
  public static short toShort(String s) throws SQLException {
    if (s != null) {
      try {
        s = s.trim();
        return Short.parseShort(s);
      } catch (NumberFormatException e) {
        try {
          BigDecimal n = new BigDecimal(s);
          BigInteger i = n.toBigInteger();

          int gt = i.compareTo(SHORTMAX);
          int lt = i.compareTo(SHORTMIN);

          if (gt > 0 || lt < 0) {
            throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "short", s),
                RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
          }
          return i.shortValue();
        } catch (NumberFormatException ne) {
          throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "short", s),
              RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
      }
    }
    return 0; // SQL NULL
  }

  private static final BigInteger INTMAX = new BigInteger(Integer.toString(Integer.MAX_VALUE));
  private static final BigInteger INTMIN = new BigInteger(Integer.toString(Integer.MIN_VALUE));

  /**
   * Parses a string as an {@code int}, falling back to a BigDecimal parse with a range
   * check when the direct parse fails.
   *
   * @param s the text to parse; null yields 0 (SQL NULL convention)
   * @return the parsed value
   * @throws SQLException if the text is not numeric or does not fit in an int
   */
  public static int toInt(String s) throws SQLException {
    if (s != null) {
      try {
        s = s.trim();
        return Integer.parseInt(s);
      } catch (NumberFormatException e) {
        try {
          BigDecimal n = new BigDecimal(s);
          BigInteger i = n.toBigInteger();

          int gt = i.compareTo(INTMAX);
          int lt = i.compareTo(INTMIN);

          if (gt > 0 || lt < 0) {
            throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "int", s),
                RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
          }
          return i.intValue();
        } catch (NumberFormatException ne) {
          throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "int", s),
              RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
      }
    }
    return 0; // SQL NULL
  }

  private static final BigInteger LONGMAX = new BigInteger(Long.toString(Long.MAX_VALUE));
  private static final BigInteger LONGMIN = new BigInteger(Long.toString(Long.MIN_VALUE));

  /**
   * Parses a string as a {@code long}, falling back to a BigDecimal parse with a range
   * check when the direct parse fails.
   *
   * @param s the text to parse; null yields 0 (SQL NULL convention)
   * @return the parsed value
   * @throws SQLException if the text is not numeric or does not fit in a long
   */
  public static long toLong(String s) throws SQLException {
    if (s != null) {
      try {
        s = s.trim();
        return Long.parseLong(s);
      } catch (NumberFormatException e) {
        try {
          BigDecimal n = new BigDecimal(s);
          BigInteger i = n.toBigInteger();

          int gt = i.compareTo(LONGMAX);
          int lt = i.compareTo(LONGMIN);

          if (gt > 0 || lt < 0) {
            throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "long", s),
                RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
          }
          return i.longValue();
        } catch (NumberFormatException ne) {
          throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "long", s),
              RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
      }
    }
    return 0; // SQL NULL
  }

  /**
   * Parses a string as a {@link BigDecimal}.
   *
   * @param s the text to parse; null yields null
   * @return the parsed value
   * @throws SQLException if the text is not a valid decimal number
   */
  public static BigDecimal toBigDecimal(String s) throws SQLException {
    if (s == null) {
      return null;
    }
    try {
      s = s.trim();
      return new BigDecimal(s);
    } catch (NumberFormatException e) {
      throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "BigDecimal", s),
          RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
    }
  }

  /**
   * Parses a string as a {@link BigDecimal} and rescales it.
   *
   * @param s the text to parse; null yields null
   * @param scale desired scale, or -1 to keep the parsed scale
   * @return the parsed, rescaled value
   * @throws SQLException if the text is not numeric or rescaling would lose digits
   */
  public BigDecimal toBigDecimal(String s, int scale) throws SQLException {
    if (s == null) {
      return null;
    }
    BigDecimal val = toBigDecimal(s);
    return
        scaleBigDecimal(val, scale);
  }

  // Rescales a BigDecimal to the requested scale; scale == -1 means "leave unchanged".
  // Rescaling that would require rounding is reported as an out-of-range error.
  private BigDecimal scaleBigDecimal(BigDecimal val, int scale) throws RedshiftException {
    if (scale == -1) {
      return val;
    }
    try {
      return val.setScale(scale);
    } catch (ArithmeticException e) {
      throw new RedshiftException(
          GT.tr("Bad value for type {0} : {1}", "BigDecimal", val),
          RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
    }
  }

  /**
   * Parses a string as a {@code float}.
   *
   * @param s the text to parse; null yields 0 (SQL NULL convention)
   * @return the parsed value
   * @throws SQLException if the text is not a valid float
   */
  public static float toFloat(String s) throws SQLException {
    if (s != null) {
      try {
        s = s.trim();
        return Float.parseFloat(s);
      } catch (NumberFormatException e) {
        throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "float", s),
            RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
      }
    }
    return 0; // SQL NULL
  }

  /**
   * Parses a string as a {@code double}.
   *
   * @param s the text to parse; null yields 0 (SQL NULL convention)
   * @return the parsed value
   * @throws SQLException if the text is not a valid double
   */
  public static double toDouble(String s) throws SQLException {
    if (s != null) {
      try {
        s = s.trim();
        return Double.parseDouble(s);
      } catch (NumberFormatException e) {
        throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", "double", s),
            RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
      }
    }
    return 0; // SQL NULL
  }

  // Points thisRow at the current cursor position (list-backed mode only) and prepares
  // rowBuffer for updatable result sets.
  private void initRowBuffer() {
    if (queueRows == null)
      thisRow = rows.get(currentRow);

    // We only need a copy of the current row if we're going to
    // modify it via an updatable resultset.
    if (resultsetconcurrency == ResultSet.CONCUR_UPDATABLE) {
      if (thisRow != null)
        rowBuffer = thisRow.updateableCopy();
      else
        rowBuffer = null;
    } else {
      rowBuffer = null;
    }
  }

  // True for character/binary SQL types whose values may be truncated to maxFieldSize.
  private boolean isColumnTrimmable(int columnIndex) throws SQLException {
    switch (getSQLType(columnIndex)) {
      case Types.CHAR:
      case Types.VARCHAR:
      case Types.LONGVARCHAR:
      case Types.BINARY:
      case Types.VARBINARY:
      case Types.LONGVARBINARY:
        return true;
    }
    return false;
  }

  // Applies the statement's maxFieldSize limit to a byte value where the type allows it.
  private byte[] trimBytes(int columnIndex, byte[] bytes) throws SQLException {
    // we need to trim if maxsize is set and the length is greater than maxsize and the
    // type of this column is a candidate for trimming
    if (maxFieldSize > 0 && bytes.length > maxFieldSize && isColumnTrimmable(columnIndex)) {
      byte[] newBytes = new byte[maxFieldSize];
      System.arraycopy(bytes, 0, newBytes, 0, maxFieldSize);
      return newBytes;
    } else {
      return bytes;
    }
  }

  // Applies the statement's maxFieldSize limit to a string value where the type allows it.
  private String trimString(int columnIndex, String string) throws SQLException {
    // we need to trim if maxsize is set and the length is greater than maxsize and the
    // type of this column is a candidate for trimming
    if (maxFieldSize > 0 && string.length() > maxFieldSize && isColumnTrimmable(columnIndex)) {
      return string.substring(0, maxFieldSize);
    } else {
      return string;
    }
  }

  /**
   * Converts any numeric binary field to double value. This method does no overflow checking.
   *
   * @param bytes The bytes of the numeric field.
   * @param oid The oid of the field.
   * @param targetType The target type. Used for error reporting.
   * @return The value as double.
   * @throws RedshiftException If the field type is not supported numeric type.
   */
  private double readDoubleValue(byte[] bytes, int oid, String targetType, int columnIndex) throws RedshiftException {
    // currently implemented binary encoded fields
    switch (oid) {
      case Oid.INT2:
        return ByteConverter.int2(bytes, 0);
      case Oid.INT4:
        return ByteConverter.int4(bytes, 0);
      case Oid.INT8:
        // might not fit but there still should be no overflow checking
        return ByteConverter.int8(bytes, 0);
      case Oid.FLOAT4:
        return ByteConverter.float4(bytes, 0);
      case Oid.FLOAT8:
        return ByteConverter.float8(bytes, 0);
      case Oid.NUMERIC:
//        return ByteConverter.numeric(bytes).doubleValue();
        return getRedshiftNumeric(columnIndex).doubleValue();
    }
    throw new RedshiftException(GT.tr("Cannot convert the column of type {0} to requested type {1}.",
        Oid.toString(oid), targetType), RedshiftState.DATA_TYPE_MISMATCH);
  }

  /**
   * <p>Converts any numeric binary field to long value.</p>
   *
   * <p>This method is used by getByte,getShort,getInt and getLong. It must support a subset of the
   * following java types that use Binary encoding. (fields that use text encoding use a different
   * code path).
   *
   * <code>byte,short,int,long,float,double,BigDecimal,boolean,string</code>.
   * </p>
   *
   * @param bytes The bytes of the numeric field.
   * @param oid The oid of the field.
   * @param minVal the minimum value allowed.
   * @param maxVal the maximum value allowed.
   * @param targetType The target type. Used for error reporting.
   * @return The value as long.
   * @throws RedshiftException If the field type is not supported numeric type or if the value is out of
   *         range.
   */
  private long readLongValue(byte[] bytes, int oid, long minVal, long maxVal, String targetType, int columnIndex)
      throws RedshiftException {
    long val;
    // currently implemented binary encoded fields
    switch (oid) {
      case Oid.INT2:
        val = ByteConverter.int2(bytes, 0);
        break;
      case Oid.INT4:
      case Oid.OID:
        val = ByteConverter.int4(bytes, 0);
        break;
      case Oid.INT8:
      case Oid.XIDOID:
        val = ByteConverter.int8(bytes, 0);
        break;
      case Oid.FLOAT4:
        val = (long) ByteConverter.float4(bytes, 0);
        break;
      case Oid.FLOAT8:
        val = (long) ByteConverter.float8(bytes, 0);
        break;
      case Oid.NUMERIC:
//        Number num = ByteConverter.numeric(bytes);
        Number num = getRedshiftNumeric(columnIndex);
        if (num instanceof BigDecimal) {
          // truncate toward zero, then fail if the integral part does not fit in a long
          val = ((BigDecimal) num).setScale(0 , RoundingMode.DOWN).longValueExact();
        } else {
          val = num.longValue();
        }
        break;
      default:
        throw new RedshiftException(
            GT.tr("Cannot convert the column of type {0} to requested type {1}.",
                Oid.toString(oid), targetType),
            RedshiftState.DATA_TYPE_MISMATCH);
    }
    if (val < minVal || val > maxVal) {
      throw new RedshiftException(GT.tr("Bad value for type {0} : {1}", targetType, val),
          RedshiftState.NUMERIC_VALUE_OUT_OF_RANGE);
    }
    return val;
  }

  // Records a pending update for the given column; shared by all update*() setters.
  // Not supported when the ring-buffer fetch mode is active.
  protected void updateValue(int columnIndex, Object value) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, columnIndex, value);

    checkUpdateable();

    if(queueRows != null) {
      throw new RedshiftException(GT.tr("Cannot call updateValue() when enableFetchRingBuffer is true."),
          RedshiftState.INVALID_CURSOR_STATE);
    } else if (!onInsertRow && (isBeforeFirst() || isAfterLast() || rows.isEmpty())) {
      throw new RedshiftException(
          GT.tr(
              "Cannot update the ResultSet because it is either before the start or after the end of the results."),
          RedshiftState.INVALID_CURSOR_STATE);
    }

    checkColumnIndex(columnIndex);

    doingUpdates = !onInsertRow;
    if (value == null) {
      updateNull(columnIndex);
    } else {
      RedshiftResultSetMetaData md = (RedshiftResultSetMetaData) getMetaData();
updateValues.put(md.getBaseColumnName(columnIndex), value); } } protected Object getUUID(String data) throws SQLException { UUID uuid; try { uuid = UUID.fromString(data); } catch (IllegalArgumentException iae) { throw new RedshiftException(GT.tr("Invalid UUID data."), RedshiftState.INVALID_PARAMETER_VALUE, iae); } return uuid; } protected Object getUUID(byte[] data) throws SQLException { return new UUID(ByteConverter.int8(data, 0), ByteConverter.int8(data, 8)); } private class PrimaryKey { int index; // where in the result set is this primaryKey String name; // what is the columnName of this primary Key PrimaryKey(int index, String name) { this.index = index; this.name = name; } Object getValue() throws SQLException { return getObject(index); } } // // We need to specify the type of NULL when updating a column to NULL, so // NullObject is a simple extension of RedshiftObject that always returns null // values but retains column type info. // static class NullObject extends RedshiftObject { NullObject(String type) { setType(type); } public String getValue() { return null; } } /** * Used to add rows to an already existing ResultSet that exactly match the existing rows. * Currently only used for assembling generated keys from batch statement execution. 
*/
  void addRows(List<Tuple> tuples) {
    rows.addAll(tuples);
  }

  // --- Unsupported JDBC update operations: each int-indexed variant throws
  // SQLFeatureNotSupportedException; the String-named variants delegate via
  // findColumn() so they fail (or succeed) identically.

  public void updateRef(int columnIndex, Ref x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateRef(int,Ref)");
  }

  public void updateRef(String columnName, Ref x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateRef(String,Ref)");
  }

  public void updateBlob(int columnIndex, Blob x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateBlob(int,Blob)");
  }

  public void updateBlob(String columnName, Blob x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateBlob(String,Blob)");
  }

  public void updateClob(int columnIndex, Clob x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateClob(int,Clob)");
  }

  public void updateClob(String columnName, Clob x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateClob(String,Clob)");
  }

  public void updateArray(int columnIndex, Array x) throws SQLException {
    updateObject(columnIndex, x);
  }

  public void updateArray(String columnName, Array x) throws SQLException {
    updateArray(findColumn(columnName), x);
  }

  /**
   * Retrieves the value of the designated column converted to the requested Java class
   * (JDBC 4.1 {@code getObject(int, Class)}). Each supported target class is matched
   * against the column's SQL type; unsupported combinations raise a RedshiftException
   * with state INVALID_PARAMETER_VALUE.
   *
   * @param columnIndex the first column is 1
   * @param type the requested target class; must not be null
   * @return the converted value, or null when the column value is SQL NULL
   * @throws SQLException if the conversion is not supported or a database error occurs
   */
  public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
    if (type == null) {
      throw new SQLException("type is null");
    }
    int sqlType = getSQLType(columnIndex);
    if (type == BigDecimal.class) {
      if (sqlType == Types.NUMERIC || sqlType == Types.DECIMAL) {
        return type.cast(getBigDecimal(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == String.class) {
      if (sqlType == Types.CHAR || sqlType == Types.VARCHAR) {
        return type.cast(getString(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Boolean.class) {
      if (sqlType == Types.BOOLEAN || sqlType == Types.BIT) {
        boolean booleanValue = getBoolean(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(booleanValue);
      }
      if (sqlType == Types.DATE || sqlType == Types.TIME || sqlType == Types.TIMESTAMP
          || sqlType == Types.TIMESTAMP_WITH_TIMEZONE || sqlType == Types.BINARY) {
        // NOTE(review): wasNull() here reflects the previous column read, since no
        // getter has been called for this column yet — confirm this is intended.
        if (wasNull()) {
          return null;
        }
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
            RedshiftState.INVALID_PARAMETER_VALUE);
      } else {
        // Fall back to Redshift's text-to-boolean coercion for other types.
        String booleanStrValue = getString(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(BooleanTypeUtil.castToBoolean(booleanStrValue));
        // throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
        //    RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Short.class) {
      if (sqlType == Types.SMALLINT) {
        short shortValue = getShort(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(shortValue);
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Integer.class) {
      if (sqlType == Types.INTEGER || sqlType == Types.SMALLINT) {
        int intValue = getInt(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(intValue);
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Long.class) {
      if (sqlType == Types.BIGINT) {
        long longValue = getLong(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(longValue);
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == BigInteger.class) {
      if (sqlType == Types.BIGINT) {
        long longValue = getLong(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(BigInteger.valueOf(longValue));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Float.class) {
      if (sqlType == Types.REAL) {
        float floatValue = getFloat(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(floatValue);
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Double.class) {
      if (sqlType == Types.FLOAT || sqlType == Types.DOUBLE) {
        double doubleValue = getDouble(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(doubleValue);
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Date.class) {
      if (sqlType == Types.DATE) {
        return type.cast(getDate(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Time.class) {
      if (sqlType == Types.TIME) {
        return type.cast(getTime(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Timestamp.class) {
      if (sqlType == Types.TIMESTAMP
          //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
          || sqlType == Types.TIMESTAMP_WITH_TIMEZONE
          //JCP! endif
      ) {
        return type.cast(getTimestamp(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Calendar.class) {
      if (sqlType == Types.TIMESTAMP
          //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
          || sqlType == Types.TIMESTAMP_WITH_TIMEZONE
          //JCP! endif
      ) {
        Timestamp timestampValue = getTimestamp(columnIndex);
        if (wasNull()) {
          return null;
        }
        // Materialize the timestamp in the session's default time zone.
        Calendar calendar = Calendar.getInstance(getDefaultCalendar().getTimeZone());
        calendar.setTimeInMillis(timestampValue.getTime());
        return type.cast(calendar);
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Blob.class) {
      if (sqlType == Types.BLOB || sqlType == Types.BINARY || sqlType == Types.BIGINT) {
        return type.cast(getBlob(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Clob.class) {
      if (sqlType == Types.CLOB || sqlType == Types.BIGINT) {
        return type.cast(getClob(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == java.util.Date.class) {
      if (sqlType == Types.TIMESTAMP) {
        Timestamp timestamp = getTimestamp(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(new java.util.Date(timestamp.getTime()));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Array.class) {
      if (sqlType == Types.ARRAY) {
        return type.cast(getArray(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == SQLXML.class) {
      if (sqlType == Types.SQLXML) {
        return type.cast(getSQLXML(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == UUID.class) {
      return type.cast(getObject(columnIndex));
    } else if (type == InetAddress.class) {
      String inetText = getString(columnIndex);
      if (inetText == null) {
        return null;
      }
      // Strip a trailing "/netmask" suffix before resolving, if present.
      int slash = inetText.indexOf("/");
      try {
        return type.cast(InetAddress.getByName(slash < 0 ? inetText : inetText.substring(0, slash)));
      } catch (UnknownHostException ex) {
        throw new RedshiftException(GT.tr("Invalid Inet data."), RedshiftState.INVALID_PARAMETER_VALUE, ex);
      }
      // JSR-310 support
      //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
    } else if (type == LocalDate.class) {
      if (sqlType == Types.DATE) {
        Date dateValue = getDate(columnIndex);
        if (wasNull()) {
          return null;
        }
        long time = dateValue.getTime();
        // Map the driver's infinity sentinels to the LocalDate extremes.
        if (time == RedshiftStatement.DATE_POSITIVE_INFINITY) {
          return type.cast(LocalDate.MAX);
        }
        if (time == RedshiftStatement.DATE_NEGATIVE_INFINITY) {
          return type.cast(LocalDate.MIN);
        }
        return type.cast(dateValue.toLocalDate());
      } else if (sqlType == Types.TIMESTAMP) {
        LocalDateTime localDateTimeValue = getLocalDateTime(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(localDateTimeValue.toLocalDate());
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == LocalTime.class) {
      if (sqlType == Types.TIME) {
        return type.cast(getLocalTime(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == LocalDateTime.class) {
      if (sqlType == Types.TIMESTAMP) {
        return type.cast(getLocalDateTime(columnIndex));
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == OffsetDateTime.class) {
      if (sqlType == Types.TIMESTAMP_WITH_TIMEZONE || sqlType == Types.TIMESTAMP) {
        OffsetDateTime offsetDateTime = getOffsetDateTime(columnIndex);
        return type.cast(offsetDateTime);
      } else {
        throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
                RedshiftState.INVALID_PARAMETER_VALUE);
      }
      //JCP! endif
    } else if (RedshiftObject.class.isAssignableFrom(type)) {
      Object object;
      // Binary columns are decoded from the raw wire bytes; text columns from
      // their string form.
      if (isBinary(columnIndex)) {
        object = connection.getObject(getRSType(columnIndex), null, thisRow.get(columnIndex - 1));
      } else {
        object = connection.getObject(getRSType(columnIndex), getString(columnIndex), null);
      }
      return type.cast(object);
    }
    throw new RedshiftException(GT.tr("conversion to {0} from {1} not supported", type, getRSType(columnIndex)),
            RedshiftState.INVALID_PARAMETER_VALUE);
  }

  public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
    return getObject(findColumn(columnLabel), type);
  }

  public Object getObject(String s, Map<String, Class<?>> map) throws SQLException {
    return getObjectImpl(s, map);
  }

  public Object getObject(int i, Map<String, Class<?>> map) throws SQLException {
    return getObjectImpl(i, map);
  }

  //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
  public void updateObject(int columnIndex, Object x, java.sql.SQLType targetSqlType,
      int scaleOrLength) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateObject");
  }

  public void updateObject(String columnLabel, Object x, java.sql.SQLType targetSqlType,
      int scaleOrLength) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateObject");
  }

  public void updateObject(int columnIndex, Object x, java.sql.SQLType targetSqlType)
      throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateObject");
  }

  public void updateObject(String columnLabel, Object x, java.sql.SQLType targetSqlType)
      throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateObject");
  }
  //JCP! endif

  public RowId getRowId(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getRowId columnIndex: {0}", columnIndex);

    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getRowId(int)");
  }

  public RowId getRowId(String columnName) throws SQLException {
    return getRowId(findColumn(columnName));
  }

  public void updateRowId(int columnIndex, RowId x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateRowId(int, RowId)");
  }

  public void updateRowId(String columnName, RowId x) throws SQLException {
    updateRowId(findColumn(columnName), x);
  }

  public int getHoldability() throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getHoldability()");
  }

  // Closed when both row containers have been released (see close()).
  public boolean isClosed() throws SQLException {
    return (rows == null && queueRows == null);
  }

  public void updateNString(int columnIndex, String nString) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateNString(int, String)");
  }

  public void updateNString(String columnName, String nString) throws SQLException {
    updateNString(findColumn(columnName), nString);
  }

  public void updateNClob(int columnIndex, NClob nClob) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateNClob(int, NClob)");
  }

  public void updateNClob(String columnName, NClob nClob) throws SQLException {
    updateNClob(findColumn(columnName), nClob);
  }

  public void updateNClob(int columnIndex, Reader reader) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateNClob(int, Reader)");
  }

  public void updateNClob(String columnName, Reader reader) throws SQLException {
    updateNClob(findColumn(columnName), reader);
  }

  public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateNClob(int, Reader, long)");
  }

  public void updateNClob(String columnName, Reader reader, long length) throws SQLException {
    updateNClob(findColumn(columnName), reader, length);
  }

  public NClob getNClob(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getNClob columnIndex: {0}", columnIndex);

    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getNClob(int)");
  }

  public NClob getNClob(String columnName) throws SQLException {
    return getNClob(findColumn(columnName));
  }

  public void updateBlob(int columnIndex, InputStream inputStream, long length)
      throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateBlob(int, InputStream, long)");
  }

  public void updateBlob(String columnName, InputStream inputStream, long length)
      throws SQLException {
    updateBlob(findColumn(columnName), inputStream, length);
  }

  public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateBlob(int, InputStream)");
  }

  public void updateBlob(String columnName, InputStream inputStream) throws SQLException {
    updateBlob(findColumn(columnName), inputStream);
  }

  public void updateClob(int columnIndex, Reader reader, long length) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateClob(int, Reader, long)");
  }

  public void updateClob(String columnName, Reader reader, long length) throws SQLException {
    updateClob(findColumn(columnName), reader, length);
  }

  public void updateClob(int columnIndex, Reader reader) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "updateClob(int, Reader)");
  }

  public void updateClob(String columnName, Reader reader) throws SQLException {
    updateClob(findColumn(columnName), reader);
  }

  // Wraps the column's text value in a RedshiftSQLXML; returns null for SQL NULL.
  public SQLXML getSQLXML(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getSQLXML columnIndex: {0}", columnIndex);

    String data = getString(columnIndex);
    if (data == null) {
      return null;
    }

    return new RedshiftSQLXML(connection, data);
  }

  public SQLXML getSQLXML(String columnName) throws SQLException {
    return getSQLXML(findColumn(columnName));
  }

  public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException {
    updateValue(columnIndex, xmlObject);
  }

  public void updateSQLXML(String columnName, SQLXML xmlObject) throws SQLException {
    updateSQLXML(findColumn(columnName), xmlObject);
  }

  public String getNString(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getNString columnIndex: {0}", columnIndex);

    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getNString(int)");
  }

  public String getNString(String columnName) throws SQLException {
    return getNString(findColumn(columnName));
  }

  public Reader getNCharacterStream(int columnIndex) throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().log(LogLevel.DEBUG, " getNCharacterStream columnIndex: {0}", columnIndex);

    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getNCharacterStream(int)");
  }

  public Reader getNCharacterStream(String columnName) throws SQLException {
    return getNCharacterStream(findColumn(columnName));
  }

  public void updateNCharacterStream(int columnIndex, Reader x, int length) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateNCharacterStream(int, Reader, int)");
  }

  public void updateNCharacterStream(String columnName, Reader x, int length) throws SQLException {
    updateNCharacterStream(findColumn(columnName), x, length);
  }

  public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateNCharacterStream(int, Reader)");
  }

  public void updateNCharacterStream(String columnName, Reader x) throws SQLException {
    updateNCharacterStream(findColumn(columnName), x);
  }

  public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateNCharacterStream(int, Reader, long)");
  }

  public void updateNCharacterStream(String columnName, Reader x, long length) throws SQLException {
    updateNCharacterStream(findColumn(columnName), x, length);
  }

  public void updateCharacterStream(int columnIndex, Reader reader, long length)
      throws SQLException {
    // Fixed typo in the message (was "updateCharaceterStream(int, Reader, long)").
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateCharacterStream(int, Reader, long)");
  }

  public void updateCharacterStream(String columnName, Reader reader, long length)
      throws SQLException {
    updateCharacterStream(findColumn(columnName), reader, length);
  }

  public void updateCharacterStream(int columnIndex, Reader reader) throws SQLException {
    // Fixed typo in the message (was "updateCharaceterStream(int, Reader)").
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateCharacterStream(int, Reader)");
  }

  public void updateCharacterStream(String columnName, Reader reader) throws SQLException {
    updateCharacterStream(findColumn(columnName), reader);
  }

  public void updateBinaryStream(int columnIndex, InputStream inputStream, long length)
      throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateBinaryStream(int, InputStream, long)");
  }

  public void updateBinaryStream(String columnName, InputStream inputStream, long length)
      throws SQLException {
    updateBinaryStream(findColumn(columnName), inputStream, length);
  }

  public void updateBinaryStream(int columnIndex, InputStream inputStream) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateBinaryStream(int, InputStream)");
  }

  public void updateBinaryStream(String columnName, InputStream inputStream) throws SQLException {
    updateBinaryStream(findColumn(columnName), inputStream);
  }

  public void updateAsciiStream(int columnIndex, InputStream inputStream, long length)
      throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateAsciiStream(int, InputStream, long)");
  }

  public void updateAsciiStream(String columnName, InputStream inputStream, long length)
      throws SQLException {
    updateAsciiStream(findColumn(columnName), inputStream, length);
  }

  public void updateAsciiStream(int columnIndex, InputStream inputStream) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "updateAsciiStream(int, InputStream)");
  }

  public void updateAsciiStream(String columnName, InputStream inputStream) throws SQLException {
    updateAsciiStream(findColumn(columnName), inputStream);
  }

  public boolean isWrapperFor(Class<?> iface) throws SQLException {
    return iface.isAssignableFrom(getClass());
  }

  public <T> T unwrap(Class<T> iface) throws SQLException {
    if (iface.isAssignableFrom(getClass())) {
      return iface.cast(this);
    }
    throw new SQLException("Cannot unwrap to " + iface.getName());
  }

  // Returns a shared calendar from TimestampUtils; caches the resolved default
  // time zone in defaultTimeZone on first use.
  private Calendar getDefaultCalendar() {
    TimestampUtils timestampUtils = connection.getTimestampUtils();
    if (timestampUtils.hasFastDefaultTimeZone()) {
      return timestampUtils.getSharedCalendar(null);
    }
    Calendar sharedCalendar = timestampUtils.getSharedCalendar(defaultTimeZone);
    if (defaultTimeZone == null) {
      defaultTimeZone = sharedCalendar.getTimeZone();
    }
    return sharedCalendar;
  }
}
8,531
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftDatabaseMetaData.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.core.BaseStatement;
import com.amazon.redshift.core.Field;
import com.amazon.redshift.core.Oid;
import com.amazon.redshift.core.Tuple;
import com.amazon.redshift.core.TypeInfo;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.ByteConverter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.JdbcBlackHole;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.math.BigInteger;
import java.sql.Array;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.RowIdLifetime;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.StringTokenizer;

// DatabaseMetaData implementation backed by queries against the Redshift
// system catalogs via the owning connection.
public class RedshiftDatabaseMetaData implements DatabaseMetaData {

  // Universal (local+external), local , and external schema indicators.
  private final int NO_SCHEMA_UNIVERSAL_QUERY = 0;
  private final int LOCAL_SCHEMA_QUERY = 1;
  private final int EXTERNAL_SCHEMA_QUERY = 2;

  public RedshiftDatabaseMetaData(RedshiftConnectionImpl conn) {
    this.connection = conn;
  }

  // NOTE(review): appears intended as a cache for getSQLKeywords(), but that
  // method declares a local variable of the same name, so this field is never
  // populated — verify.
  private String keywords;

  protected final RedshiftConnectionImpl connection; // The connection association

  private int nameDataLength = 0; // length for name datatype
  private int indexMaxKeys = 0; // maximum number of keys in an index.
protected int getMaxIndexKeys() throws SQLException {
    // Lazily initialized; 0 means "not yet determined".
    if (indexMaxKeys == 0) {
      /*
       dev=# show max_index_keys;
        max_index_keys
       ----------------
        32
       (1 row)
      */
      // Hard-coded to the server's fixed setting instead of querying pg_settings.
      indexMaxKeys = 32;
      /*
      String sql;
      sql = "SELECT setting FROM pg_catalog.pg_settings WHERE name='max_index_keys'";

      Statement stmt = connection.createStatement();
      ResultSet rs = null;
      try {
        rs = stmt.executeQuery(sql);
        if (!rs.next()) {
          stmt.close();
          throw new RedshiftException(
              GT.tr(
                  "Unable to determine a value for MaxIndexKeys due to missing system catalog data."),
              RedshiftState.UNEXPECTED_ERROR);
        }
        indexMaxKeys = rs.getInt(1);
      } finally {
        JdbcBlackHole.close(rs);
        JdbcBlackHole.close(stmt);
      }
      */
    }
    return indexMaxKeys;
  }

  // Looks up (once) the storage length of the 'name' type from pg_type and
  // returns it minus the terminating byte.
  protected int getMaxNameLength() throws SQLException {
    if (nameDataLength == 0) {
      String sql;
      sql = "SELECT t.typlen FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n "
          + "WHERE t.typnamespace=n.oid AND t.typname='name' AND n.nspname='pg_catalog'";

      Statement stmt = connection.createStatement();
      ResultSet rs = null;
      try {
        rs = stmt.executeQuery(sql);
        if (!rs.next()) {
          throw new RedshiftException(GT.tr("Unable to find name datatype in the system catalogs."),
              RedshiftState.UNEXPECTED_ERROR);
        }
        nameDataLength = rs.getInt("typlen");
      } finally {
        JdbcBlackHole.close(rs);
        JdbcBlackHole.close(stmt);
      }
    }
    return nameDataLength - 1;
  }

  public boolean allProceduresAreCallable() throws SQLException {
    return true; // For now...
  }

  public boolean allTablesAreSelectable() throws SQLException {
    return true; // For now...
  }

  public String getURL() throws SQLException {
    return connection.getURL();
  }

  public String getUserName() throws SQLException {
    return connection.getUserName();
  }

  public boolean isReadOnly() throws SQLException {
    return connection.isReadOnly();
  }

  public boolean nullsAreSortedHigh() throws SQLException {
    return true;
  }

  public boolean nullsAreSortedLow() throws SQLException {
    return false;
  }

  public boolean nullsAreSortedAtStart() throws SQLException {
    return false;
  }

  public boolean nullsAreSortedAtEnd() throws SQLException {
    return false;
  }

  /**
   * Retrieves the name of this database product. We hope that it is Redshift, so we return that
   * explicitly.
   *
   * @return "Redshift"
   */
  @Override
  public String getDatabaseProductName() throws SQLException {
    return "Redshift"; // "PostgreSQL" "Redshift";
  }

  @Override
  public String getDatabaseProductVersion() throws SQLException {
    return connection.getDBVersionNumber();
  }

  @Override
  public String getDriverName() {
    return com.amazon.redshift.util.DriverInfo.DRIVER_NAME;
  }

  @Override
  public String getDriverVersion() {
    return com.amazon.redshift.util.DriverInfo.DRIVER_VERSION;
  }

  @Override
  public int getDriverMajorVersion() {
    return com.amazon.redshift.util.DriverInfo.MAJOR_VERSION;
  }

  @Override
  public int getDriverMinorVersion() {
    return com.amazon.redshift.util.DriverInfo.MINOR_VERSION;
  }

  /**
   * Does the database store tables in a local file? No - it stores them in a file on the server.
   *
   * @return true if so
   * @throws SQLException if a database access error occurs
   */
  public boolean usesLocalFiles() throws SQLException {
    return false;
  }

  /**
   * Does the database use a file for each table? Well, not really, since it doesn't use local files.
   *
   * @return true if so
   * @throws SQLException if a database access error occurs
   */
  public boolean usesLocalFilePerTable() throws SQLException {
    return false;
  }

  /**
   * Does the database treat mixed case unquoted SQL identifiers as case sensitive and as a result
   * store them in mixed case? 
A JDBC-Compliant driver will always return false. * * @return true if so * @throws SQLException if a database access error occurs */ public boolean supportsMixedCaseIdentifiers() throws SQLException { return false; } public boolean storesUpperCaseIdentifiers() throws SQLException { return false; } public boolean storesLowerCaseIdentifiers() throws SQLException { return true; } public boolean storesMixedCaseIdentifiers() throws SQLException { return false; } /** * Does the database treat mixed case quoted SQL identifiers as case sensitive and as a result * store them in mixed case? A JDBC compliant driver will always return true. * * @return true if so * @throws SQLException if a database access error occurs */ public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { return true; } public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { return false; } public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { return false; } public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { return false; } /** * What is the string used to quote SQL identifiers? This returns a space if identifier quoting * isn't supported. A JDBC Compliant driver will always use a double quote character. * * @return the quoting string * @throws SQLException if a database access error occurs */ public String getIdentifierQuoteString() throws SQLException { return "\""; } /** * {@inheritDoc} * * <p>From PostgreSQL 9.0+ return the keywords from pg_catalog.pg_get_keywords()</p> * * @return a comma separated list of keywords we use * @throws SQLException if a database access error occurs */ @Override public String getSQLKeywords() throws SQLException { // Static list from PG8.2 src/backend/parser/keywords.c with SQL:2003 excluded. 
String keywords = "abort,access,aggregate,also,analyse,analyze,backward,bit,cache,checkpoint,class," + "cluster,comment,concurrently,connection,conversion,copy,csv,database,delimiter," + "delimiters,disable,do,enable,encoding,encrypted,exclusive,explain,force,forward,freeze," + "greatest,handler,header,if,ilike,immutable,implicit,index,indexes,inherit,inherits," + "instead,isnull,least,limit,listen,load,location,lock,mode,move,nothing,notify,notnull," + "nowait,off,offset,oids,operator,owned,owner,password,prepared,procedural,quote,reassign," + "recheck,reindex,rename,replace,reset,restrict,returning,rule,setof,share,show,stable," + "statistics,stdin,stdout,storage,strict,sysid,tablespace,temp,template,truncate,trusted," + "unencrypted,unlisten,until,vacuum,valid,validator,verbose,volatile"; return keywords; } public String getNumericFunctions() throws SQLException { return EscapedFunctions.ABS + ',' + EscapedFunctions.ACOS + ',' + EscapedFunctions.ASIN + ',' + EscapedFunctions.ATAN + ',' + EscapedFunctions.ATAN2 + ',' + EscapedFunctions.CEILING + ',' + EscapedFunctions.COS + ',' + EscapedFunctions.COT + ',' + EscapedFunctions.DEGREES + ',' + EscapedFunctions.EXP + ',' + EscapedFunctions.FLOOR + ',' + EscapedFunctions.LOG + ',' + EscapedFunctions.LOG10 + ',' + EscapedFunctions.MOD + ',' + EscapedFunctions.PI + ',' + EscapedFunctions.POWER + ',' + EscapedFunctions.RADIANS + ',' + EscapedFunctions.RANDOM + ',' + EscapedFunctions.ROUND + ',' + EscapedFunctions.SIGN + ',' + EscapedFunctions.SIN + ',' + EscapedFunctions.SQRT + ',' + EscapedFunctions.TAN + ',' + EscapedFunctions.TRUNCATE; } public String getStringFunctions() throws SQLException { String funcs = EscapedFunctions.ASCII + ',' + EscapedFunctions.CHAR + ',' + EscapedFunctions.CHAR_LENGTH + ',' + EscapedFunctions.CHARACTER_LENGTH + ',' + EscapedFunctions.CONCAT + ',' + EscapedFunctions.LCASE + ',' + EscapedFunctions.LEFT + ',' + EscapedFunctions.LENGTH + ',' + EscapedFunctions.LTRIM + ',' + 
EscapedFunctions.OCTET_LENGTH + ',' + EscapedFunctions.POSITION + ',' + EscapedFunctions.REPEAT + ',' + EscapedFunctions.RIGHT + ',' + EscapedFunctions.RTRIM + ',' + EscapedFunctions.SPACE + ',' + EscapedFunctions.SUBSTRING + ',' + EscapedFunctions.UCASE; // Currently these don't work correctly with parameterized // arguments, so leave them out. They reorder the arguments // when rewriting the query, but no translation layer is provided, // so a setObject(N, obj) will not go to the correct parameter. // ','+EscapedFunctions.INSERT+','+EscapedFunctions.LOCATE+ // ','+EscapedFunctions.RIGHT+ funcs += ',' + EscapedFunctions.REPLACE; return funcs; } public String getSystemFunctions() throws SQLException { return EscapedFunctions.DATABASE + ',' + EscapedFunctions.IFNULL + ',' + EscapedFunctions.USER; } public String getTimeDateFunctions() throws SQLException { String timeDateFuncs = EscapedFunctions.CURDATE + ',' + EscapedFunctions.CURTIME + ',' + EscapedFunctions.DAYNAME + ',' + EscapedFunctions.DAYOFMONTH + ',' + EscapedFunctions.DAYOFWEEK + ',' + EscapedFunctions.DAYOFYEAR + ',' + EscapedFunctions.HOUR + ',' + EscapedFunctions.MINUTE + ',' + EscapedFunctions.MONTH + ',' + EscapedFunctions.MONTHNAME + ',' + EscapedFunctions.NOW + ',' + EscapedFunctions.QUARTER + ',' + EscapedFunctions.SECOND + ',' + EscapedFunctions.WEEK + ',' + EscapedFunctions.YEAR; timeDateFuncs += ',' + EscapedFunctions.TIMESTAMPADD; // +','+EscapedFunctions.TIMESTAMPDIFF; return timeDateFuncs; } public String getSearchStringEscape() throws SQLException { // This method originally returned "\\\\" assuming that it // would be fed directly into pg's input parser so it would // need two backslashes. This isn't how it's supposed to be // used though. If passed as a PreparedStatement parameter // or fed to a DatabaseMetaData method then double backslashes // are incorrect. If you're feeding something directly into // a query you are responsible for correctly escaping it. 
// With 8.2+ this escaping is a little trickier because you // must know the setting of standard_conforming_strings, but // that's not our problem. return "\\"; } /** * {@inheritDoc} * * <p>Redshift allows any high-bit character to be used in an unquoted identifier, so we can't * possibly list them all.</p> * * <p>From the file src/backend/parser/scan.l, an identifier is ident_start [A-Za-z\200-\377_] * ident_cont [A-Za-z\200-\377_0-9\$] identifier {ident_start}{ident_cont}*</p> * * @return a string containing the extra characters * @throws SQLException if a database access error occurs */ public String getExtraNameCharacters() throws SQLException { return ""; } /** * {@inheritDoc} * * @return true */ public boolean supportsAlterTableWithAddColumn() throws SQLException { return true; } /** * {@inheritDoc} * * @return true */ public boolean supportsAlterTableWithDropColumn() throws SQLException { return true; } public boolean supportsColumnAliasing() throws SQLException { return true; } public boolean nullPlusNonNullIsNull() throws SQLException { return true; } public boolean supportsConvert() throws SQLException { return false; } public boolean supportsConvert(int fromType, int toType) throws SQLException { return false; } public boolean supportsTableCorrelationNames() throws SQLException { return true; } public boolean supportsDifferentTableCorrelationNames() throws SQLException { return false; } public boolean supportsExpressionsInOrderBy() throws SQLException { return true; } /** * {@inheritDoc} * * @return true */ public boolean supportsOrderByUnrelated() throws SQLException { return true; } public boolean supportsGroupBy() throws SQLException { return true; } /** * {@inheritDoc} * * @return true */ public boolean supportsGroupByUnrelated() throws SQLException { return true; } /* * {@inheritDoc} * * @return true */ public boolean supportsGroupByBeyondSelect() throws SQLException { return true; } /* * {@inheritDoc} * * @return true */ public boolean 
      supportsLikeEscapeClause() throws SQLException {
    return true;
  }

  public boolean supportsMultipleResultSets() throws SQLException {
    return true;
  }

  public boolean supportsMultipleTransactions() throws SQLException {
    return true;
  }

  public boolean supportsNonNullableColumns() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * <p>This grammar is defined at:
   * <a href="http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm">
   * http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm</a></p>
   *
   * <p>In Appendix C. From this description, we seem to support the ODBC minimal (Level 0) grammar.</p>
   *
   * @return true
   */
  public boolean supportsMinimumSQLGrammar() throws SQLException {
    return true;
  }

  /**
   * Does this driver support the Core ODBC SQL grammar. We need SQL-92 conformance for this.
   *
   * @return false
   * @throws SQLException if a database access error occurs
   */
  public boolean supportsCoreSQLGrammar() throws SQLException {
    return false;
  }

  /**
   * Does this driver support the Extended (Level 2) ODBC SQL grammar. We don't conform to the Core
   * (Level 1), so we can't conform to the Extended SQL Grammar.
   *
   * @return false
   * @throws SQLException if a database access error occurs
   */
  public boolean supportsExtendedSQLGrammar() throws SQLException {
    return false;
  }

  /**
   * Does this driver support the ANSI-92 entry level SQL grammar? All JDBC Compliant drivers must
   * return true. We currently report false until 'schema' support is added. Then this should be
   * changed to return true, since we will be mostly compliant (probably more compliant than many
   * other databases) And since this is a requirement for all JDBC drivers we need to get to the
   * point where we can return true.
   *
   * <p>NOTE(review): the paragraph above predates the current implementation, which does
   * return true.</p>
   *
   * @return true
   * @throws SQLException if a database access error occurs
   */
  public boolean supportsANSI92EntryLevelSQL() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return false
   */
  public boolean supportsANSI92IntermediateSQL() throws SQLException {
    return false;
  }

  /**
   * {@inheritDoc}
   *
   * @return false
   */
  public boolean supportsANSI92FullSQL() throws SQLException {
    return false;
  }

  /**
   * Is the SQL Integrity Enhancement Facility supported? Our best guess is that this means support
   * for constraints
   *
   * @return true
   *
   * @exception SQLException if a database access error occurs
   */
  public boolean supportsIntegrityEnhancementFacility() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsOuterJoins() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsFullOuterJoins() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsLimitedOuterJoins() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   * <p>Redshift doesn't have schemas, but when it does, we'll use the term "schema".</p>
   *
   * @return {@code "schema"}
   */
  public String getSchemaTerm() throws SQLException {
    return "schema";
  }

  /**
   * {@inheritDoc}
   *
   * @return {@code "procedure"}
   */
  public String getProcedureTerm() throws SQLException {
    return "procedure"; // function
  }

  /**
   * {@inheritDoc}
   *
   * @return {@code "database"}
   */
  public String getCatalogTerm() throws SQLException {
    return "database";
  }

  public boolean isCatalogAtStart() throws SQLException {
    return true;
  }

  public String getCatalogSeparator() throws SQLException {
    return ".";
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsSchemasInDataManipulation() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsSchemasInProcedureCalls() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean
supportsSchemasInTableDefinitions() throws SQLException { return true; } /** * {@inheritDoc} * * @return true */ public boolean supportsSchemasInIndexDefinitions() throws SQLException { return true; } /** * {@inheritDoc} * * @return true */ public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { return true; } public boolean supportsCatalogsInDataManipulation() throws SQLException { return true; } public boolean supportsCatalogsInProcedureCalls() throws SQLException { return true; } public boolean supportsCatalogsInTableDefinitions() throws SQLException { return true; } public boolean supportsCatalogsInIndexDefinitions() throws SQLException { return true; } public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { return true; } /** * We support cursors for gets only it seems. I dont see a method to get a positioned delete. * * @return false * @throws SQLException if a database access error occurs */ public boolean supportsPositionedDelete() throws SQLException { return false; // For now... } public boolean supportsPositionedUpdate() throws SQLException { return false; // For now... 
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsSelectForUpdate() throws SQLException {
    return true;
  }

  public boolean supportsStoredProcedures() throws SQLException {
    return true;
  }

  public boolean supportsSubqueriesInComparisons() throws SQLException {
    return true;
  }

  public boolean supportsSubqueriesInExists() throws SQLException {
    return true;
  }

  public boolean supportsSubqueriesInIns() throws SQLException {
    return true;
  }

  public boolean supportsSubqueriesInQuantifieds() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsCorrelatedSubqueries() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsUnion() throws SQLException {
    return true; // since 6.3
  }

  /**
   * {@inheritDoc}
   *
   * @return true
   */
  public boolean supportsUnionAll() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc} In Redshift, Cursors are only open within transactions.
   */
  public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
    return false;
  }

  public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
    return false;
  }

  /**
   * {@inheritDoc}
   * <p>Can statements remain open across commits? They may, but this driver cannot guarantee that. In
   * further reflection. we are talking a Statement object here, so the answer is yes, since the
   * Statement is only a vehicle to ExecSQL()</p>
   *
   * @return true
   */
  public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   * <p>Can statements remain open across rollbacks? They may, but this driver cannot guarantee that.
   * In further contemplation, we are talking a Statement object here, so the answer is yes, since
   * the Statement is only a vehicle to ExecSQL() in Connection</p>
   *
   * @return true
   */
  public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
    return true;
  }

  public int getMaxCharLiteralLength() throws SQLException {
    return 0; // no limit
  }

  public int getMaxBinaryLiteralLength() throws SQLException {
    return 0; // no limit
  }

  public int getMaxColumnNameLength() throws SQLException {
    return getMaxNameLength();
  }

  public int getMaxColumnsInGroupBy() throws SQLException {
    return 0; // no limit
  }

  public int getMaxColumnsInIndex() throws SQLException {
    return getMaxIndexKeys();
  }

  public int getMaxColumnsInOrderBy() throws SQLException {
    return 0; // no limit
  }

  public int getMaxColumnsInSelect() throws SQLException {
    return 0; // no limit
  }

  /**
   * {@inheritDoc} What is the maximum number of columns in a table? From the CREATE TABLE reference
   * page...
   *
   * <p>"The new class is created as a heap with no initial data. A class can have no more than 1600
   * attributes (realistically, this is limited by the fact that tuple sizes must be less than 8192
   * bytes)..."</p>
   *
   * @return the max columns
   * @throws SQLException if a database access error occurs
   */
  public int getMaxColumnsInTable() throws SQLException {
    return 1600;
  }

  /**
   * {@inheritDoc} How many active connection can we have at a time to this database? Well, since it
   * depends on postmaster, which just does a listen() followed by an accept() and fork(), its
   * basically very high. Unless the system runs out of processes, it can be 65535 (the number of
   * aux. ports on a TCP/IP system). I will return 8192 since that is what even the largest system
   * can realistically handle,
   *
   * <p>NOTE(review): the implementation actually returns 0 ("limit unknown" per the JDBC
   * contract), not 8192 as the text above suggests.</p>
   *
   * @return the maximum number of connections
   * @throws SQLException if a database access error occurs
   */
  public int getMaxConnections() throws SQLException {
    return 0; // 8192
  }

  public int getMaxCursorNameLength() throws SQLException {
    return getMaxNameLength();
  }

  public int getMaxIndexLength() throws SQLException {
    return 0; // no limit (larger than an int anyway)
  }

  public int getMaxSchemaNameLength() throws SQLException {
    return getMaxNameLength();
  }

  public int getMaxProcedureNameLength() throws SQLException {
    return getMaxNameLength();
  }

  public int getMaxCatalogNameLength() throws SQLException {
    return getMaxNameLength();
  }

  public int getMaxRowSize() throws SQLException {
    return 1073741824; // 1 GB
  }

  public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
    return false;
  }

  public int getMaxStatementLength() throws SQLException {
    return 0; // actually whatever fits in size_t
  }

  public int getMaxStatements() throws SQLException {
    return 0;
  }

  public int getMaxTableNameLength() throws SQLException {
    return getMaxNameLength();
  }

  public int getMaxTablesInSelect() throws SQLException {
    return 0; // no limit
  }

  public int getMaxUserNameLength() throws SQLException {
    return getMaxNameLength();
  }

  public int getDefaultTransactionIsolation() throws SQLException {
    return Connection.TRANSACTION_SERIALIZABLE; // TRANSACTION_READ_COMMITTED;
  }

  public boolean supportsTransactions() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   * <p>We only support TRANSACTION_SERIALIZABLE and TRANSACTION_READ_COMMITTED before 8.0; from 8.0
   * READ_UNCOMMITTED and REPEATABLE_READ are accepted aliases for READ_COMMITTED.</p>
   */
  public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
    return (level == Connection.TRANSACTION_SERIALIZABLE);
  }

  public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
    return true;
  }

  public boolean
supportsDataManipulationTransactionsOnly() throws SQLException { return false; } /** * <p>Does a data definition statement within a transaction force the transaction to commit? It seems * to mean something like:</p> * * <pre> * CREATE TABLE T (A INT); * INSERT INTO T (A) VALUES (2); * BEGIN; * UPDATE T SET A = A + 1; * CREATE TABLE X (A INT); * SELECT A FROM T INTO X; * COMMIT; * </pre> * * <p>Does the CREATE TABLE call cause a commit? The answer is no.</p> * * @return true if so * @throws SQLException if a database access error occurs */ public boolean dataDefinitionCausesTransactionCommit() throws SQLException { return false; } public boolean dataDefinitionIgnoredInTransactions() throws SQLException { return false; } /** * Turn the provided value into a valid string literal for direct inclusion into a query. This * includes the single quotes needed around it. * * @param s input value * * @return string literal for direct inclusion into a query * @throws SQLException if something wrong happens */ protected String escapeQuotes(String s) throws SQLException { StringBuilder sb = new StringBuilder(); /* if (!connection.getStandardConformingStrings()) { sb.append("E"); } */ sb.append("'"); sb.append(connection.escapeString(s)); sb.append("'"); return sb.toString(); } protected String escapeOnlyQuotes(String s) throws SQLException { StringBuilder sb = new StringBuilder(); /* if (!connection.getStandardConformingStrings()) { sb.append("E"); } */ sb.append("'"); sb.append(connection.escapeOnlyQuotesString(s)); sb.append("'"); return sb.toString(); } public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { String sql; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, catalog, schemaPattern, procedureNamePattern); sql = "SELECT current_database() AS PROCEDURE_CAT, n.nspname AS PROCEDURE_SCHEM, p.proname AS PROCEDURE_NAME, " + "NULL, NULL, NULL, d.description AS REMARKS, " + " CASE " + " 
WHEN p.prokind='f' or p.proargmodes is not null THEN 2 " + " WHEN p.prokind='p' THEN 1 " + " ELSE 0 " + " END AS PROCEDURE_TYPE, " + " p.proname || '_' || p.prooid AS SPECIFIC_NAME " + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_proc_info p " + " LEFT JOIN pg_catalog.pg_description d ON (p.prooid=d.objoid) " + " LEFT JOIN pg_catalog.pg_class c ON (d.classoid=c.oid AND c.relname='pg_proc') " + " LEFT JOIN pg_catalog.pg_namespace pn ON (c.relnamespace=pn.oid AND pn.nspname='pg_catalog') " + " WHERE p.pronamespace=n.oid "; /* if (connection.haveMinimumServerVersion(ServerVersion.v11)) { sql += " AND p.prokind='p'"; } */ sql += getCatalogFilterCondition(catalog); if (schemaPattern != null && !schemaPattern.isEmpty()) { sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); } else { /* limit to current schema if no schema given */ sql += "and pg_function_is_visible(p.prooid)"; } if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) { sql += " AND p.proname LIKE " + escapeQuotes(procedureNamePattern); } if (connection.getHideUnprivilegedObjects()) { sql += " AND has_function_privilege(p.prooid,'EXECUTE')"; } sql += " ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, p.prooid::text "; ResultSet rs = createMetaDataStatement().executeQuery(sql); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rs); return rs; } public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { String sql; final String unknownColumnSize = "2147483647"; StringBuilder procedureColQuery = new StringBuilder(); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, catalog, schemaPattern, procedureNamePattern, columnNamePattern); procedureColQuery.append( "SELECT PROCEDURE_CAT , PROCEDURE_SCHEM , PROCEDURE_NAME, COLUMN_NAME, " + " COLUMN_TYPE, DATA_TYPE, TYPE_NAME, COLUMN_SIZE AS PRECISION, LENGTH , DECIMAL_DIGITS AS SCALE, " + " NUM_PREC_RADIX AS RADIX, 
NULLABLE, REMARKS, COLUMN_DEF, SQL_DATA_TYPE, SQL_DATETIME_SUB, " + " CHAR_OCTET_LENGTH, ORDINAL_POSITION, IS_NULLABLE, SPECIFIC_NAME " + " FROM ("); procedureColQuery.append("SELECT current_database() AS PROCEDURE_CAT, " + " n.nspname as PROCEDURE_SCHEM, " + " p.proname AS PROCEDURE_NAME, " + " CAST(CASE ((array_upper(proargnames, 0) - array_lower(proargnames, 0)) > 0) " + " WHEN 't' THEN proargnames[array_upper(proargnames, 1)] " + " ELSE '' " + " END AS VARCHAR(256)) AS COLUMN_NAME, " + " CAST(CASE p.proretset " + " WHEN 't' THEN 3 " + " ELSE 0 " + " END AS SMALLINT) AS COLUMN_TYPE, " + " CAST(CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN '\"char\"' THEN 1" + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN 'date' THEN 91 " + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double precision' THEN 6 " + " WHEN 'float8' THEN 6 " + " WHEN 'float' THEN 6 " + " WHEN 'decimal' THEN 3 " + " WHEN 'numeric' THEN 2 " + " WHEN '_float4' THEN 2003 " + " WHEN '_aclitem' THEN 2003 " + " WHEN '_text' THEN 2003 " + " WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN '_int4' THEN 2003 " + " WHEN '_int2' THEN 2003 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " 
WHEN 'super' THEN -16 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " ELSE 1111 " + " END AS SMALLINT) AS DATA_TYPE, " + " pg_catalog.format_type(p.prorettype, NULL) AS TYPE_NAME, " + " CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'text' THEN NULL " + " WHEN 'varchar' THEN NULL " + " WHEN 'character varying' THEN NULL " + " WHEN '\"char\"' THEN NULL " + " WHEN 'character' THEN NULL " + " WHEN 'nchar' THEN NULL " + " WHEN 'bpchar' THEN NULL " + " WHEN 'nvarchar' THEN NULL " + " WHEN 'date' THEN 10 " + " WHEN 'time' THEN 15 " + " WHEN 'time without time zone' THEN 15 " + " WHEN 'timetz' THEN 21 " + " WHEN 'time with time zone' THEN 21 " + " WHEN 'timestamp' THEN 29 " + " WHEN 'timestamp without time zone' THEN 29 " + " WHEN 'timestamptz' THEN 35 " + " WHEN 'timestamp with time zone' THEN 35 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 10 " + " WHEN 'int' THEN 10 " + " WHEN 'int4' THEN 10 " + " WHEN 'bigint' THEN 19 " + " WHEN 'int8' THEN 19 " + " WHEN 'decimal' THEN 38 " + " WHEN 'real' THEN 24 " + " WHEN 'float4' THEN 53 " + " WHEN 'double precision' THEN 53 " + " WHEN 'float8' THEN 53 " + " WHEN 'float' THEN 53 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN 4194304 " + " WHEN 'varbyte' THEN NULL " + " WHEN 'geography' THEN NULL " + " WHEN 'intervaly2m' THEN 32 " + " WHEN 'intervald2s' THEN 64 " + " ELSE " + unknownColumnSize + " END AS COLUMN_SIZE, " + " CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'text' THEN NULL " + " WHEN 'varchar' THEN NULL " + " WHEN 'character varying' THEN NULL " + " WHEN '\"char\"' THEN NULL " + " WHEN 'character' THEN NULL " + " WHEN 'nchar' THEN NULL " + " WHEN 'bpchar' THEN NULL " + " WHEN 'nvarchar' THEN NULL " + " WHEN 'date' THEN 6 " + " WHEN 'time' THEN 15 " + " WHEN 'time without time zone' THEN 15 " + " WHEN 'timetz' THEN 21 " + " WHEN 'time with time zone' THEN 21 " + " 
WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 35 " + " WHEN 'timestamp with time zone' THEN 35 " + " WHEN 'smallint' THEN 2 " + " WHEN 'int2' THEN 2 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN 20 " + " WHEN 'int8' THEN 20 " + " WHEN 'decimal' THEN 8 " + " WHEN 'real' THEN 4 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 8 " + " WHEN 'float8' THEN 8 " + " WHEN 'float' THEN 8 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN 4194304 " + " WHEN 'varbyte' THEN NULL " + " WHEN 'geography' THEN NULL " + " WHEN 'intervaly2m' THEN 4 " + " WHEN 'intervald2s' THEN 8 " + " END AS LENGTH, " + " CAST(CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'smallint' THEN 0 " + " WHEN 'int2' THEN 0 " + " WHEN 'integer' THEN 0 " + " WHEN 'int' THEN 0 " + " WHEN 'int4' THEN 0 " + " WHEN 'bigint' THEN 0 " + " WHEN 'int8' THEN 0 " + " WHEN 'decimal' THEN 0 " + " WHEN 'real' THEN 8 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 17 " + " WHEN 'float' THEN 17 " + " WHEN 'float8' THEN 17 " + " WHEN 'time' THEN 6 " + " WHEN 'time without time zone' THEN 6 " + " WHEN 'timetz' THEN 6 " + " WHEN 'time with time zone' THEN 6 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 6 " + " WHEN 'timestamp with time zone' THEN 6 " + " WHEN 'intervaly2m' THEN 0 " + " WHEN 'intervald2s' THEN 6 " + " ELSE NULL END AS SMALLINT) AS DECIMAL_DIGITS, " + " 10 AS NUM_PREC_RADIX, " + " CAST(2 AS SMALLINT) AS NULLABLE, " + " CAST('' AS VARCHAR(256)) AS REMARKS, " + " NULL AS COLUMN_DEF, " + " CAST(CASE pg_catalog.format_type(p.prorettype, NULL)" + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN '\"char\"' THEN 1" + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " 
WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN 'date' THEN 91 " + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double precision' THEN 6 " + " WHEN 'float8' THEN 6 " + " WHEN 'float' THEN 6 " + " WHEN 'decimal' THEN 3 " + " WHEN 'numeric' THEN 2 " + " WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -16 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " END AS SMALLINT) AS SQL_DATA_TYPE, " + " CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, " + " CAST(NULL AS SMALLINT) AS CHAR_OCTET_LENGTH, " + " CAST(0 AS SMALLINT) AS ORDINAL_POSITION, " + " CAST('' AS VARCHAR(256)) AS IS_NULLABLE, " + " p.proname || '_' || p.prooid AS SPECIFIC_NAME, " + " p.prooid as PROOID, " + " -1 AS PROARGINDEX " + " FROM pg_catalog.pg_proc_info p LEFT JOIN pg_namespace n ON n.oid = p.pronamespace " + " WHERE pg_catalog.format_type(p.prorettype, NULL) != 'void' "); procedureColQuery.append(getCatalogFilterCondition(catalog)); if (schemaPattern != null && !schemaPattern.isEmpty()) { procedureColQuery.append(" AND n.nspname LIKE " + escapeQuotes(schemaPattern)); } if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) { procedureColQuery.append(" AND proname LIKE " + escapeQuotes(procedureNamePattern)); } if (columnNamePattern != null && 
!columnNamePattern.isEmpty()) { procedureColQuery.append(" AND COLUMN_NAME LIKE " + escapeQuotes(columnNamePattern)); } procedureColQuery.append(" UNION ALL "); procedureColQuery.append(" SELECT DISTINCT current_database() AS PROCEDURE_CAT, " + " PROCEDURE_SCHEM, " + " PROCEDURE_NAME, " + "CAST(CASE (char_length(COLUMN_NAME) > 0) WHEN 't' THEN COLUMN_NAME " + "ELSE '' " + "END AS VARCHAR(256)) AS COLUMN_NAME, " + " CAST( CASE COLUMN_TYPE " + " WHEN 105 THEN 1 " + " WHEN 98 THEN 2 " + " WHEN 111 THEN 4 " + " ELSE 0 END AS SMALLINT) AS COLUMN_TYPE, " + " CAST(CASE DATA_TYPE " + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN '\"char\"' THEN 1 " + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN 'date' THEN 91 " + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double precision' THEN 6 " + " WHEN 'float8' THEN 6 " + " WHEN 'float' THEN 6 " + " WHEN 'decimal' THEN 3 " + " WHEN 'numeric' THEN 2 " + " WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -16 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " ELSE 1111 " + " END AS SMALLINT) AS DATA_TYPE, " + " TYPE_NAME, 
" + " CASE COLUMN_SIZE " + " WHEN 'text' THEN COLUMN_BYTES " + " WHEN 'varchar' THEN COLUMN_BYTES " + " WHEN 'character varying' THEN COLUMN_BYTES " + " WHEN '\"char\"' THEN COLUMN_BYTES " + " WHEN 'character' THEN COLUMN_BYTES " + " WHEN 'nchar' THEN COLUMN_BYTES " + " WHEN 'bpchar' THEN COLUMN_BYTES " + " WHEN 'nvarchar' THEN COLUMN_BYTES " + " WHEN 'date' THEN 10 " + " WHEN 'time' THEN 15 " + " WHEN 'time without time zone' THEN 15 " + " WHEN 'timetz' THEN 21 " + " WHEN 'time with time zone' THEN 21 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 35 " + " WHEN 'timestamp with time zone' THEN 35 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 10 " + " WHEN 'int' THEN 10 " + " WHEN 'int4' THEN 10 " + " WHEN 'bigint' THEN 19 " + " WHEN 'int8' THEN 19 " + " WHEN 'decimal' THEN 38 " + " WHEN 'real' THEN 24 " + " WHEN 'float4' THEN 53 " + " WHEN 'double precision' THEN 53 " + " WHEN 'float8' THEN 53 " + " WHEN 'float' THEN 53 " + " WHEN 'numeric' THEN NUMERIC_PRECISION " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN 4194304 " + " WHEN 'varbyte' THEN NULL " + " WHEN 'geography' THEN NULL " + " WHEN 'intervaly2m' THEN 32 " + " WHEN 'intervald2s' THEN 64 " + " ELSE " + unknownColumnSize + " END AS COLUMN_SIZE, " + " CASE LENGTH " + " WHEN 'text' THEN COLUMN_BYTES " + " WHEN 'varchar' THEN COLUMN_BYTES " + " WHEN 'character varying' THEN COLUMN_BYTES " + " WHEN '\"char\"' THEN COLUMN_BYTES " + " WHEN 'character' THEN COLUMN_BYTES " + " WHEN 'nchar' THEN COLUMN_BYTES " + " WHEN 'bpchar' THEN COLUMN_BYTES " + " WHEN 'nvarchar' THEN COLUMN_BYTES " + " WHEN 'date' THEN 6 " + " WHEN 'time' THEN 6 " + " WHEN 'time without time zone' THEN 6 " + " WHEN 'timetz' THEN 6 " + " WHEN 'time with time zone' THEN 6 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 6 " + " WHEN 'timestamp with time zone' THEN 6 " + " WHEN 'smallint' 
THEN 2 " + " WHEN 'int2' THEN 2 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN 20 " + " WHEN 'int8' THEN 20 " + " WHEN 'decimal' THEN 8 " + " WHEN 'real' THEN 4 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 8 " + " WHEN 'float8' THEN 8 " + " WHEN 'float' THEN 8 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN 4194304 " + " WHEN 'varbyte' THEN NULL " + " WHEN 'geography' THEN NULL " + " WHEN 'intervaly2m' THEN 4 " + " WHEN 'intervald2s' THEN 8 " + " END AS LENGTH, " + " CAST(CASE DECIMAL_DIGITS " + " WHEN 'smallint' THEN 0 " + " WHEN 'int2' THEN 0 " + " WHEN 'integer' THEN 0 " + " WHEN 'int' THEN 0 " + " WHEN 'int4' THEN 0 " + " WHEN 'bigint' THEN 0 " + " WHEN 'int8' THEN 0 " + " WHEN 'decimal' THEN 0 " + " WHEN 'real' THEN 8 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 17 " + " WHEN 'float' THEN 17 " + " WHEN 'float8' THEN 17 " + " WHEN 'numeric' THEN NUMERIC_SCALE " + " WHEN 'time' THEN 6 " + " WHEN 'time without time zone' THEN 6 " + " WHEN 'timetz' THEN 6 " + " WHEN 'time with time zone' THEN 6 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 6 " + " WHEN 'timestamp with time zone' THEN 6 " + " WHEN 'intervaly2m' THEN 0 " + " WHEN 'intervald2s' THEN 6 " + " ELSE NULL END AS SMALLINT) AS DECIMAL_DIGITS, " + " 10 AS NUM_PREC_RADIX, " + " CAST(2 AS SMALLINT) AS NULLABLE, " + " CAST(''AS VARCHAR(256)) AS REMARKS, " + " NULL AS COLUMN_DEF," + " CAST( CASE SQL_DATA_TYPE" + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN '\"char\"' THEN 1 " + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN 'date' THEN 91 " + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' 
THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double precision' THEN 6 " + " WHEN 'float8' THEN 6 " + " WHEN 'float' THEN 6 " + " WHEN 'decimal' THEN 3 " + " WHEN 'numeric' THEN 2 " + " WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -16 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " END AS SMALLINT) AS SQL_DATA_TYPE, " + " CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, " + " CAST(NULL AS SMALLINT) AS CHAR_OCTET_LENGTH, " + " PROARGINDEX AS ORDINAL_POSITION, " + " CAST(''AS VARCHAR(256)) AS IS_NULLABLE, " + " SPECIFIC_NAME, PROOID, PROARGINDEX " + " FROM ( " + " SELECT current_database() AS PROCEDURE_CAT," + " n.nspname AS PROCEDURE_SCHEM, " + " proname AS PROCEDURE_NAME, " + " CASE WHEN (proallargtypes is NULL) THEN proargnames[pos+1] " + " ELSE proargnames[pos] END AS COLUMN_NAME," + " CASE WHEN proargmodes is NULL THEN 105 " + " ELSE CAST(proargmodes[pos] AS INT) END AS COLUMN_TYPE, " + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS DATA_TYPE," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL) " + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS TYPE_NAME," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) 
END AS COLUMN_SIZE," + " CASE WHEN (proallargtypes IS NOT NULL) and prokind='p' AND proallargtypes[pos] IN (1042, 1700, 1043) " + " THEN (string_to_array(textin(byteaout(substring(probin from 1 for length(probin)-3))),','))[pos]::integer " + " WHEN (proallargtypes IS NULL) AND prokind='p' AND proargtypes[pos] IN (1042,1700,1043) " + " THEN (string_to_array(textin(byteaout(substring(probin FROM 1 FOR length(probin)-3))), ',')) [pos+1]::integer " + " END AS PROBIN_BYTES, " + " CASE " + " WHEN (PROBIN_BYTES IS NOT NULL) " + " AND (proallargtypes[pos] IN (1042, 1043) or proargtypes[pos] in (1042,1043)) " + " THEN PROBIN_BYTES-4 " + " END AS COLUMN_BYTES, " + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS LENGTH," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS DECIMAL_DIGITS," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS RADIX," + " CAST(2 AS SMALLINT) AS NULLABLE," + " CAST(''AS VARCHAR(256)) AS REMARKS," + " CAST(NULL AS SMALLINT) AS COLUMN_DEF," + " pg_catalog.format_type(proargtypes[pos], NULL) AS SQL_DATA_TYPE," + " CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB," + " pg_catalog.format_type(proargtypes[pos], NULL) AS CHAR_OCTET_LENGTH," + " CASE WHEN (proallargtypes is NULL) THEN pos+1" + " WHEN pos = array_upper(proallargtypes, 1) THEN 0" + " ELSE pos END AS ORDINAL_POSITION," + " CAST('' AS VARCHAR(256)) AS IS_NULLABLE," + " p.prooid AS PROOID," + " CASE WHEN (proallargtypes is NULL) THEN pos+1" + " WHEN prokind = 'f' AND pos = array_upper(proallargtypes, 1) THEN 0" + " ELSE pos END AS PROARGINDEX, " + " CASE WHEN (proallargtypes IS NULL AND proargtypes[pos] = 1700 AND prokind='p') OR (proallargtypes IS NOT NULL AND proallargtypes[pos] = 1700 AND 
prokind='p' AND proallargtypes[pos] = 1700) THEN (PROBIN_BYTES-4)/65536 END as NUMERIC_PRECISION, " + " CASE WHEN (proallargtypes IS NULL AND proargtypes[pos] = 1700 AND prokind='p') OR (proallargtypes IS NOT NULL AND proallargtypes[pos] = 1700 AND prokind='p' AND proallargtypes[pos] = 1700) THEN (((PROBIN_BYTES::numeric-4)/65536 - (PROBIN_BYTES-4)/65536) * 65536)::INT END as NUMERIC_SCALE, " + " p.proname || '_' || p.prooid AS SPECIFIC_NAME " + " FROM (pg_catalog.pg_proc_info p LEFT JOIN pg_namespace n" + " ON n.oid = p.pronamespace)" + " LEFT JOIN (SELECT " + " CASE WHEN (proallargtypes IS NULL) " + " THEN generate_series(array_lower(proargnames, 1), array_upper(proargnames, 1))-1" + " ELSE generate_series(array_lower(proargnames, 1), array_upper(proargnames, 1)+1)-1 " + " END AS pos" + " FROM pg_catalog.pg_proc_info p ) AS s ON (pos >= 0)"); procedureColQuery.append(" WHERE true "); procedureColQuery.append(getCatalogFilterCondition(catalog)); if (schemaPattern != null && !schemaPattern.isEmpty()) { procedureColQuery.append(" AND n.nspname LIKE " + escapeQuotes(schemaPattern)); } if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) { procedureColQuery.append(" AND proname LIKE " + escapeQuotes(procedureNamePattern)); } if (columnNamePattern != null && !columnNamePattern.isEmpty()) { procedureColQuery.append(" AND COLUMN_NAME LIKE " + escapeQuotes(columnNamePattern)); } procedureColQuery.append(" ) AS INPUT_PARAM_TABLE" + " WHERE ORDINAL_POSITION IS NOT NULL" + " ) AS RESULT_SET WHERE (DATA_TYPE != 1111 OR (TYPE_NAME IS NOT NULL AND TYPE_NAME != '-'))" + " ORDER BY PROCEDURE_CAT ,PROCEDURE_SCHEM," + " PROCEDURE_NAME, PROOID, PROARGINDEX, COLUMN_TYPE DESC"); sql = procedureColQuery.toString(); ResultSet rs = createMetaDataStatement().executeQuery(sql); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rs); return rs; } @Override public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] 
types) throws SQLException {
    // Continuation of getTables(catalog, schemaPattern, tableNamePattern, types):
    // picks one of three query builders depending on how the schema pattern
    // classifies (local pg_catalog join, universal svv_* view, or external schema).
    String sql = null;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, catalog, schemaPattern, tableNamePattern, types);

    // An explicit override on the connection wins over the heuristic match
    // derived from the schema pattern itself.
    int schemaPatternType = Optional.ofNullable(connection.getOverrideSchemaPatternType()).orElse(getExtSchemaPatternMatch(schemaPattern));

    if (RedshiftLogger.isEnable())
      connection.getLogger().logInfo("schemaPatternType = {0}", schemaPatternType);

    if (schemaPatternType == LOCAL_SCHEMA_QUERY) {
      // Join on pg_catalog
      sql = buildLocalSchemaTablesQuery(catalog, schemaPattern, tableNamePattern, types);
    }
    else if (schemaPatternType == NO_SCHEMA_UNIVERSAL_QUERY) {
      if (isSingleDatabaseMetaData()) {
        // svv_tables
        sql = buildUniversalSchemaTablesQuery(catalog, schemaPattern, tableNamePattern, types);
      }
      else {
        // svv_all_tables
        sql = buildUniversalAllSchemaTablesQuery(catalog, schemaPattern, tableNamePattern, types);
      }
    }
    else if (schemaPatternType == EXTERNAL_SCHEMA_QUERY) {
      // svv_external_tables
      sql = buildExternalSchemaTablesQuery(catalog, schemaPattern, tableNamePattern, types);
    }

    // NOTE(review): if schemaPatternType matches none of the three constants, sql
    // stays null and executeQuery(null) is reached — presumably unreachable in
    // practice; confirm against getExtSchemaPatternMatch before relying on it.
    ResultSet rs = createMetaDataStatement().executeQuery(sql);

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rs);

    return rs;
  }

  /**
   * Builds the getTables() SQL for the "local schema" case: a direct join of
   * pg_catalog.pg_namespace / pg_catalog.pg_class with pg_description for
   * REMARKS. The nested CASE maps relkind + namespace onto the JDBC
   * TABLE_TYPE vocabulary (SYSTEM TABLE, TEMPORARY VIEW, ...).
   *
   * @param catalog           catalog name filter (exact), may be null
   * @param schemaPattern     LIKE pattern for TABLE_SCHEM, may be null/empty
   * @param tableNamePattern  LIKE pattern for TABLE_NAME, may be null/empty
   * @param types             table types to include, may be null for all
   * @return the complete SQL text (select + filter + order by)
   * @throws SQLException propagated from filter-clause construction
   */
  private String buildLocalSchemaTablesQuery(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
    String select;
    select = "SELECT CAST(current_database() AS VARCHAR(124)) AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname AS TABLE_NAME, "
        + " CASE n.nspname ~ '^pg_' OR n.nspname = 'information_schema' "
        + " WHEN true THEN CASE "
        + " WHEN n.nspname = 'pg_catalog' OR n.nspname = 'information_schema' THEN CASE c.relkind "
        + " WHEN 'r' THEN 'SYSTEM TABLE' "
        + " WHEN 'v' THEN 'SYSTEM VIEW' "
        + " WHEN 'i' THEN 'SYSTEM INDEX' "
        + " ELSE NULL "
        + " END "
        + " WHEN n.nspname = 'pg_toast' THEN CASE c.relkind "
        + " WHEN 'r' THEN 'SYSTEM TOAST TABLE' "
        + " WHEN 'i' THEN 'SYSTEM TOAST INDEX' "
        + " ELSE NULL "
        + " END "
        + " ELSE CASE c.relkind "
        + " WHEN 'r' THEN 'TEMPORARY TABLE' "
        + " WHEN 'p' THEN 'TEMPORARY TABLE' "
        + " WHEN 'i' THEN 'TEMPORARY INDEX' "
        + " WHEN 'S' THEN 'TEMPORARY SEQUENCE' "
        + " WHEN 'v' THEN 'TEMPORARY VIEW' "
        + " ELSE NULL "
        + " END "
        + " END "
        + " WHEN false THEN CASE c.relkind "
        + " WHEN 'r' THEN 'TABLE' "
        + " WHEN 'p' THEN 'PARTITIONED TABLE' "
        + " WHEN 'i' THEN 'INDEX' "
        + " WHEN 'S' THEN 'SEQUENCE' "
        + " WHEN 'v' THEN 'VIEW' "
        + " WHEN 'c' THEN 'TYPE' "
        + " WHEN 'f' THEN 'FOREIGN TABLE' "
        + " WHEN 'm' THEN 'MATERIALIZED VIEW' "
        + " ELSE NULL "
        + " END "
        + " ELSE NULL "
        + " END "
        + " AS TABLE_TYPE, d.description AS REMARKS, "
        + " '' as TYPE_CAT, '' as TYPE_SCHEM, '' as TYPE_NAME, "
        + "'' AS SELF_REFERENCING_COL_NAME, '' AS REF_GENERATION "
        + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c "
        + " LEFT JOIN pg_catalog.pg_description d ON (c.oid = d.objoid AND d.objsubid = 0) "
        + " LEFT JOIN pg_catalog.pg_class dc ON (d.classoid=dc.oid AND dc.relname='pg_class') "
        + " LEFT JOIN pg_catalog.pg_namespace dn ON (dn.oid=dc.relnamespace AND dn.nspname='pg_catalog') "
        + " WHERE c.relnamespace = n.oid ";

    String filterClause = getTableFilterClause(catalog, schemaPattern, tableNamePattern, types, LOCAL_SCHEMA_QUERY, true, null);
    String orderby = " ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME ";

    return select + filterClause + orderby;
  }

  /**
   * Builds the shared WHERE-clause fragment for the getTables() family of
   * queries: catalog filter, schema/table LIKE patterns, table-type filter,
   * and (for local queries) the unprivileged-object filter.
   *
   * @param catalog          catalog name filter (exact), may be null
   * @param schemaPattern    LIKE pattern for TABLE_SCHEM, may be null/empty
   * @param tableNamePattern LIKE pattern for TABLE_NAME, may be null/empty
   * @param types            table types to include, may be null for all
   * @param schemaPatternType one of LOCAL_SCHEMA_QUERY / NO_SCHEMA_UNIVERSAL_QUERY /
   *                          EXTERNAL_SCHEMA_QUERY; selects the filter dialect
   * @param apiSupportedOnlyForConnectedDatabase whether catalog filtering is
   *                          restricted to the connected database
   * @param databaseColName  column to compare the catalog against when
   *                          cross-database filtering applies, may be null
   * @return SQL fragment beginning with " AND ..." terms (possibly empty)
   * @throws SQLException propagated from catalog-filter construction
   */
  private String getTableFilterClause(String catalog, String schemaPattern, String tableNamePattern, String[] types, int schemaPatternType, boolean apiSupportedOnlyForConnectedDatabase, String databaseColName) throws SQLException {
    String filterClause = "";
    String useSchemas = "SCHEMAS";
    filterClause += getCatalogFilterCondition(catalog, apiSupportedOnlyForConnectedDatabase, databaseColName);

    if (schemaPattern != null && !schemaPattern.isEmpty()) {
      filterClause += " AND TABLE_SCHEM LIKE " + escapeQuotes(schemaPattern);
    }
    if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
      filterClause += " AND TABLE_NAME LIKE " + escapeQuotes(tableNamePattern);
    }
    if
(types != null) {
      if (schemaPatternType == LOCAL_SCHEMA_QUERY) {
        // Local queries filter on pg_class/pg_namespace predicates looked up
        // from the static tableTypeClauses map; unknown types are ignored.
        // The "false" seed lets the OR-chain concatenate uniformly.
        filterClause += " AND (false ";
        StringBuilder orclause = new StringBuilder();
        for (String type : types) {
          Map<String, String> clauses = tableTypeClauses.get(type);
          if (clauses != null) {
            String clause = clauses.get(useSchemas);
            orclause.append(" OR ( ").append(clause).append(" ) ");
          }
        }
        filterClause += orclause.toString() + ") ";
      }
      else if (schemaPatternType == NO_SCHEMA_UNIVERSAL_QUERY || schemaPatternType == EXTERNAL_SCHEMA_QUERY) {
        // View-based queries expose TABLE_TYPE directly, so a simple IN list
        // of quoted type names suffices.
        filterClause += (" AND TABLE_TYPE IN ( ");
        int len = types.length;
        for (String type : types) {
          filterClause += escapeQuotes(type);
          len--;
          if (len > 0)
            filterClause += ", ";
        }
        filterClause += ") ";
      }
    }

    if (schemaPatternType == LOCAL_SCHEMA_QUERY) {
      if (connection.getHideUnprivilegedObjects()) {
        filterClause += " AND has_table_privilege(c.oid, " + " 'SELECT, INSERT, UPDATE, DELETE, RULE, REFERENCES, TRIGGER')";
      }
    }

    return filterClause;
  }

  /**
   * Builds the getTables() SQL for the single-database "universal" case, on
   * top of the svv_tables view. The CASE expression rewrites the view's
   * BASE TABLE / VIEW / EXTERNAL TABLE values into JDBC TABLE_TYPE terms.
   *
   * @throws SQLException propagated from filter-clause construction
   */
  private String buildUniversalSchemaTablesQuery(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
    // Basic query, without the join operation and subquery name appended to the end
    StringBuilder tableQuery = new StringBuilder(2048);
    tableQuery.append("SELECT * FROM (SELECT CAST(current_database() AS VARCHAR(124)) AS TABLE_CAT,"
        + " table_schema AS TABLE_SCHEM,"
        + " table_name AS TABLE_NAME,"
        + " CAST("
        + " CASE table_type"
        + " WHEN 'BASE TABLE' THEN CASE"
        + " WHEN table_schema = 'pg_catalog' OR table_schema = 'information_schema' THEN 'SYSTEM TABLE'"
        + " WHEN table_schema = 'pg_toast' THEN 'SYSTEM TOAST TABLE'"
        + " WHEN table_schema ~ '^pg_' AND table_schema != 'pg_toast' THEN 'TEMPORARY TABLE'"
        + " ELSE 'TABLE'"
        + " END"
        + " WHEN 'VIEW' THEN CASE"
        + " WHEN table_schema = 'pg_catalog' OR table_schema = 'information_schema' THEN 'SYSTEM VIEW'"
        + " WHEN table_schema = 'pg_toast' THEN NULL"
        + " WHEN table_schema ~ '^pg_' AND table_schema != 'pg_toast' THEN 'TEMPORARY VIEW'"
        + " ELSE 'VIEW'"
        + " END"
        + " WHEN 'EXTERNAL TABLE' THEN 'EXTERNAL TABLE'"
        + " END"
        + " AS VARCHAR(124)) AS TABLE_TYPE,"
        + " REMARKS,"
        + " '' as TYPE_CAT,"
        + " '' as TYPE_SCHEM,"
        + " '' as TYPE_NAME, "
        + " '' AS SELF_REFERENCING_COL_NAME,"
        + " '' AS REF_GENERATION "
        + " FROM svv_tables)");
    tableQuery.append( " WHERE true ");

    String filterClause = getTableFilterClause(catalog, schemaPattern, tableNamePattern, types, NO_SCHEMA_UNIVERSAL_QUERY, true, null);
    String orderby = " ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME ";

    tableQuery.append(filterClause);
    tableQuery.append(orderby);

    return tableQuery.toString();
  }

  // Datashare/Cross-db support svv_all_tables view
  /**
   * Builds the getTables() SQL for the multi-database (datashare / cross-db)
   * case, on top of PG_CATALOG.SVV_ALL_TABLES, which already carries a
   * DATABASE_NAME column used as TABLE_CAT.
   *
   * @throws SQLException propagated from filter-clause construction
   */
  private String buildUniversalAllSchemaTablesQuery(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
    StringBuilder tableQuery = new StringBuilder(2048);
    tableQuery.append("SELECT * FROM (SELECT CAST(DATABASE_NAME AS VARCHAR(124)) AS TABLE_CAT,"
        + " SCHEMA_NAME AS TABLE_SCHEM,"
        + " TABLE_NAME AS TABLE_NAME,"
        + " CAST("
        + " CASE "
        + " WHEN SCHEMA_NAME='information_schema' "
        + " AND TABLE_TYPE='TABLE' THEN 'SYSTEM TABLE' "
        + " WHEN SCHEMA_NAME='information_schema' "
        + " AND TABLE_TYPE='VIEW' THEN 'SYSTEM VIEW' "
        + " ELSE TABLE_TYPE "
        + " END "
        + " AS VARCHAR(124)) AS TABLE_TYPE,"
        + " REMARKS,"
        + " '' as TYPE_CAT,"
        + " '' as TYPE_SCHEM,"
        + " '' as TYPE_NAME, "
        + " '' AS SELF_REFERENCING_COL_NAME,"
        + " '' AS REF_GENERATION "
        + " FROM PG_CATALOG.SVV_ALL_TABLES)");
    tableQuery.append( " WHERE true ");

    // Catalog filtering here is NOT limited to the connected database, and
    // compares against the TABLE_CAT column of the view.
    String filterClause = getTableFilterClause(catalog, schemaPattern, tableNamePattern, types, NO_SCHEMA_UNIVERSAL_QUERY, false, "TABLE_CAT");
    String orderby = " ORDER BY TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, TABLE_NAME ";

    tableQuery.append(filterClause);
    tableQuery.append(orderby);

    return tableQuery.toString();
  }

  /**
   * Builds the getTables() SQL for external (e.g. Spectrum) schemas, on top
   * of svv_external_tables. All rows are reported as 'EXTERNAL TABLE' with
   * NULL REMARKS.
   *
   * @throws SQLException propagated from filter-clause construction
   */
  private String buildExternalSchemaTablesQuery(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
    // Basic query, without the join operation and subquery name appended to the end
    StringBuilder tableQuery = new StringBuilder(2048);
    tableQuery.append("SELECT * FROM (SELECT CAST(current_database() AS VARCHAR(124)) AS TABLE_CAT,"
        + " schemaname AS table_schem,"
        + " tablename AS TABLE_NAME,"
        + " 'EXTERNAL TABLE' AS TABLE_TYPE,"
        + " NULL AS REMARKS,"
        + " '' as TYPE_CAT,"
        + " '' as TYPE_SCHEM,"
        + " '' as TYPE_NAME, "
        + " '' AS SELF_REFERENCING_COL_NAME,"
        + " '' AS REF_GENERATION "
        + " FROM svv_external_tables)");
    tableQuery.append( " WHERE true ");

    String filterClause = getTableFilterClause(catalog, schemaPattern, tableNamePattern, types, EXTERNAL_SCHEMA_QUERY, true, null);
    String orderby = " ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME ";

    tableQuery.append(filterClause);
    tableQuery.append(orderby);

    return tableQuery.toString();
  }

  // Maps each JDBC table-type name to the pg_catalog predicate that selects it.
  // Inner map keys: "SCHEMAS" (predicate over n.nspname) and "NOSCHEMAS"
  // (predicate over c.relname only). "EXTERNAL TABLE" maps to null because it
  // is resolved through svv_external_tables, not pg_catalog.
  private static final Map<String, Map<String, String>> tableTypeClauses;

  static {
    tableTypeClauses = new HashMap<String, Map<String, String>>();
    Map<String, String> ht = new HashMap<String, String>();
    tableTypeClauses.put("TABLE", ht);
    ht.put("SCHEMAS",
        "c.relkind = 'r' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
    ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname !~ '^pg_'");
    /* ht = new HashMap<String, String>(); tableTypeClauses.put("PARTITIONED TABLE", ht); ht.put("SCHEMAS", "c.relkind = 'p' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'"); ht.put("NOSCHEMAS", "c.relkind = 'p' AND c.relname !~ '^pg_'"); */
    ht = new HashMap<String, String>();
    tableTypeClauses.put("VIEW", ht);
    ht.put("SCHEMAS",
        "c.relkind = 'v' AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema'");
    ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname !~ '^pg_'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("INDEX", ht);
    ht.put("SCHEMAS",
        "c.relkind = 'i' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
    ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname !~ '^pg_'");
    ht = new HashMap<String, String>();
tableTypeClauses.put("SEQUENCE", ht);
    ht.put("SCHEMAS", "c.relkind = 'S'");
    ht.put("NOSCHEMAS", "c.relkind = 'S'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("TYPE", ht);
    ht.put("SCHEMAS",
        "c.relkind = 'c' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
    ht.put("NOSCHEMAS", "c.relkind = 'c' AND c.relname !~ '^pg_'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("SYSTEM TABLE", ht);
    ht.put("SCHEMAS",
        "c.relkind = 'r' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema')");
    ht.put("NOSCHEMAS",
        "c.relkind = 'r' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("SYSTEM TOAST TABLE", ht);
    ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname = 'pg_toast'");
    ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname ~ '^pg_toast_'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("SYSTEM TOAST INDEX", ht);
    ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname = 'pg_toast'");
    ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_toast_'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("SYSTEM VIEW", ht);
    ht.put("SCHEMAS",
        "c.relkind = 'v' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') ");
    ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("SYSTEM INDEX", ht);
    ht.put("SCHEMAS",
        "c.relkind = 'i' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') ");
    // NOTE(review): relkind 'v' below looks like a copy/paste slip — a SYSTEM
    // INDEX should presumably match 'i' like the SCHEMAS clause above; confirm
    // intended behavior before changing.
    ht.put("NOSCHEMAS",
        "c.relkind = 'v' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("TEMPORARY TABLE", ht);
    ht.put("SCHEMAS", "c.relkind IN ('r','p') AND n.nspname ~ '^pg_temp_' ");
    ht.put("NOSCHEMAS", "c.relkind IN ('r','p') AND c.relname ~ '^pg_temp_' ");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("TEMPORARY INDEX", ht);
    ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname ~ '^pg_temp_' ");
    ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_temp_' ");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("TEMPORARY VIEW", ht);
    ht.put("SCHEMAS", "c.relkind = 'v' AND n.nspname ~ '^pg_temp_' ");
    ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_temp_' ");
    ht = new HashMap<String, String>();
    tableTypeClauses.put("TEMPORARY SEQUENCE", ht);
    ht.put("SCHEMAS", "c.relkind = 'S' AND n.nspname ~ '^pg_temp_' ");
    ht.put("NOSCHEMAS", "c.relkind = 'S' AND c.relname ~ '^pg_temp_' ");
    /* ht = new HashMap<String, String>(); tableTypeClauses.put("FOREIGN TABLE", ht); ht.put("SCHEMAS", "c.relkind = 'f'"); ht.put("NOSCHEMAS", "c.relkind = 'f'"); ht = new HashMap<String, String>(); tableTypeClauses.put("MATERIALIZED VIEW", ht); ht.put("SCHEMAS", "c.relkind = 'm'"); ht.put("NOSCHEMAS", "c.relkind = 'm'"); */
    // External tables are not backed by pg_catalog predicates; a null value
    // makes getTableFilterClause() skip them in the local-schema OR-chain.
    tableTypeClauses.put("EXTERNAL TABLE", null);
  }

  @Override
  public ResultSet getSchemas() throws SQLException {
    // Delegates to the filtered overload with no catalog / pattern filters.
    return getSchemas(null, null);
  }

  @Override
  public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
    String sql;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, catalog, schemaPattern);

    if (isSingleDatabaseMetaData()) {
      // Single-database mode: list namespaces from pg_catalog, excluding toast
      // schemas and other sessions' temp schemas (the current session's temp
      // schema is kept via current_schemas(true)[1]).
      sql = "SELECT nspname AS TABLE_SCHEM, current_database() AS TABLE_CATALOG FROM pg_catalog.pg_namespace "
          + " WHERE nspname <> 'pg_toast' AND (nspname !~ '^pg_temp_' "
          + " OR nspname = (pg_catalog.current_schemas(true))[1]) AND (nspname !~ '^pg_toast_temp_' "
          + " OR nspname = replace((pg_catalog.current_schemas(true))[1], 'pg_temp_', 'pg_toast_temp_')) ";
      sql += getCatalogFilterCondition(catalog);

      if (schemaPattern != null && !schemaPattern.isEmpty()) {
        sql += " AND nspname LIKE " + escapeQuotes(schemaPattern);
      }
      if (connection.getHideUnprivilegedObjects()) {
        sql += " AND has_schema_privilege(nspname, 'USAGE, CREATE')";
      }

      sql += " ORDER BY TABLE_SCHEM";
    }
    else {
      // Datashare / cross-database mode: schemas of all visible databases.
      sql = "SELECT CAST(schema_name AS varchar(124)) AS TABLE_SCHEM, "
          + " CAST(database_name AS varchar(124)) AS TABLE_CATALOG "
          + " FROM PG_CATALOG.SVV_ALL_SCHEMAS "
          + " WHERE TRUE ";
      sql += getCatalogFilterCondition(catalog, false, null);

      if (schemaPattern != null && !schemaPattern.isEmpty()) {
        sql += " AND schema_name LIKE " + escapeQuotes(schemaPattern);
      }

      sql += " ORDER BY TABLE_CATALOG, TABLE_SCHEM";
    }

    ResultSet rs = createMetaDataStatement().executeQuery(sql);

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rs);

    return rs;
  }

  /**
   * Redshift does not support multiple catalogs from a single connection, so to reduce confusion
   * we only return the current catalog. {@inheritDoc}
   */
  @Override
  public ResultSet getCatalogs() throws SQLException {
    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true);

    String sql;

    if (isSingleDatabaseMetaData()) {
      // Behaviour same as before i.e. returns only the single connected database,
      // built as an in-memory driver result set (no server round trip needed).
      Field[] f = new Field[1];
      List<Tuple> v = new ArrayList<Tuple>();
      f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
      byte[][] tuple = new byte[1][];
      tuple[0] = connection.encodeString(connection.getCatalog());
      v.add(new Tuple(tuple));
      return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
    }
    else {
      // Datasharing/federation support enabled, so get databases using the new view.
      sql = "SELECT CAST(database_name AS varchar(124)) AS TABLE_CAT FROM PG_CATALOG.SVV_REDSHIFT_DATABASES ";
    }

    sql += " ORDER BY TABLE_CAT";

    ResultSet rs = createMetaDataStatement().executeQuery(sql);

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rs);

    return rs;
  }

  @Override
  public ResultSet getTableTypes() throws SQLException {
    // Returns the sorted key set of tableTypeClauses as an in-memory result set.
    String[] types = tableTypeClauses.keySet().toArray(new String[0]);
    Arrays.sort(types);

    Field[] f = new Field[1];
    List<Tuple> v = new ArrayList<Tuple>();
    f[0] = new Field("TABLE_TYPE", Oid.VARCHAR);
    for (String type : types) {
      byte[][] tuple = new byte[1][];
      tuple[0] = connection.encodeString(type);
      v.add(new Tuple(tuple));
    }

    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
  }

  /**
   * {@inheritDoc}
   *
   * Like getTables(), dispatches to one of several query builders depending on
   * how the schema pattern classifies (local pg_catalog join + late-binding
   * views, universal svv_columns / svv_all_columns, or external schema).
   * Note: unlike getTables(), no connection-level override is consulted here.
   */
  public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
    String sql = null;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(true, catalog, schemaPattern, tableNamePattern, columnNamePattern);

    int schemaPatternType = getExtSchemaPatternMatch(schemaPattern);

    if (RedshiftLogger.isEnable())
      connection.getLogger().logInfo("schemaPatternType = {0}", schemaPatternType);

    if (schemaPatternType == LOCAL_SCHEMA_QUERY) {
      // Join on pg_catalog union with pg_late_binding_view
      sql = buildLocalSchemaColumnsQuery(catalog, schemaPattern, tableNamePattern, columnNamePattern);
    }
    else if (schemaPatternType == NO_SCHEMA_UNIVERSAL_QUERY) {
      if (isSingleDatabaseMetaData()) {
        // svv_columns
        sql = buildUniversalSchemaColumnsQuery(catalog, schemaPattern, tableNamePattern, columnNamePattern);
      }
      else {
        // svv_all_columns
        sql = buildUniversalAllSchemaColumnsQuery(catalog, schemaPattern, tableNamePattern, columnNamePattern);
      }
    }
    else if (schemaPatternType == EXTERNAL_SCHEMA_QUERY) {
      // svv_external_columns
      sql = buildExternalSchemaColumnsQuery(catalog, schemaPattern, tableNamePattern, columnNamePattern);
    }

    ResultSet rs =
createMetaDataStatement().executeQuery(sql); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rs); return rs; } private String buildLocalSchemaColumnsQuery(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { StringBuilder result = new StringBuilder(); result.append("SELECT * FROM ( "); result.append("SELECT current_database() AS TABLE_CAT, "); result.append("n.nspname AS TABLE_SCHEM, "); result.append("c.relname as TABLE_NAME , "); result.append("a.attname as COLUMN_NAME, " ); result.append("CAST(case typname "); result.append("when 'text' THEN 12 "); result.append("when 'bit' THEN -7 "); result.append("when 'bool' THEN -7 "); result.append("when 'boolean' THEN -7 "); result.append("when 'varchar' THEN 12 "); result.append("when 'character varying' THEN 12 "); result.append("when 'char' THEN 1 "); result.append("when '\"char\"' THEN 1 "); result.append("when 'character' THEN 1 "); result.append("when 'nchar' THEN 12 "); result.append("when 'bpchar' THEN 1 "); result.append("when 'nvarchar' THEN 12 "); result.append("when 'date' THEN 91 "); result.append("when 'time' THEN 92 "); result.append("when 'time without time zone' THEN 92 "); result.append("when 'timetz' THEN 2013 "); result.append("when 'time with time zone' THEN 2013 "); result.append("when 'timestamp' THEN 93 "); result.append("when 'timestamp without time zone' THEN 93 "); result.append("when 'timestamptz' THEN 2014 "); result.append("when 'timestamp with time zone' THEN 2014 "); result.append("when 'smallint' THEN 5 "); result.append("when 'int2' THEN 5 "); result.append("when 'integer' THEN 4 "); result.append("when 'int' THEN 4 "); result.append("when 'int4' THEN 4 "); result.append("when 'bigint' THEN -5 "); result.append("when 'int8' THEN -5 "); result.append("when 'decimal' THEN 3 "); result.append("when 'real' THEN 7 "); result.append("when 'float4' THEN 7 "); result.append("when 'double precision' THEN 8 "); 
result.append("when 'float8' THEN 8 "); result.append("when 'float' THEN 6 "); result.append("when 'numeric' THEN 2 "); result.append("when '_float4' THEN 2003 "); result.append("when '_aclitem' THEN 2003 "); result.append("when '_text' THEN 2003 "); result.append("when 'bytea' THEN -2 "); result.append("when 'oid' THEN -5 "); result.append("when 'name' THEN 12 "); result.append("when '_int4' THEN 2003 "); result.append("when '_int2' THEN 2003 "); result.append("when 'ARRAY' THEN 2003 "); result.append("when 'geometry' THEN -4 "); result.append("when 'super' THEN -16 "); result.append("when 'varbyte' THEN -4 "); result.append("when 'geography' THEN -4 "); result.append("when 'intervaly2m' THEN 1111 "); result.append("when 'intervald2s' THEN 1111 "); result.append("else 1111 END as SMALLINT) AS DATA_TYPE, "); result.append("t.typname as TYPE_NAME, "); result.append("case typname "); result.append("when 'int4' THEN 10 "); result.append("when 'bit' THEN 1 "); result.append("when 'bool' THEN 1 "); result.append("when 'varchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'character varying' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'char' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'character' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'nchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'bpchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'nvarchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'date' THEN 13 "); result.append("when 'time' THEN 15 "); result.append("when 'time without time zone' THEN 15 "); result.append("when 'timetz' THEN 21 "); result.append("when 'time with time zone' THEN 21 "); result.append("when 'timestamp' THEN 29 "); result.append("when 'timestamp without time zone' THEN 29 "); 
result.append("when 'timestamptz' THEN 35 "); result.append("when 'timestamp with time zone' THEN 35 "); result.append("when 'smallint' THEN 5 "); result.append("when 'int2' THEN 5 "); result.append("when 'integer' THEN 10 "); result.append("when 'int' THEN 10 "); result.append("when 'int4' THEN 10 "); result.append("when 'bigint' THEN 19 "); result.append("when 'int8' THEN 19 "); result.append("when 'decimal' then (atttypmod - 4) >> 16 "); result.append("when 'real' THEN 8 "); result.append("when 'float4' THEN 8 "); result.append("when 'double precision' THEN 17 "); result.append("when 'float8' THEN 17 "); result.append("when 'float' THEN 17 "); result.append("when 'numeric' THEN (atttypmod - 4) >> 16 "); result.append("when '_float4' THEN 8 "); result.append("when 'oid' THEN 10 "); result.append("when '_int4' THEN 10 "); result.append("when '_int2' THEN 5 "); result.append("when 'geometry' THEN NULL "); result.append("when 'super' THEN NULL "); result.append("when 'varbyte' THEN NULL "); result.append("when 'geography' THEN NULL "); result.append("when 'intervaly2m' THEN 32 "); result.append("when 'intervald2s' THEN 64 "); // if (connSettings.m_unknownLength == null) { result.append("else 2147483647 end as COLUMN_SIZE , "); } /* else { result.append("else "); result.append(connSettings.m_unknownLength); result.append(" end as COLUMN_SIZE , "); } */ result.append("null as BUFFER_LENGTH , "); result.append("case typname "); result.append("when 'float4' then 8 "); result.append("when 'float8' then 17 "); result.append("when 'numeric' then (atttypmod - 4) & 65535 "); result.append("when 'time without time zone' then 6 "); result.append("when 'timetz' then 6 "); result.append("when 'time with time zone' then 6 "); result.append("when 'timestamp without time zone' then 6 "); result.append("when 'timestamp' then 6 "); result.append("when 'geometry' then NULL "); result.append("when 'super' then NULL "); result.append("when 'varbyte' then NULL "); result.append("when 
'geography' then NULL "); result.append("else 0 end as DECIMAL_DIGITS, "); result.append("case typname "); result.append("when 'varbyte' then 2 "); result.append("when 'geography' then 2 "); result.append("when 'intervaly2m' then 0 "); result.append("when 'intervald2s' then 6 "); result.append("else 10 end as NUM_PREC_RADIX, "); result.append("case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) "); result.append("when 'false' then 1 "); result.append("when NULL then 2 "); result.append("else 0 end AS NULLABLE , "); result.append("dsc.description as REMARKS , "); result.append("pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS COLUMN_DEF, "); result.append("CAST(case typname "); result.append("when 'text' THEN 12 "); result.append("when 'bit' THEN -7 "); result.append("when 'bool' THEN -7 "); result.append("when 'boolean' THEN -7 "); result.append("when 'varchar' THEN 12 "); result.append("when 'character varying' THEN 12 "); result.append("when '\"char\"' THEN 1 "); result.append("when 'char' THEN 1 "); result.append("when 'character' THEN 1 "); result.append("when 'nchar' THEN 1 "); result.append("when 'bpchar' THEN 1 "); result.append("when 'nvarchar' THEN 12 "); result.append("when 'date' THEN 91 "); result.append("when 'time' THEN 92 "); result.append("when 'time without time zone' THEN 92 "); result.append("when 'timetz' THEN 2013 "); result.append("when 'time with time zone' THEN 2013 "); result.append("when 'timestamp with time zone' THEN 2014 "); result.append("when 'timestamp' THEN 93 "); result.append("when 'timestamp without time zone' THEN 93 "); result.append("when 'smallint' THEN 5 "); result.append("when 'int2' THEN 5 "); result.append("when 'integer' THEN 4 "); result.append("when 'int' THEN 4 "); result.append("when 'int4' THEN 4 "); result.append("when 'bigint' THEN -5 "); result.append("when 'int8' THEN -5 "); result.append("when 'decimal' THEN 3 "); result.append("when 'real' THEN 7 "); result.append("when 'float4' THEN 7 "); 
result.append("when 'double precision' THEN 8 "); result.append("when 'float8' THEN 8 "); result.append("when 'float' THEN 6 "); result.append("when 'numeric' THEN 2 "); result.append("when '_float4' THEN 2003 "); result.append("when 'timestamptz' THEN 2014 "); result.append("when 'timestamp with time zone' THEN 2014 "); result.append("when '_aclitem' THEN 2003 "); result.append("when '_text' THEN 2003 "); result.append("when 'bytea' THEN -2 "); result.append("when 'oid' THEN -5 "); result.append("when 'name' THEN 12 "); result.append("when '_int4' THEN 2003 "); result.append("when '_int2' THEN 2003 "); result.append("when 'ARRAY' THEN 2003 "); result.append("when 'geometry' THEN -4 "); result.append("when 'super' THEN -16 "); result.append("when 'varbyte' THEN -4 "); result.append("when 'geography' THEN -4 "); result.append("when 'intervaly2m' then 1111 "); result.append("when 'intervald2s' then 1111 "); result.append("else 1111 END as SMALLINT) AS SQL_DATA_TYPE, "); result.append("CAST(NULL AS SMALLINT) as SQL_DATETIME_SUB , "); result.append("case typname "); result.append("when 'int4' THEN 10 "); result.append("when 'bit' THEN 1 "); result.append("when 'bool' THEN 1 "); result.append("when 'varchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'character varying' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'char' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'character' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'nchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'bpchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'nvarchar' THEN CASE atttypmod WHEN -1 THEN 0 ELSE atttypmod -4 END "); result.append("when 'date' THEN 13 "); result.append("when 'time' THEN 15 "); result.append("when 'time without time zone' THEN 15 "); result.append("when 
'timetz' THEN 21 "); result.append("when 'time with time zone' THEN 21 "); result.append("when 'timestamp' THEN 29 "); result.append("when 'timestamp without time zone' THEN 29 "); result.append("when 'timestamptz' THEN 35 "); result.append("when 'timestamp with time zone' THEN 35 "); result.append("when 'smallint' THEN 5 "); result.append("when 'int2' THEN 5 "); result.append("when 'integer' THEN 10 "); result.append("when 'int' THEN 10 "); result.append("when 'int4' THEN 10 "); result.append("when 'bigint' THEN 19 "); result.append("when 'int8' THEN 19 "); result.append("when 'decimal' then ((atttypmod - 4) >> 16) & 65535 "); result.append("when 'real' THEN 8 "); result.append("when 'float4' THEN 8 "); result.append("when 'double precision' THEN 17 "); result.append("when 'float8' THEN 17 "); result.append("when 'float' THEN 17 "); result.append("when 'numeric' THEN ((atttypmod - 4) >> 16) & 65535 "); result.append("when '_float4' THEN 8 "); result.append("when 'oid' THEN 10 "); result.append("when '_int4' THEN 10 "); result.append("when '_int2' THEN 5 "); result.append("when 'geometry' THEN NULL "); result.append("when 'super' THEN NULL "); result.append("when 'varbyte' THEN NULL "); result.append("when 'geography' THEN NULL "); result.append("when 'intervaly2m' THEN 32 "); result.append("when 'intervald2s' THEN 64 "); // if (connSettings.m_unknownLength == null) { result.append("else 2147483647 end as CHAR_OCTET_LENGTH , "); } /* else { result.append("else "); result.append(connSettings.m_unknownLength); result.append(" end as CHAR_OCTET_LENGTH , "); } */ result.append("a.attnum AS ORDINAL_POSITION, "); result.append("case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) "); result.append("when 'false' then 'YES' "); result.append("when NULL then '' "); result.append("else 'NO' end AS IS_NULLABLE, "); result.append("null as SCOPE_CATALOG , "); result.append("null as SCOPE_SCHEMA , "); result.append("null as SCOPE_TABLE, "); result.append("t.typbasetype AS 
SOURCE_DATA_TYPE , "); result.append("CASE WHEN left(pg_catalog.pg_get_expr(def.adbin, def.adrelid), 16) = 'default_identity' THEN 'YES' "); result.append("ELSE 'NO' END AS IS_AUTOINCREMENT, "); result.append("IS_AUTOINCREMENT AS IS_GENERATEDCOLUMN "); result.append("FROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) "); result.append("JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) "); result.append("JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "); result.append("LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) "); result.append("LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) "); result.append("LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') "); result.append("LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') "); result.append("WHERE a.attnum > 0 AND NOT a.attisdropped "); result.append(getCatalogFilterCondition(catalog)); if (schemaPattern != null && !schemaPattern.isEmpty()) { result.append(" AND n.nspname LIKE " + escapeQuotes(schemaPattern)); } if (tableNamePattern != null && !tableNamePattern.isEmpty()) { result.append(" AND c.relname LIKE " + escapeQuotes(tableNamePattern)); } /* if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) { sql += ") c WHERE true "; } */ if (columnNamePattern != null && !columnNamePattern.isEmpty()) { result.append(" AND attname LIKE " + escapeQuotes(columnNamePattern)); } result.append(" ORDER BY TABLE_SCHEM,c.relname,attnum ) "); // This part uses redshift method PG_GET_LATE_BINDING_VIEW_COLS() to // get the column list for late binding view. 
result.append(" UNION ALL "); result.append("SELECT current_database()::VARCHAR(128) AS TABLE_CAT, "); result.append("schemaname::varchar(128) AS table_schem, "); result.append("tablename::varchar(128) AS table_name, "); result.append("columnname::varchar(128) AS column_name, "); result.append("CAST(CASE columntype_rep "); result.append("WHEN 'text' THEN 12 "); result.append("WHEN 'bit' THEN -7 "); result.append("WHEN 'bool' THEN -7 "); result.append("WHEN 'boolean' THEN -7 "); result.append("WHEN 'varchar' THEN 12 "); result.append("WHEN 'character varying' THEN 12 "); result.append("WHEN 'char' THEN 1 "); result.append("WHEN 'character' THEN 1 "); result.append("WHEN 'nchar' THEN 1 "); result.append("WHEN 'bpchar' THEN 1 "); result.append("WHEN 'nvarchar' THEN 12 "); result.append("WHEN '\"char\"' THEN 1 "); result.append("WHEN 'date' THEN 91 "); result.append("when 'time' THEN 92 "); result.append("when 'time without time zone' THEN 92 "); result.append("when 'timetz' THEN 2013 "); result.append("when 'time with time zone' THEN 2013 "); result.append("WHEN 'timestamp' THEN 93 "); result.append("WHEN 'timestamp without time zone' THEN 93 "); result.append("when 'timestamptz' THEN 2014 "); result.append("WHEN 'timestamp with time zone' THEN 2014 "); result.append("WHEN 'smallint' THEN 5 "); result.append("WHEN 'int2' THEN 5 "); result.append("WHEN 'integer' THEN 4 "); result.append("WHEN 'int' THEN 4 "); result.append("WHEN 'int4' THEN 4 "); result.append("WHEN 'bigint' THEN -5 "); result.append("WHEN 'int8' THEN -5 "); result.append("WHEN 'decimal' THEN 3 "); result.append("WHEN 'real' THEN 7 "); result.append("WHEN 'float4' THEN 7 "); result.append("WHEN 'double precision' THEN 8 "); result.append("WHEN 'float8' THEN 8 "); result.append("WHEN 'float' THEN 6 "); result.append("WHEN 'numeric' THEN 2 "); result.append("WHEN 'timestamptz' THEN 2014 "); result.append("WHEN 'bytea' THEN -2 "); result.append("WHEN 'oid' THEN -5 "); result.append("WHEN 'name' THEN 12 
"); result.append("WHEN 'ARRAY' THEN 2003 "); result.append("WHEN 'geometry' THEN -4 "); result.append("WHEN 'super' THEN -16 "); result.append("WHEN 'varbyte' THEN -4 "); result.append("WHEN 'geography' THEN -4 "); result.append("WHEN 'intervaly2m' THEN 1111 "); result.append("WHEN 'intervald2s' THEN 1111 "); result.append("ELSE 1111 END AS SMALLINT) AS DATA_TYPE, "); result.append("COALESCE(NULL,CASE columntype WHEN 'boolean' THEN 'bool' "); result.append("WHEN 'character varying' THEN 'varchar' "); result.append("WHEN '\"char\"' THEN 'char' "); result.append("WHEN 'smallint' THEN 'int2' "); result.append("WHEN 'integer' THEN 'int4'"); result.append("WHEN 'bigint' THEN 'int8' "); result.append("WHEN 'real' THEN 'float4' "); result.append("WHEN 'double precision' THEN 'float8' "); result.append("WHEN 'time without time zone' THEN 'time' "); result.append("WHEN 'time with time zone' THEN 'timetz' "); result.append("WHEN 'timestamp without time zone' THEN 'timestamp' "); result.append("WHEN 'timestamp with time zone' THEN 'timestamptz' "); result.append("ELSE columntype END) AS TYPE_NAME, "); result.append("CASE columntype_rep "); result.append("WHEN 'int4' THEN 10 "); result.append("WHEN 'bit' THEN 1 "); result.append("WHEN 'bool' THEN 1"); result.append("WHEN 'boolean' THEN 1"); result.append("WHEN 'varchar' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN 'character varying' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN 'char' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',4),''),'0')::INTEGER "); result.append("WHEN 'character' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',4),''),'0')::INTEGER "); result.append("WHEN 'nchar' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN 'bpchar' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN 'nvarchar' THEN 
isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN 'date' THEN 13 "); result.append("WHEN 'time' THEN 15 "); result.append("WHEN 'time without time zone' THEN 15 "); result.append("WHEN 'timetz' THEN 21 "); result.append("WHEN 'timestamp' THEN 29 "); result.append("WHEN 'timestamp without time zone' THEN 29 "); result.append("WHEN 'time with time zone' THEN 21 "); result.append("WHEN 'timestamptz' THEN 35 "); result.append("WHEN 'timestamp with time zone' THEN 35 "); result.append("WHEN 'smallint' THEN 5 "); result.append("WHEN 'int2' THEN 5 "); result.append("WHEN 'integer' THEN 10 "); result.append("WHEN 'int' THEN 10 "); result.append("WHEN 'int4' THEN 10 "); result.append("WHEN 'bigint' THEN 19 "); result.append("WHEN 'int8' THEN 19 "); result.append("WHEN 'decimal' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN 'real' THEN 8 "); result.append("WHEN 'float4' THEN 8 "); result.append("WHEN 'double precision' THEN 17 "); result.append("WHEN 'float8' THEN 17 "); result.append("WHEN 'float' THEN 17"); result.append("WHEN 'numeric' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN '_float4' THEN 8 "); result.append("WHEN 'oid' THEN 10 "); result.append("WHEN '_int4' THEN 10 "); result.append("WHEN '_int2' THEN 5 "); result.append("WHEN 'geometry' THEN NULL "); result.append("WHEN 'super' THEN NULL "); result.append("WHEN 'varbyte' THEN NULL "); result.append("WHEN 'geography' THEN NULL "); result.append("WHEN 'intervaly2m' THEN 32 "); result.append("WHEN 'intervald2s' THEN 64 "); result.append("ELSE 2147483647 END AS COLUMN_SIZE, "); result.append("NULL AS BUFFER_LENGTH, "); result.append("CASE REGEXP_REPLACE(columntype,'[()0-9,]') "); result.append("WHEN 'real' THEN 8 "); result.append("WHEN 'float4' THEN 8 "); result.append("WHEN 'double precision' THEN 17 "); result.append("WHEN 'float8' THEN 17 "); result.append("WHEN 
'timestamp' THEN 6 "); result.append("WHEN 'timestamp without time zone' THEN 6 "); result.append("WHEN 'geometry' THEN NULL "); result.append("WHEN 'super' THEN NULL "); result.append("WHEN 'numeric' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',charindex (',',columntype)),''),'0')::INTEGER "); result.append("WHEN 'varbyte' THEN NULL "); result.append("WHEN 'geography' THEN NULL "); result.append("ELSE 0 END AS DECIMAL_DIGITS, "); result.append("CASE columntype "); result.append("WHEN 'varbyte' THEN 2 "); result.append("WHEN 'geography' THEN 2 "); result.append("ELSE 10 END AS NUM_PREC_RADIX, "); result.append("NULL AS NULLABLE, NULL AS REMARKS, NULL AS COLUMN_DEF, "); result.append("CAST(CASE columntype_rep "); result.append("WHEN 'text' THEN 12 "); result.append("WHEN 'bit' THEN -7 "); result.append("WHEN 'bool' THEN -7 "); result.append("WHEN 'boolean' THEN -7 "); result.append("WHEN 'varchar' THEN 12 "); result.append("WHEN 'character varying' THEN 12 "); result.append("WHEN 'char' THEN 1 "); result.append("WHEN 'character' THEN 1 "); result.append("WHEN 'nchar' THEN 12 "); result.append("WHEN 'bpchar' THEN 1 "); result.append("WHEN 'nvarchar' THEN 12 "); result.append("WHEN '\"char\"' THEN 1 "); result.append("WHEN 'date' THEN 91 "); result.append("WHEN 'time' THEN 92 "); result.append("WHEN 'time without time zone' THEN 92 "); result.append("WHEN 'timetz' THEN 2013 "); result.append("WHEN 'time with time zone' THEN 2013 "); result.append("WHEN 'timestamp' THEN 93 "); result.append("WHEN 'timestamp without time zone' THEN 93 "); result.append("WHEN 'timestamptz' THEN 2014 "); result.append("WHEN 'timestamp with time zone' THEN 2014 "); result.append("WHEN 'smallint' THEN 5 "); result.append("WHEN 'int2' THEN 5 "); result.append("WHEN 'integer' THEN 4 "); result.append("WHEN 'int' THEN 4 "); result.append("WHEN 'int4' THEN 4 "); result.append("WHEN 'bigint' THEN -5 "); result.append("WHEN 'int8' THEN -5 "); result.append("WHEN 'decimal' THEN 3 "); 
result.append("WHEN 'real' THEN 7 "); result.append("WHEN 'float4' THEN 7 "); result.append("WHEN 'double precision' THEN 8 "); result.append("WHEN 'float8' THEN 8 "); result.append("WHEN 'float' THEN 6 "); result.append("WHEN 'numeric' THEN 2 "); result.append("WHEN 'bytea' THEN -2 "); result.append("WHEN 'oid' THEN -5 "); result.append("WHEN 'name' THEN 12 "); result.append("WHEN 'ARRAY' THEN 2003 "); result.append("WHEN 'geometry' THEN -4 "); result.append("WHEN 'super' THEN -16 "); result.append("WHEN 'varbyte' THEN -4 "); result.append("WHEN 'geography' THEN -4 "); result.append("WHEN 'intervaly2m' THEN 1111 "); result.append("WHEN 'intervald2s' THEN 1111 "); result.append("ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE, "); result.append("CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, CASE "); result.append("WHEN LEFT (columntype,7) = 'varchar' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',7),''),'0')::INTEGER "); result.append("WHEN LEFT (columntype,4) = 'char' THEN isnull(nullif(regexp_substr (columntype,'[0-9]+',4),''),'0')::INTEGER "); result.append("WHEN columntype = 'string' THEN 16383 ELSE NULL "); result.append("END AS CHAR_OCTET_LENGTH, columnnum AS ORDINAL_POSITION, "); result.append("NULL AS IS_NULLABLE, NULL AS SCOPE_CATALOG, NULL AS SCOPE_SCHEMA, "); result.append("NULL AS SCOPE_TABLE, NULL AS SOURCE_DATA_TYPE, 'NO' AS IS_AUTOINCREMENT, "); result.append("'NO' as IS_GENERATEDCOLUMN "); result.append("FROM (select lbv_cols.schemaname, "); result.append("lbv_cols.tablename, lbv_cols.columnname,"); result.append("REGEXP_REPLACE(REGEXP_REPLACE(lbv_cols.columntype,'\\\\(.*\\\\)'),'^_.+','ARRAY') as columntype_rep,"); result.append("columntype, "); result.append("lbv_cols.columnnum "); result.append("from pg_get_late_binding_view_cols() lbv_cols( "); result.append("schemaname name, tablename name, columnname name, "); result.append("columntype text, columnnum int)) lbv_columns "); result.append(" WHERE true "); // Apply the filters to the column list 
for late binding view. result.append(getCatalogFilterCondition(catalog)); if (schemaPattern != null && !schemaPattern.isEmpty()) { result.append(" AND schemaname LIKE " + escapeQuotes(schemaPattern)); } if (tableNamePattern != null && !tableNamePattern.isEmpty()) { result.append(" AND tablename LIKE " + escapeQuotes(tableNamePattern)); } /* if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) { sql += ") c WHERE true "; } */ if (columnNamePattern != null && !columnNamePattern.isEmpty()) { result.append(" AND columnname LIKE " + escapeQuotes(columnNamePattern)); } return result.toString(); } private String buildUniversalAllSchemaColumnsQuery(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { final String unknownColumnSize = "2147483647"; StringBuilder result = new StringBuilder(8192); result.append("SELECT database_name AS TABLE_CAT, " + " schema_name AS TABLE_SCHEM, " + " table_name, " + " COLUMN_NAME, " + " CAST(CASE regexp_replace(data_type, '^_.', 'ARRAY') " + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN '\"char\"' THEN 1 " + " WHEN 'date' THEN 91 " + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'decimal' THEN 3 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double 
precision' THEN 8 " + " WHEN 'float8' THEN 8 " + " WHEN 'float' THEN 6 " + " WHEN 'numeric' THEN 2 " + " WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -16 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " ELSE 1111 END AS SMALLINT) AS DATA_TYPE, " + " CASE data_type " + " WHEN 'boolean' THEN 'bool' " + " WHEN 'character varying' THEN 'varchar' " + " WHEN '\"char\"' THEN 'char' " + " WHEN 'smallint' THEN 'int2' " + " WHEN 'integer' THEN 'int4' " + " WHEN 'bigint' THEN 'int8' " + " WHEN 'real' THEN 'float4' " + " WHEN 'double precision' THEN 'float8' " + " WHEN 'time without time zone' THEN 'time' " + " WHEN 'time with time zone' THEN 'timetz' " + " WHEN 'timestamp without time zone' THEN 'timestamp' " + " WHEN 'timestamp with time zone' THEN 'timestamptz' " + " ELSE data_type " + " END AS TYPE_NAME, " + " CASE data_type " + " WHEN 'int4' THEN 10 " + " WHEN 'bit' THEN 1 " + " WHEN 'bool' THEN 1 " + " WHEN 'boolean' THEN 1 " + " WHEN 'varchar' THEN character_maximum_length " + " WHEN 'character varying' THEN character_maximum_length " + " WHEN 'char' THEN character_maximum_length " + " WHEN 'character' THEN character_maximum_length " + " WHEN 'nchar' THEN character_maximum_length " + " WHEN 'bpchar' THEN character_maximum_length " + " WHEN 'nvarchar' THEN character_maximum_length " + " WHEN 'date' THEN 13 " + " WHEN 'time' THEN 15 " + " WHEN 'time without time zone' THEN 15 " + " WHEN 'timetz' THEN 21 " + " WHEN 'time with time zone' THEN 21 " + " WHEN 'timestamp' THEN 29 " + " WHEN 'timestamp without time zone' THEN 29 " + " WHEN 'timestamptz' THEN 35 " + " WHEN 'timestamp with time zone' THEN 35 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 10 " + " WHEN 'int' THEN 10 " + " WHEN 'int4' THEN 10 " + " WHEN 'bigint' THEN 19 " + " WHEN 'int8' 
THEN 19 " + " WHEN 'decimal' THEN numeric_precision " + " WHEN 'real' THEN 8 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 17 " + " WHEN 'float8' THEN 17 " + " WHEN 'float' THEN 17 " + " WHEN 'numeric' THEN numeric_precision " + " WHEN '_float4' THEN 8 " + " WHEN 'oid' THEN 10 " + " WHEN '_int4' THEN 10 " + " WHEN '_int2' THEN 5 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN NULL " + " WHEN 'varbyte' THEN NULL " + " WHEN 'geography' THEN NULL " + " WHEN 'intervaly2m' THEN 32 " + " WHEN 'intervald2s' THEN 64 " + " ELSE 2147483647 " + " END AS COLUMN_SIZE, " + " NULL AS BUFFER_LENGTH, " + " CASE data_type " + " WHEN 'real' THEN 8 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 17 " + " WHEN 'float8' THEN 17 " + " WHEN 'numeric' THEN numeric_scale " + " WHEN 'time' THEN 6 " + " WHEN 'time without time zone' THEN 6 " + " WHEN 'timetz' THEN 6 " + " WHEN 'time with time zone' THEN 6 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 6 " + " WHEN 'timestamp with time zone' THEN 6 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN NULL " + " WHEN 'varbyte' THEN NULL " + " WHEN 'geography' THEN NULL " + " WHEN 'intervaly2m' THEN 0 " + " WHEN 'intervald2s' THEN 6 " + " ELSE 0 " + " END AS DECIMAL_DIGITS, " + " CASE data_type " + " WHEN 'varbyte' THEN 2 " + " WHEN 'geography' THEN 2 " + " ELSE 10 " + " END AS NUM_PREC_RADIX, " + " CASE is_nullable WHEN 'YES' THEN 1 " + " WHEN 'NO' THEN 0 " + " ELSE 2 end AS NULLABLE, " + " REMARKS, " + " column_default AS COLUMN_DEF, " + " CAST(CASE regexp_replace(data_type, '^_.', 'ARRAY') " + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN '\"char\"' THEN 1 " + " WHEN 'date' 
THEN 91 " + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'decimal' THEN 3 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double precision' THEN 8 " + " WHEN 'float8' THEN 8 " + " WHEN 'float' THEN 6 " + " WHEN 'numeric' THEN 2 " + " WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -16 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE, " + " CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, " + " CASE data_type " + " WHEN 'int4' THEN 10 " + " WHEN 'bit' THEN 1 " + " WHEN 'bool' THEN 1 " + " WHEN 'boolean' THEN 1 " + " WHEN 'varchar' THEN character_maximum_length " + " WHEN 'character varying' THEN character_maximum_length " + " WHEN 'char' THEN character_maximum_length " + " WHEN 'character' THEN character_maximum_length " + " WHEN 'nchar' THEN character_maximum_length " + " WHEN 'bpchar' THEN character_maximum_length " + " WHEN 'nvarchar' THEN character_maximum_length " + " WHEN 'date' THEN 13 " + " WHEN 'time' THEN 15 " + " WHEN 'time without time zone' THEN 15 " + " WHEN 'timetz' THEN 21 " + " WHEN 'time with time zone' THEN 21 " + " WHEN 'timestamp' THEN 29 " + " WHEN 'timestamp without time zone' THEN 29 " + " WHEN 'timestamptz' THEN 35 " + " WHEN 'timestamp with time zone' THEN 35 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 10 " + " WHEN 
'int' THEN 10 " + " WHEN 'int4' THEN 10 " + " WHEN 'bigint' THEN 19 " + " WHEN 'int8' THEN 19 " + " WHEN 'decimal' THEN numeric_precision " + " WHEN 'real' THEN 8 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 17 " + " WHEN 'float8' THEN 17 " + " WHEN 'float' THEN 17 " + " WHEN 'numeric' THEN numeric_precision " + " WHEN '_float4' THEN 8 " + " WHEN 'oid' THEN 10 " + " WHEN '_int4' THEN 10 " + " WHEN '_int2' THEN 5 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN NULL " + " WHEN 'varbyte' THEN NULL " + " WHEN 'geography' THEN NULL " + " WHEN 'intervaly2m' THEN 32 " + " WHEN 'intervald2s' THEN 64 " + " ELSE 2147483647 " + " END AS CHAR_OCTET_LENGTH, " + " ordinal_position AS ORDINAL_POSITION, " + " is_nullable AS IS_NULLABLE, " + " NULL AS SCOPE_CATALOG, " + " NULL AS SCOPE_SCHEMA, " + " NULL AS SCOPE_TABLE, " + " data_type as SOURCE_DATA_TYPE, " + " CASE WHEN left(column_default, 10) = '\"identity\"' THEN 'YES' " + " WHEN left(column_default, 16) = 'default_identity' THEN 'YES' " + " ELSE 'NO' END AS IS_AUTOINCREMENT, " + " IS_AUTOINCREMENT AS IS_GENERATEDCOLUMN " + " FROM PG_CATALOG.svv_all_columns "); result.append( " WHERE true "); result.append(getCatalogFilterCondition(catalog, false, null)); if (schemaPattern != null && !schemaPattern.isEmpty()) { result.append(" AND schema_name LIKE " + escapeQuotes(schemaPattern)); } if (tableNamePattern != null && !tableNamePattern.isEmpty()) { result.append(" AND table_name LIKE " + escapeQuotes(tableNamePattern)); } if (columnNamePattern != null && !columnNamePattern.isEmpty()) { result.append(" AND COLUMN_NAME LIKE " + escapeQuotes(columnNamePattern)); } result.append(" ORDER BY TABLE_CAT, TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION "); return result.toString(); } private String buildUniversalSchemaColumnsQuery(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { final String unknownColumnSize = "2147483647"; StringBuilder result = new 
StringBuilder(8192); // NOTE: Explicit cast on current_database() prevents bug where data returned from server // has incorrect length and displays random characters. [JDBC-529] result.append("SELECT current_database()::varchar(128) AS TABLE_CAT," + " table_schema AS TABLE_SCHEM," + " table_name," + " COLUMN_NAME," + " CAST(CASE regexp_replace(data_type, '^_.+', 'ARRAY')" + " WHEN 'text' THEN 12" + " WHEN 'bit' THEN -7" + " WHEN 'bool' THEN -7" + " WHEN 'boolean' THEN -7" + " WHEN 'varchar' THEN 12" + " WHEN 'character varying' THEN 12" + " WHEN 'char' THEN 1" + " WHEN 'character' THEN 1" + " WHEN 'nchar' THEN 1" + " WHEN 'bpchar' THEN 1" + " WHEN 'nvarchar' THEN 12" + " WHEN '\"char\"' THEN 1" + " WHEN 'date' THEN 91" + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93" + " WHEN 'timestamp without time zone' THEN 93" + " WHEN 'timestamptz' THEN 2014" + " WHEN 'timestamp with time zone' THEN 2014" + " WHEN 'smallint' THEN 5" + " WHEN 'int2' THEN 5" + " WHEN 'integer' THEN 4" + " WHEN 'int' THEN 4" + " WHEN 'int4' THEN 4" + " WHEN 'bigint' THEN -5" + " WHEN 'int8' THEN -5" + " WHEN 'decimal' THEN 3" + " WHEN 'real' THEN 7" + " WHEN 'float4' THEN 7" + " WHEN 'double precision' THEN 8" + " WHEN 'float8' THEN 8" + " WHEN 'float' THEN 6" + " WHEN 'numeric' THEN 2" + " WHEN 'bytea' THEN -2" + " WHEN 'oid' THEN -5" + " WHEN 'name' THEN 12" + " WHEN 'ARRAY' THEN 2003" + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -16 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " ELSE 1111 END AS SMALLINT) AS DATA_TYPE," + " COALESCE(" + " domain_name," + " CASE data_type" + " WHEN 'boolean' THEN 'bool'" + " WHEN 'character varying' THEN 'varchar'" + " WHEN '\"char\"' THEN 'char'" + " WHEN 'smallint' THEN 'int2'" + " WHEN 'integer' THEN 'int4'" + " WHEN 'bigint' THEN 'int8'" + " 
WHEN 'real' THEN 'float4'" + " WHEN 'double precision' THEN 'float8'" + " WHEN 'time without time zone' THEN 'time'" + " WHEN 'time with time zone' THEN 'timetz'" + " WHEN 'timestamp without time zone' THEN 'timestamp'" + " WHEN 'timestamp with time zone' THEN 'timestamptz'" + " ELSE data_type" + " END) AS TYPE_NAME," + " CASE data_type" + " WHEN 'int4' THEN 10" + " WHEN 'bit' THEN 1" + " WHEN 'bool' THEN 1" + " WHEN 'boolean' THEN 1" + " WHEN 'varchar' THEN character_maximum_length" + " WHEN 'character varying' THEN character_maximum_length" + " WHEN 'char' THEN character_maximum_length" + " WHEN 'character' THEN character_maximum_length" + " WHEN 'nchar' THEN character_maximum_length" + " WHEN 'bpchar' THEN character_maximum_length" + " WHEN 'nvarchar' THEN character_maximum_length" + " WHEN 'date' THEN 13" + " WHEN 'time' THEN 15 " + " WHEN 'time without time zone' THEN 15 " + " WHEN 'timetz' THEN 21 " + " WHEN 'time with time zone' THEN 21 " + " WHEN 'timestamp' THEN 29" + " WHEN 'timestamp without time zone' THEN 29" + " WHEN 'timestamptz' THEN 35" + " WHEN 'timestamp with time zone' THEN 35" + " WHEN 'smallint' THEN 5" + " WHEN 'int2' THEN 5" + " WHEN 'integer' THEN 10" + " WHEN 'int' THEN 10" + " WHEN 'int4' THEN 10" + " WHEN 'bigint' THEN 19" + " WHEN 'int8' THEN 19" + " WHEN 'decimal' THEN numeric_precision" + " WHEN 'real' THEN 8" + " WHEN 'float4' THEN 8" + " WHEN 'double precision' THEN 17" + " WHEN 'float8' THEN 17" + " WHEN 'float' THEN 17" + " WHEN 'numeric' THEN numeric_precision" + " WHEN '_float4' THEN 8" + " WHEN 'oid' THEN 10" + " WHEN '_int4' THEN 10" + " WHEN '_int2' THEN 5" + " WHEN 'geometry' THEN NULL" + " WHEN 'super' THEN NULL" + " WHEN 'varbyte' THEN NULL" + " WHEN 'geography' THEN NULL" + " WHEN 'intervaly2m' THEN 32 " + " WHEN 'intervald2s' THEN 64 " + " ELSE " + unknownColumnSize + " END AS COLUMN_SIZE," + " NULL AS BUFFER_LENGTH," + " CASE data_type" + " WHEN 'real' THEN 8" + " WHEN 'float4' THEN 8" + " WHEN 'double precision' THEN 
17" + " WHEN 'float8' THEN 17" + " WHEN 'numeric' THEN numeric_scale" + " WHEN 'time' THEN 6" + " WHEN 'time without time zone' THEN 6" + " WHEN 'timetz' THEN 6" + " WHEN 'time with time zone' THEN 6" + " WHEN 'timestamp' THEN 6" + " WHEN 'timestamp without time zone' THEN 6" + " WHEN 'timestamptz' THEN 6" + " WHEN 'timestamp with time zone' THEN 6" + " WHEN 'geometry' THEN NULL" + " WHEN 'super' THEN NULL" + " WHEN 'varbyte' THEN NULL" + " WHEN 'geography' THEN NULL" + " WHEN 'intervaly2m' THEN 0 " + " WHEN 'intervald2s' THEN 6 " + " ELSE 0" + " END AS DECIMAL_DIGITS," + " CASE data_type" + " WHEN 'varbyte' THEN 2" + " WHEN 'geography' THEN 2" + " ELSE 10" + " END AS NUM_PREC_RADIX," + " CASE is_nullable WHEN 'YES' THEN 1" + " WHEN 'NO' THEN 0" + " ELSE 2 end AS NULLABLE," + " REMARKS," + " column_default AS COLUMN_DEF," + " CAST(CASE regexp_replace(data_type, '^_.+', 'ARRAY')" + " WHEN 'text' THEN 12" + " WHEN 'bit' THEN -7" + " WHEN 'bool' THEN -7" + " WHEN 'boolean' THEN -7" + " WHEN 'varchar' THEN 12" + " WHEN 'character varying' THEN 12" + " WHEN 'char' THEN 1" + " WHEN 'character' THEN 1" + " WHEN 'nchar' THEN 1" + " WHEN 'bpchar' THEN 1" + " WHEN 'nvarchar' THEN 12" + " WHEN '\"char\"' THEN 1" + " WHEN 'date' THEN 91" + " WHEN 'time' THEN 92 " + " WHEN 'time without time zone' THEN 92 " + " WHEN 'timetz' THEN 2013 " + " WHEN 'time with time zone' THEN 2013 " + " WHEN 'timestamp' THEN 93" + " WHEN 'timestamp without time zone' THEN 93" + " WHEN 'timestamptz' THEN 2014" + " WHEN 'timestamp with time zone' THEN 2014" + " WHEN 'smallint' THEN 5" + " WHEN 'int2' THEN 5" + " WHEN 'integer' THEN 4" + " WHEN 'int' THEN 4" + " WHEN 'int4' THEN 4" + " WHEN 'bigint' THEN -5" + " WHEN 'int8' THEN -5" + " WHEN 'decimal' THEN 3" + " WHEN 'real' THEN 7" + " WHEN 'float4' THEN 7" + " WHEN 'double precision' THEN 8" + " WHEN 'float8' THEN 8" + " WHEN 'float' THEN 6" + " WHEN 'numeric' THEN 2" + " WHEN 'bytea' THEN -2" + " WHEN 'oid' THEN -5" + " WHEN 'name' THEN 12" + " 
WHEN 'ARRAY' THEN 2003" + " WHEN 'geometry' THEN -4" + " WHEN 'super' THEN -16" + " WHEN 'varbyte' THEN -4" + " WHEN 'geography' THEN -4" + " WHEN 'intervaly2m' THEN 1111" + " WHEN 'intervald2s' THEN 1111" + " ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE," + " CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB," + " CASE data_type" + " WHEN 'int4' THEN 10" + " WHEN 'bit' THEN 1" + " WHEN 'bool' THEN 1" + " WHEN 'boolean' THEN 1" + " WHEN 'varchar' THEN character_maximum_length" + " WHEN 'character varying' THEN character_maximum_length" + " WHEN 'char' THEN character_maximum_length" + " WHEN 'character' THEN character_maximum_length" + " WHEN 'nchar' THEN character_maximum_length" + " WHEN 'bpchar' THEN character_maximum_length" + " WHEN 'nvarchar' THEN character_maximum_length" + " WHEN 'date' THEN 13" + " WHEN 'time' THEN 15" + " WHEN 'time without time zone' THEN 15" + " WHEN 'timetz' THEN 21" + " WHEN 'time with time zone' THEN 21" + " WHEN 'timestamp' THEN 29" + " WHEN 'timestamp without time zone' THEN 29" + " WHEN 'timestamptz' THEN 35" + " WHEN 'timestamp with time zone' THEN 35" + " WHEN 'smallint' THEN 5" + " WHEN 'int2' THEN 5" + " WHEN 'integer' THEN 10" + " WHEN 'int' THEN 10" + " WHEN 'int4' THEN 10" + " WHEN 'bigint' THEN 19" + " WHEN 'int8' THEN 19" + " WHEN 'decimal' THEN numeric_precision" + " WHEN 'real' THEN 8" + " WHEN 'float4' THEN 8" + " WHEN 'double precision' THEN 17" + " WHEN 'float8' THEN 17" + " WHEN 'float' THEN 17" + " WHEN 'numeric' THEN numeric_precision" + " WHEN '_float4' THEN 8" + " WHEN 'oid' THEN 10" + " WHEN '_int4' THEN 10" + " WHEN '_int2' THEN 5" + " WHEN 'geometry' THEN NULL" + " WHEN 'super' THEN NULL" + " WHEN 'varbyte' THEN NULL" + " WHEN 'geography' THEN NULL" + " WHEN 'intervaly2m' THEN 32" + " WHEN 'intervald2s' THEN 64" + " ELSE " + unknownColumnSize + " END AS CHAR_OCTET_LENGTH," + " ordinal_position AS ORDINAL_POSITION," + " is_nullable AS IS_NULLABLE," + " NULL AS SCOPE_CATALOG," + " NULL AS SCOPE_SCHEMA," + " NULL AS 
SCOPE_TABLE," + " CASE" + " WHEN domain_name is not null THEN data_type" + " END AS SOURCE_DATA_TYPE," + " CASE WHEN left(column_default, 10) = '\\\"identity\\\"' THEN 'YES'" + " WHEN left(column_default, 16) = 'default_identity' THEN 'YES' " + " ELSE 'NO' END AS IS_AUTOINCREMENT," + " IS_AUTOINCREMENT AS IS_GENERATEDCOLUMN" + " FROM svv_columns"); result.append( " WHERE true "); result.append(getCatalogFilterCondition(catalog)); if (schemaPattern != null && !schemaPattern.isEmpty()) { result.append(" AND table_schema LIKE " + escapeQuotes(schemaPattern)); } if (tableNamePattern != null && !tableNamePattern.isEmpty()) { result.append(" AND table_name LIKE " + escapeQuotes(tableNamePattern)); } if (columnNamePattern != null && !columnNamePattern.isEmpty()) { result.append(" AND COLUMN_NAME LIKE " + escapeQuotes(columnNamePattern)); } result.append(" ORDER BY table_schem,table_name,ORDINAL_POSITION "); return result.toString(); } private String buildExternalSchemaColumnsQuery(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { final String unknownColumnSize = "2147483647"; StringBuilder result = new StringBuilder(8192); // NOTE: Explicit cast on current_database() prevents bug where data returned from server // has incorrect length and displays random characters.
// NOTE(review): builds the getColumns() query for external (Spectrum) tables from
// svv_external_columns; each CASE maps Redshift external_type names to java.sql.Types
// codes (12=VARCHAR, 1=CHAR, 91..93=date/time, 2014=TIMESTAMP_WITH_TIMEZONE, 1111=OTHER).
// Patterns are escaped via escapeQuotes() before being embedded in LIKE clauses.
[JDBC-529] result.append("SELECT current_database()::varchar(128) AS TABLE_CAT," + " schemaname AS TABLE_SCHEM," + " tablename AS TABLE_NAME," + " columnname AS COLUMN_NAME," + " CAST(CASE WHEN external_type = 'text' THEN 12" + " WHEN external_type = 'bit' THEN -7" + " WHEN external_type = 'bool' THEN -7" + " WHEN external_type = 'boolean' THEN -7" + " WHEN left(external_type, 7) = 'varchar' THEN 12" + " WHEN left(external_type, 17) = 'character varying' THEN 12" + " WHEN left(external_type, 4) = 'char' THEN 1" + " WHEN left(external_type, 9) = 'character' THEN 1" + " WHEN left(external_type, 5) = 'nchar' THEN 1" + " WHEN left(external_type, 6) = 'bpchar' THEN 1" + " WHEN left(external_type, 8) = 'nvarchar' THEN 12" + " WHEN external_type = '\"char\"' THEN 1" + " WHEN external_type = 'date' THEN 91" + " WHEN external_type = 'time' THEN 92 " + " WHEN external_type = 'time without time zone' THEN 92 " + " WHEN external_type = 'timetz' THEN 2013 " + " WHEN external_type = 'time with time zone' THEN 2013 " + " WHEN external_type = 'timestamp' THEN 93" + " WHEN external_type = 'timestamp without time zone' THEN 93" + " WHEN external_type = 'timestamptz' THEN 2014" + " WHEN external_type = 'timestamp with time zone' THEN 2014" + " WHEN external_type = 'smallint' THEN 5" + " WHEN external_type = 'int2' THEN 5" + " WHEN external_type = '_int2' THEN 5" + " WHEN external_type = 'integer' THEN 4" + " WHEN external_type = 'int' THEN 4" + " WHEN external_type = 'int4' THEN 4" + " WHEN external_type = '_int4' THEN 4" + " WHEN external_type = 'bigint' THEN -5" + " WHEN external_type = 'int8' THEN -5" + " WHEN left(external_type, 7) = 'decimal' THEN 2" + " WHEN external_type = 'real' THEN 7" + " WHEN external_type = 'float4' THEN 7" + " WHEN external_type = '_float4' THEN 7" + " WHEN external_type = 'double' THEN 8" + " WHEN external_type = 'double precision' THEN 8" + " WHEN external_type = 'float8' THEN 8" + " WHEN external_type = '_float8' THEN 8" + " WHEN external_type = 
'float' THEN 6" + " WHEN left(external_type, 7) = 'numeric' THEN 2" + " WHEN external_type = 'bytea' THEN -2" + " WHEN external_type = 'oid' THEN -5" + " WHEN external_type = 'name' THEN 12" + " WHEN external_type = 'ARRAY' THEN 2003" + " WHEN external_type = 'geometry' THEN -4" + " WHEN external_type = 'super' THEN -16" + " WHEN external_type = 'varbyte' THEN -4" + " WHEN external_type = 'geography' THEN -4" + " WHEN external_type = 'intervaly2m' THEN 1111" + " WHEN external_type = 'intervald2s' THEN 1111" + " ELSE 1111 END AS SMALLINT) AS DATA_TYPE," + " CASE WHEN left(external_type, 17) = 'character varying' THEN 'varchar'" + " WHEN left(external_type, 7) = 'varchar' THEN 'varchar'" + " WHEN left(external_type, 4) = 'char' THEN 'char'" + " WHEN left(external_type, 7) = 'decimal' THEN 'numeric'" + " WHEN left(external_type, 7) = 'numeric' THEN 'numeric'" + " WHEN external_type = 'double' THEN 'double precision'" + " WHEN external_type = 'time without time zone' THEN 'time'" + " WHEN external_type = 'time with time zone' THEN 'timetz'" + " WHEN external_type = 'timestamp without time zone' THEN 'timestamp'" + " WHEN external_type = 'timestamp with time zone' THEN 'timestamptz'" + " ELSE external_type END AS TYPE_NAME," + " CASE WHEN external_type = 'int4' THEN 10" + " WHEN external_type = 'bit' THEN 1" + " WHEN external_type = 'bool' THEN 1" + " WHEN external_type = 'boolean' THEN 1" + " WHEN left(external_type, 7) = 'varchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 7) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 7) " + " END::integer " + " WHEN left(external_type, 17) = 'character varying' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 17) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 17) " + " END::integer " + " WHEN left(external_type, 4) = 'char' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 4) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 4) " + " 
END::integer " + " WHEN left(external_type, 9) = 'character' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 9) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 9) " + " END::integer " + " WHEN left(external_type, 5) = 'nchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 5) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 5) " + " END::integer " + " WHEN left(external_type, 6) = 'bpchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 6) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 6) " + " END::integer " + " WHEN left(external_type, 8) = 'nvarchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 8) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 8) " + " END::integer " + " WHEN external_type = 'date' THEN 13 " + " WHEN external_type = 'time' THEN 15 " + " WHEN external_type = 'time without time zone' THEN 15 " + " WHEN external_type = 'timetz' THEN 21 " + " WHEN external_type = 'time with time zone' THEN 21 " + " WHEN external_type = 'timestamp' THEN 29 " + " WHEN external_type = 'timestamp without time zone' THEN 29" + " WHEN external_type = 'timestamptz' THEN 35" + " WHEN external_type = 'timestamp with time zone' THEN 35" + " WHEN external_type = 'smallint' THEN 5" + " WHEN external_type = 'int2' THEN 5" + " WHEN external_type = 'integer' THEN 10" + " WHEN external_type = 'int' THEN 10" + " WHEN external_type = 'int4' THEN 10" + " WHEN external_type = 'bigint' THEN 19" + " WHEN external_type = 'int8' THEN 19" + " WHEN left(external_type, 7) = 'decimal' THEN isnull(nullif(regexp_substr(external_type, '[0-9]+', 7),''),'0')::integer" + " WHEN external_type = 'real' THEN 8" + " WHEN external_type = 'float4' THEN 8" + " WHEN external_type = '_float4' THEN 8" + " WHEN external_type = 'double' THEN 17" + " WHEN external_type = 'double precision' THEN 17" + " WHEN external_type = 'float8' THEN 17" + " WHEN external_type = '_float8' THEN 17" + 
// NOTE(review): COLUMN_SIZE above parses the parenthesized length out of types such as
// "varchar(255)" with regexp_instr/regexp_substr; DECIMAL_DIGITS below extracts numeric
// scale the same way. Fixed widths (date=13, timestamp=29, etc.) are display sizes.
" WHEN external_type = 'float' THEN 17" + " WHEN left(external_type, 7) = 'numeric' THEN isnull(nullif(regexp_substr(external_type, '[0-9]+', 7),''),'0')::integer" + " WHEN external_type = '_float4' THEN 8" + " WHEN external_type = 'oid' THEN 10" + " WHEN external_type = '_int4' THEN 10" + " WHEN external_type = '_int2' THEN 5" + " WHEN external_type = 'geometry' THEN NULL" + " WHEN external_type = 'super' THEN NULL" + " WHEN external_type = 'varbyte' THEN NULL" + " WHEN external_type = 'geography' THEN NULL" + " WHEN external_type = 'intervaly2m' THEN 32" + " WHEN external_type = 'intervald2s' THEN 64" + " ELSE 2147483647 END AS COLUMN_SIZE," + " NULL AS BUFFER_LENGTH," + " CASE WHEN external_type = 'real'THEN 8" + " WHEN external_type = 'float4' THEN 8" + " WHEN external_type = 'double' THEN 17" + " WHEN external_type = 'double precision' THEN 17" + " WHEN external_type = 'float8' THEN 17" + " WHEN left(external_type, 7) = 'numeric' THEN isnull(nullif(regexp_substr(external_type, '[0-9]+', 11),''),'0')::integer" + " WHEN left(external_type, 7) = 'decimal' THEN isnull(nullif(regexp_substr(external_type, '[0-9]+', 11),''),'0')::integer" + " WHEN external_type = 'time' THEN 6 " + " WHEN external_type = 'time without time zone' THEN 6 " + " WHEN external_type = 'timetz' THEN 6 " + " WHEN external_type = 'time with time zone' THEN 6 " + " WHEN external_type = 'timestamp' THEN 6" + " WHEN external_type = 'timestamp without time zone' THEN 6" + " WHEN external_type = 'timestamptz' THEN 6" + " WHEN external_type = 'timestamp with time zone' THEN 6" + " WHEN external_type = 'geometry' THEN NULL" + " WHEN external_type = 'super' THEN NULL" + " WHEN external_type = 'varbyte' THEN NULL" + " WHEN external_type = 'geography' THEN NULL" + " WHEN external_type = 'intervaly2m' THEN 0" + " WHEN external_type = 'intervald2s' THEN 6" + " ELSE 0 END AS DECIMAL_DIGITS," + " CASE WHEN external_type = 'varbyte' THEN 2" + " WHEN external_type = 'geography' THEN 2" + " ELSE 10" + " END AS 
NUM_PREC_RADIX," + " CAST(CASE is_nullable WHEN 'true' THEN 1 WHEN 'false' THEN 0 ELSE NULL END AS SMALLINT) AS NULLABLE," + " NULL AS REMARKS," + " NULL AS COLUMN_DEF," + " CAST(CASE WHEN external_type = 'text' THEN 12" + " WHEN external_type = 'bit' THEN -7" + " WHEN external_type = 'bool' THEN -7" + " WHEN external_type = 'boolean' THEN -7" + " WHEN left(external_type, 7) = 'varchar' THEN 12" + " WHEN left(external_type, 17) = 'character varying' THEN 12" + " WHEN left(external_type, 4) = 'char' THEN 1" + " WHEN left(external_type, 9) = 'character' THEN 1" + " WHEN left(external_type, 5) = 'nchar' THEN 1" + " WHEN left(external_type, 6) = 'bpchar' THEN 1" + " WHEN left(external_type, 8) = 'nvarchar' THEN 12" + " WHEN external_type = '\"char\"' THEN 1" + " WHEN external_type = 'date' THEN 91" + " WHEN external_type = 'time' THEN 92 " + " WHEN external_type = 'time without time zone' THEN 92 " + " WHEN external_type = 'timetz' THEN 2013 " + " WHEN external_type = 'time with time zone' THEN 2013 " + " WHEN external_type = 'timestamp' THEN 93" + " WHEN external_type = 'timestamp without time zone' THEN 93" + " WHEN external_type = 'timestamptz' THEN 2014" + " WHEN external_type = 'timestamp with time zone' THEN 2014" + " WHEN external_type = 'smallint' THEN 5" + " WHEN external_type = 'int2' THEN 5" + " WHEN external_type = '_int2' THEN 5" + " WHEN external_type = 'integer' THEN 4" + " WHEN external_type = 'int' THEN 4" + " WHEN external_type = 'int4' THEN 4" + " WHEN external_type = '_int4' THEN 4" + " WHEN external_type = 'bigint' THEN -5" + " WHEN external_type = 'int8' THEN -5" + " WHEN left(external_type, 7) = 'decimal' THEN 3" + " WHEN external_type = 'real' THEN 7" + " WHEN external_type = 'float4' THEN 7" + " WHEN external_type = '_float4' THEN 7" + " WHEN external_type = 'double' THEN 8" + " WHEN external_type = 'double precision' THEN 8" + " WHEN external_type = 'float8' THEN 8" + " WHEN external_type = '_float8' THEN 8" + " WHEN external_type = 'float' 
THEN 6" + " WHEN left(external_type, 7) = 'numeric' THEN 2" + " WHEN external_type = 'bytea' THEN -2" + " WHEN external_type = 'oid' THEN -5" + " WHEN external_type = 'name' THEN 12" + " WHEN external_type = 'ARRAY' THEN 2003" + " WHEN external_type = 'geometry' THEN -4" + " WHEN external_type = 'super' THEN -16" + " WHEN external_type = 'varbyte' THEN -4" + " WHEN external_type = 'geography' THEN -4" + " WHEN external_type = 'intervaly2m' THEN 1111" + " WHEN external_type = 'intervald2s' THEN 1111" + " ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE," + " CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB," + " CASE WHEN left(external_type, 7) = 'varchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 7) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 7) " + " END::integer " + " WHEN left(external_type, 17) = 'character varying' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 17) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 17) " + " END::integer " + " WHEN left(external_type, 4) = 'char' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 4) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 4) " + " END::integer " + " WHEN left(external_type, 9) = 'character' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 9) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 9) " + " END::integer " + " WHEN left(external_type, 5) = 'nchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 5) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 5) " + " END::integer " + " WHEN left(external_type, 6) = 'bpchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 6) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 6) " + " END::integer " + " WHEN left(external_type, 8) = 'nvarchar' " + " THEN CASE " + " WHEN regexp_instr(external_type, '\\\\(', 8) = 0 THEN '0' " + " ELSE regexp_substr(external_type, '[0-9]+', 8) " + " 
END::integer " + " WHEN external_type = 'string' THEN 16383" + " ELSE NULL END AS CHAR_OCTET_LENGTH," + " columnnum AS ORDINAL_POSITION," + " CASE IS_NULLABLE WHEN 'true' THEN 'YES' WHEN 'false' THEN 'NO' ELSE NULL END AS IS_NULLABLE," + " NULL AS SCOPE_CATALOG," + " NULL AS SCOPE_SCHEMA," + " NULL AS SCOPE_TABLE," + " NULL AS SOURCE_DATA_TYPE," + " 'NO' AS IS_AUTOINCREMENT," + " 'NO' AS IS_GENERATEDCOLUMN" + " FROM svv_external_columns"); result.append( " WHERE true "); result.append(getCatalogFilterCondition(catalog)); if (schemaPattern != null && !schemaPattern.isEmpty()) { result.append(" AND schemaname LIKE " + escapeQuotes(schemaPattern)); } if (tableNamePattern != null && !tableNamePattern.isEmpty()) { result.append(" AND tablename LIKE " + escapeQuotes(tableNamePattern)); } if (columnNamePattern != null && !columnNamePattern.isEmpty()) { result.append(" AND columnname LIKE " + escapeQuotes(columnNamePattern)); } result.append(" ORDER BY table_schem,table_name,ORDINAL_POSITION "); return result.toString(); } @Override public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { Field[] f = new Field[8]; List<Tuple> v = new ArrayList<Tuple>(); f[0] = new Field("TABLE_CAT", Oid.VARCHAR); f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR); f[2] = new Field("TABLE_NAME", Oid.VARCHAR); f[3] = new Field("COLUMN_NAME", Oid.VARCHAR); f[4] = new Field("GRANTOR", Oid.VARCHAR); f[5] = new Field("GRANTEE", Oid.VARCHAR); f[6] = new Field("PRIVILEGE", Oid.VARCHAR); f[7] = new Field("IS_GRANTABLE", Oid.VARCHAR); String sql; sql = "SELECT n.nspname,c.relname,u.usename,c.relacl, " // + (connection.haveMinimumServerVersion(ServerVersion.v8_4) ? 
"a.attacl, " : "") + " a.attname " + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, " + " pg_catalog.pg_user u, pg_catalog.pg_attribute a " + " WHERE c.relnamespace = n.oid " + " AND c.relowner = u.usesysid " + " AND c.oid = a.attrelid " + " AND c.relkind = 'r' " + " AND a.attnum > 0 AND NOT a.attisdropped "; sql += getCatalogFilterCondition(catalog); if (schema != null && !schema.isEmpty()) { sql += " AND n.nspname = " + escapeQuotes(schema); } if (table != null && !table.isEmpty()) { sql += " AND c.relname = " + escapeQuotes(table); } if (columnNamePattern != null && !columnNamePattern.isEmpty()) { sql += " AND a.attname LIKE " + escapeQuotes(columnNamePattern); } sql += " ORDER BY attname "; Statement stmt = connection.createStatement(); ResultSet rs = stmt.executeQuery(sql); while (rs.next()) { byte[] schemaName = rs.getBytes("nspname"); byte[] tableName = rs.getBytes("relname"); byte[] column = rs.getBytes("attname"); String owner = rs.getString("usename"); String relAcl = rs.getString("relacl"); // For instance: SELECT -> user1 -> list of [grantor, grantable] Map<String, Map<String, List<String[]>>> permissions = parseACL(relAcl, owner); /* if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) { String acl = rs.getString("attacl"); Map<String, Map<String, List<String[]>>> relPermissions = parseACL(acl, owner); permissions.putAll(relPermissions); } */ String[] permNames = permissions.keySet().toArray(new String[0]); Arrays.sort(permNames); for (String permName : permNames) { byte[] privilege = connection.encodeString(permName); Map<String, List<String[]>> grantees = permissions.get(permName); for (Map.Entry<String, List<String[]>> userToGrantable : grantees.entrySet()) { List<String[]> grantor = userToGrantable.getValue(); String grantee = userToGrantable.getKey(); for (String[] grants : grantor) { String grantable = owner.equals(grantee) ?
// NOTE(review): per-column ACLs (pg_attribute.attacl) are commented out above;
// privileges here come from the table-level relacl only. The owner is always
// reported as grantable ("YES") regardless of the parsed grant flag.
"YES" : grants[1]; byte[][] tuple = new byte[8][]; tuple[0] = connection.encodeString(connection.getCatalog()); tuple[1] = schemaName; tuple[2] = tableName; tuple[3] = column; tuple[4] = connection.encodeString(grants[0]); tuple[5] = connection.encodeString(grantee); tuple[6] = privilege; tuple[7] = connection.encodeString(grantable); v.add(new Tuple(tuple)); } } } } rs.close(); stmt.close(); return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); } @Override public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { Field[] f = new Field[7]; List<Tuple> v = new ArrayList<Tuple>(); f[0] = new Field("TABLE_CAT", Oid.VARCHAR); f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR); f[2] = new Field("TABLE_NAME", Oid.VARCHAR); f[3] = new Field("GRANTOR", Oid.VARCHAR); f[4] = new Field("GRANTEE", Oid.VARCHAR); f[5] = new Field("PRIVILEGE", Oid.VARCHAR); f[6] = new Field("IS_GRANTABLE", Oid.VARCHAR); String sql; sql = "SELECT n.nspname,c.relname,u.usename,c.relacl " + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, pg_catalog.pg_user u " + " WHERE c.relnamespace = n.oid " + " AND c.relowner = u.usesysid " + " AND c.relkind IN ('r','p','v','m','f') "; sql += getCatalogFilterCondition(catalog); if (schemaPattern != null && !schemaPattern.isEmpty()) { sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); } if (tableNamePattern != null && !tableNamePattern.isEmpty()) { sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern); } sql += " ORDER BY nspname, relname "; Statement stmt = connection.createStatement(); ResultSet rs = stmt.executeQuery(sql); while (rs.next()) { byte[] schema = rs.getBytes("nspname"); byte[] table = rs.getBytes("relname"); String owner = rs.getString("usename"); String acl = rs.getString("relacl"); Map<String, Map<String, List<String[]>>> permissions = parseACL(acl, owner); String[] permNames = permissions.keySet().toArray(new String[0]); 
// NOTE(review): getTablePrivileges covers relkinds r/p/v/m/f (tables, partitioned
// tables, views, materialized views, foreign tables) and sorts privilege names for
// a deterministic result order.
// NOTE(review): remainder of getTablePrivileges — one output row per
// (privilege, grantee, grantor) triple; the table owner is always grantable.
Arrays.sort(permNames); for (String permName : permNames) { byte[] privilege = connection.encodeString(permName); Map<String, List<String[]>> grantees = permissions.get(permName); for (Map.Entry<String, List<String[]>> userToGrantable : grantees.entrySet()) { List<String[]> grants = userToGrantable.getValue(); String granteeUser = userToGrantable.getKey(); for (String[] grantTuple : grants) { // report the owner as grantor if it's missing String grantor = grantTuple[0] == null ? owner : grantTuple[0]; // owner always has grant privileges String grantable = owner.equals(granteeUser) ? "YES" : grantTuple[1]; byte[][] tuple = new byte[7][]; tuple[0] = connection.encodeString(connection.getCatalog()); tuple[1] = schema; tuple[2] = table; tuple[3] = connection.encodeString(grantor); tuple[4] = connection.encodeString(granteeUser); tuple[5] = privilege; tuple[6] = connection.encodeString(grantable); v.add(new Tuple(tuple)); } } } } rs.close(); stmt.close(); return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); } /** * Parse an String of ACLs into a List of ACLs. */ private static List<String> parseACLArray(String aclString) { List<String> acls = new ArrayList<String>(); if (aclString == null || aclString.isEmpty()) { return acls; } boolean inQuotes = false; // start at 1 because of leading "{" int beginIndex = 1; char prevChar = ' '; for (int i = beginIndex; i < aclString.length(); i++) { char c = aclString.charAt(i); if (c == '"' && prevChar != '\\') { inQuotes = !inQuotes; } else if (c == ',' && !inQuotes) { acls.add(aclString.substring(beginIndex, i)); beginIndex = i + 1; } prevChar = c; } // add last element removing the trailing "}" acls.add(aclString.substring(beginIndex, aclString.length() - 1)); // Strip out enclosing quotes, if any. 
for (int i = 0; i < acls.size(); i++) { String acl = acls.get(i); if (acl.startsWith("\"") && acl.endsWith("\"")) { acl = acl.substring(1, acl.length() - 1); acls.set(i, acl); } } return acls; } /** * Add the user described by the given acl to the Lists of users with the privileges described by * the acl. */ private static void addACLPrivileges(String acl, Map<String, Map<String, List<String[]>>> privileges) { int equalIndex = acl.lastIndexOf("="); int slashIndex = acl.lastIndexOf("/"); if (equalIndex == -1) { return; } String user = acl.substring(0, equalIndex); String grantor = null; if (user.isEmpty()) { user = "PUBLIC"; } String privs; if (slashIndex != -1) { privs = acl.substring(equalIndex + 1, slashIndex); grantor = acl.substring(slashIndex + 1, acl.length()); } else { privs = acl.substring(equalIndex + 1, acl.length()); } for (int i = 0; i < privs.length(); i++) { char c = privs.charAt(i); if (c != '*') { String sqlpriv; String grantable; if (i < privs.length() - 1 && privs.charAt(i + 1) == '*') { grantable = "YES"; } else { grantable = "NO"; } switch (c) { case 'a': sqlpriv = "INSERT"; break; case 'r': case 'p': sqlpriv = "SELECT"; break; case 'w': sqlpriv = "UPDATE"; break; case 'd': sqlpriv = "DELETE"; break; case 'D': sqlpriv = "TRUNCATE"; break; case 'R': sqlpriv = "RULE"; break; case 'x': sqlpriv = "REFERENCES"; break; case 't': sqlpriv = "TRIGGER"; break; // the following can't be granted to a table, but // we'll keep them for completeness. 
// NOTE(review): addACLPrivileges parses one "grantee=privchars/grantor" ACL item;
// a '*' suffix on a privilege character marks it as grantable (WITH GRANT OPTION).
// An empty grantee means PUBLIC. The switch continues below with non-table privileges.
// NOTE(review): tail of the privilege-character switch (EXECUTE/USAGE/CREATE/CREATE TEMP
// cannot apply to tables but are kept for completeness); unknown chars map to "UNKNOWN".
case 'X': sqlpriv = "EXECUTE"; break; case 'U': sqlpriv = "USAGE"; break; case 'C': sqlpriv = "CREATE"; break; case 'T': sqlpriv = "CREATE TEMP"; break; default: sqlpriv = "UNKNOWN"; } Map<String, List<String[]>> usersWithPermission = privileges.get(sqlpriv); String[] grant = {grantor, grantable}; if (usersWithPermission == null) { usersWithPermission = new HashMap<String, List<String[]>>(); List<String[]> permissionByGrantor = new ArrayList<String[]>(); permissionByGrantor.add(grant); usersWithPermission.put(user, permissionByGrantor); privileges.put(sqlpriv, usersWithPermission); } else { List<String[]> permissionByGrantor = usersWithPermission.get(user); if (permissionByGrantor == null) { permissionByGrantor = new ArrayList<String[]>(); permissionByGrantor.add(grant); usersWithPermission.put(user, permissionByGrantor); } else { permissionByGrantor.add(grant); } } } } } /** * Take the a String representing an array of ACLs and return a Map mapping the SQL permission * name to a List of usernames who have that permission. * For instance: {@code SELECT -> user1 -> list of [grantor, grantable]} * * @param aclArray ACL array * @param owner owner * @return a Map mapping the SQL permission name */ public Map<String, Map<String, List<String[]>>> parseACL(String aclArray, String owner) { if (aclArray == null) { // arwdxt -- 8.2 Removed the separate RULE permission // arwdDxt -- 8.4 Added a separate TRUNCATE permission String perms = "arwdxt"; // connection.haveMinimumServerVersion(ServerVersion.v8_4) ? 
"arwdDxt" : "arwdxt"; aclArray = "{" + owner + "=" + perms + "/" + owner + "}"; } List<String> acls = parseACLArray(aclArray); Map<String, Map<String, List<String[]>>> privileges = new HashMap<String, Map<String, List<String[]>>>(); for (String acl : acls) { addACLPrivileges(acl, privileges); } return privileges; } public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { Field[] f = new Field[8]; List<Tuple> v = new ArrayList<Tuple>(); // The new ResultSet tuple stuff f[0] = new Field("SCOPE", Oid.INT2); f[1] = new Field("COLUMN_NAME", Oid.VARCHAR); f[2] = new Field("DATA_TYPE", Oid.INT2); f[3] = new Field("TYPE_NAME", Oid.VARCHAR); f[4] = new Field("COLUMN_SIZE", Oid.INT4); f[5] = new Field("BUFFER_LENGTH", Oid.INT4); f[6] = new Field("DECIMAL_DIGITS", Oid.INT2); f[7] = new Field("PSEUDO_COLUMN", Oid.INT2); /* * At the moment this simply returns a table's primary key, if there is one. I believe other * unique indexes, ctid, and oid should also be considered. 
-KJ */ String sql; sql = "SELECT a.attname, a.atttypid, a.atttypmod " + "FROM " + "pg_catalog.pg_namespace n, " + "pg_catalog.pg_class ct, " + "pg_catalog.pg_class ci, " + "pg_catalog.pg_attribute a, " + "pg_catalog.pg_index i " + "WHERE " + "ct.oid=i.indrelid AND " + "ci.oid=i.indexrelid AND " + "a.attrelid=ci.oid AND " + "i.indisprimary AND " + "ct.relnamespace = n.oid "; sql += getCatalogFilterCondition(catalog); if (schema != null && !schema.isEmpty()) { sql += " AND n.nspname = " + escapeQuotes(schema); } if (table != null && !table.isEmpty()) { sql += " AND ct.relname = " + escapeQuotes(table); } sql += " ORDER BY a.attnum "; Statement stmt = connection.createStatement(); ResultSet rs = stmt.executeQuery(sql); while (rs.next()) { byte[][] tuple = new byte[8][]; int typeOid = (int) rs.getLong("atttypid"); int typeMod = rs.getInt("atttypmod"); int decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod); int columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod); if (columnSize == 0) { columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod); } tuple[0] = connection.encodeString(Integer.toString(scope)); tuple[1] = rs.getBytes("attname"); tuple[2] = connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(typeOid))); tuple[3] = connection.encodeString(connection.getTypeInfo().getRSType(typeOid)); tuple[4] = connection.encodeString(Integer.toString(columnSize)); tuple[5] = null; // unused tuple[6] = connection.encodeString(Integer.toString(decimalDigits)); tuple[7] = connection.encodeString(Integer.toString(java.sql.DatabaseMetaData.bestRowNotPseudo)); v.add(new Tuple(tuple)); } rs.close(); stmt.close(); return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); } public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { Field[] f = new Field[8]; List<Tuple> v = new ArrayList<Tuple>(); // The new ResultSet tuple stuff f[0] = new 
// NOTE(review): getBestRowIdentifier above returns only primary-key columns
// (i.indisprimary); getVersionColumns below synthesizes a single pseudo-row for "ctid".
Field("SCOPE", Oid.INT2); f[1] = new Field("COLUMN_NAME", Oid.VARCHAR); f[2] = new Field("DATA_TYPE", Oid.INT2); f[3] = new Field("TYPE_NAME", Oid.VARCHAR); f[4] = new Field("COLUMN_SIZE", Oid.INT4); f[5] = new Field("BUFFER_LENGTH", Oid.INT4); f[6] = new Field("DECIMAL_DIGITS", Oid.INT2); f[7] = new Field("PSEUDO_COLUMN", Oid.INT2); byte[][] tuple = new byte[8][]; /* * Redshift does not have any column types that are automatically updated like some databases' * timestamp type. We can't tell what rules or triggers might be doing, so we are left with the * system columns that change on an update. An update may change all of the following system * columns: ctid, xmax, xmin, cmax, and cmin. Depending on if we are in a transaction and * whether we roll it back or not the only guaranteed change is to ctid. -KJ */ tuple[0] = null; tuple[1] = connection.encodeString("ctid"); tuple[2] = connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType("tid"))); tuple[3] = connection.encodeString("tid"); tuple[4] = null; tuple[5] = null; tuple[6] = null; tuple[7] = connection.encodeString(Integer.toString(java.sql.DatabaseMetaData.versionColumnPseudo)); v.add(new Tuple(tuple)); /* * Perhaps we should check that the given catalog.schema.table actually exists. 
// NOTE(review): getPrimaryKeys joins pg_index (indisprimary) through pg_class/pg_attribute;
// schema/table filters use exact equality (not LIKE), and rows are ordered per JDBC spec.
-KJ */ return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); } public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { String sql; sql = "SELECT " + "current_database() AS TABLE_CAT, " + "n.nspname AS TABLE_SCHEM, " + "ct.relname AS TABLE_NAME, " + "a.attname AS COLUMN_NAME, " + "a.attnum AS KEY_SEQ, " + "ci.relname AS PK_NAME " + "FROM " + "pg_catalog.pg_namespace n, " + "pg_catalog.pg_class ct, " + "pg_catalog.pg_class ci, " + "pg_catalog.pg_attribute a, " + "pg_catalog.pg_index i " + "WHERE " + "ct.oid=i.indrelid AND " + "ci.oid=i.indexrelid AND " + "a.attrelid=ci.oid AND " + "i.indisprimary AND " + "ct.relnamespace = n.oid "; sql += getCatalogFilterCondition(catalog); if (schema != null && !schema.isEmpty()) { sql += " AND n.nspname = " + escapeQuotes(schema); } if (table != null && !table.isEmpty()) { sql += " AND ct.relname = " + escapeQuotes(table); } sql += " ORDER BY table_name, pk_name, key_seq"; return createMetaDataStatement().executeQuery(sql); } /** * @param primaryCatalog primary catalog * @param primarySchema primary schema * @param primaryTable if provided will get the keys exported by this table * @param foreignCatalog foreign catalog * @param foreignSchema foreign schema * @param foreignTable if provided will get the keys imported by this table * @return ResultSet * @throws SQLException if something wrong happens */ protected ResultSet getImportedExportedKeys(String primaryCatalog, String primarySchema, String primaryTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { /* * The addition of the pg_constraint in 7.3 table should have really helped us out here, but it * comes up just a bit short. - The conkey, confkey columns aren't really useful without * contrib/array unless we want to issues separate queries. - Unique indexes that can support * foreign keys are not necessarily added to pg_constraint. 
Also multiple unique indexes * covering the same keys can be created which make it difficult to determine the PK_NAME field. */ String sql = "SELECT current_database() AS PKTABLE_CAT, pkn.nspname AS PKTABLE_SCHEM, pkc.relname AS PKTABLE_NAME, pka.attname AS PKCOLUMN_NAME, " + "current_database() AS FKTABLE_CAT, fkn.nspname AS FKTABLE_SCHEM, fkc.relname AS FKTABLE_NAME, fka.attname AS FKCOLUMN_NAME, " + "pos.n AS KEY_SEQ, " + "CASE con.confupdtype " + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction + " ELSE NULL END AS UPDATE_RULE, " + "CASE con.confdeltype " + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction + " ELSE NULL END AS DELETE_RULE, " + "con.conname AS FK_NAME, pkic.relname AS PK_NAME, " + "CASE " + " WHEN con.condeferrable AND con.condeferred THEN " + DatabaseMetaData.importedKeyInitiallyDeferred + " WHEN con.condeferrable THEN " + DatabaseMetaData.importedKeyInitiallyImmediate + " ELSE " + DatabaseMetaData.importedKeyNotDeferrable + " END AS DEFERRABILITY " + " FROM " + " pg_catalog.pg_namespace pkn, pg_catalog.pg_class pkc, pg_catalog.pg_attribute pka, " + " pg_catalog.pg_namespace fkn, pg_catalog.pg_class fkc, pg_catalog.pg_attribute fka, " + " pg_catalog.pg_constraint con, " + " pg_catalog.generate_series(1, " + getMaxIndexKeys() + ") pos(n), " + " pg_catalog.pg_class pkic"; // Starting in Postgres 9.0, pg_constraint was augmented with the conindid column, 
// NOTE(review): pre-9.0 path is kept active here — the join goes through pg_depend to
// find the index backing the constraint; the conindid shortcut remains commented out.
// Multi-column keys are expanded via generate_series over getMaxIndexKeys() positions.
which // contains the oid of the index supporting the constraint. This makes it unnecessary to do a // further join on pg_depend. // if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) { sql += ", pg_catalog.pg_depend dep "; // } sql += " WHERE pkn.oid = pkc.relnamespace AND pkc.oid = pka.attrelid AND pka.attnum = con.confkey[pos.n] AND con.confrelid = pkc.oid " + " AND fkn.oid = fkc.relnamespace AND fkc.oid = fka.attrelid AND fka.attnum = con.conkey[pos.n] AND con.conrelid = fkc.oid " + " AND con.contype = 'f' AND pkic.relkind = 'i' "; // if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) { sql += " AND con.oid = dep.objid AND pkic.oid = dep.refobjid AND dep.classid = 'pg_constraint'::regclass::oid AND dep.refclassid = 'pg_class'::regclass::oid "; /* } else { sql += " AND pkic.oid = con.conindid "; } */ sql += getCatalogFilterCondition(primaryCatalog); if (primarySchema != null && !primarySchema.isEmpty()) { sql += " AND pkn.nspname = " + escapeQuotes(primarySchema); } if (foreignSchema != null && !foreignSchema.isEmpty()) { sql += " AND fkn.nspname = " + escapeQuotes(foreignSchema); } if (primaryTable != null && !primaryTable.isEmpty()) { sql += " AND pkc.relname = " + escapeQuotes(primaryTable); } if (foreignTable != null && !foreignTable.isEmpty()) { sql += " AND fkc.relname = " + escapeQuotes(foreignTable); } if (primaryTable != null) { sql += " ORDER BY fkn.nspname,fkc.relname,con.conname,pos.n"; } else { sql += " ORDER BY pkn.nspname,pkc.relname, con.conname,pos.n"; } return createMetaDataStatement().executeQuery(sql); } public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { return getImportedExportedKeys(null, null, null, catalog, schema, table); } public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { return getImportedExportedKeys(catalog, schema, table, null, null, null); } public ResultSet getCrossReference(String primaryCatalog, String 
primarySchema, String primaryTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { return getImportedExportedKeys(primaryCatalog, primarySchema, primaryTable, foreignCatalog, foreignSchema, foreignTable); } public ResultSet getTypeInfo() throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true); Field[] f = new Field[18]; List<Tuple> v = new ArrayList<Tuple>(); // The new ResultSet tuple stuff f[0] = new Field("TYPE_NAME", Oid.VARCHAR); f[1] = new Field("DATA_TYPE", Oid.INT2); f[2] = new Field("PRECISION", Oid.INT4); f[3] = new Field("LITERAL_PREFIX", Oid.VARCHAR); f[4] = new Field("LITERAL_SUFFIX", Oid.VARCHAR); f[5] = new Field("CREATE_PARAMS", Oid.VARCHAR); f[6] = new Field("NULLABLE", Oid.INT2); f[7] = new Field("CASE_SENSITIVE", Oid.BOOL); f[8] = new Field("SEARCHABLE", Oid.INT2); f[9] = new Field("UNSIGNED_ATTRIBUTE", Oid.BOOL); f[10] = new Field("FIXED_PREC_SCALE", Oid.BOOL); f[11] = new Field("AUTO_INCREMENT", Oid.BOOL); f[12] = new Field("LOCAL_TYPE_NAME", Oid.VARCHAR); f[13] = new Field("MINIMUM_SCALE", Oid.INT2); f[14] = new Field("MAXIMUM_SCALE", Oid.INT2); f[15] = new Field("SQL_DATA_TYPE", Oid.INT4); f[16] = new Field("SQL_DATETIME_SUB", Oid.INT4); f[17] = new Field("NUM_PREC_RADIX", Oid.INT4); String sql; sql = "SELECT t.typname,t.oid FROM pg_catalog.pg_type t" // + " JOIN pg_catalog.pg_namespace n ON (t.typnamespace = n.oid) " // + " WHERE n.nspname != 'pg_toast'" // + " AND " + " WHERE " + " t.typname in (" + "'bool','char','int8','int2','int4','float4','float8','bpchar','varchar','date','time','timestamp','timestamptz','numeric','refcursor','geometry','super','varbyte','geography')"; // + " AND " // + " (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))"; /* if (connection.getHideUnprivilegedObjects() && connection.haveMinimumServerVersion(ServerVersion.v9_2)) { sql += " AND has_type_privilege(t.oid, 'USAGE')"; } */ Statement 
// NOTE(review): getTypeInfo queries pg_type for a fixed whitelist of Redshift type
// names; rows carry a hidden 19th column (index 18, the raw SQL type as a BigInteger)
// used only for the Collections.sort below and never exposed to callers.
stmt = connection.createStatement(); ResultSet rs = stmt.executeQuery(sql); // cache some results, this will keep memory usage down, and speed // things up a little. byte[] bZero = connection.encodeString("0"); byte[] b10 = connection.encodeString("10"); byte[] b2 = connection.encodeString("2"); byte[] bf = connection.encodeString("f"); byte[] bt = connection.encodeString("t"); byte[] bliteral = connection.encodeString("'"); byte[] bNullable = connection.encodeString(Integer.toString(java.sql.DatabaseMetaData.typeNullable)); byte[] bSearchable = connection.encodeString(Integer.toString(java.sql.DatabaseMetaData.typeSearchable)); TypeInfo ti = connection.getTypeInfo(); if (ti instanceof TypeInfoCache) { ((TypeInfoCache) ti).cacheSQLTypes(connection.getLogger()); } while (rs.next()) { byte[][] tuple = new byte[19][]; String typname = rs.getString(1); int typeOid = (int) rs.getLong(2); tuple[0] = connection.encodeString(typname); int sqlType = connection.getTypeInfo().getSQLType(typname); tuple[1] = connection.encodeString(Integer.toString(sqlType)); /* this is just for sorting below, the result set never sees this */ tuple[18] = BigInteger.valueOf(sqlType).toByteArray(); tuple[2] = connection .encodeString(Integer.toString(connection.getTypeInfo().getMaximumPrecision(typeOid))); // Using requiresQuoting(oid) would might trigger select statements that might fail with NPE // if oid in question is being dropped. // requiresQuotingSqlType is not bulletproof, however, it solves the most visible NPE. if (connection.getTypeInfo().requiresQuotingSqlType(sqlType)) { tuple[3] = bliteral; tuple[4] = bliteral; } tuple[6] = bNullable; // all types can be null tuple[7] = connection.getTypeInfo().isCaseSensitive(typeOid) ? bt : bf; tuple[8] = bSearchable; // any thing can be used in the WHERE clause tuple[9] = connection.getTypeInfo().isSigned(typeOid) ? 
// NOTE(review): signed types report UNSIGNED_ATTRIBUTE=false ("f"), unsigned ones "t".
bf : bt; tuple[10] = bf; // false for now - must handle money tuple[11] = bf; // false - it isn't autoincrement tuple[13] = bZero; // min scale is zero // only numeric can supports a scale. tuple[14] = (typeOid == Oid.NUMERIC) ? connection.encodeString("1000") : bZero; // 12 - LOCAL_TYPE_NAME is null // 15 & 16 are unused so we return null // VARBYTE and GEOGRAPHY is base2,everything else is base 10 tuple[17] = (typeOid == Oid.VARBYTE || typeOid == Oid.GEOGRAPHY) ? b2 : b10; v.add(new Tuple(tuple)); // add pseudo-type serial, bigserial /* if (typname.equals("int4")) { byte[][] tuple1 = tuple.clone(); tuple1[0] = connection.encodeString("serial"); tuple1[11] = bt; v.add(new Tuple(tuple1)); } else if (typname.equals("int8")) { byte[][] tuple1 = tuple.clone(); tuple1[0] = connection.encodeString("bigserial"); tuple1[11] = bt; v.add(new Tuple(tuple1)); } */ } rs.close(); stmt.close(); Collections.sort(v, new Comparator<Tuple>() { @Override public int compare(Tuple o1, Tuple o2) { int i1 = ByteConverter.bytesToInt(o1.get(18)); int i2 = ByteConverter.bytesToInt(o2.get(18)); return (i1 < i2) ? -1 : ((i1 == i2) ? 0 : 1); } }); ResultSet rc = ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } public ResultSet getIndexInfo(String catalog, String schema, String tableName, boolean unique, boolean approximate) throws SQLException { /* * This is a complicated function because we have three possible situations: <= 7.2 no schemas, * single column functional index 7.3 schemas, single column functional index >= 7.4 schemas, * multi-column expressional index >= 8.3 supports ASC/DESC column info >= 9.0 no longer renames * index columns on a table column rename, so we must look at the table attribute names * * with the single column functional index we need an extra join to the table's pg_attribute * data to get the column the function operates on. 
*/ String sql; /* if (connection.haveMinimumServerVersion(ServerVersion.v8_3)) { sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, " + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, " + " NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, " + " CASE i.indisclustered " + " WHEN true THEN " + java.sql.DatabaseMetaData.tableIndexClustered + " ELSE CASE am.amname " + " WHEN 'hash' THEN " + java.sql.DatabaseMetaData.tableIndexHashed + " ELSE " + java.sql.DatabaseMetaData.tableIndexOther + " END " + " END AS TYPE, " + " (information_schema._pg_expandarray(i.indkey)).n AS ORDINAL_POSITION, " + " ci.reltuples AS CARDINALITY, " + " ci.relpages AS PAGES, " + " pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION, " + " ci.oid AS CI_OID, " + " i.indoption AS I_INDOPTION, " + (connection.haveMinimumServerVersion(ServerVersion.v9_6) ? " am.amname AS AM_NAME " : " am.amcanorder AS AM_CANORDER ") + "FROM pg_catalog.pg_class ct " + " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) " + " JOIN pg_catalog.pg_index i ON (ct.oid = i.indrelid) " + " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) " + " JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) " + "WHERE true "; if (schema != null && !schema.isEmpty()) { sql += " AND n.nspname = " + escapeQuotes(schema); } sql += " AND ct.relname = " + escapeQuotes(tableName); if (unique) { sql += " AND i.indisunique "; } sql = "SELECT " + " tmp.TABLE_CAT, " + " tmp.TABLE_SCHEM, " + " tmp.TABLE_NAME, " + " tmp.NON_UNIQUE, " + " tmp.INDEX_QUALIFIER, " + " tmp.INDEX_NAME, " + " tmp.TYPE, " + " tmp.ORDINAL_POSITION, " + " trim(both '\"' from pg_catalog.pg_get_indexdef(tmp.CI_OID, tmp.ORDINAL_POSITION, false)) AS COLUMN_NAME, " + (connection.haveMinimumServerVersion(ServerVersion.v9_6) ? 
" CASE tmp.AM_NAME " + " WHEN 'btree' THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1 " + " WHEN 1 THEN 'D' " + " ELSE 'A' " + " END " + " ELSE NULL " + " END AS ASC_OR_DESC, " : " CASE tmp.AM_CANORDER " + " WHEN true THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1 " + " WHEN 1 THEN 'D' " + " ELSE 'A' " + " END " + " ELSE NULL " + " END AS ASC_OR_DESC, ") + " tmp.CARDINALITY, " + " tmp.PAGES, " + " tmp.FILTER_CONDITION " + "FROM (" + sql + ") AS tmp"; } else */ /* { String select; String from; String where; select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "; from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, " + " pg_catalog.pg_attribute a, pg_catalog.pg_am am "; where = " AND n.oid = ct.relnamespace "; from += ", pg_catalog.pg_index i "; if (schema != null && !schema.isEmpty()) { where += " AND n.nspname = " + escapeQuotes(schema); } sql = select + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, " + " CASE i.indisclustered " + " WHEN true THEN " + java.sql.DatabaseMetaData.tableIndexClustered + " ELSE CASE am.amname " + " WHEN 'hash' THEN " + java.sql.DatabaseMetaData.tableIndexHashed + " ELSE " + java.sql.DatabaseMetaData.tableIndexOther + " END " + " END AS TYPE, " + " a.attnum AS ORDINAL_POSITION, " + " CASE WHEN i.indexprs IS NULL THEN a.attname " + " ELSE pg_catalog.pg_get_indexdef(ci.oid,a.attnum,false) END AS COLUMN_NAME, " + " NULL AS ASC_OR_DESC, " + " ci.reltuples AS CARDINALITY, " + " ci.relpages AS PAGES, " + " pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION " + from + " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid AND a.attrelid=ci.oid AND ci.relam=am.oid " + where; sql += " AND ct.relname = " + escapeQuotes(tableName); if (unique) { sql += " AND i.indisunique "; } } sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION "; */ // Disable, Redshift doesn't do indexes... 
// we'll just execute a dummy query to return an empty resultset sql = "SELECT '' AS TABLE_CAT, " + "'' AS TABLE_SCHEM, " + "'' AS TABLE_NAME, " + "cast('t' as boolean) AS NON_UNIQUE, " + "'' AS INDEX_QUALIFIER, " + "'' AS INDEX_NAME, " + "cast(0 as smallint) AS TYPE, " + "cast(1 as smallint) AS ORDINAL_POSITION, " + "'' AS COLUMN_NAME, " + "NULL AS ASC_OR_DESC, " + "0 AS CARDINALITY, " + "0 AS PAGES, " + "'' AS FILTER_CONDITION " + "WHERE (1 = 0)"; return createMetaDataStatement().executeQuery(sql); } // ** JDBC 2 Extensions ** public boolean supportsResultSetType(int type) throws SQLException { // The only type we don't support return type != ResultSet.TYPE_SCROLL_SENSITIVE; } public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { // These combinations are not supported! if (type == ResultSet.TYPE_SCROLL_SENSITIVE) { return false; } // We do not support Updateable ResultSets if (concurrency == ResultSet.CONCUR_UPDATABLE) { return false; // true } // Everything else we do return true; } /* lots of unsupported stuff... 
*/ public boolean ownUpdatesAreVisible(int type) throws SQLException { return true; } public boolean ownDeletesAreVisible(int type) throws SQLException { return true; } public boolean ownInsertsAreVisible(int type) throws SQLException { // indicates that return true; } public boolean othersUpdatesAreVisible(int type) throws SQLException { return false; } public boolean othersDeletesAreVisible(int i) throws SQLException { return false; } public boolean othersInsertsAreVisible(int type) throws SQLException { return false; } public boolean updatesAreDetected(int type) throws SQLException { return false; } public boolean deletesAreDetected(int i) throws SQLException { return false; } public boolean insertsAreDetected(int type) throws SQLException { return false; } public boolean supportsBatchUpdates() throws SQLException { return true; } public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { String sql = "select " + "current_database() as type_cat, n.nspname as type_schem, t.typname as type_name, null as class_name, " + "CASE WHEN t.typtype='c' then " + java.sql.Types.STRUCT + " else " + java.sql.Types.DISTINCT + " end as data_type, pg_catalog.obj_description(t.oid, 'pg_type') " + "as remarks, CASE WHEN t.typtype = 'd' then (select CASE"; StringBuilder sqlwhen = new StringBuilder(); for (Iterator<String> i = connection.getTypeInfo().getRSTypeNamesWithSQLTypes(); i.hasNext(); ) { String pgType = i.next(); int sqlType = connection.getTypeInfo().getSQLType(pgType); sqlwhen.append(" when typname = ").append(escapeQuotes(pgType)).append(" then ").append(sqlType); } sql += sqlwhen.toString(); sql += " else " + java.sql.Types.OTHER + " end from pg_type where oid=t.typbasetype) " + "else null end as base_type " + "from pg_catalog.pg_type t, pg_catalog.pg_namespace n where t.typnamespace = n.oid and n.nspname != 'pg_catalog' and n.nspname != 'pg_toast'"; StringBuilder toAdd = new StringBuilder(); if (types != 
null) { toAdd.append(" and (false "); for (int type : types) { switch (type) { case Types.STRUCT: toAdd.append(" or t.typtype = 'c'"); break; case Types.DISTINCT: toAdd.append(" or t.typtype = 'd'"); break; } } toAdd.append(" ) "); } else { toAdd.append(" and t.typtype IN ('c','d') "); } // spec says that if typeNamePattern is a fully qualified name // then the schema and catalog are ignored if (typeNamePattern != null) { // search for qualifier int firstQualifier = typeNamePattern.indexOf('.'); int secondQualifier = typeNamePattern.lastIndexOf('.'); if (firstQualifier != -1) { // if one of them is -1 they both will be if (firstQualifier != secondQualifier) { // we have a catalog.schema.typename, ignore catalog schemaPattern = typeNamePattern.substring(firstQualifier + 1, secondQualifier); } else { // we just have a schema.typename schemaPattern = typeNamePattern.substring(0, firstQualifier); } // strip out just the typeName typeNamePattern = typeNamePattern.substring(secondQualifier + 1); } toAdd.append(" and t.typname like ").append(escapeQuotes(typeNamePattern)); } toAdd.append(getCatalogFilterCondition(catalog)); // schemaPattern may have been modified above if (schemaPattern != null) { toAdd.append(" and n.nspname like ").append(escapeQuotes(schemaPattern)); } sql += toAdd.toString(); /* if (connection.getHideUnprivilegedObjects() && connection.haveMinimumServerVersion(ServerVersion.v9_2)) { sql += " AND has_type_privilege(t.oid, 'USAGE')"; } */ sql += " order by data_type, type_schem, type_name"; return createMetaDataStatement().executeQuery(sql); } @Override public Connection getConnection() throws SQLException { return connection; } protected Statement createMetaDataStatement() throws SQLException { return connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); } public long getMaxLogicalLobSize() throws SQLException { return 0; } public boolean supportsRefCursors() throws SQLException { return true; } @Override public 
RowIdLifetime getRowIdLifetime() throws SQLException { throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getRowIdLifetime()"); } @Override public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { return true; } @Override public boolean autoCommitFailureClosesAllResultSets() throws SQLException { return false; } @Override public ResultSet getClientInfoProperties() throws SQLException { Field[] f = new Field[4]; f[0] = new Field("NAME", Oid.VARCHAR); f[1] = new Field("MAX_LEN", Oid.INT4); f[2] = new Field("DEFAULT_VALUE", Oid.VARCHAR); f[3] = new Field("DESCRIPTION", Oid.VARCHAR); List<Tuple> v = new ArrayList<Tuple>(); /* if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) */ { byte[][] tuple = new byte[4][]; tuple[0] = connection.encodeString("ApplicationName"); tuple[1] = connection.encodeString(Integer.toString(getMaxNameLength())); tuple[2] = null; // connection.encodeString(""); tuple[3] = connection .encodeString("The name of the application currently utilizing the connection."); v.add(new Tuple(tuple)); } return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); } public boolean isWrapperFor(Class<?> iface) throws SQLException { return iface.isAssignableFrom(getClass()); } public <T> T unwrap(Class<T> iface) throws SQLException { if (iface.isAssignableFrom(getClass())) { return iface.cast(this); } throw new SQLException("Cannot unwrap to " + iface.getName()); } public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, catalog, schemaPattern, functionNamePattern); // The pg_get_function_result only exists 8.4 or later // boolean pgFuncResultExists = connection.haveMinimumServerVersion(ServerVersion.v8_4); // Use query that support pg_get_function_result to get function result, else unknown is defaulted // String funcTypeSql = DatabaseMetaData.functionResultUnknown 
+ " "; /* if (pgFuncResultExists) { funcTypeSql = " CASE " + " WHEN (format_type(p.prorettype, null) = 'unknown') THEN " + DatabaseMetaData.functionResultUnknown + " WHEN " + " (substring(pg_get_function_result(p.oid) from 0 for 6) = 'TABLE') OR " + " (substring(pg_get_function_result(p.oid) from 0 for 6) = 'SETOF') THEN " + DatabaseMetaData.functionReturnsTable + " ELSE " + DatabaseMetaData.functionNoTable + " END "; } */ // Build query and result String sql; sql = "SELECT routine_catalog AS FUNCTION_CAT, " + " routine_schema AS FUNCTION_SCHEM, " + " routine_name AS FUNCTION_NAME," + " CAST('' AS VARCHAR(256)) AS REMARKS, " + " CASE data_type" + " WHEN 'USER-DEFINED' THEN 0" + " WHEN 'record' THEN 2" + " ELSE 1" + " END AS FUNCTION_TYPE, " + " specific_name AS SPECIFIC_NAME" + " FROM INFORMATION_SCHEMA.ROUTINES" + " WHERE routine_type LIKE 'FUNCTION' "; /* sql = "SELECT current_database() AS FUNCTION_CAT, n.nspname AS FUNCTION_SCHEM, p.proname AS FUNCTION_NAME, " + " d.description AS REMARKS, " + funcTypeSql + " AS FUNCTION_TYPE, " + " p.proname || '_' || p.prooid AS SPECIFIC_NAME " + "FROM pg_catalog.pg_proc_info p " + "INNER JOIN pg_catalog.pg_namespace n ON p.pronamespace=n.oid " + "LEFT JOIN pg_catalog.pg_description d ON p.prooid=d.objoid " + "WHERE true "; */ sql += getCatalogFilterCondition(catalog); /* if the user provides a schema then search inside the schema for it */ if (schemaPattern != null && !schemaPattern.isEmpty()) { // sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); sql += " AND routine_schema LIKE " + escapeQuotes(schemaPattern); } /* else { // if no schema is provided then limit the search inside the search_path sql += "and pg_function_is_visible(p.prooid)"; } */ if (functionNamePattern != null && !functionNamePattern.isEmpty()) { // sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern); sql += " AND routine_name LIKE " + escapeQuotes(functionNamePattern);; } /* if (connection.getHideUnprivilegedObjects()) { sql += " 
AND has_function_privilege(p.prooid,'EXECUTE')"; } */ // sql += " ORDER BY FUNCTION_SCHEM, FUNCTION_NAME, p.prooid::text "; sql += " ORDER BY routine_catalog, routine_schema, routine_name "; ResultSet rs = createMetaDataStatement().executeQuery(sql); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rs); return rs; } public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException { int columns = 17; if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, catalog, schemaPattern, functionNamePattern, columnNamePattern); /* Field[] f = new Field[columns]; List<Tuple> v = new ArrayList<Tuple>(); f[0] = new Field("FUNCTION_CAT", Oid.VARCHAR); f[1] = new Field("FUNCTION_SCHEM", Oid.VARCHAR); f[2] = new Field("FUNCTION_NAME", Oid.VARCHAR); f[3] = new Field("COLUMN_NAME", Oid.VARCHAR); f[4] = new Field("COLUMN_TYPE", Oid.INT2); f[5] = new Field("DATA_TYPE", Oid.INT2); f[6] = new Field("TYPE_NAME", Oid.VARCHAR); f[7] = new Field("PRECISION", Oid.INT2); f[8] = new Field("LENGTH", Oid.INT4); f[9] = new Field("SCALE", Oid.INT2); f[10] = new Field("RADIX", Oid.INT2); f[11] = new Field("NULLABLE", Oid.INT2); f[12] = new Field("REMARKS", Oid.VARCHAR); f[13] = new Field("CHAR_OCTET_LENGTH", Oid.INT4); f[14] = new Field("ORDINAL_POSITION", Oid.INT4); f[15] = new Field("IS_NULLABLE", Oid.VARCHAR); f[16] = new Field("SPECIFIC_NAME", Oid.VARCHAR); */ /* String sql; sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype,t.typrelid, " + " p.proargnames, p.proargmodes, p.proallargtypes, p.prooid " + " FROM pg_catalog.pg_proc_info p, pg_catalog.pg_namespace n, pg_catalog.pg_type t " + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid "; */ final String unknownColumnSize = "2147483647"; final String superMaxLength = "4194304 "; final String varbyteMaxLength = "1000000 "; final String geographyMaxLength = "1000000 "; StringBuilder functionColumnQuery 
= new StringBuilder(); functionColumnQuery.append( "SELECT PROCEDURE_CAT AS FUNCTION_CAT, " + " PROCEDURE_SCHEM AS FUNCTION_SCHEM, " + " PROCEDURE_NAME AS FUNCTION_NAME," + " COLUMN_NAME, " + " COLUMN_TYPE, " + " DATA_TYPE, " + " TYPE_NAME, " + " COLUMN_SIZE AS PRECISION, " + " LENGTH , " + " DECIMAL_DIGITS AS SCALE, " + " NUM_PREC_RADIX AS RADIX, " + " NULLABLE, " + " REMARKS, " + " CHAR_OCTET_LENGTH, " + " ORDINAL_POSITION, " + " IS_NULLABLE, " + " SPECIFIC_NAME " + " FROM ("); functionColumnQuery.append("SELECT current_database() AS PROCEDURE_CAT, " + " n.nspname as PROCEDURE_SCHEM, " + " p.proname AS PROCEDURE_NAME, " + " CAST(CASE ((array_upper(proargnames, 0) - array_lower(proargnames, 0)) > 0) " + " WHEN 't' THEN proargnames[array_upper(proargnames, 1)] " + " ELSE '' " + " END AS VARCHAR(256)) AS COLUMN_NAME, " + " CAST(CASE p.proretset " + " WHEN 't' THEN 3 " + " ELSE 5 " + " END AS SMALLINT) AS COLUMN_TYPE, " + " CAST(CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN '\"char\"' THEN 1" + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN 'date' THEN 91 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double precision' THEN 6 " + " WHEN 'float8' THEN 6 " + " WHEN 'float' THEN 6 " + " WHEN 'decimal' THEN 3 " + " WHEN 'numeric' THEN 2 " + " WHEN '_float4' THEN 2003 " + " WHEN '_aclitem' THEN 2003 " + " WHEN '_text' THEN 2003 " + " 
WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN '_int4' THEN 2003 " + " WHEN '_int2' THEN 2003 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -1 " + " WHEN 'varbyte' THEN -4 " + " WHEN 'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " ELSE 1111 " + " END AS SMALLINT) AS DATA_TYPE, " + " pg_catalog.format_type(p.prorettype, NULL) AS TYPE_NAME, " + " CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'text' THEN NULL " + " WHEN 'varchar' THEN NULL " + " WHEN 'character varying' THEN NULL " + " WHEN '\"char\"' THEN NULL " + " WHEN 'character' THEN NULL " + " WHEN 'nchar' THEN NULL " + " WHEN 'bpchar' THEN NULL " + " WHEN 'nvarchar' THEN NULL " + " WHEN 'text' THEN NULL " + " WHEN 'date' THEN NULL " + " WHEN 'timestamp' THEN 6 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 10 " + " WHEN 'int' THEN 10 " + " WHEN 'int4' THEN 10 " + " WHEN 'bigint' THEN 19 " + " WHEN 'int8' THEN 19 " + " WHEN 'decimal' THEN 38 " + " WHEN 'real' THEN 24 " + " WHEN 'float4' THEN 53 " + " WHEN 'double precision' THEN 53 " + " WHEN 'float8' THEN 53 " + " WHEN 'float' THEN 53 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN " + superMaxLength + " WHEN 'varbyte' THEN " + varbyteMaxLength + " WHEN 'geography' THEN " + geographyMaxLength + " WHEN 'intervaly2m' THEN 32 " + " WHEN 'intervald2s' THEN 64 " + " ELSE " + unknownColumnSize + " END AS COLUMN_SIZE, " + " CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'text' THEN NULL " + " WHEN 'varchar' THEN NULL " + " WHEN 'character varying' THEN NULL " + " WHEN '\"char\"' THEN NULL " + " WHEN 'character' THEN NULL " + " WHEN 'nchar' THEN NULL " + " WHEN 'bpchar' THEN NULL " + " WHEN 'nvarchar' THEN NULL " + " WHEN 'date' THEN 6 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'smallint' THEN 2 " + " WHEN 'int2' THEN 2 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 
'int4' THEN 4 " + " WHEN 'bigint' THEN 20 " + " WHEN 'int8' THEN 20 " + " WHEN 'decimal' THEN 8 " + " WHEN 'real' THEN 4 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 8 " + " WHEN 'float8' THEN 8 " + " WHEN 'float' THEN 8 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN " + superMaxLength + " WHEN 'varbyte' THEN " + varbyteMaxLength + " WHEN 'geography' THEN " + geographyMaxLength + " WHEN 'intervaly2m' THEN 4 " + " WHEN 'intervald2s' THEN 8 " + " END AS LENGTH, " + " CAST(CASE pg_catalog.format_type(p.prorettype, NULL) " + " WHEN 'smallint' THEN 0 " + " WHEN 'int2' THEN 0 " + " WHEN 'integer' THEN 0 " + " WHEN 'int' THEN 0 " + " WHEN 'int4' THEN 0 " + " WHEN 'bigint' THEN 0 " + " WHEN 'int8' THEN 0 " + " WHEN 'decimal' THEN 0 " + " WHEN 'real' THEN 8 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 17 " + " WHEN 'float' THEN 17 " + " WHEN 'float8' THEN 17 " + " WHEN 'numeric' THEN 0 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 6 " + " WHEN 'timestamp with time zone' THEN 6 " + " WHEN 'intervaly2m' THEN 0 " + " WHEN 'intervald2s' THEN 6 " + " ELSE NULL END AS SMALLINT) AS DECIMAL_DIGITS, " + " 10 AS NUM_PREC_RADIX, " + " CAST(2 AS SMALLINT) AS NULLABLE, " + " CAST('' AS VARCHAR(256)) AS REMARKS, " + " CAST(NULL AS SMALLINT) AS CHAR_OCTET_LENGTH, " + " CAST(0 AS SMALLINT) AS ORDINAL_POSITION, " + " CAST('' AS VARCHAR(256)) AS IS_NULLABLE, " + " p.proname || '_' || p.prooid AS SPECIFIC_NAME, " + " p.prooid as PROOID, " + " -1 AS PROARGINDEX " + " FROM pg_catalog.pg_proc_info p LEFT JOIN pg_namespace n ON n.oid = p.pronamespace " + " WHERE pg_catalog.format_type(p.prorettype, NULL) != 'void' " + " AND prokind = 'f' "); functionColumnQuery.append(getCatalogFilterCondition(catalog)); if (schemaPattern != null && !schemaPattern.isEmpty()) { // sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); functionColumnQuery.append(" AND n.nspname LIKE " + 
escapeQuotes(schemaPattern)); } if (functionNamePattern != null && !functionNamePattern.isEmpty()) { // sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern); functionColumnQuery.append(" AND p.proname LIKE " + escapeQuotes(functionNamePattern)); } if (columnNamePattern != null && !columnNamePattern.isEmpty()) { functionColumnQuery.append(" AND COLUMN_NAME LIKE " + escapeQuotes(columnNamePattern)); } functionColumnQuery.append(" UNION ALL "); functionColumnQuery.append(" SELECT DISTINCT current_database() AS PROCEDURE_CAT, " + " PROCEDURE_SCHEM, " + " PROCEDURE_NAME, " + "CAST(CASE (char_length(COLUMN_NAME) > 0) WHEN 't' THEN COLUMN_NAME " + "ELSE '' " + "END AS VARCHAR(256)) AS COLUMN_NAME, " + " CAST( CASE COLUMN_TYPE " + " WHEN 105 THEN 1 " + " WHEN 98 THEN 2 " + " WHEN 111 THEN 4 " + " ELSE 5 END AS SMALLINT) AS COLUMN_TYPE, " + " CAST(CASE DATA_TYPE " + " WHEN 'text' THEN 12 " + " WHEN 'bit' THEN -7 " + " WHEN 'bool' THEN -7 " + " WHEN 'boolean' THEN -7 " + " WHEN 'varchar' THEN 12 " + " WHEN 'character varying' THEN 12 " + " WHEN '\"char\"' THEN 1 " + " WHEN 'char' THEN 1 " + " WHEN 'character' THEN 1 " + " WHEN 'nchar' THEN 1 " + " WHEN 'bpchar' THEN 1 " + " WHEN 'nvarchar' THEN 12 " + " WHEN 'date' THEN 91 " + " WHEN 'timestamp' THEN 93 " + " WHEN 'timestamp without time zone' THEN 93 " + " WHEN 'timestamptz' THEN 2014 " + " WHEN 'timestamp with time zone' THEN 2014 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN -5 " + " WHEN 'int8' THEN -5 " + " WHEN 'real' THEN 7 " + " WHEN 'float4' THEN 7 " + " WHEN 'double precision' THEN 6 " + " WHEN 'float8' THEN 6 " + " WHEN 'float' THEN 6 " + " WHEN 'decimal' THEN 3 " + " WHEN 'numeric' THEN 2 " + " WHEN 'bytea' THEN -2 " + " WHEN 'oid' THEN -5 " + " WHEN 'name' THEN 12 " + " WHEN 'ARRAY' THEN 2003 " + " WHEN 'geometry' THEN -4 " + " WHEN 'super' THEN -1 " + " WHEN 'varbyte' THEN -4 " + " WHEN 
'geography' THEN -4 " + " WHEN 'intervaly2m' THEN 1111 " + " WHEN 'intervald2s' THEN 1111 " + " ELSE 1111 " + " END AS SMALLINT) AS DATA_TYPE, " + " TYPE_NAME, " + " CASE COLUMN_SIZE " + " WHEN 'text' THEN NULL " + " WHEN 'varchar' THEN NULL " + " WHEN 'character varying' THEN NULL " + " WHEN '\"char\"' THEN NULL " + " WHEN 'character' THEN NULL " + " WHEN 'nchar' THEN NULL " + " WHEN 'bpchar' THEN NULL " + " WHEN 'nvarchar' THEN NULL " + " WHEN 'text' THEN NULL " + " WHEN 'date' THEN NULL " + " WHEN 'timestamp' THEN 6 " + " WHEN 'smallint' THEN 5 " + " WHEN 'int2' THEN 5 " + " WHEN 'integer' THEN 10 " + " WHEN 'int' THEN 10 " + " WHEN 'int4' THEN 10 " + " WHEN 'bigint' THEN 19 " + " WHEN 'int8' THEN 19 " + " WHEN 'decimal' THEN 38 " + " WHEN 'real' THEN 24 " + " WHEN 'float4' THEN 53 " + " WHEN 'double precision' THEN 53 " + " WHEN 'float8' THEN 53 " + " WHEN 'float' THEN 53 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN " + superMaxLength + " WHEN 'varbyte' THEN " + varbyteMaxLength + " WHEN 'geography' THEN " + geographyMaxLength + " WHEN 'intervaly2m' THEN NULL " + " WHEN 'intervald2s' THEN NULL " + " ELSE " + unknownColumnSize + " END AS COLUMN_SIZE, " + " CASE LENGTH " + " WHEN 'text' THEN NULL " + " WHEN 'varchar' THEN NULL " + " WHEN 'character varying' THEN NULL " + " WHEN '\"char\"' THEN NULL " + " WHEN 'character' THEN NULL " + " WHEN 'nchar' THEN NULL " + " WHEN 'bpchar' THEN NULL " + " WHEN 'nvarchar' THEN NULL " + " WHEN 'date' THEN 6 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'smallint' THEN 2 " + " WHEN 'int2' THEN 2 " + " WHEN 'integer' THEN 4 " + " WHEN 'int' THEN 4 " + " WHEN 'int4' THEN 4 " + " WHEN 'bigint' THEN 20 " + " WHEN 'int8' THEN 20 " + " WHEN 'decimal' THEN 8 " + " WHEN 'real' THEN 4 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 8 " + " WHEN 'float8' THEN 8 " + " WHEN 'float' THEN 8 " + " WHEN 'geometry' THEN NULL " + " WHEN 'super' THEN " + superMaxLength + " WHEN 'varbyte' THEN " + varbyteMaxLength + " WHEN 
'geography' THEN " + geographyMaxLength + " WHEN 'intervaly2m' THEN 4 " + " WHEN 'intervald2s' THEN 8 " + " END AS LENGTH, " + " CAST(CASE DECIMAL_DIGITS " + " WHEN 'smallint' THEN 0 " + " WHEN 'int2' THEN 0 " + " WHEN 'integer' THEN 0 " + " WHEN 'int' THEN 0 " + " WHEN 'int4' THEN 0 " + " WHEN 'bigint' THEN 0 " + " WHEN 'int8' THEN 0 " + " WHEN 'decimal' THEN 0 " + " WHEN 'real' THEN 8 " + " WHEN 'float4' THEN 8 " + " WHEN 'double precision' THEN 17 " + " WHEN 'float' THEN 17 " + " WHEN 'float8' THEN 17 " + " WHEN 'numeric' THEN 0 " + " WHEN 'timestamp' THEN 6 " + " WHEN 'timestamp without time zone' THEN 6 " + " WHEN 'timestamptz' THEN 6 " + " WHEN 'timestamp with time zone' THEN 6 " + " WHEN 'intervaly2m' THEN 0 " + " WHEN 'intervald2s' THEN 6 " + " ELSE NULL END AS SMALLINT) AS DECIMAL_DIGITS, " + " 10 AS NUM_PREC_RADIX, " + " CAST(2 AS SMALLINT) AS NULLABLE, " + " CAST(''AS VARCHAR(256)) AS REMARKS, " + " CAST(NULL AS SMALLINT) AS CHAR_OCTET_LENGTH, " + " PROARGINDEX AS ORDINAL_POSITION, " + " CAST(''AS VARCHAR(256)) AS IS_NULLABLE, " + " SPECIFIC_NAME, PROOID, PROARGINDEX " + " FROM ( " + " SELECT current_database() AS PROCEDURE_CAT," + " n.nspname AS PROCEDURE_SCHEM, " + " proname AS PROCEDURE_NAME, " + " CASE WHEN (proallargtypes is NULL) THEN proargnames[pos+1] " + " ELSE proargnames[pos] END AS COLUMN_NAME," + " CASE WHEN proargmodes is NULL THEN 105 " + " ELSE CAST(proargmodes[pos] AS INT) END AS COLUMN_TYPE, " + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS DATA_TYPE," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL) " + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS TYPE_NAME," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS COLUMN_SIZE," + " CASE WHEN proallargtypes is NULL THEN 
pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS LENGTH," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS DECIMAL_DIGITS," + " CASE WHEN proallargtypes is NULL THEN pg_catalog.format_type(proargtypes[pos], NULL)" + " ELSE pg_catalog.format_type(proallargtypes[pos], NULL) END AS RADIX," + " CAST(2 AS SMALLINT) AS NULLABLE," + " CAST(''AS VARCHAR(256)) AS REMARKS," + " pg_catalog.format_type(proargtypes[pos], NULL) AS CHAR_OCTET_LENGTH," + " CASE WHEN (proallargtypes is NULL) THEN pos+1" + " WHEN pos = array_upper(proallargtypes, 1) THEN 0" + " ELSE pos END AS ORDINAL_POSITION," + " CAST('' AS VARCHAR(256)) AS IS_NULLABLE," + " p.prooid AS PROOID," + " CASE WHEN (proallargtypes is NULL) THEN pos+1" + " WHEN prokind = 'f' AND pos = array_upper(proallargtypes, 1) THEN 0" + " ELSE pos END AS PROARGINDEX, " + " p.proname || '_' || p.prooid AS SPECIFIC_NAME " + " FROM (pg_catalog.pg_proc_info p LEFT JOIN pg_namespace n" + " ON n.oid = p.pronamespace)" + " LEFT JOIN (SELECT " + " CASE WHEN (proallargtypes IS NULL) " + " THEN generate_series(array_lower(proargnames, 1), array_upper(proargnames, 1))-1" + " ELSE generate_series(array_lower(proargnames, 1), array_upper(proargnames, 1)+1)-1 " + " END AS pos" + " FROM pg_catalog.pg_proc_info p ) AS s ON (pos >= 0 AND pos <= pronargs+1)" + " WHERE prokind = 'f' "); if (schemaPattern != null && !schemaPattern.isEmpty()) { functionColumnQuery.append(" AND n.nspname LIKE " + escapeQuotes(schemaPattern)); } if (functionNamePattern != null && !functionNamePattern.isEmpty()) { functionColumnQuery.append(" AND p.proname LIKE " + escapeQuotes(functionNamePattern)); } if (columnNamePattern != null && !columnNamePattern.isEmpty()) { functionColumnQuery.append(" AND COLUMN_NAME LIKE " + escapeQuotes(columnNamePattern)); } functionColumnQuery.append(" ) AS 
INPUT_PARAM_TABLE" + " WHERE ORDINAL_POSITION IS NOT NULL" + " ) AS RESULT_SET WHERE (DATA_TYPE != 1111 OR (TYPE_NAME IS NOT NULL AND TYPE_NAME != '-'))" + " ORDER BY PROCEDURE_CAT ,PROCEDURE_SCHEM," + " PROCEDURE_NAME, PROOID, PROARGINDEX, COLUMN_TYPE DESC"); // sql += " ORDER BY n.nspname, p.proname, p.prooid::text "; // byte[] isnullableUnknown = new byte[0]; // Statement stmt = connection.createStatement(); // ResultSet rs = stmt.executeQuery(functionColumnQuery.toString()); // sql /* while (rs.next()) { byte[] schema = rs.getBytes("nspname"); byte[] functionName = rs.getBytes("proname"); byte[] specificName = connection.encodeString(rs.getString("proname") + "_" + rs.getString("prooid")); int returnType = (int) rs.getLong("prorettype"); String returnTypeType = rs.getString("typtype"); int returnTypeRelid = (int) rs.getLong("typrelid"); String strArgTypes = rs.getString("proargtypes"); StringTokenizer st = new StringTokenizer(strArgTypes); List<Long> argTypes = new ArrayList<Long>(); while (st.hasMoreTokens()) { argTypes.add(Long.valueOf(st.nextToken())); } String[] argNames = null; Array argNamesArray = rs.getArray("proargnames"); if (argNamesArray != null) { argNames = (String[]) argNamesArray.getArray(); } String[] argModes = null; Array argModesArray = rs.getArray("proargmodes"); if (argModesArray != null) { argModes = (String[]) argModesArray.getArray(); } int numArgs = argTypes.size(); Long[] allArgTypes = null; Array allArgTypesArray = rs.getArray("proallargtypes"); if (allArgTypesArray != null) { allArgTypes = (Long[]) allArgTypesArray.getArray(); numArgs = allArgTypes.length; } // decide if we are returning a single column result. 
if (returnTypeType.equals("b") || returnTypeType.equals("d") || returnTypeType.equals("e") || (returnTypeType.equals("p") && argModesArray == null)) { byte[][] tuple = new byte[columns][]; tuple[0] = connection.encodeString(connection.getCatalog()); tuple[1] = schema; tuple[2] = functionName; tuple[3] = connection.encodeString("returnValue"); tuple[4] = connection .encodeString(Integer.toString(java.sql.DatabaseMetaData.functionReturn)); tuple[5] = connection .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(returnType))); tuple[6] = connection.encodeString(connection.getTypeInfo().getRSType(returnType)); tuple[7] = null; tuple[8] = null; tuple[9] = null; tuple[10] = null; tuple[11] = connection .encodeString(Integer.toString(java.sql.DatabaseMetaData.functionNullableUnknown)); tuple[12] = null; tuple[14] = connection.encodeString(Integer.toString(0)); tuple[15] = isnullableUnknown; tuple[16] = specificName; v.add(new Tuple(tuple)); } // Add a row for each argument. for (int i = 0; i < numArgs; i++) { byte[][] tuple = new byte[columns][]; tuple[0] = connection.encodeString(connection.getCatalog()); tuple[1] = schema; tuple[2] = functionName; if (argNames != null) { tuple[3] = connection.encodeString(argNames[i]); } else { tuple[3] = connection.encodeString("$" + (i + 1)); } int columnMode = DatabaseMetaData.functionColumnIn; if (argModes != null && argModes[i] != null) { if (argModes[i].equals("o")) { columnMode = DatabaseMetaData.functionColumnOut; } else if (argModes[i].equals("b")) { columnMode = DatabaseMetaData.functionColumnInOut; } else if (argModes[i].equals("t")) { columnMode = DatabaseMetaData.functionReturn; } } tuple[4] = connection.encodeString(Integer.toString(columnMode)); int argOid; if (allArgTypes != null) { argOid = allArgTypes[i].intValue(); } else { argOid = argTypes.get(i).intValue(); } tuple[5] = connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(argOid))); tuple[6] = 
connection.encodeString(connection.getTypeInfo().getRSType(argOid)); tuple[7] = null; tuple[8] = null; tuple[9] = null; tuple[10] = null; tuple[11] = connection.encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown)); tuple[12] = null; tuple[14] = connection.encodeString(Integer.toString(i + 1)); tuple[15] = isnullableUnknown; tuple[16] = specificName; v.add(new Tuple(tuple)); } // if we are returning a multi-column result. if (returnTypeType.equals("c") || (returnTypeType.equals("p") && argModesArray != null)) { String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a " + " WHERE a.attrelid = " + returnTypeRelid + " AND NOT a.attisdropped AND a.attnum > 0 ORDER BY a.attnum "; Statement columnstmt = connection.createStatement(); ResultSet columnrs = columnstmt.executeQuery(columnsql); while (columnrs.next()) { int columnTypeOid = (int) columnrs.getLong("atttypid"); byte[][] tuple = new byte[columns][]; tuple[0] = connection.encodeString(connection.getCatalog()); tuple[1] = schema; tuple[2] = functionName; tuple[3] = columnrs.getBytes("attname"); tuple[4] = connection .encodeString(Integer.toString(java.sql.DatabaseMetaData.functionColumnResult)); tuple[5] = connection .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(columnTypeOid))); tuple[6] = connection.encodeString(connection.getTypeInfo().getRSType(columnTypeOid)); tuple[7] = null; tuple[8] = null; tuple[9] = null; tuple[10] = null; tuple[11] = connection .encodeString(Integer.toString(java.sql.DatabaseMetaData.functionNullableUnknown)); tuple[12] = null; tuple[14] = connection.encodeString(Integer.toString(0)); tuple[15] = isnullableUnknown; tuple[16] = specificName; v.add(new Tuple(tuple)); } columnrs.close(); columnstmt.close(); } } */ // rs.close(); // stmt.close(); // ResultSet rc = ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); ResultSet rc = createMetaDataStatement().executeQuery(functionColumnQuery.toString()); if 
(RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);
    return rc;
  }

  /**
   * Pseudo (hidden) columns are not exposed by this driver.
   *
   * @throws SQLException always, with a "not implemented" state
   */
  public ResultSet getPseudoColumns(String catalog, String schemaPattern,
      String tableNamePattern, String columnNamePattern) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "getPseudoColumns(String, String, String, String)");
  }

  public boolean generatedKeyAlwaysReturned() throws SQLException {
    return true;
  }

  public boolean supportsSavepoints() throws SQLException {
    return true;
  }

  public boolean supportsNamedParameters() throws SQLException {
    return false;
  }

  public boolean supportsMultipleOpenResults() throws SQLException {
    return false;
  }

  public boolean supportsGetGeneratedKeys() throws SQLException {
    // We don't support returning generated keys by column index,
    // but that should be a rarer case than the ones we do support.
    //
    return true;
  }

  /** Supertype information is not implemented for Redshift. */
  public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern)
      throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "getSuperTypes(String,String,String)");
  }

  /** Supertable information is not implemented for Redshift. */
  public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern)
      throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "getSuperTables(String,String,String,String)");
  }

  /** UDT attribute metadata is not implemented for Redshift. */
  public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
      String attributeNamePattern) throws SQLException {
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(),
        "getAttributes(String,String,String,String)");
  }

  // Both holdability modes are reported as supported; see getResultSetHoldability()
  // for the default actually used.
  public boolean supportsResultSetHoldability(int holdability) throws SQLException {
    return true;
  }

  public int getResultSetHoldability() throws SQLException {
    return ResultSet.HOLD_CURSORS_OVER_COMMIT;
  }

  @Override
  public int getDatabaseMajorVersion() throws SQLException {
    return connection.getServerMajorVersion();
  }

  @Override
  public int getDatabaseMinorVersion() throws SQLException {
    return connection.getServerMinorVersion();
  }

  @Override
  public int getJDBCMajorVersion() {
    return com.amazon.redshift.util.DriverInfo.JDBC_MAJOR_VERSION;
  }

  @Override
  public int getJDBCMinorVersion() {
    return com.amazon.redshift.util.DriverInfo.JDBC_MINOR_VERSION;
  }

  public int getSQLStateType() throws SQLException {
    // sqlStateSQL indicates SQL:2003-style SQLSTATE codes.
    return sqlStateSQL;
  }

  public boolean locatorsUpdateCopy() throws SQLException {
    /*
     * Currently LOB's aren't updateable at all, so it doesn't matter what we return. We don't throw
     * the notImplemented Exception because the 1.5 JDK's CachedRowSet calls this method regardless
     * of whether large objects are used.
     */
    return true;
  }

  public boolean supportsStatementPooling() throws SQLException {
    return false;
  }

  /**
   * Helper method to determine if there is a possible external schema pattern match.
   *
   * Returns EXTERNAL_SCHEMA_QUERY when at least one external schema matches the
   * pattern (single-database mode only), LOCAL_SCHEMA_QUERY when none do, and
   * NO_SCHEMA_UNIVERSAL_QUERY when both external and local schemas must be queried
   * (empty/null pattern, or datashare / cross-database connections).
   *
   * @throws SQLException If an error occurs.
   */
  private int getExtSchemaPatternMatch(String schemaPattern)
      throws SQLException {
    if (null != schemaPattern && !schemaPattern.equals("")) {
      if (isSingleDatabaseMetaData()) {
        String sql = "select 1 from svv_external_schemas where schemaname like "
            + escapeQuotes(schemaPattern);

        Statement stmt = null;
        ResultSet rs = null;
        try {
          stmt = connection.createStatement();
          rs = stmt.executeQuery(sql);
          if (rs.next()) {
            return EXTERNAL_SCHEMA_QUERY; // Optimized query
          }
          else
            return LOCAL_SCHEMA_QUERY; // Only local schema
        } finally {
          if (rs != null)
            rs.close();
          if (stmt != null)
            stmt.close();
        }
      }
      else {
        // Datashare or cross-db support always go through
        // svv_all* view.
        return NO_SCHEMA_UNIVERSAL_QUERY; // Query both external and local schema
      }
    }
    else {
      // If the schema filter is null or empty, treat it as if there was a
      // matching schema found.
      return NO_SCHEMA_UNIVERSAL_QUERY; // Query both external and local schema
    }
  }

  // True when metadata should be restricted to the connected database
  // (either by connection option or because the server lacks multi-database catalogs).
  private boolean isSingleDatabaseMetaData() {
    return (isDatabaseMetadataCurrentDbOnly()
        || !isMultiDatabasesCatalogEnableInServer());
  }

  private boolean isDatabaseMetadataCurrentDbOnly() {
    return connection.isDatabaseMetadataCurrentDbOnly();
  }

  private boolean isMultiDatabasesCatalogEnableInServer() {
    return connection.getQueryExecutor().isDatashareEnabled();
  }

  private String getCatalogFilterCondition(String catalog) throws SQLException {
    return getCatalogFilterCondition(catalog, true, null);
  }

  /**
   * Builds an " AND ..." SQL predicate constraining results to {@code catalog},
   * or an empty string when no catalog filter applies. The catalog argument is
   * matched exactly (it is not a pattern).
   */
  private String getCatalogFilterCondition(String catalog,
      boolean apiSupportedOnlyForConnectedDatabase, String databaseColName)
      throws SQLException {
    String catalogFilter = "";
    if (catalog != null && !catalog.isEmpty()) {
      if (isSingleDatabaseMetaData()
          || apiSupportedOnlyForConnectedDatabase) {
        // Catalog parameter is not a pattern.
        catalogFilter = " AND current_database() = " + escapeOnlyQuotes(catalog);
      }
      else {
        if (databaseColName == null)
          databaseColName = "database_name";

        catalogFilter = " AND " + databaseColName + " = " + escapeOnlyQuotes(catalog);
      }
    }

    return catalogFilter;
  }
}
8,532
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftSQLXML.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import org.xml.sax.ErrorHandler; import org.xml.sax.InputSource; import org.xml.sax.SAXParseException; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; import java.io.StringReader; import java.io.StringWriter; import java.io.Writer; import java.sql.SQLException; import java.sql.SQLXML; import javax.xml.XMLConstants; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLOutputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; import javax.xml.stream.XMLStreamWriter; import javax.xml.transform.Result; import javax.xml.transform.Source; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.dom.DOMResult; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.sax.SAXResult; import javax.xml.transform.sax.SAXSource; import javax.xml.transform.sax.SAXTransformerFactory; import javax.xml.transform.sax.TransformerHandler; import javax.xml.transform.stax.StAXResult; import javax.xml.transform.stax.StAXSource; import javax.xml.transform.stream.StreamResult; import javax.xml.transform.stream.StreamSource; public class RedshiftSQLXML implements SQLXML { private final BaseConnection conn; private String data; // The actual data contained. private boolean initialized; // Has someone assigned the data for this object? 
private boolean active; // Is anyone in the process of loading data into us? private boolean freed; private ByteArrayOutputStream byteArrayOutputStream; private StringWriter stringWriter; private DOMResult domResult; public RedshiftSQLXML(BaseConnection conn) { this(conn, null, false); } public RedshiftSQLXML(BaseConnection conn, String data) { this(conn, data, true); } private RedshiftSQLXML(BaseConnection conn, String data, boolean initialized) { this.conn = conn; this.data = data; this.initialized = initialized; this.active = false; this.freed = false; } @Override public synchronized void free() { freed = true; data = null; } @Override public synchronized InputStream getBinaryStream() throws SQLException { checkFreed(); ensureInitialized(); if (data == null) { return null; } try { return new ByteArrayInputStream(conn.getEncoding().encode(data)); } catch (IOException ioe) { // This should be a can't happen exception. We just // decoded this data, so it would be surprising that // we couldn't encode it. // For this reason don't make it translatable. throw new RedshiftException("Failed to re-encode xml data.", RedshiftState.DATA_ERROR, ioe); } } @Override public synchronized Reader getCharacterStream() throws SQLException { checkFreed(); ensureInitialized(); if (data == null) { return null; } return new StringReader(data); } // We must implement this unsafely because that's what the // interface requires. Because it says we're returning T // which is unknown, none of the return values can satisfy it // as Java isn't going to understand the if statements that // ensure they are the same. 
// @Override public synchronized <T extends Source> T getSource(Class<T> sourceClass) throws SQLException { checkFreed(); ensureInitialized(); if (data == null) { return null; } try { if (sourceClass == null || DOMSource.class.equals(sourceClass)) { DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); // https://www.aristotle.a2z.com/implementations/255 factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); factory.setXIncludeAware(false); factory.setExpandEntityReferences(false); factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false); factory.setFeature("http://xml.org/sax/features/external-general-entities", false); DocumentBuilder builder = factory.newDocumentBuilder(); builder.setErrorHandler(new NonPrintingErrorHandler()); InputSource input = new InputSource(new StringReader(data)); return (T) new DOMSource(builder.parse(input)); } else if (SAXSource.class.equals(sourceClass)) { InputSource is = new InputSource(new StringReader(data)); return (T) new SAXSource(is); } else if (StreamSource.class.equals(sourceClass)) { return (T) new StreamSource(new StringReader(data)); } else if (StAXSource.class.equals(sourceClass)) { XMLInputFactory xif = XMLInputFactory.newInstance(); xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); xif.setProperty("javax.xml.stream.isSupportingExternalEntities", false); XMLStreamReader xsr = xif.createXMLStreamReader(new StringReader(data)); return (T) new StAXSource(xsr); } } catch (Exception e) { throw new RedshiftException(GT.tr("Unable to decode xml data."), RedshiftState.DATA_ERROR, e); } throw new RedshiftException(GT.tr("Unknown XML Source class: {0}", sourceClass), RedshiftState.INVALID_PARAMETER_TYPE); } @Override public synchronized String getString() throws SQLException { checkFreed(); ensureInitialized(); return data; } @Override public synchronized OutputStream setBinaryStream() throws SQLException { checkFreed(); initialize(); active = true; 
byteArrayOutputStream = new ByteArrayOutputStream(); return byteArrayOutputStream; } @Override public synchronized Writer setCharacterStream() throws SQLException { checkFreed(); initialize(); active = true; stringWriter = new StringWriter(); return stringWriter; } @Override public synchronized <T extends Result> T setResult(Class<T> resultClass) throws SQLException { checkFreed(); initialize(); if (resultClass == null || DOMResult.class.equals(resultClass)) { domResult = new DOMResult(); active = true; return (T) domResult; } else if (SAXResult.class.equals(resultClass)) { try { SAXTransformerFactory transformerFactory = (SAXTransformerFactory) SAXTransformerFactory.newInstance(); // https://www.aristotle.a2z.com/implementations/255 transformerFactory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, ""); transformerFactory.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, ""); TransformerHandler transformerHandler = transformerFactory.newTransformerHandler(); stringWriter = new StringWriter(); transformerHandler.setResult(new StreamResult(stringWriter)); active = true; return (T) new SAXResult(transformerHandler); } catch (TransformerException te) { throw new RedshiftException(GT.tr("Unable to create SAXResult for SQLXML."), RedshiftState.UNEXPECTED_ERROR, te); } } else if (StreamResult.class.equals(resultClass)) { stringWriter = new StringWriter(); active = true; return (T) new StreamResult(stringWriter); } else if (StAXResult.class.equals(resultClass)) { stringWriter = new StringWriter(); try { XMLOutputFactory xof = XMLOutputFactory.newInstance(); XMLStreamWriter xsw = xof.createXMLStreamWriter(stringWriter); active = true; return (T) new StAXResult(xsw); } catch (XMLStreamException xse) { throw new RedshiftException(GT.tr("Unable to create StAXResult for SQLXML"), RedshiftState.UNEXPECTED_ERROR, xse); } } throw new RedshiftException(GT.tr("Unknown XML Result class: {0}", resultClass), RedshiftState.INVALID_PARAMETER_TYPE); } @Override public synchronized 
void setString(String value) throws SQLException { checkFreed(); initialize(); data = value; } private void checkFreed() throws SQLException { if (freed) { throw new RedshiftException(GT.tr("This SQLXML object has already been freed."), RedshiftState.OBJECT_NOT_IN_STATE); } } private void ensureInitialized() throws SQLException { if (!initialized) { throw new RedshiftException( GT.tr( "This SQLXML object has not been initialized, so you cannot retrieve data from it."), RedshiftState.OBJECT_NOT_IN_STATE); } // Is anyone loading data into us at the moment? if (!active) { return; } if (byteArrayOutputStream != null) { try { data = conn.getEncoding().decode(byteArrayOutputStream.toByteArray()); } catch (IOException ioe) { throw new RedshiftException(GT.tr("Failed to convert binary xml data to encoding: {0}.", conn.getEncoding().name()), RedshiftState.DATA_ERROR, ioe); } finally { byteArrayOutputStream = null; active = false; } } else if (stringWriter != null) { // This is also handling the work for Stream, SAX, and StAX Results // as they will use the same underlying stringwriter variable. // data = stringWriter.toString(); stringWriter = null; active = false; } else if (domResult != null) { // Copy the content from the result to a source // and use the identify transform to get it into a // friendlier result format. 
try { TransformerFactory factory = TransformerFactory.newInstance(); // Disable External Entities (XXE) parsing for Java // https://www.aristotle.a2z.com/implementations/255 factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, ""); factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, ""); Transformer transformer = factory.newTransformer(); DOMSource domSource = new DOMSource(domResult.getNode()); StringWriter stringWriter = new StringWriter(); StreamResult streamResult = new StreamResult(stringWriter); transformer.transform(domSource, streamResult); data = stringWriter.toString(); } catch (TransformerException te) { throw new RedshiftException(GT.tr("Unable to convert DOMResult SQLXML data to a string."), RedshiftState.DATA_ERROR, te); } finally { domResult = null; active = false; } } } private void initialize() throws SQLException { if (initialized) { throw new RedshiftException( GT.tr( "This SQLXML object has already been initialized, so you cannot manipulate it further."), RedshiftState.OBJECT_NOT_IN_STATE); } initialized = true; } // Don't clutter System.err with errors the user can't silence. // If something bad really happens an exception will be thrown. static class NonPrintingErrorHandler implements ErrorHandler { public void error(SAXParseException e) { } public void fatalError(SAXParseException e) { } public void warning(SAXParseException e) { } } }
8,533
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftResultSetMetaDataImpl.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.RedshiftResultSetMetaData;
import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.core.Field;
import com.amazon.redshift.core.v3.ConnectionFactoryImpl;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.Gettable;
import com.amazon.redshift.util.GettableHashMap;
import com.amazon.redshift.util.JdbcBlackHole;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;

/**
 * ResultSetMetaData implementation backed by the driver's {@link Field}
 * descriptors. Column/table/schema details that the server did not send with
 * the result are fetched lazily from the pg_catalog tables (see
 * {@link #fetchFieldMetaData}) and cached per connection.
 */
public class RedshiftResultSetMetaDataImpl implements ResultSetMetaData, RedshiftResultSetMetaData {
  protected final BaseConnection connection;
  protected final Field[] fields;

  // True once per-field metadata is available (either because the server
  // protocol already includes extended result metadata, or after a catalog
  // lookup completed for every field).
  private boolean fieldInfoFetched;

  /**
   * Initialise for a result with a tuple set and a field descriptor set
   *
   * @param connection the connection to retrieve metadata
   * @param fields the array of field descriptors
   */
  public RedshiftResultSetMetaDataImpl(BaseConnection connection, Field[] fields) {
    this.connection = connection;
    this.fields = fields;
    // Newer server protocols ship extended metadata with the result, so no
    // separate catalog query is needed.
    this.fieldInfoFetched =
        (connection.getQueryExecutor().getServerProtocolVersion()
            >= ConnectionFactoryImpl.EXTENDED_RESULT_METADATA_SERVER_PROTOCOL_VERSION)
        ? true
        : false;
  }

  public int getColumnCount() throws SQLException {
    int rc = fields.length;

    if (RedshiftLogger.isEnable())
      connection.getLogger().logFunction(false, rc);

    return rc;
  }

  /**
   * {@inheritDoc}
   *
   * <p>It is believed that Redshift does not support this feature.
   *
   * @param column the first column is 1, the second is 2...
   * @return true if so
   * @exception SQLException if a database access error occurs
   */
  public boolean isAutoIncrement(int column) throws SQLException {
    fetchFieldMetaData();
    Field field = getField(column);
    FieldMetadata metadata = field.getMetadata();
    return metadata != null && metadata.autoIncrement;
  }

  /**
   * {@inheritDoc}
   *
   * <p>Does a column's case matter? ASSUMPTION: Any field that is not obviously case insensitive is
   * assumed to be case sensitive
   *
   * @param column the first column is 1, the second is 2...
   * @return true if so
   * @exception SQLException if a database access error occurs
   */
  public boolean isCaseSensitive(int column) throws SQLException {
    Field field = getField(column);

    if (connection.getQueryExecutor().getServerProtocolVersion()
        >= ConnectionFactoryImpl.EXTENDED2_RESULT_METADATA_SERVER_PROTOCOL_VERSION) {
      // Server-supplied flag is authoritative when available.
      FieldMetadata metadata = field.getMetadata();
      return (metadata == null) ? false : metadata.caseSensitive;
    }
    else
      return connection.getTypeInfo().isCaseSensitive(field.getOID());
  }

  /**
   * {@inheritDoc}
   *
   * <p>Can the column be used in a WHERE clause? Basically for this, I split the functions into two
   * types: recognised types (which are always useable), and OTHER types (which may or may not be
   * useable). The OTHER types, for now, I will assume they are useable. We should really query the
   * catalog to see if they are useable.
   *
   * @param column the first column is 1, the second is 2...
   * @return true if they can be used in a WHERE clause
   * @exception SQLException if a database access error occurs
   */
  @Override
  public boolean isSearchable(int column) throws SQLException {
    // fetchFieldMetaData();
    Field field = getField(column);
    FieldMetadata metadata = field.getMetadata();
    // Without metadata, assume searchable (see javadoc above).
    return (metadata == null) ? true : metadata.searchable;
  }

  /**
   * {@inheritDoc}
   *
   * <p>Is the column a cash value? 6.1 introduced the cash/money type, which haven't been
   * incorporated as of 970414, so I just check the type name for both 'cash' and 'money'
   *
   * @param column the first column is 1, the second is 2...
   * @return true if its a cash column
   * @exception SQLException if a database access error occurs
   */
  @Override
  public boolean isCurrency(int column) throws SQLException {
    String typeName = getRSType(column);

    return typeName.equals("cash") || typeName.equals("money");
  }

  @Override
  public int isNullable(int column) throws SQLException {
    fetchFieldMetaData();
    Field field = getField(column);
    FieldMetadata metadata = field.getMetadata();
    // Default to "nullable" when metadata could not be resolved.
    return metadata == null ? ResultSetMetaData.columnNullable : metadata.nullable;
  }

  /**
   * {@inheritDoc}
   *
   * <p>Is the column a signed number? In Redshift, all numbers are signed, so this is trivial.
   * However, strings are not signed (duh!)
   *
   * @param column the first column is 1, the second is 2...
   * @return true if so
   * @exception SQLException if a database access error occurs
   */
  public boolean isSigned(int column) throws SQLException {
    Field field = getField(column);
    return connection.getTypeInfo().isSigned(field.getOID());
  }

  public int getColumnDisplaySize(int column) throws SQLException {
    Field field = getField(column);
    return connection.getTypeInfo().getDisplaySize(field.getOID(), field.getMod());
  }

  public String getColumnLabel(int column) throws SQLException {
    Field field = getField(column);
    return field.getColumnLabel();
  }

  public String getColumnName(int column) throws SQLException {
    return getColumnLabel(column);
  }

  /**
   * Returns the underlying (pre-alias) column name, or "" when the column is
   * not backed by a table column.
   */
  public String getBaseColumnName(int column) throws SQLException {
    Field field = getField(column);
    if (field.getTableOid() == 0) {
      return "";
    }
    fetchFieldMetaData();
    FieldMetadata metadata = field.getMetadata();
    return metadata == null ? "" : metadata.columnName;
  }

  public String getSchemaName(int column) throws SQLException {
    // fetchFieldMetaData();
    Field field = getField(column);
    FieldMetadata metadata = field.getMetadata();
    return metadata == null ? "" : metadata.schemaName;
  }

  /**
   * Attaches cached metadata to each field that still lacks it.
   *
   * @return true when every field ended up with metadata
   */
  private boolean populateFieldsWithMetadata(Gettable<FieldMetadata.Key, FieldMetadata> metadata) {
    boolean allOk = true;
    for (Field field : fields) {
      if (field.getMetadata() != null) {
        // No need to update metadata
        continue;
      }

      final FieldMetadata fieldMetadata =
          metadata.get(new FieldMetadata.Key(field.getTableOid(), field.getPositionInTable()));
      if (fieldMetadata == null) {
        allOk = false;
      } else {
        field.setMetadata(fieldMetadata);
      }
    }
    fieldInfoFetched |= allOk;
    return allOk;
  }

  /**
   * Resolves column/table/schema/nullability metadata for all fields by
   * querying pg_catalog once, then stores the results in the connection-level
   * cache. No-op when metadata is already complete.
   */
  private void fetchFieldMetaData() throws SQLException {
    if (fieldInfoFetched) {
      return;
    }

    if (populateFieldsWithMetadata(connection.getFieldMetadataCache())) {
      return;
    }

    StringBuilder sql = new StringBuilder(
        "SELECT c.oid, a.attnum, a.attname, c.relname, n.nspname, "
            + "a.attnotnull OR (t.typtype = 'd' AND t.typnotnull), ");

    /*    if ( connection.haveMinimumServerVersion(ServerVersion.v10)) {
          sql.append("a.attidentity != '' OR pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
        } else */
    {
      sql.append("pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
    }

    sql.append(
        "FROM pg_catalog.pg_class c "
            + "JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) "
            + "JOIN pg_catalog.pg_attribute a ON (c.oid = a.attrelid) "
            + "JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "
            + "LEFT JOIN pg_catalog.pg_attrdef d ON (d.adrelid = a.attrelid AND d.adnum = a.attnum) "
            + "JOIN (");

    // 7.4 servers don't support row IN operations (a,b) IN ((c,d),(e,f))
    // so we've got to fake that with a JOIN here.
    //
    boolean hasSourceInfo = false;
    for (Field field : fields) {
      if (field.getMetadata() != null) {
        continue;
      }

      if (hasSourceInfo) {
        sql.append(" UNION ALL ");
      }

      sql.append("SELECT ");
      sql.append(field.getTableOid());
      if (!hasSourceInfo) {
        sql.append(" AS oid ");
      }
      sql.append(", ");
      sql.append(field.getPositionInTable());
      if (!hasSourceInfo) {
        sql.append(" AS attnum");
      }

      if (!hasSourceInfo) {
        hasSourceInfo = true;
      }
    }
    sql.append(") vals ON (c.oid = vals.oid AND a.attnum = vals.attnum) ");

    if (!hasSourceInfo) {
      // Every field already had metadata; nothing to query.
      fieldInfoFetched = true;
      return;
    }

    Statement stmt = connection.createStatement();
    ResultSet rs = null;
    GettableHashMap<FieldMetadata.Key, FieldMetadata> md =
        new GettableHashMap<FieldMetadata.Key, FieldMetadata>();
    try {
      rs = stmt.executeQuery(sql.toString());
      while (rs.next()) {
        int table = (int) rs.getLong(1);
        int column = (int) rs.getLong(2);
        String columnName = rs.getString(3);
        String tableName = rs.getString(4);
        String schemaName = rs.getString(5);
        int nullable =
            rs.getBoolean(6) ? ResultSetMetaData.columnNoNulls : ResultSetMetaData.columnNullable;
        boolean autoIncrement = rs.getBoolean(7);
        FieldMetadata fieldMetadata =
            new FieldMetadata(columnName, tableName, schemaName, nullable, autoIncrement);
        FieldMetadata.Key key = new FieldMetadata.Key(table, column);
        md.put(key, fieldMetadata);
      }
    } finally {
      JdbcBlackHole.close(rs);
      JdbcBlackHole.close(stmt);
    }
    populateFieldsWithMetadata(md);
    connection.getFieldMetadataCache().putAll(md);
  }

  public String getBaseSchemaName(int column) throws SQLException {
    fetchFieldMetaData();
    Field field = getField(column);
    FieldMetadata metadata = field.getMetadata();
    return metadata == null ? "" : metadata.schemaName;
  }

  public int getPrecision(int column) throws SQLException {
    Field field = getField(column);
    return connection.getTypeInfo().getPrecision(field.getOID(), field.getMod());
  }

  public int getScale(int column) throws SQLException {
    Field field = getField(column);
    return connection.getTypeInfo().getScale(field.getOID(), field.getMod());
  }

  public String getTableName(int column) throws SQLException {
    return getBaseTableName(column);
  }

  public String getBaseTableName(int column) throws SQLException {
    fetchFieldMetaData();
    Field field = getField(column);
    FieldMetadata metadata = field.getMetadata();
    return metadata == null ? "" : metadata.tableName;
  }

  /**
   * {@inheritDoc}
   *
   * <p>As with getSchemaName(), we can say that if getTableName() returns n/a, then we can too -
   * otherwise, we need to work on it.
   *
   * @param column the first column is 1, the second is 2...
   * @return catalog name, or "" if not applicable
   * @exception SQLException if a database access error occurs
   */
  public String getCatalogName(int column) throws SQLException {
    // fetchFieldMetaData();
    Field field = getField(column);
    FieldMetadata metadata = field.getMetadata();
    return metadata == null ?
"" : metadata.catalogName; } public int getColumnType(int column) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, column); int rc = getSQLType(column); if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, rc); return rc; } public int getFormat(int column) throws SQLException { return getField(column).getFormat(); } public String getColumnTypeName(int column) throws SQLException { if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(true, column); String type = getRSType(column); if (isAutoIncrement(column)) { if ("int4".equals(type)) { return "serial"; } else if ("int8".equals(type)) { return "bigserial"; } } if (RedshiftLogger.isEnable()) connection.getLogger().logFunction(false, type); return type; } /** * {@inheritDoc} * * <p>In reality, we would have to check the GRANT/REVOKE * stuff for this to be effective, and I haven't really looked into that yet, so this will get * re-visited. * * @param column the first column is 1, the second is 2, etc.* * @return true if so* * @exception SQLException if a database access error occurs */ public boolean isReadOnly(int column) throws SQLException { // fetchFieldMetaData(); Field field = getField(column); FieldMetadata metadata = field.getMetadata(); return (metadata == null) ? false : metadata.readOnly; } /** * {@inheritDoc} * * <p>In reality have to check * the GRANT/REVOKE stuff, which I haven't worked with as yet. However, if it isn't ReadOnly, then * it is obviously writable. * * @param column the first column is 1, the second is 2, etc. * @return true if so * @exception SQLException if a database access error occurs */ public boolean isWritable(int column) throws SQLException { return !isReadOnly(column); } /** * {@inheritDoc} * * <p>Hmmm...this is a bad one, since the two * preceding functions have not been really defined. I cannot tell is the short answer. I thus * return isWritable() just to give us an idea. 
* * @param column the first column is 1, the second is 2, etc.. * @return true if so * @exception SQLException if a database access error occurs */ public boolean isDefinitelyWritable(int column) throws SQLException { return false; } // ******************************************************** // END OF PUBLIC INTERFACE // ******************************************************** /** * For several routines in this package, we need to convert a columnIndex into a Field[] * descriptor. Rather than do the same code several times, here it is. * * @param columnIndex the first column is 1, the second is 2... * @return the Field description * @exception SQLException if a database access error occurs */ protected Field getField(int columnIndex) throws SQLException { if (columnIndex < 1 || columnIndex > fields.length) { throw new RedshiftException( GT.tr("The column index is out of range: {0}, number of columns: {1}.", columnIndex, fields.length), RedshiftState.INVALID_PARAMETER_VALUE); } return fields[columnIndex - 1]; } protected String getRSType(int columnIndex) throws SQLException { return connection.getTypeInfo().getRSType(getField(columnIndex).getOID()); } protected int getSQLType(int columnIndex) throws SQLException { return connection.getTypeInfo().getSQLType(getField(columnIndex).getOID()); } // ** JDBC 2 Extensions ** // This can hook into our Redshift_Object mechanism public String getColumnClassName(int column) throws SQLException { Field field = getField(column); String result = connection.getTypeInfo().getJavaClass(field.getOID()); if (result != null) { return result; } int sqlType = getSQLType(column); switch (sqlType) { case Types.ARRAY: return ("java.sql.Array"); default: String type = getRSType(column); if ("unknown".equals(type)) { return ("java.lang.String"); } return ("java.lang.Object"); } } public boolean isWrapperFor(Class<?> iface) throws SQLException { return iface.isAssignableFrom(getClass()); } public <T> T unwrap(Class<T> iface) throws 
SQLException { if (iface.isAssignableFrom(getClass())) { return iface.cast(this); } throw new SQLException("Cannot unwrap to " + iface.getName()); } }
8,534
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/EscapedFunctions2.java
/*
 * Copyright (c) 2018, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.lang.reflect.Method;
import java.sql.SQLException;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * This class stores supported escaped function.
 * Note: this is a rsjdbc-internal class, so it is not supposed to be used outside of the driver.
 *
 * <p>Dispatch is reflection-based: every public method whose name starts with
 * "sql" is registered in {@link #FUNCTION_MAP} under its name minus the "sql"
 * prefix, lower-cased. The method names are therefore part of the behavior and
 * must not be renamed.
 */
public final class EscapedFunctions2 {
  // constants for timestampadd and timestampdiff
  private static final String SQL_TSI_ROOT = "SQL_TSI_";
  private static final String SQL_TSI_DAY = "SQL_TSI_DAY";
  private static final String SQL_TSI_FRAC_SECOND = "SQL_TSI_FRAC_SECOND";
  private static final String SQL_TSI_HOUR = "SQL_TSI_HOUR";
  private static final String SQL_TSI_MINUTE = "SQL_TSI_MINUTE";
  private static final String SQL_TSI_MONTH = "SQL_TSI_MONTH";
  private static final String SQL_TSI_QUARTER = "SQL_TSI_QUARTER";
  private static final String SQL_TSI_SECOND = "SQL_TSI_SECOND";
  private static final String SQL_TSI_WEEK = "SQL_TSI_WEEK";
  private static final String SQL_TSI_YEAR = "SQL_TSI_YEAR";

  /**
   * storage for functions implementations
   */
  private static final ConcurrentMap<String, Method> FUNCTION_MAP = createFunctionMap("sql");

  // Builds the dispatch table by reflecting over this class's public methods
  // and keying each "sql*" method by its name without the prefix, lower-cased.
  private static ConcurrentMap<String, Method> createFunctionMap(String prefix) {
    Method[] methods = EscapedFunctions2.class.getMethods();
    ConcurrentMap<String, Method> functionMap =
        new ConcurrentHashMap<String, Method>(methods.length * 2);
    for (Method method : methods) {
      if (method.getName().startsWith(prefix)) {
        functionMap.put(
            method.getName().substring(prefix.length()).toLowerCase(Locale.US), method);
      }
    }
    return functionMap;
  }

  /**
   * get Method object implementing the given function
   *
   * @param functionName name of the searched function
   * @return a Method object or null if not found
   */
  public static Method getFunction(String functionName) {
    Method method = FUNCTION_MAP.get(functionName);
    if (method != null) {
      return method;
    }
    //FIXME: this probably should not use the US locale
    String nameLower = functionName.toLowerCase(Locale.US);
    if (nameLower.equals(functionName)) {
      // Input name was in lower case, the function is not there
      return null;
    }
    method = FUNCTION_MAP.get(nameLower);
    if (method != null && FUNCTION_MAP.size() < 1000) {
      // Avoid OutOfMemoryError in case input function names are randomized
      // The number of methods is finite, however the number of upper-lower case combinations
      // is quite a few (e.g. substr, Substr, sUbstr, SUbstr, etc).
      FUNCTION_MAP.putIfAbsent(functionName, method);
    }
    return method;
  }

  // ** numeric functions translations **

  /**
   * ceiling to ceil translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlceiling(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "ceil(", "ceiling", parsedArgs);
  }

  /**
   * log to ln translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqllog(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "ln(", "log", parsedArgs);
  }

  /**
   * log10 to log translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqllog10(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "log(", "log10", parsedArgs);
  }

  /**
   * power to pow translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlpower(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    twoArgumentsFunctionCall(buf, "pow(", "power", parsedArgs);
  }

  /**
   * truncate to trunc translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqltruncate(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    twoArgumentsFunctionCall(buf, "trunc(", "truncate", parsedArgs);
  }

  // ** string functions translations **

  /**
   * char to chr translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlchar(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "chr(", "char", parsedArgs);
  }

  /**
   * concat translation: emits (a||b||...) using the SQL concatenation operator.
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   */
  public static void sqlconcat(StringBuilder buf, List<? extends CharSequence> parsedArgs) {
    appendCall(buf, "(", "||", ")", parsedArgs);
  }

  /**
   * insert to overlay translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlinsert(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 4) {
      throw new RedshiftException(GT.tr("{0} function takes four and only four argument.", "insert"),
          RedshiftState.SYNTAX_ERROR);
    }
    buf.append("overlay(");
    buf.append(parsedArgs.get(0)).append(" placing ").append(parsedArgs.get(3));
    buf.append(" from ").append(parsedArgs.get(1)).append(" for ").append(parsedArgs.get(2));
    buf.append(')');
  }

  /**
   * lcase to lower translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqllcase(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "lower(", "lcase", parsedArgs);
  }

  /**
   * left to substring translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlleft(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 2) {
      throw new RedshiftException(GT.tr("{0} function takes two and only two arguments.", "left"),
          RedshiftState.SYNTAX_ERROR);
    }
    appendCall(buf, "substring(", " for ", ")", parsedArgs);
  }

  /**
   * length translation: trailing whitespace is stripped before measuring.
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqllength(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "length"),
          RedshiftState.SYNTAX_ERROR);
    }
    appendCall(buf, "length(trim(trailing from ", "", "))", parsedArgs);
  }

  /**
   * locate translation. The three-argument form offsets the result by the
   * start position; sign() keeps the result 0 when there is no match.
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqllocate(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() == 2) {
      appendCall(buf, "position(", " in ", ")", parsedArgs);
    } else if (parsedArgs.size() == 3) {
      String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1)
          + " from " + parsedArgs.get(2) + "))";
      buf.append("(")
          .append(parsedArgs.get(2))
          .append("*sign(")
          .append(tmp)
          .append(")+")
          .append(tmp)
          .append(")");
    } else {
      throw new RedshiftException(GT.tr("{0} function takes two or three arguments.", "locate"),
          RedshiftState.SYNTAX_ERROR);
    }
  }

  /**
   * ltrim translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlltrim(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "trim(leading from ", "ltrim", parsedArgs);
  }

  /**
   * right to substring translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlright(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 2) {
      throw new RedshiftException(GT.tr("{0} function takes two and only two arguments.", "right"),
          RedshiftState.SYNTAX_ERROR);
    }
    buf.append("substring(");
    buf.append(parsedArgs.get(0))
        .append(" from (length(")
        .append(parsedArgs.get(0))
        .append(")+1-")
        .append(parsedArgs.get(1));
    buf.append("))");
  }

  /**
   * rtrim translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlrtrim(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "trim(trailing from ", "rtrim", parsedArgs);
  }

  /**
   * space translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlspace(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "repeat(' ',", "space", parsedArgs);
  }

  /**
   * substring to substr translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlsubstring(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    int argSize = parsedArgs.size();
    if (argSize != 2 && argSize != 3) {
      throw new RedshiftException(GT.tr("{0} function takes two or three arguments.", "substring"),
          RedshiftState.SYNTAX_ERROR);
    }
    appendCall(buf, "substr(", ",", ")", parsedArgs);
  }

  /**
   * ucase to upper translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlucase(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "upper(", "ucase", parsedArgs);
  }

  /**
   * curdate to current_date translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlcurdate(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    zeroArgumentFunctionCall(buf, "current_date", "curdate", parsedArgs);
  }

  /**
   * curtime to current_time translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlcurtime(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    zeroArgumentFunctionCall(buf, "current_time", "curtime", parsedArgs);
  }

  /**
   * dayname translation (via to_char with the 'Day' template)
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqldayname(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "dayname"),
          RedshiftState.SYNTAX_ERROR);
    }
    appendCall(buf, "to_char(", ",", ",'Day')", parsedArgs);
  }

  /**
   * dayofmonth translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqldayofmonth(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(day from ", "dayofmonth", parsedArgs);
  }

  /**
   * dayofweek translation adding 1 to Redshift function since we expect values from 1 to 7
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqldayofweek(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "dayofweek"),
          RedshiftState.SYNTAX_ERROR);
    }
    appendCall(buf, "extract(dow from ", ",", ")+1", parsedArgs);
  }

  /**
   * dayofyear translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqldayofyear(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(doy from ", "dayofyear", parsedArgs);
  }

  /**
   * hour translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlhour(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(hour from ", "hour", parsedArgs);
  }

  /**
   * minute translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlminute(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(minute from ", "minute", parsedArgs);
  }

  /**
   * month translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlmonth(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(month from ", "month", parsedArgs);
  }

  /**
   * monthname translation (via to_char with the 'Month' template)
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlmonthname(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "monthname"),
          RedshiftState.SYNTAX_ERROR);
    }
    appendCall(buf, "to_char(", ",", ",'Month')", parsedArgs);
  }

  /**
   * quarter translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlquarter(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(quarter from ", "quarter", parsedArgs);
  }

  /**
   * second translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlsecond(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(second from ", "second", parsedArgs);
  }

  /**
   * week translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlweek(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(week from ", "week", parsedArgs);
  }

  /**
   * year translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlyear(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    singleArgumentFunctionCall(buf, "extract(year from ", "year", parsedArgs);
  }

  /**
   * time stamp add: emits (interval + timestamp)
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqltimestampadd(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 3) {
      throw new RedshiftException(
          GT.tr("{0} function takes three and only three arguments.", "timestampadd"),
          RedshiftState.SYNTAX_ERROR);
    }
    buf.append('(');
    appendInterval(buf, parsedArgs.get(0).toString(), parsedArgs.get(1).toString());
    buf.append('+').append(parsedArgs.get(2)).append(')');
  }

  // Renders a SQL_TSI_* unit/value pair as a CAST(... as interval) expression.
  // SQL_TSI_QUARTER is expanded to 3 months; SQL_TSI_FRAC_SECOND is not
  // supported and falls through to NOT_IMPLEMENTED.
  private static void appendInterval(StringBuilder buf, String type, String value)
      throws SQLException {
    if (!isTsi(type)) {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
          RedshiftState.SYNTAX_ERROR);
    }
    if (appendSingleIntervalCast(buf, SQL_TSI_DAY, type, value, "day")
        || appendSingleIntervalCast(buf, SQL_TSI_SECOND, type, value, "second")
        || appendSingleIntervalCast(buf, SQL_TSI_HOUR, type, value, "hour")
        || appendSingleIntervalCast(buf, SQL_TSI_MINUTE, type, value, "minute")
        || appendSingleIntervalCast(buf, SQL_TSI_MONTH, type, value, "month")
        || appendSingleIntervalCast(buf, SQL_TSI_WEEK, type, value, "week")
        || appendSingleIntervalCast(buf, SQL_TSI_YEAR, type, value, "year")
    ) {
      return;
    }
    if (areSameTsi(SQL_TSI_QUARTER, type)) {
      buf.append("CAST((").append(value).append("::int * 3) || ' month' as interval)");
      return;
    }
    throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
        RedshiftState.NOT_IMPLEMENTED);
  }

  // Appends CAST(value||' pgType' as interval) when type matches cmp;
  // returns false (appending nothing) otherwise.
  private static boolean appendSingleIntervalCast(StringBuilder buf, String cmp, String type,
      String value, String pgType) {
    if (!areSameTsi(type, cmp)) {
      return false;
    }
    buf.ensureCapacity(buf.length() + 5 + 4 + 14 + value.length() + pgType.length());
    buf.append("CAST(").append(value).append("||' ").append(pgType).append("' as interval)");
    return true;
  }

  /**
   * Compares two TSI intervals. It is
   * @param a first interval to compare
   * @param b second interval to compare
   * @return true when both intervals are equal (case insensitive)
   */
  private static boolean areSameTsi(String a, String b) {
    return a.length() == b.length() && b.length() > SQL_TSI_ROOT.length()
        && a.regionMatches(true, SQL_TSI_ROOT.length(), b, SQL_TSI_ROOT.length(),
            b.length() - SQL_TSI_ROOT.length());
  }

  /**
   * Checks if given input starts with {@link #SQL_TSI_ROOT}
   * @param interval input string
   * @return true if interval.startsWithIgnoreCase(SQL_TSI_ROOT)
   */
  private static boolean isTsi(String interval) {
    return interval.regionMatches(true, 0, SQL_TSI_ROOT, 0, SQL_TSI_ROOT.length());
  }

  /**
   * time stamp diff: emits extract(unit from (t2 - t1))
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqltimestampdiff(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    if (parsedArgs.size() != 3) {
      throw new RedshiftException(
          GT.tr("{0} function takes three and only three arguments.", "timestampdiff"),
          RedshiftState.SYNTAX_ERROR);
    }
    buf.append("extract( ")
        .append(constantToDatePart(buf, parsedArgs.get(0).toString()))
        .append(" from (")
        .append(parsedArgs.get(2))
        .append("-")
        .append(parsedArgs.get(1))
        .append("))");
  }

  // Maps a SQL_TSI_* constant to the matching extract() field name.
  // NOTE(review): the buf parameter is accepted but never used here.
  private static String constantToDatePart(StringBuilder buf, String type) throws SQLException {
    if (!isTsi(type)) {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
          RedshiftState.SYNTAX_ERROR);
    }
    if (areSameTsi(SQL_TSI_DAY, type)) {
      return "day";
    } else if (areSameTsi(SQL_TSI_SECOND, type)) {
      return "second";
    } else if (areSameTsi(SQL_TSI_HOUR, type)) {
      return "hour";
    } else if (areSameTsi(SQL_TSI_MINUTE, type)) {
      return "minute";
    } else {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
          RedshiftState.SYNTAX_ERROR);
    }
    // See http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php
    /*
     * else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) return "month"; else if
     * (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) return "quarter"; else if
     * (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) return "week"; else if
     * (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) return "year";
     */
  }

  /**
   * database translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqldatabase(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    zeroArgumentFunctionCall(buf, "current_database()", "database", parsedArgs);
  }

  /**
   * ifnull translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqlifnull(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    twoArgumentsFunctionCall(buf, "coalesce(", "ifnull", parsedArgs);
  }

  /**
   * user translation
   *
   * @param buf The buffer to append into
   * @param parsedArgs arguments
   * @throws SQLException if something wrong happens
   */
  public static void sqluser(StringBuilder buf, List<? extends CharSequence> parsedArgs)
      throws SQLException {
    zeroArgumentFunctionCall(buf, "user", "user", parsedArgs);
  }

  // Shared helper: emits a fixed call text after validating the escape took no arguments.
  private static void zeroArgumentFunctionCall(StringBuilder buf, String call, String functionName,
      List<? extends CharSequence> parsedArgs) throws RedshiftException {
    if (!parsedArgs.isEmpty()) {
      throw new RedshiftException(GT.tr("{0} function doesn''t take any argument.", functionName),
          RedshiftState.SYNTAX_ERROR);
    }
    buf.append(call);
  }

  // Shared helper: emits call + arg + ')' after validating exactly one argument.
  private static void singleArgumentFunctionCall(StringBuilder buf, String call,
      String functionName, List<? extends CharSequence> parsedArgs) throws RedshiftException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", functionName),
          RedshiftState.SYNTAX_ERROR);
    }
    CharSequence arg0 = parsedArgs.get(0);
    buf.ensureCapacity(buf.length() + call.length() + arg0.length() + 1);
    buf.append(call).append(arg0).append(')');
  }

  // Shared helper: emits call + arg0 + ',' + arg1 + ')' after validating exactly two arguments.
  private static void twoArgumentsFunctionCall(StringBuilder buf, String call, String functionName,
      List<? extends CharSequence> parsedArgs) throws RedshiftException {
    if (parsedArgs.size() != 2) {
      throw new RedshiftException(GT.tr("{0} function takes two and only two arguments.", functionName),
          RedshiftState.SYNTAX_ERROR);
    }
    appendCall(buf, call, ",", ")", parsedArgs);
  }

  /**
   * Appends {@code begin arg0 separator arg1 separator end} sequence to the input {@link StringBuilder}
   * @param sb destination StringBuilder
   * @param begin begin string
   * @param separator separator string
   * @param end end string
   * @param args arguments
   */
  public static void appendCall(StringBuilder sb, String begin, String separator,
      String end, List<? extends CharSequence> args) {
    int size = begin.length();
    // Typically just-in-time compiler would eliminate Iterator in case foreach is used,
    // however the code below uses indexed iteration to keep the code independent from
    // various JIT implementations (== avoid Iterator allocations even for not-so-smart JITs)
    // see https://bugs.openjdk.java.net/browse/JDK-8166840
    // see http://2016.jpoint.ru/talks/cheremin/ (video and slides)
    int numberOfArguments = args.size();
    for (int i = 0; i < numberOfArguments; i++) {
      size += args.get(i).length();
    }
    size += separator.length() * (numberOfArguments - 1);
    sb.ensureCapacity(sb.length() + size + 1);
    sb.append(begin);
    for (int i = 0; i < numberOfArguments; i++) {
      if (i > 0) {
        sb.append(separator);
      }
      sb.append(args.get(i));
    }
    sb.append(end);
  }
}
8,535
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/TimestampUtils.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.RedshiftStatement; import com.amazon.redshift.core.JavaVersion; import com.amazon.redshift.core.Oid; import com.amazon.redshift.core.Provider; import com.amazon.redshift.util.ByteConverter; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import com.amazon.redshift.util.RedshiftTime; import com.amazon.redshift.util.RedshiftTimestamp; import java.lang.reflect.Field; import java.sql.Date; import java.sql.SQLException; import java.sql.Time; import java.sql.Timestamp; //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" import java.time.Duration; import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.chrono.IsoEra; import java.time.format.DateTimeParseException; import java.time.temporal.ChronoField; //JCP! endif import java.util.Calendar; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.SimpleTimeZone; import java.util.TimeZone; /** * Misc utils for handling time and date values. */ public class TimestampUtils { /** * Number of milliseconds in one day. */ private static final int ONEDAY = 24 * 3600 * 1000; private static final char[] ZEROS = {'0', '0', '0', '0', '0', '0', '0', '0', '0'}; private static final char[][] NUMBERS; private static final HashMap<String, TimeZone> GMT_ZONES = new HashMap<String, TimeZone>(); private static final int MAX_NANOS_BEFORE_WRAP_ON_ROUND = 999999500; //JCP! 
if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" private static final Duration ONE_MICROSECOND = Duration.ofNanos(1000); // LocalTime.MAX is 23:59:59.999_999_999, and it wraps to 24:00:00 when nanos exceed 999_999_499 // since Redshift has microsecond resolution only private static final LocalTime MAX_TIME = LocalTime.MAX.minus(Duration.ofNanos(500)); private static final OffsetDateTime MAX_OFFSET_DATETIME = OffsetDateTime.MAX.minus(Duration.ofMillis(500)); private static final LocalDateTime MAX_LOCAL_DATETIME = LocalDateTime.MAX.minus(Duration.ofMillis(500)); // low value for dates is 4713 BC private static final LocalDate MIN_LOCAL_DATE = LocalDate.of(4713, 1, 1).with(ChronoField.ERA, IsoEra.BCE.getValue()); private static final LocalDateTime MIN_LOCAL_DATETIME = MIN_LOCAL_DATE.atStartOfDay(); private static final OffsetDateTime MIN_OFFSET_DATETIME = MIN_LOCAL_DATETIME.atOffset(ZoneOffset.UTC); //JCP! endif private static final Field DEFAULT_TIME_ZONE_FIELD; private TimeZone prevDefaultZoneFieldValue; private TimeZone defaultTimeZoneCache; static { // The expected maximum value is 60 (seconds), so 64 is used "just in case" NUMBERS = new char[64][]; for (int i = 0; i < NUMBERS.length; i++) { NUMBERS[i] = ((i < 10 ? "0" : "") + Integer.toString(i)).toCharArray(); } // Backend's gmt-3 means GMT+03 in Java. Here a map is created so gmt-3 can be converted to // java TimeZone for (int i = -12; i <= 14; i++) { TimeZone timeZone; String pgZoneName; if (i == 0) { timeZone = TimeZone.getTimeZone("GMT"); pgZoneName = "GMT"; } else { timeZone = TimeZone.getTimeZone("GMT" + (i <= 0 ? "+" : "-") + Math.abs(i)); pgZoneName = "GMT" + (i >= 0 ? "+" : "-"); } if (i == 0) { GMT_ZONES.put(pgZoneName, timeZone); continue; } GMT_ZONES.put(pgZoneName + Math.abs(i), timeZone); GMT_ZONES.put(pgZoneName + new String(NUMBERS[Math.abs(i)]), timeZone); } // Fast path to getting the default timezone. // Accessing the default timezone over and over creates a clone with regular API. 
// Because we don't mutate that object in our use of it, we can access the field directly. // This saves the creation of a clone everytime, and the memory associated to all these clones. Field tzField; try { tzField = null; // Avoid reflective access in Java 9+ if (JavaVersion.getRuntimeVersion().compareTo(JavaVersion.v1_8) <= 0) { tzField = TimeZone.class.getDeclaredField("defaultTimeZone"); tzField.setAccessible(true); TimeZone defaultTz = TimeZone.getDefault(); Object tzFromField = tzField.get(null); if (defaultTz == null || !defaultTz.equals(tzFromField)) { tzField = null; } } } catch (Exception e) { tzField = null; } DEFAULT_TIME_ZONE_FIELD = tzField; } private final StringBuilder sbuf = new StringBuilder(); // This calendar is used when user provides calendar in setX(, Calendar) method. // It ensures calendar is Gregorian. private final Calendar calendarWithUserTz = new GregorianCalendar(); private final TimeZone utcTz = TimeZone.getTimeZone("UTC"); private Calendar calCache; private int calCacheZone; /** * True if the backend uses doubles for time values. False if long is used. */ private final boolean usesDouble; private final Provider<TimeZone> timeZoneProvider; TimestampUtils(boolean usesDouble, Provider<TimeZone> timeZoneProvider) { this.usesDouble = usesDouble; this.timeZoneProvider = timeZoneProvider; } private Calendar getCalendar(int sign, int hr, int min, int sec) { int rawOffset = sign * (((hr * 60 + min) * 60 + sec) * 1000); if (calCache != null && calCacheZone == rawOffset) { return calCache; } StringBuilder zoneID = new StringBuilder("GMT"); zoneID.append(sign < 0 ? 
'-' : '+'); if (hr < 10) { zoneID.append('0'); } zoneID.append(hr); if (min < 10) { zoneID.append('0'); } zoneID.append(min); if (sec < 10) { zoneID.append('0'); } zoneID.append(sec); TimeZone syntheticTZ = new SimpleTimeZone(rawOffset, zoneID.toString()); calCache = new GregorianCalendar(syntheticTZ); calCacheZone = rawOffset; return calCache; } private static class ParsedTimestamp { boolean hasDate = false; int era = GregorianCalendar.AD; int year = 1970; int month = 1; boolean hasTime = false; int day = 1; int hour = 0; int minute = 0; int second = 0; int nanos = 0; Calendar tz = null; } private static class ParsedBinaryTimestamp { Infinity infinity = null; long millis = 0; int nanos = 0; } enum Infinity { POSITIVE, NEGATIVE; } /** * Load date/time information into the provided calendar returning the fractional seconds. */ private ParsedTimestamp parseBackendTimestamp(String str) throws SQLException { char[] s = str.toCharArray(); int slen = s.length; // This is pretty gross.. ParsedTimestamp result = new ParsedTimestamp(); // We try to parse these fields in order; all are optional // (but some combinations don't make sense, e.g. if you have // both date and time then they must be whitespace-separated). // At least one of date and time must be present. // leading whitespace // yyyy-mm-dd // whitespace // hh:mm:ss // whitespace // timezone in one of the formats: +hh, -hh, +hh:mm, -hh:mm // whitespace // if date is present, an era specifier: AD or BC // trailing whitespace try { int start = skipWhitespace(s, 0); // Skip leading whitespace int end = firstNonDigit(s, start); int num; char sep; // Possibly read date. 
if (charAt(s, end) == '-') { // // Date // result.hasDate = true; // year result.year = number(s, start, end); start = end + 1; // Skip '-' // month end = firstNonDigit(s, start); result.month = number(s, start, end); sep = charAt(s, end); if (sep != '-') { throw new NumberFormatException("Expected date to be dash-separated, got '" + sep + "'"); } start = end + 1; // Skip '-' // day of month end = firstNonDigit(s, start); result.day = number(s, start, end); start = skipWhitespace(s, end); // Skip trailing whitespace } // Possibly read time. if (Character.isDigit(charAt(s, start))) { // // Time. // result.hasTime = true; // Hours end = firstNonDigit(s, start); result.hour = number(s, start, end); sep = charAt(s, end); if (sep != ':') { throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'"); } start = end + 1; // Skip ':' // minutes end = firstNonDigit(s, start); result.minute = number(s, start, end); sep = charAt(s, end); if (sep != ':') { throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'"); } start = end + 1; // Skip ':' // seconds end = firstNonDigit(s, start); result.second = number(s, start, end); start = end; // Fractional seconds. if (charAt(s, start) == '.') { end = firstNonDigit(s, start + 1); // Skip '.' num = number(s, start + 1, end); for (int numlength = (end - (start + 1)); numlength < 9; ++numlength) { num *= 10; } result.nanos = num; start = end; } start = skipWhitespace(s, start); // Skip trailing whitespace } // Possibly read timezone. sep = charAt(s, start); if (sep == '-' || sep == '+') { int tzsign = (sep == '-') ? 
-1 : 1; int tzhr; int tzmin; int tzsec; end = firstNonDigit(s, start + 1); // Skip +/- tzhr = number(s, start + 1, end); start = end; sep = charAt(s, start); if (sep == ':') { end = firstNonDigit(s, start + 1); // Skip ':' tzmin = number(s, start + 1, end); start = end; } else { tzmin = 0; } tzsec = 0; sep = charAt(s, start); if (sep == ':') { end = firstNonDigit(s, start + 1); // Skip ':' tzsec = number(s, start + 1, end); start = end; } // Setting offset does not seem to work correctly in all // cases.. So get a fresh calendar for a synthetic timezone // instead result.tz = getCalendar(tzsign, tzhr, tzmin, tzsec); start = skipWhitespace(s, start); // Skip trailing whitespace } if (result.hasDate && start < slen) { String eraString = new String(s, start, slen - start); if (eraString.startsWith("AD")) { result.era = GregorianCalendar.AD; start += 2; } else if (eraString.startsWith("BC")) { result.era = GregorianCalendar.BC; start += 2; } } if (start < slen) { throw new NumberFormatException( "Trailing junk on timestamp: '" + new String(s, start, slen - start) + "'"); } if (!result.hasTime && !result.hasDate) { throw new NumberFormatException("Timestamp has neither date nor time"); } } catch (NumberFormatException nfe) { throw new RedshiftException( GT.tr("Bad value for type timestamp/date/time: {1}", str), RedshiftState.BAD_DATETIME_FORMAT, nfe); } return result; } /** * Parse a string and return a timestamp representing its value. * * @param cal calendar to be used to parse the input string * @param s The ISO formated date string to parse. * @return null if s is null or a timestamp of the parsed string s. * @throws SQLException if there is a problem parsing s. 
*/ public synchronized Timestamp toTimestamp(Calendar cal, String s) throws SQLException { if (s == null) { return null; } int slen = s.length(); // convert postgres's infinity values to internal infinity magic value if (slen == 8 && s.equals("infinity")) { return new Timestamp(RedshiftStatement.DATE_POSITIVE_INFINITY); } if (slen == 9 && s.equals("-infinity")) { return new Timestamp(RedshiftStatement.DATE_NEGATIVE_INFINITY); } ParsedTimestamp ts = parseBackendTimestamp(s); if(ts.month < 1 || ts.month > 12) { throw new SQLException("Invalid value for month in timestamp : " + ts.month); } if(ts.day < 1 || ts.day > 31) { throw new SQLException("Invalid value for day of month in timestamp : " + ts.day); } if(ts.hour < 0 || ts.hour > 24) { throw new SQLException("Invalid value for hour of month in timestamp : " + ts.hour); } if(ts.minute < 0 || ts.minute > 60) { throw new SQLException("Invalid value for minute in timestamp : " + ts.minute); } if(ts.second < 0 || ts.second > 60) { throw new SQLException("Invalid value for second in timestamp : " + ts.second); } Calendar useCal = ts.tz != null ? ts.tz : setupCalendar(cal); useCal.set(Calendar.ERA, ts.era); useCal.set(Calendar.YEAR, ts.year); useCal.set(Calendar.MONTH, ts.month - 1); useCal.set(Calendar.DAY_OF_MONTH, ts.day); useCal.set(Calendar.HOUR_OF_DAY, ts.hour); useCal.set(Calendar.MINUTE, ts.minute); useCal.set(Calendar.SECOND, ts.second); useCal.set(Calendar.MILLISECOND, 0); Timestamp result; if(ts.tz != null) { result = new RedshiftTimestamp(useCal.getTimeInMillis(), useCal, s); } else result = new Timestamp(useCal.getTimeInMillis()); result.setNanos(ts.nanos); return result; } //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" /** * Parse a string and return a LocalTime representing its value. * * @param s The ISO formated time string to parse. * @return null if s is null or a LocalTime of the parsed string s. * @throws SQLException if there is a problem parsing s. 
*/ public LocalTime toLocalTime(String s) throws SQLException { if (s == null) { return null; } if (s.equals("24:00:00")) { return LocalTime.MAX; } try { return LocalTime.parse(s); } catch (DateTimeParseException nfe) { throw new RedshiftException( GT.tr("Bad value for type timestamp/date/time: {1}", s), RedshiftState.BAD_DATETIME_FORMAT, nfe); } } /** * Parse a string and return a LocalDateTime representing its value. * * @param s The ISO formated date string to parse. * @return null if s is null or a LocalDateTime of the parsed string s. * @throws SQLException if there is a problem parsing s. */ public LocalDateTime toLocalDateTime(String s) throws SQLException { if (s == null) { return null; } int slen = s.length(); // convert postgres's infinity values to internal infinity magic value if (slen == 8 && s.equals("infinity")) { return LocalDateTime.MAX; } if (slen == 9 && s.equals("-infinity")) { return LocalDateTime.MIN; } ParsedTimestamp ts = parseBackendTimestamp(s); // intentionally ignore time zone // 2004-10-19 10:23:54+03:00 is 2004-10-19 10:23:54 locally LocalDateTime result = LocalDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos); if (ts.era == GregorianCalendar.BC) { return result.with(ChronoField.ERA, IsoEra.BCE.getValue()); } else { return result; } } /** * Parse a string and return a LocalDateTime representing its value. * * @param s The ISO formated date string to parse. * @return null if s is null or a LocalDateTime of the parsed string s. * @throws SQLException if there is a problem parsing s. 
*/ public OffsetDateTime toOffsetDateTime(String s) throws SQLException { if (s == null) { return null; } int slen = s.length(); // convert postgres's infinity values to internal infinity magic value if (slen == 8 && s.equals("infinity")) { return OffsetDateTime.MAX; } if (slen == 9 && s.equals("-infinity")) { return OffsetDateTime.MIN; } ParsedTimestamp ts = parseBackendTimestamp(s); Calendar tz = ts.tz; int offsetSeconds; if (tz == null) { offsetSeconds = 0; } else { offsetSeconds = tz.get(Calendar.ZONE_OFFSET) / 1000; } ZoneOffset zoneOffset = ZoneOffset.ofTotalSeconds(offsetSeconds); // Postgres is always UTC OffsetDateTime result = OffsetDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos, zoneOffset) .withOffsetSameInstant(ZoneOffset.UTC); if (ts.era == GregorianCalendar.BC) { return result.with(ChronoField.ERA, IsoEra.BCE.getValue()); } else { return result; } } /** * Returns the offset date time object matching the given bytes with Oid#TIMETZ. * * @param t the time value * @return the matching offset date time */ public OffsetDateTime toOffsetDateTime(Time t) { // hardcode utc because the backend does not provide us the timezone // hardoce UNIX epoch, JDBC requires OffsetDateTime but doesn't describe what date should be used return t.toLocalTime().atDate(LocalDate.of(1970, 1, 1)).atOffset(ZoneOffset.UTC); } /** * Returns the offset date time object matching the given bytes with Oid#TIMESTAMPTZ. * * @param bytes The binary encoded local date time value. * @return The parsed local date time object. * @throws RedshiftException If binary format could not be parsed. 
*/ public OffsetDateTime toOffsetDateTimeBin(byte[] bytes) throws RedshiftException { ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes); if (parsedTimestamp.infinity == Infinity.POSITIVE) { return OffsetDateTime.MAX; } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) { return OffsetDateTime.MIN; } // hardcode utc because the backend does not provide us the timezone // Postgres is always UTC Instant instant = Instant.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos); return OffsetDateTime.ofInstant(instant, ZoneOffset.UTC); } //JCP! endif public synchronized Time toTime(Calendar cal, String s) throws SQLException { // 1) Parse backend string if (s == null) { return null; } ParsedTimestamp ts = parseBackendTimestamp(s); Calendar useCal = ts.tz != null ? ts.tz : setupCalendar(cal); if (ts.tz == null) { // When no time zone provided (e.g. time or timestamp) // We get the year-month-day from the string, then truncate the day to 1970-01-01 // This is used for timestamp -> time conversion // Note: this cannot be merged with "else" branch since // timestamps at which the time flips to/from DST depend on the date // For instance, 2000-03-26 02:00:00 is invalid timestamp in Europe/Moscow time zone // and the valid one is 2000-03-26 03:00:00. 
That is why we parse full timestamp // then set year to 1970 later useCal.set(Calendar.ERA, ts.era); useCal.set(Calendar.YEAR, ts.year); useCal.set(Calendar.MONTH, ts.month - 1); useCal.set(Calendar.DAY_OF_MONTH, ts.day); } else { // When time zone is given, we just pick the time part and assume date to be 1970-01-01 // this is used for time, timez, and timestamptz parsing useCal.set(Calendar.ERA, GregorianCalendar.AD); useCal.set(Calendar.YEAR, 1970); useCal.set(Calendar.MONTH, Calendar.JANUARY); useCal.set(Calendar.DAY_OF_MONTH, 1); } useCal.set(Calendar.HOUR_OF_DAY, ts.hour); useCal.set(Calendar.MINUTE, ts.minute); useCal.set(Calendar.SECOND, ts.second); useCal.set(Calendar.MILLISECOND, 0); long timeMillis = useCal.getTimeInMillis() + ts.nanos / 1000000; if (ts.tz != null || (ts.year == 1970 && ts.era == GregorianCalendar.AD)) { // time with time zone has proper time zone, so the value can be returned as is return new Time(timeMillis); } // 2) Truncate date part so in given time zone the date would be formatted as 01/01/1970 return convertToTime(timeMillis, useCal.getTimeZone()); } public synchronized Date toDate(Calendar cal, String s) throws SQLException { // 1) Parse backend string Timestamp timestamp = toTimestamp(cal, s); if (timestamp == null) { return null; } // Note: infinite dates are handled in convertToDate // 2) Truncate date part so in given time zone the date would be formatted as 00:00 return convertToDate(timestamp.getTime(), cal == null ? null : cal.getTimeZone()); } private Calendar setupCalendar(Calendar cal) { TimeZone timeZone = cal == null ? null : cal.getTimeZone(); return getSharedCalendar(timeZone); } /** * Get a shared calendar, applying the supplied time zone or the default time zone if null. * * @param timeZone time zone to be set for the calendar * @return The shared calendar. 
 */
  public Calendar getSharedCalendar(TimeZone timeZone) {
    if (timeZone == null) {
      timeZone = getDefaultTz();
    }
    // NOTE(review): returns the single shared instance, not a copy, and this method
    // is not itself synchronized — assumes callers serialize access (the public
    // toString/toTimestamp entry points are synchronized); confirm for new callers.
    Calendar tmp = calendarWithUserTz;
    tmp.setTimeZone(timeZone);
    return tmp;
  }

  /**
   * Returns true when microsecond part of the time should be increased
   * when rounding to microseconds
   * @param nanos nanosecond part of the time
   * @return true when microsecond part of the time should be increased when rounding to microseconds
   */
  private static boolean nanosExceed499(int nanos) {
    return nanos % 1000 > 499;
  }

  /** Formats the timestamp including its time zone; see {@link #toString(Calendar, Timestamp, boolean)}. */
  public synchronized String toString(Calendar cal, Timestamp x) {
    return toString(cal, x, true);
  }

  /**
   * Formats a timestamp in backend-compatible text form.
   * Synchronized because it writes into the shared {@code sbuf} buffer.
   */
  public synchronized String toString(Calendar cal, Timestamp x, boolean withTimeZone) {
    // Infinity magic values round-trip as the backend's textual markers.
    if (x.getTime() == RedshiftStatement.DATE_POSITIVE_INFINITY) {
      return "infinity";
    } else if (x.getTime() == RedshiftStatement.DATE_NEGATIVE_INFINITY) {
      return "-infinity";
    }

    cal = setupCalendar(cal);
    long timeMillis = x.getTime();

    // Round to microseconds
    int nanos = x.getNanos();
    if (nanos >= MAX_NANOS_BEFORE_WRAP_ON_ROUND) {
      // Rounding would wrap into the next second. Timestamp.getTime() already
      // includes the integral-millisecond part of the nanos (.999 here), so
      // adding one millisecond rolls over to the next whole second.
      nanos = 0;
      timeMillis++;
    } else if (nanosExceed499(nanos)) {
      // PostgreSQL does not support nanosecond resolution yet, and appendTime will just ignore
      // 0..999 part of the nanoseconds, however we subtract nanos % 1000 to make the value
      // a little bit saner for debugging reasons
      nanos += 1000 - nanos % 1000;
    }
    cal.setTimeInMillis(timeMillis);

    sbuf.setLength(0);

    appendDate(sbuf, cal);
    sbuf.append(' ');
    appendTime(sbuf, cal, nanos);
    if (withTimeZone) {
      appendTimeZone(sbuf, cal);
    }
    appendEra(sbuf, cal);

    return sbuf.toString();
  }

  /** Formats the date including its time zone; see {@link #toString(Calendar, Date, boolean)}. */
  public synchronized String toString(Calendar cal, Date x) {
    return toString(cal, x, true);
  }

  /**
   * Formats a date in backend-compatible text form, optionally followed by a zone offset.
   * Synchronized because it writes into the shared {@code sbuf} buffer.
   */
  public synchronized String toString(Calendar cal, Date x, boolean withTimeZone) {
    if (x.getTime() == RedshiftStatement.DATE_POSITIVE_INFINITY) {
      return "infinity";
    } else if (x.getTime() == RedshiftStatement.DATE_NEGATIVE_INFINITY) {
      return "-infinity";
    }

    cal = setupCalendar(cal);
    cal.setTime(x);

    sbuf.setLength(0);

    appendDate(sbuf, cal);
appendEra(sbuf, cal); if (withTimeZone) { sbuf.append(' '); appendTimeZone(sbuf, cal); } return sbuf.toString(); } public synchronized String toString(Calendar cal, Time x) { return toString(cal, x, true); } public synchronized String toString(Calendar cal, Time x, boolean withTimeZone) { cal = setupCalendar(cal); cal.setTime(x); sbuf.setLength(0); int nanos; if(x instanceof RedshiftTime) nanos = ((RedshiftTime)x).getNanos(); else nanos = cal.get(Calendar.MILLISECOND) * 1000000; appendTime(sbuf, cal, nanos); // The 'time' parser for <= 7.3 doesn't like timezones. if (withTimeZone) { appendTimeZone(sbuf, cal); } return sbuf.toString(); } private static void appendDate(StringBuilder sb, Calendar cal) { int year = cal.get(Calendar.YEAR); int month = cal.get(Calendar.MONTH) + 1; int day = cal.get(Calendar.DAY_OF_MONTH); appendDate(sb, year, month, day); } private static void appendDate(StringBuilder sb, int year, int month, int day) { // always use at least four digits for the year so very // early years, like 2, don't get misinterpreted // int prevLength = sb.length(); sb.append(year); int leadingZerosForYear = 4 - (sb.length() - prevLength); if (leadingZerosForYear > 0) { sb.insert(prevLength, ZEROS, 0, leadingZerosForYear); } sb.append('-'); sb.append(NUMBERS[month]); sb.append('-'); sb.append(NUMBERS[day]); } private static void appendTime(StringBuilder sb, Calendar cal, int nanos) { int hours = cal.get(Calendar.HOUR_OF_DAY); int minutes = cal.get(Calendar.MINUTE); int seconds = cal.get(Calendar.SECOND); appendTime(sb, hours, minutes, seconds, nanos); } /** * Appends time part to the {@code StringBuilder} in Redshift-compatible format. * The function truncates {@param nanos} to microseconds. The value is expected to be rounded * beforehand. 
* @param sb destination * @param hours hours * @param minutes minutes * @param seconds seconds * @param nanos nanoseconds */ private static void appendTime(StringBuilder sb, int hours, int minutes, int seconds, int nanos) { sb.append(NUMBERS[hours]); sb.append(':'); sb.append(NUMBERS[minutes]); sb.append(':'); sb.append(NUMBERS[seconds]); // Add nanoseconds. // This won't work for server versions < 7.2 which only want // a two digit fractional second, but we don't need to support 7.1 // anymore and getting the version number here is difficult. // if (nanos < 1000) { return; } sb.append('.'); int len = sb.length(); sb.append(nanos / 1000); // append microseconds int needZeros = 6 - (sb.length() - len); if (needZeros > 0) { sb.insert(len, ZEROS, 0, needZeros); } int end = sb.length() - 1; while (sb.charAt(end) == '0') { sb.deleteCharAt(end); end--; } } private void appendTimeZone(StringBuilder sb, java.util.Calendar cal) { int offset = (cal.get(Calendar.ZONE_OFFSET) + cal.get(Calendar.DST_OFFSET)) / 1000; appendTimeZone(sb, offset); } private void appendTimeZone(StringBuilder sb, int offset) { int absoff = Math.abs(offset); int hours = absoff / 60 / 60; int mins = (absoff - hours * 60 * 60) / 60; int secs = absoff - hours * 60 * 60 - mins * 60; sb.append((offset >= 0) ? "+" : "-"); sb.append(NUMBERS[hours]); if (mins == 0 && secs == 0) { return; } sb.append(':'); sb.append(NUMBERS[mins]); if (secs != 0) { sb.append(':'); sb.append(NUMBERS[secs]); } } private static void appendEra(StringBuilder sb, Calendar cal) { if (cal.get(Calendar.ERA) == GregorianCalendar.BC) { sb.append(" BC"); } } //JCP! 
if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" public synchronized String toString(LocalDate localDate) { if (LocalDate.MAX.equals(localDate)) { return "infinity"; } else if (localDate.isBefore(MIN_LOCAL_DATE)) { return "-infinity"; } sbuf.setLength(0); appendDate(sbuf, localDate); appendEra(sbuf, localDate); return sbuf.toString(); } public synchronized String toString(LocalTime localTime) { sbuf.setLength(0); if (localTime.isAfter(MAX_TIME)) { return "24:00:00"; } int nano = localTime.getNano(); if (nanosExceed499(nano)) { // Technically speaking this is not a proper rounding, however // it relies on the fact that appendTime just truncates 000..999 nanosecond part localTime = localTime.plus(ONE_MICROSECOND); } appendTime(sbuf, localTime); return sbuf.toString(); } public synchronized String toString(OffsetDateTime offsetDateTime) { if (offsetDateTime.isAfter(MAX_OFFSET_DATETIME)) { return "infinity"; } else if (offsetDateTime.isBefore(MIN_OFFSET_DATETIME)) { return "-infinity"; } sbuf.setLength(0); int nano = offsetDateTime.getNano(); if (nanosExceed499(nano)) { // Technically speaking this is not a proper rounding, however // it relies on the fact that appendTime just truncates 000..999 nanosecond part offsetDateTime = offsetDateTime.plus(ONE_MICROSECOND); } LocalDateTime localDateTime = offsetDateTime.toLocalDateTime(); LocalDate localDate = localDateTime.toLocalDate(); appendDate(sbuf, localDate); sbuf.append(' '); appendTime(sbuf, localDateTime.toLocalTime()); appendTimeZone(sbuf, offsetDateTime.getOffset()); appendEra(sbuf, localDate); return sbuf.toString(); } /** * Formats {@link LocalDateTime} to be sent to the backend, thus it adds time zone. 
* Do not use this method in {@link java.sql.ResultSet#getString(int)} * @param localDateTime The local date to format as a String * @return The formatted local date */ public synchronized String toString(LocalDateTime localDateTime) { if (localDateTime.isAfter(MAX_LOCAL_DATETIME)) { return "infinity"; } else if (localDateTime.isBefore(MIN_LOCAL_DATETIME)) { return "-infinity"; } // LocalDateTime is always passed with time zone so backend can decide between timestamp and timestamptz ZonedDateTime zonedDateTime = localDateTime.atZone(getDefaultTz().toZoneId()); return toString(zonedDateTime.toOffsetDateTime()); } private static void appendDate(StringBuilder sb, LocalDate localDate) { int year = localDate.get(ChronoField.YEAR_OF_ERA); int month = localDate.getMonthValue(); int day = localDate.getDayOfMonth(); appendDate(sb, year, month, day); } private static void appendTime(StringBuilder sb, LocalTime localTime) { int hours = localTime.getHour(); int minutes = localTime.getMinute(); int seconds = localTime.getSecond(); int nanos = localTime.getNano(); appendTime(sb, hours, minutes, seconds, nanos); } private void appendTimeZone(StringBuilder sb, ZoneOffset offset) { int offsetSeconds = offset.getTotalSeconds(); appendTimeZone(sb, offsetSeconds); } private static void appendEra(StringBuilder sb, LocalDate localDate) { if (localDate.get(ChronoField.ERA) == IsoEra.BCE.getValue()) { sb.append(" BC"); } } //JCP! 
endif private static int skipWhitespace(char[] s, int start) { int slen = s.length; for (int i = start; i < slen; i++) { if (!Character.isSpace(s[i])) { return i; } } return slen; } private static int firstNonDigit(char[] s, int start) { int slen = s.length; for (int i = start; i < slen; i++) { if (!Character.isDigit(s[i])) { return i; } } return slen; } private static int number(char[] s, int start, int end) { if (start >= end) { throw new NumberFormatException(); } int n = 0; for (int i = start; i < end; i++) { n = 10 * n + (s[i] - '0'); } return n; } private static char charAt(char[] s, int pos) { if (pos >= 0 && pos < s.length) { return s[pos]; } return '\0'; } /** * Returns the SQL Date object matching the given bytes with {@link Oid#DATE}. * * @param tz The timezone used. * @param bytes The binary encoded date value. * @return The parsed date object. * @throws RedshiftException If binary format could not be parsed. */ public Date toDateBin(TimeZone tz, byte[] bytes) throws RedshiftException { if (bytes.length != 4) { throw new RedshiftException(GT.tr("Unsupported binary encoding of {0}.", "date"), RedshiftState.BAD_DATETIME_FORMAT); } int days = ByteConverter.int4(bytes, 0); if (tz == null) { tz = getDefaultTz(); } long secs = toJavaSecs(days * 86400L); long millis = secs * 1000L; if (millis <= RedshiftStatement.DATE_NEGATIVE_SMALLER_INFINITY) { millis = RedshiftStatement.DATE_NEGATIVE_INFINITY; } else if (millis >= RedshiftStatement.DATE_POSITIVE_SMALLER_INFINITY) { millis = RedshiftStatement.DATE_POSITIVE_INFINITY; } else { // Here be dragons: backend did not provide us the timezone, so we guess the actual point in // time millis = guessTimestamp(millis, tz); } return new Date(millis); } private TimeZone getDefaultTz() { // Fast path to getting the default timezone. 
if (DEFAULT_TIME_ZONE_FIELD != null) { try { TimeZone defaultTimeZone = (TimeZone) DEFAULT_TIME_ZONE_FIELD.get(null); if (defaultTimeZone == prevDefaultZoneFieldValue) { return defaultTimeZoneCache; } prevDefaultZoneFieldValue = defaultTimeZone; } catch (Exception e) { // If this were to fail, fallback on slow method. } } TimeZone tz = TimeZone.getDefault(); defaultTimeZoneCache = tz; return tz; } public boolean hasFastDefaultTimeZone() { return DEFAULT_TIME_ZONE_FIELD != null; } /** * Returns the SQL Time object matching the given bytes with {@link Oid#TIME} or * {@link Oid#TIMETZ}. * * @param tz The timezone used when received data is {@link Oid#TIME}, ignored if data already * contains {@link Oid#TIMETZ}. * @param bytes The binary encoded time value. * @return The parsed time object. * @throws RedshiftException If binary format could not be parsed. */ public Time toTimeBin(TimeZone tz, byte[] bytes) throws RedshiftException { if ((bytes.length != 8 && bytes.length != 12)) { throw new RedshiftException(GT.tr("Unsupported binary encoding of {0}.", "time"), RedshiftState.BAD_DATETIME_FORMAT); } long millis; int timeOffset; int nanos = 0; Time timeObj; if (usesDouble) { double time = ByteConverter.float8(bytes, 0); millis = (long) (time * 1000); } else { long time = ByteConverter.int8(bytes, 0); millis = time / 1000; if ((time % 1000) > 0) { // There is a microsec fraction. Server sends precision upto Micro only. nanos = (int)(time % 1000000)*1000; } } if (bytes.length == 12) { timeOffset = ByteConverter.int4(bytes, 8); timeOffset *= -1000; millis -= timeOffset; timeObj = new Time(millis); return (nanos > 0) ? new RedshiftTime(timeObj, nanos) : timeObj; } if (tz == null) { tz = getDefaultTz(); } // Here be dragons: backend did not provide us the timezone, so we guess the actual point in // time millis = guessTimestamp(millis, tz); timeObj = convertToTime(millis, tz); // Ensure date part is 1970-01-01 return (nanos > 0) ? 
new RedshiftTime(timeObj, nanos) : timeObj; } //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2" /** * Returns the SQL Time object matching the given bytes with {@link Oid#TIME}. * * @param bytes The binary encoded time value. * @return The parsed time object. * @throws RedshiftException If binary format could not be parsed. */ public LocalTime toLocalTimeBin(byte[] bytes) throws RedshiftException { if (bytes.length != 8) { throw new RedshiftException(GT.tr("Unsupported binary encoding of {0}.", "time"), RedshiftState.BAD_DATETIME_FORMAT); } long micros; if (usesDouble) { double seconds = ByteConverter.float8(bytes, 0); micros = (long) (seconds * 1000000d); } else { micros = ByteConverter.int8(bytes, 0); } return LocalTime.ofNanoOfDay(micros * 1000); } //JCP! endif /** * Returns the SQL Timestamp object matching the given bytes with {@link Oid#TIMESTAMP} or * {@link Oid#TIMESTAMPTZ}. * * @param tz The timezone used when received data is {@link Oid#TIMESTAMP}, ignored if data * already contains {@link Oid#TIMESTAMPTZ}. * @param bytes The binary encoded timestamp value. * @param timestamptz True if the binary is in GMT. * @param cal Calendar to use * @return The parsed timestamp object. * @throws RedshiftException If binary format could not be parsed. 
*/ public Timestamp toTimestampBin(TimeZone tz, byte[] bytes, boolean timestamptz,java.util.Calendar cal) throws RedshiftException { ParsedBinaryTimestamp parsedTimestamp = this.toParsedTimestampBin(tz, bytes, timestamptz); if (parsedTimestamp.infinity == Infinity.POSITIVE) { return new Timestamp(RedshiftStatement.DATE_POSITIVE_INFINITY); } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) { return new Timestamp(RedshiftStatement.DATE_NEGATIVE_INFINITY); } Timestamp ts; if(timestamptz) { ts = new RedshiftTimestamp(parsedTimestamp.millis, cal); } else { ts = new Timestamp(parsedTimestamp.millis); } ts.setNanos(parsedTimestamp.nanos); return ts; } /** * Returns the SQL Timestamp object matching the given bytes with {@link Oid#TIMESTAMP} or * {@link Oid#TIMESTAMPTZ}. * * @param tz The timezone used when received data is {@link Oid#TIMESTAMP}, ignored if data * already contains {@link Oid#TIMESTAMPTZ}. * @param bytes The binary encoded timestamp value of ABSTIME. ABSTIME has 4 bytes. * @param timestamptz True if the binary is in GMT. * @param cal Calendar to use * @return The parsed timestamp object. * @throws RedshiftException If binary format could not be parsed. 
*/ public Timestamp toTimestampAbsTimeBin(TimeZone tz, byte[] bytes, boolean timestamptz,java.util.Calendar cal) throws RedshiftException { ParsedBinaryTimestamp parsedTimestamp = this.toParsedTimestampAbsTimeBin(tz, bytes, timestamptz); if (parsedTimestamp.infinity == Infinity.POSITIVE) { return new Timestamp(RedshiftStatement.DATE_POSITIVE_INFINITY); } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) { return new Timestamp(RedshiftStatement.DATE_NEGATIVE_INFINITY); } Timestamp ts; if(timestamptz) { ts = new RedshiftTimestamp(parsedTimestamp.millis, cal); } else { ts = new Timestamp(parsedTimestamp.millis); } ts.setNanos(parsedTimestamp.nanos); return ts; } private ParsedBinaryTimestamp toParsedTimestampBinPlain(byte[] bytes) throws RedshiftException { if (bytes.length != 8) { throw new RedshiftException(GT.tr("Unsupported binary encoding of {0}.", "timestamp"), RedshiftState.BAD_DATETIME_FORMAT); } long secs; int nanos; if (usesDouble) { double time = ByteConverter.float8(bytes, 0); if (time == Double.POSITIVE_INFINITY) { ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp(); ts.infinity = Infinity.POSITIVE; return ts; } else if (time == Double.NEGATIVE_INFINITY) { ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp(); ts.infinity = Infinity.NEGATIVE; return ts; } secs = (long) time; nanos = (int) ((time - secs) * 1000000); } else { long time = ByteConverter.int8(bytes, 0); // compatibility with text based receiving, not strictly necessary // and can actually be confusing because there are timestamps // that are larger than infinite if (time == Long.MAX_VALUE) { ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp(); ts.infinity = Infinity.POSITIVE; return ts; } else if (time == Long.MIN_VALUE) { ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp(); ts.infinity = Infinity.NEGATIVE; return ts; } secs = time / 1000000; nanos = (int) (time - secs * 1000000); } if (nanos < 0) { secs--; nanos += 1000000; } nanos *= 1000; long millis = secs * 1000L; 
ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
    ts.millis = millis;
    ts.nanos = nanos;
    return ts;
  }

  // Restricted ABSTIME is 4bytes.
  // Decodes a 4-byte binary "abstime" value into a ParsedBinaryTimestamp
  // (millis + sub-millisecond nanos) without any timezone adjustment.
  // Infinity sentinels map to Infinity.POSITIVE / Infinity.NEGATIVE.
  private ParsedBinaryTimestamp toParsedTimestampBinAbsTimePlain(byte[] bytes)
      throws RedshiftException {
    if (bytes.length != 4) {
      throw new RedshiftException(GT.tr("Unsupported binary encoding of {0}.", "abstime"),
          RedshiftState.BAD_DATETIME_FORMAT);
    }

    long secs;
    int nanos;

    if (usesDouble) {
      // Floating-point datetimes: value is seconds as a 4-byte float.
      double time = ByteConverter.float4(bytes, 0);
      if (time == Double.POSITIVE_INFINITY) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.POSITIVE;
        return ts;
      } else if (time == Double.NEGATIVE_INFINITY) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.NEGATIVE;
        return ts;
      }

      secs = (long) time;
      nanos = (int) ((time - secs) * 1000000);
    } else {
      long time = ByteConverter.int4(bytes, 0); // Time in secs
      time *= 1000000; // Time in micro secs
      // compatibility with text based receiving, not strictly necessary
      // and can actually be confusing because there are timestamps
      // that are larger than infinite
      if (time == Long.MAX_VALUE) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.POSITIVE;
        return ts;
      } else if (time == Long.MIN_VALUE) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.NEGATIVE;
        return ts;
      }

      secs = time / 1000000;
      nanos = (int) (time - secs * 1000000);
    }
    if (nanos < 0) {
      // Normalize so the fractional part is non-negative.
      secs--;
      nanos += 1000000;
    }
    nanos *= 1000;

    long millis = secs * 1000L;

    ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
    ts.millis = millis;
    ts.nanos = nanos;
    return ts;
  }

  // Decodes a binary TIMESTAMP/TIMESTAMPTZ, converting backend seconds to Java
  // epoch seconds (toJavaSecs). For plain timestamps (timestamptz == false) the
  // backend sends no zone, so the instant is guessed against the supplied tz.
  private ParsedBinaryTimestamp toParsedTimestampBin(TimeZone tz, byte[] bytes,
      boolean timestamptz) throws RedshiftException {
    ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
    if (ts.infinity != null) {
      return ts;
    }

    long secs = ts.millis / 1000L;
    secs = toJavaSecs(secs);
    long millis = secs * 1000L;
    if (!timestamptz) {
      // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
      // time
      millis = guessTimestamp(millis, tz);
    }
    ts.millis = millis;
    return ts;
  }

  // Same as toParsedTimestampBin but for 4-byte abstime values. Note the
  // Julian/Gregorian adjustment (toJavaSecs) is deliberately left commented out.
  private ParsedBinaryTimestamp toParsedTimestampAbsTimeBin(TimeZone tz, byte[] bytes,
      boolean timestamptz) throws RedshiftException {
    ParsedBinaryTimestamp ts = toParsedTimestampBinAbsTimePlain(bytes);
    if (ts.infinity != null) {
      return ts;
    }

    long secs = ts.millis / 1000L;
    // secs = toJavaSecs(secs);
    long millis = secs * 1000L;
    if (!timestamptz) {
      // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
      // time
      millis = guessTimestamp(millis, tz);
    }
    ts.millis = millis;
    return ts;
  }

  // Decodes a binary timestamp shifted only by the Postgres->Java epoch delta,
  // with no calendar-cutover correction; used by the java.time conversion below.
  private ParsedBinaryTimestamp toProlepticParsedTimestampBin(byte[] bytes)
      throws RedshiftException {
    ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
    if (ts.infinity != null) {
      return ts;
    }

    long secs = ts.millis / 1000L;
    // postgres epoc to java epoc
    secs += 946684800L;
    long millis = secs * 1000L;

    ts.millis = millis;
    return ts;
  }

  //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
  /**
   * Returns the local date time object matching the given bytes with {@link Oid#TIMESTAMP} or
   * {@link Oid#TIMESTAMPTZ}.
   * @param bytes The binary encoded local date time value.
   *
   * @return The parsed local date time object.
   * @throws RedshiftException If binary format could not be parsed.
   */
  public LocalDateTime toLocalDateTimeBin(byte[] bytes) throws RedshiftException {
    ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
    if (parsedTimestamp.infinity == Infinity.POSITIVE) {
      return LocalDateTime.MAX;
    } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
      return LocalDateTime.MIN;
    }

    // hardcode utc because the backend does not provide us the timezone
    // Postgres is always UTC
    return LocalDateTime.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos,
        ZoneOffset.UTC);
  }
  //JCP! endif

  /**
   * <p>Given a UTC timestamp {@code millis} finds another point in time that is rendered in given time
   * zone {@code tz} exactly as "millis in UTC".</p>
   *
   * <p>For instance, given 7 Jan 16:00 UTC and tz=GMT+02:00 it returns 7 Jan 14:00 UTC == 7 Jan 16:00
   * GMT+02:00 Note that is not trivial for timestamps near DST change. For such cases, we rely on
   * {@link Calendar} to figure out the proper timestamp.</p>
   *
   * @param millis source timestamp
   * @param tz desired time zone
   * @return timestamp that would be rendered in {@code tz} like {@code millis} in UTC
   */
  private long guessTimestamp(long millis, TimeZone tz) {
    if (tz == null) {
      // If client did not provide us with time zone, we use system default time zone
      tz = getDefaultTz();
    }
    // The story here:
    // Backend provided us with something like '2015-10-04 13:40' and it did NOT provide us with a
    // time zone.
    // On top of that, user asked us to treat the timestamp as if it were in GMT+02:00.
    //
    // The code below creates such a timestamp that is rendered as '2015-10-04 13:40 GMT+02:00'
    // In other words, its UTC value should be 11:40 UTC == 13:40 GMT+02:00.
    // It is not sufficient to just subtract offset as you might cross DST change as you subtract.
    //
    // For instance, on 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
    // Suppose we deal with 2000-03-26 02:00:01
    // If you subtract offset from the timestamp, the time will be "a hour behind" since
    // "just a couple of hours ago the OFFSET was different"
    //
    // To make a long story short: we have UTC timestamp that looks like "2000-03-26 02:00:01" when
    // rendered in UTC tz.
    // We want to know another timestamp that will look like "2000-03-26 02:00:01" in Europe/Moscow
    // time zone.

    if (isSimpleTimeZone(tz.getID())) {
      // For well-known non-DST time zones, just subtract offset
      return millis - tz.getRawOffset();
    }
    // For all the other time zones, enjoy debugging Calendar API
    // Here we do a straight-forward implementation that splits original timestamp into pieces and
    // composes it back.
    // Note: cal.setTimeZone alone is not sufficient as it would alter hour (it will try to keep the
    // same time instant value)
    Calendar cal = calendarWithUserTz;
    cal.setTimeZone(utcTz);
    cal.setTimeInMillis(millis);
    int era = cal.get(Calendar.ERA);
    int year = cal.get(Calendar.YEAR);
    int month = cal.get(Calendar.MONTH);
    int day = cal.get(Calendar.DAY_OF_MONTH);
    int hour = cal.get(Calendar.HOUR_OF_DAY);
    int min = cal.get(Calendar.MINUTE);
    int sec = cal.get(Calendar.SECOND);
    int ms = cal.get(Calendar.MILLISECOND);
    cal.setTimeZone(tz);
    cal.set(Calendar.ERA, era);
    cal.set(Calendar.YEAR, year);
    cal.set(Calendar.MONTH, month);
    cal.set(Calendar.DAY_OF_MONTH, day);
    cal.set(Calendar.HOUR_OF_DAY, hour);
    cal.set(Calendar.MINUTE, min);
    cal.set(Calendar.SECOND, sec);
    cal.set(Calendar.MILLISECOND, ms);
    return cal.getTimeInMillis();
  }

  // "Simple" zones are the fixed-offset GMT*/UTC* ids that never observe DST,
  // for which raw-offset arithmetic is safe.
  private static boolean isSimpleTimeZone(String id) {
    return id.startsWith("GMT") || id.startsWith("UTC");
  }

  /**
   * Extracts the date part from a timestamp.
   *
   * @param millis The timestamp from which to extract the date.
   * @param tz The time zone of the date.
   * @return The extracted date.
   */
  public Date convertToDate(long millis, TimeZone tz) {
    // no adjustments for the inifity hack values
    if (millis <= RedshiftStatement.DATE_NEGATIVE_INFINITY
        || millis >= RedshiftStatement.DATE_POSITIVE_INFINITY) {
      return new Date(millis);
    }
    if (tz == null) {
      tz = getDefaultTz();
    }
    if (isSimpleTimeZone(tz.getID())) {
      // Truncate to 00:00 of the day.
      // Suppose the input date is 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
      // We want it to become 7 Jan 00:00 GMT+02:00
      // 1) Make sure millis becomes 15:40 in UTC, so add offset
      int offset = tz.getRawOffset();
      millis += offset;
      // 2) Truncate hours, minutes, etc. Day is always 86400 seconds, no matter what leap seconds
      // are
      millis = floorDiv(millis, ONEDAY) * ONEDAY;
      // 2) Now millis is 7 Jan 00:00 UTC, however we need that in GMT+02:00, so subtract some
      // offset
      millis -= offset;
      // Now we have brand-new 7 Jan 00:00 GMT+02:00
      return new Date(millis);
    }

    Calendar cal = calendarWithUserTz;
    cal.setTimeZone(tz);
    cal.setTimeInMillis(millis);
    cal.set(Calendar.HOUR_OF_DAY, 0);
    cal.set(Calendar.MINUTE, 0);
    cal.set(Calendar.SECOND, 0);
    cal.set(Calendar.MILLISECOND, 0);

    return new Date(cal.getTimeInMillis());
  }

  /**
   * Extracts the time part from a timestamp. This method ensures the date part of output timestamp
   * looks like 1970-01-01 in given timezone.
   *
   * @param millis The timestamp from which to extract the time.
   * @param tz timezone to use.
   * @return The extracted time.
   */
  public Time convertToTime(long millis, TimeZone tz) {
    if (tz == null) {
      tz = getDefaultTz();
    }
    if (isSimpleTimeZone(tz.getID())) {
      // Leave just time part of the day.
      // Suppose the input date is 2015 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
      // We want it to become 1970 1 Jan 15:40 GMT+02:00
      // 1) Make sure millis becomes 15:40 in UTC, so add offset
      int offset = tz.getRawOffset();
      millis += offset;
      // 2) Truncate year, month, day. Day is always 86400 seconds, no matter what leap seconds are
      millis = floorMod(millis, ONEDAY);
      // 2) Now millis is 1970 1 Jan 15:40 UTC, however we need that in GMT+02:00, so subtract some
      // offset
      millis -= offset;
      // Now we have brand-new 1970 1 Jan 15:40 GMT+02:00
      return new Time(millis);
    }
    Calendar cal = calendarWithUserTz;
    cal.setTimeZone(tz);
    cal.setTimeInMillis(millis);

    cal.set(Calendar.ERA, GregorianCalendar.AD);
    cal.set(Calendar.YEAR, 1970);
    cal.set(Calendar.MONTH, 0);
    cal.set(Calendar.DAY_OF_MONTH, 1);

    return new Time(cal.getTimeInMillis());
  }

  /**
   * Returns the given time value as String matching what the current Redshift server would send
   * in text mode.
   *
   * @param time time value
   * @param withTimeZone whether timezone should be added
   * @return given time value as String
   */
  public String timeToString(java.util.Date time, boolean withTimeZone) {
    Calendar cal = null;
    if (withTimeZone) {
      cal = calendarWithUserTz;
      cal.setTimeZone(timeZoneProvider.get());
    }
    // Dispatch on the concrete java.sql type; Timestamp must be checked before
    // Date since java.sql.Timestamp is not a java.sql.Date.
    if (time instanceof Timestamp) {
      return toString(cal, (Timestamp) time, withTimeZone);
    }
    if (time instanceof Time) {
      return toString(cal, (Time) time, withTimeZone);
    }
    return toString(cal, (Date) time, withTimeZone);
  }

  /**
   * Converts the given Redshift seconds to java seconds. Reverse engineered by inserting varying
   * dates to Redshift and tuning the formula until the java dates matched. See {@link #toPgSecs}
   * for the reverse operation.
   *
   * @param secs Redshift seconds.
   * @return Java seconds.
   */
  private static long toJavaSecs(long secs) {
    // postgres epoc to java epoc
    secs += 946684800L;

    // Julian/Gregorian calendar cutoff point
    if (secs < -12219292800L) { // October 4, 1582 -> October 15, 1582
      secs += 86400 * 10;
      if (secs < -14825808000L) { // 1500-02-28 -> 1500-03-01
        int extraLeaps = (int) ((secs + 14825808000L) / 3155760000L);
        extraLeaps--;
        extraLeaps -= extraLeaps / 4;
        secs += extraLeaps * 86400L;
      }
    }
    return secs;
  }

  /**
   * Converts the given java seconds to Redshift seconds. See {@link #toJavaSecs} for the reverse
   * operation. The conversion is valid for any year 100 BC onwards.
   *
   * @param secs Redshift seconds.
   * @return Java seconds.
   */
  private static long toPgSecs(long secs) {
    // java epoc to postgres epoc
    secs -= 946684800L;

    // Julian/Greagorian calendar cutoff point
    if (secs < -13165977600L) { // October 15, 1582 -> October 4, 1582
      secs -= 86400 * 10;
      if (secs < -15773356800L) { // 1500-03-01 -> 1500-02-28
        int years = (int) ((secs + 15773356800L) / -3155823050L);
        years++;
        years -= years / 4;
        secs += years * 86400L;
      }
    }

    return secs;
  }

  /**
   * Converts the SQL Date to binary representation for {@link Oid#DATE}.
   *
   * @param tz The timezone used.
   * @param bytes The binary encoded date value.
   * @param value value
   * @throws RedshiftException If binary format could not be parsed.
   */
  public void toBinDate(TimeZone tz, byte[] bytes, Date value) throws RedshiftException {
    long millis = value.getTime();

    if (tz == null) {
      tz = getDefaultTz();
    }
    // It "getOffset" is UNTESTED
    // See com.amazon.redshift.jdbc.AbstractJdbc2Statement.setDate(int, java.sql.Date,
    // java.util.Calendar)
    // The problem is we typically do not know for sure what is the exact required date/timestamp
    // type
    // Thus pgjdbc sticks to text transfer.
    millis += tz.getOffset(millis);

    long secs = toPgSecs(millis / 1000);
    // Binary DATE is a day count (seconds / 86400) written as a 4-byte int.
    ByteConverter.int4(bytes, 0, (int) (secs / 86400));
  }

  /**
   * Converts the SQL Timestamp to binary representation for {@link Oid#TIMESTAMP}.
   *
   * @param tz The timezone used.
   * @param bytes The binary encoded Timestamp value.
   * @param value value
   * @throws RedshiftException If binary format could not be parsed.
   */
  public void toBinTimestamp(TimeZone tz, byte[] bytes, Timestamp value) throws RedshiftException {
    long millis = value.getTime();

    if (tz == null) {
      tz = getDefaultTz();
    }
    // It "getOffset" is UNTESTED
    // See com.amazon.redshift.jdbc.AbstractJdbc2Statement.setDate(int, java.sql.Date,
    // java.util.Calendar)
    // The problem is we typically do not know for sure what is the exact required date/timestamp
    // type
    // Thus pgjdbc sticks to text transfer.
    // millis += tz.getOffset(millis);

    long secs = toPgSecs(millis / 1000);
    // Binary TIMESTAMP is microseconds written as an 8-byte int.
    ByteConverter.int8(bytes, 0, (long) (secs * 1000000));
  }

  /**
   * Converts backend's TimeZone parameter to java format.
   * Notable difference: backend's gmt-3 is GMT+03 in Java.
   *
   * @param timeZone time zone to use
   * @return java TimeZone
   */
  public static TimeZone parseBackendTimeZone(String timeZone) {
    if (timeZone.startsWith("GMT")) {
      TimeZone tz = GMT_ZONES.get(timeZone);
      if (tz != null) {
        return tz;
      }
    }
    return TimeZone.getTimeZone(timeZone);
  }

  // Local equivalent of Math.floorDiv(long, long) — presumably kept for
  // pre-Java-8 compatibility; TODO confirm before replacing with Math.floorDiv.
  private static long floorDiv(long x, long y) {
    long r = x / y;
    // if the signs are different and modulo not zero, round down
    if ((x ^ y) < 0 && (r * y != x)) {
      r--;
    }
    return r;
  }

  // Local equivalent of Math.floorMod(long, long), built on floorDiv above.
  private static long floorMod(long x, long y) {
    return x - floorDiv(x, y) * y;
  }
}
8,536
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/UUIDArrayAssistant.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.jdbc2.ArrayAssistant;
import com.amazon.redshift.util.ByteConverter;

import java.util.UUID;

/**
 * {@link ArrayAssistant} for array elements of the UUID type. Elements arrive
 * either as the 16-byte binary wire representation or as a textual literal.
 */
public class UUIDArrayAssistant implements ArrayAssistant {

  @Override
  public Class<?> baseType() {
    return UUID.class;
  }

  @Override
  public Object buildElement(byte[] bytes, int pos, int len) {
    // Binary form is two consecutive big-endian 64-bit halves,
    // most-significant bits first.
    long mostSigBits = ByteConverter.int8(bytes, pos);
    long leastSigBits = ByteConverter.int8(bytes, pos + 8);
    return new UUID(mostSigBits, leastSigBits);
  }

  @Override
  public Object buildElement(String literal) {
    return UUID.fromString(literal);
  }
}
8,537
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftClob.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.sql.Clob;
import java.sql.SQLException;

/**
 * CLOB implementation backed by a server-side large object.
 *
 * <p>Read paths decode bytes using the connection's configured character
 * encoding. Most positional mutators and search methods are not implemented
 * and throw via {@code Driver.notImplemented}.</p>
 */
public class RedshiftClob extends AbstractBlobClob implements java.sql.Clob {

  public RedshiftClob(com.amazon.redshift.core.BaseConnection conn, long oid) throws java.sql.SQLException {
    super(conn, oid);
  }

  /** Not implemented. */
  public synchronized Reader getCharacterStream(long pos, long length) throws SQLException {
    checkFreed();
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "getCharacterStream(long, long)");
  }

  /** Not implemented. */
  public synchronized int setString(long pos, String str) throws SQLException {
    checkFreed();
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "setString(long,str)");
  }

  /** Not implemented. */
  public synchronized int setString(long pos, String str, int offset, int len) throws SQLException {
    checkFreed();
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "setString(long,String,int,int)");
  }

  /** Not implemented. */
  public synchronized java.io.OutputStream setAsciiStream(long pos) throws SQLException {
    checkFreed();
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "setAsciiStream(long)");
  }

  /** Not implemented. */
  public synchronized java.io.Writer setCharacterStream(long pos) throws SQLException {
    checkFreed();
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "setCharacteStream(long)");
  }

  /** Returns the raw large-object byte stream. */
  public synchronized InputStream getAsciiStream() throws SQLException {
    return getBinaryStream();
  }

  /** Returns a reader over the large object, decoded with the connection's encoding. */
  public synchronized Reader getCharacterStream() throws SQLException {
    Charset connectionCharset = Charset.forName(conn.getEncoding().name());
    return new InputStreamReader(getBinaryStream(), connectionCharset);
  }

  /**
   * Returns a substring of the CLOB starting at 1-based position {@code i}.
   *
   * @param i 1-based start position
   * @param j number of bytes to read
   * @return decoded substring
   * @throws SQLException if the position is invalid or the object was freed
   */
  public synchronized String getSubString(long i, int j) throws SQLException {
    assertPosition(i, j);
    getLo(false).seek((int) i - 1);
    // Fix: decode with the connection's encoding, consistent with
    // getCharacterStream() above. Previously this used new String(byte[]),
    // i.e. the JVM's platform-default charset, which mangles non-ASCII data
    // whenever the platform default differs from the server encoding.
    // NOTE(review): this still reads j *bytes*, not j characters; for
    // multi-byte encodings the result may contain fewer than j characters —
    // behavior unchanged here.
    Charset connectionCharset = Charset.forName(conn.getEncoding().name());
    return new String(getLo(false).read(j), connectionCharset);
  }

  /**
   * For now, this is not implemented.
   */
  public synchronized long position(String pattern, long start) throws SQLException {
    checkFreed();
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "position(String,long)");
  }

  /**
   * This should be simply passing the byte value of the pattern Blob.
   */
  public synchronized long position(Clob pattern, long start) throws SQLException {
    checkFreed();
    throw com.amazon.redshift.Driver.notImplemented(this.getClass(), "position(Clob,start)");
  }
}
8,538
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftBlob.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.largeobject.LargeObject;

import java.sql.SQLException;

/**
 * BLOB implementation backed by a server-side large object.
 */
public class RedshiftBlob extends AbstractBlobClob implements java.sql.Blob {

  public RedshiftBlob(com.amazon.redshift.core.BaseConnection conn, long oid) throws SQLException {
    super(conn, oid);
  }

  /**
   * Returns a stream over {@code length} bytes of the BLOB starting at the
   * 1-based position {@code pos}.
   */
  public synchronized java.io.InputStream getBinaryStream(long pos, long length)
      throws SQLException {
    checkFreed();
    // Read from a private copy of the large object so this stream's cursor
    // does not disturb other readers; the copy is registered for cleanup.
    LargeObject slice = getLo(false).copy();
    addSubLO(slice);
    // SQL positions are 1-based; large-object offsets are 0-based.
    if (pos <= Integer.MAX_VALUE) {
      slice.seek((int) pos - 1, LargeObject.SEEK_SET);
    } else {
      slice.seek64(pos - 1, LargeObject.SEEK_SET);
    }
    return slice.getInputStream(length);
  }

  /** Writes the whole array at the 1-based position {@code pos}. */
  public synchronized int setBytes(long pos, byte[] bytes) throws SQLException {
    return setBytes(pos, bytes, 0, bytes.length);
  }

  /**
   * Writes {@code len} bytes of {@code bytes} (starting at {@code offset})
   * at the 1-based position {@code pos}.
   *
   * @return the number of bytes written ({@code len})
   */
  public synchronized int setBytes(long pos, byte[] bytes, int offset, int len)
      throws SQLException {
    assertPosition(pos);
    getLo(true).seek((int) (pos - 1));
    getLo(true).write(bytes, offset, len);
    return len;
  }
}
8,539
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/StatementCancelState.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

/**
 * Represents {@link RedshiftStatementImpl#cancel()} state.
 */
// NOTE(review): per-constant semantics below are inferred from the constant
// names; confirm against the state transitions in RedshiftStatementImpl.cancel().
enum StatementCancelState {
  // No query currently executing on the statement.
  IDLE,
  // A query is executing and no cancel has been requested.
  IN_QUERY,
  // cancel() has been invoked and the cancel request is in progress.
  CANCELING,
  // The cancel request has completed.
  CANCELLED
}
8,540
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/FieldMetadata.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.util.CanEstimateSize;

/**
 * This is an internal class to hold field metadata info like table name, column name, etc.
 * This class is not meant to be used outside of pgjdbc.
 */
public class FieldMetadata implements CanEstimateSize {

  /** Cache key identifying a column by its table OID and position within that table. */
  public static class Key {
    final int tableOid;
    final int positionInTable;

    Key(int tableOid, int positionInTable) {
      this.positionInTable = positionInTable;
      this.tableOid = tableOid;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      Key other = (Key) o;
      return tableOid == other.tableOid && positionInTable == other.positionInTable;
    }

    @Override
    public int hashCode() {
      // Same value as the classic result = 31 * tableOid + positionInTable.
      return 31 * tableOid + positionInTable;
    }

    @Override
    public String toString() {
      return "Key{"
          + "tableOid=" + tableOid
          + ", positionInTable=" + positionInTable
          + '}';
    }
  }

  final String columnName;
  final String tableName;
  final String schemaName;
  final int nullable;
  final boolean autoIncrement;
  final String catalogName;
  final boolean readOnly;
  final boolean searchable;
  final boolean caseSensitive;

  /** Column-name-only metadata; everything else defaults to empty/unknown. */
  public FieldMetadata(String columnName) {
    this(columnName, "", "", RedshiftResultSetMetaDataImpl.columnNullableUnknown, false);
  }

  /** Metadata without catalog/flag details; defaults: not read-only, searchable, not case-sensitive. */
  public FieldMetadata(String columnName, String tableName, String schemaName, int nullable,
      boolean autoIncrement) {
    this(columnName, tableName, schemaName, nullable, autoIncrement, "", false, true, false);
  }

  public FieldMetadata(String columnName, String tableName, String schemaName, int nullable,
      boolean autoIncrement, String catalogName, boolean readOnly, boolean searchable,
      boolean caseSensitive) {
    this.columnName = columnName;
    this.tableName = tableName;
    this.schemaName = schemaName;
    this.nullable = nullable;
    this.autoIncrement = autoIncrement;
    this.catalogName = catalogName;
    this.readOnly = readOnly;
    this.searchable = searchable;
    this.caseSensitive = caseSensitive;
  }

  /** Approximate in-memory footprint in bytes (two bytes per UTF-16 char plus int/flag fields). */
  public long getSize() {
    return columnName.length() * 2
        + tableName.length() * 2
        + schemaName.length() * 2
        + 4L
        + 1L
        + catalogName.length() * 2
        + 1L
        + 1L;
  }

  @Override
  public String toString() {
    return "FieldMetadata{"
        + "columnName='" + columnName + '\''
        + ", tableName='" + tableName + '\''
        + ", schemaName='" + schemaName + '\''
        + ", nullable=" + nullable
        + ", autoIncrement=" + autoIncrement
        + ", catalogName='" + catalogName + '\''
        + ", readOnly=" + readOnly
        + ", searchable=" + searchable
        + ", caseSensitive=" + caseSensitive
        + '}';
  }
}
8,541
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/EscapedFunctions.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.lang.reflect.Method;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;

/**
 * This class stores supported escaped functions. Each public {@code sqlXxx} method
 * translates one JDBC escape-syntax function call (already parsed into its argument
 * list) into the equivalent native SQL text. Translators are discovered reflectively
 * by {@link #createFunctionMap()}, so the {@code sql} name prefix is significant.
 *
 * @author Xavier Poinsard
 * @deprecated see {@link EscapedFunctions2}
 */
@Deprecated
public class EscapedFunctions {
  // numeric functions names
  public static final String ABS = "abs";
  public static final String ACOS = "acos";
  public static final String ASIN = "asin";
  public static final String ATAN = "atan";
  public static final String ATAN2 = "atan2";
  public static final String CEILING = "ceiling";
  public static final String COS = "cos";
  public static final String COT = "cot";
  public static final String DEGREES = "degrees";
  public static final String EXP = "exp";
  public static final String FLOOR = "floor";
  public static final String LOG = "log";
  public static final String LOG10 = "log10";
  public static final String MOD = "mod";
  public static final String PI = "pi";
  public static final String POWER = "power";
  public static final String RADIANS = "radians";
  public static final String RANDOM = "random";
  public static final String ROUND = "round";
  public static final String SIGN = "sign";
  public static final String SIN = "sin";
  public static final String SQRT = "sqrt";
  public static final String TAN = "tan";
  public static final String TRUNCATE = "truncate";

  // string function names
  public static final String ASCII = "ascii";
  public static final String CHAR = "char";
  public static final String CHAR_LENGTH = "char_length";
  public static final String CHARACTER_LENGTH = "character_length";
  public static final String CONCAT = "concat";
  public static final String INSERT = "insert"; // change arguments order
  public static final String LCASE = "lcase";
  public static final String LEFT = "left";
  public static final String LENGTH = "length";
  public static final String LOCATE = "locate"; // the 3 args version duplicate args
  public static final String LTRIM = "ltrim";
  public static final String OCTET_LENGTH = "octet_length";
  public static final String POSITION = "position";
  public static final String REPEAT = "repeat";
  public static final String REPLACE = "replace";
  public static final String RIGHT = "right"; // duplicate args
  public static final String RTRIM = "rtrim";
  public static final String SPACE = "space";
  public static final String SUBSTRING = "substring";
  public static final String UCASE = "ucase";
  // soundex is implemented on the server side by
  // the contrib/fuzzystrmatch module. We provide a translation
  // for this in the driver, but since we don't want to bother with run
  // time detection of this module's installation we don't report this
  // method as supported in DatabaseMetaData.
  // difference is currently unsupported entirely.

  // date time function names
  public static final String CURDATE = "curdate";
  public static final String CURTIME = "curtime";
  public static final String DAYNAME = "dayname";
  public static final String DAYOFMONTH = "dayofmonth";
  public static final String DAYOFWEEK = "dayofweek";
  public static final String DAYOFYEAR = "dayofyear";
  public static final String HOUR = "hour";
  public static final String MINUTE = "minute";
  public static final String MONTH = "month";
  public static final String MONTHNAME = "monthname";
  public static final String NOW = "now";
  public static final String QUARTER = "quarter";
  public static final String SECOND = "second";
  public static final String WEEK = "week";
  public static final String YEAR = "year";

  // for timestampadd and timestampdiff the fractional part of second is not supported
  // by the backend
  // timestampdiff is very partially supported
  public static final String TIMESTAMPADD = "timestampadd";
  public static final String TIMESTAMPDIFF = "timestampdiff";

  // constants for timestampadd and timestampdiff
  public static final String SQL_TSI_ROOT = "SQL_TSI_";
  public static final String SQL_TSI_DAY = "DAY";
  public static final String SQL_TSI_FRAC_SECOND = "FRAC_SECOND";
  public static final String SQL_TSI_HOUR = "HOUR";
  public static final String SQL_TSI_MINUTE = "MINUTE";
  public static final String SQL_TSI_MONTH = "MONTH";
  public static final String SQL_TSI_QUARTER = "QUARTER";
  public static final String SQL_TSI_SECOND = "SECOND";
  public static final String SQL_TSI_WEEK = "WEEK";
  public static final String SQL_TSI_YEAR = "YEAR";

  // system functions
  public static final String DATABASE = "database";
  public static final String IFNULL = "ifnull";
  public static final String USER = "user";

  /**
   * storage for functions implementations.
   * Maps lowercased translator method name (e.g. "sqlceiling") to its Method object.
   */
  private static Map<String, Method> functionMap = createFunctionMap();

  // Reflectively collects every declared method whose name starts with "sql";
  // renaming any translator method would silently drop its escape support.
  private static Map<String, Method> createFunctionMap() {
    Method[] arrayMeths = EscapedFunctions.class.getDeclaredMethods();
    Map<String, Method> functionMap = new HashMap<String, Method>(arrayMeths.length * 2);
    for (Method meth : arrayMeths) {
      if (meth.getName().startsWith("sql")) {
        // Locale.US avoids surprises with locale-sensitive lowercasing (e.g. Turkish 'I').
        functionMap.put(meth.getName().toLowerCase(Locale.US), meth);
      }
    }
    return functionMap;
  }

  /**
   * get Method object implementing the given function.
   *
   * @param functionName name of the searched function
   * @return a Method object or null if not found
   */
  public static Method getFunction(String functionName) {
    return functionMap.get("sql" + functionName.toLowerCase(Locale.US));
  }

  // ** numeric functions translations **

  /**
   * ceiling to ceil translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlceiling(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("ceil(", "ceiling", parsedArgs);
  }

  /**
   * log to ln translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqllog(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("ln(", "log", parsedArgs);
  }

  /**
   * log10 to log translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqllog10(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("log(", "log10", parsedArgs);
  }

  /**
   * power to pow translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlpower(List<?> parsedArgs) throws SQLException {
    return twoArgumentsFunctionCall("pow(", "power", parsedArgs);
  }

  /**
   * truncate to trunc translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqltruncate(List<?> parsedArgs) throws SQLException {
    return twoArgumentsFunctionCall("trunc(", "truncate", parsedArgs);
  }

  // ** string functions translations **

  /**
   * char to chr translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlchar(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("chr(", "char", parsedArgs);
  }

  /**
   * concat translation: joins any number of arguments with the SQL || operator.
   *
   * @param parsedArgs arguments
   * @return sql call
   */
  public static String sqlconcat(List<?> parsedArgs) {
    StringBuilder buf = new StringBuilder();
    buf.append('(');
    for (int iArg = 0; iArg < parsedArgs.size(); iArg++) {
      buf.append(parsedArgs.get(iArg));
      if (iArg != (parsedArgs.size() - 1)) {
        buf.append(" || ");
      }
    }
    return buf.append(')').toString();
  }

  /**
   * insert to overlay translation. Note the argument order changes:
   * insert(str, start, len, replacement) becomes overlay(str placing replacement from start for len).
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlinsert(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 4) {
      throw new RedshiftException(GT.tr("{0} function takes four and only four argument.", "insert"),
          RedshiftState.SYNTAX_ERROR);
    }
    StringBuilder buf = new StringBuilder();
    buf.append("overlay(");
    buf.append(parsedArgs.get(0)).append(" placing ").append(parsedArgs.get(3));
    buf.append(" from ").append(parsedArgs.get(1)).append(" for ").append(parsedArgs.get(2));
    return buf.append(')').toString();
  }

  /**
   * lcase to lower translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqllcase(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("lower(", "lcase", parsedArgs);
  }

  /**
   * left to substring translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlleft(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 2) {
      throw new RedshiftException(GT.tr("{0} function takes two and only two arguments.", "left"),
          RedshiftState.SYNTAX_ERROR);
    }
    StringBuilder buf = new StringBuilder();
    buf.append("substring(");
    buf.append(parsedArgs.get(0)).append(" for ").append(parsedArgs.get(1));
    return buf.append(')').toString();
  }

  /**
   * length translation. Trailing blanks are stripped first so CHAR(n) padding
   * is not counted, per the JDBC escape semantics.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqllength(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "length"),
          RedshiftState.SYNTAX_ERROR);
    }
    StringBuilder buf = new StringBuilder();
    buf.append("length(trim(trailing from ");
    buf.append(parsedArgs.get(0));
    return buf.append("))").toString();
  }

  /**
   * locate translation. The three-argument form duplicates arguments in the
   * generated SQL, so side-effecting argument expressions may run twice.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqllocate(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() == 2) {
      return "position(" + parsedArgs.get(0) + " in " + parsedArgs.get(1) + ")";
    } else if (parsedArgs.size() == 3) {
      String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1) + " from "
          + parsedArgs.get(2) + "))";
      // sign(tmp) keeps the result 0 when not found instead of returning the offset.
      return "(" + parsedArgs.get(2) + "*sign(" + tmp + ")+" + tmp + ")";
    } else {
      throw new RedshiftException(GT.tr("{0} function takes two or three arguments.", "locate"),
          RedshiftState.SYNTAX_ERROR);
    }
  }

  /**
   * ltrim translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlltrim(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("trim(leading from ", "ltrim", parsedArgs);
  }

  /**
   * right to substring translation. The first argument appears twice in the
   * generated SQL (once inside length()), so it may be evaluated twice.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlright(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 2) {
      throw new RedshiftException(GT.tr("{0} function takes two and only two arguments.", "right"),
          RedshiftState.SYNTAX_ERROR);
    }
    StringBuilder buf = new StringBuilder();
    buf.append("substring(");
    buf.append(parsedArgs.get(0))
        .append(" from (length(")
        .append(parsedArgs.get(0))
        .append(")+1-")
        .append(parsedArgs.get(1));
    return buf.append("))").toString();
  }

  /**
   * rtrim translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlrtrim(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("trim(trailing from ", "rtrim", parsedArgs);
  }

  /**
   * space translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlspace(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("repeat(' ',", "space", parsedArgs);
  }

  /**
   * substring to substr translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlsubstring(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() == 2) {
      return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + ")";
    } else if (parsedArgs.size() == 3) {
      return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + "," + parsedArgs.get(2)
          + ")";
    } else {
      throw new RedshiftException(GT.tr("{0} function takes two or three arguments.", "substring"),
          RedshiftState.SYNTAX_ERROR);
    }
  }

  /**
   * ucase to upper translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlucase(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("upper(", "ucase", parsedArgs);
  }

  /**
   * curdate to current_date translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlcurdate(List<?> parsedArgs) throws SQLException {
    if (!parsedArgs.isEmpty()) {
      throw new RedshiftException(GT.tr("{0} function doesn''t take any argument.", "curdate"),
          RedshiftState.SYNTAX_ERROR);
    }
    return "current_date";
  }

  /**
   * curtime to current_time translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlcurtime(List<?> parsedArgs) throws SQLException {
    if (!parsedArgs.isEmpty()) {
      throw new RedshiftException(GT.tr("{0} function doesn''t take any argument.", "curtime"),
          RedshiftState.SYNTAX_ERROR);
    }
    return "current_time";
  }

  /**
   * dayname translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqldayname(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "dayname"),
          RedshiftState.SYNTAX_ERROR);
    }
    return "to_char(" + parsedArgs.get(0) + ",'Day')";
  }

  /**
   * dayofmonth translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqldayofmonth(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(day from ", "dayofmonth", parsedArgs);
  }

  /**
   * dayofweek translation adding 1 to Redshift function since we expect values from 1 to 7.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqldayofweek(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "dayofweek"),
          RedshiftState.SYNTAX_ERROR);
    }
    // extract(dow ...) is 0-based (Sunday=0); JDBC expects 1-based (Sunday=1).
    return "extract(dow from " + parsedArgs.get(0) + ")+1";
  }

  /**
   * dayofyear translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqldayofyear(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(doy from ", "dayofyear", parsedArgs);
  }

  /**
   * hour translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlhour(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(hour from ", "hour", parsedArgs);
  }

  /**
   * minute translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlminute(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(minute from ", "minute", parsedArgs);
  }

  /**
   * month translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlmonth(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(month from ", "month", parsedArgs);
  }

  /**
   * monthname translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlmonthname(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", "monthname"),
          RedshiftState.SYNTAX_ERROR);
    }
    return "to_char(" + parsedArgs.get(0) + ",'Month')";
  }

  /**
   * quarter translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlquarter(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(quarter from ", "quarter", parsedArgs);
  }

  /**
   * second translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlsecond(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(second from ", "second", parsedArgs);
  }

  /**
   * week translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlweek(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(week from ", "week", parsedArgs);
  }

  /**
   * year translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlyear(List<?> parsedArgs) throws SQLException {
    return singleArgumentFunctionCall("extract(year from ", "year", parsedArgs);
  }

  /**
   * time stamp add: translated to (interval + timestamp) arithmetic.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqltimestampadd(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 3) {
      throw new RedshiftException(
          GT.tr("{0} function takes three and only three arguments.", "timestampadd"),
          RedshiftState.SYNTAX_ERROR);
    }
    String interval =
        EscapedFunctions.constantToInterval(parsedArgs.get(0).toString(), parsedArgs.get(1).toString());
    StringBuilder buf = new StringBuilder();
    buf.append("(").append(interval).append("+");
    buf.append(parsedArgs.get(2)).append(")");
    return buf.toString();
  }

  // Converts a SQL_TSI_* unit constant plus a count expression into a CAST(... as interval).
  // SQL_TSI_FRAC_SECOND is explicitly unsupported.
  private static String constantToInterval(String type, String value) throws SQLException {
    if (!type.startsWith(SQL_TSI_ROOT)) {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
          RedshiftState.SYNTAX_ERROR);
    }
    String shortType = type.substring(SQL_TSI_ROOT.length());
    if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) {
      return "CAST(" + value + " || ' day' as interval)";
    } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) {
      return "CAST(" + value + " || ' second' as interval)";
    } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) {
      return "CAST(" + value + " || ' hour' as interval)";
    } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) {
      return "CAST(" + value + " || ' minute' as interval)";
    } else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) {
      return "CAST(" + value + " || ' month' as interval)";
    } else if (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) {
      // There is no 'quarter' interval unit; express it as 3 months.
      return "CAST((" + value + "::int * 3) || ' month' as interval)";
    } else if (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) {
      return "CAST(" + value + " || ' week' as interval)";
    } else if (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) {
      return "CAST(" + value + " || ' year' as interval)";
    } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"),
          RedshiftState.SYNTAX_ERROR);
    } else {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
          RedshiftState.SYNTAX_ERROR);
    }
  }

  /**
   * time stamp diff: translated to extract(unit from (ts2 - ts1)). Only the
   * units accepted by {@code constantToDatePart} are supported.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqltimestampdiff(List<?> parsedArgs) throws SQLException {
    if (parsedArgs.size() != 3) {
      throw new RedshiftException(
          GT.tr("{0} function takes three and only three arguments.", "timestampdiff"),
          RedshiftState.SYNTAX_ERROR);
    }
    String datePart = EscapedFunctions.constantToDatePart(parsedArgs.get(0).toString());
    StringBuilder buf = new StringBuilder();
    buf.append("extract( ")
        .append(datePart)
        .append(" from (")
        .append(parsedArgs.get(2))
        .append("-")
        .append(parsedArgs.get(1))
        .append("))");
    return buf.toString();
  }

  // Maps a SQL_TSI_* constant to the extract() field name for timestampdiff.
  // Deliberately supports only units where extract() over an interval difference
  // yields the right answer; see the linked discussion below.
  private static String constantToDatePart(String type) throws SQLException {
    if (!type.startsWith(SQL_TSI_ROOT)) {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
          RedshiftState.SYNTAX_ERROR);
    }
    String shortType = type.substring(SQL_TSI_ROOT.length());
    if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) {
      return "day";
    } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) {
      return "second";
    } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) {
      return "hour";
    } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) {
      return "minute";
    } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"),
          RedshiftState.SYNTAX_ERROR);
    } else {
      throw new RedshiftException(GT.tr("Interval {0} not yet implemented", type),
          RedshiftState.SYNTAX_ERROR);
    }
    // See http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php
    /*
     * else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) return "month"; else if
     * (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) return "quarter"; else if
     * (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) return "week"; else if
     * (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) return "year";
     */
  }

  /**
   * database translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqldatabase(List<?> parsedArgs) throws SQLException {
    if (!parsedArgs.isEmpty()) {
      throw new RedshiftException(GT.tr("{0} function doesn''t take any argument.", "database"),
          RedshiftState.SYNTAX_ERROR);
    }
    return "current_database()";
  }

  /**
   * ifnull translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqlifnull(List<?> parsedArgs) throws SQLException {
    return twoArgumentsFunctionCall("coalesce(", "ifnull", parsedArgs);
  }

  /**
   * user translation.
   *
   * @param parsedArgs arguments
   * @return sql call
   * @throws SQLException if something wrong happens
   */
  public static String sqluser(List<?> parsedArgs) throws SQLException {
    if (!parsedArgs.isEmpty()) {
      throw new RedshiftException(GT.tr("{0} function doesn''t take any argument.", "user"),
          RedshiftState.SYNTAX_ERROR);
    }
    return "user";
  }

  // Shared helper: emits call + arg + ")" after validating the single-argument arity.
  // 'call' must already contain the opening parenthesis (and any leading SQL).
  private static String singleArgumentFunctionCall(String call, String functionName,
      List<?> parsedArgs) throws RedshiftException {
    if (parsedArgs.size() != 1) {
      throw new RedshiftException(GT.tr("{0} function takes one and only one argument.", functionName),
          RedshiftState.SYNTAX_ERROR);
    }
    StringBuilder buf = new StringBuilder();
    buf.append(call);
    buf.append(parsedArgs.get(0));
    return buf.append(')').toString();
  }

  // Shared helper: emits call + arg0 + "," + arg1 + ")" after validating the
  // two-argument arity. 'call' must already contain the opening parenthesis.
  private static String twoArgumentsFunctionCall(String call, String functionName,
      List<?> parsedArgs) throws RedshiftException {
    if (parsedArgs.size() != 2) {
      throw new RedshiftException(GT.tr("{0} function takes two and only two arguments.", functionName),
          RedshiftState.SYNTAX_ERROR);
    }
    StringBuilder buf = new StringBuilder();
    buf.append(call);
    buf.append(parsedArgs.get(0)).append(',').append(parsedArgs.get(1));
    return buf.append(')').toString();
  }
}
8,542
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/RedshiftWarningWrapper.java
/*
 * Copyright (c) 2017, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import java.sql.SQLWarning;
import java.util.Objects;

/**
 * Wrapper class for SQLWarnings that provides an optimisation to add new warnings to the
 * tail of the SQLWarning singly linked list, avoiding the Θ(n) insertion time of calling
 * {@code setNextWarning} on the head. By encapsulating this into a single object it allows
 * users (i.e. RedshiftStatement) to atomically set and clear the warning chain.
 *
 * <p>Not thread-safe; callers are expected to synchronize externally if shared.</p>
 */
class RedshiftWarningWrapper {

  /** Head of the warning chain; never null and never changes after construction. */
  private final SQLWarning firstWarning;

  /** Tail of the warning chain; new warnings are linked here in O(1). */
  private SQLWarning lastWarning;

  /**
   * Wraps an initial warning as a one-element chain.
   *
   * @param warning the first warning; must not be null
   * @throws NullPointerException if {@code warning} is null
   */
  RedshiftWarningWrapper(SQLWarning warning) {
    // Fail fast here: a null head would otherwise only surface later as a
    // confusing NullPointerException inside addWarning().
    this.firstWarning = Objects.requireNonNull(warning, "warning");
    this.lastWarning = warning;
  }

  /**
   * Appends a warning to the tail of the chain in constant time.
   *
   * @param sqlWarning the warning to append; must not be null
   * @throws NullPointerException if {@code sqlWarning} is null
   */
  void addWarning(SQLWarning sqlWarning) {
    Objects.requireNonNull(sqlWarning, "sqlWarning");
    lastWarning.setNextWarning(sqlWarning);
    lastWarning = sqlWarning;
  }

  /**
   * Returns the head of the warning chain.
   *
   * @return the first warning passed at construction time
   */
  SQLWarning getFirstWarning() {
    return firstWarning;
  }
}
8,543
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/SslMode.java
/* * Copyright (c) 2018, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc; import com.amazon.redshift.RedshiftProperty; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.util.Properties; public enum SslMode { /** * Do not use encrypted connections. */ DISABLE("disable"), /** * Start with non-encrypted connection, then try encrypted one. */ ALLOW("allow"), /** * Start with encrypted connection, fallback to non-encrypted (default). */ PREFER("prefer"), /** * Ensure connection is encrypted. */ REQUIRE("require"), /** * Ensure connection is encrypted, and client trusts server certificate. */ VERIFY_CA("verify-ca"), /** * Ensure connection is encrypted, client trusts server certificate, and server hostname matches * the one listed in the server certificate. */ VERIFY_FULL("verify-full"), ; public static final SslMode[] VALUES = values(); public final String value; SslMode(String value) { this.value = value; } public boolean requireEncryption() { return this.compareTo(REQUIRE) >= 0; } public boolean verifyCertificate() { return this == VERIFY_CA || this == VERIFY_FULL; } public boolean verifyPeerName() { return this == VERIFY_FULL; } public static SslMode of(Properties info) throws RedshiftException { String sslmodeProp = RedshiftProperty.SSL_MODE.get(info); String authMechProp = RedshiftProperty.AUTH_MECH.get(info); String sslmode = (sslmodeProp != null) ? sslmodeProp : authMechProp; // If sslmode is not set, fallback to ssl parameter if (sslmode == null) { if (RedshiftProperty.SSL.getBoolean(info) || "".equals(RedshiftProperty.SSL.get(info))) { return VERIFY_CA; // VERIFY_FULL; } String iamAuthStr = RedshiftProperty.IAM_AUTH.get(info); Boolean iamAuth = (iamAuthStr == null) ? false : Boolean.parseBoolean(iamAuthStr); return (iamAuth) ? 
PREFER : DISABLE; } for (SslMode sslMode : VALUES) { if (sslMode.value.equalsIgnoreCase(sslmode)) { return sslMode; } } throw new RedshiftException(GT.tr("Invalid sslmode value: {0}", sslmode), RedshiftState.CONNECTION_UNABLE_TO_CONNECT); } }
8,544
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc/DataSource.java
package com.amazon.redshift.jdbc;

import com.amazon.redshift.ds.RedshiftConnectionPoolDataSource;

/**
 * Backward compatible DataSource class.
 *
 * <p>Empty subclass kept so that configurations referencing the legacy class name
 * {@code com.amazon.redshift.jdbc.DataSource} continue to resolve; all behavior is
 * inherited unchanged from {@link RedshiftConnectionPoolDataSource}.</p>
 *
 * @author iggarish
 */
public class DataSource extends RedshiftConnectionPoolDataSource {

}
8,545
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/largeobject/LargeObjectManager.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.largeobject;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.fastpath.Fastpath;
import com.amazon.redshift.fastpath.FastpathArg;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * This class implements the large object interface to com.amazon.redshift.
 *
 * <p>It provides methods that allow client code to create, open and delete large objects from the
 * database. When opening an object, an instance of com.amazon.redshift.largeobject.LargeObject is
 * returned, and its methods then allow access to the object.</p>
 *
 * <p>This class can only be created by {@link BaseConnection}</p>
 *
 * <p>To get access to this class, use the following segment of code:</p>
 *
 * <pre>
 * import com.amazon.redshift.largeobject.*;
 *
 * Connection conn;
 * LargeObjectManager lobj;
 *
 * ... code that opens a connection ...
 *
 * lobj = ((com.amazon.redshift.RedshiftConnection)myconn).getLargeObjectAPI();
 * </pre>
 *
 * <p>Normally, client code would use the getAsciiStream, getBinaryStream, or getUnicodeStream
 * methods in ResultSet, or setAsciiStream, setBinaryStream, or setUnicodeStream methods in
 * PreparedStatement to access Large Objects.</p>
 *
 * <p>However, sometimes lower level access to Large Objects are required, that are not supported
 * by the JDBC specification.</p>
 *
 * <p>Refer to com.amazon.redshift.largeobject.LargeObject on how to manipulate the contents of a
 * Large Object.</p>
 *
 * @see java.sql.ResultSet#getAsciiStream
 * @see java.sql.ResultSet#getBinaryStream
 * @see java.sql.ResultSet#getUnicodeStream
 * @see java.sql.PreparedStatement#setAsciiStream
 * @see java.sql.PreparedStatement#setBinaryStream
 * @see java.sql.PreparedStatement#setUnicodeStream
 */
public class LargeObjectManager {
  // the fastpath api for this connection; used for all lo_* server calls
  private Fastpath fp;
  // owning connection; auto-commit state is checked before open/create
  private BaseConnection conn;

  /**
   * This mode indicates we want to write to an object.
   */
  public static final int WRITE = 0x00020000;

  /**
   * This mode indicates we want to read an object.
   */
  public static final int READ = 0x00040000;

  /**
   * This mode is the default. It indicates we want read and write access to a large object.
   */
  public static final int READWRITE = READ | WRITE;

  /**
   * This prevents us being created by mere mortals.
   */
  private LargeObjectManager() {
  }

  /**
   * <p>Constructs the LargeObject API.</p>
   *
   * <p><b>Important Notice</b> <br>
   * This method should only be called by {@link BaseConnection}</p>
   *
   * <p>There should only be one LargeObjectManager per Connection. The {@link BaseConnection}
   * class keeps track of the various extension API's and it's advised you use those to gain
   * access, and not going direct.</p>
   *
   * @param conn connection
   * @throws SQLException if something wrong happens
   */
  public LargeObjectManager(BaseConnection conn) throws SQLException {
    this.conn = conn;
    // We need Fastpath to do anything
    this.fp = conn.getFastpathAPI();

    // Now get the function oid's for the api
    //
    // This is an example of Fastpath.addFunctions();
    //
    // The schema-qualified form restricts the lookup to pg_catalog so user-defined
    // functions with the same names cannot shadow the built-in lo_* entry points.
    String sql;
    if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
      sql = "SELECT p.proname,p.oid "
          + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n "
          + " WHERE p.pronamespace=n.oid AND n.nspname='pg_catalog' AND (";
    } else {
      sql = "SELECT proname,oid FROM pg_proc WHERE ";
    }
    sql += " proname = 'lo_open'"
        + " or proname = 'lo_close'"
        + " or proname = 'lo_creat'"
        + " or proname = 'lo_unlink'"
        + " or proname = 'lo_lseek'"
        + " or proname = 'lo_lseek64'"
        + " or proname = 'lo_tell'"
        + " or proname = 'lo_tell64'"
        + " or proname = 'loread'"
        + " or proname = 'lowrite'"
        + " or proname = 'lo_truncate'"
        + " or proname = 'lo_truncate64'";
    if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
      sql += ")";
    }

    Statement stmt = conn.createStatement();
    ResultSet res = stmt.executeQuery(sql);
    // Registers each proname -> oid pair with the fastpath API for later calls.
    fp.addFunctions(res);
    res.close();
    stmt.close();

    conn.getLogger().log(LogLevel.DEBUG, "Large Object initialised");
  }

  /**
   * This opens an existing large object, based on its OID. This method assumes that READ and
   * WRITE access is required (the default).
   *
   * @param oid of large object
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   * @deprecated As of 8.3, replaced by {@link #open(long)}
   */
  @Deprecated
  public LargeObject open(int oid) throws SQLException {
    return open((long) oid, false);
  }

  /**
   * This opens an existing large object, same as previous method, but commits the transaction on
   * close if asked. This is useful when the LOB is returned to a caller which won't take care of
   * transactions by itself.
   *
   * @param oid of large object
   * @param commitOnClose commit the transaction when this LOB will be closed
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   */
  public LargeObject open(int oid, boolean commitOnClose) throws SQLException {
    return open((long) oid, commitOnClose);
  }

  /**
   * This opens an existing large object, based on its OID. This method assumes that READ and
   * WRITE access is required (the default).
   *
   * @param oid of large object
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   */
  public LargeObject open(long oid) throws SQLException {
    return open(oid, READWRITE, false);
  }

  /**
   * This opens an existing large object, same as previous method, but commits the transaction on
   * close if asked.
   *
   * @param oid of large object
   * @param commitOnClose commit the transaction when this LOB will be closed
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   */
  public LargeObject open(long oid, boolean commitOnClose) throws SQLException {
    return open(oid, READWRITE, commitOnClose);
  }

  /**
   * This opens an existing large object, based on its OID.
   *
   * @param oid of large object
   * @param mode mode of open
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   * @deprecated As of 8.3, replaced by {@link #open(long, int)}
   */
  @Deprecated
  public LargeObject open(int oid, int mode) throws SQLException {
    return open((long) oid, mode, false);
  }

  /**
   * This opens an existing large object, same as previous method, but commits the transaction on
   * close if asked.
   *
   * @param oid of large object
   * @param mode mode of open
   * @param commitOnClose commit the transaction when this LOB will be closed
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   */
  public LargeObject open(int oid, int mode, boolean commitOnClose) throws SQLException {
    return open((long) oid, mode, commitOnClose);
  }

  /**
   * This opens an existing large object, based on its OID.
   *
   * @param oid of large object
   * @param mode mode of open
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   */
  public LargeObject open(long oid, int mode) throws SQLException {
    return open(oid, mode, false);
  }

  /**
   * This opens an existing large object, based on its OID. All other open() overloads funnel
   * into this one.
   *
   * @param oid of large object
   * @param mode mode of open
   * @param commitOnClose commit the transaction when this LOB will be closed
   * @return LargeObject instance providing access to the object
   * @throws SQLException on error
   */
  public LargeObject open(long oid, int mode, boolean commitOnClose) throws SQLException {
    // Large objects must live inside an explicit transaction.
    if (conn.getAutoCommit()) {
      throw new RedshiftException(GT.tr("Large Objects may not be used in auto-commit mode."),
          RedshiftState.NO_ACTIVE_SQL_TRANSACTION);
    }
    return new LargeObject(fp, oid, mode, conn, commitOnClose);
  }

  /**
   * <p>This creates a large object, returning its OID.</p>
   *
   * <p>It defaults to READWRITE for the new object's attributes.</p>
   *
   * @return oid of new object
   * @throws SQLException on error
   * @deprecated As of 8.3, replaced by {@link #createLO()}
   */
  @Deprecated
  public int create() throws SQLException {
    return create(READWRITE);
  }

  /**
   * <p>This creates a large object, returning its OID.</p>
   *
   * <p>It defaults to READWRITE for the new object's attributes.</p>
   *
   * @return oid of new object
   * @throws SQLException if something wrong happens
   */
  public long createLO() throws SQLException {
    return createLO(READWRITE);
  }

  /**
   * This creates a large object, returning its OID.
   *
   * @param mode a bitmask describing different attributes of the new object
   * @return oid of new object
   * @throws SQLException on error
   */
  public long createLO(int mode) throws SQLException {
    // Same transaction requirement as open().
    if (conn.getAutoCommit()) {
      throw new RedshiftException(GT.tr("Large Objects may not be used in auto-commit mode."),
          RedshiftState.NO_ACTIVE_SQL_TRANSACTION);
    }
    FastpathArg[] args = new FastpathArg[1];
    args[0] = new FastpathArg(mode);
    return fp.getOID("lo_creat", args);
  }

  /**
   * This creates a large object, returning its OID.
   *
   * @param mode a bitmask describing different attributes of the new object
   * @return oid of new object
   * @throws SQLException on error
   * @deprecated As of 8.3, replaced by {@link #createLO(int)}
   */
  @Deprecated
  public int create(int mode) throws SQLException {
    // Narrowing cast kept for the legacy int-based API; large OIDs may wrap negative.
    long oid = createLO(mode);
    return (int) oid;
  }

  /**
   * This deletes a large object.
   *
   * @param oid describing object to delete
   * @throws SQLException on error
   */
  public void delete(long oid) throws SQLException {
    FastpathArg[] args = new FastpathArg[1];
    args[0] = Fastpath.createOIDArg(oid);
    fp.fastpath("lo_unlink", args);
  }

  /**
   * <p>This deletes a large object.</p>
   *
   * <p>It is identical to the delete method, and is supplied as the C API uses unlink.</p>
   *
   * @param oid describing object to delete
   * @throws SQLException on error
   * @deprecated As of 8.3, replaced by {@link #unlink(long)}
   */
  @Deprecated
  public void unlink(int oid) throws SQLException {
    delete((long) oid);
  }

  /**
   * <p>This deletes a large object.</p>
   *
   * <p>It is identical to the delete method, and is supplied as the C API uses unlink.</p>
   *
   * @param oid describing object to delete
   * @throws SQLException on error
   */
  public void unlink(long oid) throws SQLException {
    delete(oid);
  }

  /**
   * This deletes a large object.
   *
   * @param oid describing object to delete
   * @throws SQLException on error
   * @deprecated As of 8.3, replaced by {@link #delete(long)}
   */
  @Deprecated
  public void delete(int oid) throws SQLException {
    delete((long) oid);
  }
}
8,546
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/largeobject/LargeObject.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.largeobject;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.fastpath.Fastpath;
import com.amazon.redshift.fastpath.FastpathArg;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.sql.SQLException;

/**
 * <p>This class provides the basic methods required to run the interface, plus a pair of methods that
 * provide InputStream and OutputStream classes for this object.</p>
 *
 * <p>Normally, client code would use the getAsciiStream, getBinaryStream, or getUnicodeStream methods
 * in ResultSet, or setAsciiStream, setBinaryStream, or setUnicodeStream methods in
 * PreparedStatement to access Large Objects.</p>
 *
 * <p>However, sometimes lower level access to Large Objects are required, that are not supported by
 * the JDBC specification.</p>
 *
 * <p>Refer to com.amazon.redshift.largeobject.LargeObjectManager on how to gain access to a Large Object,
 * or how to create one.</p>
 *
 * <p>All server interaction goes through the Fastpath API ("lo_*" server functions); the
 * object is addressed by the file descriptor {@code fd} obtained from {@code lo_open} in the
 * constructor. NOTE(review): the descriptor is scoped to the current transaction — see the
 * comment above {@link #close()}.</p>
 *
 * @see com.amazon.redshift.largeobject.LargeObjectManager
 * @see java.sql.ResultSet#getAsciiStream
 * @see java.sql.ResultSet#getBinaryStream
 * @see java.sql.ResultSet#getUnicodeStream
 * @see java.sql.PreparedStatement#setAsciiStream
 * @see java.sql.PreparedStatement#setBinaryStream
 * @see java.sql.PreparedStatement#setUnicodeStream
 */
public class LargeObject
    //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1"
    implements AutoCloseable
    //JCP! endif /* hi, checkstyle */
{
  /**
   * Indicates a seek from the beginning of a file.
   */
  public static final int SEEK_SET = 0;

  /**
   * Indicates a seek from the current position.
   */
  public static final int SEEK_CUR = 1;

  /**
   * Indicates a seek from the end of a file.
   */
  public static final int SEEK_END = 2;

  private final Fastpath fp; // Fastpath API to use
  private final long oid; // OID of this object
  private final int mode; // read/write mode of this object
  private final int fd; // the descriptor of the open large object

  private BlobOutputStream os; // The current output stream

  private boolean closed = false; // true when we are closed

  private BaseConnection conn; // Only initialized when open a LOB with CommitOnClose
  private boolean commitOnClose; // Only initialized when open a LOB with CommitOnClose

  /**
   * <p>This opens a large object.</p>
   *
   * <p>If the object does not exist, then an SQLException is thrown.</p>
   *
   * @param fp FastPath API for the connection to use
   * @param oid of the Large Object to open
   * @param mode Mode of opening the large object
   * @param conn the connection to the database used to access this LOB
   * @param commitOnClose commit the transaction when this LOB will be closed (defined in
   *        LargeObjectManager)
   * @throws SQLException if a database-access error occurs.
   * @see com.amazon.redshift.largeobject.LargeObjectManager
   */
  protected LargeObject(Fastpath fp, long oid, int mode, BaseConnection conn, boolean commitOnClose)
      throws SQLException {
    this.fp = fp;
    this.oid = oid;
    this.mode = mode;
    // The connection reference is only kept when a commit is required on close;
    // otherwise it stays null and close() skips the commit.
    if (commitOnClose) {
      this.commitOnClose = true;
      this.conn = conn;
    } else {
      this.commitOnClose = false;
    }

    // Open the object on the server and remember its descriptor.
    FastpathArg[] args = new FastpathArg[2];
    args[0] = Fastpath.createOIDArg(oid);
    args[1] = new FastpathArg(mode);
    this.fd = fp.getInteger("lo_open", args);
  }

  /**
   * <p>This opens a large object.</p>
   *
   * <p>If the object does not exist, then an SQLException is thrown.</p>
   *
   * @param fp FastPath API for the connection to use
   * @param oid of the Large Object to open
   * @param mode Mode of opening the large object (defined in LargeObjectManager)
   * @throws SQLException if a database-access error occurs.
   * @see com.amazon.redshift.largeobject.LargeObjectManager
   */
  protected LargeObject(Fastpath fp, long oid, int mode) throws SQLException {
    this(fp, oid, mode, null, false);
  }

  /**
   * Opens a second, independent handle (with its own descriptor and file position)
   * on the same underlying large object.
   *
   * @return a new LargeObject for the same OID and mode
   * @throws SQLException if a database-access error occurs.
   */
  public LargeObject copy() throws SQLException {
    return new LargeObject(fp, oid, mode);
  }

  /*
   * Release large object resources during garbage cleanup.
   *
   * This code used to call close() however that was problematic because the scope of the fd is a
   * transaction, thus if commit or rollback was called before garbage collection ran then the call
   * to close would error out with an invalid large object handle. So this method now does nothing
   * and lets the server handle cleanup when it ends the transaction.
   *
   * protected void finalize() throws SQLException { }
   */

  /**
   * @return the OID of this LargeObject (truncated to 32 bits)
   * @deprecated As of 8.3, replaced by {@link #getLongOID()}
   */
  @Deprecated
  public int getOID() {
    return (int) oid;
  }

  /**
   * @return the OID of this LargeObject
   */
  public long getLongOID() {
    return oid;
  }

  /**
   * This method closes the object. You must not call methods in this object after this is called.
   *
   * @throws SQLException if a database-access error occurs.
   */
  public void close() throws SQLException {
    if (!closed) {
      // flush any open output streams
      if (os != null) {
        try {
          // we can't call os.close() otherwise we go into an infinite loop!
          os.flush();
        } catch (IOException ioe) {
          throw new RedshiftException("Exception flushing output stream", RedshiftState.DATA_ERROR, ioe);
        } finally {
          os = null;
        }
      }

      // finally close
      FastpathArg[] args = new FastpathArg[1];
      args[0] = new FastpathArg(fd);
      fp.fastpath("lo_close", args); // true here as we dont care!!
      closed = true;
      if (this.commitOnClose) {
        this.conn.commit();
      }
    }
  }

  /**
   * Reads some data from the object, and return as a byte[] array.
   *
   * @param len number of bytes to read
   * @return byte[] array containing data read (may be shorter than {@code len} at end of object)
   * @throws SQLException if a database-access error occurs.
   */
  public byte[] read(int len) throws SQLException {
    // This is the original method, where the entire block (len bytes)
    // is retrieved in one go.
    FastpathArg[] args = new FastpathArg[2];
    args[0] = new FastpathArg(fd);
    args[1] = new FastpathArg(len);
    return fp.getData("loread", args);
  }

  /**
   * Reads some data from the object into an existing array.
   *
   * @param buf destination array
   * @param off offset within array
   * @param len number of bytes to read
   * @return the number of bytes actually read
   * @throws SQLException if a database-access error occurs.
   */
  public int read(byte[] buf, int off, int len) throws SQLException {
    byte[] b = read(len);
    // The server may return fewer bytes than requested (end of object).
    if (b.length < len) {
      len = b.length;
    }
    System.arraycopy(b, 0, buf, off, len);
    return len;
  }

  /**
   * Writes an array to the object.
   *
   * @param buf array to write
   * @throws SQLException if a database-access error occurs.
   */
  public void write(byte[] buf) throws SQLException {
    FastpathArg[] args = new FastpathArg[2];
    args[0] = new FastpathArg(fd);
    args[1] = new FastpathArg(buf);
    fp.fastpath("lowrite", args);
  }

  /**
   * Writes some data from an array to the object.
   *
   * @param buf destination array
   * @param off offset within array
   * @param len number of bytes to write
   * @throws SQLException if a database-access error occurs.
   */
  public void write(byte[] buf, int off, int len) throws SQLException {
    FastpathArg[] args = new FastpathArg[2];
    args[0] = new FastpathArg(fd);
    args[1] = new FastpathArg(buf, off, len);
    fp.fastpath("lowrite", args);
  }

  /**
   * <p>Sets the current position within the object.</p>
   *
   * <p>This is similar to the fseek() call in the standard C library. It allows you to have random
   * access to the large object.</p>
   *
   * @param pos position within object
   * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
   * @throws SQLException if a database-access error occurs.
   */
  public void seek(int pos, int ref) throws SQLException {
    FastpathArg[] args = new FastpathArg[3];
    args[0] = new FastpathArg(fd);
    args[1] = new FastpathArg(pos);
    args[2] = new FastpathArg(ref);
    fp.fastpath("lo_lseek", args);
  }

  /**
   * Sets the current position within the object using 64-bit value (9.3+).
   *
   * @param pos position within object
   * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
   * @throws SQLException if a database-access error occurs.
   */
  public void seek64(long pos, int ref) throws SQLException {
    FastpathArg[] args = new FastpathArg[3];
    args[0] = new FastpathArg(fd);
    args[1] = new FastpathArg(pos);
    args[2] = new FastpathArg(ref);
    fp.fastpath("lo_lseek64", args);
  }

  /**
   * <p>Sets the current position within the object.</p>
   *
   * <p>This is similar to the fseek() call in the standard C library. It allows you to have random
   * access to the large object.</p>
   *
   * @param pos position within object from beginning
   * @throws SQLException if a database-access error occurs.
   */
  public void seek(int pos) throws SQLException {
    seek(pos, SEEK_SET);
  }

  /**
   * @return the current position within the object
   * @throws SQLException if a database-access error occurs.
   */
  public int tell() throws SQLException {
    FastpathArg[] args = new FastpathArg[1];
    args[0] = new FastpathArg(fd);
    return fp.getInteger("lo_tell", args);
  }

  /**
   * @return the current position within the object (64-bit variant, 9.3+)
   * @throws SQLException if a database-access error occurs.
   */
  public long tell64() throws SQLException {
    FastpathArg[] args = new FastpathArg[1];
    args[0] = new FastpathArg(fd);
    return fp.getLong("lo_tell64", args);
  }

  /**
   * <p>This method is inefficient, as the only way to find out the size of the object is to seek to
   * the end, record the current position, then return to the original position.</p>
   *
   * <p>A better method will be found in the future.</p>
   *
   * @return the size of the large object
   * @throws SQLException if a database-access error occurs.
   */
  public int size() throws SQLException {
    int cp = tell();
    seek(0, SEEK_END);
    int sz = tell();
    seek(cp, SEEK_SET);
    return sz;
  }

  /**
   * See #size() for information about efficiency.
   *
   * @return the size of the large object
   * @throws SQLException if a database-access error occurs.
   */
  public long size64() throws SQLException {
    long cp = tell64();
    seek64(0, SEEK_END);
    long sz = tell64();
    seek64(cp, SEEK_SET);
    return sz;
  }

  /**
   * Truncates the large object to the given length in bytes. If the number of bytes is larger than
   * the current large object length, the large object will be filled with zero bytes. This method
   * does not modify the current file offset.
   *
   * @param len given length in bytes
   * @throws SQLException if something goes wrong
   */
  public void truncate(int len) throws SQLException {
    FastpathArg[] args = new FastpathArg[2];
    args[0] = new FastpathArg(fd);
    args[1] = new FastpathArg(len);
    fp.getInteger("lo_truncate", args);
  }

  /**
   * Truncates the large object to the given length in bytes. If the number of bytes is larger than
   * the current large object length, the large object will be filled with zero bytes. This method
   * does not modify the current file offset.
   *
   * @param len given length in bytes
   * @throws SQLException if something goes wrong
   */
  public void truncate64(long len) throws SQLException {
    FastpathArg[] args = new FastpathArg[2];
    args[0] = new FastpathArg(fd);
    args[1] = new FastpathArg(len);
    fp.getInteger("lo_truncate64", args);
  }

  /**
   * <p>Returns an {@link InputStream} from this object.</p>
   *
   * <p>This {@link InputStream} can then be used in any method that requires an InputStream.</p>
   *
   * @return {@link InputStream} from this object
   * @throws SQLException if a database-access error occurs.
   */
  public InputStream getInputStream() throws SQLException {
    return new BlobInputStream(this, 4096);
  }

  /**
   * Returns an {@link InputStream} from this object, that will limit the amount of data that is
   * visible.
   *
   * @param limit maximum number of bytes the resulting stream will serve
   * @return {@link InputStream} from this object
   * @throws SQLException if a database-access error occurs.
   */
  public InputStream getInputStream(long limit) throws SQLException {
    return new BlobInputStream(this, 4096, limit);
  }

  /**
   * <p>Returns an {@link OutputStream} to this object.</p>
   *
   * <p>This OutputStream can then be used in any method that requires an OutputStream.</p>
   *
   * <p>The stream is cached so repeated calls return the same instance; close() flushes it.</p>
   *
   * @return {@link OutputStream} from this object
   * @throws SQLException if a database-access error occurs.
   */
  public OutputStream getOutputStream() throws SQLException {
    if (os == null) {
      os = new BlobOutputStream(this, 4096);
    }
    return os;
  }
}
8,547
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/largeobject/BlobInputStream.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.largeobject;

import java.io.IOException;
import java.io.InputStream;
import java.sql.SQLException;

/**
 * This is an implementation of an InputStream from a large object.
 *
 * <p>Bytes are fetched from the server in chunks of {@code bsize} bytes and
 * served out of an internal buffer. An optional {@code limit} caps the total
 * number of bytes this stream will return.</p>
 */
public class BlobInputStream extends InputStream {
  /**
   * The parent LargeObject; null once the stream is closed.
   */
  private LargeObject lo;

  /**
   * The absolute position (number of bytes returned so far).
   */
  private long apos;

  /**
   * Buffer used to improve performance.
   */
  private byte[] buffer;

  /**
   * Position within buffer.
   */
  private int bpos;

  /**
   * The buffer size.
   */
  private int bsize;

  /**
   * The mark position.
   */
  private long mpos = 0;

  /**
   * The limit; values &lt;= 0 mean "no limit" (constructors default to -1).
   */
  private long limit = -1;

  /**
   * @param lo LargeObject to read from
   */
  public BlobInputStream(LargeObject lo) {
    this(lo, 1024);
  }

  /**
   * @param lo LargeObject to read from
   * @param bsize buffer size
   */
  public BlobInputStream(LargeObject lo, int bsize) {
    this(lo, bsize, -1);
  }

  /**
   * @param lo LargeObject to read from
   * @param bsize buffer size
   * @param limit max number of bytes to read
   */
  public BlobInputStream(LargeObject lo, int bsize, long limit) {
    this.lo = lo;
    buffer = null;
    bpos = 0;
    apos = 0;
    this.bsize = bsize;
    this.limit = limit;
  }

  /**
   * The minimum required to implement input stream.
   *
   * @return the next byte as an unsigned value (0..255), or -1 at end of
   *         stream or when the configured limit has been reached
   * @throws IOException if the stream is closed or the database read fails
   */
  public int read() throws java.io.IOException {
    checkClosed();
    try {
      if (limit > 0 && apos >= limit) {
        return -1;
      }
      if (buffer == null || bpos >= buffer.length) {
        buffer = lo.read(bsize);
        bpos = 0;
      }

      // Handle EOF: the server returned an empty chunk.
      if (bpos >= buffer.length) {
        return -1;
      }

      // Mask to an unsigned byte value; Java bytes are signed.
      int ret = buffer[bpos] & 0xFF;

      bpos++;
      apos++;

      return ret;
    } catch (SQLException se) {
      // Keep the legacy message text but preserve the cause for callers.
      throw new IOException(se.toString(), se);
    }
  }

  /**
   * <p>Closes this input stream and releases any system resources associated with the stream.</p>
   *
   * <p>Closing also closes the underlying {@link LargeObject}.</p>
   *
   * @throws IOException if an I/O error occurs.
   */
  public void close() throws IOException {
    if (lo != null) {
      try {
        lo.close();
        lo = null;
      } catch (SQLException se) {
        throw new IOException(se.toString(), se);
      }
    }
  }

  /**
   * <p>Marks the current position in this input stream. A subsequent call to the <code>reset</code>
   * method repositions this stream at the last marked position so that subsequent reads re-read the
   * same bytes.</p>
   *
   * <p>The <code>readlimit</code> argument is ignored by this implementation: the mark
   * never becomes invalid, because reset() simply seeks in the underlying large object.</p>
   *
   * <p>Marking a closed stream has no effect on the stream.</p>
   *
   * @param readlimit the maximum limit of bytes that can be read before the mark position becomes
   *        invalid (unused here).
   * @see java.io.InputStream#reset()
   */
  public synchronized void mark(int readlimit) {
    mpos = apos;
  }

  /**
   * Repositions this stream to the position at the time the <code>mark</code> method was last
   * called on this input stream. NB: If mark is not called we move to the beginning.
   *
   * @see java.io.InputStream#mark(int)
   * @see java.io.IOException
   */
  public synchronized void reset() throws IOException {
    checkClosed();
    try {
      if (mpos <= Integer.MAX_VALUE) {
        lo.seek((int) mpos);
      } else {
        lo.seek64(mpos, LargeObject.SEEK_SET);
      }
      buffer = null; // discard stale buffered bytes; next read() refills
      apos = mpos;
    } catch (SQLException se) {
      throw new IOException(se.toString(), se);
    }
  }

  /**
   * Tests if this input stream supports the <code>mark</code> and <code>reset</code> methods.
   *
   * @return <code>true</code>; this stream always supports mark and reset.
   * @see java.io.InputStream#mark(int)
   * @see java.io.InputStream#reset()
   */
  public boolean markSupported() {
    return true;
  }

  /**
   * @throws IOException if this stream has been closed
   */
  private void checkClosed() throws IOException {
    if (lo == null) {
      // Fixed copy-paste error: this message previously said "BlobOutputStream".
      throw new IOException("BlobInputStream is closed");
    }
  }
}
8,548
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/largeobject/BlobOutputStream.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.largeobject;

import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;

/**
 * A buffered OutputStream that writes its bytes into a {@link LargeObject}.
 *
 * <p>Single-byte writes are accumulated in an internal buffer and pushed to the
 * large object when the buffer fills, on {@link #flush()}, or on {@link #close()}.
 * Bulk writes bypass the buffer (after draining it) to preserve byte order.</p>
 */
public class BlobOutputStream extends OutputStream {
  /** Large object being written to; becomes null once the stream is closed. */
  private LargeObject blob;

  /** Internal staging buffer for single-byte writes. */
  private final byte[] pending;

  /** Capacity of {@link #pending} (default 1K). */
  private final int capacity;

  /** Number of bytes currently staged in {@link #pending}. */
  private int count;

  /**
   * Create an OutputStream to a large object.
   *
   * @param lo LargeObject
   */
  public BlobOutputStream(LargeObject lo) {
    this(lo, 1024);
  }

  /**
   * Create an OutputStream to a large object.
   *
   * @param lo LargeObject
   * @param bsize The size of the buffer used to improve performance
   */
  public BlobOutputStream(LargeObject lo, int bsize) {
    this.blob = lo;
    this.capacity = bsize;
    this.pending = new byte[bsize];
    this.count = 0;
  }

  /**
   * Buffers a single byte, draining the buffer to the large object when full.
   *
   * @param b byte value to write (low 8 bits used)
   * @throws IOException if the stream is closed or the database write fails
   */
  public void write(int b) throws java.io.IOException {
    checkClosed();
    try {
      if (count >= capacity) {
        blob.write(pending);
        count = 0;
      }
      pending[count] = (byte) b;
      count++;
    } catch (SQLException se) {
      throw new IOException(se.toString());
    }
  }

  /**
   * Writes a range of an array directly to the large object, first draining any
   * internally buffered bytes so ordering is preserved.
   *
   * @param buf source array
   * @param off offset of first byte to write
   * @param len number of bytes to write
   * @throws IOException if the stream is closed or the database write fails
   */
  public void write(byte[] buf, int off, int len) throws java.io.IOException {
    checkClosed();
    try {
      // Any staged single-byte writes must reach the server first.
      if (count > 0) {
        flush();
      }
      boolean wholeArray = (off == 0) && (len == buf.length);
      if (wholeArray) {
        // Avoids an extra buffer allocation and copy inside LargeObject.
        blob.write(buf);
      } else {
        blob.write(buf, off, len);
      }
    } catch (SQLException se) {
      throw new IOException(se.toString());
    }
  }

  /**
   * Flushes this output stream and forces any buffered output bytes to be written out. The general
   * contract of <code>flush</code> is that calling it is an indication that, if any bytes
   * previously written have been buffered by the implementation of the output stream, such bytes
   * should immediately be written to their intended destination.
   *
   * @throws IOException if an I/O error occurs.
   */
  public void flush() throws IOException {
    checkClosed();
    try {
      if (count > 0) {
        blob.write(pending, 0, count);
      }
      count = 0;
    } catch (SQLException se) {
      throw new IOException(se.toString());
    }
  }

  /**
   * Flushes remaining bytes, closes the underlying large object, and marks this
   * stream closed. Safe to call more than once.
   *
   * @throws IOException if flushing or closing the large object fails
   */
  public void close() throws IOException {
    if (blob == null) {
      return;
    }
    try {
      flush();
      blob.close();
      blob = null;
    } catch (SQLException se) {
      throw new IOException(se.toString());
    }
  }

  /**
   * @throws IOException if this stream has already been closed
   */
  private void checkClosed() throws IOException {
    if (blob == null) {
      throw new IOException("BlobOutputStream is closed");
    }
  }
}
8,549
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/copy/CopyDual.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.copy;

/**
 * Bidirectional via copy stream protocol. Via bidirectional copy protocol work Redshift
 * replication.
 *
 * <p>Marker interface: combines {@link CopyIn} and {@link CopyOut} for operations that
 * both send data to and receive data from the server on the same copy stream.</p>
 *
 * @see CopyIn
 * @see CopyOut
 */
public interface CopyDual extends CopyIn, CopyOut {
}
8,550
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/copy/RedshiftCopyOutputStream.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.copy;

import com.amazon.redshift.RedshiftConnection;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;

import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;

/**
 * OutputStream for buffered input into a Redshift COPY FROM STDIN operation.
 *
 * <p>Bytes are staged in {@code copyBuffer} and pushed to the underlying
 * {@link CopyIn} when the buffer fills, on {@link #flush()}, or when the copy
 * operation ends.</p>
 */
public class RedshiftCopyOutputStream extends OutputStream implements CopyIn {
  /** Underlying copy operation; null once this stream is closed. */
  private CopyIn op;
  /** Staging buffer for outgoing bytes. */
  private final byte[] copyBuffer;
  /** Scratch buffer so write(int) avoids a per-call allocation. */
  private final byte[] singleByteBuffer = new byte[1];
  /** Number of bytes currently staged in copyBuffer. */
  private int at = 0;

  /**
   * Uses given connection for specified COPY FROM STDIN operation.
   *
   * @param connection database connection to use for copying (protocol version 3 required)
   * @param sql COPY FROM STDIN statement
   * @throws SQLException if initializing the operation fails
   */
  public RedshiftCopyOutputStream(RedshiftConnection connection, String sql) throws SQLException {
    this(connection, sql, CopyManager.DEFAULT_BUFFER_SIZE);
  }

  /**
   * Uses given connection for specified COPY FROM STDIN operation.
   *
   * @param connection database connection to use for copying (protocol version 3 required)
   * @param sql COPY FROM STDIN statement
   * @param bufferSize try to send this many bytes at a time
   * @throws SQLException if initializing the operation fails
   */
  public RedshiftCopyOutputStream(RedshiftConnection connection, String sql, int bufferSize)
      throws SQLException {
    this(connection.getCopyAPI().copyIn(sql), bufferSize);
  }

  /**
   * Use given CopyIn operation for writing.
   *
   * @param op COPY FROM STDIN operation
   */
  public RedshiftCopyOutputStream(CopyIn op) {
    this(op, CopyManager.DEFAULT_BUFFER_SIZE);
  }

  /**
   * Use given CopyIn operation for writing.
   *
   * @param op COPY FROM STDIN operation
   * @param bufferSize try to send this many bytes at a time
   */
  public RedshiftCopyOutputStream(CopyIn op, int bufferSize) {
    this.op = op;
    copyBuffer = new byte[bufferSize];
  }

  /**
   * Writes a single byte.
   *
   * @param b byte value, must be in 0..255
   * @throws IOException if the stream is closed, the value is out of range,
   *         or the underlying copy write fails
   */
  public void write(int b) throws IOException {
    checkClosed();
    if (b < 0 || b > 255) {
      throw new IOException(GT.tr("Cannot write to copy a byte of value {0}", b));
    }
    singleByteBuffer[0] = (byte) b;
    write(singleByteBuffer, 0, 1);
  }

  public void write(byte[] buf) throws IOException {
    write(buf, 0, buf.length);
  }

  public void write(byte[] buf, int off, int siz) throws IOException {
    checkClosed();
    try {
      writeToCopy(buf, off, siz);
    } catch (SQLException se) {
      throw new IOException("Write to copy failed.", se);
    }
  }

  private void checkClosed() throws IOException {
    if (op == null) {
      throw new IOException(GT.tr("This copy stream is closed."));
    }
  }

  /**
   * Ends the copy (if still active) and closes this stream. Safe to call twice.
   *
   * @throws IOException if ending the copy operation fails
   */
  public void close() throws IOException {
    // Don't complain about a double close.
    if (op == null) {
      return;
    }

    if (op.isActive()) {
      try {
        endCopy();
      } catch (SQLException se) {
        throw new IOException("Ending write to copy failed.", se);
      }
    }
    op = null;
  }

  /**
   * Drains the staging buffer and asks the copy operation to flush to the server.
   *
   * @throws IOException if the flush fails
   */
  public void flush() throws IOException {
    checkClosed();
    try {
      op.writeToCopy(copyBuffer, 0, at);
      at = 0;
      op.flushCopy();
    } catch (SQLException e) {
      throw new IOException("Unable to flush stream", e);
    }
  }

  public void writeToCopy(byte[] buf, int off, int siz) throws SQLException {
    if (at > 0 && siz > copyBuffer.length - at) {
      // would not fit into rest of our buf, so flush buf
      op.writeToCopy(copyBuffer, 0, at);
      at = 0;
    }
    if (siz > copyBuffer.length) {
      // would still not fit into buf, so just pass it through
      op.writeToCopy(buf, off, siz);
    } else {
      // fits into our buf, so save it there
      System.arraycopy(buf, off, copyBuffer, at, siz);
      at += siz;
    }
  }

  public void writeToCopy(ByteStreamWriter from) throws SQLException {
    // BUG FIX: previously this bypassed the staging buffer entirely, so any
    // bytes already buffered would have been sent AFTER the stream writer's
    // bytes, corrupting the copy data order. Drain the buffer first.
    if (at > 0) {
      op.writeToCopy(copyBuffer, 0, at);
      at = 0;
    }
    op.writeToCopy(from);
  }

  public int getFormat() {
    return op.getFormat();
  }

  public int getFieldFormat(int field) {
    return op.getFieldFormat(field);
  }

  public void cancelCopy() throws SQLException {
    op.cancelCopy();
  }

  public int getFieldCount() {
    return op.getFieldCount();
  }

  public boolean isActive() {
    return op != null && op.isActive();
  }

  public void flushCopy() throws SQLException {
    op.flushCopy();
  }

  /**
   * Drains any staged bytes, finishes the copy operation and reports the row count.
   *
   * @return number of rows handled by the server (see {@link #getHandledRowCount()})
   * @throws SQLException if the operation fails
   */
  public long endCopy() throws SQLException {
    if (at > 0) {
      op.writeToCopy(copyBuffer, 0, at);
      at = 0; // buffer drained; avoid re-sending on a later flush()/close()
    }
    op.endCopy();
    return getHandledRowCount();
  }

  public long getHandledRowCount() {
    return op.getHandledRowCount();
  }
}
8,551
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/copy/CopyOut.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.copy;

import java.sql.SQLException;

/**
 * Copy bulk data from a Redshift table to the client. Extends {@link CopyOperation}
 * with the server-to-client read side of the COPY protocol.
 */
public interface CopyOut extends CopyOperation {
  /**
   * Blocks wait for a row of data to be received from server on an active copy operation.
   *
   * @return byte array received from server, null if server complete copy operation
   * @throws SQLException if something goes wrong for example socket timeout
   */
  byte[] readFromCopy() throws SQLException;

  /**
   * Wait for a row of data to be received from server on an active copy operation.
   *
   * @param block {@code true} if need wait data from server otherwise {@code false} and will read
   *        pending message from server
   * @return byte array received from server, if pending message from server absent and use no
   *         blocking mode return null
   * @throws SQLException if something goes wrong for example socket timeout
   */
  byte[] readFromCopy(boolean block) throws SQLException;
}
8,552
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/copy/RedshiftCopyInputStream.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.copy;

import com.amazon.redshift.RedshiftConnection;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.io.IOException;
import java.io.InputStream;
import java.sql.SQLException;
import java.util.Arrays;

/**
 * InputStream for reading from a Redshift COPY TO STDOUT operation.
 *
 * <p>Rows arrive from the server as byte[] chunks via {@link CopyOut#readFromCopy()};
 * this class buffers the current chunk and serves it through the InputStream API.</p>
 */
public class RedshiftCopyInputStream extends InputStream implements CopyOut {
  /** Underlying copy operation; null once this stream is closed. */
  private CopyOut op;
  /** Current chunk received from the server; null before first read / after EOF. */
  private byte[] buf;
  /** Read position within {@code buf}; -1 once the server signalled end of copy. */
  private int at;
  /** Number of valid bytes in {@code buf}. */
  private int len;

  /**
   * Uses given connection for specified COPY TO STDOUT operation.
   *
   * @param connection database connection to use for copying (protocol version 3 required)
   * @param sql COPY TO STDOUT statement
   * @throws SQLException if initializing the operation fails
   */
  public RedshiftCopyInputStream(RedshiftConnection connection, String sql) throws SQLException {
    this(connection.getCopyAPI().copyOut(sql));
  }

  /**
   * Use given CopyOut operation for reading.
   *
   * @param op COPY TO STDOUT operation
   */
  public RedshiftCopyInputStream(CopyOut op) {
    this.op = op;
  }

  /**
   * Ensures the buffer holds unread bytes, fetching the next chunk if exhausted.
   *
   * @return true if unread bytes are available, false at end of copy
   */
  private boolean gotBuf() throws IOException {
    if (at >= len) {
      try {
        buf = op.readFromCopy();
      } catch (SQLException sqle) {
        throw new IOException(GT.tr("Copying from database failed: {0}", sqle), sqle);
      }

      if (buf == null) {
        at = -1;
        return false;
      } else {
        at = 0;
        len = buf.length;
        return true;
      }
    }
    return buf != null;
  }

  private void checkClosed() throws IOException {
    if (op == null) {
      throw new IOException(GT.tr("This copy stream is closed."));
    }
  }

  public int available() throws IOException {
    checkClosed();
    return (buf != null ? len - at : 0);
  }

  public int read() throws IOException {
    checkClosed();
    return gotBuf() ? (buf[at++] & 0xFF) : -1;
  }

  public int read(byte[] buf) throws IOException {
    return read(buf, 0, buf.length);
  }

  public int read(byte[] buf, int off, int siz) throws IOException {
    checkClosed();
    int got = 0;
    boolean didReadSomething = false;
    while (got < siz && (didReadSomething = gotBuf())) {
      buf[off + got++] = this.buf[at++];
    }
    return got == 0 && !didReadSomething ? -1 : got;
  }

  /**
   * Returns the unread remainder of the current chunk (or the next chunk from
   * the server), exhausting the internal buffer.
   *
   * @return bytes received from the server, or null when the copy is complete
   * @throws SQLException if reading from the server fails
   */
  public byte[] readFromCopy() throws SQLException {
    // BUG FIX: the previous implementation captured the buffer reference
    // BEFORE gotBuf() could refill it, returning a stale (already consumed)
    // chunk after a refill and stale data instead of null at end of copy.
    byte[] result = null;
    try {
      if (gotBuf()) {
        if (at > 0 || len < buf.length) {
          result = Arrays.copyOfRange(buf, at, len);
        } else {
          result = buf;
        }
        at = len; // either partly or fully returned, buffer is exhausted
      }
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Read from copy failed."),
          RedshiftState.CONNECTION_FAILURE, ioe);
    }
    return result;
  }

  @Override
  public byte[] readFromCopy(boolean block) throws SQLException {
    return readFromCopy();
  }

  /**
   * Cancels the copy (if still active) and closes this stream. Safe to call twice.
   */
  public void close() throws IOException {
    // Don't complain about a double close.
    if (op == null) {
      return;
    }

    if (op.isActive()) {
      try {
        op.cancelCopy();
      } catch (SQLException se) {
        throw new IOException("Failed to close copy reader.", se);
      }
    }
    op = null;
  }

  public void cancelCopy() throws SQLException {
    op.cancelCopy();
  }

  public int getFormat() {
    return op.getFormat();
  }

  public int getFieldFormat(int field) {
    return op.getFieldFormat(field);
  }

  public int getFieldCount() {
    return op.getFieldCount();
  }

  public boolean isActive() {
    return op != null && op.isActive();
  }

  public long getHandledRowCount() {
    return op.getHandledRowCount();
  }
}
8,553
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/copy/CopyIn.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.copy;

import com.amazon.redshift.util.ByteStreamWriter;

import java.sql.SQLException;

/**
 * Copy bulk data from client into a Redshift table very fast. Extends
 * {@link CopyOperation} with the client-to-server write side of the COPY protocol.
 */
public interface CopyIn extends CopyOperation {

  /**
   * Writes specified part of given byte array to an open and writable copy operation.
   *
   * @param buf array of bytes to write
   * @param off offset of first byte to write (normally zero)
   * @param siz number of bytes to write (normally buf.length)
   * @throws SQLException if the operation fails
   */
  void writeToCopy(byte[] buf, int off, int siz) throws SQLException;

  /**
   * Writes a ByteStreamWriter to an open and writable copy operation.
   *
   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
   * @throws SQLException if the operation fails
   */
  void writeToCopy(ByteStreamWriter from) throws SQLException;

  /**
   * Force any buffered output to be sent over the network to the backend. In general this is a
   * useless operation as it will get pushed over in due time or when endCopy is called. Some
   * specific modified server versions (Truviso) want this data sooner. If you are unsure if you
   * need to use this method, don't.
   *
   * @throws SQLException if the operation fails.
   */
  void flushCopy() throws SQLException;

  /**
   * Finishes copy operation successfully.
   *
   * @return number of updated rows for server 8.2 or newer (see getHandledRowCount())
   * @throws SQLException if the operation fails.
   */
  long endCopy() throws SQLException;
}
8,554
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/copy/CopyOperation.java
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.copy;

import java.sql.SQLException;

/**
 * Exchange of bulk data between the client and Redshift database tables. The full
 * per-direction contracts live in CopyIn and CopyOut.
 */
public interface CopyOperation {

  /**
   * @return number of fields in each row of this operation
   */
  int getFieldCount();

  /**
   * @return overall row format: 0 = textual, 1 = binary
   */
  int getFormat();

  /**
   * @param field field index (0..getFieldCount()-1)
   * @return format of the requested field: 0 = textual, 1 = binary
   */
  int getFieldFormat(int field);

  /**
   * @return whether the connection is currently reserved for this copy operation
   */
  boolean isActive();

  /**
   * Cancels this copy operation, discarding any exchanged data.
   *
   * @throws SQLException if cancelling fails
   */
  void cancelCopy() throws SQLException;

  /**
   * After a successful end of copy, reports the number of database records handled in
   * that operation. Unimplemented in the Redshift server; returns -1.
   *
   * @return number of handled rows, or -1
   */
  long getHandledRowCount();
}
8,555
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/copy/CopyManager.java
/* * Copyright (c) 2009, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.copy; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.core.Encoding; import com.amazon.redshift.core.QueryExecutor; import com.amazon.redshift.util.ByteStreamWriter; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; import java.io.Writer; import java.sql.SQLException; /** * API for Redshift COPY bulk data transfer. */ public class CopyManager { // I don't know what the best buffer size is, so we let people specify it if // they want, and if they don't know, we don't make them guess, so that if we // do figure it out we can just set it here and they reap the rewards. // Note that this is currently being used for both a number of bytes and a number // of characters. 
static final int DEFAULT_BUFFER_SIZE = 65536; private final Encoding encoding; private final QueryExecutor queryExecutor; private final BaseConnection connection; public CopyManager(BaseConnection connection) throws SQLException { this.encoding = connection.getEncoding(); this.queryExecutor = connection.getQueryExecutor(); this.connection = connection; } public CopyIn copyIn(String sql) throws SQLException { CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit()); if (op == null || op instanceof CopyIn) { return (CopyIn) op; } else { op.cancelCopy(); throw new RedshiftException(GT.tr("Requested CopyIn but got {0}", op.getClass().getName()), RedshiftState.WRONG_OBJECT_TYPE); } } public CopyOut copyOut(String sql) throws SQLException { CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit()); if (op == null || op instanceof CopyOut) { return (CopyOut) op; } else { op.cancelCopy(); throw new RedshiftException(GT.tr("Requested CopyOut but got {0}", op.getClass().getName()), RedshiftState.WRONG_OBJECT_TYPE); } } public CopyDual copyDual(String sql) throws SQLException { CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit()); if (op == null || op instanceof CopyDual) { return (CopyDual) op; } else { op.cancelCopy(); throw new RedshiftException(GT.tr("Requested CopyDual but got {0}", op.getClass().getName()), RedshiftState.WRONG_OBJECT_TYPE); } } /** * Pass results of a COPY TO STDOUT query from database into a Writer. * * @param sql COPY TO STDOUT statement * @param to the Writer to write the results to (row by row). * The Writer is not closed at the end of the Copy Out operation. 
* @return number of rows updated for server 8.2 or newer; -1 for older * @throws SQLException on database usage errors * @throws IOException upon writer or database connection failure */ public long copyOut(final String sql, Writer to) throws SQLException, IOException { byte[] buf; CopyOut cp = copyOut(sql); try { while ((buf = cp.readFromCopy()) != null) { to.write(encoding.decode(buf)); } return cp.getHandledRowCount(); } catch (IOException ioEX) { // if not handled this way the close call will hang, at least in 8.2 if (cp.isActive()) { cp.cancelCopy(); } try { // read until exhausted or operation cancelled SQLException while ((buf = cp.readFromCopy()) != null) { } } catch (SQLException sqlEx) { } // typically after several kB throw ioEX; } finally { // see to it that we do not leave the connection locked if (cp.isActive()) { cp.cancelCopy(); } } } /** * Pass results of a COPY TO STDOUT query from database into an OutputStream. * * @param sql COPY TO STDOUT statement * @param to the stream to write the results to (row by row) * The stream is not closed at the end of the operation. 
This is intentional so the * caller can continue to write to the output stream * @return number of rows updated for server 8.2 or newer; -1 for older * @throws SQLException on database usage errors * @throws IOException upon output stream or database connection failure */ public long copyOut(final String sql, OutputStream to) throws SQLException, IOException { byte[] buf; CopyOut cp = copyOut(sql); try { while ((buf = cp.readFromCopy()) != null) { to.write(buf); } return cp.getHandledRowCount(); } catch (IOException ioEX) { // if not handled this way the close call will hang, at least in 8.2 if (cp.isActive()) { cp.cancelCopy(); } try { // read until exhausted or operation cancelled SQLException while ((buf = cp.readFromCopy()) != null) { } } catch (SQLException sqlEx) { } // typically after several kB throw ioEX; } finally { // see to it that we do not leave the connection locked if (cp.isActive()) { cp.cancelCopy(); } } } /** * Use COPY FROM STDIN for very fast copying from a Reader into a database table. * * @param sql COPY FROM STDIN statement * @param from a CSV file or such * @return number of rows updated for server 8.2 or newer; -1 for older * @throws SQLException on database usage issues * @throws IOException upon reader or database connection failure */ public long copyIn(final String sql, Reader from) throws SQLException, IOException { return copyIn(sql, from, DEFAULT_BUFFER_SIZE); } /** * Use COPY FROM STDIN for very fast copying from a Reader into a database table. 
* * @param sql COPY FROM STDIN statement * @param from a CSV file or such * @param bufferSize number of characters to buffer and push over network to server at once * @return number of rows updated for server 8.2 or newer; -1 for older * @throws SQLException on database usage issues * @throws IOException upon reader or database connection failure */ public long copyIn(final String sql, Reader from, int bufferSize) throws SQLException, IOException { char[] cbuf = new char[bufferSize]; int len; CopyIn cp = copyIn(sql); try { while ((len = from.read(cbuf)) >= 0) { if (len > 0) { byte[] buf = encoding.encode(new String(cbuf, 0, len)); cp.writeToCopy(buf, 0, buf.length); } } return cp.endCopy(); } finally { // see to it that we do not leave the connection locked if (cp.isActive()) { cp.cancelCopy(); } } } /** * Use COPY FROM STDIN for very fast copying from an InputStream into a database table. * * @param sql COPY FROM STDIN statement * @param from a CSV file or such * @return number of rows updated for server 8.2 or newer; -1 for older * @throws SQLException on database usage issues * @throws IOException upon input stream or database connection failure */ public long copyIn(final String sql, InputStream from) throws SQLException, IOException { return copyIn(sql, from, DEFAULT_BUFFER_SIZE); } /** * Use COPY FROM STDIN for very fast copying from an InputStream into a database table. 
* * @param sql COPY FROM STDIN statement * @param from a CSV file or such * @param bufferSize number of bytes to buffer and push over network to server at once * @return number of rows updated for server 8.2 or newer; -1 for older * @throws SQLException on database usage issues * @throws IOException upon input stream or database connection failure */ public long copyIn(final String sql, InputStream from, int bufferSize) throws SQLException, IOException { byte[] buf = new byte[bufferSize]; int len; CopyIn cp = copyIn(sql); try { while ((len = from.read(buf)) >= 0) { if (len > 0) { cp.writeToCopy(buf, 0, len); } } return cp.endCopy(); } finally { // see to it that we do not leave the connection locked if (cp.isActive()) { cp.cancelCopy(); } } } /** * Use COPY FROM STDIN for very fast copying from an ByteStreamWriter into a database table. * * @param sql COPY FROM STDIN statement * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter * @return number of rows updated for server 8.2 or newer; -1 for older * @throws SQLException on database usage issues * @throws IOException upon input stream or database connection failure */ public long copyIn(String sql, ByteStreamWriter from) throws SQLException, IOException { CopyIn cp = copyIn(sql); try { cp.writeToCopy(from); return cp.endCopy(); } finally { // see to it that we do not leave the connection locked if (cp.isActive()) { cp.cancelCopy(); } } } }
8,556
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/LogSequenceNumber.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

/**
 * LSN (Log Sequence Number): a 64-bit pointer to a location in the XLOG,
 * conventionally rendered as two hexadecimal numbers separated by a slash
 * (high 32 bits / low 32 bits), e.g. {@code 16/3002D50}.
 *
 * <p>Instances are immutable and compare as unsigned 64-bit values.</p>
 */
public final class LogSequenceNumber implements Comparable<LogSequenceNumber> {
  /**
   * Zero is used to indicate an invalid pointer. Bootstrap skips the first possible
   * WAL segment, initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG record
   * can begin at zero.
   */
  public static final LogSequenceNumber INVALID_LSN = LogSequenceNumber.valueOf(0);

  private final long value;

  private LogSequenceNumber(long value) {
    this.value = value;
  }

  /**
   * @param value numeric position in the write-ahead log stream
   * @return non-null LSN instance
   */
  public static LogSequenceNumber valueOf(long value) {
    return new LogSequenceNumber(value);
  }

  /**
   * Parses the textual LSN form: two hexadecimal numbers of up to 8 digits each,
   * separated by a slash, for example {@code 16/3002D50} or {@code 0/15D68C50}.
   *
   * @param strValue non-null string to parse
   * @return parsed LSN, or {@link #INVALID_LSN} when no usable slash separator is
   *     present in the string
   */
  public static LogSequenceNumber valueOf(String strValue) {
    int slashIndex = strValue.lastIndexOf('/');
    if (slashIndex <= 0) {
      return INVALID_LSN;
    }
    // The two halves are parsed independently; the casts to int keep only the low
    // 32 bits of each half, preserving the historical behavior of the original
    // ByteBuffer-based implementation.
    int logicalXlog = (int) Long.parseLong(strValue.substring(0, slashIndex), 16);
    int segment = (int) Long.parseLong(strValue.substring(slashIndex + 1), 16);
    // Combine with shifts instead of allocating a ByteBuffer per call.
    long value = ((long) logicalXlog << 32) | (segment & 0xFFFFFFFFL);
    return LogSequenceNumber.valueOf(value);
  }

  /**
   * @return position in the write-ahead log stream as a long (unsigned semantics)
   */
  public long asLong() {
    return value;
  }

  /**
   * @return position rendered as two hexadecimal numbers of up to 8 digits each,
   *     separated by a slash, e.g. {@code 16/3002D50}, {@code 0/15D68C50}
   */
  public String asString() {
    int logicalXlog = (int) (value >>> 32);
    int segment = (int) value;
    return String.format("%X/%X", logicalXlog, segment);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    return value == ((LogSequenceNumber) o).value;
  }

  @Override
  public int hashCode() {
    // Equivalent to the hand-rolled (int) (value ^ (value >>> 32)).
    return Long.hashCode(value);
  }

  @Override
  public String toString() {
    return "LSN{" + asString() + '}';
  }

  @Override
  public int compareTo(LogSequenceNumber o) {
    // LSNs are unsigned 64-bit quantities; Long.compareUnsigned matches the
    // original's value + Long.MIN_VALUE bias trick.
    return Long.compareUnsigned(value, o.value);
  }
}
8,557
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/ReplicationType.java
/*
 * Copyright (c) 2017, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication;

/**
 * Kind of replication a slot or stream serves: LOGICAL (decoded changes via an
 * output plugin) or PHYSICAL (raw WAL).
 */
public enum ReplicationType {
  LOGICAL,
  PHYSICAL
}
8,558
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/ReplicationSlotInfo.java
/*
 * Copyright (c) 2018, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication;

/**
 * Immutable information returned on replication slot creation.
 *
 * <p>Mirrors the keys returned by CREATE_REPLICATION_SLOT:
 * <ol>
 * <li><b>slot_name</b> String {@code =>} the slot name
 * <li><b>consistent_point</b> String {@code =>} LSN at which we became consistent
 * <li><b>snapshot_name</b> String {@code =>} exported snapshot's name (may be <code>null</code>)
 * <li><b>output_plugin</b> String {@code =>} output plugin (may be <code>null</code>)
 * </ol>
 */
public final class ReplicationSlotInfo {

  private final String slotName;
  private final ReplicationType replicationType;
  private final LogSequenceNumber consistentPoint;
  private final String snapshotName;
  private final String outputPlugin;

  public ReplicationSlotInfo(String slotName, ReplicationType replicationType,
      LogSequenceNumber consistentPoint, String snapshotName, String outputPlugin) {
    this.slotName = slotName;
    this.replicationType = replicationType;
    this.consistentPoint = consistentPoint;
    this.snapshotName = snapshotName;
    this.outputPlugin = outputPlugin;
  }

  /**
   * @return the replication slot name
   */
  public String getSlotName() {
    return slotName;
  }

  /**
   * @return type of the created slot: PHYSICAL or LOGICAL
   */
  public ReplicationType getReplicationType() {
    return replicationType;
  }

  /**
   * @return LSN at which we became consistent (consistent_point)
   */
  public LogSequenceNumber getConsistentPoint() {
    return consistentPoint;
  }

  /**
   * Exported snapshot name at the point of slot creation. As long as the exporting
   * transaction remains open, other transactions can import its snapshot and are
   * thereby guaranteed to see exactly the same view of the database that the first
   * transaction sees.
   *
   * @return exported snapshot_name (may be <code>null</code>)
   */
  public String getSnapshotName() {
    return snapshotName;
  }

  /**
   * @return output plugin used on slot creation (may be <code>null</code>)
   */
  public String getOutputPlugin() {
    return outputPlugin;
  }
}
8,559
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/RedshiftReplicationConnectionImpl.java
/* * Copyright (c) 2016, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.replication; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.replication.fluent.ChainedCreateReplicationSlotBuilder; import com.amazon.redshift.replication.fluent.ChainedStreamBuilder; import com.amazon.redshift.replication.fluent.ReplicationCreateSlotBuilder; import com.amazon.redshift.replication.fluent.ReplicationStreamBuilder; import java.sql.SQLException; import java.sql.Statement; public class RedshiftReplicationConnectionImpl implements RedshiftReplicationConnection { private BaseConnection connection; public RedshiftReplicationConnectionImpl(BaseConnection connection) { this.connection = connection; } @Override public ChainedStreamBuilder replicationStream() { return new ReplicationStreamBuilder(connection); } @Override public ChainedCreateReplicationSlotBuilder createReplicationSlot() { return new ReplicationCreateSlotBuilder(connection); } @Override public void dropReplicationSlot(String slotName) throws SQLException { if (slotName == null || slotName.isEmpty()) { throw new IllegalArgumentException("Replication slot name can't be null or empty"); } Statement statement = connection.createStatement(); try { statement.execute("DROP_REPLICATION_SLOT " + slotName); } finally { statement.close(); } } }
8,560
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/RedshiftReplicationStream.java
/* * Copyright (c) 2016, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.replication; import com.amazon.redshift.replication.fluent.CommonOptions; import com.amazon.redshift.replication.fluent.logical.LogicalReplicationOptions; import java.nio.ByteBuffer; import java.sql.SQLException; /** * Not tread safe replication stream (though certain methods can be safely called by different * threads). After complete streaming should be close, for free resource on backend. Periodical * status update work only when use {@link RedshiftReplicationStream#read()} method. It means that * process wal record should be fast as possible, because during process wal record lead to * disconnect by timeout from server. */ public interface RedshiftReplicationStream //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1" extends AutoCloseable //JCP! endif /* hi, checkstyle */ { /** * <p>Read next wal record from backend. It method can be block until new message will not get * from server.</p> * * <p>A single WAL record is never split across two XLogData messages. When a WAL record crosses a * WAL page boundary, and is therefore already split using continuation records, it can be split * at the page boundary. In other words, the first main WAL record and its continuation records * can be sent in different XLogData messages.</p> * * @return not null byte array received by replication protocol, return ByteBuffer wrap around * received byte array with use offset, so, use {@link ByteBuffer#array()} carefully * @throws SQLException when some internal exception occurs during read from stream */ ByteBuffer read() throws SQLException; /** * <p>Read next WAL record from backend. This method does not block and in contrast to {@link * RedshiftReplicationStream#read()}. If message from backend absent return null. 
It allow periodically * check message in stream and if they absent sleep some time, but it time should be less than * {@link CommonOptions#getStatusInterval()} to avoid disconnect from the server.</p> * * <p>A single WAL record is never split across two XLogData messages. When a WAL record crosses a * WAL page boundary, and is therefore already split using continuation records, it can be split * at the page boundary. In other words, the first main WAL record and its continuation records * can be sent in different XLogData messages.</p> * * @return byte array received by replication protocol or NULL if pending message from server * absent. Returns ByteBuffer wrap around received byte array with use offset, so, use {@link * ByteBuffer#array()} carefully. * @throws SQLException when some internal exception occurs during read from stream */ ByteBuffer readPending() throws SQLException; /** * <p>Parameter updates by execute {@link RedshiftReplicationStream#read()} method.</p> * * <p>It is safe to call this method in a thread different than the main thread. However, usually this * method is called in the main thread after a successful {@link RedshiftReplicationStream#read()} or * {@link RedshiftReplicationStream#readPending()}, to get the LSN corresponding to the received record.</p> * * @return NOT NULL LSN position that was receive last time via {@link RedshiftReplicationStream#read()} * method */ LogSequenceNumber getLastReceiveLSN(); /** * <p>Last flushed LSN sent in update message to backend. Parameter updates only via {@link * RedshiftReplicationStream#setFlushedLSN(LogSequenceNumber)}</p> * * <p>It is safe to call this method in a thread different than the main thread.</p> * * @return NOT NULL location of the last WAL flushed to disk in the standby. */ LogSequenceNumber getLastFlushedLSN(); /** * <p>Last applied lsn sent in update message to backed. 
Parameter updates only via {@link * RedshiftReplicationStream#setAppliedLSN(LogSequenceNumber)}</p> * * <p>It is safe to call this method in a thread different than the main thread.</p> * * @return not null location of the last WAL applied in the standby. */ LogSequenceNumber getLastAppliedLSN(); /** * <p>Set flushed LSN. This parameter will be sent to backend on next update status iteration. Flushed * LSN position help backend define which WAL can be recycled.</p> * * <p>It is safe to call this method in a thread different than the main thread. The updated value * will be sent to the backend in the next status update run.</p> * * @param flushed NOT NULL location of the last WAL flushed to disk in the standby. * @see RedshiftReplicationStream#forceUpdateStatus() */ void setFlushedLSN(LogSequenceNumber flushed); /** * <p>Inform backend which LSN has been applied on standby. * Feedback will send to backend on next update status iteration.</p> * * <p>It is safe to call this method in a thread different than the main thread. The updated value * will be sent to the backend in the next status update run.</p> * * @param applied NOT NULL location of the last WAL applied in the standby. * @see RedshiftReplicationStream#forceUpdateStatus() */ void setAppliedLSN(LogSequenceNumber applied); /** * Force send last received, flushed and applied LSN status to backend. You cannot send LSN status * explicitly because {@link RedshiftReplicationStream} sends the status to backend periodically by * configured interval via {@link LogicalReplicationOptions#getStatusInterval} * * @throws SQLException when some internal exception occurs during read from stream * @see LogicalReplicationOptions#getStatusInterval() */ void forceUpdateStatus() throws SQLException; /** * @return {@code true} if replication stream was already close, otherwise return {@code false} */ boolean isClosed(); /** * <p>Stop replication changes from server and free resources. 
After that connection can be reuse * to another queries. Also after close current stream they cannot be used anymore.</p> * * <p><b>Note:</b> This method can spend much time for logical replication stream on Redshift * version 9.6 and lower, because Redshift have bug - during decode big transaction to logical * form and during wait new changes Redshift ignore messages from client. As workaround you can * close replication connection instead of close replication stream. For more information about it * problem see mailing list thread <a href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com"> * Stopping logical replication protocol</a></p> * * @throws SQLException when some internal exception occurs during end streaming */ void close() throws SQLException; }
8,561
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/RedshiftReplicationConnection.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication;

import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.replication.fluent.ChainedCreateReplicationSlotBuilder;
import com.amazon.redshift.replication.fluent.ChainedStreamBuilder;

import java.sql.SQLException;

/**
 * Replication API entry point. Available only when the connection was created with the
 * replication-specific properties {@link RedshiftProperty#REPLICATION} and
 * {@link RedshiftProperty#ASSUME_MIN_SERVER_VERSION}; without them, building a
 * replication stream fails with an exception.
 */
public interface RedshiftReplicationConnection {

  /**
   * Starts building a replication stream. After the stream is started, this connection
   * is unavailable for other queries until the stream is closed.
   *
   * @return non-null fluent API for building a replication stream
   */
  ChainedStreamBuilder replicationStream();

  /**
   * Starts building a replication slot that can subsequently be used with
   * {@link RedshiftReplicationConnection#replicationStream()}.
   *
   * <p>Replication slots provide an automated way to ensure that the master does not
   * remove WAL segments until they have been received by all standbys, and that the
   * master does not remove rows which could cause a recovery conflict even when the
   * standby is disconnected.</p>
   *
   * @return non-null fluent API for building a create-replication-slot command
   */
  ChainedCreateReplicationSlotBuilder createReplicationSlot();

  /**
   * @param slotName non-null name of an existing replication slot to drop
   * @throws SQLException if the replication slot cannot be dropped
   */
  void dropReplicationSlot(String slotName) throws SQLException;
}
8,562
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/AbstractCreateSlotBuilder.java
/* * Copyright (c) 2016, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.replication.fluent; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.core.ServerVersion; import com.amazon.redshift.util.GT; import java.sql.SQLFeatureNotSupportedException; public abstract class AbstractCreateSlotBuilder<T extends ChainedCommonCreateSlotBuilder<T>> implements ChainedCommonCreateSlotBuilder<T> { protected String slotName; protected boolean temporaryOption = false; protected BaseConnection connection; protected AbstractCreateSlotBuilder(BaseConnection connection) { this.connection = connection; } protected abstract T self(); @Override public T withSlotName(String slotName) { this.slotName = slotName; return self(); } @Override public T withTemporaryOption() throws SQLFeatureNotSupportedException { if (!connection.haveMinimumServerVersion(ServerVersion.v10)) { throw new SQLFeatureNotSupportedException( GT.tr("Server does not support temporary replication slots") ); } this.temporaryOption = true; return self(); } }
8,563
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/ReplicationCreateSlotBuilder.java
/* * Copyright (c) 2016, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.replication.fluent; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.replication.fluent.logical.ChainedLogicalCreateSlotBuilder; import com.amazon.redshift.replication.fluent.logical.LogicalCreateSlotBuilder; import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder; import com.amazon.redshift.replication.fluent.physical.PhysicalCreateSlotBuilder; public class ReplicationCreateSlotBuilder implements ChainedCreateReplicationSlotBuilder { private final BaseConnection baseConnection; public ReplicationCreateSlotBuilder(BaseConnection baseConnection) { this.baseConnection = baseConnection; } @Override public ChainedLogicalCreateSlotBuilder logical() { return new LogicalCreateSlotBuilder(baseConnection); } @Override public ChainedPhysicalCreateSlotBuilder physical() { return new PhysicalCreateSlotBuilder(baseConnection); } }
8,564
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/CommonOptions.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent;

import com.amazon.redshift.replication.LogSequenceNumber;

/**
 * Parameters common to logical and physical replication.
 */
public interface CommonOptions {

  /**
   * Replication slots provide an automated way to ensure that the master does not
   * remove WAL segments until they have been received by all standbys, and that the
   * master does not remove rows which could cause a recovery conflict even when the
   * standby is disconnected.
   *
   * @return nullable name of a replication slot that already exists on the server and
   *     is free
   */
  String getSlotName();

  /**
   * @return the position to start replication from; never null
   */
  LogSequenceNumber getStartLSNPosition();

  /**
   * Number of milliseconds between status packets sent back to the server, which makes
   * monitoring progress from the server easier. A value of zero disables the periodic
   * status updates completely, although an update is still sent when requested by the
   * server to avoid a timeout disconnect. The default value is 10 seconds.
   *
   * @return the current status interval
   */
  int getStatusInterval();
}
8,565
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/ChainedCommonCreateSlotBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent;

import com.amazon.redshift.replication.ReplicationSlotInfo;

import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;

/**
 * Fluent interface for the parameters shared by logical and physical replication slot
 * creation.
 */
public interface ChainedCommonCreateSlotBuilder<T extends ChainedCommonCreateSlotBuilder<T>> {

  /**
   * Sets the slot name. Replication slots provide an automated way to ensure that the
   * master does not remove WAL segments until they have been received by all standbys,
   * and that the master does not remove rows which could cause a recovery conflict
   * even when the standby is disconnected.
   *
   * @param slotName non-null, unique name of the slot to create
   * @return T a slot builder
   */
  T withSlotName(String slotName);

  /**
   * Marks the slot as temporary: temporary slots are not saved to disk and are
   * automatically dropped on error or when the session has finished.
   *
   * <p>Only supported by PostgreSQL versions &gt;= 10.</p>
   *
   * @return T a slot builder
   * @throws SQLFeatureNotSupportedException if the server does not support the option
   */
  T withTemporaryOption() throws SQLFeatureNotSupportedException;

  /**
   * Creates a slot with the configured parameters in the database.
   *
   * @return ReplicationSlotInfo describing the created slot
   * @throws SQLException on error
   */
  ReplicationSlotInfo make() throws SQLException;
}
8,566
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/ChainedCreateReplicationSlotBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent;

import com.amazon.redshift.replication.fluent.logical.ChainedLogicalCreateSlotBuilder;
import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder;

/**
 * Fluent entry point for choosing between logical and physical replication slot
 * creation.
 */
public interface ChainedCreateReplicationSlotBuilder {

  /**
   * Returns the logical slot builder.
   *
   * <p>Example usage:</p>
   * <pre>
   * {@code
   *
   *  pgConnection
   *      .getReplicationAPI()
   *      .createReplicationSlot()
   *      .logical()
   *      .withSlotName("mySlot")
   *      .withOutputPlugin("test_decoding")
   *      .make();
   *
   *  RedshiftReplicationStream stream =
   *      pgConnection
   *          .getReplicationAPI()
   *          .replicationStream()
   *          .logical()
   *          .withSlotName("mySlot")
   *          .withSlotOption("include-xids", false)
   *          .withSlotOption("skip-empty-xacts", true)
   *          .start();
   *
   *  while (true) {
   *    ByteBuffer buffer = stream.read();
   *    //process logical changes
   *  }
   *
   * }
   * </pre>
   *
   * @return non-null fluent API
   */
  ChainedLogicalCreateSlotBuilder logical();

  /**
   * Returns the physical slot builder, for processing WAL logs in binary form.
   *
   * <p>Example usage:</p>
   * <pre>
   * {@code
   *
   *  pgConnection
   *      .getReplicationAPI()
   *      .createReplicationSlot()
   *      .physical()
   *      .withSlotName("mySlot")
   *      .make();
   *
   *  RedshiftReplicationStream stream =
   *      pgConnection
   *          .getReplicationAPI()
   *          .replicationStream()
   *          .physical()
   *          .withSlotName("mySlot")
   *          .start();
   *
   *  while (true) {
   *    ByteBuffer buffer = stream.read();
   *    //process binary WAL logs
   *  }
   *
   * }
   * </pre>
   *
   * @return non-null fluent API
   */
  ChainedPhysicalCreateSlotBuilder physical();
}
8,567
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/ChainedCommonStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent;

import com.amazon.redshift.replication.LogSequenceNumber;

import java.util.concurrent.TimeUnit;

/**
 * Fluent interface for specifying the parameters common to both Logical and Physical
 * replication streams.
 *
 * @param <T> the concrete builder type, so each setter can return the subtype for chaining
 */
public interface ChainedCommonStreamBuilder<T extends ChainedCommonStreamBuilder<T>> {

  /**
   * Replication slots provide an automated way to ensure that the master does not remove WAL
   * segments until they have been received by all standbys, and that the master does not remove
   * rows which could cause a recovery conflict even when the standby is disconnected.
   *
   * @param slotName not null name of a replication slot that already exists on the server.
   * @return this instance as a fluent interface
   */
  T withSlotName(String slotName);

  /**
   * Specifies the amount of time between status packets sent back to the server. This allows for
   * easier monitoring of the progress from server. A value of zero disables the periodic status
   * updates completely, although an update will still be sent when requested by the server, to
   * avoid timeout disconnect. The default value is 10 seconds.
   *
   * @param time positive time
   * @param format unit for the specified time
   * @return not null fluent
   */
  T withStatusInterval(int time, TimeUnit format);

  /**
   * Specify the start position from which the backend will start streaming changes. If this
   * parameter is not specified, streaming starts from restart_lsn. For more details see the
   * pg_replication_slots description.
   *
   * @param lsn not null position from which to start replicating changes
   * @return not null fluent
   */
  T withStartPosition(LogSequenceNumber lsn);
}
8,568
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/AbstractStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent;

import com.amazon.redshift.replication.LogSequenceNumber;

import java.util.concurrent.TimeUnit;

/**
 * Base class holding the state shared by logical and physical stream builders:
 * slot name, start LSN, and the status-update interval.
 *
 * @param <T> the concrete builder type returned by every fluent setter
 */
public abstract class AbstractStreamBuilder<T extends ChainedCommonStreamBuilder<T>>
    implements ChainedCommonStreamBuilder<T> {
  // Default interval between status packets: 10 seconds, stored in milliseconds.
  private static final int DEFAULT_STATUS_INTERVAL = (int) TimeUnit.SECONDS.toMillis(10L);

  protected int statusIntervalMs = DEFAULT_STATUS_INTERVAL;
  protected LogSequenceNumber startPosition = LogSequenceNumber.INVALID_LSN;
  protected String slotName;

  /**
   * @return this builder, typed as the concrete subclass, to enable fluent chaining
   */
  protected abstract T self();

  @Override
  public T withSlotName(String slotName) {
    this.slotName = slotName;
    return self();
  }

  @Override
  public T withStatusInterval(int time, TimeUnit format) {
    // format.toMillis(time) is equivalent to TimeUnit.MILLISECONDS.convert(time, format).
    this.statusIntervalMs = (int) format.toMillis(time);
    return self();
  }

  @Override
  public T withStartPosition(LogSequenceNumber lsn) {
    this.startPosition = lsn;
    return self();
  }
}
8,569
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/ChainedStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent;

import com.amazon.redshift.replication.fluent.logical.ChainedLogicalStreamBuilder;
import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalStreamBuilder;

/**
 * Start point for the fluent API that builds a replication stream (logical or physical).
 * The API is not thread safe, and each instance can be used to create only a single stream.
 */
public interface ChainedStreamBuilder {
  /**
   * <p>Create logical replication stream that decode raw wal logs by output plugin to logical form.
   * Default about logical decoding you can see by following link
   * <a href="http://www.postgresql.org/docs/current/static/logicaldecoding-explanation.html">
   *   Logical Decoding Concepts
   * </a>.
   * </p>
   *
   * <p>Example usage:</p>
   * <pre>
   *   {&#64;code
   *
   *    RedshiftReplicationStream stream =
   *        pgConnection
   *            .getReplicationAPI()
   *            .replicationStream()
   *            .logical()
   *            .withSlotName("test_decoding")
   *            .withSlotOption("include-xids", false)
   *            .withSlotOption("skip-empty-xacts", true)
   *            .start();
   *
   *    while (true) {
   *      ByteBuffer buffer = stream.read();
   *      //process logical changes
   *    }
   *
   * }
   * </pre>
   *
   * @return not null fluent api
   */
  ChainedLogicalStreamBuilder logical();

  /**
   * <p>Create physical replication stream for process wal logs in binary form.</p>
   *
   * <p>Example usage:</p>
   * <pre>
   *   {&#64;code
   *
   *    LogSequenceNumber lsn = getCurrentLSN();
   *
   *    RedshiftReplicationStream stream =
   *        pgConnection
   *            .getReplicationAPI()
   *            .replicationStream()
   *            .physical()
   *            .withStartPosition(lsn)
   *            .start();
   *
   *    while (true) {
   *      ByteBuffer buffer = stream.read();
   *      //process binary WAL logs
   *    }
   *
   * }
   * </pre>
   *
   * @return not null fluent api
   */
  ChainedPhysicalStreamBuilder physical();
}
8,570
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/ReplicationStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.core.ReplicationProtocol;
import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.fluent.logical.ChainedLogicalStreamBuilder;
import com.amazon.redshift.replication.fluent.logical.LogicalReplicationOptions;
import com.amazon.redshift.replication.fluent.logical.LogicalStreamBuilder;
import com.amazon.redshift.replication.fluent.logical.StartLogicalReplicationCallback;
import com.amazon.redshift.replication.fluent.physical.ChainedPhysicalStreamBuilder;
import com.amazon.redshift.replication.fluent.physical.PhysicalReplicationOptions;
import com.amazon.redshift.replication.fluent.physical.PhysicalStreamBuilder;
import com.amazon.redshift.replication.fluent.physical.StartPhysicalReplicationCallback;

import java.sql.SQLException;

/**
 * Entry point of the fluent replication-stream API. Hands out logical and physical
 * stream builders, each bound to the connection supplied at construction time.
 */
public class ReplicationStreamBuilder implements ChainedStreamBuilder {
  private final BaseConnection baseConnection;

  /**
   * @param connection not null connection with which the replication will be associated
   */
  public ReplicationStreamBuilder(final BaseConnection connection) {
    this.baseConnection = connection;
  }

  @Override
  public ChainedLogicalStreamBuilder logical() {
    // The callback runs only when start() is invoked on the returned builder,
    // so the protocol lookup is deferred until the stream is actually opened.
    return new LogicalStreamBuilder(options -> {
      ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
      return protocol.startLogical(options, baseConnection.getLogger());
    });
  }

  @Override
  public ChainedPhysicalStreamBuilder physical() {
    return new PhysicalStreamBuilder(options -> {
      ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
      return protocol.startPhysical(options, baseConnection.getLogger());
    });
  }
}
8,571
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/physical/ChainedPhysicalStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.physical;

import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.fluent.ChainedCommonStreamBuilder;

import java.sql.SQLException;

/**
 * Fluent builder for opening a physical replication stream.
 */
public interface ChainedPhysicalStreamBuilder extends
    ChainedCommonStreamBuilder<ChainedPhysicalStreamBuilder> {

  /**
   * Open physical replication stream.
   *
   * @return not null RedshiftReplicationStream available for fetching wal logs in binary form
   * @throws SQLException on error
   */
  RedshiftReplicationStream start() throws SQLException;
}
8,572
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/physical/PhysicalStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.physical;

import com.amazon.redshift.replication.LogSequenceNumber;
import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.fluent.AbstractStreamBuilder;

import java.sql.SQLException;

/**
 * Physical stream builder: collects the common stream parameters (slot name,
 * start LSN, status interval) and hands itself, as the options object, to the
 * start callback when the stream is opened.
 */
public class PhysicalStreamBuilder extends AbstractStreamBuilder<ChainedPhysicalStreamBuilder>
    implements ChainedPhysicalStreamBuilder, PhysicalReplicationOptions {

  private final StartPhysicalReplicationCallback startCallback;

  /**
   * @param startCallback not null callback that will be executed once the
   *     parameters for starting replication have been built
   */
  public PhysicalStreamBuilder(StartPhysicalReplicationCallback startCallback) {
    this.startCallback = startCallback;
  }

  @Override
  protected ChainedPhysicalStreamBuilder self() {
    return this;
  }

  /**
   * Opens the stream by delegating to the start callback, passing this builder
   * as the completed set of replication options.
   */
  @Override
  public RedshiftReplicationStream start() throws SQLException {
    return startCallback.start(this);
  }

  @Override
  public String getSlotName() {
    return this.slotName;
  }

  @Override
  public LogSequenceNumber getStartLSNPosition() {
    return this.startPosition;
  }

  @Override
  public int getStatusInterval() {
    return this.statusIntervalMs;
  }
}
8,573
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/physical/PhysicalReplicationOptions.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.physical;

import com.amazon.redshift.replication.fluent.CommonOptions;

/**
 * Marker interface: physical replication defines no options beyond those
 * declared by {@link CommonOptions}.
 */
public interface PhysicalReplicationOptions extends CommonOptions {
}
8,574
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.physical;

import com.amazon.redshift.replication.fluent.ChainedCommonCreateSlotBuilder;

/**
 * Physical replication slot specific parameters. Physical slots add nothing
 * beyond the common slot-creation parameters.
 */
public interface ChainedPhysicalCreateSlotBuilder extends
    ChainedCommonCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder> {
}
8,575
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/physical/StartPhysicalReplicationCallback.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.physical;

import com.amazon.redshift.replication.RedshiftReplicationStream;

import java.sql.SQLException;

/**
 * Callback invoked by {@code PhysicalStreamBuilder#start()} once the physical
 * replication options have been fully assembled.
 */
public interface StartPhysicalReplicationCallback {
  /**
   * Open a physical replication stream with the given options.
   *
   * @param options the fully built physical replication options
   * @return the opened replication stream
   * @throws SQLException on error
   */
  RedshiftReplicationStream start(PhysicalReplicationOptions options) throws SQLException;
}
8,576
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/physical/PhysicalCreateSlotBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.physical;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.replication.LogSequenceNumber;
import com.amazon.redshift.replication.ReplicationSlotInfo;
import com.amazon.redshift.replication.ReplicationType;
import com.amazon.redshift.replication.fluent.AbstractCreateSlotBuilder;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * Builder that creates a physical replication slot via the
 * {@code CREATE_REPLICATION_SLOT ... PHYSICAL} replication-protocol command.
 */
public class PhysicalCreateSlotBuilder
    extends AbstractCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder>
    implements ChainedPhysicalCreateSlotBuilder {

  public PhysicalCreateSlotBuilder(BaseConnection connection) {
    super(connection);
  }

  @Override
  protected ChainedPhysicalCreateSlotBuilder self() {
    return this;
  }

  /**
   * Creates the physical replication slot on the server.
   *
   * @return information about the created slot, or {@code null} if the server
   *     returned no result row
   * @throws SQLException on database errors
   * @throws IllegalArgumentException if no slot name was set
   */
  @Override
  public ReplicationSlotInfo make() throws SQLException {
    if (slotName == null || slotName.isEmpty()) {
      throw new IllegalArgumentException("Replication slotName can't be null");
    }

    // try-with-resources guarantees the Statement is closed even if closing the
    // ResultSet throws; the previous manual finally block could leak it.
    try (Statement statement = connection.createStatement()) {
      statement.execute(String.format(
          "CREATE_REPLICATION_SLOT %s %s PHYSICAL",
          slotName,
          temporaryOption ? "TEMPORARY" : ""
      ));

      try (ResultSet result = statement.getResultSet()) {
        if (result != null && result.next()) {
          return new ReplicationSlotInfo(
              result.getString("slot_name"),
              ReplicationType.PHYSICAL,
              LogSequenceNumber.valueOf(result.getString("consistent_point")),
              result.getString("snapshot_name"),
              result.getString("output_plugin"));
        }
      }
    }
    return null;
  }
}
8,577
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/logical/LogicalReplicationOptions.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.logical;

import com.amazon.redshift.replication.fluent.CommonOptions;

import java.util.Properties;

/**
 * Options specific to logical replication: the slot name is mandatory, and an
 * arbitrary set of key/value options is forwarded to the slot's output plugin.
 */
public interface LogicalReplicationOptions extends CommonOptions {
  /**
   * Required parameter for logical replication.
   *
   * @return not null logical replication slot name that already exists on the server and is free.
   */
  String getSlotName();

  /**
   * Parameters for the output plugin. These parameters will be passed to the output plugin
   * registered for the specified replication slot name.
   *
   * @return list of options that will be passed to the output_plugin for which the replication
   *     slot was created
   */
  Properties getSlotOptions();
}
8,578
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/logical/ChainedLogicalStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.logical;

import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.fluent.ChainedCommonStreamBuilder;

import java.sql.SQLException;
import java.util.Properties;

/**
 * Fluent builder for opening a logical replication stream, including the
 * key/value options forwarded to the slot's output plugin.
 */
public interface ChainedLogicalStreamBuilder
    extends ChainedCommonStreamBuilder<ChainedLogicalStreamBuilder> {
  /**
   * Open logical replication stream.
   *
   * @return not null RedshiftReplicationStream available for fetching data in logical form
   * @throws SQLException if there are errors
   */
  RedshiftReplicationStream start() throws SQLException;

  /**
   * Add a boolean option to pass to the output plugin.
   *
   * @param optionName name of option
   * @param optionValue boolean value
   * @return ChainedLogicalStreamBuilder
   */
  ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue);

  /**
   * Add an integer option to pass to the output plugin.
   *
   * @param optionName name of option
   * @param optionValue integer value
   * @return ChainedLogicalStreamBuilder
   */
  ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue);

  /**
   * Add a string option to pass to the output plugin.
   *
   * @param optionName name of option
   * @param optionValue String value
   * @return ChainedLogicalStreamBuilder
   */
  ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue);

  /**
   * Add all options from the given properties to pass to the output plugin.
   *
   * @param options properties
   * @return ChainedLogicalStreamBuilder
   */
  ChainedLogicalStreamBuilder withSlotOptions(Properties options);
}
8,579
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/logical/LogicalStreamBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.logical;

import com.amazon.redshift.replication.LogSequenceNumber;
import com.amazon.redshift.replication.RedshiftReplicationStream;
import com.amazon.redshift.replication.fluent.AbstractStreamBuilder;

import java.sql.SQLException;
import java.util.Properties;

/**
 * Logical stream builder: collects the common stream parameters plus the
 * output-plugin options, and hands itself, as the options object, to the
 * start callback when the stream is opened.
 */
public class LogicalStreamBuilder extends AbstractStreamBuilder<ChainedLogicalStreamBuilder>
    implements ChainedLogicalStreamBuilder, LogicalReplicationOptions {

  private final Properties slotOptions;

  // final for consistency with PhysicalStreamBuilder: the callback is assigned
  // once in the constructor and never reassigned.
  private final StartLogicalReplicationCallback startCallback;

  /**
   * @param startCallback not null callback that will be executed once the
   *     parameters for starting replication have been built
   */
  public LogicalStreamBuilder(StartLogicalReplicationCallback startCallback) {
    this.startCallback = startCallback;
    this.slotOptions = new Properties();
  }

  @Override
  protected ChainedLogicalStreamBuilder self() {
    return this;
  }

  /**
   * Opens the stream by delegating to the start callback, passing this builder
   * as the completed set of replication options.
   */
  @Override
  public RedshiftReplicationStream start() throws SQLException {
    return startCallback.start(this);
  }

  @Override
  public String getSlotName() {
    return slotName;
  }

  @Override
  public ChainedLogicalStreamBuilder withStartPosition(LogSequenceNumber lsn) {
    startPosition = lsn;
    return this;
  }

  @Override
  public ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue) {
    slotOptions.setProperty(optionName, String.valueOf(optionValue));
    return this;
  }

  @Override
  public ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue) {
    slotOptions.setProperty(optionName, String.valueOf(optionValue));
    return this;
  }

  @Override
  public ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue) {
    slotOptions.setProperty(optionName, optionValue);
    return this;
  }

  @Override
  public ChainedLogicalStreamBuilder withSlotOptions(Properties options) {
    // Copy via stringPropertyNames() so defaults of the supplied Properties are
    // flattened into this builder's own option set.
    for (String propertyName : options.stringPropertyNames()) {
      slotOptions.setProperty(propertyName, options.getProperty(propertyName));
    }
    return this;
  }

  @Override
  public LogSequenceNumber getStartLSNPosition() {
    return startPosition;
  }

  @Override
  public Properties getSlotOptions() {
    return slotOptions;
  }

  @Override
  public int getStatusInterval() {
    return statusIntervalMs;
  }
}
8,580
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.logical;

import com.amazon.redshift.replication.fluent.ChainedCommonCreateSlotBuilder;

/**
 * Logical replication slot specific parameters.
 */
public interface ChainedLogicalCreateSlotBuilder
    extends ChainedCommonCreateSlotBuilder<ChainedLogicalCreateSlotBuilder> {

  /**
   * <p>Output plugin that should be used to decode the physical representation of the WAL into
   * some logical form. The output plugin should be installed on the server (exist in
   * shared_preload_libraries).</p>
   *
   * <p>Package postgresql-contrib provides sample output plugin <b>test_decoding</b> that can be
   * used for testing the logical replication api.</p>
   *
   * @param outputPlugin not null name of the output plugin used for logical decoding
   * @return the logical slot builder
   */
  ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin);
}
8,581
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/logical/StartLogicalReplicationCallback.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.logical;

import com.amazon.redshift.replication.RedshiftReplicationStream;

import java.sql.SQLException;

/**
 * Callback invoked by {@code LogicalStreamBuilder#start()} once the logical
 * replication options have been fully assembled.
 */
public interface StartLogicalReplicationCallback {
  /**
   * Open a logical replication stream with the given options.
   *
   * @param options the fully built logical replication options
   * @return the opened replication stream
   * @throws SQLException on error
   */
  RedshiftReplicationStream start(LogicalReplicationOptions options) throws SQLException;
}
8,582
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/replication/fluent/logical/LogicalCreateSlotBuilder.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.replication.fluent.logical;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.replication.LogSequenceNumber;
import com.amazon.redshift.replication.ReplicationSlotInfo;
import com.amazon.redshift.replication.ReplicationType;
import com.amazon.redshift.replication.fluent.AbstractCreateSlotBuilder;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * Builder that creates a logical replication slot via the
 * {@code CREATE_REPLICATION_SLOT ... LOGICAL <output_plugin>} replication-protocol command.
 */
public class LogicalCreateSlotBuilder
    extends AbstractCreateSlotBuilder<ChainedLogicalCreateSlotBuilder>
    implements ChainedLogicalCreateSlotBuilder {

  private String outputPlugin;

  public LogicalCreateSlotBuilder(BaseConnection connection) {
    super(connection);
  }

  @Override
  protected ChainedLogicalCreateSlotBuilder self() {
    return this;
  }

  @Override
  public ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin) {
    this.outputPlugin = outputPlugin;
    return self();
  }

  /**
   * Creates the logical replication slot on the server.
   *
   * @return information about the created slot, or {@code null} if the server
   *     returned no result row
   * @throws SQLException on database errors
   * @throws IllegalArgumentException if no slot name or output plugin was set
   */
  @Override
  public ReplicationSlotInfo make() throws SQLException {
    if (outputPlugin == null || outputPlugin.isEmpty()) {
      throw new IllegalArgumentException(
          "OutputPlugin required parameter for logical replication slot");
    }

    if (slotName == null || slotName.isEmpty()) {
      throw new IllegalArgumentException("Replication slotName can't be null");
    }

    // try-with-resources guarantees the Statement is closed even if closing the
    // ResultSet throws; the previous manual finally block could leak it.
    try (Statement statement = connection.createStatement()) {
      statement.execute(String.format(
          "CREATE_REPLICATION_SLOT %s %s LOGICAL %s",
          slotName,
          temporaryOption ? "TEMPORARY" : "",
          outputPlugin
      ));

      try (ResultSet result = statement.getResultSet()) {
        if (result != null && result.next()) {
          return new ReplicationSlotInfo(
              result.getString("slot_name"),
              ReplicationType.LOGICAL,
              LogSequenceNumber.valueOf(result.getString("consistent_point")),
              result.getString("snapshot_name"),
              result.getString("output_plugin"));
        }
      }
    }
    return null;
  }
}
8,583
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/fastpath/Fastpath.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.fastpath;

import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.QueryExecutor;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.util.ByteConverter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;

/**
 * <p>This class implements the Fastpath api.</p>
 *
 * <p>This is a means of executing functions embedded in the backend from within a java
 * application.</p>
 *
 * <p>It is based around the file src/interfaces/libpq/fe-exec.c</p>
 *
 * @deprecated This API is somewhat obsolete, as one may achieve similar performance
 *             and greater functionality by setting up a prepared statement to define
 *             the function call. Then, executing the statement with binary transmission
 *             of parameters and results substitutes for a fast-path function call.
 */
@Deprecated
public class Fastpath {
  // Java passes oids around as longs, but in the backend
  // it's an unsigned int, so we use this to make the conversion
  // of long -> signed int which the backend interprets as unsigned.
  private static final long NUM_OIDS = 4294967296L; // 2^32

  // This maps the functions names to their id's (possible unique just
  // to a connection).
  private final Map<String, Integer> func = new HashMap<String, Integer>();
  // Executor used for the actual wire-protocol fastpath calls.
  private final QueryExecutor executor;
  private final BaseConnection connection;

  /**
   * Initialises the fastpath system.
   *
   * @param conn BaseConnection to attach to
   */
  public Fastpath(BaseConnection conn) {
    this.connection = conn;
    this.executor = conn.getQueryExecutor();
  }

  /**
   * Send a function call to the Redshift backend.
   *
   * @param fnId Function id
   * @param resultType True if the result is a numeric (Integer or Long)
   * @param args FastpathArguments to pass to fastpath
   * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
   *         otherwise
   * @throws SQLException if a database-access error occurs.
   * @deprecated please use {@link #fastpath(int, FastpathArg[])}
   */
  @Deprecated
  public Object fastpath(int fnId, boolean resultType, FastpathArg[] args) throws SQLException {
    // Run it.
    byte[] returnValue = fastpath(fnId, args);

    // Interpret results.
    if (!resultType || returnValue == null) {
      return returnValue;
    }

    // The numeric result type is inferred from the raw payload length:
    // 4 bytes -> int, 8 bytes -> long; anything else is a protocol surprise.
    if (returnValue.length == 4) {
      return ByteConverter.int4(returnValue, 0);
    } else if (returnValue.length == 8) {
      return ByteConverter.int8(returnValue, 0);
    } else {
      throw new RedshiftException(
          GT.tr("Fastpath call {0} - No result was returned and we expected a numeric.", fnId),
          RedshiftState.NO_DATA);
    }
  }

  /**
   * Send a function call to the Redshift backend.
   *
   * @param fnId Function id
   * @param args FastpathArguments to pass to fastpath
   * @return null if no data, byte[] otherwise
   * @throws SQLException if a database-access error occurs.
   */
  public byte[] fastpath(int fnId, FastpathArg[] args) throws SQLException {
    // Turn fastpath array into a parameter list.
    ParameterList params = executor.createFastpathParameters(args.length);
    for (int i = 0; i < args.length; ++i) {
      // Parameter indexes are 1-based on the wire.
      args[i].populateParameter(params, i + 1);
    }

    // Run it.
    return executor.fastpathCall(fnId, params, connection.getAutoCommit());
  }

  /**
   * @param name Function name
   * @param resulttype True if the result is a numeric (Integer or Long)
   * @param args FastpathArguments to pass to fastpath
   * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
   *         otherwise
   * @throws SQLException if something goes wrong
   * @see #fastpath(int, FastpathArg[])
   * @see #fastpath(String, FastpathArg[])
   * @deprecated Use {@link #getData(String, FastpathArg[])} if you expect a binary result, or one
   *             of {@link #getInteger(String, FastpathArg[])} or
   *             {@link #getLong(String, FastpathArg[])} if you expect a numeric one
   */
  @Deprecated
  public Object fastpath(String name, boolean resulttype, FastpathArg[] args) throws SQLException {
    connection.getLogger().log(LogLevel.DEBUG, "Fastpath: calling {0}", name);
    return fastpath(getID(name), resulttype, args);
  }

  /**
   * <p>Send a function call to the Redshift backend by name.</p>
   *
   * <p>Note: the mapping for the procedure name to function id needs to exist, usually to an
   * earlier call to addfunction().</p>
   *
   * <p>This is the preferred method to call, as function id's can/may change between versions of
   * the backend.</p>
   *
   * <p>For an example of how this works, refer to com.amazon.redshift.largeobject.LargeObject</p>
   *
   * @param name Function name
   * @param args FastpathArguments to pass to fastpath
   * @return null if no data, byte[] otherwise
   * @throws SQLException if name is unknown or if a database-access error occurs.
   * @see com.amazon.redshift.largeobject.LargeObject
   */
  public byte[] fastpath(String name, FastpathArg[] args) throws SQLException {
    connection.getLogger().log(LogLevel.DEBUG, "Fastpath: calling {0}", name);
    return fastpath(getID(name), args);
  }

  /**
   * This convenience method assumes that the return value is an integer.
   *
   * @param name Function name
   * @param args Function arguments
   * @return integer result
   * @throws SQLException if a database-access error occurs or no result
   */
  public int getInteger(String name, FastpathArg[] args) throws SQLException {
    byte[] returnValue = fastpath(name, args);
    if (returnValue == null) {
      throw new RedshiftException(
          GT.tr("Fastpath call {0} - No result was returned and we expected an integer.", name),
          RedshiftState.NO_DATA);
    }

    if (returnValue.length == 4) {
      return ByteConverter.int4(returnValue, 0);
    } else {
      throw new RedshiftException(GT.tr(
          "Fastpath call {0} - No result was returned or wrong size while expecting an integer.",
          name), RedshiftState.NO_DATA);
    }
  }

  /**
   * This convenience method assumes that the return value is a long (bigint).
   *
   * @param name Function name
   * @param args Function arguments
   * @return long result
   * @throws SQLException if a database-access error occurs or no result
   */
  public long getLong(String name, FastpathArg[] args) throws SQLException {
    byte[] returnValue = fastpath(name, args);
    if (returnValue == null) {
      throw new RedshiftException(
          GT.tr("Fastpath call {0} - No result was returned and we expected a long.", name),
          RedshiftState.NO_DATA);
    }
    if (returnValue.length == 8) {
      return ByteConverter.int8(returnValue, 0);
    } else {
      throw new RedshiftException(
          GT.tr("Fastpath call {0} - No result was returned or wrong size while expecting a long.",
              name), RedshiftState.NO_DATA);
    }
  }

  /**
   * This convenience method assumes that the return value is an oid.
   *
   * @param name Function name
   * @param args Function arguments
   * @return oid of the given call
   * @throws SQLException if a database-access error occurs or no result
   */
  public long getOID(String name, FastpathArg[] args) throws SQLException {
    long oid = getInteger(name, args);
    // The backend's unsigned 32-bit oid arrives as a signed int; wrap negative
    // values back into the unsigned range.
    if (oid < 0) {
      oid += NUM_OIDS;
    }
    return oid;
  }

  /**
   * This convenience method assumes that the return value is not an Integer.
   *
   * @param name Function name
   * @param args Function arguments
   * @return byte[] array containing result
   * @throws SQLException if a database-access error occurs or no result
   */
  public byte[] getData(String name, FastpathArg[] args) throws SQLException {
    return fastpath(name, args);
  }

  /**
   * <p>This adds a function to our lookup table.</p>
   *
   * <p>User code should use the addFunctions method, which is based upon a query, rather than hard
   * coding the oid. The oid for a function is not guaranteed to remain static, even on different
   * servers of the same version.</p>
   *
   * @param name Function name
   * @param fnid Function id
   */
  public void addFunction(String name, int fnid) {
    func.put(name, fnid);
  }

  /**
   * <p>This takes a ResultSet containing two columns. Column 1 contains the function name, Column 2
   * the oid.</p>
   *
   * <p>It reads the entire ResultSet, loading the values into the function table.</p>
   *
   * <p><b>REMEMBER</b> to close() the resultset after calling this!!</p>
   *
   * <p><b><em>Implementation note about function name lookups:</em></b></p>
   *
   * <p>Redshift stores the function id's and their corresponding names in the pg_proc table. To
   * speed things up locally, instead of querying each function from that table when required, a
   * HashMap is used. Also, only the function's required are entered into this table, keeping
   * connection times as fast as possible.</p>
   *
   * <p>The com.amazon.redshift.largeobject.LargeObject class performs a query upon it's startup,
   * and passes the returned ResultSet to the addFunctions() method here.</p>
   *
   * <p>Once this has been done, the LargeObject api refers to the functions by name.</p>
   *
   * <p>Don't think that manually converting them to the oid's will work. Ok, they will for now, but
   * they can change during development (there was some discussion about this for V7.0), so this is
   * implemented to prevent any unwarranted headaches in the future.</p>
   *
   * @param rs ResultSet
   * @throws SQLException if a database-access error occurs.
   * @see com.amazon.redshift.largeobject.LargeObjectManager
   */
  public void addFunctions(ResultSet rs) throws SQLException {
    while (rs.next()) {
      func.put(rs.getString(1), rs.getInt(2));
    }
  }

  /**
   * <p>This returns the function id associated by its name.</p>
   *
   * <p>If addFunction() or addFunctions() have not been called for this name, then an SQLException
   * is thrown.</p>
   *
   * @param name Function name to lookup
   * @return Function ID for fastpath call
   * @throws SQLException is function is unknown.
   */
  public int getID(String name) throws SQLException {
    Integer id = func.get(name);

    // may be we could add a lookup to the database here, and store the result
    // in our lookup table, throwing the exception if that fails.
    // We must, however, ensure that if we do, any existing ResultSet is
    // unaffected, otherwise we could break user code.
    //
    // so, until we know we can do this (needs testing, on the TODO list)
    // for now, we throw the exception and do no lookups.
    if (id == null) {
      throw new RedshiftException(GT.tr("The fastpath function {0} is unknown.", name),
          RedshiftState.UNEXPECTED_ERROR);
    }

    return id;
  }

  /**
   * Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
   * FastpathArg because the constructor can't tell the difference between an long that's really
   * int8 and a long thats an oid.
   *
   * @param oid input oid
   * @return FastpathArg with an oid parameter
   */
  public static FastpathArg createOIDArg(long oid) {
    // Fold unsigned oids above Integer.MAX_VALUE back into the signed int range
    // the backend will reinterpret as unsigned.
    if (oid > Integer.MAX_VALUE) {
      oid -= NUM_OIDS;
    }
    return new FastpathArg((int) oid);
  }
}
8,584
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/fastpath/FastpathArg.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.fastpath; import com.amazon.redshift.core.ParameterList; import java.sql.SQLException; // Not a very clean mapping to the new QueryExecutor/ParameterList // stuff, but it seems hard to support both v2 and v3 cleanly with // the same model while retaining API compatibility. So I've just // done it the ugly way.. /** * Each fastpath call requires an array of arguments, the number and type dependent on the function * being called. * * @deprecated This API is somewhat obsolete, as one may achieve similar performance * and greater functionality by setting up a prepared statement to define * the function call. Then, executing the statement with binary transmission of parameters * and results substitutes for a fast-path function call. */ @Deprecated public class FastpathArg { /** * Encoded byte value of argument. */ private final byte[] bytes; private final int bytesStart; private final int bytesLength; /** * Constructs an argument that consists of an integer value. * * @param value int value to set */ public FastpathArg(int value) { bytes = new byte[4]; bytes[3] = (byte) (value); bytes[2] = (byte) (value >> 8); bytes[1] = (byte) (value >> 16); bytes[0] = (byte) (value >> 24); bytesStart = 0; bytesLength = 4; } /** * Constructs an argument that consists of an integer value. * * @param value int value to set */ public FastpathArg(long value) { bytes = new byte[8]; bytes[7] = (byte) (value); bytes[6] = (byte) (value >> 8); bytes[5] = (byte) (value >> 16); bytes[4] = (byte) (value >> 24); bytes[3] = (byte) (value >> 32); bytes[2] = (byte) (value >> 40); bytes[1] = (byte) (value >> 48); bytes[0] = (byte) (value >> 56); bytesStart = 0; bytesLength = 8; } /** * Constructs an argument that consists of an array of bytes. 
* * @param bytes array to store */ public FastpathArg(byte[] bytes) { this(bytes, 0, bytes.length); } /** * Constructs an argument that consists of part of a byte array. * * @param buf source array * @param off offset within array * @param len length of data to include */ public FastpathArg(byte[] buf, int off, int len) { this.bytes = buf; this.bytesStart = off; this.bytesLength = len; } /** * Constructs an argument that consists of a String. * * @param s String to store */ public FastpathArg(String s) { this(s.getBytes()); } void populateParameter(ParameterList params, int index) throws SQLException { if (bytes == null) { params.setNull(index, 0); } else { params.setBytea(index, bytes, bytesStart, bytesLength); } } }
8,585
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc3/Jdbc3SimpleDataSource.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc3;

import com.amazon.redshift.ds.RedshiftSimpleDataSource;

/**
 * Legacy JDBC3-era entry point kept only for backward compatibility with code that
 * references the old class name. It adds no behavior of its own.
 *
 * @deprecated Please use {@link RedshiftSimpleDataSource} directly.
 */
@Deprecated
public class Jdbc3SimpleDataSource extends RedshiftSimpleDataSource {
}
8,586
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc3/Jdbc3ConnectionPool.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc3;

import com.amazon.redshift.ds.RedshiftConnectionPoolDataSource;

/**
 * Legacy JDBC3-era entry point kept only for backward compatibility with code that
 * references the old class name. It adds no behavior of its own.
 *
 * @deprecated Please use {@link RedshiftConnectionPoolDataSource} directly.
 */
@Deprecated
public class Jdbc3ConnectionPool extends RedshiftConnectionPoolDataSource {
}
8,587
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc3/Jdbc3PoolingDataSource.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc3;

import com.amazon.redshift.ds.RedshiftPoolingDataSource;

/**
 * Legacy JDBC3-era entry point kept only for backward compatibility with code that
 * references the old class name. It adds no behavior of its own.
 *
 * @deprecated Since 2.0.0, see {@link RedshiftPoolingDataSource}
 */
@Deprecated
public class Jdbc3PoolingDataSource extends RedshiftPoolingDataSource {
}
8,588
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/sspi/SSPIClient.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */
// Copyright (c) 2004, Open Cloud Limited.

package com.amazon.redshift.sspi;

import com.amazon.redshift.core.RedshiftStream;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.HostSpec;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import com.sun.jna.LastErrorException;
import com.sun.jna.Platform;
import com.sun.jna.platform.win32.Sspi;
import com.sun.jna.platform.win32.Sspi.SecBufferDesc;
import com.sun.jna.platform.win32.Win32Exception;

import waffle.windows.auth.IWindowsCredentialsHandle;
import waffle.windows.auth.impl.WindowsCredentialsHandleImpl;
import waffle.windows.auth.impl.WindowsSecurityContextImpl;

import java.io.IOException;
import java.sql.SQLException;

/**
 * <p>Use Waffle-JNI to support SSPI authentication when PgJDBC is running on a Windows client and
 * talking to a Windows server.</p>
 *
 * <p>SSPI is not supported on a non-Windows client.</p>
 *
 * <p>Lifecycle: check {@link #isSSPISupported()}, then {@link #startSSPI()}, then zero or more
 * {@link #continueSSPI(int)} calls, and finally {@link #dispose()}. Not thread-safe and not
 * reusable across connections.</p>
 *
 * @author craig
 */
public class SSPIClient implements ISSPIClient {
  // Default SPN service class used when the caller does not supply one.
  public static final String SSPI_DEFAULT_SPN_SERVICE_CLASS = "REDSHIFT";

  private RedshiftLogger logger;

  // Connection stream used both for SPN construction (host name) and for the
  // authentication message exchange with the backend.
  private final RedshiftStream rsStream;
  private final String spnServiceClass;
  private final boolean enableNegotiate;

  // Native Windows credential handle; acquired in startSSPI(), released in dispose().
  private IWindowsCredentialsHandle clientCredentials;
  // Native SSPI security context; created in startSSPI(), released in dispose().
  private WindowsSecurityContextImpl sspiContext;
  // Target SPN computed by makeSPN(); reused by continueSSPI().
  private String targetName;

  /**
   * <p>Instantiate an SSPIClient for authentication of a connection.</p>
   *
   * <p>SSPIClient is not re-usable across connections.</p>
   *
   * <p>It is safe to instantiate SSPIClient even if Waffle and JNA are missing or on non-Windows
   * platforms, however you may not call any methods other than isSSPISupported().</p>
   *
   * @param rsStream Redshift connection stream
   * @param spnServiceClass SSPI SPN service class, defaults to REDSHIFT if null or empty
   * @param enableNegotiate true to use the SSPI "negotiate" (SPNEGO) package, false to force
   *        raw Kerberos
   */
  public SSPIClient(RedshiftStream rsStream, String spnServiceClass, boolean enableNegotiate) {
    this.logger = (rsStream != null) ? rsStream.getLogger() : RedshiftLogger.getDriverLogger();
    this.rsStream = rsStream;

    if (spnServiceClass == null || spnServiceClass.isEmpty()) {
      spnServiceClass = SSPI_DEFAULT_SPN_SERVICE_CLASS;
    }
    this.spnServiceClass = spnServiceClass;

    /* If we're forcing Kerberos (no spnego), disable SSPI negotiation */
    this.enableNegotiate = enableNegotiate;
  }

  /**
   * Test whether we can attempt SSPI authentication. If false, do not attempt to call any other
   * SSPIClient methods.
   *
   * @return true if it's safe to attempt SSPI authentication
   */
  @Override
  public boolean isSSPISupported() {
    try {
      /*
       * SSPI is windows-only. Attempt to use JNA to identify the platform. If Waffle is missing we
       * won't have JNA and this will throw a NoClassDefFoundError.
       */
      if (!Platform.isWindows()) {
        if (RedshiftLogger.isEnable())
          logger.log(LogLevel.DEBUG, "SSPI not supported: non-Windows host");
        return false;
      }
      /* Waffle must be on the CLASSPATH */
      Class.forName("waffle.windows.auth.impl.WindowsSecurityContextImpl");
      return true;
    } catch (NoClassDefFoundError ex) {
      if (RedshiftLogger.isEnable())
        logger.log(LogLevel.INFO, "SSPI unavailable (no Waffle/JNA libraries?)", ex);
      return false;
    } catch (ClassNotFoundException ex) {
      if (RedshiftLogger.isEnable())
        logger.log(LogLevel.INFO, "SSPI unavailable (no Waffle/JNA libraries?)", ex);
      return false;
    }
  }

  /**
   * Builds the Service Principal Name for the target host via the native DsMakeSpn call.
   *
   * @return the SPN for the connection's host
   * @throws RedshiftException if the native call fails
   */
  private String makeSPN() throws RedshiftException {
    final HostSpec hs = rsStream.getHostSpec();

    try {
      /* The GSSAPI implementation does not use the port in the service name.
         Force the port number to 0
         Fixes issue 1482 */
      return NTDSAPIWrapper.instance.DsMakeSpn(spnServiceClass, hs.getHost(), null, (short) 0,
          null);
    } catch (LastErrorException ex) {
      throw new RedshiftException("SSPI setup failed to determine SPN",
          RedshiftState.CONNECTION_UNABLE_TO_CONNECT, ex);
    }
  }

  /**
   * Respond to an authentication request from the back-end for SSPI authentication (AUTH_REQ_SSPI).
   *
   * @throws SQLException on SSPI authentication handshake failure
   * @throws IOException on network I/O issues
   */
  @Override
  public void startSSPI() throws SQLException, IOException {
    /*
     * We usually use SSPI negotiation (spnego), but it's disabled if the client asked for GSSPI and
     * usespngo isn't explicitly turned on.
     */
    final String securityPackage = enableNegotiate ? "negotiate" : "kerberos";

    if (RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, "Beginning SSPI/Kerberos negotiation with SSPI package: {0}",
          securityPackage);

    try {
      /*
       * Acquire a handle for the local Windows login credentials for the current user
       *
       * See AcquireCredentialsHandle
       * (http://msdn.microsoft.com/en-us/library/windows/desktop/aa374712%28v=vs.85%29.aspx)
       *
       * This corresponds to pg_SSPI_startup in libpq/fe-auth.c .
       */
      try {
        clientCredentials = WindowsCredentialsHandleImpl.getCurrent(securityPackage);
        clientCredentials.initialize();
      } catch (Win32Exception ex) {
        throw new RedshiftException("Could not obtain local Windows credentials for SSPI",
            RedshiftState.CONNECTION_UNABLE_TO_CONNECT /* TODO: Should be authentication error */,
            ex);
      }

      try {
        targetName = makeSPN();

        if (RedshiftLogger.isEnable())
          logger.log(LogLevel.DEBUG, "SSPI target name: {0}", targetName);

        sspiContext = new WindowsSecurityContextImpl();
        sspiContext.setPrincipalName(targetName);
        sspiContext.setCredentialsHandle(clientCredentials);
        sspiContext.setSecurityPackage(securityPackage);
        sspiContext.initialize(null, null, targetName);
      } catch (Win32Exception ex) {
        throw new RedshiftException("Could not initialize SSPI security context",
            RedshiftState.CONNECTION_UNABLE_TO_CONNECT /* TODO: Should be auth error */, ex);
      }

      sendSSPIResponse(sspiContext.getToken());

      if (RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, "Sent first SSPI negotiation message");
    } catch (NoClassDefFoundError ex) {
      throw new RedshiftException(
          "SSPI cannot be used, Waffle or its dependencies are missing from the classpath",
          RedshiftState.NOT_IMPLEMENTED, ex);
    }
  }

  /**
   * Continue an existing authentication conversation with the back-end in resonse to an
   * authentication request of type AUTH_REQ_GSS_CONT.
   *
   * @param msgLength Length of message to read, excluding length word and message type word
   * @throws SQLException if something wrong happens
   * @throws IOException if something wrong happens
   */
  @Override
  public void continueSSPI(int msgLength) throws SQLException, IOException {
    if (sspiContext == null) {
      throw new IllegalStateException("Cannot continue SSPI authentication that we didn't begin");
    }

    if (RedshiftLogger.isEnable())
      logger.log(LogLevel.DEBUG, "Continuing SSPI negotiation");

    /* Read the response token from the server */
    byte[] receivedToken = rsStream.receive(msgLength);

    SecBufferDesc continueToken = new SecBufferDesc(Sspi.SECBUFFER_TOKEN, receivedToken);

    sspiContext.initialize(sspiContext.getHandle(), continueToken, targetName);

    /*
     * Now send the response token. If negotiation is complete there may be zero bytes to send, in
     * which case we shouldn't send a reply as the server is not expecting one; see fe-auth.c in
     * libpq for details.
     */
    byte[] responseToken = sspiContext.getToken();

    if (responseToken.length > 0) {
      sendSSPIResponse(responseToken);
      if (RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, "Sent SSPI negotiation continuation message");
    } else {
      if (RedshiftLogger.isEnable())
        logger.log(LogLevel.DEBUG, "SSPI authentication complete, no reply required");
    }
  }

  /**
   * Sends one SSPI token to the backend wrapped in a protocol 'p' (password) message.
   *
   * @param outToken raw token bytes to send
   * @throws IOException on network I/O issues
   */
  private void sendSSPIResponse(byte[] outToken) throws IOException {
    /*
     * The sspiContext now contains a token we can send to the server to start the handshake. Send a
     * 'password' message containing the required data; the server knows we're doing SSPI
     * negotiation and will deal with it appropriately.
     */
    rsStream.sendChar('p');
    // Length word covers itself (4 bytes) plus the token payload.
    rsStream.sendInteger4(4 + outToken.length);
    rsStream.send(outToken);
    rsStream.flush();
  }

  /**
   * Clean up native win32 resources after completion or failure of SSPI authentication. This
   * SSPIClient instance becomes unusable after disposal.
   */
  @Override
  public void dispose() {
    // Null the fields after disposal so a second dispose() call is a no-op.
    if (sspiContext != null) {
      sspiContext.dispose();
      sspiContext = null;
    }
    if (clientCredentials != null) {
      clientCredentials.dispose();
      clientCredentials = null;
    }
  }
}
8,589
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/sspi/NTDSAPI.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ // Copyright (c) 2004, Open Cloud Limited. package com.amazon.redshift.sspi; import com.sun.jna.LastErrorException; import com.sun.jna.Native; import com.sun.jna.WString; import com.sun.jna.ptr.IntByReference; import com.sun.jna.win32.StdCallLibrary; interface NTDSAPI extends StdCallLibrary { NTDSAPI instance = (NTDSAPI) Native.loadLibrary("NTDSAPI", NTDSAPI.class); /** * <p>Wrap DsMakeSpn</p> * * <p>To get the String result, call * * <pre> * new String(buf, 0, spnLength) * </pre> * * on the byte[] buffer passed to 'spn' after testing to ensure ERROR_SUCCESS.</p> * * @param serviceClass SPN service class (in) * @param serviceName SPN service name (in) * @param instanceName SPN instance name (in, null ok) * @param instancePort SPN port number (in, 0 to omit) * @param referrer SPN referer (in, null ok) * @param spnLength Size of 'spn' buffer (in), actul length of spn created including null * terminator (out) * @param spn SPN buffer (in/out) * @return Error code ERROR_SUCCESS, ERROR_BUFFER_OVERFLOW or ERROR_INVALID_PARAMETER * @see <a href="https://msdn.microsoft.com/en-us/library/ms676007(v=vs.85).aspx"> * https://msdn.microsoft.com/en-us/library/ms676007(v=vs.85).aspx</a> */ int DsMakeSpnW(WString serviceClass, /* in */ WString serviceName, /* in */ WString instanceName, /* in, optional, may be null */ short instancePort, /* in */ WString referrer, /* in, optional, may be null */ IntByReference spnLength, /* in: length of buffer spn; out: chars written */ char[] spn /* out string */ ) throws LastErrorException; int ERROR_SUCCESS = 0; int ERROR_INVALID_PARAMETER = 87; int ERROR_BUFFER_OVERFLOW = 111; }
8,590
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/sspi/NTDSAPIWrapper.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ // Copyright (c) 2004, Open Cloud Limited. package com.amazon.redshift.sspi; import com.sun.jna.LastErrorException; import com.sun.jna.WString; import com.sun.jna.ptr.IntByReference; public class NTDSAPIWrapper { static final NTDSAPIWrapper instance = new NTDSAPIWrapper(); /** * Convenience wrapper for NTDSAPI DsMakeSpn with Java friendly string and exception handling. * * @param serviceClass See MSDN * @param serviceName See MSDN * @param instanceName See MSDN * @param instancePort See MSDN * @param referrer See MSDN * @return SPN generated * @throws LastErrorException If buffer too small or parameter incorrect * @see <a href="https://msdn.microsoft.com/en-us/library/ms676007(v=vs.85).aspx"> * https://msdn.microsoft.com/en-us/library/ms676007(v=vs.85).aspx</a> */ public String DsMakeSpn(String serviceClass, String serviceName, String instanceName, short instancePort, String referrer) throws LastErrorException { IntByReference spnLength = new IntByReference(2048); char[] spn = new char[spnLength.getValue()]; final int ret = NTDSAPI.instance.DsMakeSpnW( new WString(serviceClass), new WString(serviceName), instanceName == null ? null : new WString(instanceName), instancePort, referrer == null ? null : new WString(referrer), spnLength, spn); if (ret != NTDSAPI.ERROR_SUCCESS) { /* Should've thrown LastErrorException, but just in case */ throw new RuntimeException("NTDSAPI DsMakeSpn call failed with " + ret); } return new String(spn, 0, spnLength.getValue()); } }
8,591
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/sspi/ISSPIClient.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ // Copyright (c) 2004, Open Cloud Limited. package com.amazon.redshift.sspi; import java.io.IOException; import java.sql.SQLException; /** * <p>Use Waffle-JNI to support SSPI authentication when RsJDBC is running on a Windows * client and talking to a Windows server.</p> * * <p>SSPI is not supported on a non-Windows client.</p> */ public interface ISSPIClient { boolean isSSPISupported(); void startSSPI() throws SQLException, IOException; void continueSSPI(int msgLength) throws SQLException, IOException; void dispose(); }
8,592
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2/ArrayAssistantRegistry.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jdbc2; import java.util.HashMap; import java.util.Map; /** * Array assistants register here. * * @author Minglei Tu */ public class ArrayAssistantRegistry { private static Map<Integer, ArrayAssistant> arrayAssistantMap = new HashMap<Integer, ArrayAssistant>(); public static ArrayAssistant getAssistant(int oid) { return arrayAssistantMap.get(oid); } //// public static void register(int oid, ArrayAssistant assistant) { arrayAssistantMap.put(oid, assistant); } }
8,593
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2/ArrayAssistant.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc2;

/**
 * Implement this interface and register its instance with ArrayAssistantRegistry to let
 * the Redshift driver support more array types.
 *
 * @author Minglei Tu
 */
public interface ArrayAssistant {
  /**
   * Get the array base type.
   *
   * @return array base type
   */
  Class<?> baseType();

  /**
   * Build an array element from its binary bytes.
   *
   * @param bytes input bytes
   * @param pos position in input array
   * @param len length of the element
   * @return array element from its binary bytes
   */
  Object buildElement(byte[] bytes, int pos, int len);

  /**
   * Build an array element from its literal string.
   *
   * @param literal string representation of array element
   * @return array element
   */
  Object buildElement(String literal);
}
8,594
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2/optional/SimpleDataSource.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc2.optional;

import com.amazon.redshift.ds.RedshiftSimpleDataSource;

/**
 * Legacy entry point kept only for backward compatibility with code that references
 * the old class name. It adds no behavior of its own.
 *
 * @deprecated Please use {@link RedshiftSimpleDataSource} directly.
 */
@Deprecated
public class SimpleDataSource extends RedshiftSimpleDataSource {
}
8,595
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2/optional/ConnectionPool.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc2.optional;

import com.amazon.redshift.ds.RedshiftConnectionPoolDataSource;

/**
 * Legacy entry point kept only for backward compatibility with code that references
 * the old class name. It adds no behavior of its own.
 *
 * @deprecated Please use {@link RedshiftConnectionPoolDataSource} directly.
 */
@Deprecated
public class ConnectionPool extends RedshiftConnectionPoolDataSource {
}
8,596
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jdbc2/optional/PoolingDataSource.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc2.optional;

import com.amazon.redshift.ds.RedshiftPoolingDataSource;

/**
 * Legacy entry point kept only for backward compatibility with code that references
 * the old class name. It adds no behavior of its own.
 *
 * @deprecated Since 2.0.0, see {@link RedshiftPoolingDataSource}
 */
@Deprecated
public class PoolingDataSource extends RedshiftPoolingDataSource {
}
8,597
0
Create_ds/hollow/hollow-zenoadapter/src/test/java/com/netflix/hollow/zenoadapter
Create_ds/hollow/hollow-zenoadapter/src/test/java/com/netflix/hollow/zenoadapter/util/ObjectIdentityOrdinalMapTest.java
/*
 * Copyright 2016-2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.hollow.zenoadapter.util;

import org.junit.Assert;
import org.junit.Test;

/**
 * Verifies that ObjectIdentityOrdinalMap maps each distinct object (by identity)
 * to the ordinal it was stored with.
 */
public class ObjectIdentityOrdinalMapTest {

    // Java-style array declaration (original used C-style "Object obj[]").
    Object[] obj = new Object[10000];

    @Test
    public void test() {
        // Distinct instances: the map is identity-based, so each object must
        // resolve to exactly the ordinal it was inserted with.
        for (int i = 0; i < obj.length; i++) {
            obj[i] = new Object();
        }

        ObjectIdentityOrdinalMap ordinalMap = new ObjectIdentityOrdinalMap();

        for (int i = 0; i < obj.length; i++) {
            ordinalMap.put(obj[i], i);
        }

        // JUnit convention is assertEquals(expected, actual); the original had the
        // arguments swapped, which produces misleading failure messages.
        for (int i = 0; i < obj.length; i++) {
            Assert.assertEquals(i, ordinalMap.getEntry(obj[i]).getOrdinal());
        }
    }
}
8,598
0
Create_ds/hollow/hollow-zenoadapter/src/main/java/com/netflix/hollow
Create_ds/hollow/hollow-zenoadapter/src/main/java/com/netflix/hollow/zenoadapter/HollowSerializationRecord.java
/*
 * Copyright 2016-2019 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.netflix.hollow.zenoadapter;

import com.netflix.hollow.core.write.HollowWriteRecord;
import com.netflix.zeno.serializer.NFSerializationRecord;

/**
 * Zeno serialization record that adapts a Hollow {@link HollowWriteRecord}: it pairs
 * a reusable write record with the Hollow type name it is written under.
 */
public class HollowSerializationRecord extends NFSerializationRecord {

    // Hollow schema type name this record is serialized under; fixed at construction.
    private final String typeName;
    // Underlying Hollow write record; reused across serializations via reset().
    private final HollowWriteRecord hollowWriteRecord;

    /**
     * @param rec underlying Hollow write record to populate
     * @param typeName Hollow type name the record belongs to
     */
    public HollowSerializationRecord(HollowWriteRecord rec, String typeName) {
        this.hollowWriteRecord = rec;
        this.typeName = typeName;
    }

    /** @return the Hollow type name this record is written under */
    public String getTypeName() {
        return typeName;
    }

    /** @return the underlying Hollow write record */
    public HollowWriteRecord getHollowWriteRecord() {
        return hollowWriteRecord;
    }

    /** Clears the underlying write record so this instance can be reused. */
    public void reset() {
        hollowWriteRecord.reset();
    }
}
8,599