index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/logger/LogHandler.java
package com.amazon.redshift.logger; public interface LogHandler { /** * Write the message using this handler. * This can be a file or console. * * @param message Log entry * @throws Exception throws when any error happens during write operation. */ public void write(String message) throws Exception; public void close() throws Exception; public void flush(); }
8,400
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/logger/LogFileHandler.java
package com.amazon.redshift.logger;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

/**
 * {@link LogHandler} that appends entries to a log file and, when more than
 * one rotation file is configured, rotates files once the active file reaches
 * the configured maximum size.
 */
public class LogFileHandler implements LogHandler {

  private static final int FILE_SIZE = 10 * 1024 * 1024; // default max size: 10 MB
  private static final int FILE_COUNT = 10; // default number of rotation files
  private static final String FILE_EXTENSION = ".log";
  private static final String FILE_EXTENSION_SEPERATOR = ".";

  // Accepts values such as "10m", "512K", "1gb" or a plain byte count:
  // group(1) is the number, group(2) the (possibly empty) unit character.
  private static final Pattern FILE_SIZE_PATTERN = Pattern.compile(
      "\\s*(\\d+)\\s*(k|g|m|)b?\\s*", Pattern.CASE_INSENSITIVE);

  private static final int BUFFER_SIZE = 8 * 1024;

  private File currentFile;          // file currently being written
  private String fileName;           // base file name (without directory)
  private boolean isRotation = false;
  private String directory;          // directory holding the log file(s)
  private int maxFileSize;           // 0 disables the size check
  private int maxFileCount;
  private ArrayList<String> rotationFileNames; // index 0 = active file
  private PrintWriter writer = null;
  private boolean flushAfterWrite;

  /**
   * Creates a file-backed log handler.
   *
   * @param filename        log file path; may be a bare name without a directory
   * @param flushAfterWrite when true, flush after every message
   * @param maxLogFileSize  e.g. "10m"; null or empty selects the default
   * @param maxLogFileCount number of rotation files; null or empty selects the default
   * @throws Exception when the log directory or writer cannot be created
   */
  public LogFileHandler(String filename, boolean flushAfterWrite,
      String maxLogFileSize, String maxLogFileCount) throws Exception {
    int separator = filename.lastIndexOf(File.separator);

    if (-1 != separator) {
      directory = filename.substring(0, separator);
      fileName = filename.substring(separator + 1);
    } else {
      // Bare file name. The old code unconditionally called
      // filename.substring(0, -1) here, which always threw
      // StringIndexOutOfBoundsException; fall back to the working directory.
      directory = ".";
      fileName = filename;
    }

    this.flushAfterWrite = flushAfterWrite;

    if (-1 != separator) {
      createDirectory();
    }

    createWriter(maxLogFileSize, maxLogFileCount);
  }

  /** Writes one entry, flushing and rotating as configured. */
  @Override
  public synchronized void write(String message) throws Exception {
    writer.println(message);

    if (flushAfterWrite) {
      writer.flush();
    }

    if (isRotation) {
      // Flush first so currentFile.length() reflects everything written so far.
      writer.flush();
      if ((0 != maxFileSize) && (currentFile.length() >= maxFileSize)) {
        closeFile();
        rotateFiles();
        openFile();
      }
    }
  }

  @Override
  public synchronized void close() throws Exception {
    if (writer != null) {
      writer.close();
    }
  }

  @Override
  public synchronized void flush() {
    if (writer != null) {
      writer.flush();
    }
  }

  /** Creates the log directory if it does not exist yet. */
  private void createDirectory() throws RedshiftException {
    File dir = new File(this.directory);
    // mkdirs (rather than mkdir) so nested, not-yet-existing paths work too.
    if (!dir.exists() && !dir.mkdirs()) {
      throw new RedshiftException(GT.tr("Couldn't create log directory {0}", dir),
          RedshiftState.UNEXPECTED_ERROR);
    }
  }

  /** Opens the writer on the configured file and applies the size/count settings. */
  private void createWriter(String maxLogFileSize, String maxLogFileCount) throws Exception {
    String fullFilename = directory + File.separator + fileName;

    if (0 != fullFilename.length()) {
      BufferedOutputStream outStream = new BufferedOutputStream(
          new FileOutputStream(fullFilename, true), BUFFER_SIZE);
      writer = new PrintWriter(outStream);
      updateLoggingFileSettings(maxLogFileSize, maxLogFileCount);
      return;
    }

    throw new Exception("Failed to create log writer");
  }

  /** Parses the size/count settings and precomputes the rotation file names. */
  private void updateLoggingFileSettings(String maxLogFileSize, String maxLogFileCount) {
    currentFile = new File(directory + File.separator + fileName);

    maxFileCount = getMaxFileCount(maxLogFileCount);
    maxFileSize = getMaxFileSize(maxLogFileSize);

    if (maxFileCount > 1) {
      isRotation = true;
      String fileExt = FILE_EXTENSION;
      rotationFileNames = new ArrayList<String>();

      if (fileName.contains(FILE_EXTENSION_SEPERATOR)) {
        fileExt = fileName.substring(fileName.lastIndexOf(FILE_EXTENSION_SEPERATOR));
        fileName = fileName.substring(0, fileName.lastIndexOf(FILE_EXTENSION_SEPERATOR));
      }

      rotationFileNames.add(directory + File.separator + fileName + fileExt);
      for (int i = 1; i < maxFileCount; i++) {
        // Name format: {basename}.{i}{.ext} : e.g. "redshift.1.log"
        rotationFileNames.add(directory + File.separator + fileName + "." + i + fileExt);
      }
    }
  }

  /** Parses a size string like "10m"; falls back to the default on bad input. */
  private int getMaxFileSize(String maxLogFileSize) {
    int maxBytes = FILE_SIZE;

    if ((null == maxLogFileSize) || (maxLogFileSize.isEmpty())) {
      return maxBytes;
    }

    Matcher fileSizeMatcher = FILE_SIZE_PATTERN.matcher(maxLogFileSize);
    if (fileSizeMatcher.find()) {
      try {
        maxBytes = Integer.valueOf(fileSizeMatcher.group(1))
            * toMultiplier(fileSizeMatcher.group(2).toLowerCase());
      } catch (NumberFormatException e) {
        // Keep the default; note the problem in the log file itself.
        writer.println(e.getMessage());
        writer.flush();
      }
    }
    return maxBytes;
  }

  /** Parses the file-count setting; non-numeric or non-positive input keeps the default. */
  private int getMaxFileCount(String maxLogFileCount) {
    int maxfileCount = FILE_COUNT;
    if ((null != maxLogFileCount) && (!maxLogFileCount.isEmpty())) {
      try {
        int fileCount = Integer.parseInt(maxLogFileCount.trim());
        if (fileCount > 0) {
          maxfileCount = fileCount;
        }
      } catch (NumberFormatException e) {
        // Previously this exception escaped the constructor; tolerate bad
        // input the same way getMaxFileSize does.
        writer.println(e.getMessage());
        writer.flush();
      }
    }
    return maxfileCount;
  }

  /** Multiplier for a size unit: g, m, k, or empty for a plain byte count. */
  private Integer toMultiplier(String sizeChar) {
    if (sizeChar.isEmpty()) {
      // FILE_SIZE_PATTERN allows a bare number (e.g. "1048576"); previously
      // this fell through to the NumberFormatException and the default size.
      return 1;
    } else if (sizeChar.equals("g")) {
      return 1024 * 1024 * 1024;
    } else if (sizeChar.equals("m")) {
      return 1024 * 1024;
    } else if (sizeChar.equals("k")) {
      return 1024;
    }
    throw new NumberFormatException("Invalid file size unit.");
  }

  private boolean isOpen() {
    return (writer != null);
  }

  /** Reopens the active (index 0) rotation file for appending. */
  private void openFile() throws FileNotFoundException {
    currentFile = new File(rotationFileNames.get(0));
    BufferedOutputStream outStream = new BufferedOutputStream(
        new FileOutputStream(rotationFileNames.get(0), true), BUFFER_SIZE);
    writer = new PrintWriter(outStream);
  }

  private void closeFile() {
    if (isOpen()) {
      currentFile = null;
      writer.close();
    }
  }

  private void rotateFiles() throws Exception {
    if (!(rotationFileNames.isEmpty())) {
      deleteOldestFile();
    }
  }

  /**
   * Drops the oldest file and shifts every younger file one slot towards the
   * end of the rotation list, freeing slot 0 for a fresh active file.
   */
  private void deleteOldestFile() throws IOException {
    String last = rotationFileNames.get(rotationFileNames.size() - 1);
    File file = new File(last);
    if (file.exists()) {
      file.delete();
    }

    for (int i = rotationFileNames.size() - 2; i >= 0; i--) {
      File current = new File(rotationFileNames.get(i));
      if (current.exists()) {
        File dest = new File(last);
        if (!current.renameTo(dest)) {
          throw new IOException("can not rename file: " + current.getName() + " to: " + last);
        }
      }
      last = rotationFileNames.get(i);
    }
  }
}
8,401
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/logger/LogLevel.java
package com.amazon.redshift.logger; import java.util.ArrayList; public enum LogLevel { /* * OFF < ERROR < INFO < FUNCTION < DEBUG */ OFF, ERROR, INFO, FUNCTION, DEBUG; private static ArrayList<String> names = new ArrayList<String>(); static { names.add("OFF"); names.add("ERROR"); names.add("INFO"); names.add("FUNCTION"); names.add("DEBUG"); } public static LogLevel getLogLevel(int level) { switch (level) { case 0: { return LogLevel.OFF; } case 1: { return LogLevel.ERROR; } case 2: { return LogLevel.INFO; } case 3: { return LogLevel.FUNCTION; } case 4: case 5: // TRACE for backward compatibility case 6: // DEBUG for backward compatibility { return LogLevel.DEBUG; } default: { return LogLevel.OFF; } } } public static LogLevel getLogLevel(String level) { LogLevel logLevel = LogLevel.OFF; if ((null == level) || level.equals("")) { return logLevel; } if (level.equalsIgnoreCase("OFF")) { logLevel = LogLevel.OFF; } else if (level.equalsIgnoreCase("ERROR")) { logLevel = LogLevel.ERROR; } else if (level.equalsIgnoreCase("INFO")) { logLevel = LogLevel.INFO; } else if (level.equalsIgnoreCase("FUNCTION")) { logLevel = LogLevel.FUNCTION; } else if (level.equalsIgnoreCase("DEBUG") || level.equalsIgnoreCase("TRACE")) // TRACE is for backward compatibility { logLevel = LogLevel.DEBUG; } else { try { logLevel = getLogLevel(Integer.parseInt(level)); } catch (NumberFormatException e) { // Ignore } } return logLevel; } }
8,402
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/logger/LogConsoleHandler.java
package com.amazon.redshift.logger;

import java.io.PrintWriter;

/**
 * {@link LogHandler} that prints every entry to standard output,
 * flushing immediately after each message.
 */
public class LogConsoleHandler implements LogHandler {

  private final PrintWriter writer = new PrintWriter(System.out);

  @Override
  public synchronized void write(String message) throws Exception {
    writer.println(message);
    writer.flush();
  }

  @Override
  public synchronized void close() throws Exception {
    // Intentionally a no-op: the writer wraps stdout, which must stay open.
  }

  @Override
  public synchronized void flush() {
    if (writer != null) {
      writer.flush();
    }
  }
}
8,403
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/logger/RedshiftLogger.java
package com.amazon.redshift.logger;

import java.io.File;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.sql.DriverManager;
import java.text.FieldPosition;
import java.text.MessageFormat;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.Locale;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.util.RedshiftProperties;

/**
 * Logger for each connection or at driver level.
 *
 * <p>One logger is associated with each connection; a separate, shared logger
 * exists for the driver itself. Messages are formatted with a timestamp,
 * level, thread and caller location, then forwarded to a {@link LogHandler}
 * (file, console, or DriverManager's log writer).
 *
 * @author iggarish
 */
public class RedshiftLogger {

  // True once ANY connection (or the driver) has enabled logging.
  private static boolean isEnable = false;

  private static RedshiftLogger driverLogger;

  private volatile LogLevel level = LogLevel.OFF;

  private String fileName;

  private LogHandler handler;

  // Gives each per-connection log file a unique suffix.
  private static AtomicInteger connectionId = new AtomicInteger();

  /**
   * Creates a driver- or connection-level logger.
   *
   * @param fileName        target log file; null logs to the console
   * @param logLevel        level name or number; null/unknown disables logging
   * @param driver          true for the shared driver-level logger
   * @param maxLogFileSize  per-file size limit (see {@link LogFileHandler})
   * @param maxLogFileCount rotation file count (see {@link LogFileHandler})
   */
  public RedshiftLogger(String fileName, String logLevel, boolean driver,
      String maxLogFileSize, String maxLogFileCount) {
    if (driver) {
      this.fileName = fileName;
      driverLogger = this;
    } else {
      int connId = connectionId.incrementAndGet();

      if (fileName != null) {
        // Insert "_connection_<id>" before the extension so each connection
        // writes to its own file.
        String fileExt = "";
        if (fileName.contains(".")) {
          fileExt = fileName.substring(fileName.lastIndexOf("."), fileName.length());
          fileName = fileName.substring(0, fileName.lastIndexOf("."));
        }
        this.fileName = fileName + "_connection_" + connId + fileExt;
      }
    }

    level = (logLevel != null)
        ? LogLevel.getLogLevel(logLevel)
        : LogLevel.OFF;

    if (level != LogLevel.OFF) {
      try {
        if (DriverManager.getLogWriter() != null) {
          handler = new LogWriterHandler(DriverManager.getLogWriter());
        } else if (this.fileName != null) {
          handler = new LogFileHandler(this.fileName, driver, maxLogFileSize, maxLogFileCount);
        } else {
          handler = new LogConsoleHandler();
        }
      } catch (Exception ex) {
        // Fall back to the console rather than failing the connection.
        handler = new LogConsoleHandler();
      }

      isEnable = true;
    }
  }

  /**
   * Check for logging enable or not.
   *
   * @return true if any of the connections enabled logging, false otherwise.
   */
  public static boolean isEnable() {
    return isEnable;
  }

  /** @return the shared driver-level logger, or null if none was created. */
  public static RedshiftLogger getDriverLogger() {
    return driverLogger;
  }

  /**
   * True if the logger associated with the connection enables logging,
   * otherwise false. One logger is associated with each connection; there is
   * a separate logger at driver level.
   */
  private boolean isEnabled() {
    return (level != LogLevel.OFF);
  }

  public LogLevel getLogLevel() {
    return level;
  }

  /**
   * Determines if logging should occur based on the LogLevel.
   *
   * @param level the level of logging to attempt.
   * @param log   the logger to try to log to; may be null.
   * @return true if logging is to be done according to LogLevel; false otherwise.
   */
  public static boolean checkLogLevel(LogLevel level, RedshiftLogger log) {
    // Null guard added: the old code dereferenced log unconditionally and
    // threw NullPointerException when no logger had been created.
    return (log != null)
        && log.isEnabled()
        && (level.ordinal() <= log.getLogLevel().ordinal());
  }

  /**
   * Returns the stack frame directly above the first frame whose method name
   * equals {@code functionName}, i.e. the caller of that function.
   */
  private static StackTraceElement getStackElementAbove(String functionName) {
    boolean returnNextFunction = false;

    // Look for the function above the specified one.
    for (StackTraceElement s : Thread.currentThread().getStackTrace()) {
      if (returnNextFunction) {
        return s;
      } else if (s.getMethodName().equals(functionName)) {
        returnNextFunction = true;
      }
    }

    // Default to just returning 3 above, which should be the caller of the
    // caller of this function.
    return Thread.currentThread().getStackTrace()[3];
  }

  /** Masks password/credential-style settings embedded in a JDBC URL. */
  public static String maskSecureInfoInUrl(String url) {
    String[] tokens = {
        RedshiftProperty.PWD.getName(),
        RedshiftProperty.PASSWORD.getName(),
        RedshiftProperty.IAM_ACCESS_KEY_ID.getName(),
        RedshiftProperty.IAM_SECRET_ACCESS_KEY.getName(),
        RedshiftProperty.IAM_SESSION_TOKEN.getName(),
        RedshiftProperty.AUTH_PROFILE.getName(),
        RedshiftProperty.SSL_KEY.getName(),
        RedshiftProperty.SSL_PASSWORD.getName(),
        RedshiftProperty.WEB_IDENTITY_TOKEN.getName(),
        "Client_ID",
        "Client_Secret",
        "IdP_Tenant",
        "Partner_SPID",
        "Preferred_Role",
        "Profile",
        "roleArn",
    };

    return maskSecureInfo(url, tokens, "[\\?;&]");
  }

  /**
   * Splits {@code msg} on {@code tokenizer} and replaces any segment that
   * contains one of {@code tokens} (case-insensitively) with "token=***".
   * Segments are rejoined with ';' when anything was masked; otherwise the
   * original message is returned unchanged.
   */
  public static String maskSecureInfo(String msg, String[] tokens, String tokenizer) {
    if (msg == null) {
      return msg;
    }

    StringBuilder newMsg = new StringBuilder();
    String[] splitMsgs = msg.split(tokenizer);
    boolean secureInfoFound = false;

    for (String splitMsg : splitMsgs) {
      String tokenFound = null;
      String sTemp = splitMsg.toLowerCase();

      for (String token : tokens) {
        if (sTemp.contains(token.toLowerCase())) {
          tokenFound = token;
          secureInfoFound = true;
          break;
        }
      }

      if (tokenFound == null) {
        newMsg.append(splitMsg).append(";");
      } else {
        newMsg.append(tokenFound).append("=***;");
      }
    }

    return (secureInfoFound) ? newMsg.toString() : msg;
  }

  /**
   * Returns a copy of {@code info} that is safe for logging: secret values
   * masked and unrecognized keys removed. Returns null for null input.
   */
  public static Properties maskSecureInfoInProps(Properties info) {
    if (info == null) {
      return null;
    }

    String[] propNames = {
        RedshiftProperty.PWD.getName(),
        RedshiftProperty.PASSWORD.getName(),
        RedshiftProperty.IAM_ACCESS_KEY_ID.getName(),
        RedshiftProperty.IAM_SECRET_ACCESS_KEY.getName(),
        RedshiftProperty.IAM_SESSION_TOKEN.getName(),
        RedshiftProperty.AUTH_PROFILE.getName(),
        RedshiftProperty.SSL_KEY.getName(),
        RedshiftProperty.SSL_PASSWORD.getName(),
        RedshiftProperty.WEB_IDENTITY_TOKEN.getName(),
        "Client_ID",
        "Client_Secret",
        "IdP_Tenant",
        "Partner_SPID",
        "Preferred_Role",
        "Profile",
        "roleArn",
    };

    Properties loggedProperties = new RedshiftProperties();
    loggedProperties.putAll(info);
    removeUnrecognizedPropertiesFromLogging(loggedProperties);

    for (String propName : propNames) {
      replaceIgnoreCase(loggedProperties, propName, "***");
    }

    return loggedProperties;
  }

  /**
   * Fetches the set of properties defined in the RedshiftProperty enum class
   * and the properties defined in public docs, compares against the given
   * properties and removes unrecognized properties from logging.
   */
  public static void removeUnrecognizedPropertiesFromLogging(Properties loggedProperties) {
    Set<String> enumProperties = Arrays.stream(RedshiftProperty.values())
        .map(x -> x.getName().toLowerCase(Locale.US))
        .collect(Collectors.toSet());
    Set<String> publicProperties = RedshiftProperty.getPublicProperties().stream()
        .map(x -> x.toLowerCase(Locale.US))
        .collect(Collectors.toSet());

    Set<String> allProperties = enumProperties.stream().collect(Collectors.toSet());
    allProperties.addAll(publicProperties);

    for (String givenProperty : loggedProperties.stringPropertyNames()) {
      // Compare lower-cased: the recognized-name sets above are lower-cased,
      // so the previous raw comparison wrongly dropped mixed-case known keys.
      if (!allProperties.contains(givenProperty.toLowerCase(Locale.US))) {
        loggedProperties.remove(givenProperty);
      }
    }
  }

  /**
   * Replaces the value stored under {@code key} (matched case-insensitively)
   * with {@code newVal}.
   *
   * @return the previous value, or null when no matching key exists.
   */
  public static String replaceIgnoreCase(Properties info, String key, String newVal) {
    String value = info.getProperty(key);
    if (null != value) {
      info.replace(key, newVal);
      return value;
    }

    // Exact key not present: look for a case-insensitive match.
    for (Entry<Object, Object> entry : info.entrySet()) {
      if (key.equalsIgnoreCase((String) entry.getKey())) {
        String oldValue = (String) entry.getValue();
        // Fix: update the entry under its ACTUAL stored key. The old code
        // called info.replace(key, newVal) with the non-matching key, which
        // is a no-op and left the secret value unmasked.
        entry.setValue(newVal);
        return oldValue;
      }
    }

    return null;
  }

  /**
   * Resolves {package, class, method} of the code that called the given
   * logging method.
   */
  private static String[] getCallerMethodName(String logFunction) {
    /*
     * Stack Trace:
     * 0 - dumpThreads
     * 1 - getStackTrace
     * 2 - current method (logging)
     * 3 - calling method (the one we want to log)
     * 4 - method calling the method we want to log... etc.
     */
    // Retrieve the information necessary to log the message.
    StackTraceElement element = getStackElementAbove(logFunction);

    String[] names = new String[3];
    names[2] = element.getMethodName();

    try {
      // Dynamically look up the name of the class.
      Class<?> originatingClass = Class.forName(element.getClassName());
      names[1] = originatingClass.getSimpleName();

      // Get the package of the class.
      names[0] = "";
      Package originatingPackage = originatingClass.getPackage();
      if (null != originatingPackage) {
        names[0] = originatingPackage.getName();
      }
    } catch (ClassNotFoundException e) {
      // Failed to look up the class, just omit it.
      names[0] = "<error>";
      names[1] = element.getClassName();
    }

    if (names[2].equals("<init>")) {
      // <init> means the constructor, so just use the class name.
      names[2] = names[1];
    }

    return names;
  }

  /** Logs {@code msg} at {@code logLevel}; {@code msgArgs} are MessageFormat arguments. */
  public void log(LogLevel logLevel, String msg, Object... msgArgs) {
    if (!checkLogLevel(logLevel, this)) {
      return;
    }
    // Get the package, class, and method names.
    String[] callerNames = getCallerMethodName("log");
    logMsg(logLevel, callerNames, msg, msgArgs);
  }

  /** Logs a message followed by the throwable's stack trace. */
  public void log(LogLevel logLevel, Throwable thrown, String msg, Object... msgArgs) {
    if (!checkLogLevel(logLevel, this)) {
      return;
    }
    String[] callerNames = getCallerMethodName("log");

    StringWriter sw = new StringWriter();
    thrown.printStackTrace(new PrintWriter(sw));
    String stacktrace = sw.toString();

    logMsg(logLevel, callerNames, msg, msgArgs);
    logMsg(logLevel, callerNames, stacktrace);
  }

  /** Logs an exception's stack trace at ERROR level. */
  public void logError(Exception error) {
    if (!checkLogLevel(LogLevel.ERROR, this)) {
      return;
    }
    String[] callerNames = getCallerMethodName("logError");

    StringWriter sw = new StringWriter();
    error.printStackTrace(new PrintWriter(sw));

    logMsg(LogLevel.ERROR, callerNames, sw.toString());
  }

  public void logError(String msg, Object... msgArgs) {
    if (!checkLogLevel(LogLevel.ERROR, this)) {
      return;
    }
    String[] callerNames = getCallerMethodName("logError");
    logMsg(LogLevel.ERROR, callerNames, msg, msgArgs);
  }

  public void logInfo(String msg, Object... msgArgs) {
    if (!checkLogLevel(LogLevel.INFO, this)) {
      return;
    }
    String[] callerNames = getCallerMethodName("logInfo");
    logMsg(LogLevel.INFO, callerNames, msg, msgArgs);
  }

  /**
   * Logs function entry/exit at FUNCTION level.
   *
   * @param entry  true when entering the function, false when returning
   * @param params parameter values on entry, or the return value on exit
   */
  public void logFunction(boolean entry, Object... params) {
    if (!checkLogLevel(LogLevel.FUNCTION, this)) {
      return;
    }

    String msg = (entry) ? " Enter " : " Return ";

    if (params != null) {
      StringBuilder paramVal = new StringBuilder();
      int paramCount = 0;

      paramVal.append(msg);
      if (entry) {
        paramVal.append("(");
      }

      for (Object param : params) {
        if (paramCount++ != 0) {
          paramVal.append(",");
        }

        if (param != null && param.getClass().isArray()) {
          // Common array types get readable element-wise formatting.
          if (param instanceof Object[]) {
            paramVal.append(Arrays.toString((Object[]) param));
          } else if (param instanceof int[]) {
            paramVal.append(Arrays.toString((int[]) param));
          } else if (param instanceof long[]) {
            paramVal.append(Arrays.toString((long[]) param));
          } else {
            paramVal.append(param);
          }
        } else {
          paramVal.append(param);
        }
      }

      if (entry) {
        paramVal.append(") ");
      } else {
        paramVal.append(" ");
      }

      msg = paramVal.toString();
    }

    String[] callerNames = getCallerMethodName("logFunction");
    logMsg(LogLevel.FUNCTION, callerNames, msg);
  }

  public void logDebug(String msg, Object... msgArgs) {
    if (!checkLogLevel(LogLevel.DEBUG, this)) {
      return;
    }
    String[] callerNames = getCallerMethodName("logDebug");
    logMsg(LogLevel.DEBUG, callerNames, msg, msgArgs);
  }

  /** Closes the handler, but only file handlers (console/writer stay open). */
  public void close() {
    if (handler instanceof LogFileHandler) {
      try {
        handler.close();
      } catch (Exception e) {
        // Ignore it: nothing useful can be done if the log file fails to close.
      }
    }
  }

  public void flush() {
    if (handler != null) {
      handler.flush();
    }
  }

  /** Formats and writes one message; logging failures surface as RuntimeException. */
  private void logMsg(LogLevel level, String[] callerNames, String msg, Object... msgArgs) {
    String formattedMsg = formatLogMsg(level, callerNames[0], callerNames[1],
        callerNames[2], msg, msgArgs);

    try {
      if (formattedMsg != null && null != handler) {
        handler.write(formattedMsg);
      }
    } catch (Exception ex) {
      throw new RuntimeException(ex);
    }
  }

  /**
   * Builds "timestamp LEVEL [tid threadName] package.Class.method: message".
   * Returns null when no handler is configured.
   */
  private String formatLogMsg(LogLevel logLevel, String packageName, String className,
      String methodName, String msg, Object... msgArgs) {
    if (null == handler) {
      return null;
    }

    StringBuffer msgBuf = new StringBuffer();

    // SimpleDateFormat is not thread-safe, so a fresh instance per call.
    SimpleDateFormat dateFormat = new SimpleDateFormat("MMM dd HH:mm:ss.SSS");
    dateFormat.format(new Date(), msgBuf, new FieldPosition(0));

    msgBuf.append(" ");
    msgBuf.append(logLevel.toString()).append(" ");
    msgBuf.append(" ");
    msgBuf.append("[").append(Thread.currentThread().getId()).append(" ")
        .append(Thread.currentThread().getName()).append("] ");
    msgBuf.append(packageName).append(".");
    msgBuf.append(className).append(".");
    msgBuf.append(methodName).append(": ");

    if (msgArgs == null || msgArgs.length == 0) {
      msgBuf.append(msg);
    } else {
      msgBuf.append(new MessageFormat(msg).format(msgArgs));
    }

    return msgBuf.toString();
  }

  /**
   * Resolves the log file path: uses {@code logPath} when given, otherwise the
   * current working directory when logging is enabled.
   *
   * @return full path of "redshift_jdbc.log", or null when logging is off.
   */
  public static String getLogFileUsingPath(String logLevel, String logPath) {
    if (logPath == null) {
      // Check loglevel and get current directory
      LogLevel level = (logLevel != null)
          ? LogLevel.getLogLevel(logLevel)
          : LogLevel.OFF;

      if (level != LogLevel.OFF) {
        logPath = System.getProperty("user.dir");
      }
    }

    if (logPath != null) {
      return logPath + File.separatorChar + "redshift_jdbc.log";
    }

    return null;
  }
}
8,404
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/QuerySanitizer.java
package com.amazon.redshift.util;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * The utility class implements credentials removal from query text. It is
 * based on the PADB implementation in padb/src/xen_utils/log_statement.cpp
 * with the following change: it will apply to any query text regardless of
 * the SQL statement type.
 */
public class QuerySanitizer {

  // password<space>*['"].*?['"]  ->  password '***'
  private static final Pattern PWD_REGEX =
      Pattern.compile("(password)\\s*['\"].*?['\"]", Pattern.CASE_INSENSITIVE);
  private static final String PWD_REPLACEMENT_REGEX = "$1 '***'";

  // credentials (as) '...' -> credentials (as) '***'. When no quotes follow
  // the keyword, everything to the end of the text is eaten to avoid
  // accidental exposure in logs. A leading whitespace before "credentials"
  // minimizes the risk of matching a substring of another word.
  private static final Pattern CREDS_REGEX = Pattern.compile(
      "(\\scredentials\\s*)(as\\s*)?(?:(?:['\"].*?['\"])|.*)", Pattern.CASE_INSENSITIVE);
  private static final String CREDS_REPLACEMENT_REGEX = "$1'***'";

  // access_key_id (as) '...' -> access_key_id (as) '***'
  private static final Pattern ACCESS_KEY_REGEX = Pattern.compile(
      "(\\saccess_key_id\\s*)(as\\s*)?(?:(?:['\"].*?['\"])|.*)", Pattern.CASE_INSENSITIVE);

  // secret_access_key (as) '...' -> secret_access_key (as) '***'
  private static final Pattern SECRET_KEY_REGEX = Pattern.compile(
      "(\\ssecret_access_key\\s*)(as\\s*)?(?:(?:['\"].*?['\"])|.*)", Pattern.CASE_INSENSITIVE);

  // iam_role (as) '...' -> iam_role (as) '***'
  private static final Pattern IAM_ROLE_REGEX = Pattern.compile(
      "(\\siam_role\\s*)(as\\s*)?(?:(?:['\"].*?['\"])|.*)", Pattern.CASE_INSENSITIVE);

  // master_symmetric_key (as) '...' -> master_symmetric_key (as) '***'
  private static final Pattern SYM_KEY_REGEX = Pattern.compile(
      "(\\smaster_symmetric_key\\s*)(as\\s*)?(?:(?:['\"].*?['\"])|.*)", Pattern.CASE_INSENSITIVE);

  // kms_key_id (as) '...' -> kms_key_id (as) '***'
  private static final Pattern KMS_KEY_REGEX = Pattern.compile(
      "(\\skms_key_id\\s*)(as\\s*)?(?:(?:['\"].*?['\"])|.*)", Pattern.CASE_INSENSITIVE);

  // session_token (as) '...' -> session_token (as) '***'
  private static final Pattern SESSION_TOKEN_REGEX = Pattern.compile(
      "(\\ssession_token\\s*)(as\\s*)?(?:(?:['\"].*?['\"])|.*)", Pattern.CASE_INSENSITIVE);

  // Last resort: mask lingering "key = value" style secrets, e.g.
  //   aws_access_key_id=foo   -> aws_access_key_id=***
  //   accesskeyid = foo       -> accesskeyid=***
  //   secret_access_key=foo   -> secret_access_key=***
  //   master_symmetric_key=f  -> master_symmetric_key=***
  private static final Pattern LINGERING_SECRET_REGEX = Pattern.compile(
      "(((aws[-_]?)?"
          + "((iam[-_]?role)"
          + "|(access[-_]?key([-_]id)?)"
          + "|(secret[-_]?access[-_]?key))"
          + ")|(token)|((master[-_]?)?symmetric[-_]?key)"
          + ")\\s*=\\s*[^;'\"]*",
      Pattern.CASE_INSENSITIVE);
  private static final String LINGERING_SECRET_REPLACEMENT_REGEX = "$1=***";

  // encryptionKey is one of the arguments of xpx restore_table on encrypted
  // clusters. The value is base64 encoded, hence the [A-Za-z0-9+/=] class:
  // these are the only characters possible in a base64 string.
  private static final Pattern ENCRYPT_KEY_REGEX = Pattern.compile(
      "((--encryptionKey|--k|-k)\\s+)([A-Za-z0-9+/=]*)", Pattern.CASE_INSENSITIVE);
  private static final String ENCRYPT_KEY_REPLACEMENT_REGEX = "$1***";

  /**
   * Removes every recognized credential form from {@code queryText}.
   * Null input yields null.
   */
  public static String filterCredentials(final String queryText) {
    String sanitized = processPassword(queryText);
    sanitized = processCreds(sanitized);
    sanitized = processAccessKey(sanitized);
    sanitized = processSecretKey(sanitized);
    sanitized = processIamRole(sanitized);
    sanitized = processSymKey(sanitized);
    sanitized = processKmsKey(sanitized);
    sanitized = processSessionToken(sanitized);
    sanitized = processLingeringSecrets(sanitized);
    sanitized = processEncryptKey(sanitized);
    return sanitized;
  }

  protected static String processPassword(final String queryText) {
    return processQueryText(queryText, PWD_REGEX, PWD_REPLACEMENT_REGEX);
  }

  protected static String processCreds(final String queryText) {
    return processQueryText(queryText, CREDS_REGEX, CREDS_REPLACEMENT_REGEX);
  }

  protected static String processAccessKey(final String queryText) {
    return processQueryText(queryText, ACCESS_KEY_REGEX, CREDS_REPLACEMENT_REGEX);
  }

  protected static String processSecretKey(final String queryText) {
    return processQueryText(queryText, SECRET_KEY_REGEX, CREDS_REPLACEMENT_REGEX);
  }

  protected static String processIamRole(final String queryText) {
    return processQueryText(queryText, IAM_ROLE_REGEX, CREDS_REPLACEMENT_REGEX);
  }

  protected static String processSymKey(final String queryText) {
    return processQueryText(queryText, SYM_KEY_REGEX, CREDS_REPLACEMENT_REGEX);
  }

  protected static String processKmsKey(final String queryText) {
    return processQueryText(queryText, KMS_KEY_REGEX, CREDS_REPLACEMENT_REGEX);
  }

  protected static String processSessionToken(final String queryText) {
    return processQueryText(queryText, SESSION_TOKEN_REGEX, CREDS_REPLACEMENT_REGEX);
  }

  protected static String processLingeringSecrets(final String queryText) {
    return processQueryText(queryText, LINGERING_SECRET_REGEX,
        LINGERING_SECRET_REPLACEMENT_REGEX);
  }

  protected static String processEncryptKey(final String queryText) {
    return processQueryText(queryText, ENCRYPT_KEY_REGEX, ENCRYPT_KEY_REPLACEMENT_REGEX);
  }

  /** Applies one pattern/replacement pair; passes null through untouched. */
  private static String processQueryText(
      String queryText, Pattern matchPattern, String replacementPattern) {
    if (queryText == null) {
      return null;
    }
    Matcher candidate = matchPattern.matcher(queryText);
    if (!candidate.find()) {
      return queryText;
    }
    return candidate.replaceAll(replacementPattern);
  }
}
8,405
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftState.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; /** * This class is used for holding SQLState codes. */ public enum RedshiftState { UNKNOWN_STATE(""), TOO_MANY_RESULTS("0100E"), NO_DATA("02000"), INVALID_PARAMETER_TYPE("07006"), /** * We could establish a connection with the server for unknown reasons. Could be a network * problem. */ CONNECTION_UNABLE_TO_CONNECT("08001"), CONNECTION_DOES_NOT_EXIST("08003"), /** * The server rejected our connection attempt. Usually an authentication failure, but could be a * configuration error like asking for a SSL connection with a server that wasn't built with SSL * support. */ CONNECTION_REJECTED("08004"), /** * After a connection has been established, it went bad. */ CONNECTION_FAILURE("08006"), CONNECTION_FAILURE_DURING_TRANSACTION("08007"), /** * The server sent us a response the driver was not prepared for and is either bizarre datastream * corruption, a driver bug, or a protocol violation on the server's part. 
*/ PROTOCOL_VIOLATION("08P01"), COMMUNICATION_ERROR("08S01"), NOT_IMPLEMENTED("0A000"), DATA_ERROR("22000"), STRING_DATA_RIGHT_TRUNCATION("22001"), NUMERIC_VALUE_OUT_OF_RANGE("22003"), BAD_DATETIME_FORMAT("22007"), DATETIME_OVERFLOW("22008"), DIVISION_BY_ZERO("22012"), MOST_SPECIFIC_TYPE_DOES_NOT_MATCH("2200G"), INVALID_PARAMETER_VALUE("22023"), NOT_NULL_VIOLATION("23502"), FOREIGN_KEY_VIOLATION("23503"), UNIQUE_VIOLATION("23505"), CHECK_VIOLATION("23514"), EXCLUSION_VIOLATION("23P01"), INVALID_CURSOR_STATE("24000"), TRANSACTION_STATE_INVALID("25000"), ACTIVE_SQL_TRANSACTION("25001"), NO_ACTIVE_SQL_TRANSACTION("25P01"), IN_FAILED_SQL_TRANSACTION("25P02"), INVALID_SQL_STATEMENT_NAME("26000"), INVALID_AUTHORIZATION_SPECIFICATION("28000"), INVALID_TRANSACTION_TERMINATION("2D000"), STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL("2F003"), INVALID_SAVEPOINT_SPECIFICATION("3B000"), DEADLOCK_DETECTED("40P01"), SYNTAX_ERROR("42601"), UNDEFINED_COLUMN("42703"), UNDEFINED_OBJECT("42704"), WRONG_OBJECT_TYPE("42809"), NUMERIC_CONSTANT_OUT_OF_RANGE("42820"), DATA_TYPE_MISMATCH("42821"), UNDEFINED_FUNCTION("42883"), INVALID_NAME("42602"), DATATYPE_MISMATCH("42804"), CANNOT_COERCE("42846"), UNDEFINED_TABLE("42P01"), OUT_OF_MEMORY("53200"), OBJECT_NOT_IN_STATE("55000"), OBJECT_IN_USE("55006"), QUERY_CANCELED("57014"), SYSTEM_ERROR("60000"), IO_ERROR("58030"), UNEXPECTED_ERROR("99999"); private final String state; RedshiftState(String state) { this.state = state; } public String getState() { return this.state; } public static boolean isConnectionError(String psqlState) { return RedshiftState.CONNECTION_UNABLE_TO_CONNECT.getState().equals(psqlState) || RedshiftState.CONNECTION_DOES_NOT_EXIST.getState().equals(psqlState) || RedshiftState.CONNECTION_REJECTED.getState().equals(psqlState) || RedshiftState.CONNECTION_FAILURE.getState().equals(psqlState) || RedshiftState.CONNECTION_FAILURE_DURING_TRANSACTION.getState().equals(psqlState); } }
8,406
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftProperties.java
package com.amazon.redshift.util;

import com.amazon.redshift.RedshiftProperty;

import java.util.Collections;
import java.util.Enumeration;
import java.util.Locale;
import java.util.Properties;

/**
 * A {@link Properties} subclass whose keys are case-insensitive: every key is
 * normalized to lower case (using {@link Locale#ENGLISH}) on both read and write.
 *
 * <p>Note: only the overridden {@link #getProperty(String)} and
 * {@link #setProperty(String, String)} normalize keys; raw {@code Hashtable}
 * accessors such as {@code get(Object)} bypass the normalization.</p>
 */
public class RedshiftProperties extends Properties {

  /**
   * Creates an empty property list with no default values.
   */
  public RedshiftProperties() {
    super(null);
  }

  /**
   * Creates an empty property list with the specified defaults.
   *
   * @param defaults the defaults.
   */
  public RedshiftProperties(Properties defaults) {
    super(defaults);
  }

  /**
   * Creates a property list seeded from {@code info}, with the specified defaults.
   * Every key from {@code info} is copied in with its name lower-cased.
   *
   * @param info the input properties that need to be copied; may be {@code null}.
   * @param defaults the defaults.
   * @throws RedshiftException if {@code info} contains a non-string value for a key
   */
  public RedshiftProperties(Properties info, Properties defaults)
      throws RedshiftException {
    super(defaults);
    if (info != null) {
      // Properties from the user come in as a plain Properties object. Copy them
      // into this RedshiftProperties, lower-casing every key on the way in.
      Enumeration<String> en = Collections.enumeration(info.stringPropertyNames());
      while (en.hasMoreElements()) {
        String key = en.nextElement();
        String val = info.getProperty(key);
        if (val == null) {
          // stringPropertyNames() only reports keys whose value is a String, so a
          // null here indicates a key mapped to a non-string (or removed) value.
          throw new RedshiftException(
              GT.tr("Properties for the driver contains a non-string value for the key ")
                  + key,
              RedshiftState.UNEXPECTED_ERROR);
        }
        this.setProperty(key, val);
      }
    }
  }

  /**
   * Looks up a property value by its lower-cased key.
   *
   * @param key key (treated case-insensitively)
   * @return property value, or {@code null} if absent
   */
  @Override
  public String getProperty(String key) {
    return super.getProperty(key.toLowerCase(Locale.ENGLISH));
  }

  /**
   * Stores a property value under its lower-cased key.
   *
   * @param key key (lower-cased before storage)
   * @param value value to store
   * @return the previous value for the normalized key, or {@code null}
   */
  @Override
  public synchronized Object setProperty(String key, String value) {
    return super.setProperty(key.toLowerCase(Locale.ENGLISH), value);
  }

  /**
   * Validates property values that only accept a closed set of options.
   *
   * @param properties the properties to validate
   * @throws RedshiftException if the compression algorithm is not one of
   *         {@code lz4:1}, {@code lz4} or {@code off}
   */
  public static void evaluateProperties(RedshiftProperties properties)
      throws RedshiftException {
    // evaluate compression algo
    // NOTE(review): assumes RedshiftProperty.COMPRESSION supplies a non-null
    // default; if it can return null this throws NPE instead of
    // RedshiftException — confirm against RedshiftProperty.
    String compressionAlgo = RedshiftProperty.COMPRESSION.get(properties);
    if (!(compressionAlgo.equalsIgnoreCase("lz4:1")
        || compressionAlgo.equalsIgnoreCase("lz4")
        || compressionAlgo.equalsIgnoreCase("off"))) {
      throw new RedshiftException(
          "Unsupported compression algorithm specified : " + compressionAlgo);
    }
  }
}
8,407
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/HostSpec.java
/*
 * Copyright (c) 2012, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import static java.util.regex.Pattern.compile;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Simple immutable container for a host name and port number.
 */
public class HostSpec {

  /** Host masks that are never routed through a SOCKS proxy by default. */
  public static final String DEFAULT_NON_PROXY_HOSTS = "localhost|127.*|[::1]|0.0.0.0|[::0]";

  protected final String host;
  protected final int port;

  public HostSpec(String host, int port) {
    this.host = host;
    this.port = port;
  }

  public String getHost() {
    return host;
  }

  public int getPort() {
    return port;
  }

  /** Returns this spec formatted as {@code host:port}. */
  public String toString() {
    return host + ":" + port;
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof HostSpec)) {
      return false;
    }
    HostSpec other = (HostSpec) obj;
    return port == other.port && host.equals(other.host);
  }

  @Override
  public int hashCode() {
    return port ^ host.hashCode();
  }

  /**
   * Decides whether this host should be resolved locally. Resolution proceeds
   * unless a SOCKS proxy is configured AND the host does not match the
   * configured non-proxy-host masks.
   */
  public Boolean shouldResolve() {
    String socksProxy = System.getProperty("socksProxyHost");
    if (socksProxy == null || socksProxy.trim().isEmpty()) {
      return true;
    }
    return matchesNonProxyHosts();
  }

  private Boolean matchesNonProxyHosts() {
    String masks = System.getProperty("socksNonProxyHosts", DEFAULT_NON_PROXY_HOSTS);
    if (masks == null || this.host.isEmpty()) {
      return false;
    }
    Pattern pattern = toPattern(masks);
    if (pattern == null) {
      return false;
    }
    Matcher matcher = pattern.matcher(this.host);
    return matcher.matches();
  }

  /** Compiles a {@code |}-separated wildcard mask list into a single alternation pattern. */
  private Pattern toPattern(String mask) {
    StringBuilder regex = new StringBuilder();
    for (String disjunct : mask.split("\\|")) {
      if (disjunct.isEmpty()) {
        continue;
      }
      if (regex.length() > 0) {
        regex.append('|');
      }
      // NOTE(review): each disjunct is lower-cased but the host is matched
      // verbatim, so matching is effectively case-sensitive on the host side —
      // confirm this is intended.
      regex.append(disjunctToRegex(disjunct.toLowerCase()));
    }
    return regex.length() == 0 ? null : compile(regex.toString());
  }

  /** Translates a single leading or trailing {@code *} wildcard into a regex fragment. */
  private String disjunctToRegex(String disjunct) {
    if (disjunct.startsWith("*")) {
      return ".*" + Pattern.quote(disjunct.substring(1));
    }
    if (disjunct.endsWith("*")) {
      return Pattern.quote(disjunct.substring(0, disjunct.length() - 1)) + ".*";
    }
    return Pattern.quote(disjunct);
  }
}
8,408
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftInterval.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.io.Serializable;
import java.sql.SQLException;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Locale;
import java.util.StringTokenizer;

/**
 * This implements a class that handles the Redshift interval type.
 *
 * <p>The interval is stored as seven integer components: years, months, days,
 * hours, minutes, whole seconds and microseconds. Parsing accepts the server's
 * verbose output format, the ISO interval format ("1 year 2 mons 03:04:05"),
 * and a subset of ISO-8601 duration strings ("P1Y2MT3H").</p>
 */
public class RedshiftInterval extends RedshiftObject implements Serializable, Cloneable {
  private static final int MICROS_IN_SECOND = 1000000;

  // Interval components; seconds are split into a whole part and a
  // microsecond part so no precision is lost in round-trips.
  private int years;
  private int months;
  private int days;
  private int hours;
  private int minutes;
  private int wholeSeconds;
  private int microSeconds;

  /**
   * required by the driver.
   */
  public RedshiftInterval() {
    setType("interval");
  }

  /**
   * Initialize an interval with a given interval string representation.
   *
   * @param value String represented interval (e.g. '3 years 2 mons')
   * @throws SQLException Is thrown if the string representation has an unknown format
   * @see #setValue(String)
   */
  public RedshiftInterval(String value) throws SQLException {
    this();
    setValue(value);
  }

  /**
   * Returns the index of the first character from {@code find} that occurs in
   * {@code value} at or after {@code position}, or -1 (or 0) if none is found.
   * Used to locate the next ISO-8601 unit designator (e.g. 'Y', 'M', 'D').
   */
  private int lookAhead(String value, int position, String find) {
    char [] tokens = find.toCharArray();
    int found = -1;

    for ( int i = 0; i < tokens.length; i++ ) {
      found = value.indexOf(tokens[i], position);
      if ( found > 0 ) {
        return found;
      }
    }
    return found;
  }

  /**
   * Parses an ISO-8601 duration of the form P[nY][nM][nD][T[nH][nM][nS]].
   *
   * NOTE(review): components are parsed with Integer.parseInt, so fractional
   * seconds (e.g. "PT1.5S") and week designators ('W') are not supported and
   * raise NumberFormatException — confirm this limitation is acceptable.
   */
  private void parseISO8601Format(String value) {
    int number = 0;
    String dateValue;
    String timeValue = null;

    // Split the duration into its date part and optional time part at 'T'.
    int hasTime = value.indexOf('T');
    if ( hasTime > 0 ) {
      /* skip over the P */
      dateValue = value.substring(1,hasTime);
      timeValue = value.substring(hasTime + 1);
    } else {
      /* skip over the P */
      dateValue = value.substring(1);
    }

    // Walk the date part, consuming "<number><designator>" pairs.
    for ( int i = 0; i < dateValue.length(); i++ ) {
      int lookAhead = lookAhead(dateValue, i, "YMD");
      if (lookAhead > 0) {
        number = Integer.parseInt(dateValue.substring(i, lookAhead));
        if (dateValue.charAt(lookAhead) == 'Y') {
          setYears(number);
        } else if (dateValue.charAt(lookAhead) == 'M') {
          setMonths(number);
        } else if (dateValue.charAt(lookAhead) == 'D') {
          setDays(number);
        }
        // Jump past the designator we just consumed.
        i = lookAhead;
      }
    }
    // Same walk for the time part, where 'M' now means minutes.
    if ( timeValue != null ) {
      for (int i = 0; i < timeValue.length(); i++) {
        int lookAhead = lookAhead(timeValue, i, "HMS");
        if (lookAhead > 0) {
          number = Integer.parseInt(timeValue.substring(i, lookAhead));
          if (timeValue.charAt(lookAhead) == 'H') {
            setHours(number);
          } else if (timeValue.charAt(lookAhead) == 'M') {
            setMinutes(number);
          } else if (timeValue.charAt(lookAhead) == 'S') {
            setSeconds(number);
          }
          i = lookAhead;
        }
      }
    }
  }

  /**
   * Initializes all values of this interval to the specified values.
   *
   * @param years years
   * @param months months
   * @param days days
   * @param hours hours
   * @param minutes minutes
   * @param seconds seconds
   * @see #setValue(int, int, int, int, int, double)
   */
  public RedshiftInterval(int years, int months, int days, int hours, int minutes,
      double seconds) {
    this();
    setValue(years, months, days, hours, minutes, seconds);
  }

  /**
   * Sets an interval string represented value to this instance. This method only recognize the
   * format, that Postgres returns - not all input formats are supported (e.g. '1 yr 2 m 3 s').
   *
   * @param value String represented interval (e.g. '3 years 2 mons')
   * @throws SQLException Is thrown if the string representation has an unknown format
   */
  public void setValue(String value) throws SQLException {
    // The verbose (non-Postgres) format starts with '@'; ISO-8601 starts with 'P'.
    final boolean PostgresFormat = !value.startsWith("@");
    if (value.startsWith("P")) {
      parseISO8601Format(value);
      return;
    }
    // Just a simple '0'
    if (!PostgresFormat && value.length() == 3 && value.charAt(2) == '0') {
      setValue(0, 0, 0, 0, 0, 0.0);
      return;
    }

    int years = 0;
    int months = 0;
    int days = 0;
    int hours = 0;
    int minutes = 0;
    double seconds = 0;

    try {
      String valueToken = null;

      // Strip '+' and '@' so the tokenizer only sees numbers and unit words.
      value = value.replace('+', ' ').replace('@', ' ');
      final StringTokenizer st = new StringTokenizer(value);
      // Tokens alternate: odd positions carry numbers (or an hh:mm[:ss] group),
      // even positions carry the unit word for the preceding number.
      for (int i = 1; st.hasMoreTokens(); i++) {
        String token = st.nextToken();

        if ((i & 1) == 1) {
          int endHours = token.indexOf(':');
          if (endHours == -1) {
            // Plain number — remember it until the next token names its unit.
            valueToken = token;
            continue;
          }

          // This handles hours, minutes, seconds and microseconds for
          // ISO intervals
          int offset = (token.charAt(0) == '-') ? 1 : 0;

          hours = nullSafeIntGet(token.substring(offset + 0, endHours));
          minutes = nullSafeIntGet(token.substring(endHours + 1, endHours + 3));

          // Pre 7.4 servers do not put second information into the results
          // unless it is non-zero.
          int endMinutes = token.indexOf(':', endHours + 1);
          if (endMinutes != -1) {
            seconds = nullSafeDoubleGet(token.substring(endMinutes + 1));
          }

          if (offset == 1) {
            // A leading '-' on the time group negates all of its components.
            hours = -hours;
            minutes = -minutes;
            seconds = -seconds;
          }

          valueToken = null;
        } else {
          // This handles years, months, days for both, ISO and
          // Non-ISO intervals. Hours, minutes, seconds and microseconds
          // are handled for Non-ISO intervals here.
          if (token.startsWith("year")) {
            years = nullSafeIntGet(valueToken);
          } else if (token.startsWith("mon")) {
            months = nullSafeIntGet(valueToken);
          } else if (token.startsWith("day")) {
            days = nullSafeIntGet(valueToken);
          } else if (token.startsWith("hour")) {
            hours = nullSafeIntGet(valueToken);
          } else if (token.startsWith("min")) {
            minutes = nullSafeIntGet(valueToken);
          } else if (token.startsWith("sec")) {
            seconds = nullSafeDoubleGet(valueToken);
          }
        }
      }
    } catch (NumberFormatException e) {
      throw new RedshiftException(GT.tr("Conversion of interval failed"),
          RedshiftState.NUMERIC_CONSTANT_OUT_OF_RANGE, e);
    }

    if (!PostgresFormat && value.endsWith("ago")) {
      // Inverse the leading sign
      setValue(-years, -months, -days, -hours, -minutes, -seconds);
    } else {
      setValue(years, months, days, hours, minutes, seconds);
    }
  }

  /**
   * Set all values of this interval to the specified values.
   *
   * @param years years
   * @param months months
   * @param days days
   * @param hours hours
   * @param minutes minutes
   * @param seconds seconds
   */
  public void setValue(int years, int months, int days, int hours, int minutes, double seconds) {
    setYears(years);
    setMonths(months);
    setDays(days);
    setHours(hours);
    setMinutes(minutes);
    setSeconds(seconds);
  }

  /**
   * Set all values of this interval using just two specified values.
   *
   * @param month Total number of months (assuming 12 months in a year)
   * @param time Total number of microseconds (assuming 1day = 24hrs = 1440mins = 86400secs = 8.64e10microsecs)
   */
  public void setValue(int month, long time) {
    int tm_year;
    int tm_mon;

    if (month != 0) {
      tm_year = month / 12;
      tm_mon = month % 12;
    } else {
      tm_year = 0;
      tm_mon = 0;
    }

    // Successively peel days, hours, minutes, seconds and the microsecond
    // remainder off the total microsecond count.
    int tm_mday = (int)(time / 86400000000L);
    time -= (tm_mday * 86400000000L);
    int tm_hour = (int)(time / 3600000000L);
    time -= (tm_hour * 3600000000L);
    int tm_min = (int)(time / 60000000L);
    time -= (tm_min * 60000000L);
    int tm_sec = (int)(time / 1000000L);
    int fsec = (int)(time - (tm_sec * 1000000));
    double sec = tm_sec + (fsec/1000000.0);

    setValue(tm_year, tm_mon, tm_mday, tm_hour, tm_min, sec);
  }

  /**
   * Returns the stored interval information as a string.
   *
   * @return String represented interval
   */
  public String getValue() {
    // Locale.US formatter guarantees '.' as the decimal separator; the pattern
    // keeps at least one and at most six fractional digits.
    DecimalFormat df = (DecimalFormat) NumberFormat.getInstance(Locale.US);
    df.applyPattern("0.0#####");

    return String.format(
      Locale.ROOT,
      "%d years %d mons %d days %d hours %d mins %s secs",
      years,
      months,
      days,
      hours,
      minutes,
      df.format(getSeconds())
    );
  }

  /**
   * Returns the years represented by this interval.
   *
   * @return years represented by this interval
   */
  public int getYears() {
    return years;
  }

  /**
   * Set the years of this interval to the specified value.
   *
   * @param years years to set
   */
  public void setYears(int years) {
    this.years = years;
  }

  /**
   * Returns the months represented by this interval.
   *
   * @return months represented by this interval
   */
  public int getMonths() {
    return months;
  }

  /**
   * Set the months of this interval to the specified value.
   *
   * @param months months to set
   */
  public void setMonths(int months) {
    this.months = months;
  }

  /**
   * Returns the days represented by this interval.
   *
   * @return days represented by this interval
   */
  public int getDays() {
    return days;
  }

  /**
   * Set the days of this interval to the specified value.
   *
   * @param days days to set
   */
  public void setDays(int days) {
    this.days = days;
  }

  /**
   * Returns the hours represented by this interval.
   *
   * @return hours represented by this interval
   */
  public int getHours() {
    return hours;
  }

  /**
   * Set the hours of this interval to the specified value.
   *
   * @param hours hours to set
   */
  public void setHours(int hours) {
    this.hours = hours;
  }

  /**
   * Returns the minutes represented by this interval.
   *
   * @return minutes represented by this interval
   */
  public int getMinutes() {
    return minutes;
  }

  /**
   * Set the minutes of this interval to the specified value.
   *
   * @param minutes minutes to set
   */
  public void setMinutes(int minutes) {
    this.minutes = minutes;
  }

  /**
   * Returns the seconds represented by this interval.
   *
   * @return seconds represented by this interval
   */
  public double getSeconds() {
    return wholeSeconds + (double) microSeconds / MICROS_IN_SECOND;
  }

  /** Returns the whole-second component (without the microsecond fraction). */
  public int getWholeSeconds() {
    return wholeSeconds;
  }

  /** Returns the microsecond component (fraction of a second). */
  public int getMicroSeconds() {
    return microSeconds;
  }

  /**
   * Set the seconds of this interval to the specified value.
   *
   * @param seconds seconds to set
   */
  public void setSeconds(double seconds) {
    // Split into whole seconds and a rounded microsecond remainder.
    wholeSeconds = (int) seconds;
    microSeconds = (int) Math.round((seconds - wholeSeconds) * MICROS_IN_SECOND);
  }

  /**
   * Rolls this interval on a given calendar.
   *
   * @param cal Calendar instance to add to
   */
  public void add(Calendar cal) {
    // Round microseconds to the nearest millisecond (Calendar has no finer unit).
    final int milliseconds =
        (microSeconds + ((microSeconds < 0) ? -500 : 500)) / 1000 + wholeSeconds * 1000;

    cal.add(Calendar.MILLISECOND, milliseconds);
    cal.add(Calendar.MINUTE, getMinutes());
    cal.add(Calendar.HOUR, getHours());
    cal.add(Calendar.DAY_OF_MONTH, getDays());
    cal.add(Calendar.MONTH, getMonths());
    cal.add(Calendar.YEAR, getYears());
  }

  /**
   * Rolls this interval on a given date; the passed Date is mutated in place.
   *
   * @param date Date instance to add to
   */
  public void add(Date date) {
    final Calendar cal = Calendar.getInstance();
    cal.setTime(date);
    add(cal);
    date.setTime(cal.getTime().getTime());
  }

  /**
   * Add this interval's value to the passed interval. This is backwards to what I would expect, but
   * this makes it match the other existing add methods.
   *
   * @param interval intval to add
   */
  public void add(RedshiftInterval interval) {
    interval.setYears(interval.getYears() + getYears());
    interval.setMonths(interval.getMonths() + getMonths());
    interval.setDays(interval.getDays() + getDays());
    interval.setHours(interval.getHours() + getHours());
    interval.setMinutes(interval.getMinutes() + getMinutes());
    interval.setSeconds(interval.getSeconds() + getSeconds());
  }

  /**
   * Scale this interval by an integer factor. The server can scale by arbitrary factors, but that
   * would require adjusting the call signatures for all the existing methods like getDays() or
   * providing our own justification of fractional intervals. Neither of these seem like a good idea
   * without a strong use case.
   *
   * @param factor scale factor
   */
  public void scale(int factor) {
    setYears(factor * getYears());
    setMonths(factor * getMonths());
    setDays(factor * getDays());
    setHours(factor * getHours());
    setMinutes(factor * getMinutes());
    setSeconds(factor * getSeconds());
  }

  /**
   * Converts the month-year part of interval to the total number of months using 1year = 12months.
   *
   * @return Total number of months.
   */
  public int totalMonths() {
    return 12 * years + months;
  }

  /**
   * Converts the day-time part of interval to the total number of microseconds using
   * 1day = 24hrs = 1440mins = 86400secs = 8.64e10microsecs.
   *
   * @return Total number of microseconds.
   */
  public long totalMicroseconds() {
    return ((long) (((days * 24 + hours) * 60 + minutes) * 60 + wholeSeconds)) * 1000000
        + microSeconds;
  }

  /**
   * Returns integer value of value or 0 if value is null.
   *
   * @param value integer as string value
   * @return integer parsed from string value
   * @throws NumberFormatException if the string contains invalid chars
   */
  private static int nullSafeIntGet(String value) throws NumberFormatException {
    return (value == null) ? 0 : Integer.parseInt(value);
  }

  /**
   * Returns double value of value or 0 if value is null.
   *
   * @param value double as string value
   * @return double parsed from string value
   * @throws NumberFormatException if the string contains invalid chars
   */
  private static double nullSafeDoubleGet(String value) throws NumberFormatException {
    return (value == null) ? 0 : Double.parseDouble(value);
  }

  /**
   * Returns whether an object is equal to this one or not. Two intervals are
   * equal only if all seven stored components match exactly.
   *
   * @param obj Object to compare with
   * @return true if the two intervals are identical
   */
  public boolean equals(Object obj) {
    if (obj == null) {
      return false;
    }

    if (obj == this) {
      return true;
    }

    if (!(obj instanceof RedshiftInterval)) {
      return false;
    }

    final RedshiftInterval pgi = (RedshiftInterval) obj;

    return pgi.years == years
        && pgi.months == months
        && pgi.days == days
        && pgi.hours == hours
        && pgi.minutes == minutes
        && pgi.wholeSeconds == wholeSeconds
        && pgi.microSeconds == microSeconds;
  }

  /**
   * Returns a hashCode for this object, combining all seven components so it is
   * consistent with {@link #equals(Object)}.
   *
   * @return hashCode
   */
  @Override
  public int hashCode() {
    return (((((((8 * 31 + microSeconds) * 31 + wholeSeconds) * 31 + minutes) * 31 + hours) * 31
        + days) * 31 + months) * 31 + years) * 31;
  }

  @Override
  public Object clone() throws CloneNotSupportedException {
    // squid:S2157 "Cloneables" should implement "clone
    return super.clone();
  }
}
8,409
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftVarbyte.java
package com.amazon.redshift.util; // Right now most of methods in base class. // In future, if there are differences in bytes conversion of VARBYTE and GEOGRAPHY // then we can add more methods in this class. public class RedshiftVarbyte extends RedshiftByteTypes{ }
8,410
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/HStoreConverter.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import com.amazon.redshift.core.Encoding;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

/**
 * Converts between hstore key/value data and Java {@link Map}s in both the
 * binary wire format and the text ({@code "k"=>"v", ...}) format.
 */
public class HStoreConverter {

  /**
   * Decodes the binary hstore wire format into a map.
   *
   * <p>Layout (all ints are 4-byte big-endian): entry count, then for each
   * entry a key length + key bytes followed by a value length + value bytes,
   * where a value length of -1 denotes SQL NULL (no value bytes follow).</p>
   *
   * @param b raw binary hstore payload
   * @param encoding character encoding used to decode key and value bytes
   * @return decoded map; NULL values are represented as Java {@code null}
   * @throws SQLException if the bytes cannot be decoded in the given encoding
   */
  public static Map<String, String> fromBytes(byte[] b, Encoding encoding) throws SQLException {
    Map<String, String> m = new HashMap<String, String>();
    int pos = 0;
    int numElements = ByteConverter.int4(b, pos);
    pos += 4;
    try {
      for (int i = 0; i < numElements; ++i) {
        int keyLen = ByteConverter.int4(b, pos);
        pos += 4;
        String key = encoding.decode(b, pos, keyLen);
        pos += keyLen;
        int valLen = ByteConverter.int4(b, pos);
        pos += 4;
        String val;
        if (valLen == -1) {
          // -1 length marks a SQL NULL value; no bytes are consumed.
          val = null;
        } else {
          val = encoding.decode(b, pos, valLen);
          pos += valLen;
        }
        m.put(key, val);
      }
    } catch (IOException ioe) {
      throw new RedshiftException(
          GT.tr(
              "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."),
          RedshiftState.DATA_ERROR, ioe);
    }
    return m;
  }

  /**
   * Encodes a map into the binary hstore wire format (see {@link #fromBytes}
   * for the layout). Keys and values are converted via {@code toString()};
   * {@code null} values are written as length -1.
   *
   * @param m map to encode
   * @param encoding character encoding used to encode key and value strings
   * @return binary hstore payload
   * @throws SQLException if a string cannot be encoded in the given encoding
   */
  public static byte[] toBytes(Map<?, ?> m, Encoding encoding) throws SQLException {
    // 4 + 10 * size is only an initial capacity estimate; the stream grows as needed.
    ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + 10 * m.size());
    byte[] lenBuf = new byte[4];
    try {
      ByteConverter.int4(lenBuf, 0, m.size());
      baos.write(lenBuf);
      for (Entry<?, ?> e : m.entrySet()) {
        byte[] key = encoding.encode(e.getKey().toString());
        ByteConverter.int4(lenBuf, 0, key.length);
        baos.write(lenBuf);
        baos.write(key);

        if (e.getValue() == null) {
          // SQL NULL value: write length -1 and no bytes.
          ByteConverter.int4(lenBuf, 0, -1);
          baos.write(lenBuf);
        } else {
          byte[] val = encoding.encode(e.getValue().toString());
          ByteConverter.int4(lenBuf, 0, val.length);
          baos.write(lenBuf);
          baos.write(val);
        }
      }
    } catch (IOException ioe) {
      throw new RedshiftException(
          GT.tr(
              "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."),
          RedshiftState.DATA_ERROR, ioe);
    }
    return baos.toByteArray();
  }

  /**
   * Renders a map in the hstore text format: {@code "k"=>"v", "k2"=>NULL}.
   * Quote and backslash characters inside keys/values are backslash-escaped.
   *
   * @param map map to render
   * @return hstore text representation; empty string for an empty map
   */
  public static String toString(Map<?, ?> map) {
    if (map.isEmpty()) {
      return "";
    }
    StringBuilder sb = new StringBuilder(map.size() * 8);
    for (Entry<?, ?> e : map.entrySet()) {
      appendEscaped(sb, e.getKey());
      sb.append("=>");
      appendEscaped(sb, e.getValue());
      sb.append(", ");
    }
    // Drop the trailing ", " appended after the last entry.
    sb.setLength(sb.length() - 2);
    return sb.toString();
  }

  /**
   * Appends {@code val} as a double-quoted, backslash-escaped hstore token,
   * or the unquoted literal {@code NULL} when the value is null.
   */
  private static void appendEscaped(StringBuilder sb, Object val) {
    if (val != null) {
      sb.append('"');
      String s = val.toString();
      for (int pos = 0; pos < s.length(); pos++) {
        char ch = s.charAt(pos);
        if (ch == '"' || ch == '\\') {
          sb.append('\\');
        }
        sb.append(ch);
      }
      sb.append('"');
    } else {
      sb.append("NULL");
    }
  }

  /**
   * Parses the hstore text format produced by {@link #toString(Map)} back into
   * a map. Expects every key and non-null value to be double-quoted and NULL
   * values to appear as the bare token {@code NULL}.
   *
   * @param s hstore text representation
   * @return parsed map; NULL values are represented as Java {@code null}
   */
  public static Map<String, String> fromString(String s) {
    Map<String, String> m = new HashMap<String, String>();
    int pos = 0;
    StringBuilder sb = new StringBuilder();
    while (pos < s.length()) {
      sb.setLength(0);
      int start = s.indexOf('"', pos);
      int end = appendUntilQuote(sb, s, start);
      String key = sb.toString();
      // Skip the closing quote and the "=>" separator.
      pos = end + 3;

      String val;
      if (s.charAt(pos) == 'N') {
        // Bare NULL token: no quotes, advance past "NULL".
        val = null;
        pos += 4;
      } else {
        sb.setLength(0);
        end = appendUntilQuote(sb, s, pos);
        val = sb.toString();
        pos = end;
      }
      // Skip past the ',' (or the final closing quote position).
      pos++;
      m.put(key, val);
    }
    return m;
  }

  /**
   * Copies characters from {@code s} into {@code sb}, starting just after the
   * quote at {@code pos}, un-escaping backslash sequences, until the closing
   * unescaped quote. Returns the index of that closing quote.
   */
  private static int appendUntilQuote(StringBuilder sb, String s, int pos) {
    for (pos += 1; pos < s.length(); pos++) {
      char ch = s.charAt(pos);
      if (ch == '"') {
        break;
      }
      if (ch == '\\') {
        // Escaped character: emit the character following the backslash verbatim.
        pos++;
        ch = s.charAt(pos);
      }
      sb.append(ch);
    }
    return pos;
  }
}
8,411
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftBinaryObject.java
/* * Copyright (c) 2011, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.sql.SQLException; /** * RedshiftBinaryObject is an interface that classes extending {@link RedshiftObject} can use to take advantage of * more optimal binary encoding of the data type. */ public interface RedshiftBinaryObject { /** * This method is called to set the value of this object. * * @param value data containing the binary representation of the value of the object * @param offset the offset in the byte array where object data starts * @throws SQLException thrown if value is invalid for this type */ void setByteValue(byte[] value, int offset) throws SQLException; /** * This method is called to return the number of bytes needed to store this object in the binary * form required by com.amazon.redshift. * * @return the number of bytes needed to store this object */ int lengthInBytes(); /** * This method is called the to store the value of the object, in the binary form required by * com.amazon.redshift. * * @param bytes the array to store the value, it is guaranteed to be at lest * {@link #lengthInBytes} in size. * @param offset the offset in the byte array where object must be stored */ void toBytes(byte[] bytes, int offset); }
8,412
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/Base64.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; /** * <p>This code is a stripped down version of Robert Harder's Public Domain Base64 implementation. GZIP * support, InputStream and OutputStream stuff and some unneeded encode/decode methods have been * removed.</p> * * <p>-- Original comments follow --</p> * * <p>Encodes and decodes to and from Base64 notation.</p> * * <p> * Change Log: * </p> * <ul> * <li>v2.1 - Cleaned up javadoc comments and unused variables and methods. Added some convenience * methods for reading and writing to and from files.</li> * <li>v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on systems with other * encodings (like EBCDIC).</li> * <li>v2.0.1 - Fixed an error when decoding a single byte, that is, when the encoded data was a * single byte.</li> * <li>v2.0 - I got rid of methods that used booleans to set options. Now everything is more * consolidated and cleaner. The code now detects when data that's being decoded is gzip-compressed * and will decompress it automatically. Generally things are cleaner. You'll probably have to * change some method calls that you were making to support the new options format ({@code int}s * that you "OR" together).</li> * <li>v1.5.1 - Fixed bug when decompressing and decoding to a byte[] using * {@code decode( String s, boolean gzipCompressed )}. Added the ability to "suspend" encoding in * the Output Stream so you can turn on and off the encoding if you need to embed base64 data in an * otherwise "normal" stream (like an XML file).</li> * <li>v1.5 - Output stream pases on flush() command but doesn't do anything itself. This helps when * using GZIP streams. 
Added the ability to GZip-compress objects before encoding them.</li> * <li>v1.4 - Added helper methods to read/write files.</li> * <li>v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.</li> * <li>v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input stream where last * buffer being read, if not completely full, was not returned.</li> * <li>v1.3.4 - Fixed when "improperly padded stream" error was thrown at the wrong time.</li> * <li>v1.3.3 - Fixed I/O streams which were totally messed up.</li> * </ul> * * <p> * I am placing this code in the Public Domain. Do with it as you will. This software comes with no * guarantees or warranties but with plenty of well-wishing instead! Please visit * <a href="http://iharder.net/base64">http://iharder.net/base64</a> periodically to check for * updates or to contribute improvements. * </p> * * @author Robert Harder * @author rob@iharder.net * @version 2.1 */ public class Base64 { /* ******** P U B L I C F I E L D S ******** */ /** * No options specified. Value is zero. */ public static final int NO_OPTIONS = 0; /** * Specify encoding. */ public static final int ENCODE = 1; /** * Specify decoding. */ public static final int DECODE = 0; /** * Don't break lines when encoding (violates strict Base64 specification). */ public static final int DONT_BREAK_LINES = 8; /* ******** P R I V A T E F I E L D S ******** */ /** * Maximum line length (76) of Base64 output. */ private static final int MAX_LINE_LENGTH = 76; /** * The equals sign (=) as a byte. */ private static final byte EQUALS_SIGN = (byte) '='; /** * The new line character (\n) as a byte. */ private static final byte NEW_LINE = (byte) '\n'; /** * Preferred encoding. */ private static final String PREFERRED_ENCODING = "UTF-8"; /** * The 64 valid Base64 values. 
*/ private static final byte[] ALPHABET; private static final byte[] _NATIVE_ALPHABET = { /* May be something funny like EBCDIC */ (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G', (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N', (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U', (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z', (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g', (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n', (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u', (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z', (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5', (byte) '6', (byte) '7', (byte) '8', (byte) '9', (byte) '+', (byte) '/' }; /* Determine which ALPHABET to use. */ static { byte[] bytes; try { bytes = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" .getBytes(PREFERRED_ENCODING); } catch (java.io.UnsupportedEncodingException use) { bytes = _NATIVE_ALPHABET; // Fall back to native encoding } ALPHABET = bytes; } /** * Translates a Base64 value to either its 6-bit reconstruction value or a negative number * indicating some other meaning. 
**/ private static final byte[] DECODABET = {-9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8 -5, -5, // Whitespace: Tab and Linefeed -9, -9, // Decimal 11 - 12 -5, // Whitespace: Carriage Return -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26 -9, -9, -9, -9, -9, // Decimal 27 - 31 -5, // Whitespace: Space -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42 62, // Plus sign at decimal 43 -9, -9, -9, // Decimal 44 - 46 63, // Slash at decimal 47 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine -9, -9, -9, // Decimal 58 - 60 -1, // Equals sign at decimal 61 -9, -9, -9, // Decimal 62 - 64 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N' 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z' -9, -9, -9, -9, -9, -9, // Decimal 91 - 96 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm' 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z' -9, -9, -9, -9 // Decimal 123 - 126 /* * ,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 127 - 139 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 140 - 152 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 153 - 165 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 166 - 178 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 179 - 191 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 192 - 204 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 205 - 217 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 218 - 230 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9, // Decimal 231 - 243 * -9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9 // Decimal 244 - 255 */ }; // I think I end up not using the BAD_ENCODING indicator. 
// private final static byte BAD_ENCODING = -9; // Indicates error in encoding
private static final byte WHITE_SPACE_ENC = -5; // Indicates white space in encoding
private static final byte EQUALS_SIGN_ENC = -1; // Indicates equals sign in encoding

/**
 * Defeats instantiation.
 */
private Base64() {
}

/* ******** E N C O D I N G M E T H O D S ******** */

/**
 * Encodes up to three bytes of the array <var>source</var> and writes the resulting four Base64
 * bytes to <var>destination</var>. The source and destination arrays can be manipulated anywhere
 * along their length by specifying <var>srcOffset</var> and <var>destOffset</var>. This method
 * does not check to make sure your arrays are large enough to accommodate <var>srcOffset</var> + 3
 * for the <var>source</var> array or <var>destOffset</var> + 4 for the <var>destination</var>
 * array. The actual number of significant bytes in your array is given by <var>numSigBytes</var>.
 *
 * @param source the array to convert
 * @param srcOffset the index where conversion begins
 * @param numSigBytes the number of significant bytes in your array
 * @param destination the array to hold the conversion
 * @param destOffset the index where output will be put
 * @return the <var>destination</var> array
 * @since 1.3
 */
private static byte[] encode3to4(byte[] source, int srcOffset, int numSigBytes,
    byte[] destination, int destOffset) {
  //           1         2         3
  // 01234567890123456789012345678901 Bit position
  // --------000000001111111122222222 Array position from threeBytes
  // --------|    ||    ||    ||    | Six bit groups to index ALPHABET
  //          >>18  >>12  >> 6  >> 0  Right shift necessary
  //                0x3f  0x3f  0x3f  Additional AND

  // Create buffer with zero-padding if there are only one or two
  // significant bytes passed in the array.
  // We have to shift left 24 in order to flush out the 1's that appear
  // when Java treats a value as negative that is cast from a byte to an int.
  int inBuff = (numSigBytes > 0 ? ((source[srcOffset] << 24) >>> 8) : 0)
      | (numSigBytes > 1 ? ((source[srcOffset + 1] << 24) >>> 16) : 0)
      | (numSigBytes > 2 ? ((source[srcOffset + 2] << 24) >>> 24) : 0);

  switch (numSigBytes) {
    case 3:
      destination[destOffset] = ALPHABET[(inBuff >>> 18)];
      destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
      destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f];
      destination[destOffset + 3] = ALPHABET[(inBuff) & 0x3f];
      return destination;

    case 2:
      // Two significant input bytes -> one '=' padding character.
      destination[destOffset] = ALPHABET[(inBuff >>> 18)];
      destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
      destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f];
      destination[destOffset + 3] = EQUALS_SIGN;
      return destination;

    case 1:
      // One significant input byte -> two '=' padding characters.
      destination[destOffset] = ALPHABET[(inBuff >>> 18)];
      destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f];
      destination[destOffset + 2] = EQUALS_SIGN;
      destination[destOffset + 3] = EQUALS_SIGN;
      return destination;

    default:
      return destination;
  } // end switch
} // end encode3to4

/**
 * Encodes a byte array into Base64 notation. Does not GZip-compress data.
 *
 * @param source The data to convert
 * @return Base64 notation of input data
 * @since 1.4
 */
public static String encodeBytes(byte[] source) {
  return encodeBytes(source, 0, source.length, NO_OPTIONS);
} // end encodeBytes

/**
 * <p>Encodes a byte array into Base64 notation.</p>
 *
 * <p>Valid options:</p>
 *
 * <pre>
 * GZIP: gzip-compresses object before encoding it.
 * DONT_BREAK_LINES: don't break lines at 76 characters
 * <i>Note: Technically, this makes your encoding non-compliant.</i>
 * </pre>
 *
 * <p>Example: <code>encodeBytes( myData, Base64.GZIP )</code> or</p>
 *
 * <p>Example: <code>encodeBytes(
 * myData, Base64.GZIP | Base64.DONT_BREAK_LINES )</code></p>
 *
 * @param source The data to convert
 * @param options Specified options
 * @return Base64 notation
 * @see Base64#DONT_BREAK_LINES
 * @since 2.0
 */
public static String encodeBytes(byte[] source, int options) {
  return encodeBytes(source, 0, source.length, options);
} // end encodeBytes

/**
 * Encodes a byte array into Base64 notation. Does not GZip-compress data.
 *
 * @param source The data to convert
 * @param off Offset in array where conversion should begin
 * @param len Length of data to convert
 * @return Base64 notation
 * @since 1.4
 */
public static String encodeBytes(byte[] source, int off, int len) {
  return encodeBytes(source, off, len, NO_OPTIONS);
} // end encodeBytes

/**
 * <p>Encodes a byte array into Base64 notation.</p>
 *
 * <p>Valid options:</p>
 *
 * <pre>
 * GZIP: gzip-compresses object before encoding it.
 * DONT_BREAK_LINES: don't break lines at 76 characters
 * <i>Note: Technically, this makes your encoding non-compliant.</i>
 * </pre>
 *
 * <p>Example: <code>encodeBytes( myData, Base64.GZIP )</code> or</p>
 *
 * <p>Example: <code>encodeBytes(
 * myData, Base64.GZIP | Base64.DONT_BREAK_LINES )</code></p>
 *
 * @param source The data to convert
 * @param off Offset in array where conversion should begin
 * @param len Length of data to convert
 * @param options Specified options
 * @return Base64 notation
 * @see Base64#DONT_BREAK_LINES
 * @since 2.0
 */
public static String encodeBytes(byte[] source, int off, int len, int options) {
  // Isolate options
  int dontBreakLines = (options & DONT_BREAK_LINES);

  // Else, don't compress. Better not to use streams at all then.
  {
    // Convert option to boolean in way that code likes it.
    boolean breakLines = dontBreakLines == 0;

    int len43 = len * 4 / 3;
    byte[] outBuff = new byte[(len43) // Main 4:3
        + ((len % 3) > 0 ? 4 : 0) // Account for padding
        + (breakLines ? (len43 / MAX_LINE_LENGTH) : 0)]; // New lines
    int d = 0;
    int e = 0;
    int len2 = len - 2;
    int lineLength = 0;
    for (; d < len2; d += 3, e += 4) {
      encode3to4(source, d + off, 3, outBuff, e);

      lineLength += 4;
      if (breakLines && lineLength == MAX_LINE_LENGTH) {
        // Insert a newline after the quartet just written; 'e' then advances
        // an extra slot so the next quartet lands after the newline.
        outBuff[e + 4] = NEW_LINE;
        e++;
        lineLength = 0;
      } // end if: end of line
    } // end for: each piece of array

    if (d < len) {
      // Final partial group (1 or 2 leftover bytes) -> padded quartet.
      encode3to4(source, d + off, len - d, outBuff, e);
      e += 4;
    } // end if: some padding needed

    // Return value according to relevant encoding.
    try {
      return new String(outBuff, 0, e, PREFERRED_ENCODING);
    } catch (java.io.UnsupportedEncodingException uue) {
      return new String(outBuff, 0, e);
    }
  }
}

/* ******** D E C O D I N G M E T H O D S ******** */

/**
 * Decodes four bytes from array <var>source</var> and writes the resulting bytes (up to three of
 * them) to <var>destination</var>. The source and destination arrays can be manipulated anywhere
 * along their length by specifying <var>srcOffset</var> and <var>destOffset</var>. This method
 * does not check to make sure your arrays are large enough to accommodate <var>srcOffset</var> + 4
 * for the <var>source</var> array or <var>destOffset</var> + 3 for the <var>destination</var>
 * array. This method returns the actual number of bytes that were converted from the Base64
 * encoding.
 *
 * @param source the array to convert
 * @param srcOffset the index where conversion begins
 * @param destination the array to hold the conversion
 * @param destOffset the index where output will be put
 * @return the number of decoded bytes converted
 * @since 1.3
 */
private static int decode4to3(byte[] source, int srcOffset, byte[] destination, int destOffset) {
  // Example: Dk==
  if (source[srcOffset + 2] == EQUALS_SIGN) {
    // Two ways to do the same thing. Don't know which way I like best.
    // int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
    // | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 );
    int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
        | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12);

    destination[destOffset] = (byte) (outBuff >>> 16);
    return 1;
  } else if (source[srcOffset + 3] == EQUALS_SIGN) {
    // Example: DkL=
    // Two ways to do the same thing. Don't know which way I like best.
    // int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
    // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 )
    // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 );
    int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
        | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12)
        | ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6);

    destination[destOffset] = (byte) (outBuff >>> 16);
    destination[destOffset + 1] = (byte) (outBuff >>> 8);
    return 2;
  } else {
    // Example: DkLE
    try {
      // Two ways to do the same thing. Don't know which way I like best.
      // int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 )
      // | ( ( DECODABET[ source[ srcOffset + 1 ] ] << 24 ) >>> 12 )
      // | ( ( DECODABET[ source[ srcOffset + 2 ] ] << 24 ) >>> 18 )
      // | ( ( DECODABET[ source[ srcOffset + 3 ] ] << 24 ) >>> 24 );
      int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18)
          | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12)
          | ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6)
          | ((DECODABET[source[srcOffset + 3]] & 0xFF));

      destination[destOffset] = (byte) (outBuff >> 16);
      destination[destOffset + 1] = (byte) (outBuff >> 8);
      destination[destOffset + 2] = (byte) (outBuff);

      return 3;
    } catch (Exception e) {
      // NOTE(review): error path prints diagnostics to stdout and returns -1;
      // callers must treat a negative return as a decode failure.
      System.out.println("" + source[srcOffset] + ": " + (DECODABET[source[srcOffset]]));
      System.out.println("" + source[srcOffset + 1] + ": " + (DECODABET[source[srcOffset + 1]]));
      System.out.println("" + source[srcOffset + 2] + ": " + (DECODABET[source[srcOffset + 2]]));
      System.out.println("" + source[srcOffset + 3] + ": " + (DECODABET[source[srcOffset + 3]]));
      return -1;
    } // end catch
  }
} // end decodeToBytes

/**
 * Very low-level access to decoding ASCII characters in the form of a byte array. Does not
 * support automatically gunzipping or any other "fancy" features.
* * @param source The Base64 encoded data * @param off The offset of where to begin decoding * @param len The length of characters to decode * @return decoded data * @since 1.3 */ public static byte[] decode(byte[] source, int off, int len) { int len34 = len * 3 / 4; byte[] outBuff = new byte[len34]; // Upper limit on size of output int outBuffPosn = 0; byte[] b4 = new byte[4]; int b4Posn = 0; int i = 0; byte sbiCrop = 0; byte sbiDecode = 0; for (i = off; i < off + len; i++) { sbiCrop = (byte) (source[i] & 0x7f); // Only the low seven bits sbiDecode = DECODABET[sbiCrop]; if (sbiDecode >= WHITE_SPACE_ENC) { // White space, Equals sign or better if (sbiDecode >= EQUALS_SIGN_ENC) { b4[b4Posn++] = sbiCrop; if (b4Posn > 3) { outBuffPosn += decode4to3(b4, 0, outBuff, outBuffPosn); b4Posn = 0; // If that was the equals sign, break out of 'for' loop if (sbiCrop == EQUALS_SIGN) { break; } } // end if: quartet built } // end if: equals sign or better } else { // end if: white space, equals sign or better System.err.println("Bad Base64 input character at " + i + ": " + source[i] + "(decimal)"); return null; } // end else: } // each input character byte[] out = new byte[outBuffPosn]; System.arraycopy(outBuff, 0, out, 0, outBuffPosn); return out; } // end decode /** * Decodes data from Base64 notation, automatically detecting gzip-compressed data and * decompressing it. * * @param s the string to decode * @return the decoded data * @since 1.4 */ public static byte[] decode(String s) { byte[] bytes; try { bytes = s.getBytes(PREFERRED_ENCODING); } catch (java.io.UnsupportedEncodingException uee) { bytes = s.getBytes(); } // </change> // Decode bytes = decode(bytes, 0, bytes.length); return bytes; } // end decode } // end class Base64
8,413
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ExtensibleDigest.java
package com.amazon.redshift.util; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; /** * Extensible hashing utility function to obfuscate passwords before network transmission. * */ public class ExtensibleDigest { private ExtensibleDigest() { } /** * Encodes user/password/salt information in the following way: SHA2(SHA2(password + user) + salt). * * @param clientNonce The client nonce. * @param password The connecting user's password. * @param salt salt sent by the server. * @param algoName Algorithm name such as "SHA-256" etc. * @param serverNonce random number generated by server * @return A byte array of the digest. */ public static byte[] encode(byte[] clientNonce, byte[] password, byte[] salt, String algoName, byte[] serverNonce) { MessageDigest md; byte[] passDigest; try { md = MessageDigest.getInstance(algoName); md.update(password); md.update(salt); passDigest = md.digest(); md = MessageDigest.getInstance(algoName); md.update(passDigest); md.update(serverNonce); md.update(clientNonce); passDigest = md.digest(); } catch (NoSuchAlgorithmException e) { throw new IllegalStateException("Unable to encode password with extensible hashing:" + algoName, e); } return passDigest; } }
8,414
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftTime.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.sql.PreparedStatement;
import java.sql.Time;
import java.util.Calendar;

/**
 * Augments the built-in {@link Time} with an optional explicit time zone
 * (carried by a {@link Calendar}) and sub-millisecond precision (nanoseconds).
 */
public class RedshiftTime extends Time {

  // Serialization version marker.
  private static final long serialVersionUID = 3592492258676494276L;

  // Optional calendar carrying the time zone; null means "time without time zone".
  private Calendar calendar;

  // Fractional-second precision beyond milliseconds.
  // NOTE(review): nanos does not participate in equals()/hashCode(); two
  // instances with the same millis but different nanos compare equal.
  private int nanos;

  /**
   * Constructs a <code>RedshiftTime</code> without a time zone.
   *
   * @param time milliseconds since January 1, 1970, 00:00:00 GMT; a negative number is
   *        milliseconds before January 1, 1970, 00:00:00 GMT.
   * @see Time#Time(long)
   */
  public RedshiftTime(long time) {
    this(time, null);
  }

  /**
   * Constructs a <code>RedshiftTime</code> without a time zone but with nanoseconds.
   *
   * @param time milliseconds since the epoch.
   * @param nanos sub-millisecond precision in nanoseconds.
   */
  public RedshiftTime(long time, int nanos) {
    this(time, null, nanos);
  }

  /**
   * Constructs a <code>RedshiftTime</code> with the given calendar object. The calendar object
   * is optional. If absent, the driver will treat the time as <code>time without time
   * zone</code>. When present, the driver will treat the time as a <code>time with time
   * zone</code> using the <code>TimeZone</code> in the calendar object. Furthermore, this
   * calendar will be used instead of the calendar object passed to
   * {@link PreparedStatement#setTime(int, Time, Calendar)}.
   *
   * @param time milliseconds since the epoch.
   * @param calendar the calendar object containing the time zone or <code>null</code>.
   * @see Time#Time(long)
   */
  public RedshiftTime(long time, Calendar calendar) {
    this(time, calendar, 0);
  }

  /**
   * Stores a time together with an optional calendar and nanoseconds.
   *
   * @param time milliseconds since the epoch.
   * @param calendar the calendar object containing the time zone or <code>null</code>.
   * @param nanos sub-millisecond precision in nanoseconds.
   */
  public RedshiftTime(long time, Calendar calendar, int nanos) {
    super(time);
    setCalendar(calendar);
    this.nanos = nanos;
  }

  /**
   * Copies the millisecond value from an existing {@link Time} and attaches nanoseconds.
   *
   * @param time source time.
   * @param nanos sub-millisecond precision in nanoseconds.
   */
  public RedshiftTime(Time time, int nanos) {
    this(time.getTime(), null, nanos);
  }

  /**
   * Sets the calendar object for this time.
   *
   * @param calendar the calendar object or <code>null</code>.
   */
  public void setCalendar(Calendar calendar) {
    this.calendar = calendar;
  }

  /**
   * Returns the calendar object for this time.
   *
   * @return the calendar or <code>null</code>.
   */
  public Calendar getCalendar() {
    return calendar;
  }

  /**
   * Returns the nanoseconds of this time.
   *
   * @return nanos
   */
  public int getNanos() {
    return nanos;
  }

  @Override
  public int hashCode() {
    // Same value the classic prime-accumulator form produced:
    // 31 * super.hashCode() + calendar hash (0 when absent).
    int calendarHash = (calendar != null) ? calendar.hashCode() : 0;
    return 31 * super.hashCode() + calendarHash;
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof RedshiftTime) || !super.equals(obj)) {
      return false;
    }
    RedshiftTime other = (RedshiftTime) obj;
    return (calendar == null) ? (other.calendar == null) : calendar.equals(other.calendar);
  }

  @Override
  public Object clone() {
    RedshiftTime copy = (RedshiftTime) super.clone();
    Calendar cal = getCalendar();
    if (cal != null) {
      // Deep-copy the calendar so the clone is fully independent.
      copy.setCalendar((Calendar) cal.clone());
    }
    return copy;
  }
}
8,415
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/CanEstimateSize.java
/* * Copyright (c) 2015, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; public interface CanEstimateSize { long getSize(); }
8,416
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/URLCoder.java
/* * Copyright (c) 2018, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; /** * <p>This class helps with URL encoding and decoding. UTF-8 encoding is used by default to make * encoding consistent across the driver, and encoding might be changed via {@code * redshift.url.encoding} property</p> * * <p>Note: this should not be used outside of Redshift source, this is not a public API of the * driver.</p> */ public final class URLCoder { private static final String ENCODING_FOR_URL = System.getProperty("redshift.url.encoding", "UTF-8"); /** * Decodes {@code x-www-form-urlencoded} string into Java string. * * @param encoded encoded value * @return decoded value * @see URLDecoder#decode(String, String) */ public static String decode(String encoded) { try { return URLDecoder.decode(encoded, ENCODING_FOR_URL); } catch (UnsupportedEncodingException e) { throw new IllegalStateException( "Unable to decode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e); } } /** * Encodes Java string into {@code x-www-form-urlencoded} format * * @param plain input value * @return encoded value * @see URLEncoder#encode(String, String) */ public static String encode(String plain) { try { return URLEncoder.encode(plain, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException( "Unable to encode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e); } } }
8,417
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ByteStreamWriter.java
/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

/**
 * A class that can be used to set a byte array parameter by writing to an OutputStream.
 *
 * <p>The intended use case is wanting to write data to a byte array parameter that is stored off
 * heap in a direct memory pool or in some other form that is inconvenient to assemble into a single
 * heap-allocated buffer.</p>
 * <p> Users should write their own implementation depending on the
 * original data source. The driver provides a built-in implementation supporting the {@link
 * java.nio.ByteBuffer} class, see {@link ByteBufferByteStreamWriter}.</p>
 * <p> Intended usage is to simply pass in an instance using
 * {@link java.sql.PreparedStatement#setObject(int, Object)}:</p>
 * <pre>
 * int bufLength = someBufferObject.length();
 * preparedStatement.setObject(1, new MyByteStreamWriter(bufLength, someBufferObject));
 * </pre>
 * <p>The length must be known ahead of the stream being written to. </p>
 * <p>This provides the application more control over memory management than calling
 * {@link java.sql.PreparedStatement#setBinaryStream(int, InputStream)} as with the latter the
 * caller has no control over the buffering strategy. </p>
 */
public interface ByteStreamWriter {

  /**
   * Returns the length of the stream.
   *
   * <p> This must be known ahead of calling {@link #writeTo(ByteStreamTarget)}. </p>
   *
   * @return the number of bytes in the stream.
   */
  int getLength();

  /**
   * Write the data to the provided {@link OutputStream}.
   *
   * <p> Should not write more than {@link #getLength()} bytes. If attempted, the provided stream
   * will throw an {@link java.io.IOException}. </p>
   *
   * @param target the stream to write the data to
   * @throws IOException if the underlying stream throws or there is some other error.
   */
  void writeTo(ByteStreamTarget target) throws IOException;

  /**
   * Provides a target to write bytes to.
   */
  interface ByteStreamTarget {

    /**
     * Provides an output stream to write bytes to.
     *
     * @return an output stream
     */
    OutputStream getOutputStream();
  }
}
8,418
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ReaderInputStream.java
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CoderResult;

/**
 * <p>ReaderInputStream accepts a UTF-16 char stream (Reader) as input and
 * converts it to a UTF-8 byte stream (InputStream) as output.</p>
 *
 * <p>This is the inverse of java.io.InputStreamReader which converts a
 * binary stream to a character stream.</p>
 */
public class ReaderInputStream extends InputStream {
  private static final int DEFAULT_CHAR_BUFFER_SIZE = 8 * 1024;
  private static final Charset UTF_8 = Charset.forName("UTF-8");

  private final Reader reader;
  private final CharsetEncoder encoder;
  // Encoded output bytes waiting to be handed to the caller.
  private final ByteBuffer bbuf;
  // Characters read from the reader, waiting to be encoded.
  private final CharBuffer cbuf;

  /**
   * true when all of the characters have been read from the reader into inbuf.
   */
  private boolean endOfInput;
  // Scratch buffer for the single-byte read() path.
  private final byte[] oneByte = new byte[1];

  public ReaderInputStream(Reader reader) {
    this(reader, DEFAULT_CHAR_BUFFER_SIZE);
  }

  /**
   * Allow ReaderInputStreamTest to use small buffers to force UTF-16
   * surrogate pairs to cross buffer boundaries in interesting ways.
   * Because this constructor is package-private, the unit test must be in
   * the same package.
   */
  ReaderInputStream(Reader reader, int charBufferSize) {
    if (reader == null) {
      throw new IllegalArgumentException("reader cannot be null");
    }
    // The standard UTF-8 encoder will only encode a UTF-16 surrogate pair
    // when both surrogates are available in the CharBuffer.
    if (charBufferSize < 2) {
      throw new IllegalArgumentException("charBufferSize must be at least 2 chars");
    }
    this.reader = reader;
    this.encoder = UTF_8.newEncoder();
    // encoder.maxBytesPerChar() always returns 3.0 for UTF-8
    this.bbuf = ByteBuffer.allocate(3 * charBufferSize);
    this.bbuf.flip(); // prepare for subsequent write
    this.cbuf = CharBuffer.allocate(charBufferSize);
    this.cbuf.flip(); // prepare for subsequent write
  }

  // Refills cbuf from the reader and re-encodes its contents into bbuf.
  // Must only be called once bbuf has been fully drained.
  // NOTE(review): correctness of the EOF handling in read(byte[],int,int)
  // appears to rely on the UTF-8 encoder emitting no bytes from flush() —
  // confirm if the charset is ever changed from UTF-8.
  private void advance() throws IOException {
    assert !endOfInput;
    assert !bbuf.hasRemaining()
        : "advance() should be called when output byte buffer is empty. bbuf: " + bbuf
            + ", as string: " + bbuf.asCharBuffer().toString();
    assert cbuf.remaining() < 2;

    // given that bbuf.capacity = 3 x cbuf.capacity, the only time that we should have a
    // remaining char is if the last char read was the 1st half of a surrogate pair
    if (cbuf.remaining() == 0) {
      cbuf.clear();
    } else {
      cbuf.compact();
    }

    int n = reader.read(cbuf); // read #1
    cbuf.flip();

    CoderResult result;

    endOfInput = n == -1;

    bbuf.clear();
    result = encoder.encode(cbuf, bbuf, endOfInput);
    checkEncodeResult(result);

    if (endOfInput) {
      result = encoder.flush(bbuf);
      checkEncodeResult(result);
    }

    bbuf.flip();
  }

  // Converts a malformed/unmappable coder result into the matching exception.
  private void checkEncodeResult(CoderResult result) throws CharacterCodingException {
    if (result.isError()) {
      result.throwException();
    }
  }

  @Override
  public int read() throws IOException {
    int res = 0;
    while (res != -1) {
      res = read(oneByte);
      if (res > 0) {
        return (oneByte[0] & 0xFF);
      }
    }
    return -1;
  }

  // The implementation of InputStream.read(byte[], int, int) silently ignores
  // an IOException thrown by overrides of the read() method.
  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    if (b == null) {
      throw new NullPointerException();
    } else if (off < 0 || len < 0 || len > b.length - off) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return 0;
    }

    if (endOfInput && !bbuf.hasRemaining()) {
      return -1;
    }

    int totalRead = 0;
    while (len > 0 && !endOfInput) {
      if (bbuf.hasRemaining()) {
        int remaining = Math.min(len, bbuf.remaining());
        bbuf.get(b, off, remaining);
        totalRead += remaining;
        off += remaining;
        len -= remaining;
        if (len == 0) {
          return totalRead;
        }
      }
      advance();
    }
    if (endOfInput && !bbuf.hasRemaining() && totalRead == 0) {
      return -1;
    }
    return totalRead;
  }

  @Override
  public void close() throws IOException {
    endOfInput = true;
    reader.close();
  }
}
8,419
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftIntervalYearToMonth.java
package com.amazon.redshift.util;

import java.io.Serializable;
import java.sql.SQLException;
import java.text.DecimalFormat;
import java.util.Locale;

import com.amazon.redshift.util.RedshiftInterval;

/**
 * An interval restricted to the year/month fields; any day-time component is
 * rejected at construction and forced to zero by the setters.
 */
public class RedshiftIntervalYearToMonth extends RedshiftInterval implements Serializable, Cloneable {

  /**
   * required by the driver.
   */
  public RedshiftIntervalYearToMonth() {
    setType("intervaly2m");
  }

  /**
   * Initialize an interval with a given interval string representation.
   * This method simply calls the parent public method setValue().
   *
   * @param value String representated interval (e.g. '3 years 2 mons').
   * @throws SQLException Is thrown if the string representation has an
   *         unknown format, or if it contains any day-time parts.
   * @see #setValue(String)
   */
  public RedshiftIntervalYearToMonth(String value) throws SQLException {
    this();
    setValue(value);
    if (this.getDays() != 0 || this.getHours() != 0 || this.getMinutes() != 0
        || this.getWholeSeconds() != 0 || this.getMicroSeconds() != 0) {
      throw new RedshiftException("Invalid value for Interval Year To Month. "
          + "Value cannot contain day-time parts.");
    }
  }

  /**
   * Initialize an interval from separate year and month counts.
   *
   * @param year number of years
   * @param month number of months
   * @throws SQLException declared for parity with the other constructors
   */
  public RedshiftIntervalYearToMonth(int year, int month) throws SQLException {
    this();
    setValue(year * 12 + month, 0);
  }

  /**
   * Initialize an interval from a total month count.
   *
   * @param month total number of months
   * @throws SQLException declared for parity with the other constructors
   */
  public RedshiftIntervalYearToMonth(int month) throws SQLException {
    this();
    setValue(month, 0);
  }

  /**
   * Set all values of this interval using just one specified value.
   *
   * @param month Total number of months (assuming 12 months in a year)
   */
  public void setValue(int month) {
    super.setValue(month, 0);
  }

  /**
   * Override the parent setValue method disallowing a non-zero value for time.
   *
   * <p>NOTE(review): these guards use {@code assert}, which is disabled unless
   * the JVM runs with -ea; in normal runs a non-zero argument is silently
   * discarded rather than rejected.</p>
   *
   * @param month Total number of months (assuming 12 months in a year)
   * @param time Should be 0.
   */
  @Override
  public void setValue(int month, long time) {
    assert(time == 0);
    super.setValue(month, 0);
  }

  /**
   * Override the parent setDays method disallowing a non-zero value.
   *
   * @param days Should be 0.
   */
  @Override
  public void setDays(int days) {
    assert(days == 0);
    super.setDays(0);
  }

  /**
   * Override the parent setHours method disallowing a non-zero value.
   *
   * @param hours Should be 0.
   */
  @Override
  public void setHours(int hours) {
    assert(hours == 0);
    super.setHours(0);
  }

  /**
   * Override the parent setMinutes method disallowing a non-zero value.
   *
   * @param minutes Should be 0.
   */
  @Override
  public void setMinutes(int minutes) {
    assert(minutes == 0);
    super.setMinutes(0);
  }

  /**
   * Override the parent setSeconds method disallowing a non-zero value.
   *
   * @param seconds Should be 0.
   */
  @Override
  public void setSeconds(double seconds) {
    assert(seconds == 0);
    super.setSeconds(0);
  }

  /**
   * Returns the stored interval information as a string.
   *
   * @return String represented interval
   */
  @Override
  public String getValue() {
    return String.format(
      Locale.ROOT,
      "%d years %d mons",
      this.getYears(),
      this.getMonths()
    );
  }

  /**
   * Add this interval's value to the passed interval. This is backwards to what I would expect, but
   * this makes it match the other existing add methods.
   *
   * @param interval intval to add
   */
  public void add(RedshiftIntervalYearToMonth interval) {
    interval.setValue(totalMonths() + interval.totalMonths());
  }

  /**
   * Returns whether an object is equal to this one or not.
   *
   * @param obj Object to compare with
   * @return true if the two intervals are identical
   */
  @Override
  public boolean equals(Object obj) {
    if (obj == null || !(obj instanceof RedshiftIntervalYearToMonth)) {
      return false;
    }

    if (obj == this) {
      return true;
    }

    final RedshiftIntervalYearToMonth pgi = (RedshiftIntervalYearToMonth) obj;

    return pgi.getYears() == this.getYears()
        && pgi.getMonths() == this.getMonths();
  }

  /**
   * Hash code derived from the same fields {@link #equals(Object)} compares.
   *
   * <p>Fix: this class previously overrode equals() without hashCode(),
   * breaking the equals/hashCode contract for hash-based collections.</p>
   *
   * @return hash code consistent with equals
   */
  @Override
  public int hashCode() {
    return 31 * getYears() + getMonths();
  }
}
8,420
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftJDBCMain.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import com.amazon.redshift.Driver; public class RedshiftJDBCMain { public static void main(String[] args) { java.net.URL url = Driver.class.getResource("/com/amazon/redshift/Driver.class"); System.out.printf("%n%s%n", com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME); System.out.printf("Found in: %s%n%n", url); System.out.printf("The Redshift JDBC driver is not an executable Java program.%n%n" + "You must install it according to the JDBC driver installation " + "instructions for your application / container / appserver, " + "then use it by specifying a JDBC URL of the form %n jdbc:redshift://%n" + "or using an application specific method.%n%n" + "See the Redshift JDBC documentation: https://docs.aws.amazon.com/redshift/latest/mgmt/configure-jdbc-connection.html%n%n" + "This command has had no effect.%n"); System.exit(1); } }
8,421
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/Gettable.java
/* * Copyright (c) 2018, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; public interface Gettable<K,V> { V get(K key); }
8,422
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/StreamWrapper.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

// Copyright (c) 2004, Open Cloud Limited.

package com.amazon.redshift.util;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

/**
 * Wrapper around a length-limited InputStream.
 *
 * <p>A wrapper may hold its payload either as an in-memory byte array or as an
 * InputStream. When constructed from a stream of unknown length, the data is
 * buffered in memory up to {@link #MAX_MEMORY_BUFFER_BYTES} and spilled to a
 * self-deleting temporary file beyond that.</p>
 *
 * @author Oliver Jowett (oliver@opencloud.com)
 */
public class StreamWrapper {

  // Threshold (bytes) above which an unknown-length stream is spilled to disk.
  private static final int MAX_MEMORY_BUFFER_BYTES = 51200;
  private static final String TEMP_FILE_PREFIX = "redshift-rsjdbc-stream";

  /**
   * Wraps a slice of an in-memory byte array.
   *
   * @param data backing array (not copied)
   * @param offset start of the slice within {@code data}
   * @param length number of bytes in the slice
   */
  public StreamWrapper(byte[] data, int offset, int length) {
    this.stream = null;
    this.rawData = data;
    this.offset = offset;
    this.length = length;
  }

  /**
   * Wraps a stream whose length is already known.
   *
   * @param stream source stream; caller retains responsibility for closing it
   * @param length number of bytes expected from the stream
   */
  public StreamWrapper(InputStream stream, int length) {
    this.stream = stream;
    this.rawData = null;
    this.offset = 0;
    this.length = length;
  }

  /**
   * Wraps a stream of unknown length by draining it. Small payloads are kept
   * in memory; larger ones are written to a temporary file that is deleted
   * once the replacement stream reaches EOF, is closed, or is finalized.
   *
   * @param stream source stream to drain
   * @throws RedshiftException if the payload exceeds {@code Integer.MAX_VALUE}
   *         bytes or an I/O error occurs while buffering
   */
  public StreamWrapper(InputStream stream) throws RedshiftException {
    try {
      ByteArrayOutputStream memoryOutputStream = new ByteArrayOutputStream();
      // -1 signals "hit the in-memory limit"; fall through to the disk path.
      final int memoryLength = copyStream(stream, memoryOutputStream, MAX_MEMORY_BUFFER_BYTES);
      byte[] rawData = memoryOutputStream.toByteArray();

      if (memoryLength == -1) {
        final int diskLength;
        final File tempFile = File.createTempFile(TEMP_FILE_PREFIX, null);
        FileOutputStream diskOutputStream = new FileOutputStream(tempFile);
        // Re-write the bytes already buffered in memory before draining the rest.
        diskOutputStream.write(rawData);
        try {
          diskLength = copyStream(stream, diskOutputStream, Integer.MAX_VALUE - rawData.length);
          if (diskLength == -1) {
            throw new RedshiftException(GT.tr("Object is too large to send over the protocol."),
                RedshiftState.NUMERIC_CONSTANT_OUT_OF_RANGE);
          }
          diskOutputStream.flush();
        } finally {
          diskOutputStream.close();
        }

        this.offset = 0;
        this.length = rawData.length + diskLength;
        this.rawData = null;
        this.stream = new FileInputStream(tempFile) {
          /*
           * Usually, closing stream should be done by pgjdbc clients. Here it's an internally
           * managed stream so we need to auto-close it and be sure to delete the temporary file
           * when doing so. Auto-closing will be done when the first occurs: reaching EOF or Garbage
           * Collection
           */
          private boolean closed = false;
          private int position = 0;

          /**
           * Check if we should auto-close this stream
           */
          private void checkShouldClose(int readResult) throws IOException {
            if (readResult == -1) {
              close();
            } else {
              position += readResult;
              // 'length' is the enclosing StreamWrapper's total payload size.
              if (position >= length) {
                close();
              }
            }
          }

          public int read(byte[] b) throws IOException {
            if (closed) {
              return -1;
            }
            int result = super.read(b);
            checkShouldClose(result);
            return result;
          }

          public int read(byte[] b, int off, int len) throws IOException {
            if (closed) {
              return -1;
            }
            int result = super.read(b, off, len);
            checkShouldClose(result);
            return result;
          }

          public void close() throws IOException {
            if (!closed) {
              super.close();
              tempFile.delete();
              closed = true;
            }
          }

          protected void finalize() throws IOException {
            // forcibly close it because super.finalize() may keep the FD open, which may prevent
            // file deletion
            close();
            super.finalize();
          }
        };
      } else {
        // Entire payload fit in memory.
        this.rawData = rawData;
        this.stream = null;
        this.offset = 0;
        this.length = rawData.length;
      }
    } catch (IOException e) {
      throw new RedshiftException(GT.tr("An I/O error occurred while sending to the backend."),
          RedshiftState.IO_ERROR, e);
    }
  }

  /**
   * Returns a stream over the wrapped data: either the underlying stream or a
   * fresh ByteArrayInputStream view over the in-memory slice.
   *
   * @return a stream positioned at the start of the payload
   */
  public InputStream getStream() {
    if (stream != null) {
      return stream;
    }

    return new java.io.ByteArrayInputStream(rawData, offset, length);
  }

  /** @return total payload length in bytes */
  public int getLength() {
    return length;
  }

  /** @return offset of the payload within the raw array (0 for stream-backed wrappers) */
  public int getOffset() {
    return offset;
  }

  /** @return the backing byte array, or null when the payload is stream-backed */
  public byte[] getBytes() {
    return rawData;
  }

  public String toString() {
    return "<stream of " + length + " bytes>";
  }

  /**
   * Copies up to {@code limit} bytes from input to output.
   *
   * @return the number of bytes copied, or -1 once {@code limit} bytes have
   *         been written. NOTE(review): a stream of exactly {@code limit}
   *         bytes is also reported as -1 even though it fits — confirm callers
   *         treat this as "spill" rather than a hard error in that edge case.
   */
  private static int copyStream(InputStream inputStream, OutputStream outputStream, int limit)
      throws IOException {
    int totalLength = 0;
    byte[] buffer = new byte[2048];
    int readLength = inputStream.read(buffer);
    while (readLength > 0) {
      totalLength += readLength;
      outputStream.write(buffer, 0, readLength);
      if (totalLength >= limit) {
        return -1;
      }
      readLength = inputStream.read(buffer);
    }
    return totalLength;
  }

  // Exactly one of 'stream'/'rawData' is non-null, fixed at construction time.
  private final InputStream stream;
  private final byte[] rawData;
  private final int offset;
  private final int length;
}
8,423
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/SharedTimer.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.util.Timer; import java.util.concurrent.atomic.AtomicInteger; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; public class SharedTimer { // Incremented for each Timer created, this allows each to have a unique Timer name private static final AtomicInteger timerCount = new AtomicInteger(0); private static final RedshiftLogger logger = RedshiftLogger.getDriverLogger(); private volatile Timer timer = null; private final AtomicInteger refCount = new AtomicInteger(0); public SharedTimer() { } public int getRefCount() { return refCount.get(); } public synchronized Timer getTimer() { if (timer == null) { int index = timerCount.incrementAndGet(); /* Temporarily switch contextClassLoader to the one that loaded this driver to avoid TimerThread preventing current contextClassLoader - which may be the ClassLoader of a web application - from being GC:ed. */ final ClassLoader prevContextCL = Thread.currentThread().getContextClassLoader(); try { /* Scheduled tasks whould not need to use .getContextClassLoader, so we just reset it to null */ Thread.currentThread().setContextClassLoader(null); timer = new Timer("Redshift-JDBC-SharedTimer-" + index, true); } finally { Thread.currentThread().setContextClassLoader(prevContextCL); } } refCount.incrementAndGet(); return timer; } public synchronized void releaseTimer() { int count = refCount.decrementAndGet(); if (count > 0) { // There are outstanding references to the timer so do nothing if(RedshiftLogger.isEnable() && logger != null) logger.log(LogLevel.DEBUG, "Outstanding references still exist so not closing shared Timer"); } else if (count == 0) { // This is the last usage of the Timer so cancel it so it's resources can be release. 
if(RedshiftLogger.isEnable() && logger != null) logger.log(LogLevel.DEBUG, "No outstanding references to shared Timer, will cancel and close it"); if (timer != null) { timer.cancel(); timer = null; } } else { // Should not get here under normal circumstance, probably a bug in app code. if(RedshiftLogger.isEnable() && logger != null) logger.log(LogLevel.INFO, "releaseTimer() called too many times; there is probably a bug in the calling code"); refCount.set(0); } } }
8,424
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/GT.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.text.MessageFormat;
import java.util.Locale;
import java.util.MissingResourceException;
import java.util.ResourceBundle;

/**
 * This class provides a wrapper around a gettext message catalog that can provide a localized
 * version of error messages. The caller provides a message String in the standard
 * java.text.MessageFormat syntax and any arguments it may need. The returned String is the
 * localized version if available or the original if not.
 */
public class GT {

  // Singleton translator; all access goes through the static tr(...) helper.
  private static final GT _gt = new GT();
  // Shared empty-args array so no-argument calls avoid an allocation.
  private static final Object[] noargs = new Object[0];

  /**
   * Translates and formats a message.
   *
   * @param message message key / MessageFormat pattern
   * @param args optional arguments substituted into the pattern
   * @return the localized, formatted message, or the original pattern
   *         formatted as-is when no translation exists
   */
  public static String tr(String message, Object... args) {
    return _gt.translate(message, args);
  }

  // Message catalog; null when no translation bundle is installed.
  private ResourceBundle bundle;

  private GT() {
    try {
      // The //JCP! lines below are build-time preprocessor directives selecting
      // the bundle-lookup call per target JDBC spec level; do not edit them.
      //JCP! if mvn.project.property.redshift.jdbc.spec < "JDBC4.1"
      //JCP>     bundle = ResourceBundle.getBundle("com.amazon.redshift.translation.messages");
      //JCP! else
      bundle = ResourceBundle.getBundle("com.amazon.redshift.translation.messages",
          Locale.getDefault(Locale.Category.DISPLAY));
      //JCP! endif
    } catch (MissingResourceException mre) {
      // translation files have not been installed
      bundle = null;
    }
  }

  /**
   * Looks the message up in the bundle (falling back to the untranslated text)
   * and runs it through MessageFormat.
   */
  private String translate(String message, Object[] args) {
    if (bundle != null && message != null) {
      try {
        message = bundle.getString(message);
      } catch (MissingResourceException mre) {
        // If we can't find a translation, just
        // use the untranslated message.
      }
    }

    // If we don't have any parameters we still need to run
    // this through the MessageFormat(ter) to allow the same
    // quoting and escaping rules to be used for all messages.
    //
    if (args == null) {
      args = noargs;
    }

    // Replace placeholders with arguments
    //
    if (message != null) {
      message = MessageFormat.format(message, args);
    }

    return message;
  }
}
8,425
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftPropertyMaxResultBufferParser.java
/*
 * Copyright (c) 2019, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.lang.management.ManagementFactory;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;

/**
 * Parser for max-result-buffer style connection properties. Accepts either an
 * absolute byte count with an optional K/M/G/T multiplier (e.g. "100M") or a
 * percentage of the maximum heap (e.g. "50p", "50pct", "50percent"). The
 * result is always capped at 90% of the maximum heap.
 */
public class RedshiftPropertyMaxResultBufferParser {

  private static final RedshiftLogger logger = RedshiftLogger.getDriverLogger();

  // Suffixes that mark the value as a percentage of max heap memory.
  private static final String[] PERCENT_PHRASES = new String[]{
      "p",
      "pct",
      "percent"
  };

  /**
   * Method to parse value of max result buffer size.
   *
   * @param value string containing size of bytes with optional multiplier (T, G, M or K) or percent
   *        value to declare max percent of heap memory to use.
   * @param propertyName name of the property being parsed, used in error messages.
   * @return value of max result buffer size.
   * @throws RedshiftException Exception when given value can't be parsed.
   */
  public static long parseProperty(String value, String propertyName) throws RedshiftException {
    long result = -1;
    if (checkIfValueContainsPercent(value)) {
      result = parseBytePercentValue(value, propertyName);
    } else if (checkIfValueExistsToBeParsed(value)) {
      result = parseByteValue(value, propertyName);
    }
    // Never allow more than 90% of max heap, whatever was requested.
    result = adjustResultSize(result);
    return result;
  }

  /**
   * Method to check if given value can contain percent declaration of size of max result buffer.
   *
   * @param value Value to check.
   * @return Result if value contains percent.
   */
  private static boolean checkIfValueContainsPercent(String value) {
    return (value != null) && (getPercentPhraseLengthIfContains(value) != -1);
  }

  /**
   * Method to get percent value of max result buffer size dependable on actual free memory. This
   * method doesn't check other possibilities of value declaration.
   *
   * @param value string containing percent used to define max result buffer.
   * @param propertyName name of the property being parsed, used in error messages.
   * @return percent value of max result buffer size.
   * @throws RedshiftException Exception when given value can't be parsed.
   */
  private static long parseBytePercentValue(String value, String propertyName) throws RedshiftException {
    long result = -1;
    int length;

    if (checkIfValueExistsToBeParsed(value)) {
      length = getPercentPhraseLengthIfContains(value);

      if (length == -1) {
        throwExceptionAboutParsingError(
            String.format("Received parameter '%s' can't be parsed. Value received to parse is '%s'",
                propertyName, value));
      }

      result = calculatePercentOfMemory(value, length);
    }
    return result;
  }

  /**
   * Method to get length of percent phrase existing in given string, only if one of phrases exist
   * on the length of string.
   *
   * @param valueToCheck String which is gonna be checked if contains percent phrase.
   * @return Length of phrase inside string, returns -1 when no phrase found.
   */
  private static int getPercentPhraseLengthIfContains(String valueToCheck) {
    int result = -1;
    for (String phrase : PERCENT_PHRASES) {
      int indx = getPhraseLengthIfContains(valueToCheck, phrase);
      if (indx != -1) {
        result = indx;
      }
    }
    return result;
  }

  /**
   * Method to get length of given phrase in given string to check, method checks if phrase exist on
   * the end of given string.
   *
   * @param valueToCheck String which gonna be checked if contains phrase.
   * @param phrase Phrase to be looked for on the end of given string.
   * @return Length of phrase inside string, returns -1 when phrase wasn't found.
   */
  private static int getPhraseLengthIfContains(String valueToCheck, String phrase) {
    int searchValueLength = phrase.length();

    if (valueToCheck.length() > searchValueLength) {
      String subValue = valueToCheck.substring(valueToCheck.length() - searchValueLength);
      if (subValue.equals(phrase)) {
        return searchValueLength;
      }
    }
    return -1;
  }

  /**
   * Method to calculate percent of given max heap memory.
   *
   * @param value String which contains percent + percent phrase which gonna be used
   *        during calculations.
   * @param percentPhraseLength Length of percent phrase inside given value.
   * @return Size of byte buffer based on percent of max heap memory.
   */
  private static long calculatePercentOfMemory(String value, int percentPhraseLength) {
    String realValue = value.substring(0, value.length() - percentPhraseLength);
    // NOTE(review): a malformed numeric part propagates NumberFormatException
    // (unchecked) rather than RedshiftException — confirm callers expect that.
    double percent = Double.parseDouble(realValue) / 100;
    long result = (long) (percent * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
    return result;
  }

  /**
   * Method to check if given value has any chars to be parsed.
   *
   * @param value Value to be checked.
   * @return Result if value can be parsed.
   */
  private static boolean checkIfValueExistsToBeParsed(String value) {
    return value != null && value.length() != 0;
  }

  /**
   * Method to get size based on given string value. String can contains just a number or number +
   * multiplier sign (like T, G, M or K).
   *
   * @param value Given string to be parsed.
   * @param propertyName name of the property being parsed, used in error messages.
   * @return Size based on given string.
   * @throws RedshiftException Exception when given value can't be parsed.
   */
  private static long parseByteValue(String value, String propertyName) throws RedshiftException {
    long result = -1;
    long multiplier = 1;
    long mul = 1000;
    String realValue;
    char sign = value.charAt(value.length() - 1);

    switch (sign) {
      // Intentional fall-through: each level multiplies by a further 1000.
      case 'T':
      case 't':
        multiplier *= mul;
      case 'G':
      case 'g':
        multiplier *= mul;
      case 'M':
      case 'm':
        multiplier *= mul;
      case 'K':
      case 'k':
        multiplier *= mul;
        realValue = value.substring(0, value.length() - 1);
        // Use Long.parseLong (not Integer.parseInt) so numeric parts larger
        // than Integer.MAX_VALUE are accepted, consistent with the plain
        // number branch below. The result is capped by adjustResultSize().
        result = Long.parseLong(realValue) * multiplier;
        break;
      case '%':
        // Percent values are handled by parseBytePercentValue(); signal "unset".
        return result;
      default:
        if (sign >= '0' && sign <= '9') {
          result = Long.parseLong(value);
        } else {
          throwExceptionAboutParsingError(
              String.format("Received parameter '%s' can't be parsed. Value received to parse is '%s'",
                  propertyName, value));
        }
        break;
    }
    return result;
  }

  /**
   * Method to adjust result memory limit size. If given memory is larger than 90% of max heap
   * memory then it gonna be reduced to 90% of max heap memory.
   *
   * @param value Size to be adjusted.
   * @return Adjusted size (original size or 90% of max heap memory)
   */
  private static long adjustResultSize(long value) {
    if (value > 0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) {
      long newResult = (long) (0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());

      if (RedshiftLogger.isEnable() && logger != null)
        logger.log(LogLevel.INFO, GT.tr(
            "WARNING! Required to allocate {0} bytes, which exceeded possible heap memory size. Assigned {1} bytes as limit.",
            String.valueOf(value), String.valueOf(newResult)));

      value = newResult;
    }
    return value;
  }

  /**
   * Method to throw message for parsing MaxResultBuffer.
   *
   * @param message Message to be added to exception.
   * @param values Values to be put inside exception message.
   * @throws RedshiftException Exception when given value can't be parsed.
   */
  private static void throwExceptionAboutParsingError(String message, Object... values) throws RedshiftException {
    throw new RedshiftException(GT.tr(
        message,
        values),
        RedshiftState.SYNTAX_ERROR);
  }
}
8,426
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/GettableHashMap.java
/*
 * Copyright (c) 2018, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.util.HashMap;

/**
 * A {@link HashMap} that also exposes the package's read-only {@link Gettable}
 * view, so callers needing only lookups can depend on the narrower interface.
 *
 * @param <K> key type
 * @param <V> value type
 */
public class GettableHashMap<K,V>
  extends HashMap<K,V>
  implements Gettable<K,V> {

  // HashMap is Serializable; declare an explicit serialVersionUID (matching
  // the project convention, e.g. RedshiftException) so the serialized form
  // does not depend on a compiler-generated default.
  private static final long serialVersionUID = 1L;
}
8,427
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftTimestamp.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.sql.Timestamp; import java.text.SimpleDateFormat; import java.util.Arrays; import java.util.Calendar; /** * This class augments the Java built-in Timestamp to allow for explicit setting of the time zone. */ public class RedshiftTimestamp extends Timestamp { /** * The serial version UID. */ private static final long serialVersionUID = -6245623465210738466L; /** * The special keyword for -infinity values. */ private static final String MINUS_INFINITY_KEYWORD = "-infinity"; /** * The special keyword for infinity values. */ private static final String INFINITY_KEYWORD = "infinity"; /** * Threadsafe access to ready to use date formatter */ private static ThreadLocal<SimpleDateFormat> TIMESTAMP_FORMAT = new ThreadLocal<SimpleDateFormat>() { @Override protected SimpleDateFormat initialValue() { return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); } }; /** * Constant to represent a timezone that is invalid */ private static final int UNINITIALIZED_TIMEZONE = 25; /** * The first time in millis in AD ('0001-01-01 00:00:00 AD') */ private static final long FIRST_AD_TIMESTAMP = -62135769599767L; /** * The optional calendar for this timestamp. */ private Calendar calendar; private String s; private int offSetHour; private int offSetMinute; /** * True if the timestamp is a positive value false otherwise */ private boolean isAD = true; /** * Constructs a <code>RedshiftTimestamp</code> without a time zone. The integral seconds are stored in * the underlying date value; the fractional seconds are stored in the <code>nanos</code> field of * the <code>Timestamp</code> object. * * @param time milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number * of milliseconds before January 1, 1970, 00:00:00 GMT. 
* @see Timestamp#Timestamp(long) */ public RedshiftTimestamp(long time) { this(time, null); } /** * <p>Constructs a <code>RedshiftTimestamp</code> with the given time zone. The integral seconds are stored * in the underlying date value; the fractional seconds are stored in the <code>nanos</code> field * of the <code>Timestamp</code> object.</p> * * <p>The calendar object is optional. If absent, the driver will treat the timestamp as * <code>timestamp without time zone</code>. When present, the driver will treat the timestamp as * a <code>timestamp with time zone</code> using the <code>TimeZone</code> in the calendar object. * Furthermore, this calendar will be used instead of the calendar object passed to * {@link java.sql.PreparedStatement#setTimestamp(int, Timestamp, Calendar)}.</p> * * @param time milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number * of milliseconds before January 1, 1970, 00:00:00 GMT. * @param calendar the calendar object containing the time zone or <code>null</code>. * @see Timestamp#Timestamp(long) */ public RedshiftTimestamp(long time, Calendar calendar) { this(time, calendar, null); } public RedshiftTimestamp(long time, Calendar calendar, String s) { super(time); this.setCalendar(calendar); this.s = s; if(calendar != null) { int rawOffset = calendar.getTimeZone().getRawOffset(); this.offSetHour = rawOffset / 3600000; this.offSetMinute = (rawOffset / 60000) % 60; } // Determine the ERA (BC or AD) if (FIRST_AD_TIMESTAMP > time) { isAD = false; } } /** * Sets the calendar object for this timestamp. * * @param calendar the calendar object or <code>null</code>. */ public void setCalendar(Calendar calendar) { this.calendar = calendar; } /** * Returns the calendar object for this timestamp. * * @return the calendar object or <code>null</code>. 
*/ public Calendar getCalendar() { return calendar; } @Override public int hashCode() { final int prime = 31; int result = super.hashCode(); result = prime * result + ((calendar == null) ? 0 : calendar.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (!super.equals(obj)) { return false; } if (!(obj instanceof RedshiftTimestamp)) { return false; } RedshiftTimestamp other = (RedshiftTimestamp) obj; if (calendar == null) { if (other.calendar != null) { return false; } } else if (!calendar.equals(other.calendar)) { return false; } return true; } @Override public Object clone() { RedshiftTimestamp clone = (RedshiftTimestamp) super.clone(); if (getCalendar() != null) { clone.setCalendar((Calendar) getCalendar().clone()); } return clone; } @Override public String toString() { if (s == null) return super.toString(); else { return getRedshiftString(); } } /** * Convert the given time from the JVM local to an 'equivalent' time that is in the timezone of * the given calendar. * <p> * 'Equivalent' in this case means that the Day/Month/Year/Hour/Minute/Second/Millisecond * fields are equal if you interpret the input long as being in the 'from' calendar's timezone * and you interpret the output long as being in the 'to' calendar's timezone. * * @param timeMillis The time to convert. * @param to The timezone to convert to. * @param from The timezone of the original data. * * @return the given time in the local time zone. */ private static long convertTimeMillis(long timeMillis, Calendar to, Calendar from) { from.setTimeInMillis(timeMillis); to.set(from.get(Calendar.YEAR), from.get(Calendar.MONTH), from.get(Calendar.DAY_OF_MONTH), from.get(Calendar.HOUR_OF_DAY), from.get(Calendar.MINUTE), from.get(Calendar.SECOND)); to.set(Calendar.MILLISECOND, from.get(Calendar.MILLISECOND)); to.set(Calendar.ERA, from.get(Calendar.ERA)); return to.getTimeInMillis(); } /** * Get the Timestamp object adjusted to the JVM timezone. 
* * @return The adjusted Timestamp object. */ private synchronized Timestamp getAdjustedTimestamp() { return getTimestamp(this, Calendar.getInstance(), calendar); } /** * Returns a Timestamp object set to the timezone of the given calendar. The Timestamp input * must be created with the default JVM timezone. * * @param timestamp The Timestamp object to set. * @param to The java.util.Calendar object to use in constructing the * converted Date. Cannot be null. * @param from The Calendar object used to create the input Date. * Cannot be null. * * @return The new Timestamp object, or null if a null Timestamp was passed in. */ private static Timestamp getTimestamp(Timestamp timestamp, Calendar to, Calendar from) { if (null == timestamp) { return null; } if (null == to || null == from) { throw new NullPointerException("Calendar cannot be null."); } to.clear(); from.clear(); if (to.equals(from)) { // No need to convert. return timestamp; } // Retrieve millisecond time Timestamp tz = new Timestamp(convertTimeMillis(timestamp.getTime(), to, from)); tz.setNanos(timestamp.getNanos()); return tz; } /** * Specialized to string method to return the timestamp in a format that is the same as * opensource driver * * @return String to match the opensource driver's get string method */ public String getPostgresqlString() { return getRedshiftString(); } private String getRedshiftString() { //Handle the negative and positive infinity case if (this.getTime() == Long.MIN_VALUE) { return MINUS_INFINITY_KEYWORD; } else if (this.getTime() == Long.MAX_VALUE) { return INFINITY_KEYWORD; } else { StringBuilder baseResult = new StringBuilder(); Calendar cal = this.getCalendar(); cal.setTime(this.getAdjustedTimestamp()); baseResult.append(TIMESTAMP_FORMAT.get().format(cal.getTime())); String nanoSeconds = String.valueOf(this.getNanos()); if (0 < this.getNanos()) { baseResult.append("."); if (6 < nanoSeconds.length() && nanoSeconds.length() < 9) { // Prefix with zeroes and make it 9 digits int 
zeroesToPrefix = 9 -nanoSeconds.length(); char[] charArray = new char[zeroesToPrefix]; Arrays.fill(charArray, '0'); String str = new String(charArray); nanoSeconds = str + nanoSeconds; } baseResult.append(nanoSeconds); // TIMESTAMP columns store values with up to a maximum of 6 digits of precision for fractional seconds. // If TIMESTAMP has more than 6 digits, trim the last 3 digit at the end of TIMESTAMP. if (6 < nanoSeconds.length()) { baseResult.delete((baseResult.length() - (nanoSeconds.length() - 6)), baseResult.length()); } } if (UNINITIALIZED_TIMEZONE != offSetHour) { baseResult.append(offSetHour < 0 ? '-' : '+'); if (0 != offSetHour) { baseResult.append(String.format("%02d", Math.abs(offSetHour))); } else { baseResult.append("00"); } if (0 != offSetMinute) { baseResult.append(":"); baseResult.append(String.format("%02d", Math.abs(offSetMinute))); } } if (!isAD) { baseResult.append(" BC"); } return baseResult.toString(); } } }
8,428
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ExpressionProperties.java
/* * Copyright (c) 2017, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.util.Properties; import java.util.regex.Matcher; import java.util.regex.Pattern; public class ExpressionProperties extends Properties { private static final Pattern EXPRESSION = Pattern.compile("\\$\\{([^}]+)\\}"); private final Properties[] defaults; /** * Creates an empty property list with the specified defaults. * * @param defaults java.util.Properties */ public ExpressionProperties(Properties ...defaults) { this.defaults = defaults; } /** * <p>Returns property value with all {@code ${propKey}} like references replaced with the value of * the relevant property with recursive resolution.</p> * * <p>The method returns <code>null</code> if the property is not found.</p> * * @param key the property key. * * @return the value in this property list with * the specified key value. */ @Override public String getProperty(String key) { String value = getRawPropertyValue(key); return replaceProperties(value); } @Override public String getProperty(String key, String defaultValue) { String value = getRawPropertyValue(key); if (value == null) { value = defaultValue; } return replaceProperties(value); } /** * Returns raw value of a property without any replacements. 
* @param key property name * @return raw property value */ public String getRawPropertyValue(String key) { String value = super.getProperty(key); if (value != null) { return value; } for (Properties properties : defaults) { value = properties.getProperty(key); if (value != null) { return value; } } return null; } private String replaceProperties(String value) { if (value == null) { return null; } Matcher matcher = EXPRESSION.matcher(value); StringBuffer sb = null; while (matcher.find()) { if (sb == null) { sb = new StringBuffer(); } String propValue = getProperty(matcher.group(1)); if (propValue == null) { // Use original content like ${propKey} if property is not found propValue = matcher.group(); } matcher.appendReplacement(sb, Matcher.quoteReplacement(propValue)); } if (sb == null) { return value; } matcher.appendTail(sb); return sb.toString(); } }
8,429
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftException.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.sql.SQLException; import com.amazon.redshift.logger.RedshiftLogger; public class RedshiftException extends SQLException { /** * */ private static final long serialVersionUID = 1L; private ServerErrorMessage serverError; public RedshiftException(String msg, RedshiftState state, Throwable cause, RedshiftLogger logger) { this(msg, state, cause); if(RedshiftLogger.isEnable()) logger.logError(this); } public RedshiftException(String msg, RedshiftState state, Throwable cause) { super(msg, state == null ? null : state.getState(), cause); } public RedshiftException(String msg, RedshiftState state) { super(msg, state == null ? null : state.getState()); } public RedshiftException(String msg, Throwable cause) { super(msg, null , cause); } public RedshiftException(String msg) { super(msg, ""); } public RedshiftException(ServerErrorMessage serverError) { this(serverError, true); } public RedshiftException(ServerErrorMessage serverError, boolean detail) { super(detail ? serverError.getExternalErrorMessage() : serverError.getNonSensitiveErrorMessage(), serverError.getSQLState()); this.serverError = serverError; } public ServerErrorMessage getServerErrorMessage() { return serverError; } public SQLException getSQLException() { return new SQLException(this.getMessage(),this.getSQLState(), this.getCause()); } }
8,430
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftMoney.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.io.Serializable; import java.sql.SQLException; /** * This implements a class that handles the Redshift money and cash types. */ public class RedshiftMoney extends RedshiftObject implements Serializable, Cloneable { /* * The value of the field */ public double val; /** * @param value of field */ public RedshiftMoney(double value) { this(); val = value; } public RedshiftMoney(String value) throws SQLException { this(); setValue(value); } /* * Required by the driver */ public RedshiftMoney() { setType("money"); } public void setValue(String s) throws SQLException { try { String s1; boolean negative; negative = (s.charAt(0) == '('); // Remove any () (for negative) & currency symbol s1 = RedshiftTokenizer.removePara(s).substring(1); // Strip out any , in currency int pos = s1.indexOf(','); while (pos != -1) { s1 = s1.substring(0, pos) + s1.substring(pos + 1); pos = s1.indexOf(','); } val = Double.parseDouble(s1); val = negative ? -val : val; } catch (NumberFormatException e) { throw new RedshiftException(GT.tr("Conversion of money failed."), RedshiftState.NUMERIC_CONSTANT_OUT_OF_RANGE, e); } } @Override public int hashCode() { final int prime = 31; int result = super.hashCode(); long temp; temp = Double.doubleToLongBits(val); result = prime * result + (int) (temp ^ (temp >>> 32)); return result; } public boolean equals(Object obj) { if (obj instanceof RedshiftMoney) { RedshiftMoney p = (RedshiftMoney) obj; return val == p.val; } return false; } public String getValue() { if (val < 0) { return "-$" + (-val); } else { return "$" + val; } } @Override public Object clone() throws CloneNotSupportedException { // squid:S2157 "Cloneables" should implement "clone return super.clone(); } }
8,431
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/LruCache.java
/*
 * Copyright (c) 2015, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.sql.SQLException;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Caches values in simple least-recently-accessed order.
 *
 * <p>Eviction is constrained by both an entry count ({@code maxSizeEntries})
 * and a total byte size ({@code maxSizeBytes}, estimated via
 * {@link CanEstimateSize#getSize()}). All public accessors are synchronized on
 * the cache instance.</p>
 */
public class LruCache<Key, Value extends CanEstimateSize> implements Gettable<Key, Value> {
  /**
   * Action that is invoked when the entry is removed from the cache.
   *
   * @param <Value> type of the cache entry
   */
  public interface EvictAction<Value> {
    void evict(Value value) throws SQLException;
  }

  /**
   * When the entry is not present in cache, this create action is used to create one.
   *
   * @param <Value> type of the cache entry
   */
  public interface CreateAction<Key, Value> {
    Value create(Key key) throws SQLException;
  }

  // Callback run on every evicted value (e.g. to free resources).
  private final EvictAction<Value> onEvict;
  // Factory used by borrow() on cache miss.
  private final CreateAction<Key, Value> createAction;
  private final int maxSizeEntries;
  private final long maxSizeBytes;
  // Running estimate of the total byte size of cached values; updated on
  // put/borrow/eviction and compared against maxSizeBytes.
  private long currentSize;
  private final Map<Key, Value> cache;

  /**
   * LinkedHashMap whose removeEldestEntry hook evicts entries (oldest first)
   * until both the entry-count and byte-size constraints are satisfied.
   */
  private class LimitedMap extends LinkedHashMap<Key, Value> {
    LimitedMap(int initialCapacity, float loadFactor, boolean accessOrder) {
      super(initialCapacity, loadFactor, accessOrder);
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<Key, Value> eldest) {
      // Avoid creating iterators if size constraints not violated
      if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) {
        return false;
      }

      // Evict entries in iteration order (eldest first) until within limits;
      // removal is done here directly, so always return false to the superclass.
      Iterator<Map.Entry<Key, Value>> it = entrySet().iterator();
      while (it.hasNext()) {
        if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) {
          return false;
        }

        Map.Entry<Key, Value> entry = it.next();
        evictValue(entry.getValue());
        long valueSize = entry.getValue().getSize();
        if (valueSize > 0) {
          // just in case
          currentSize -= valueSize;
        }
        it.remove();
      }
      return false;
    }
  }

  // Runs the eviction callback; eviction failures are deliberately ignored so
  // cache maintenance never propagates SQLException to the caller.
  private void evictValue(Value value) {
    try {
      onEvict.evict(value);
    } catch (SQLException e) {
      /* ignore */
    }
  }

  public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder) {
    this(maxSizeEntries, maxSizeBytes, accessOrder, NOOP_CREATE_ACTION, NOOP_EVICT_ACTION);
  }

  public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder,
      CreateAction<Key, Value> createAction, EvictAction<Value> onEvict) {
    this.maxSizeEntries = maxSizeEntries;
    this.maxSizeBytes = maxSizeBytes;
    this.createAction = createAction;
    this.onEvict = onEvict;
    this.cache = new LimitedMap(16, 0.75f, accessOrder);
  }

  /**
   * Returns an entry from the cache.
   *
   * @param key cache key
   * @return entry from cache or null if cache does not contain given key.
   */
  public synchronized Value get(Key key) {
    return cache.get(key);
  }

  /**
   * Borrows an entry from the cache.
   *
   * @param key cache key
   * @return entry from cache or newly created entry if cache does not contain given key.
   * @throws SQLException if entry creation fails
   */
  public synchronized Value borrow(Key key) throws SQLException {
    // The entry is removed while borrowed; the caller is expected to return it
    // via put() when done.
    Value value = cache.remove(key);
    if (value == null) {
      return createAction.create(key);
    }
    currentSize -= value.getSize();
    return value;
  }

  /**
   * Returns given value to the cache.
   *
   * @param key key
   * @param value value
   */
  public synchronized void put(Key key, Value value) {
    long valueSize = value.getSize();
    if (maxSizeBytes == 0 || maxSizeEntries == 0 || valueSize * 2 > maxSizeBytes) {
      // Just destroy the value if cache is disabled or if entry would consume more than a half of
      // the cache
      evictValue(value);
      return;
    }
    currentSize += valueSize;
    Value prev = cache.put(key, value);
    if (prev == null) {
      return;
    }
    // This should be a rare case
    currentSize -= prev.getSize();
    if (prev != value) {
      evictValue(prev);
    }
  }

  /**
   * Puts all the values from the given map into the cache.
   *
   * @param m The map containing entries to put into the cache
   */
  public synchronized void putAll(Map<Key, Value> m) {
    for (Map.Entry<Key, Value> entry : m.entrySet()) {
      this.put(entry.getKey(), entry.getValue());
    }
  }

  // Raw-typed no-op actions shared by the convenience constructor.
  // NOTE(review): these are raw types (unchecked warnings at use sites),
  // presumably kept for source compatibility — confirm before generifying.
  public static final CreateAction NOOP_CREATE_ACTION = new CreateAction() {
    @Override
    public Object create(Object o) throws SQLException {
      return null;
    }
  };

  public static final EvictAction NOOP_EVICT_ACTION = new EvictAction() {
    @Override
    public void evict(Object o) throws SQLException {
      return;
    }
  };
}
8,432
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ObjectFactory.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.Properties; /** * Helper class to instantiate objects. Note: the class is <b>NOT</b> public API, so it is subject * to change. */ public class ObjectFactory { /** * Instantiates a class using the appropriate constructor. If a constructor with a single * Propertiesparameter exists, it is used. Otherwise, if tryString is true a constructor with a * single String argument is searched if it fails, or tryString is true a no argument constructor * is tried. * * @param <T> subclass type * @param expectedClass expected subclass * @param classname name of the class to instantiate * @param info parameter to pass as Properties * @param tryString whether to look for a single String argument constructor * @param stringarg parameter to pass as String * @return the instantiated class * @throws ClassNotFoundException if something goes wrong * @throws SecurityException if something goes wrong * @throws NoSuchMethodException if something goes wrong * @throws IllegalArgumentException if something goes wrong * @throws InstantiationException if something goes wrong * @throws IllegalAccessException if something goes wrong * @throws InvocationTargetException if something goes wrong */ public static <T> T instantiate(Class<T> expectedClass, String classname, Properties info, boolean tryString, String stringarg) throws ClassNotFoundException, SecurityException, NoSuchMethodException, IllegalArgumentException, InstantiationException, IllegalAccessException, InvocationTargetException { Object[] args = {info}; Constructor<? extends T> ctor = null; Class<? 
extends T> cls = Class.forName(classname).asSubclass(expectedClass); try { ctor = cls.getConstructor(Properties.class); } catch (NoSuchMethodException nsme) { if (tryString) { try { ctor = cls.getConstructor(String.class); args = new String[]{stringarg}; } catch (NoSuchMethodException nsme2) { tryString = false; } } if (!tryString) { ctor = cls.getConstructor((Class[]) null); args = null; } } return ctor.newInstance(args); } }
8,433
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftByteTypes.java
package com.amazon.redshift.util; import java.sql.SQLException; // Base class for VARBYTE and GEOGRAPHY bytes conversion public class RedshiftByteTypes { /* * Converts a RS bytes raw value (i.e. the raw binary representation of the varbyte/geography data type) into * a java byte[] */ public static byte[] toBytes(byte[] s) throws SQLException { if (s == null) { return null; } return toBytesFromHex(s); } public static String convertToString(byte[] data) { char[] hex = "0123456789ABCDEF".toCharArray(); char[] hexChars = new char[2 * data.length]; for (int i = 0; i < data.length; i++) { int v = data[i] & 0xFF; hexChars[i * 2] = hex[v >>> 4]; hexChars[i * 2 + 1] = hex[v & 0x0F]; } return new String(hexChars); } private static byte[] toBytesFromHex(byte[] s) { byte[] output = new byte[(s.length) / 2]; for (int i = 0; i < output.length; i++) { byte b1 = gethex(s[i * 2]); byte b2 = gethex(s[i * 2 + 1]); // squid:S3034 // Raw byte values should not be used in bitwise operations in combination with shifts output[i] = (byte) ((b1 << 4) | (b2 & 0xff)); } return output; } private static byte gethex(byte b) { // 0-9 == 48-57 if (b <= 57) { return (byte) (b - 48); } // a-f == 97-102 if (b >= 97) { return (byte) (b - 97 + 10); } // A-F == 65-70 return (byte) (b - 65 + 10); } }
8,434
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftWarning.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.sql.SQLWarning;

/**
 * A {@link SQLWarning} backed by a parsed server message, so all protocol fields
 * (detail, hint, position, ...) remain available via {@link #getServerErrorMessage()}.
 */
public class RedshiftWarning extends SQLWarning {
  // The parsed server notice this warning wraps.
  private ServerErrorMessage serverError;

  public RedshiftWarning(ServerErrorMessage err) {
    // SQLWarning keeps the full rendering and the SQLState for standard JDBC accessors.
    super(err.toString(), err.getSQLState());
    this.serverError = err;
  }

  // Overrides SQLWarning.getMessage() to return only the server's message field,
  // not the full multi-line rendering passed to the superclass constructor.
  public String getMessage() {
    return serverError.getMessage();
  }

  public ServerErrorMessage getServerErrorMessage() {
    return serverError;
  }
}
8,435
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ServerErrorMessage.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import com.amazon.redshift.core.EncodingPredictor; import com.amazon.redshift.logger.RedshiftLogger; import java.io.Serializable; import java.util.HashMap; import java.util.Map; public class ServerErrorMessage implements Serializable { private static final Character SEVERITY = 'S'; private static final Character MESSAGE = 'M'; private static final Character DETAIL = 'D'; private static final Character HINT = 'H'; private static final Character POSITION = 'P'; private static final Character WHERE = 'W'; private static final Character FILE = 'F'; private static final Character LINE = 'L'; private static final Character ROUTINE = 'R'; private static final Character SQLSTATE = 'C'; private static final Character INTERNAL_POSITION = 'p'; private static final Character INTERNAL_QUERY = 'q'; private static final Character SCHEMA = 's'; private static final Character TABLE = 't'; private static final Character COLUMN = 'c'; private static final Character DATATYPE = 'd'; private static final Character CONSTRAINT = 'n'; private final Map<Character, String> mesgParts = new HashMap<Character, String>(); public ServerErrorMessage(EncodingPredictor.DecodeResult serverError) { this(serverError.result); if (serverError.encoding != null) { mesgParts.put(MESSAGE, mesgParts.get(MESSAGE) + GT.tr(" (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)", serverError.encoding) ); } } public ServerErrorMessage(String serverError) { char[] chars = serverError.toCharArray(); int pos = 0; int length = chars.length; while (pos < length) { char mesgType = chars[pos]; if (mesgType != '\0') { pos++; int startString = pos; // order here is important position must be checked before accessing the array while (pos < length && 
chars[pos] != '\0') { pos++; } String mesgPart = new String(chars, startString, pos - startString); mesgParts.put(mesgType, mesgPart); } pos++; } } public String getSQLState() { return mesgParts.get(SQLSTATE); } public String getMessage() { return mesgParts.get(MESSAGE); } public String getSeverity() { return mesgParts.get(SEVERITY); } public String getDetail() { return mesgParts.get(DETAIL); } public String getHint() { return mesgParts.get(HINT); } public int getPosition() { return getIntegerPart(POSITION); } public String getWhere() { return mesgParts.get(WHERE); } public String getSchema() { return mesgParts.get(SCHEMA); } public String getTable() { return mesgParts.get(TABLE); } public String getColumn() { return mesgParts.get(COLUMN); } public String getDatatype() { return mesgParts.get(DATATYPE); } public String getConstraint() { return mesgParts.get(CONSTRAINT); } public String getFile() { return mesgParts.get(FILE); } public int getLine() { return getIntegerPart(LINE); } public String getRoutine() { return mesgParts.get(ROUTINE); } public String getInternalQuery() { return mesgParts.get(INTERNAL_QUERY); } public int getInternalPosition() { return getIntegerPart(INTERNAL_POSITION); } private int getIntegerPart(Character c) { String s = mesgParts.get(c); if (s == null) { return 0; } return Integer.parseInt(s); } String getNonSensitiveErrorMessage() { StringBuilder totalMessage = new StringBuilder(); String message = mesgParts.get(SEVERITY); if (message != null) { totalMessage.append(message).append(": "); } message = mesgParts.get(MESSAGE); if (message != null) { totalMessage.append(message); } return totalMessage.toString(); } String getExternalErrorMessage() { // Now construct the message from what the server sent // The general format is: // SEVERITY: Message \n // Detail: \n // Hint: \n // Position: \n // Where: \n // StringBuilder totalMessage = new StringBuilder(); String message = mesgParts.get(SEVERITY); if (message != null) { 
totalMessage.append(message).append(": "); } message = mesgParts.get(MESSAGE); if (message != null) { totalMessage.append(message); } message = mesgParts.get(DETAIL); if (message != null) { totalMessage.append("\n ").append(GT.tr("Detail: {0}", message)); } message = mesgParts.get(HINT); if (message != null) { totalMessage.append("\n ").append(GT.tr("Hint: {0}", message)); } message = mesgParts.get(POSITION); if (message != null) { totalMessage.append("\n ").append(GT.tr("Position: {0}", message)); } message = mesgParts.get(WHERE); if (message != null) { totalMessage.append("\n ").append(GT.tr("Where: {0}", message)); } return totalMessage.toString(); } public String toString() { // Now construct the message from what the server sent // The general format is: // SEVERITY: Message \n // Detail: \n // Hint: \n // Position: \n // Where: \n // Internal Query: \n // Internal Position: \n // Location: File:Line:Routine \n // SQLState: \n // // Normally only the message and detail is included. // If INFO level logging is enabled then detail, hint, position and where are // included. If DEBUG level logging is enabled then all information // is included. 
StringBuilder totalMessage = new StringBuilder(); String message = mesgParts.get(SEVERITY); if (message != null) { totalMessage.append(message).append(": "); } message = mesgParts.get(MESSAGE); if (message != null) { totalMessage.append(message); } message = mesgParts.get(DETAIL); if (message != null) { totalMessage.append("\n ").append(GT.tr("Detail: {0}", message)); } message = mesgParts.get(HINT); if (message != null) { totalMessage.append("\n ").append(GT.tr("Hint: {0}", message)); } message = mesgParts.get(POSITION); if (message != null) { totalMessage.append("\n ").append(GT.tr("Position: {0}", message)); } message = mesgParts.get(WHERE); if (message != null) { totalMessage.append("\n ").append(GT.tr("Where: {0}", message)); } if (RedshiftLogger.isEnable()) { String internalQuery = mesgParts.get(INTERNAL_QUERY); if (internalQuery != null) { totalMessage.append("\n ").append(GT.tr("Internal Query: {0}", internalQuery)); } String internalPosition = mesgParts.get(INTERNAL_POSITION); if (internalPosition != null) { totalMessage.append("\n ").append(GT.tr("Internal Position: {0}", internalPosition)); } String file = mesgParts.get(FILE); String line = mesgParts.get(LINE); String routine = mesgParts.get(ROUTINE); if (file != null || line != null || routine != null) { totalMessage.append("\n ").append(GT.tr("Location: File: {0}, Routine: {1}, Line: {2}", file, routine, line)); } message = mesgParts.get(SQLSTATE); if (message != null) { totalMessage.append("\n ").append(GT.tr("Server SQLState: {0}", message)); } } return totalMessage.toString(); } }
8,436
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/IniFile.java
package com.amazon.redshift.util; import java.io.BufferedReader; import java.io.FileReader; import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; public class IniFile { private Pattern SECTION_PATTERN = Pattern.compile( "\\s*\\[([^]]*)\\]\\s*" ); private Pattern KEY_VAL_PATTERN = Pattern.compile( "\\s*([^=]*)=(.*)" ); private Map<String, Map< String, String >> sections = new HashMap<>(); public IniFile( String path ) throws IOException { load( path ); } public void load( String path ) throws IOException { try( BufferedReader br = new BufferedReader( new FileReader( path ))) { String line; String section = null; while(( line = br.readLine()) != null ) { Matcher m = SECTION_PATTERN.matcher( line ); if( m.matches()) { section = m.group( 1 ).trim(); } else if( section != null ) { m = KEY_VAL_PATTERN.matcher( line ); if( m.matches()) { String key = m.group( 1 ).trim(); String value = m.group( 2 ).trim(); Map< String, String > kv = sections.get( section ); if( kv == null ) { sections.put( section, kv = new HashMap<>()); } kv.put( key, value ); } } } // Loop } } public Map< String, String > getAllKeyVals( String section ) { Map< String, String > kv = sections.get( section ); return kv; } }
8,437
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ByteBufferByteStreamWriter.java
/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;

/**
 * A {@link ByteStreamWriter} that writes a {@link ByteBuffer java.nio.ByteBuffer} to a byte array
 * parameter.
 */
public class ByteBufferByteStreamWriter implements ByteStreamWriter {

  private final ByteBuffer buf;
  // Bytes remaining in the buffer at construction time; reported via getLength() even
  // after writeTo() has consumed the buffer.
  private final int length;

  /**
   * Construct the writer with the given {@link ByteBuffer}
   *
   * @param buf the buffer to use.
   */
  public ByteBufferByteStreamWriter(ByteBuffer buf) {
    this.buf = buf;
    this.length = buf.remaining();
  }

  @Override
  public int getLength() {
    return length;
  }

  // Writes the buffer's remaining bytes to the target's output stream. One-shot: the
  // buffer's position advances, so calling this a second time writes nothing.
  @Override
  public void writeTo(ByteStreamTarget target) throws IOException {
    // this _does_ involve some copying to a temporary buffer, but that's unavoidable
    // as OutputStream itself only accepts single bytes or heap allocated byte arrays
    WritableByteChannel c = Channels.newChannel(target.getOutputStream());
    try {
      c.write(buf);
    } finally {
      // NOTE(review): closing the channel also closes the underlying stream — confirm
      // callers expect the target's output stream to be closed here.
      c.close();
    }
  }
}
8,438
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftConstants.java
/**
 * Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * This file is licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License. A copy of
 * the License is located at
 *
 * http://aws.amazon.com/apache2.0/
 *
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.amazon.redshift.util;

/**
 * Class to contain all Redshift JDBC driver wide constants
 * Constants should be organized into logical groups with comments.
 */
public final class RedshiftConstants {

  // Not instantiable: constants holder only.
  private RedshiftConstants() {
    throw new AssertionError("RedshiftConstants class should not be instantiated.");
  }

  // Auth plugins names related constants
  // Fully qualified class names of the driver's pluggable credential providers.
  // NOTE(review): presumably matched against the plugin connection property by the
  // authentication code — verify at the call sites before renaming any of these classes.
  public static final String BASIC_JWT_PLUGIN = "com.amazon.redshift.plugin.BasicJwtCredentialsProvider";
  public static final String NATIVE_IDP_AZUREAD_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BrowserAzureOAuth2CredentialsProvider";
  public static final String NATIVE_IDP_OKTA_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BrowserOktaSAMLCredentialsProvider";
  public static final String NATIVE_IDP_OKTA_NON_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BasicNativeSamlCredentialsProvider";
  public static final String IDC_BROWSER_PLUGIN = "com.amazon.redshift.plugin.BrowserIdcAuthPlugin";
  public static final String IDP_TOKEN_PLUGIN = "com.amazon.redshift.plugin.IdpTokenAuthPlugin";
}
8,439
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/JdbcBlackHole.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; public class JdbcBlackHole { public static void close(Connection con) { try { if (con != null) { con.close(); } } catch (SQLException e) { /* ignore for now */ } } public static void close(Statement s) { try { if (s != null) { s.close(); } } catch (SQLException e) { /* ignore for now */ } } public static void close(ResultSet rs) { try { if (rs != null) { rs.close(); } } catch (SQLException e) { /* ignore for now */ } } }
8,440
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftGeometry.java
package com.amazon.redshift.util; public class RedshiftGeometry { /** * Look up for ascii value DEL */ private static final byte asciiInvalidValue = 0x7f; /** * Helper method to turn input EWKT into binary format * * @param bytes The input data. * @param beginIndex The starting index. * @param length The number of characters to consume. * * @return transformed byte array */ public static byte[] transformEWKTFormat(byte[] bytes, int beginIndex, int length) { boolean errorFlag = false; int pointer = beginIndex; byte[] result; if (null == bytes ) { return null; } else if (0 == length) { return new byte[0]; } else { // EWKT is always hex encoded // Check to see if byte array is of expected length if (1 == (beginIndex + length-pointer) % 2) { byte[] newArray = new byte[length]; System.arraycopy(bytes, beginIndex, newArray, 0, length); return newArray; } result = new byte[(beginIndex + length-pointer) / 2]; errorFlag = false; for (int i = 0 ; pointer < beginIndex + length ; i++) { // Get the ascii number encoded int stage = hexEncodingLookupNoCase(bytes[pointer]) << 4; // Error Check errorFlag = ((byte)stage == asciiInvalidValue) || errorFlag; pointer++; int stage2 =hexEncodingLookupNoCase(bytes[pointer]); errorFlag = ((byte)stage2 == asciiInvalidValue) || errorFlag; pointer++; result[i] = (byte)(stage | stage2); } if(errorFlag) { byte[] newArray = new byte[length]; System.arraycopy(bytes, beginIndex, newArray, 0, length); return newArray; } return result; } } /** * Look up method to return the ascii value from a ascii encoding * * @param bytes The intput data * * @return value represented by byte */ private static int hexEncodingLookupNoCase(byte inputValue) { byte result = asciiInvalidValue; switch (inputValue) { case 48: result = 0x00; break; case 49: result = 0x01; break; case 50: result = 0x02; break; case 51: result = 0x03; break; case 52: result = 0x04; break; case 53: result = 0x05; break; case 54: result = 0x06; break; case 55: result = 0x07; break; case 56: 
result = 0x08; break; case 57: result = 0x09; break; case 65: case 97: result = 0x0a; break; case 66: case 98: result = 0x0b; break; case 67: case 99: result = 0x0c; break; case 68: case 100: result = 0x0d; break; case 69: case 101: result = 0x0e; break; case 70: case 102: result = 0x0f; break; } return result; } public static String convertToString(byte[] data) { char[] hex = "0123456789ABCDEF".toCharArray(); char[] hexChars = new char[2 * data.length]; for (int i = 0; i < data.length; i++) { int v = data[i] & 0xFF; hexChars[i * 2] = hex[v >>> 4]; hexChars[i * 2 + 1] = hex[v & 0x0F]; } return new String(hexChars); } }
8,441
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftIntervalDayToSecond.java
package com.amazon.redshift.util;

import java.io.Serializable;
import java.sql.SQLException;
import java.text.DecimalFormat;
import java.util.Locale;
import java.text.NumberFormat;

import com.amazon.redshift.util.RedshiftInterval;

/**
 * Interval restricted to day-to-second precision: the year and month components inherited
 * from RedshiftInterval are forced to zero by every constructor and overridden setter.
 */
public class RedshiftIntervalDayToSecond extends RedshiftInterval implements Serializable, Cloneable {
  /**
   * required by the driver.
   */
  public RedshiftIntervalDayToSecond() {
    setType("intervald2s");
  }

  /**
   * Initialize an interval with a given interval string representation.
   * This method simply calls the parent public method setValue().
   *
   * @param value String representated interval
   *        (e.g. '3 days 4 hours 1 mins 4.30 seconds').
   * @throws SQLException Is thrown if the string representation has an
   *         unknown format, or if it contains year-month parts.
   * @see #setValue(String)
   */
  public RedshiftIntervalDayToSecond(String value) throws SQLException {
    this();
    setValue(value);
    // Reject values that parsed successfully but carry a year or month component.
    if (this.getYears() != 0 || this.getMonths() != 0) {
      throw new RedshiftException("Invalid value for Interval Day To Second. "
          + "Value cannot contain year-month parts.");
    };
  }

  /**
   * Initialize from explicit day-to-second components; the value is normalized by
   * converting to total microseconds and back.
   *
   * @param days number of days
   * @param hours number of hours
   * @param minutes number of minutes
   * @param seconds number of seconds (may carry a fraction)
   * @throws SQLException propagated from the parent setters
   */
  public RedshiftIntervalDayToSecond(int days, int hours, int minutes, double seconds) throws SQLException {
    this();
    super.setValue(0 /* years */, 0 /* months */, days, hours, minutes, seconds);
    setValue(totalMicroseconds());
  }

  /**
   * Initialize from a total number of microseconds.
   *
   * @param time total microseconds
   * @throws SQLException propagated from the parent setter
   */
  public RedshiftIntervalDayToSecond(long time) throws SQLException {
    this();
    setValue(0, time);
  }

  /**
   * Set all values of this interval using just one specified value.
   *
   * @param time Total number of microseconds
   *        (assuming 1day = 24hrs = 1440mins = 86400secs = 8.64e10microsecs)
   */
  public void setValue(long time) {
    super.setValue(0, time);
  }

  /**
   * Override the parent setValue method disallowing a non-zero value for month.
   *
   * <p>NOTE(review): validation uses {@code assert}, which is a no-op unless the JVM runs
   * with -ea; with assertions disabled a non-zero month is silently coerced to 0.</p>
   *
   * @param month Should be 0.
   * @param time Total number of microseconds
   *        (assuming 1day = 24hrs = 1440mins = 86400secs = 8.64e10microsecs)
   */
  @Override
  public void setValue(int month, long time) {
    assert(month == 0);
    super.setValue(0, time);
  }

  /**
   * Override the parent setYears method disallowing a non-zero value.
   * NOTE(review): assert-only guard — see setValue(int, long).
   *
   * @param years Should be 0.
   */
  @Override
  public void setYears(int years) {
    assert(years == 0);
    super.setYears(0);
  }

  /**
   * Override the parent setMonths method disallowing a non-zero value.
   * NOTE(review): assert-only guard — see setValue(int, long).
   *
   * @param months Should be 0.
   */
  @Override
  public void setMonths(int months) {
    assert(months == 0);
    super.setMonths(0);
  }

  /**
   * Returns the stored interval information as a string, e.g.
   * "3 days 4 hours 1 mins 4.3 secs" (seconds formatted with pattern 0.0#####).
   *
   * @return String represented interval
   */
  @Override
  public String getValue() {
    DecimalFormat df = (DecimalFormat) NumberFormat.getInstance(Locale.US);
    df.applyPattern("0.0#####");

    return String.format(
      Locale.ROOT,
      "%d days %d hours %d mins %s secs",
      this.getDays(),
      this.getHours(),
      this.getMinutes(),
      df.format(this.getSeconds())
    );
  }

  /**
   * Add this interval's value to the passed interval. This is backwards to what I would expect, but
   * this makes it match the other existing add methods.
   *
   * @param interval intval to add
   */
  public void add(RedshiftIntervalDayToSecond interval) {
    interval.setValue(0, totalMicroseconds() + interval.totalMicroseconds());
  }

  /**
   * Returns whether an object is equal to this one or not.
   * Compares the day/hour/minute/second components field by field.
   *
   * @param obj Object to compare with
   * @return true if the two intervals are identical
   */
  @Override
  public boolean equals(Object obj) {
    if (obj == null || !(obj instanceof RedshiftIntervalDayToSecond)) {
      return false;
    }
    if (obj == this) {
      return true;
    }

    final RedshiftIntervalDayToSecond pgi = (RedshiftIntervalDayToSecond) obj;

    return pgi.getDays() == this.getDays()
        && pgi.getHours() == this.getHours()
        && pgi.getMinutes() == this.getMinutes()
        && pgi.getWholeSeconds() == this.getWholeSeconds()
        && pgi.getMicroSeconds() == this.getMicroSeconds();
  }
}
8,442
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/MD5Digest.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; /** * MD5-based utility function to obfuscate passwords before network transmission. * * @author Jeremy Wohl */ public class MD5Digest { private MD5Digest() { } /** * Encodes user/password/salt information in the following way: MD5(MD5(password + user) + salt). * * @param user The connecting user. * @param password The connecting user's password. * @param salt A four-salt sent by the server. * @return A 35-byte array, comprising the string "md5" and an MD5 digest. */ public static byte[] encode(byte[] user, byte[] password, byte[] salt) { MessageDigest md; byte[] tempDigest; byte[] passDigest; byte[] hexDigest = new byte[35]; try { md = MessageDigest.getInstance("MD5"); md.update(password); md.update(user); tempDigest = md.digest(); bytesToHex(tempDigest, hexDigest, 0); md.update(hexDigest, 0, 32); md.update(salt); passDigest = md.digest(); bytesToHex(passDigest, hexDigest, 3); hexDigest[0] = (byte) 'm'; hexDigest[1] = (byte) 'd'; hexDigest[2] = (byte) '5'; } catch (NoSuchAlgorithmException e) { throw new IllegalStateException("Unable to encode password with MD5", e); } return hexDigest; } /* * Turn 16-byte stream into a human-readable 32-byte hex string */ private static void bytesToHex(byte[] bytes, byte[] hex, int offset) { final char[] lookup = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; int i; int c; int j; int pos = offset; for (i = 0; i < 16; i++) { c = bytes[i] & 0xFF; j = c >> 4; hex[pos++] = (byte) lookup[j]; j = (c & 0xF); hex[pos++] = (byte) lookup[j]; } } }
8,443
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftObject.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.util; import java.io.Serializable; import java.sql.SQLException; /** * RedshiftObject is a class used to describe unknown types An unknown type is any type that is unknown by * JDBC Standards. */ public class RedshiftObject implements Serializable, Cloneable { protected String type; protected String value; /** * This is called by com.amazon.redshift.Connection.getObject() to create the object. */ public RedshiftObject() { } public RedshiftObject(String type, String value) { setType(type); try { setValue(value); } catch (SQLException e) { this.value = value; } } /** * <p>This method sets the type of this object.</p> * * <p>It should not be extended by subclasses, hence it is final</p> * * @param type a string describing the type of the object */ public final void setType(String type) { this.type = type; } /** * This method sets the value of this object. It must be overridden. * * @param value a string representation of the value of the object * @throws SQLException thrown if value is invalid for this type */ public void setValue(String value) throws SQLException { this.value = value; } /** * As this cannot change during the life of the object, it's final. * * @return the type name of this object */ public final String getType() { return type; } /** * This must be overidden, to return the value of the object, in the form required by * com.amazon.redshift. * * @return the value of this object */ public String getValue() { return value; } /** * This must be overidden to allow comparisons of objects. 
* * @param obj Object to compare with * @return true if the two boxes are identical */ public boolean equals(Object obj) { if (obj instanceof RedshiftObject) { final Object otherValue = ((RedshiftObject) obj).getValue(); if (otherValue == null) { return getValue() == null; } return otherValue.equals(getValue()); } return false; } /** * This must be overidden to allow the object to be cloned. */ public Object clone() throws CloneNotSupportedException { return super.clone(); } /** * This is defined here, so user code need not overide it. * * @return the value of this object, in the syntax expected by com.amazon.redshift */ public String toString() { return getValue(); } /** * Compute hash. As equals() use only value. Return the same hash for the same value. * * @return Value hashcode, 0 if value is null {@link java.util.Objects#hashCode(Object)} */ @Override public int hashCode() { return getValue() != null ? getValue().hashCode() : 0; } }
8,444
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftTokenizer.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.util.ArrayList;
import java.util.List;

/**
 * This class is used to tokenize the text output of com.amazon.redshift. It's mainly used by the geometric
 * classes, but is useful in parsing any output from custom data types output from com.amazon.redshift.
 *
 * @see com.amazon.redshift.geometric.RedshiftBox
 * @see com.amazon.redshift.geometric.RedshiftCircle
 * @see com.amazon.redshift.geometric.RedshiftLseg
 * @see com.amazon.redshift.geometric.RedshiftPath
 * @see com.amazon.redshift.geometric.RedshiftPoint
 * @see com.amazon.redshift.geometric.RedshiftPolygon
 */
public class RedshiftTokenizer {
  // Tokens produced by the most recent tokenize() call.
  protected List<String> tokens;

  /**
   * <p>Create a tokeniser.</p>
   *
   * <p>We could have used StringTokenizer to do this, however, we needed to handle nesting of '(' ')'
   * '[' ']' '&lt;' and '&gt;' as these are used by the geometric data types.</p>
   *
   * @param string containing tokens
   * @param delim single character to split the tokens
   */
  public RedshiftTokenizer(String string, char delim) {
    tokenize(string, delim);
  }

  /**
   * This resets this tokenizer with a new string and/or delimiter.
   *
   * <p>The delimiter only splits tokens at nesting depth zero; characters inside
   * (), [], &lt;&gt; or a double-quoted section stay within a single token.</p>
   *
   * @param string containing tokens
   * @param delim single character to split the tokens
   * @return number of tokens
   */
  public int tokenize(String string, char delim) {
    tokens = new ArrayList<String>();

    // nest holds how many levels we are in the current token.
    // if this is > 0 then we don't split a token when delim is matched.
    //
    // The Geometric datatypes use this, because often a type may have others
    // (usually RedshiftPoint) imbedded within a token.
    //
    // Peter 1998 Jan 6 - Added < and > to the nesting rules
    int nest = 0;
    int p; // current scan position
    int s; // start index of the token currently being accumulated
    boolean skipChar = false; // true when the previous character was a backslash
    boolean nestedDoubleQuote = false; // true while inside a double-quoted section

    for (p = 0, s = 0; p < string.length(); p++) {
      char c = string.charAt(p);

      // increase nesting if an open character is found; an unescaped '"'
      // outside a quoted section opens one
      if (c == '(' || c == '[' || c == '<' || (!nestedDoubleQuote && !skipChar && c == '"')) {
        nest++;
        if (c == '"') {
          nestedDoubleQuote = true;
          // NOTE(review): this assignment has no lasting effect — it is
          // overwritten by "skipChar = c == '\\'" a few lines below.
          skipChar = true;
        }
      }

      // decrease nesting if a close character is found; an unescaped '"'
      // inside a quoted section closes it
      if (c == ')' || c == ']' || c == '>' || (nestedDoubleQuote && !skipChar && c == '"')) {
        nest--;
        if (c == '"') {
          nestedDoubleQuote = false;
        }
      }

      // remember for the next iteration whether this character escapes it
      skipChar = c == '\\';

      if (nest == 0 && c == delim) {
        tokens.add(string.substring(s, p));
        s = p + 1; // +1 to skip the delimiter
      }
    }

    // Don't forget the last token ;-)
    // (a trailing delimiter does not produce a trailing empty token)
    if (s < string.length()) {
      tokens.add(string.substring(s));
    }

    return tokens.size();
  }

  /**
   * @return the number of tokens available
   */
  public int getSize() {
    return tokens.size();
  }

  /**
   * @param n Token number ( 0 ... getSize()-1 )
   * @return The token value
   */
  public String getToken(int n) {
    return tokens.get(n);
  }

  /**
   * <p>This returns a new tokenizer based on one of our tokens.</p>
   *
   * <p>The geometric datatypes use this to process nested tokens (usually RedshiftPoint).</p>
   *
   * @param n Token number ( 0 ... getSize()-1 )
   * @param delim The delimiter to use
   * @return A new instance of RedshiftTokenizer based on the token
   */
  public RedshiftTokenizer tokenizeToken(int n, char delim) {
    return new RedshiftTokenizer(getToken(n), delim);
  }

  /**
   * This removes the lead/trailing strings from a string.
   *
   * @param s Source string
   * @param l Leading string to remove
   * @param t Trailing string to remove
   * @return String without the lead/trailing strings
   */
  public static String remove(String s, String l, String t) {
    if (s.startsWith(l)) {
      s = s.substring(l.length());
    }
    if (s.endsWith(t)) {
      s = s.substring(0, s.length() - t.length());
    }
    return s;
  }

  /**
   * This removes the lead/trailing strings from all tokens.
   *
   * @param l Leading string to remove
   * @param t Trailing string to remove
   */
  public void remove(String l, String t) {
    for (int i = 0; i < tokens.size(); i++) {
      tokens.set(i, remove(tokens.get(i), l, t));
    }
  }

  /**
   * Removes ( and ) from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the ( or )
   */
  public static String removePara(String s) {
    return remove(s, "(", ")");
  }

  /**
   * Removes ( and ) from the beginning and end of all tokens.
   */
  public void removePara() {
    remove("(", ")");
  }

  /**
   * Removes [ and ] from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the [ or ]
   */
  public static String removeBox(String s) {
    return remove(s, "[", "]");
  }

  /**
   * Removes [ and ] from the beginning and end of all tokens.
   */
  public void removeBox() {
    remove("[", "]");
  }

  /**
   * Removes &lt; and &gt; from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the &lt; or &gt;
   */
  public static String removeAngle(String s) {
    return remove(s, "<", ">");
  }

  /**
   * Removes &lt; and &gt; from the beginning and end of all tokens.
   */
  public void removeAngle() {
    remove("<", ">");
  }

  /**
   * Removes curly braces { and } from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the { or }
   */
  public static String removeCurlyBrace(String s) {
    return remove(s, "{", "}");
  }

  /**
   * Removes curly braces { and } from the beginning and end of all tokens.
   */
  public void removeCurlyBrace() {
    remove("{", "}");
  }
}
8,445
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/DriverInfo.java
/*
 * Copyright (c) 2017, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

/**
 * Utility class with constants of Driver information.
 *
 * <p>The driver version is read once, at class-initialization time, from the
 * {@code redshift_jdbc_driver.properties} resource on the classpath; when the
 * resource is missing or malformed, the constants fall back to version
 * {@code 2.0.0.0}.</p>
 */
public final class DriverInfo {

  // Driver name
  public static final String DRIVER_NAME = "Redshift JDBC Driver";
  public static final String DRIVER_SHORT_NAME = "RsJDBC";
  public static final String DRIVER_VERSION;
  public static final String DRIVER_FULL_NAME;

  // Driver version
  public static final int MAJOR_VERSION;
  public static final int MINOR_VERSION;
  public static final int PATCH_VERSION;

  // JDBC specification
  public static final String JDBC_VERSION = "4.2";
  private static final int JDBC_INTVERSION = 42;
  public static final int JDBC_MAJOR_VERSION = JDBC_INTVERSION / 10;
  public static final int JDBC_MINOR_VERSION = JDBC_INTVERSION % 10;

  static {
    String version = "2.0.0.0";
    try (InputStream resourceAsStream =
        DriverInfo.class.getClassLoader().getResourceAsStream("redshift_jdbc_driver.properties")) {
      // getResourceAsStream() returns null when the resource is absent;
      // Properties.load(null) would throw NullPointerException (not
      // IOException) and abort class initialization with an
      // ExceptionInInitializerError, so guard explicitly.
      if (resourceAsStream != null) {
        Properties versionFromBuild = new Properties();
        versionFromBuild.load(resourceAsStream);
        String buildVersion = versionFromBuild.getProperty("version");
        // getProperty() may return null for a malformed file; keep the default.
        if (buildVersion != null) {
          version = buildVersion;
        }
      }
    } catch (IOException ex) {
      // fall back to the default version
    }

    String[] versionComponents = version.split("\\.");
    int majorVersion = 2;
    int minorVersion = 0;
    int patchVersion = 0;
    try {
      if (versionComponents.length >= 3) {
        majorVersion = Integer.parseInt(versionComponents[0]);
        minorVersion = Integer.parseInt(versionComponents[1]);
        patchVersion = Integer.parseInt(versionComponents[2]);
      } else {
        // Too few components: report the default version string, which
        // matches the 2/0/0 defaults above.
        version = "2.0.0.0";
      }
    } catch (NumberFormatException ex) {
      // Non-numeric components: keep the defaults.
      majorVersion = 2;
      minorVersion = 0;
      patchVersion = 0;
    }

    MAJOR_VERSION = majorVersion;
    MINOR_VERSION = minorVersion;
    PATCH_VERSION = patchVersion;
    DRIVER_VERSION = version;
    DRIVER_FULL_NAME = DRIVER_NAME + " " + DRIVER_VERSION;
  }

  private DriverInfo() {
  }
}
8,446
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftGeography.java
package com.amazon.redshift.util; // Right now most of methods in base class. // In future, if there are differences in bytes conversion of VARBYTE and GEOGRAPHY // then we can add more methods in this class. public class RedshiftGeography extends RedshiftByteTypes{ }
8,447
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/RedshiftBytea.java
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.sql.SQLException;

/**
 * Converts to and from the Redshift bytea datatype used by the backend.
 *
 * <p>Two text encodings are handled: the hex format ({@code \x} prefix followed
 * by hex pairs) and the older octal-escape format (backslash escapes such as
 * {@code \\} and {@code \003}).</p>
 */
public class RedshiftBytea {
  // Threshold above which toBytesOctalEscaped pre-counts escapes to allocate
  // an exactly-sized buffer instead of a possibly oversized slength buffer.
  private static final int MAX_3_BUFF_SIZE = 2 * 1024 * 1024;

  /*
   * Converts a RS bytea raw value (i.e. the raw binary representation of the bytea data type) into
   * a java byte[]
   */
  public static byte[] toBytes(byte[] s) throws SQLException {
    if (s == null) {
      return null;
    }

    // Starting with PG 9.0, a new hex format is supported
    // that starts with "\x". Figure out which format we're
    // dealing with here.
    //
    if (s.length < 2 || s[0] != '\\' || s[1] != 'x') {
      return toBytesOctalEscaped(s);
    }
    return toBytesHexEscaped(s);
  }

  // Decodes the "\x<hex pairs>" format: each pair of hex characters after the
  // two-byte prefix becomes one output byte.
  private static byte[] toBytesHexEscaped(byte[] s) {
    byte[] output = new byte[(s.length - 2) / 2];
    for (int i = 0; i < output.length; i++) {
      byte b1 = gethex(s[2 + i * 2]);
      byte b2 = gethex(s[2 + i * 2 + 1]);
      // squid:S3034
      // Raw byte values should not be used in bitwise operations in combination with shifts
      output[i] = (byte) ((b1 << 4) | (b2 & 0xff));
    }
    return output;
  }

  // Maps one ASCII hex character ('0'-'9', 'a'-'f', 'A'-'F') to its 0-15 value.
  // Assumes the input is a valid hex character; anything else is treated as
  // falling into one of the three ranges.
  private static byte gethex(byte b) {
    // 0-9 == 48-57
    if (b <= 57) {
      return (byte) (b - 48);
    }

    // a-f == 97-102
    if (b >= 97) {
      return (byte) (b - 97 + 10);
    }

    // A-F == 65-70
    return (byte) (b - 65 + 10);
  }

  // Decodes the octal-escape format: "\\" is a literal backslash, "\NNN" is a
  // three-digit octal byte; everything else passes through unchanged.
  // Note the loop index i is advanced inside the body when consuming escapes.
  private static byte[] toBytesOctalEscaped(byte[] s) {
    final int slength = s.length;
    byte[] buf = null;
    int correctSize = slength;
    if (slength > MAX_3_BUFF_SIZE) {
      // count backslash escapes, they will be either
      // backslashes or an octal escape \\ or \003
      //
      for (int i = 0; i < slength; ++i) {
        byte current = s[i];
        if (current == '\\') {
          byte next = s[++i];
          if (next == '\\') {
            --correctSize;
          } else {
            correctSize -= 3;
          }
        }
      }
      buf = new byte[correctSize];
    } else {
      // Small input: allocate the worst-case size and trim at the end.
      buf = new byte[slength];
    }
    int bufpos = 0;
    int thebyte;
    byte nextbyte;
    byte secondbyte;
    for (int i = 0; i < slength; i++) {
      nextbyte = s[i];
      if (nextbyte == (byte) '\\') {
        secondbyte = s[++i];
        if (secondbyte == (byte) '\\') {
          // escaped \
          buf[bufpos++] = (byte) '\\';
        } else {
          // "\NNN": convert three octal digits, then wrap 128..255 into the
          // signed byte range.
          thebyte = (secondbyte - 48) * 64 + (s[++i] - 48) * 8 + (s[++i] - 48);
          if (thebyte > 127) {
            thebyte -= 256;
          }
          buf[bufpos++] = (byte) thebyte;
        }
      } else {
        buf[bufpos++] = nextbyte;
      }
    }

    if (bufpos == correctSize) {
      return buf;
    }
    // Trim the oversized buffer down to the bytes actually written.
    byte[] result = new byte[bufpos];
    System.arraycopy(buf, 0, result, 0, bufpos);
    return result;
  }

  /*
   * Converts a java byte[] into a RS bytea string (i.e. the text representation of the bytea data
   * type)
   */
  public static String toRSString(byte[] buf) {
    if (buf == null) {
      return null;
    }
    StringBuilder stringBuilder = new StringBuilder(2 * buf.length);
    for (byte element : buf) {
      int elementAsInt = (int) element;
      if (elementAsInt < 0) {
        elementAsInt = 256 + elementAsInt;
      }
      // we escape the same non-printable characters as the backend
      // we must escape all 8bit characters otherwise when convering
      // from java unicode to the db character set we may end up with
      // question marks if the character set is SQL_ASCII
      if (elementAsInt < 040 || elementAsInt > 0176) {
        // escape charcter with the form \000, but need two \\ because of
        // the Java parser
        stringBuilder.append("\\");
        stringBuilder.append((char) (((elementAsInt >> 6) & 0x3) + 48));
        stringBuilder.append((char) (((elementAsInt >> 3) & 0x7) + 48));
        stringBuilder.append((char) ((elementAsInt & 0x07) + 48));
      } else if (element == (byte) '\\') {
        // escape the backslash character as \\, but need four \\\\ because
        // of the Java parser
        stringBuilder.append("\\\\");
      } else {
        // other characters are left alone
        stringBuilder.append((char) element);
      }
    }
    return stringBuilder.toString();
  }
}
8,448
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/util/ByteConverter.java
/*
 * Copyright (c) 2011, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.util;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.MathContext;
import java.nio.CharBuffer;
import java.util.Arrays;

/**
 * Helper methods to parse java base types from byte arrays.
 *
 * @author Mikko Tiihonen
 */
public class ByteConverter {

  private static final int NBASE = 10000;
  private static final int NUMERIC_DSCALE_MASK = 0x00003FFF;
  private static final short NUMERIC_POS = 0x0000;
  private static final short NUMERIC_NEG = 0x4000;
  private static final short NUMERIC_NAN = (short) 0xC000;
  private static final int DEC_DIGITS = 4;
  private static final int[] round_powers = {0, 1000, 100, 10};
  private static final int SHORT_BYTES = 2;
  private static final int LONG_BYTES = 4;

  private ByteConverter() {
    // prevent instantiation of static helper class
  }

  /**
   * Convert a variable length array of bytes to an integer.
   *
   * @param bytes array of bytes that can be decoded as an integer (length 1, 2 or 4)
   * @return integer
   * @throws IllegalArgumentException for any unsupported length
   */
  public static int bytesToInt(byte []bytes) {
    if ( bytes.length == 1 ) {
      return (int)bytes[0];
    }
    if ( bytes.length == SHORT_BYTES ) {
      return int2(bytes, 0);
    }
    if ( bytes.length == LONG_BYTES ) {
      return int4(bytes, 0);
    } else {
      // NOTE(review): message is misleading — this is thrown for any length
      // other than 1/2/4, not only for an empty array.
      throw new IllegalArgumentException("Argument bytes is empty");
    }
  }

  /**
   * Append the decimal representation of one base-10000 digit to the buffer.
   *
   * @param idx index of the digit to be converted in the digits array
   *     (out-of-range indices are treated as zero)
   * @param digits array of base-10000 digits of the number
   * @param buffer the character buffer to put the text representation in
   * @param alwaysPutIt when true, emit all four decimal positions; when false
   *     (first digit only), suppress leading decimal zeroes
   */
  private static void digitToString(int idx, short[] digits, CharBuffer buffer,
      boolean alwaysPutIt) {
    short dig = (idx >= 0 && idx < digits.length) ? digits[idx] : 0;
    // putit becomes (and stays) true once the first non-zero decimal position
    // is emitted. The previous implementation recomputed it per position,
    // which dropped interior zeroes of the leading digit (e.g. 1005 -> "15");
    // PostgreSQL's get_str_from_var keeps it sticky, as done here.
    boolean putit = alwaysPutIt;
    for (int p = 1; p < round_powers.length; p++) {
      int pow = round_powers[p];
      short d1 = (short) (dig / pow);
      dig -= d1 * pow;
      putit |= (d1 > 0);
      if (putit) {
        buffer.put((char) (d1 + '0'));
      }
    }
    // The ones position is always emitted.
    buffer.put((char) (dig + '0'));
  }

  /**
   * Convert a number from its base-10000 binary representation to text.
   *
   * @param digits array of base-10000 digits of the number
   * @param scale the scale of the number binary representation
   * @param weight the weight of the number binary representation
   * @param sign the sign of the number (NUMERIC_POS or NUMERIC_NEG)
   * @return the number as String
   */
  private static String numberBytesToString(short[] digits, int scale, int weight, int sign) {
    CharBuffer buffer;
    int i;
    int d;

    /*
     * Allocate space for the result.
     *
     * i is set to the # of decimal digits before decimal point. dscale is the
     * # of decimal digits we will print after decimal point. We may generate
     * as many as DEC_DIGITS-1 excess digits at the end, and in addition we
     * need room for sign, decimal point, null terminator.
     */
    i = (weight + 1) * DEC_DIGITS;
    if (i <= 0) {
      i = 1;
    }
    buffer = CharBuffer.allocate((i + scale + DEC_DIGITS + 2));

    /*
     * Output a dash for negative values
     */
    if (sign == NUMERIC_NEG) {
      buffer.put('-');
    }

    /*
     * Output all digits before the decimal point
     */
    if (weight < 0) {
      d = weight + 1;
      buffer.put('0');
    } else {
      for (d = 0; d <= weight; d++) {
        /* In the first digit, suppress extra leading decimal zeroes */
        digitToString(d, digits, buffer, d != 0);
      }
    }

    /*
     * If requested, output a decimal point and all the digits that follow it.
     * We initially put out a multiple of DEC_DIGITS digits, then truncate if
     * needed.
     */
    if (scale > 0) {
      buffer.put('.');
      for (i = 0; i < scale; d++, i += DEC_DIGITS) {
        digitToString(d, digits, buffer, true);
      }
    }

    /*
     * terminate the string and return it, trimming the excess fractional
     * digits emitted beyond the requested scale
     */
    int extra = (i - scale) % DEC_DIGITS;
    return new String(buffer.array(), 0, buffer.position() - extra);
  }

  /**
   * Convert a PostgreSQL-format binary numeric value to a Number.
   *
   * @param bytes array of bytes that can be decoded as a numeric
   * @return BigDecimal, or Double.NaN for the NaN encoding
   */
  public static Number numeric(byte [] bytes) {
    return numeric(bytes, 0, bytes.length);
  }

  /**
   * Convert a variable length array of bytes to a Number.
   *
   * @param bytes array of column bytes that can be decoded as Numeric
   * @param precision precision of the defined column
   * @param scale scale of the defined column
   * @return Number value of the given unscaled byte array
   */
  public static Number redshiftNumeric(byte [] bytes, int precision, int scale) {
    return redshiftNumeric(bytes, 0, bytes.length, precision, scale);
  }

  /**
   * Convert a PostgreSQL-format binary numeric value to a Number.
   *
   * @param bytes array of bytes that can be decoded as a numeric
   * @param pos index of the start position of the bytes array for number
   * @param numBytes number of bytes to use, length is already encoded
   *     in the binary format but this is used for double checking
   * @return BigDecimal, or Double.NaN for the NaN encoding
   * @throws IllegalArgumentException when the header or digits are invalid
   */
  public static Number numeric(byte [] bytes, int pos, int numBytes) {
    if (numBytes < 8) {
      throw new IllegalArgumentException("number of bytes should be at-least 8");
    }

    // Header: #digits, weight (base-10000 exponent of the first digit),
    // sign flag and display scale — each a big-endian short.
    short len = ByteConverter.int2(bytes, pos);
    short weight = ByteConverter.int2(bytes, pos + 2);
    short sign = ByteConverter.int2(bytes, pos + 4);
    short scale = ByteConverter.int2(bytes, pos + 6);

    if (numBytes != (len * SHORT_BYTES + 8)) {
      throw new IllegalArgumentException("invalid length of bytes \"numeric\" value");
    }

    if (!(sign == NUMERIC_POS || sign == NUMERIC_NEG || sign == NUMERIC_NAN)) {
      throw new IllegalArgumentException("invalid sign in \"numeric\" value");
    }

    if (sign == NUMERIC_NAN) {
      return Double.NaN;
    }

    if ((scale & NUMERIC_DSCALE_MASK) != scale) {
      throw new IllegalArgumentException("invalid scale in \"numeric\" value");
    }

    short[] digits = new short[len];
    int idx = pos + 8;
    for (int i = 0; i < len; i++) {
      short d = ByteConverter.int2(bytes, idx);
      idx += 2;
      if (d < 0 || d >= NBASE) {
        throw new IllegalArgumentException("invalid digit in \"numeric\" value");
      }
      digits[i] = d;
    }

    String numString = numberBytesToString(digits, scale, weight, sign);
    return new BigDecimal(numString);
  }

  /**
   * Convert a variable length array of bytes to a Number.
   *
   * @param bytes array of column bytes that can be decoded as Numeric
   * @param pos index of the start position of the bytes array for number
   * @param numBytes number of bytes to use, length is already encoded
   *     in the binary format but this is used for double checking
   * @param precision precision of the defined column
   * @param scale scale of the defined column
   * @return Number value of the given unscaled byte array
   */
  public static Number redshiftNumeric(byte [] bytes, int pos, int numBytes,
      int precision, int scale) {
    if (numBytes != 8 && numBytes != 16) {
      throw new IllegalArgumentException("number of bytes should be 8 or 16");
    }
    // The previous implementation ignored pos and consumed the whole array;
    // honor the requested window (identical result for the common
    // pos == 0, numBytes == bytes.length case).
    byte[] unscaled = (pos == 0 && numBytes == bytes.length)
        ? bytes
        : Arrays.copyOfRange(bytes, pos, pos + numBytes);
    BigInteger bigInt = new BigInteger(unscaled);
    return new BigDecimal(bigInt, scale, new MathContext(precision));
  }

  /**
   * Convert BigDecimal value into scaled bytes.
   *
   * @param val BigDecimal value to be converted into bytes
   * @param precision Precision of column
   * @param scale Scale of column
   * @return Scaled bytes (big-endian, zero-padded to 8 or 16 bytes) of the
   *     given BigDecimal value
   * @throws ArithmeticException if rescaling to {@code scale} would require rounding
   */
  public static byte[] redshiftNumeric(BigDecimal val, int precision, int scale) {
    // BigDecimal is immutable: the previous implementation discarded the
    // results of setScale()/round(), serializing the value at its original
    // scale. Reassign so the unscaled bytes reflect the column scale.
    // setScale(int) without a rounding mode still throws ArithmeticException
    // when rounding would be required, exactly as the original call did.
    val = val.setScale(scale);
    val = val.round(new MathContext(precision));
    byte[] bigDecimalBytes = val.unscaledValue().toByteArray();

    byte[] rc = bigDecimalBytes;
    int paddingZeros = 0;
    if (bigDecimalBytes.length != 8 && bigDecimalBytes.length != 16) {
      if (bigDecimalBytes.length < 8) {
        rc = new byte[8];
        paddingZeros = 8 - bigDecimalBytes.length;
      } else if (bigDecimalBytes.length < 16) {
        rc = new byte[16];
        paddingZeros = 16 - bigDecimalBytes.length;
      }
    }

    // NOTE(review): zero-padding the high bytes is only correct for
    // non-negative values; a negative two's-complement value would need 0xFF
    // sign extension. Verify against the server wire format before changing.
    if (paddingZeros > 0) {
      System.arraycopy(bigDecimalBytes, 0, rc, paddingZeros, bigDecimalBytes.length);
    }

    return rc;
  }

  /**
   * Parses a long value from the byte array (big-endian).
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed long value.
   */
  public static long int8(byte[] bytes, int idx) {
    return ((long) (bytes[idx + 0] & 255) << 56)
        + ((long) (bytes[idx + 1] & 255) << 48)
        + ((long) (bytes[idx + 2] & 255) << 40)
        + ((long) (bytes[idx + 3] & 255) << 32)
        + ((long) (bytes[idx + 4] & 255) << 24)
        + ((long) (bytes[idx + 5] & 255) << 16)
        + ((long) (bytes[idx + 6] & 255) << 8)
        + (bytes[idx + 7] & 255);
  }

  /**
   * Parses an int value from the byte array (big-endian).
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed int value.
   */
  public static int int4(byte[] bytes, int idx) {
    return ((bytes[idx] & 255) << 24)
        + ((bytes[idx + 1] & 255) << 16)
        + ((bytes[idx + 2] & 255) << 8)
        + ((bytes[idx + 3] & 255));
  }

  /**
   * Parses a short value from the byte array (big-endian).
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed short value.
   */
  public static short int2(byte[] bytes, int idx) {
    return (short) (((bytes[idx] & 255) << 8) + ((bytes[idx + 1] & 255)));
  }

  /**
   * Parses a boolean value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index to read from bytes.
   * @return parsed boolean value (true iff the byte equals 1).
   */
  public static boolean bool(byte[] bytes, int idx) {
    return bytes[idx] == 1;
  }

  /**
   * Parses a float value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed float value.
   */
  public static float float4(byte[] bytes, int idx) {
    return Float.intBitsToFloat(int4(bytes, idx));
  }

  /**
   * Parses a double value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed double value.
   */
  public static double float8(byte[] bytes, int idx) {
    return Double.longBitsToDouble(int8(bytes, idx));
  }

  /**
   * Encodes a long value to the byte array (big-endian).
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void int8(byte[] target, int idx, long value) {
    target[idx + 0] = (byte) (value >>> 56);
    target[idx + 1] = (byte) (value >>> 48);
    target[idx + 2] = (byte) (value >>> 40);
    target[idx + 3] = (byte) (value >>> 32);
    target[idx + 4] = (byte) (value >>> 24);
    target[idx + 5] = (byte) (value >>> 16);
    target[idx + 6] = (byte) (value >>> 8);
    target[idx + 7] = (byte) value;
  }

  /**
   * Encodes an int value to the byte array (big-endian).
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void int4(byte[] target, int idx, int value) {
    target[idx + 0] = (byte) (value >>> 24);
    target[idx + 1] = (byte) (value >>> 16);
    target[idx + 2] = (byte) (value >>> 8);
    target[idx + 3] = (byte) value;
  }

  /**
   * Encodes a short value to the byte array (big-endian, low 16 bits of value).
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void int2(byte[] target, int idx, int value) {
    target[idx + 0] = (byte) (value >>> 8);
    target[idx + 1] = (byte) value;
  }

  /**
   * Encodes a boolean value to the byte array (1 for true, 0 for false).
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void bool(byte[] target, int idx, boolean value) {
    target[idx] = value ? (byte) 1 : (byte) 0;
  }

  /**
   * Encodes a float value to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void float4(byte[] target, int idx, float value) {
    int4(target, idx, Float.floatToRawIntBits(value));
  }

  /**
   * Encodes a double value to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void float8(byte[] target, int idx, double value) {
    int8(target, idx, Double.doubleToRawLongBits(value));
  }
}
8,449
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_fr.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_fr extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[794]; t[0] = ""; t[1] = "Project-Id-Version: head-fr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2007-07-27 12:27+0200\nLast-Translator: \nLanguage-Team: <en@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.11.4\nPlural-Forms: nplurals=2; plural=(n > 1);\n"; t[4] = "DataSource has been closed."; t[5] = "DataSource a été fermée."; t[18] = "Where: {0}"; t[19] = "Où : {0}"; t[26] = "The connection attempt failed."; t[27] = "La tentative de connexion a échoué."; t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[29] = "Actuellement positionné après la fin du ResultSet. Vous ne pouvez pas appeler deleteRow() ici."; t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; t[33] = "Impossible d''utiliser les fonctions de requête qui utilisent une chaîne de caractères sur un PreparedStatement."; t[36] = "Multiple ResultSets were returned by the query."; t[37] = "Plusieurs ResultSets ont été retournés par la requête."; t[50] = "Too many update results were returned."; t[51] = "Trop de résultats de mise à jour ont été retournés."; t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[59] = "Séquence UTF-8 illégale: le premier octet est {0}: {1}"; t[66] = "The column name {0} was not found in this ResultSet."; t[67] = "Le nom de colonne {0} n''a pas été trouvé dans ce ResultSet."; t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; t[71] = "Appel Fastpath {0} - Aucun résultat n''a été retourné et nous attendions un entier."; t[74] = "Protocol error. Session setup failed."; t[75] = "Erreur de protocole. 
Ouverture de la session en échec."; t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[77] = "Un CallableStatement a été déclaré, mais aucun appel à registerOutParameter(1, <un type>) n''a été fait."; t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[79] = "Les ResultSets avec la concurrence CONCUR_READ_ONLY ne peuvent être mis à jour."; t[90] = "LOB positioning offsets start at 1."; t[91] = "Les décalages de position des LOB commencent à 1."; t[92] = "Internal Position: {0}"; t[93] = "Position interne : {0}"; t[96] = "free() was called on this LOB previously"; t[97] = "free() a été appelée auparavant sur ce LOB"; t[100] = "Cannot change transaction read-only property in the middle of a transaction."; t[101] = "Impossible de changer la propriété read-only d''une transaction au milieu d''une transaction."; t[102] = "The JVM claims not to support the {0} encoding."; t[103] = "La JVM prétend ne pas supporter l''encodage {0}."; t[108] = "{0} function doesn''t take any argument."; t[109] = "La fonction {0} n''accepte aucun argument."; t[112] = "xid must not be null"; t[113] = "xid ne doit pas être nul"; t[114] = "Connection has been closed."; t[115] = "La connexion a été fermée."; t[122] = "The server does not support SSL."; t[123] = "Le serveur ne supporte pas SSL."; t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[141] = "Séquence UTF-8 illégale: l''octet {0} de la séquence d''octet {1} n''est pas 10xxxxxx: {2}"; t[148] = "Hint: {0}"; t[149] = "Indice : {0}"; t[152] = "Unable to find name datatype in the system catalogs."; t[153] = "Incapable de trouver le type de donnée name dans les catalogues systèmes."; t[156] = "Unsupported Types value: {0}"; t[157] = "Valeur de type non supportée : {0}"; t[158] = "Unknown type {0}."; t[159] = "Type inconnu : {0}."; t[166] = "{0} function takes two and only two arguments."; t[167] = "La fonction {0} 
n''accepte que deux et seulement deux arguments."; t[170] = "Finalizing a Connection that was never closed:"; t[171] = "Destruction d''une connection qui n''a jamais été fermée:"; t[180] = "The maximum field size must be a value greater than or equal to 0."; t[181] = "La taille maximum des champs doit être une valeur supérieure ou égale à 0."; t[186] = "Redshift LOBs can only index to: {0}"; t[187] = "Les LOB Redshift peuvent seulement s''indicer à: {0}"; t[194] = "Method {0} is not yet implemented."; t[195] = "La fonction {0} n''est pas encore implémentée."; t[198] = "Error loading default settings from driverconfig.properties"; t[199] = "Erreur de chargement des valeurs par défaut depuis driverconfig.properties"; t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; t[201] = "Les résultats ne peuvent être récupérés à partir d''un CallableStatement avant qu''il ne soit exécuté."; t[202] = "Large Objects may not be used in auto-commit mode."; t[203] = "Les Large Objects ne devraient pas être utilisés en mode auto-commit."; t[208] = "Expected command status BEGIN, got {0}."; t[209] = "Attendait le statut de commande BEGIN, obtenu {0}."; t[218] = "Invalid fetch direction constant: {0}."; t[219] = "Constante de direction pour la récupération invalide : {0}."; t[222] = "{0} function takes three and only three arguments."; t[223] = "La fonction {0} n''accepte que trois et seulement trois arguments."; t[226] = "Error during recover"; t[227] = "Erreur durant la restauration"; t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[229] = "Impossible de mettre à jour le ResultSet car c''est soit avant le début ou après la fin des résultats."; t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[233] = "Un paramètre de type {0} a été enregistré, mais un appel à get{1} (sqltype={2}) a été fait."; t[240] = "Cannot establish a savepoint in 
auto-commit mode."; t[241] = "Impossible d''établir un savepoint en mode auto-commit."; t[242] = "Cannot retrieve the id of a named savepoint."; t[243] = "Impossible de retrouver l''identifiant d''un savepoint nommé."; t[244] = "The column index is out of range: {0}, number of columns: {1}."; t[245] = "L''indice de la colonne est hors limite : {0}, nombre de colonnes : {1}."; t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[251] = "Quelque chose d''inhabituel a provoqué l''échec du pilote. Veuillez faire un rapport sur cette erreur."; t[260] = "Cannot cast an instance of {0} to type {1}"; t[261] = "Impossible de convertir une instance de {0} vers le type {1}"; t[264] = "Unknown Types value."; t[265] = "Valeur de Types inconnue."; t[266] = "Invalid stream length {0}."; t[267] = "Longueur de flux invalide {0}."; t[272] = "Cannot retrieve the name of an unnamed savepoint."; t[273] = "Impossible de retrouver le nom d''un savepoint sans nom."; t[274] = "Unable to translate data into the desired encoding."; t[275] = "Impossible de traduire les données dans l''encodage désiré."; t[276] = "Expected an EOF from server, got: {0}"; t[277] = "Attendait une fin de fichier du serveur, reçu: {0}"; t[278] = "Bad value for type {0} : {1}"; t[279] = "Mauvaise valeur pour le type {0} : {1}"; t[280] = "The server requested password-based authentication, but no password was provided."; t[281] = "Le serveur a demandé une authentification par mots de passe, mais aucun mot de passe n''a été fourni."; t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[297] = "Le troncage des large objects n''est implémenté que dans les serveurs 8.3 et supérieurs."; t[298] = "This PooledConnection has already been closed."; t[299] = "Cette PooledConnection a déjà été fermée."; t[306] = "Fetch size must be a value greater to or equal to 0."; t[307] = "Fetch size doit être une valeur supérieur ou égal à 0."; t[312] = 
"A connection could not be made using the requested protocol {0}."; t[313] = "Aucune connexion n''a pu être établie en utilisant le protocole demandé {0}. "; t[322] = "There are no rows in this ResultSet."; t[323] = "Il n''y pas pas de lignes dans ce ResultSet."; t[324] = "Unexpected command status: {0}."; t[325] = "Statut de commande inattendu : {0}."; t[334] = "Not on the insert row."; t[335] = "Pas sur la ligne en insertion."; t[344] = "Server SQLState: {0}"; t[345] = "SQLState serveur : {0}"; t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[349] = "Le paramètre serveur standard_conforming_strings a pour valeur {0}. Le driver JDBC attend on ou off."; t[360] = "The driver currently does not support COPY operations."; t[361] = "Le pilote ne supporte pas actuellement les opérations COPY."; t[364] = "The array index is out of range: {0}, number of elements: {1}."; t[365] = "L''indice du tableau est hors limites : {0}, nombre d''éléments : {1}."; t[374] = "suspend/resume not implemented"; t[375] = "suspend/resume pas implémenté"; t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; t[379] = "Pas implémenté: le commit à une phase doit avoir lieu en utilisant la même connection que celle où il a commencé"; t[398] = "Cannot call cancelRowUpdates() when on the insert row."; t[399] = "Impossible d''appeler cancelRowUpdates() pendant l''insertion d''une ligne."; t[400] = "Cannot reference a savepoint after it has been released."; t[401] = "Impossible de référencer un savepoint après qu''il ait été libéré."; t[402] = "You must specify at least one column value to insert a row."; t[403] = "Vous devez spécifier au moins une valeur de colonne pour insérer une ligne."; t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[405] = "Incapable de déterminer la valeur de MaxIndexKeys en raison de données 
manquante dans lecatalogue système."; t[412] = "The JVM claims not to support the encoding: {0}"; t[413] = "La JVM prétend ne pas supporter l''encodage: {0}"; t[414] = "{0} function takes two or three arguments."; t[415] = "La fonction {0} n''accepte que deux ou trois arguments."; t[440] = "Unexpected error writing large object to database."; t[441] = "Erreur inattendue pendant l''écriture de large object dans la base."; t[442] = "Zero bytes may not occur in string parameters."; t[443] = "Zéro octets ne devrait pas se produire dans les paramètres de type chaîne de caractères."; t[444] = "A result was returned when none was expected."; t[445] = "Un résultat a été retourné alors qu''aucun n''était attendu."; t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[451] = "Le ResultSet n''est pas modifiable. La requête qui a généré ce résultat doit sélectionner seulement une table, et doit sélectionner toutes les clés primaires de cette table. Voir la spécification de l''API JDBC 2.1, section 5.6 pour plus de détails."; t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[455] = "La longueur du message de liaison {0} est trop grande. 
Cela peut être causé par des spécification de longueur très grandes ou incorrectes pour les paramètres de type InputStream."; t[460] = "Statement has been closed."; t[461] = "Statement a été fermé."; t[462] = "No value specified for parameter {0}."; t[463] = "Pas de valeur spécifiée pour le paramètre {0}."; t[468] = "The array index is out of range: {0}"; t[469] = "L''indice du tableau est hors limites : {0}"; t[474] = "Unable to bind parameter values for statement."; t[475] = "Incapable de lier les valeurs des paramètres pour la commande."; t[476] = "Can''t refresh the insert row."; t[477] = "Impossible de rafraîchir la ligne insérée."; t[480] = "No primary key found for table {0}."; t[481] = "Pas de clé primaire trouvée pour la table {0}."; t[482] = "Cannot change transaction isolation level in the middle of a transaction."; t[483] = "Impossible de changer le niveau d''isolation des transactions au milieu d''une transaction."; t[498] = "Provided InputStream failed."; t[499] = "L''InputStream fourni a échoué."; t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[501] = "L''indice du paramètre est hors limites : {0}, nombre de paramètres : {1}."; t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[503] = "Le paramètre DateStyle du serveur a été changé pour {0}. Le pilote JDBC nécessite que DateStyle commence par ISO pour un fonctionnement correct."; t[508] = "Connection attempt timed out."; t[509] = "La tentative de connexion a échoué dans le délai imparti."; t[512] = "Internal Query: {0}"; t[513] = "Requête interne: {0}"; t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[519] = "Le type d''authentification {0} n''est pas supporté. 
Vérifiez que vous avez configuré le fichier pg_hba.conf pour inclure l''adresse IP du client ou le sous-réseau et qu''il utilise un schéma d''authentification supporté par le pilote."; t[526] = "Interval {0} not yet implemented"; t[527] = "L''interval {0} n''est pas encore implémenté"; t[532] = "Conversion of interval failed"; t[533] = "La conversion de l''intervalle a échoué"; t[540] = "Query timeout must be a value greater than or equals to 0."; t[541] = "Query timeout doit être une valeur supérieure ou égale à 0."; t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[543] = "La connexion a été fermée automatiquement car une nouvelle connexion a été ouverte pour la même PooledConnection ou la PooledConnection a été fermée."; t[544] = "ResultSet not positioned properly, perhaps you need to call next."; t[545] = "Le ResultSet n''est pas positionné correctement, vous devez peut-être appeler next()."; t[550] = "This statement has been closed."; t[551] = "Ce statement a été fermé."; t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; t[553] = "Impossible de déduire le type SQL à utiliser pour une instance de {0}. Utilisez setObject() avec une valeur de type explicite pour spécifier le type à utiliser."; t[554] = "Cannot call updateRow() when on the insert row."; t[555] = "Impossible d''appeler updateRow() tant que l''on est sur la ligne insérée."; t[562] = "Detail: {0}"; t[563] = "Détail : {0}"; t[566] = "Cannot call deleteRow() when on the insert row."; t[567] = "Impossible d''appeler deleteRow() pendant l''insertion d''une ligne."; t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[569] = "Actuellement positionné avant le début du ResultSet. 
Vous ne pouvez pas appeler deleteRow() ici."; t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[577] = "Séquence UTF-8 illégale: la valeur finale est une valeur de remplacement: {0}"; t[578] = "Unknown Response Type {0}."; t[579] = "Type de réponse inconnu {0}."; t[582] = "Unsupported value for stringtype parameter: {0}"; t[583] = "Valeur non supportée pour les paramètre de type chaîne de caractères : {0}"; t[584] = "Conversion to type {0} failed: {1}."; t[585] = "La conversion vers le type {0} a échoué : {1}."; t[586] = "Conversion of money failed."; t[587] = "La conversion de money a échoué."; t[600] = "Unable to load the class {0} responsible for the datatype {1}"; t[601] = "Incapable de charger la classe {0} responsable du type de données {1}"; t[604] = "The fastpath function {0} is unknown."; t[605] = "La fonction fastpath {0} est inconnue."; t[608] = "Malformed function or procedure escape syntax at offset {0}."; t[609] = "Syntaxe de fonction ou d''échappement de procédure malformée à l''indice {0}."; t[612] = "Provided Reader failed."; t[613] = "Le Reader fourni a échoué."; t[614] = "Maximum number of rows must be a value grater than or equal to 0."; t[615] = "Le nombre maximum de lignes doit être une valeur supérieure ou égale à 0."; t[616] = "Failed to create object for: {0}."; t[617] = "Échec à la création de l''objet pour : {0}."; t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[623] = "Fin prématurée du flux en entrée, {0} octets attendus, mais seulement {1} lus."; t[626] = "An unexpected result was returned by a query."; t[627] = "Un résultat inattendu a été retourné par une requête."; t[646] = "An error occurred while setting up the SSL connection."; t[647] = "Une erreur s''est produite pendant l''établissement de la connexion SSL."; t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[655] = "Séquence UTF-8 illégale: {0} octets utilisé pour encoder une valeur 
à {1} octets: {2}"; t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[659] = "La classe SSLSocketFactory fournie {0} n''a pas pu être instanciée."; t[670] = "Position: {0}"; t[671] = "Position : {0}"; t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[677] = "Localisation : Fichier : {0}, Routine : {1}, Ligne : {2}"; t[684] = "Cannot tell if path is open or closed: {0}."; t[685] = "Impossible de dire si path est fermé ou ouvert : {0}."; t[700] = "Cannot convert an instance of {0} to type {1}"; t[701] = "Impossible de convertir une instance de type {0} vers le type {1}"; t[710] = "{0} function takes four and only four argument."; t[711] = "La fonction {0} n''accepte que quatre et seulement quatre arguments."; t[718] = "Interrupted while attempting to connect."; t[719] = "Interrompu pendant l''établissement de la connexion."; t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[723] = "Séquence UTF-8 illégale: la valeur finale est en dehors des limites: {0}"; t[734] = "No function outputs were registered."; t[735] = "Aucune fonction outputs n''a été enregistrée."; t[736] = "{0} function takes one and only one argument."; t[737] = "La fonction {0} n''accepte qu''un et un seul argument."; t[744] = "This ResultSet is closed."; t[745] = "Ce ResultSet est fermé."; t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[747] = "Des données de caractères invalides ont été trouvées. C''est probablement causé par le stockage de caractères invalides pour le jeu de caractères de création de la base. 
L''exemple le plus courant est le stockage de données 8bit dans une base SQL_ASCII."; t[750] = "An I/O error occurred while sending to the backend."; t[751] = "Une erreur d''entrée/sortie a eu lieu lors d''envoi vers le serveur."; t[752] = "Error disabling autocommit"; t[753] = "Erreur en désactivant autocommit"; t[754] = "Ran out of memory retrieving query results."; t[755] = "Ai manqué de mémoire en récupérant les résultats de la requête."; t[756] = "Returning autogenerated keys is not supported."; t[757] = "Le renvoi des clés automatiquement générées n''est pas supporté."; t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[761] = "L''opération nécessite un scrollable ResultSet, mais ce ResultSet est FORWARD_ONLY."; t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[763] = "Une fonction CallableStatement a été exécutée et le paramètre en sortie {0} était du type {1} alors que le type {2} était prévu."; t[768] = "Unknown ResultSet holdability setting: {0}."; t[769] = "Paramètre holdability du ResultSet inconnu : {0}."; t[772] = "Transaction isolation level {0} not supported."; t[773] = "Le niveau d''isolation de transaction {0} n''est pas supporté."; t[774] = "Zero bytes may not occur in identifiers."; t[775] = "Des octects à 0 ne devraient pas apparaître dans les identifiants."; t[776] = "No results were returned by the query."; t[777] = "Aucun résultat retourné par la requête."; t[778] = "A CallableStatement was executed with nothing returned."; t[779] = "Un CallableStatement a été exécuté mais n''a rien retourné."; t[780] = "wasNull cannot be call before fetching a result."; t[781] = "wasNull ne peut pas être appelé avant la récupération d''un résultat."; t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[787] = "Cette requête ne déclare pas de paramètre OUT. Utilisez '{' ?= call ... 
'}' pour en déclarer un."; t[788] = "Can''t use relative move methods while on the insert row."; t[789] = "Impossible d''utiliser les fonctions de déplacement relatif pendant l''insertion d''une ligne."; t[792] = "Connection is busy with another transaction"; t[793] = "La connection est occupée avec une autre transaction"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 397) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 395) + 1) << 1; for (;;) { idx += incr; if (idx >= 794) idx -= 794; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 794 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 794); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 794 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,450
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_bg.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_bg extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[890]; t[0] = ""; t[1] = "Project-Id-Version: JDBC Driver for PostgreSQL 8.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-12-28 00:01+0100\nLast-Translator: <usun0v@mail.bg>\nLanguage-Team: <bg@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Bulgarian\nX-Poedit-Country: BULGARIA\n"; t[2] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[3] = "CallableStatement функция бе обработена и изходния параметър {0} бе от тип {1}, обаче тип {2} бе използван."; t[6] = "Too many update results were returned."; t[7] = "Твърде много резултати бяха получени при актуализацията."; t[10] = "There are no rows in this ResultSet."; t[11] = "В този ResultSet няма редове."; t[14] = "Detail: {0}"; t[15] = "Подробност: {0}"; t[20] = "Invalid fetch direction constant: {0}."; t[21] = "Невалидна константа за fetch посоката: {0}."; t[22] = "No function outputs were registered."; t[23] = "Резултати от функцията не бяха регистрирани."; t[24] = "The array index is out of range: {0}"; t[25] = "Индексът на масив е извън обхвата: {0}"; t[26] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[27] = "Тип на удостоверяване {0} не се поддържа. 
Проверете дали сте конфигурирали pg_hba.conf файла, да включва IP адреса на клиента или подмрежата, и че се използва схема за удостоверяване, поддържана от драйвъра."; t[28] = "The server requested password-based authentication, but no password was provided."; t[29] = "Сървърът изисква идентифициране с парола, но парола не бе въведена."; t[40] = "Large Objects may not be used in auto-commit mode."; t[41] = "Големи обекти LOB не могат да се използват в auto-commit модус."; t[46] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[47] = "Операцията изисква резултатите да са scrollable, но този ResultSet е FORWARD_ONLY."; t[48] = "Zero bytes may not occur in string parameters."; t[49] = "Не може да има нула байта в низ параметрите."; t[50] = "The JVM claims not to support the encoding: {0}"; t[51] = "JVM не поддържа тази кодова таблица за момента: {0}"; t[54] = "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; t[55] = "Връзката не бе осъществена, поради вашите настройки за сигурност. 
Може би трябва да предоставите java.net.SocketPermission права на сървъра и порта с базата данни, към който искате да се свържете."; t[62] = "Database connection failed when canceling copy operation"; t[63] = "Неосъществена връзка към базата данни при прекъсване на копирането"; t[78] = "Error loading default settings from driverconfig.properties"; t[79] = "Грешка при зареждане на настройките по подразбиране от файла driverconfig.properties"; t[82] = "Returning autogenerated keys is not supported."; t[83] = "Автоматично генерирани ключове не се поддържат."; t[92] = "Unable to find name datatype in the system catalogs."; t[93] = "Не може да се намери името на типа данни в системните каталози."; t[94] = "Tried to read from inactive copy"; t[95] = "Опит за четене при неактивно копиране"; t[96] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[97] = "ResultSet не може да се обновява. Заявката генерираща този резултат трябва да селектира само една таблица, както и всички първични ключове в нея. За повече информация, вижте раздел 5.6 на JDBC 2.1 API Specification."; t[98] = "Cannot cast an instance of {0} to type {1}"; t[99] = "Не може да преобразува инстанция на {0} към тип {1}"; t[102] = "Requested CopyOut but got {0}"; t[103] = "Зададено CopyOut но получено {0}"; t[106] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; t[107] = "Невъзможна комбинация: Prepare трябва да бъде издадено чрез използване на същата връзка, при която е започната транзакцията. 
currentXid={0}, prepare xid={1}"; t[108] = "Can''t use query methods that take a query string on a PreparedStatement."; t[109] = "Не може да се употребяват методи за заявка, които ползват низове на PreparedStatement."; t[114] = "Conversion of money failed."; t[115] = "Неуспешно валутно преобразуване."; t[118] = "Tried to obtain lock while already holding it"; t[119] = "Опит за получаване на заключване/резервация докато вече е получено"; t[120] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; t[121] = "Този SQLXML обект не е инициализиран, така че не могат да се извличат данни от него."; t[122] = "This SQLXML object has already been freed."; t[123] = "Този SQLXML обект вече е освободен."; t[124] = "Invalid stream length {0}."; t[125] = "Невалидна дължина {0} на потока данни."; t[130] = "Position: {0}"; t[131] = "Позиция: {0}"; t[132] = "The server does not support SSL."; t[133] = "Сървърът не поддържа SSL."; t[134] = "Got {0} error responses to single copy cancel request"; t[135] = "Получени {0} отговори за грешка при единствено искане да се прекъсне копирането"; t[136] = "DataSource has been closed."; t[137] = "Източникът на данни е прекъснат."; t[138] = "Unable to convert DOMResult SQLXML data to a string."; t[139] = "Не може да преобразува DOMResult SQLXML данни в низ."; t[144] = "Invalid UUID data."; t[145] = "Невалидни UUID данни."; t[148] = "The fastpath function {0} is unknown."; t[149] = "Функцията {0} е неизвестна."; t[154] = "Connection has been closed."; t[155] = "Връзката бе прекъсната."; t[156] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[157] = "Тази заявка не декларира изходен параметър. Ползвайте '{' ?= call ... 
'}' за да декларирате такъв."; t[158] = "A connection could not be made using the requested protocol {0}."; t[159] = "Не може да осъществи връзка, ползвайки искания протокол {0}."; t[162] = "The maximum field size must be a value greater than or equal to 0."; t[163] = "Максималният размер на полето трябва да бъде стойност по-голяма или равна на 0."; t[166] = "GSS Authentication failed"; t[167] = "GSS удостоверяването бе неуспешно"; t[176] = "Unknown XML Result class: {0}"; t[177] = "Неизвестен XML изходящ клас: {0}"; t[180] = "Server SQLState: {0}"; t[181] = "SQL статус на сървъра: {0}"; t[182] = "Unknown Response Type {0}."; t[183] = "Неизвестен тип на отговор {0}."; t[186] = "Tried to cancel an inactive copy operation"; t[187] = "Опит за прекъсване на неактивно копиране"; t[190] = "This PooledConnection has already been closed."; t[191] = "Тази PooledConnection връзка бе вече прекъсната."; t[200] = "Multiple ResultSets were returned by the query."; t[201] = "Заявката върна няколко ResultSets."; t[202] = "Finalizing a Connection that was never closed:"; t[203] = "Приключване на връзка, която не бе прекъсната:"; t[204] = "Unsupported Types value: {0}"; t[205] = "Неподдържана стойност за тип: {0}"; t[206] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[207] = "CallableStatement функция бе декларирана, но обработена като registerOutParameter(1, <some type>) "; t[208] = "Cannot retrieve the name of an unnamed savepoint."; t[209] = "Не може да определи името на неупомената savepoint."; t[220] = "Cannot change transaction read-only property in the middle of a transaction."; t[221] = "Не може да променяте правата на транзакцията по време на нейното извършване."; t[222] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[223] = "Прекалено голяма дължина {0} на съобщението. 
Това може да е причинено от прекалено голяма или неправилно зададена дължина на InputStream параметри."; t[224] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[225] = "Параметърният индекс е извън обхват: {0}, брой параметри: {1}."; t[226] = "Transaction isolation level {0} not supported."; t[227] = "Изолационно ниво на транзакциите {0} не се поддържа."; t[234] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[235] = "Не може да се обнови ResultSet, когато се намираме преди началото или след края на резултатите."; t[238] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[239] = "опита да извика end без съответстващо извикване на start. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[242] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; t[243] = "Този SQLXML обект вече е инициализиран и не може да бъде променен."; t[250] = "Conversion to type {0} failed: {1}."; t[251] = "Неуспешно преобразуване към тип {0}: {1}."; t[252] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[253] = "Класът SSLSocketFactory връща {0} и не може да бъде инстанцииран."; t[254] = "Unable to create SAXResult for SQLXML."; t[255] = "Не може да се създаде SAXResult за SQLXML."; t[256] = "Interrupted while attempting to connect."; t[257] = "Опита за осъществяване на връзка бе своевременно прекъснат. "; t[260] = "Protocol error. Session setup failed."; t[261] = "Грешка в протокола. 
Неуспешна настройка на сесията."; t[264] = "Database connection failed when starting copy"; t[265] = "Неосъществена връзка към базата данни при започване на копирането"; t[272] = "Cannot call cancelRowUpdates() when on the insert row."; t[273] = "Не може да се изпълни cancelRowUpdates() метода, когато се намираме при редицата на въвеждане."; t[274] = "Unable to bind parameter values for statement."; t[275] = "Не може да подготви параметрите на командата."; t[280] = "A result was returned when none was expected."; t[281] = "Бе получен резултат, когато такъв не бе очакван."; t[282] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[283] = "Параметърът standard_conforming_strings при сървъра бе докладван като {0}. JDBC драйвъра очаква този параметър да бъде on или off."; t[284] = "Unable to translate data into the desired encoding."; t[285] = "Невъзможно преобразуване на данни в желаното кодиране."; t[292] = "Redshift LOBs can only index to: {0}"; t[293] = "Redshift индексира големи обекти LOB само до: {0}"; t[294] = "Provided InputStream failed."; t[295] = "Зададения InputStream поток е неуспешен."; t[296] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; t[297] = "Транзакция в транзакция не се поддържа за момента. 
xid={0}, currentXid={1}, state={2}, flags={3}"; t[304] = "{0} function takes four and only four argument."; t[305] = "Функцията {0} може да приеме четири и само четири аргумента."; t[306] = "{0} function doesn''t take any argument."; t[307] = "Функцията {0} не може да приема аргументи."; t[310] = "Got CopyOutResponse from server during an active {0}"; t[311] = "Получен CopyOutResponse отговор от сървъра при активно {0}"; t[322] = "No value specified for parameter {0}."; t[323] = "Няма стойност, определена за параметър {0}."; t[324] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[325] = "Невалидна UTF-8 последователност: първоначален байт е {0}: {1}"; t[326] = "Error disabling autocommit"; t[327] = "Грешка при изключване на autocommit"; t[328] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[329] = "Невалидна UTF-8 последователност: байта {0} от байтова последователност {1} не е 10xxxxxx: {2}"; t[330] = "Received CommandComplete ''{0}'' without an active copy operation"; t[331] = "Получено командно допълнение ''{0}'' без активна команда за копиране"; t[332] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[333] = "Невалидна UTF-8 последователност: крайната стойност е извън стойностните граници: {0}"; t[336] = "Cannot change transaction isolation level in the middle of a transaction."; t[337] = "Не може да променяте изолационното ниво на транзакцията по време на нейното извършване."; t[340] = "An unexpected result was returned by a query."; t[341] = "Заявката върна неочакван резултат."; t[346] = "Conversion of interval failed"; t[347] = "Неуспешно преобразуване на интервал"; t[350] = "This ResultSet is closed."; t[351] = "Операциите по този ResultSet са били прекратени."; t[352] = "Read from copy failed."; t[353] = "Четене от копието неуспешно."; t[354] = "Unable to load the class {0} responsible for the datatype {1}"; t[355] = "Невъзможно е зареждането на клас {0}, отговарящ за типа данни {1}"; t[356] = "Failed 
to convert binary xml data to encoding: {0}."; t[357] = "Неуспешно преобразуване на двоични XML данни за кодиране съгласно: {0}."; t[362] = "Connection attempt timed out."; t[363] = "Времето за осъществяване на връзката изтече (таймаут)."; t[364] = "Expected command status BEGIN, got {0}."; t[365] = "Очаквана команда BEGIN, получена {0}."; t[372] = "This copy stream is closed."; t[373] = "Потока за копиране на данните е затворен."; t[376] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; t[377] = "Не може да се определи SQL тип, който да се използва за инстанцията на {0}. Ползвайте метода setObject() с точни стойности, за да определите типа."; t[378] = "Can''t refresh the insert row."; t[379] = "Не може да обнови въведения ред."; t[382] = "You must specify at least one column value to insert a row."; t[383] = "Трябва да посочите поне една стойност за колона, за да вмъкнете ред."; t[388] = "Connection is busy with another transaction"; t[389] = "Връзката е заета с друга транзакция"; t[392] = "Bad value for type {0} : {1}"; t[393] = "Невалидна стойност за тип {0} : {1}"; t[396] = "This statement has been closed."; t[397] = "Командата е извършена."; t[404] = "No primary key found for table {0}."; t[405] = "Няма първичен ключ за таблица {0}."; t[406] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[407] = "В момента се намираме преди края на ResultSet. 
Тук не може да се изпълни deleteRow() метода."; t[414] = "{0} function takes two or three arguments."; t[415] = "Функцията {0} може да приеме два или три аргумента."; t[416] = "{0} function takes three and only three arguments."; t[417] = "Функцията {0} може да приеме три и само три аргумента."; t[418] = "Unable to find server array type for provided name {0}."; t[419] = "Не може да се намери типа на сървърен масив за зададеното име {0}."; t[420] = "Fastpath call {0} - No result was returned and we expected an integer."; t[421] = "Извикване на {0} - няма резултати и а бе очаквано цяло число."; t[426] = "Database connection failed when ending copy"; t[427] = "Неосъществена връзка към базата данни при завършване на копирането"; t[428] = "Cannot write to copy a byte of value {0}"; t[429] = "Няма пишещи права, за да копира байтова стойност {0}"; t[430] = "Results cannot be retrieved from a CallableStatement before it is executed."; t[431] = "Резултати от CallableStatement функция не могат да бъдат получени, преди тя да бъде обработена."; t[432] = "Cannot reference a savepoint after it has been released."; t[433] = "Не може да референцира savepoint, след като е била освободена."; t[434] = "Failed to create object for: {0}."; t[435] = "Неуспешно създаване на обект за: {0}."; t[438] = "Unexpected packet type during copy: {0}"; t[439] = "Неочакван тип пакет при копиране: {0}"; t[442] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[443] = "Невъзможно е да се определи стойността за MaxIndexKeys поради липса на системния каталог с данни."; t[444] = "Tried to end inactive copy"; t[445] = "Опит за прекъсване на неактивно копиране"; t[450] = "Unexpected copydata from server for {0}"; t[451] = "Неочаквано CopyData от сървъра за {0}"; t[460] = "Zero bytes may not occur in identifiers."; t[461] = "Не може да има нула байта в идентификаторите."; t[462] = "Error during one-phase commit. commit xid={0}"; t[463] = "Грешка при едно-фазов commit. 
commit xid={0}"; t[464] = "Ran out of memory retrieving query results."; t[465] = "Недостатъчна памет при представяна на резултатите от заявката."; t[468] = "Unable to create StAXResult for SQLXML"; t[469] = "Не може да се създаде StAXResult за SQLXML."; t[470] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[471] = "Местоположение: Файл: {0}, Функция: {1}, Ред: {2}"; t[482] = "A CallableStatement was executed with an invalid number of parameters"; t[483] = "CallableStatement функция бе обработена, но с непозволен брой параметри."; t[486] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[487] = "Невалидна UTF-8 последователност: {0} байта използвани за кодирането на {1} байтова стойност: {2}"; t[496] = "Interrupted while waiting to obtain lock on database connection"; t[497] = "Прекъсване при чакане да получи заключване/резервация при връзка към базата данни"; t[502] = "LOB positioning offsets start at 1."; t[503] = "Позиционалният офсет при големи обекти LOB започва от 1."; t[506] = "Returning autogenerated keys by column index is not supported."; t[507] = "Автоматично генерирани ключове спрямо индекс на колона не се поддържат."; t[510] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[511] = "В момента се намираме в началото на ResultSet. Тук не може да се изпълни deleteRow() метода."; t[524] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[525] = "Скъсяване на големи обекти LOB е осъществено само във версии след 8.3."; t[526] = "Statement has been closed."; t[527] = "Командата е завършена."; t[540] = "Database connection failed when writing to copy"; t[541] = "Неосъществена връзка към базата данни при опит за копиране"; t[544] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[545] = "Параметърът DateStyle при сървъра бе променен на {0}. 
JDBC драйвъра изисква DateStyle започва с ISO за да функционира правилно."; t[546] = "Provided Reader failed."; t[547] = "Грешка с ползвания четец."; t[550] = "Not on the insert row."; t[551] = "Не сме в редицата на въвеждане."; t[566] = "Unable to decode xml data."; t[567] = "Не може да декодира XML данните."; t[570] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[571] = "Невъзможна комбинация: втората фаза на commit задължително трябва да бъде издадена при свободна връзка. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[596] = "Tried to write to an inactive copy operation"; t[597] = "Опит за писане при неактивна операция за копиране"; t[606] = "An error occurred while setting up the SSL connection."; t[607] = "Възникна грешка при осъществяване на SSL връзката."; t[614] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[615] = "Възникна неочаквана грешка с драйвъра. Моля докадвайте това изключение. "; t[618] = "No results were returned by the query."; t[619] = "Няма намерени резултати за заявката."; t[620] = "ClientInfo property not supported."; t[621] = "Информацията за ClientInfo не се поддържа."; t[622] = "Unexpected error writing large object to database."; t[623] = "Неочаквана грешка при записване на голям обект LOB в базата данни."; t[628] = "The JVM claims not to support the {0} encoding."; t[629] = "JVM не поддържа за момента {0} кодовата таблица."; t[630] = "Unknown XML Source class: {0}"; t[631] = "Неизвестен XML входящ клас: {0}"; t[632] = "Interval {0} not yet implemented"; t[633] = "Интервалът {0} не е валиден все още."; t[636] = "commit called before end. commit xid={0}, state={1}"; t[637] = "commit извикан преди end. 
commit xid={0}, state={1}"; t[638] = "Tried to break lock on database connection"; t[639] = "Опит за премахване на заключването/резервацията при връзка към базата данни"; t[642] = "Missing expected error response to copy cancel request"; t[643] = "Липсва очакван отговор при грешка да прекъсне копирането"; t[644] = "Maximum number of rows must be a value grater than or equal to 0."; t[645] = "Максималният брой редове трябва да бъде стойност по-голяма или равна на 0."; t[652] = "Requested CopyIn but got {0}"; t[653] = "Зададено CopyIn но получено {0}"; t[656] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[657] = "Отчетен параметър от тип {0}, но обработено като get{1} (sqltype={2}). "; t[662] = "Unsupported value for stringtype parameter: {0}"; t[663] = "Непозволена стойност за StringType параметър: {0}"; t[664] = "Fetch size must be a value greater to or equal to 0."; t[665] = "Размера за fetch size трябва да бъде по-голям или равен на 0."; t[670] = "Cannot tell if path is open or closed: {0}."; t[671] = "Не може да определи дали адреса е отворен или затворен: {0}."; t[672] = "Expected an EOF from server, got: {0}"; t[673] = "Очакван край на файла от сървъра, но получено: {0}"; t[680] = "Copying from database failed: {0}"; t[681] = "Копирането от базата данни бе неуспешно: {0}"; t[682] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[683] = "Връзката бе автоматично прекъсната, защото нова връзка за същата беше осъществена или PooledConnection връзката е вече прекъсната."; t[698] = "Custom type maps are not supported."; t[699] = "Специфични типови съответствия не се поддържат."; t[700] = "xid must not be null"; t[701] = "xid не може да бъде null"; t[706] = "Internal Position: {0}"; t[707] = "Вътрешна позиция: {0}"; t[708] = "Error during recover"; t[709] = "Грешка при възстановяване"; t[712] = "Method {0} is not yet 
implemented."; t[713] = "Методът {0} все още не е функционален."; t[714] = "Unexpected command status: {0}."; t[715] = "Неочакван статус на команда: {0}."; t[718] = "The column index is out of range: {0}, number of columns: {1}."; t[719] = "Индексът на колоната е извън стойностен обхват: {0}, брой колони: {1}."; t[730] = "Unknown ResultSet holdability setting: {0}."; t[731] = "Неизвестна ResultSet holdability настройка: {0}."; t[734] = "Cannot call deleteRow() when on the insert row."; t[735] = "Не може да се изпълни deleteRow() метода, когато се намираме при редицата на въвеждане."; t[740] = "ResultSet not positioned properly, perhaps you need to call next."; t[741] = "ResultSet не е референциран правилно. Вероятно трябва да придвижите курсора посредством next."; t[742] = "wasNull cannot be call before fetching a result."; t[743] = "wasNull не може да бьде изпълнен, преди наличието на резултата."; t[746] = "{0} function takes two and only two arguments."; t[747] = "Функцията {0} може да приеме два и само два аргумента."; t[750] = "Malformed function or procedure escape syntax at offset {0}."; t[751] = "Непозволен синтаксис на функция или процедура при офсет {0}."; t[752] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[753] = "Преждевременен край на входящ поток на данни, очаквани {0} байта, но прочетени само {1}."; t[756] = "Got CopyData without an active copy operation"; t[757] = "Получено CopyData без наличие на активна операция за копиране"; t[758] = "Cannot retrieve the id of a named savepoint."; t[759] = "Не може да определи ID на спомената savepoint."; t[770] = "Where: {0}"; t[771] = "Където: {0}"; t[778] = "Got CopyInResponse from server during an active {0}"; t[779] = "Получен CopyInResponse отговор от сървъра при активно {0}"; t[780] = "Cannot convert an instance of {0} to type {1}"; t[781] = "Не може да преобразува инстанцията на {0} във вида {1}"; t[784] = "Not implemented: one-phase commit must be issued using the same 
connection that was used to start it"; t[785] = "Невъзможна комбинация: едно-фазов commit трябва да бъде издаден чрез използване на същата връзка, при която е започнал"; t[790] = "Invalid flags {0}"; t[791] = "Невалидни флагове {0}"; t[798] = "Query timeout must be a value greater than or equals to 0."; t[799] = "Времето за изпълнение на заявката трябва да бъде стойност по-голяма или равна на 0."; t[802] = "Hint: {0}"; t[803] = "Забележка: {0}"; t[810] = "The array index is out of range: {0}, number of elements: {1}."; t[811] = "Индексът на масив е извън обхвата: {0}, брой елементи: {1}."; t[812] = "Internal Query: {0}"; t[813] = "Вътрешна заявка: {0}"; t[816] = "CommandComplete expected COPY but got: "; t[817] = "Очаквано командно допълнение COPY но получено: "; t[824] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[825] = "Невалидна UTF-8 последователност: крайната стойност е заместителна стойност: {0}"; t[826] = "Unknown type {0}."; t[827] = "Неизвестен тип {0}."; t[828] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[829] = "ResultSets с concurrency CONCUR_READ_ONLY не могат да бъдат актуализирани."; t[830] = "The connection attempt failed."; t[831] = "Опита за връзка бе неуспешен."; t[834] = "{0} function takes one and only one argument."; t[835] = "Функцията {0} може да приеме само един единствен аргумент."; t[838] = "suspend/resume not implemented"; t[839] = "спиране / започване не се поддържа за момента"; t[840] = "Error preparing transaction. prepare xid={0}"; t[841] = "Грешка при подготвяне на транзакция. prepare xid={0}"; t[842] = "The driver currently does not support COPY operations."; t[843] = "За момента драйвъра не поддържа COPY команди."; t[852] = "Heuristic commit/rollback not supported. forget xid={0}"; t[853] = "Евристичен commit или rollback не се поддържа. forget xid={0}"; t[856] = "Invalid character data was found. 
This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[857] = "Бяха намерени невалидни данни. Това най-вероятно се дължи на съхранявани данни, съдържащи символи, които са невалидни за набора от знаци при създаване на базата данни. Чест пример за това е съхраняване на 8bit данни в SQL_ASCII бази данни."; t[858] = "Cannot establish a savepoint in auto-commit mode."; t[859] = "Не може да се установи savepoint в auto-commit модус."; t[862] = "The column name {0} was not found in this ResultSet."; t[863] = "Името на колоната {0} не бе намерено в този ResultSet."; t[864] = "Prepare called before end. prepare xid={0}, state={1}"; t[865] = "Prepare извикано преди края. prepare xid={0}, state={1}"; t[866] = "Unknown Types value."; t[867] = "Стойност от неизвестен тип."; t[870] = "Cannot call updateRow() when on the insert row."; t[871] = "Не може да се изпълни updateRow() метода, когато се намираме при редицата на въвеждане."; t[876] = "Database connection failed when reading from copy"; t[877] = "Неосъществена връзка към базата данни при четене от копие"; t[880] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; t[881] = "Грешка при възстановяване на състоянието преди подготвена транзакция. 
rollback xid={0}, preparedXid={1}, currentXid={2}"; t[882] = "Can''t use relative move methods while on the insert row."; t[883] = "Не може да се използват относителни методи за движение, когато се намираме при редицата на въвеждане."; t[884] = "free() was called on this LOB previously"; t[885] = "Функцията free() бе вече извикана за този голям обект LOB"; t[888] = "A CallableStatement was executed with nothing returned."; t[889] = "CallableStatement функция бе обработена, но няма резултати."; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 445) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 443) + 1) << 1; for (;;) { idx += incr; if (idx >= 890) idx -= 890; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 890 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 890); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 890 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,451
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_cs.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_cs extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[314]; t[0] = ""; t[1] = "Project-Id-Version: Redshift JDBC Driver 2.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-08-21 20:00+0200\nLast-Translator: Petr Dittrich <bodyn@medoro.org>\nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; t[2] = "A connection could not be made using the requested protocol {0}."; t[3] = "Spojení nelze vytvořit s použitím žádaného protokolu {0}."; t[4] = "Malformed function or procedure escape syntax at offset {0}."; t[5] = "Poškozená funkce nebo opuštění procedury na pozici {0}."; t[8] = "Cannot cast an instance of {0} to type {1}"; t[9] = "Nemohu přetypovat instanci {0} na typ {1}"; t[12] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[13] = "ResultSet není aktualizavatelný. Dotaz musí vybírat pouze z jedné tabulky a musí obsahovat všechny primární klíče tabulky. 
Koukni do JDBC 2.1 API Specifikace, sekce 5.6 pro více podrobností."; t[14] = "The JVM claims not to support the {0} encoding."; t[15] = "JVM tvrdí, že nepodporuje kodování {0}."; t[16] = "An I/O error occurred while sending to the backend."; t[17] = "Vystupně/výstupní chyba při odesílání k backend."; t[18] = "Statement has been closed."; t[19] = "Statement byl uzavřen."; t[20] = "Unknown Types value."; t[21] = "Neznámá hodnota typu."; t[22] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[23] = "ResultSets se souběžností CONCUR_READ_ONLY nemůže být aktualizováno"; t[26] = "You must specify at least one column value to insert a row."; t[27] = "Musíte vyplnit alespoň jeden sloupec pro vložení řádku."; t[32] = "No primary key found for table {0}."; t[33] = "Nenalezen primární klíč pro tabulku {0}."; t[34] = "Cannot establish a savepoint in auto-commit mode."; t[35] = "Nemohu vytvořit savepoint v auto-commit modu."; t[38] = "Can''t use relative move methods while on the insert row."; t[39] = "Nemůžete používat relativní přesuny při vkládání řádku."; t[44] = "The column name {0} was not found in this ResultSet."; t[45] = "Sloupec pojmenovaný {0} nebyl nalezen v ResultSet."; t[46] = "This statement has been closed."; t[47] = "Příkaz byl uzavřen."; t[48] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[49] = "Třída SSLSocketFactory poskytla {0} což nemůže být instancionizováno."; t[50] = "Multiple ResultSets were returned by the query."; t[51] = "Vícenásobný ResultSet byl vrácen dotazem."; t[52] = "DataSource has been closed."; t[53] = "DataSource byl uzavřen."; t[56] = "Error loading default settings from driverconfig.properties"; t[57] = "Chyba načítání standardního nastavení z driverconfig.properties"; t[62] = "Bad value for type {0} : {1}"; t[63] = "Špatná hodnota pro typ {0} : {1}"; t[66] = "Method {0} is not yet implemented."; t[67] = "Metoda {0} není implementována."; t[68] = "The array index is out of range: {0}"; 
t[69] = "Index pole mimo rozsah: {0}"; t[70] = "Unexpected command status: {0}."; t[71] = "Neočekávaný stav příkazu: {0}."; t[74] = "Expected command status BEGIN, got {0}."; t[75] = "Očekáván příkaz BEGIN, obdržen {0}."; t[76] = "Cannot retrieve the id of a named savepoint."; t[77] = "Nemohu získat id nepojmenovaného savepointu."; t[78] = "Unexpected error writing large object to database."; t[79] = "Neočekávaná chyba při zapisování velkého objektu do databáze."; t[84] = "Not on the insert row."; t[85] = "Ne na vkládaném řádku."; t[86] = "Returning autogenerated keys is not supported."; t[87] = "Vrácení automaticky generovaných klíčů není podporováno."; t[88] = "The server requested password-based authentication, but no password was provided."; t[89] = "Server vyžaduje ověření heslem, ale žádné nebylo posláno."; t[98] = "Unable to load the class {0} responsible for the datatype {1}"; t[99] = "Nemohu načíst třídu {0} odpovědnou za typ {1}"; t[100] = "Invalid fetch direction constant: {0}."; t[101] = "Špatný směr čtení: {0}."; t[102] = "Conversion of money failed."; t[103] = "Převod peněz selhal."; t[104] = "Connection has been closed."; t[105] = "Spojeni bylo uzavřeno."; t[106] = "Cannot retrieve the name of an unnamed savepoint."; t[107] = "Nemohu získat název nepojmenovaného savepointu."; t[108] = "Large Objects may not be used in auto-commit mode."; t[109] = "Velké objecky nemohou být použity v auto-commit modu."; t[110] = "This ResultSet is closed."; t[111] = "Tento ResultSet je uzavřený."; t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[117] = "Něco neobvyklého přinutilo ovladač selhat. 
Prosím nahlaste tuto vyjímku."; t[118] = "The server does not support SSL."; t[119] = "Server nepodporuje SSL."; t[120] = "Invalid stream length {0}."; t[121] = "Vadná délka proudu {0}."; t[126] = "The maximum field size must be a value greater than or equal to 0."; t[127] = "Maximální velikost pole musí být nezáporné číslo."; t[130] = "Cannot call updateRow() when on the insert row."; t[131] = "Nemohu volat updateRow() na vlkádaném řádku."; t[132] = "A CallableStatement was executed with nothing returned."; t[133] = "CallableStatement byl spuštěn, leč nic nebylo vráceno."; t[134] = "Provided Reader failed."; t[135] = "Selhal poskytnutý Reader."; t[146] = "Cannot call deleteRow() when on the insert row."; t[147] = "Nemůžete volat deleteRow() při vkládání řádku."; t[156] = "Where: {0}"; t[157] = "Kde: {0}"; t[158] = "An unexpected result was returned by a query."; t[159] = "Obdržen neočekávaný výsledek dotazu."; t[160] = "The connection attempt failed."; t[161] = "Pokus o připojení selhal."; t[162] = "Too many update results were returned."; t[163] = "Bylo vráceno příliš mnoho výsledků aktualizací."; t[164] = "Unknown type {0}."; t[165] = "Neznámý typ {0}."; t[166] = "{0} function takes two and only two arguments."; t[167] = "Funkce {0} bere právě dva argumenty."; t[168] = "{0} function doesn''t take any argument."; t[169] = "Funkce {0} nebere žádný argument."; t[172] = "Unable to find name datatype in the system catalogs."; t[173] = "Nemohu najít název typu v systémovém katalogu."; t[174] = "Protocol error. Session setup failed."; t[175] = "Chyba protokolu. Nastavení relace selhalo."; t[176] = "{0} function takes one and only one argument."; t[177] = "Funkce {0} bere jeden argument."; t[186] = "The driver currently does not support COPY operations."; t[187] = "Ovladač nyní nepodporuje příkaz COPY."; t[190] = "Invalid character data was found. 
This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[191] = "Nalezena vada ve znakových datech. Toto může být způsobeno uloženými daty obsahujícími znaky, které jsou závadné pro znakovou sadu nastavenou při zakládání databáze. Nejznámejší příklad je ukládání 8bitových dat vSQL_ASCII databázi."; t[196] = "Fetch size must be a value greater to or equal to 0."; t[197] = "Nabraná velikost musí být nezáporná."; t[204] = "Unsupported Types value: {0}"; t[205] = "Nepodporovaná hodnota typu: {0}"; t[206] = "Can''t refresh the insert row."; t[207] = "Nemohu obnovit vkládaný řádek."; t[210] = "Maximum number of rows must be a value grater than or equal to 0."; t[211] = "Maximální počet řádek musí být nezáporné číslo."; t[216] = "No value specified for parameter {0}."; t[217] = "Nespecifikována hodnota parametru {0}."; t[218] = "The array index is out of range: {0}, number of elements: {1}."; t[219] = "Index pole mimo rozsah: {0}, počet prvků: {1}."; t[220] = "Provided InputStream failed."; t[221] = "Selhal poskytnutý InputStream."; t[228] = "Cannot reference a savepoint after it has been released."; t[229] = "Nemohu získat odkaz na savepoint, když byl uvolněn."; t[232] = "An error occurred while setting up the SSL connection."; t[233] = "Nastala chyba při nastavení SSL spojení."; t[246] = "Detail: {0}"; t[247] = "Detail: {0}"; t[248] = "This PooledConnection has already been closed."; t[249] = "Tento PooledConnection byl uzavřen."; t[250] = "A result was returned when none was expected."; t[251] = "Obdržen výsledek, ikdyž žádný nebyl očekáván."; t[254] = "The JVM claims not to support the encoding: {0}"; t[255] = "JVM tvrdí, že nepodporuje kodování: {0}"; t[256] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[257] = "Index parametru mimo rozsah: {0}, počet parametrů {1}."; t[258] = "LOB 
positioning offsets start at 1."; t[259] = "Začátek pozicování LOB začína na 1."; t[260] = "{0} function takes two or three arguments."; t[261] = "Funkce {0} bere dva nebo tři argumenty."; t[262] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[263] = "Právě jste za pozicí konce ResultSetu. Zde nemůžete volat deleteRow().s"; t[266] = "Server SQLState: {0}"; t[267] = "Server SQLState: {0}"; t[270] = "{0} function takes four and only four argument."; t[271] = "Funkce {0} bere přesně čtyři argumenty."; t[272] = "Failed to create object for: {0}."; t[273] = "Selhalo vytvoření objektu: {0}."; t[274] = "No results were returned by the query."; t[275] = "Neobdržen žádný výsledek dotazu."; t[276] = "Position: {0}"; t[277] = "Pozice: {0}"; t[278] = "The column index is out of range: {0}, number of columns: {1}."; t[279] = "Index sloupece je mimo rozsah: {0}, počet sloupců: {1}."; t[280] = "Unknown Response Type {0}."; t[281] = "Neznámý typ odpovědi {0}."; t[284] = "Hint: {0}"; t[285] = "Rada: {0}"; t[286] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[287] = "Poloha: Soubor: {0}, Rutina: {1}, Řádek: {2}"; t[288] = "Query timeout must be a value greater than or equals to 0."; t[289] = "Časový limit dotazu musí být nezáporné číslo."; t[292] = "Unable to translate data into the desired encoding."; t[293] = "Nemohu přeložit data do požadovaného kódování."; t[296] = "Cannot call cancelRowUpdates() when on the insert row."; t[297] = "Nemůžete volat cancelRowUpdates() při vkládání řádku."; t[298] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[299] = "Ověření typu {0} není podporováno. 
Zkontrolujte zda konfigurační soubor pg_hba.conf obsahuje klientskou IP adresu či podsíť a zda je použité ověřenovací schéma podporováno ovladačem."; t[308] = "There are no rows in this ResultSet."; t[309] = "Žádný řádek v ResultSet."; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 157) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 155) + 1) << 1; for (;;) { idx += incr; if (idx >= 314) idx -= 314; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 314 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 314); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 314 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,452
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_zh_CN.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_zh_CN extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[578]; t[0] = ""; t[1] = "Project-Id-Version: Redshift JDBC Driver 2.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-31 14:34+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) <Kuo.ChaoYi@gmail.com>\nLanguage-Team: The PostgreSQL Development Team <Kuo.ChaoYi@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: CHINA\nX-Poedit-SourceCharset: utf-8\n"; t[6] = "Cannot call cancelRowUpdates() when on the insert row."; t[7] = "不能在新增的数据列上呼叫 cancelRowUpdates()。"; t[8] = "The server requested password-based authentication, but no password was provided."; t[9] = "服务器要求使用密码验证,但是密码并未提供。"; t[12] = "Detail: {0}"; t[13] = "详细:{0}"; t[16] = "Can''t refresh the insert row."; t[17] = "无法重读新增的数据列。"; t[18] = "Connection has been closed."; t[19] = "Connection 已经被关闭。"; t[24] = "Bad value for type {0} : {1}"; t[25] = "不良的类型值 {0} : {1}"; t[36] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[37] = "大型对象的截断(Truncation)仅被实作执行在 8.3 和后来的服务器。"; t[40] = "Cannot retrieve the name of an unnamed savepoint."; t[41] = "无法取得未命名储存点(Savepoint)的名称。"; t[46] = "An error occurred while setting up the SSL connection."; t[47] = "进行 SSL 连线时发生错误。"; t[50] = "suspend/resume not implemented"; t[51] = "暂停(suspend)/再继续(resume)尚未被实作。"; t[60] = "{0} function takes one and only one argument."; t[61] = "{0} 函式取得一个且仅有一个引数。"; t[62] = "Conversion to type {0} failed: {1}."; t[63] = "转换类型 {0} 失败:{1}。"; t[66] = "Conversion of money failed."; t[67] = "money 转换失败。"; t[70] = "A result was returned when none was expected."; t[71] = "传回预期之外的结果。"; t[80] = "This PooledConnection has already been closed."; t[81] = "这个 PooledConnection 
已经被关闭。"; t[84] = "Multiple ResultSets were returned by the query."; t[85] = "查询传回多个 ResultSet。"; t[90] = "Not on the insert row."; t[91] = "不在新增的数据列上。"; t[94] = "An unexpected result was returned by a query."; t[95] = "传回非预期的查询结果。"; t[102] = "Internal Query: {0}"; t[103] = "内部查询:{0}"; t[106] = "The array index is out of range: {0}"; t[107] = "阵列索引超过许可范围:{0}"; t[112] = "Connection attempt timed out."; t[113] = "Connection 尝试逾时。"; t[114] = "Unable to find name datatype in the system catalogs."; t[115] = "在系统 catalog 中找不到名称数据类型(datatype)。"; t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[117] = "不明的原因导致驱动程序造成失败,请回报这个例外。"; t[120] = "The array index is out of range: {0}, number of elements: {1}."; t[121] = "阵列索引超过许可范围:{0},元素数量:{1}。"; t[138] = "Invalid flags {0}"; t[139] = "无效的旗标 flags {0}"; t[146] = "Unexpected error writing large object to database."; t[147] = "将大型对象(large object)写入数据库时发生不明错误。"; t[162] = "Query timeout must be a value greater than or equals to 0."; t[163] = "查询逾时等候时间必须大于或等于 0。"; t[170] = "Unknown type {0}."; t[171] = "不明的类型 {0}"; t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[175] = "这服务器的 standard_conforming_strings 参数已回报为 {0},JDBC 驱动程序已预期开启或是关闭。"; t[176] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."; t[177] = "发现不合法的字元,可能的原因是欲储存的数据中包含数据库的字元集不支援的字码,其中最常见例子的就是将 8 位元数据存入使用 SQL_ASCII 编码的数据库中。"; t[178] = "The column index is out of range: {0}, number of columns: {1}."; t[179] = "栏位索引超过许可范围:{0},栏位数:{1}。"; t[180] = "The connection attempt failed."; t[181] = "尝试连线已失败。"; t[182] = "No value specified for parameter {0}."; t[183] = "未设定参数值 {0} 的内容。"; t[190] = "Provided Reader failed."; t[191] = "提供的 Reader 已失败。"; t[194] = "Unsupported value for stringtype parameter: {0}"; t[195] = "字符类型参数值未被支持:{0}"; t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[199] = "已经宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, <some_type>) 。"; t[204] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[205] = "不能在 ResultSet 的第一笔数据之前呼叫 deleteRow()。"; t[214] = "The maximum field size must be a value greater than or equal to 0."; t[215] = "最大栏位容量必须大于或等于 0。"; t[216] = "Fetch size must be a value greater to or equal to 0."; t[217] = "数据读取笔数(fetch size)必须大于或等于 0。"; t[220] = "Redshift LOBs can only index to: {0}"; t[221] = "Redshift LOBs 仅能索引到:{0}"; t[224] = "The JVM claims not to support the encoding: {0}"; t[225] = "JVM 声明并不支援编码:{0} 。"; t[226] = "Interval {0} not yet implemented"; t[227] = "隔绝 {0} 尚未被实作。"; t[238] = "Fastpath call {0} - No result was returned and we expected an integer."; t[239] = "Fastpath 呼叫 {0} - 没有传回值,且应该传回一个整数。"; t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[247] = "ResultSets 与并发同作(Concurrency) CONCUR_READ_ONLY 不能被更新。"; t[250] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[251] = "这个 statement 未宣告 OUT 参数,使用 '{' ?= call ... 
'}' 宣告一个。"; t[256] = "Cannot reference a savepoint after it has been released."; t[257] = "无法参照已经被释放的储存点。"; t[260] = "Unsupported Types value: {0}"; t[261] = "未被支持的类型值:{0}"; t[266] = "Protocol error. Session setup failed."; t[267] = "通讯协定错误,Session 初始化失败。"; t[274] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[275] = "不能在 ResultSet 的最后一笔数据之后呼叫 deleteRow()。"; t[278] = "Internal Position: {0}"; t[279] = "内部位置:{0}"; t[280] = "Zero bytes may not occur in identifiers."; t[281] = "在标识识别符中不存在零位元组。"; t[288] = "{0} function doesn''t take any argument."; t[289] = "{0} 函式无法取得任何的引数。"; t[300] = "This statement has been closed."; t[301] = "这个 statement 已经被关闭。"; t[318] = "Cannot establish a savepoint in auto-commit mode."; t[319] = "在自动确认事物交易模式无法建立储存点(Savepoint)。"; t[320] = "Position: {0}"; t[321] = "位置:{0}"; t[322] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; t[323] = "不可更新的 ResultSet。用来产生这个 ResultSet 的 SQL 命令只能操作一个数据表,并且必需选择所有主键栏位,详细请参阅 JDBC 2.1 API 规格书 5.6 节。"; t[330] = "This ResultSet is closed."; t[331] = "这个 ResultSet 已经被关闭。"; t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[339] = "已注册参数类型 {0},但是又呼叫了get{1}(sqltype={2})。"; t[342] = "Transaction isolation level {0} not supported."; t[343] = "不支援交易隔绝等级 {0} 。"; t[344] = "Statement has been closed."; t[345] = "Sstatement 已经被关闭。"; t[352] = "Server SQLState: {0}"; t[353] = "服务器 SQLState:{0}"; t[354] = "No primary key found for table {0}."; t[355] = "{0} 数据表中未找到主键(Primary key)。"; t[362] = "Cannot convert an instance of {0} to type {1}"; t[363] = "无法转换 {0} 到类型 {1} 的实例"; t[364] = "DataSource has been closed."; t[365] = "DataSource 已经被关闭。"; t[368] = "The column name {0} was not found in this ResultSet."; t[369] = "ResultSet 中找不到栏位名称 {0}。"; t[372] = "ResultSet not positioned properly, perhaps you need to call next."; t[373] = "查询结果指标位置不正确,您也许需要呼叫 ResultSet 的 next() 方法。"; t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[379] = "无法更新 ResultSet,可能在第一笔数据之前或最未笔数据之后。"; t[380] = "Method {0} is not yet implemented."; t[381] = "这个 {0} 方法尚未被实作。"; t[382] = "{0} function takes two or three arguments."; t[383] = "{0} 函式取得二个或三个引数。"; t[384] = "The JVM claims not to support the {0} encoding."; t[385] = "JVM 声明并不支援 {0} 编码。"; t[396] = "Unknown Response Type {0}."; t[397] = "不明的回应类型 {0}。"; t[398] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[399] = "参数索引超出许可范围:{0},参数总数:{1}。"; t[400] = "Where: {0}"; t[401] = "在位置:{0}"; t[406] = "Cannot call deleteRow() when on the insert row."; t[407] = "不能在新增的数据上呼叫 deleteRow()。"; t[414] = "{0} function takes four and only four argument."; t[415] = "{0} 函式取得四个且仅有四个引数。"; t[416] = "Unable to translate data into the desired encoding."; t[417] = "无法将数据转成目标编码。"; 
t[424] = "Can''t use relative move methods while on the insert row."; t[425] = "不能在新增的数据列上使用相对位置 move 方法。"; t[434] = "Invalid stream length {0}."; t[435] = "无效的串流长度 {0}."; t[436] = "The driver currently does not support COPY operations."; t[437] = "驱动程序目前不支援 COPY 操作。"; t[440] = "Maximum number of rows must be a value grater than or equal to 0."; t[441] = "最大数据读取笔数必须大于或等于 0。"; t[446] = "Failed to create object for: {0}."; t[447] = "为 {0} 建立对象失败。"; t[448] = "{0} function takes three and only three arguments."; t[449] = "{0} 函式取得三个且仅有三个引数。"; t[450] = "Conversion of interval failed"; t[451] = "隔绝(Interval)转换失败。"; t[452] = "Cannot tell if path is open or closed: {0}."; t[453] = "无法得知 path 是开启或关闭:{0}。"; t[460] = "Provided InputStream failed."; t[461] = "提供的 InputStream 已失败。"; t[462] = "Invalid fetch direction constant: {0}."; t[463] = "无效的 fetch 方向常数:{0}。"; t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; t[473] = "事物交易隔绝(Transaction interleaving)未被实作。xid={0}, currentXid={1}, state={2}, flags={3}"; t[474] = "{0} function takes two and only two arguments."; t[475] = "{0} 函式取得二个且仅有二个引数。"; t[476] = "There are no rows in this ResultSet."; t[477] = "ResultSet 中找不到数据列。"; t[478] = "Zero bytes may not occur in string parameters."; t[479] = "字符参数不能有 0 个位元组。"; t[480] = "Cannot call updateRow() when on the insert row."; t[481] = "不能在新增的数据列上呼叫 deleteRow()。"; t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[483] = "Connection 已自动结束,因为一个新的 PooledConnection 连线被开启或者或 PooledConnection 已被关闭。"; t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[489] = "一个 CallableStatement 执行函式后输出的参数类型为 {1} 值为 {0},但是已注册的类型是 {2}。"; t[494] = "Cannot cast an instance of {0} to type {1}"; t[495] = "不能转换一个 {0} 实例到类型 {1}"; 
t[498] = "Cannot retrieve the id of a named savepoint."; t[499] = "无法取得已命名储存点的 id。"; t[500] = "Cannot change transaction read-only property in the middle of a transaction."; t[501] = "不能在事物交易过程中改变事物交易唯读属性。"; t[502] = "The server does not support SSL."; t[503] = "服务器不支援 SSL 连线。"; t[510] = "A connection could not be made using the requested protocol {0}."; t[511] = "无法以要求的通讯协定 {0} 建立连线。"; t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[513] = "不支援 {0} 验证类型。请核对您已经组态 pg_hba.conf 文件包含客户端的IP位址或网路区段,以及驱动程序所支援的验证架构模式已被支援。"; t[514] = "Malformed function or procedure escape syntax at offset {0}."; t[515] = "不正确的函式或程序 escape 语法于 {0}。"; t[516] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[517] = "这服务器的 DateStyle 参数被更改成 {0},JDBC 驱动程序请求需要 DateStyle 以 ISO 开头以正确工作。"; t[518] = "No results were returned by the query."; t[519] = "查询没有传回任何结果。"; t[520] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[521] = "位置:文件:{0},常式:{1},行:{2}"; t[526] = "Hint: {0}"; t[527] = "建议:{0}"; t[528] = "A CallableStatement was executed with nothing returned."; t[529] = "一个 CallableStatement 执行函式后没有传回值。"; t[530] = "Unknown ResultSet holdability setting: {0}."; t[531] = "未知的 ResultSet 可适用的设置:{0}。"; t[540] = "Cannot change transaction isolation level in the middle of a transaction."; t[541] = "不能在事务交易过程中改变事物交易隔绝等级。"; t[544] = "The fastpath function {0} is unknown."; t[545] = "不明的 fastpath 函式 {0}。"; t[546] = "Can''t use query methods that take a query string on a PreparedStatement."; t[547] = "在 PreparedStatement 上不能使用获取查询字符的查询方法。"; t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[557] = "操作要求可卷动的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。"; t[564] = "Unknown Types value."; t[565] = "不明的类型值。"; t[570] 
= "Large Objects may not be used in auto-commit mode."; t[571] = "大型对象无法被使用在自动确认事物交易模式。"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 289) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 287) + 1) << 1; for (;;) { idx += incr; if (idx >= 578) idx -= 578; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 578 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 578); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 578 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,453
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_es.java
// NOTE(review): This file is machine-generated by GNU msgfmt (--java mode) from the
// Spanish .po translation catalog. Do not hand-edit translations here -- change the
// .po source and regenerate, otherwise the fix is lost on the next build.
//
// Layout: `table` interleaves message keys (even indices) with their translations
// (the following odd index). handleGetObject() performs msgfmt's open-addressing
// lookup with double hashing: initial slot = (hash % 37) << 1, probe increment =
// ((hash % 35) + 1) << 1, wrapping at table length 74. A null slot means "key not
// present" (caller falls back to the parent bundle / the untranslated key). Because
// slot positions are derived from String.hashCode(), entries must never be moved.
// getKeys() enumerates only the occupied even (key) slots, skipping null gaps.
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_es extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[74]; t[0] = ""; t[1] = "Project-Id-Version: JDBC Redshift Driver\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-22 16:51-0300\nLast-Translator: Diego Gil <diego@adminsa.com>\nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Spanish\n"; t[4] = "The column index is out of range: {0}, number of columns: {1}."; t[5] = "El índice de la columna está fuera de rango: {0}, número de columnas: {1}."; t[12] = "Unknown Response Type {0}."; t[13] = "Tipo de respuesta desconocida {0}."; t[16] = "Protocol error. Session setup failed."; t[17] = "Error de protocolo. Falló el inicio de la sesión."; t[20] = "The server requested password-based authentication, but no password was provided."; t[21] = "El servidor requiere autenticación basada en contraseña, pero no se ha provisto ninguna contraseña."; t[26] = "A result was returned when none was expected."; t[27] = "Se retornó un resultado cuando no se esperaba ninguno."; t[28] = "Server SQLState: {0}"; t[29] = "SQLState del servidor: {0}."; t[30] = "The array index is out of range: {0}, number of elements: {1}."; t[31] = "El índice del arreglo esta fuera de rango: {0}, número de elementos: {1}."; t[32] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[33] = "Final prematuro del flujo de entrada, se esperaban {0} bytes, pero solo se leyeron {1}."; t[36] = "The connection attempt failed."; t[37] = "El intento de conexión falló."; t[38] = "Failed to create object for: {0}."; t[39] = "Fallo al crear objeto: {0}."; t[42] = "An error occurred while setting up the SSL connection."; t[43] = "Ha ocorrido un error mientras se establecía la conexión SSL."; t[48] = "No 
value specified for parameter {0}."; t[49] = "No se ha especificado un valor para el parámetro {0}."; t[50] = "The server does not support SSL."; t[51] = "Este servidor no soporta SSL."; t[52] = "An unexpected result was returned by a query."; t[53] = "Una consulta retornó un resultado inesperado."; t[60] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[61] = "Algo inusual ha ocurrido que provocó un fallo en el controlador. Por favor reporte esta excepción."; t[64] = "No results were returned by the query."; t[65] = "La consulta no retornó ningún resultado."; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 37) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 35) + 1) << 1; for (;;) { idx += incr; if (idx >= 74) idx -= 74; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 74 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 74); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 74 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,454
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_zh_TW.java
// NOTE(review): Machine-generated by GNU msgfmt (--java mode) from the Traditional
// Chinese (zh_TW) .po catalog. Do not hand-edit; fix the .po source and regenerate.
//
// Layout: `table` interleaves message keys (even indices) with translations (next
// odd index). handleGetObject() uses msgfmt's open-addressing lookup with double
// hashing: initial slot = (hash % 289) << 1, probe increment = ((hash % 287) + 1)
// << 1, wrapping at table length 578. A null slot means the key is absent (caller
// falls back to the parent bundle). Slot positions depend on String.hashCode(), so
// entries must never be reordered. getKeys() enumerates occupied key slots only.
//
// NOTE(review): apparent mistranslation in the catalog -- the key at t[480]
// ("Cannot call updateRow() when on the insert row.") is translated at t[481] as
// "...呼叫 deleteRow()。", i.e. it names deleteRow() instead of updateRow().
// This should be corrected in the zh_TW .po source, not in this generated file.
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_zh_TW extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[578]; t[0] = ""; t[1] = "Project-Id-Version: Redshift JDBC Driver 2.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-21 16:50+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) <Kuo.ChaoYi@gmail.com>\nLanguage-Team: The PostgreSQL Development Team <Kuo.ChaoYi@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: TAIWAN\nX-Poedit-SourceCharset: utf-8\n"; t[6] = "Cannot call cancelRowUpdates() when on the insert row."; t[7] = "不能在新增的資料列上呼叫 cancelRowUpdates()。"; t[8] = "The server requested password-based authentication, but no password was provided."; t[9] = "伺服器要求使用密碼驗證,但是密碼並未提供。"; t[12] = "Detail: {0}"; t[13] = "詳細:{0}"; t[16] = "Can''t refresh the insert row."; t[17] = "無法重讀新增的資料列。"; t[18] = "Connection has been closed."; t[19] = "Connection 已經被關閉。"; t[24] = "Bad value for type {0} : {1}"; t[25] = "不良的型別值 {0} : {1}"; t[36] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[37] = "大型物件的截斷(Truncation)僅被實作執行在 8.3 和後來的伺服器。"; t[40] = "Cannot retrieve the name of an unnamed savepoint."; t[41] = "無法取得未命名儲存點(Savepoint)的名稱。"; t[46] = "An error occurred while setting up the SSL connection."; t[47] = "進行 SSL 連線時發生錯誤。"; t[50] = "suspend/resume not implemented"; t[51] = "暫停(suspend)/再繼續(resume)尚未被實作。"; t[60] = "{0} function takes one and only one argument."; t[61] = "{0} 函式取得一個且僅有一個引數。"; t[62] = "Conversion to type {0} failed: {1}."; t[63] = "轉換型別 {0} 失敗:{1}。"; t[66] = "Conversion of money failed."; t[67] = "money 轉換失敗。"; t[70] = "A result was returned when none was expected."; t[71] = "傳回預期之外的結果。"; t[80] = "This PooledConnection has already been closed."; t[81] = "這個 PooledConnection 
已經被關閉。"; t[84] = "Multiple ResultSets were returned by the query."; t[85] = "查詢傳回多個 ResultSet。"; t[90] = "Not on the insert row."; t[91] = "不在新增的資料列上。"; t[94] = "An unexpected result was returned by a query."; t[95] = "傳回非預期的查詢結果。"; t[102] = "Internal Query: {0}"; t[103] = "內部查詢:{0}"; t[106] = "The array index is out of range: {0}"; t[107] = "陣列索引超過許可範圍:{0}"; t[112] = "Connection attempt timed out."; t[113] = "Connection 嘗試逾時。"; t[114] = "Unable to find name datatype in the system catalogs."; t[115] = "在系統 catalog 中找不到名稱資料類型(datatype)。"; t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[117] = "不明的原因導致驅動程式造成失敗,請回報這個例外。"; t[120] = "The array index is out of range: {0}, number of elements: {1}."; t[121] = "陣列索引超過許可範圍:{0},元素數量:{1}。"; t[138] = "Invalid flags {0}"; t[139] = "無效的旗標 {0}"; t[146] = "Unexpected error writing large object to database."; t[147] = "將大型物件(large object)寫入資料庫時發生不明錯誤。"; t[162] = "Query timeout must be a value greater than or equals to 0."; t[163] = "查詢逾時等候時間必須大於或等於 0。"; t[170] = "Unknown type {0}."; t[171] = "不明的型別 {0}"; t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[175] = "這伺服器的 standard_conforming_strings 參數已回報為 {0},JDBC 驅動程式已預期開啟或是關閉。"; t[176] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."; t[177] = "發現不合法的字元,可能的原因是欲儲存的資料中包含資料庫的字元集不支援的字碼,其中最常見例子的就是將 8 位元資料存入使用 SQL_ASCII 編碼的資料庫中。"; t[178] = "The column index is out of range: {0}, number of columns: {1}."; t[179] = "欄位索引超過許可範圍:{0},欄位數:{1}。"; t[180] = "The connection attempt failed."; t[181] = "嘗試連線已失敗。"; t[182] = "No value specified for parameter {0}."; t[183] = "未設定參數值 {0} 的內容。"; t[190] = "Provided Reader failed."; t[191] = "提供的 Reader 已失敗。"; t[194] = "Unsupported value for stringtype parameter: {0}"; t[195] = "字串型別參數值未被支持:{0}"; t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[199] = "已經宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, <some_type>) 。"; t[204] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[205] = "不能在 ResultSet 的第一筆資料之前呼叫 deleteRow()。"; t[214] = "The maximum field size must be a value greater than or equal to 0."; t[215] = "最大欄位容量必須大於或等於 0。"; t[216] = "Fetch size must be a value greater to or equal to 0."; t[217] = "資料讀取筆數(fetch size)必須大於或等於 0。"; t[220] = "Redshift LOBs can only index to: {0}"; t[221] = "Redshift LOBs 僅能索引到:{0}"; t[224] = "The JVM claims not to support the encoding: {0}"; t[225] = "JVM 聲明並不支援編碼:{0} 。"; t[226] = "Interval {0} not yet implemented"; t[227] = "隔絕 {0} 尚未被實作。"; t[238] = "Fastpath call {0} - No result was returned and we expected an integer."; t[239] = "Fastpath 呼叫 {0} - 沒有傳回值,且應該傳回一個整數。"; t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[247] = "ResultSets 與並發同作(Concurrency) CONCUR_READ_ONLY 不能被更新。"; t[250] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[251] = "這個 statement 未宣告 OUT 參數,使用 '{' ?= call ... 
'}' 宣告一個。"; t[256] = "Cannot reference a savepoint after it has been released."; t[257] = "無法參照已經被釋放的儲存點。"; t[260] = "Unsupported Types value: {0}"; t[261] = "未被支持的型別值:{0}"; t[266] = "Protocol error. Session setup failed."; t[267] = "通訊協定錯誤,Session 初始化失敗。"; t[274] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[275] = "不能在 ResultSet 的最後一筆資料之後呼叫 deleteRow()。"; t[278] = "Internal Position: {0}"; t[279] = "內部位置:{0}"; t[280] = "Zero bytes may not occur in identifiers."; t[281] = "在標識識別符中不存在零位元組。"; t[288] = "{0} function doesn''t take any argument."; t[289] = "{0} 函式無法取得任何的引數。"; t[300] = "This statement has been closed."; t[301] = "這個 statement 已經被關閉。"; t[318] = "Cannot establish a savepoint in auto-commit mode."; t[319] = "在自動確認事物交易模式無法建立儲存點(Savepoint)。"; t[320] = "Position: {0}"; t[321] = "位置:{0}"; t[322] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; t[323] = "不可更新的 ResultSet。用來產生這個 ResultSet 的 SQL 命令只能操作一個資料表,並且必需選擇所有主鍵欄位,詳細請參閱 JDBC 2.1 API 規格書 5.6 節。"; t[330] = "This ResultSet is closed."; t[331] = "這個 ResultSet 已經被關閉。"; t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[339] = "已註冊參數型別 {0},但是又呼叫了get{1}(sqltype={2})。"; t[342] = "Transaction isolation level {0} not supported."; t[343] = "不支援交易隔絕等級 {0} 。"; t[344] = "Statement has been closed."; t[345] = "Sstatement 已經被關閉。"; t[352] = "Server SQLState: {0}"; t[353] = "伺服器 SQLState:{0}"; t[354] = "No primary key found for table {0}."; t[355] = "{0} 資料表中未找到主鍵(Primary key)。"; t[362] = "Cannot convert an instance of {0} to type {1}"; t[363] = "無法轉換 {0} 到類型 {1} 的實例"; t[364] = "DataSource has been closed."; t[365] = "DataSource 已經被關閉。"; t[368] = "The column name {0} was not found in this ResultSet."; t[369] = "ResultSet 中找不到欄位名稱 {0}。"; t[372] = "ResultSet not positioned properly, perhaps you need to call next."; t[373] = "查詢結果指標位置不正確,您也許需要呼叫 ResultSet 的 next() 方法。"; t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[379] = "無法更新 ResultSet,可能在第一筆資料之前或最未筆資料之後。"; t[380] = "Method {0} is not yet implemented."; t[381] = "這個 {0} 方法尚未被實作。"; t[382] = "{0} function takes two or three arguments."; t[383] = "{0} 函式取得二個或三個引數。"; t[384] = "The JVM claims not to support the {0} encoding."; t[385] = "JVM 聲明並不支援 {0} 編碼。"; t[396] = "Unknown Response Type {0}."; t[397] = "不明的回應類型 {0}。"; t[398] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[399] = "參數索引超出許可範圍:{0},參數總數:{1}。"; t[400] = "Where: {0}"; t[401] = "在位置:{0}"; t[406] = "Cannot call deleteRow() when on the insert row."; t[407] = "不能在新增的資料上呼叫 deleteRow()。"; t[414] = "{0} function takes four and only four argument."; t[415] = "{0} 函式取得四個且僅有四個引數。"; t[416] = "Unable to translate data into the desired encoding."; t[417] = "無法將資料轉成目標編碼。"; 
t[424] = "Can''t use relative move methods while on the insert row."; t[425] = "不能在新增的資料列上使用相對位置 move 方法。"; t[434] = "Invalid stream length {0}."; t[435] = "無效的串流長度 {0}."; t[436] = "The driver currently does not support COPY operations."; t[437] = "驅動程式目前不支援 COPY 操作。"; t[440] = "Maximum number of rows must be a value grater than or equal to 0."; t[441] = "最大資料讀取筆數必須大於或等於 0。"; t[446] = "Failed to create object for: {0}."; t[447] = "為 {0} 建立物件失敗。"; t[448] = "{0} function takes three and only three arguments."; t[449] = "{0} 函式取得三個且僅有三個引數。"; t[450] = "Conversion of interval failed"; t[451] = "隔絕(Interval)轉換失敗。"; t[452] = "Cannot tell if path is open or closed: {0}."; t[453] = "無法得知 path 是開啟或關閉:{0}。"; t[460] = "Provided InputStream failed."; t[461] = "提供的 InputStream 已失敗。"; t[462] = "Invalid fetch direction constant: {0}."; t[463] = "無效的 fetch 方向常數:{0}。"; t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; t[473] = "事物交易隔絕(Transaction interleaving)未被實作。xid={0}, currentXid={1}, state={2}, flags={3}"; t[474] = "{0} function takes two and only two arguments."; t[475] = "{0} 函式取得二個且僅有二個引數。"; t[476] = "There are no rows in this ResultSet."; t[477] = "ResultSet 中找不到資料列。"; t[478] = "Zero bytes may not occur in string parameters."; t[479] = "字串參數不能有 0 個位元組。"; t[480] = "Cannot call updateRow() when on the insert row."; t[481] = "不能在新增的資料列上呼叫 deleteRow()。"; t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[483] = "Connection 已自動結束,因為一個新的 PooledConnection 連線被開啟或者或 PooledConnection 已被關閉。"; t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[489] = "一個 CallableStatement 執行函式後輸出的參數型別為 {1} 值為 {0},但是已註冊的型別是 {2}。"; t[494] = "Cannot cast an instance of {0} to type {1}"; t[495] = "不能轉換一個 {0} 實例到型別 {1}"; 
t[498] = "Cannot retrieve the id of a named savepoint."; t[499] = "無法取得已命名儲存點的 id。"; t[500] = "Cannot change transaction read-only property in the middle of a transaction."; t[501] = "不能在事物交易過程中改變事物交易唯讀屬性。"; t[502] = "The server does not support SSL."; t[503] = "伺服器不支援 SSL 連線。"; t[510] = "A connection could not be made using the requested protocol {0}."; t[511] = "無法以要求的通訊協定 {0} 建立連線。"; t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[513] = "不支援 {0} 驗證型別。請核對您已經組態 pg_hba.conf 檔案包含客戶端的IP位址或網路區段,以及驅動程式所支援的驗證架構模式已被支援。"; t[514] = "Malformed function or procedure escape syntax at offset {0}."; t[515] = "不正確的函式或程序 escape 語法於 {0}。"; t[516] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[517] = "這伺服器的 DateStyle 參數被更改成 {0},JDBC 驅動程式請求需要 DateStyle 以 ISO 開頭以正確工作。"; t[518] = "No results were returned by the query."; t[519] = "查詢沒有傳回任何結果。"; t[520] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[521] = "位置:檔案:{0},常式:{1},行:{2}"; t[526] = "Hint: {0}"; t[527] = "建議:{0}"; t[528] = "A CallableStatement was executed with nothing returned."; t[529] = "一個 CallableStatement 執行函式後沒有傳回值。"; t[530] = "Unknown ResultSet holdability setting: {0}."; t[531] = "未知的 ResultSet 可適用的設置:{0}。"; t[540] = "Cannot change transaction isolation level in the middle of a transaction."; t[541] = "不能在事務交易過程中改變事物交易隔絕等級。"; t[544] = "The fastpath function {0} is unknown."; t[545] = "不明的 fastpath 函式 {0}。"; t[546] = "Can''t use query methods that take a query string on a PreparedStatement."; t[547] = "在 PreparedStatement 上不能使用獲取查詢字串的查詢方法。"; t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[557] = "操作要求可捲動的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。"; t[564] = "Unknown Types value."; t[565] = "不明的型別值。"; t[570] 
= "Large Objects may not be used in auto-commit mode."; t[571] = "大型物件無法被使用在自動確認事物交易模式。"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 289) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 287) + 1) << 1; for (;;) { idx += incr; if (idx >= 578) idx -= 578; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 578 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 578); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 578 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,455
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_pt_BR.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_pt_BR extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[794]; t[0] = ""; t[1] = "Project-Id-Version: Redshift 1.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-31 20:48-0300\nLast-Translator: Euler Taveira de Oliveira <euler@timbira.com>\nLanguage-Team: Brazilian Portuguese <pgbr-dev@listas.postgresql.org.br>\nLanguage: pt_BR\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[3] = "Não está implementado: efetivação da segunda fase deve ser executada utilizado uma conexão ociosa. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[4] = "DataSource has been closed."; t[5] = "DataSource foi fechado."; t[8] = "Invalid flags {0}"; t[9] = "Marcadores={0} inválidos"; t[18] = "Where: {0}"; t[19] = "Onde: {0}"; t[24] = "Unknown XML Source class: {0}"; t[25] = "Classe XML Source desconhecida: {0}"; t[26] = "The connection attempt failed."; t[27] = "A tentativa de conexão falhou."; t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[29] = "Posicionado depois do fim do ResultSet. 
Você não pode chamar deleteRow() aqui."; t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; t[33] = "Não pode utilizar métodos de consulta que pegam uma consulta de um comando preparado."; t[36] = "Multiple ResultSets were returned by the query."; t[37] = "ResultSets múltiplos foram retornados pela consulta."; t[50] = "Too many update results were returned."; t[51] = "Muitos resultados de atualização foram retornados."; t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[59] = "Sequência UTF-8 ilegal: byte inicial é {0}: {1}"; t[66] = "The column name {0} was not found in this ResultSet."; t[67] = "A nome da coluna {0} não foi encontrado neste ResultSet."; t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; t[71] = "Chamada ao Fastpath {0} - Nenhum resultado foi retornado e nós esperávamos um inteiro."; t[74] = "Protocol error. Session setup failed."; t[75] = "Erro de Protocolo. Configuração da sessão falhou."; t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[77] = "Uma função foi declarada mas nenhuma chamada a registerOutParameter (1, <algum_tipo>) foi feita."; t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[79] = "ResultSets com CONCUR_READ_ONLY concorrentes não podem ser atualizados."; t[90] = "LOB positioning offsets start at 1."; t[91] = "Deslocamentos da posição de LOB começam em 1."; t[92] = "Internal Position: {0}"; t[93] = "Posição Interna: {0}"; t[96] = "free() was called on this LOB previously"; t[97] = "free() já foi chamado neste LOB"; t[100] = "Cannot change transaction read-only property in the middle of a transaction."; t[101] = "Não pode mudar propriedade somente-leitura da transação no meio de uma transação."; t[102] = "The JVM claims not to support the {0} encoding."; t[103] = "A JVM reclamou que não suporta a codificação {0}."; t[108] = "{0} function doesn''t take any argument."; 
t[109] = "função {0} não recebe nenhum argumento."; t[112] = "xid must not be null"; t[113] = "xid não deve ser nulo"; t[114] = "Connection has been closed."; t[115] = "Conexão foi fechada."; t[122] = "The server does not support SSL."; t[123] = "O servidor não suporta SSL."; t[124] = "Custom type maps are not supported."; t[125] = "Mapeamento de tipos personalizados não são suportados."; t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[141] = "Sequência UTF-8 ilegal: byte {0} da sequência de bytes {1} não é 10xxxxxx: {2}"; t[148] = "Hint: {0}"; t[149] = "Dica: {0}"; t[152] = "Unable to find name datatype in the system catalogs."; t[153] = "Não foi possível encontrar tipo de dado name nos catálogos do sistema."; t[156] = "Unsupported Types value: {0}"; t[157] = "Valor de Types não é suportado: {0}"; t[158] = "Unknown type {0}."; t[159] = "Tipo desconhecido {0}."; t[166] = "{0} function takes two and only two arguments."; t[167] = "função {0} recebe somente dois argumentos."; t[170] = "Finalizing a Connection that was never closed:"; t[171] = "Fechando uma Conexão que não foi fechada:"; t[180] = "The maximum field size must be a value greater than or equal to 0."; t[181] = "O tamanho máximo de um campo deve ser um valor maior ou igual a 0."; t[186] = "Redshift LOBs can only index to: {0}"; t[187] = "LOBs do Redshift só podem indexar até: {0}"; t[194] = "Method {0} is not yet implemented."; t[195] = "Método {0} ainda não foi implementado."; t[198] = "Error loading default settings from driverconfig.properties"; t[199] = "Erro ao carregar configurações padrão do driverconfig.properties"; t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; t[201] = "Resultados não podem ser recuperados de uma função antes dela ser executada."; t[202] = "Large Objects may not be used in auto-commit mode."; t[203] = "Objetos Grandes não podem ser usados no modo de efetivação automática (auto-commit)."; t[208] = 
"Expected command status BEGIN, got {0}."; t[209] = "Status do comando BEGIN esperado, recebeu {0}."; t[218] = "Invalid fetch direction constant: {0}."; t[219] = "Constante de direção da busca é inválida: {0}."; t[222] = "{0} function takes three and only three arguments."; t[223] = "função {0} recebe três e somente três argumentos."; t[226] = "This SQLXML object has already been freed."; t[227] = "Este objeto SQLXML já foi liberado."; t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[229] = "Não pode atualizar o ResultSet porque ele está antes do início ou depois do fim dos resultados."; t[230] = "The JVM claims not to support the encoding: {0}"; t[231] = "A JVM reclamou que não suporta a codificação: {0}"; t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[233] = "Parâmetro do tipo {0} foi registrado, mas uma chamada a get{1} (tiposql={2}) foi feita."; t[240] = "Cannot establish a savepoint in auto-commit mode."; t[241] = "Não pode estabelecer um savepoint no modo de efetivação automática (auto-commit)."; t[242] = "Cannot retrieve the id of a named savepoint."; t[243] = "Não pode recuperar o id de um savepoint com nome."; t[244] = "The column index is out of range: {0}, number of columns: {1}."; t[245] = "O índice da coluna está fora do intervalo: {0}, número de colunas: {1}."; t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[251] = "Alguma coisa não usual ocorreu para causar a falha do driver. 
Por favor reporte esta exceção."; t[260] = "Cannot cast an instance of {0} to type {1}"; t[261] = "Não pode converter uma instância de {0} para tipo {1}"; t[264] = "Unknown Types value."; t[265] = "Valor de Types desconhecido."; t[266] = "Invalid stream length {0}."; t[267] = "Tamanho de dado {0} é inválido."; t[272] = "Cannot retrieve the name of an unnamed savepoint."; t[273] = "Não pode recuperar o nome de um savepoint sem nome."; t[274] = "Unable to translate data into the desired encoding."; t[275] = "Não foi possível traduzir dado para codificação desejada."; t[276] = "Expected an EOF from server, got: {0}"; t[277] = "Esperado um EOF do servidor, recebido: {0}"; t[278] = "Bad value for type {0} : {1}"; t[279] = "Valor inválido para tipo {0} : {1}"; t[280] = "The server requested password-based authentication, but no password was provided."; t[281] = "O servidor pediu autenticação baseada em senha, mas nenhuma senha foi fornecida."; t[286] = "Unable to create SAXResult for SQLXML."; t[287] = "Não foi possível criar SAXResult para SQLXML."; t[292] = "Error during recover"; t[293] = "Erro durante recuperação"; t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[295] = "tentou executar end sem a chamada ao start correspondente. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[297] = "Truncar objetos grandes só é implementado por servidores 8.3 ou superiores."; t[298] = "This PooledConnection has already been closed."; t[299] = "Este PooledConnection já foi fechado."; t[302] = "ClientInfo property not supported."; t[303] = "propriedade ClientInfo não é suportada."; t[306] = "Fetch size must be a value greater to or equal to 0."; t[307] = "Tamanho da busca deve ser um valor maior ou igual a 0."; t[312] = "A connection could not be made using the requested protocol {0}."; t[313] = "A conexão não pode ser feita usando protocolo informado {0}."; t[318] = "Unknown XML Result class: {0}"; t[319] = "Classe XML Result desconhecida: {0}"; t[322] = "There are no rows in this ResultSet."; t[323] = "Não há nenhum registro neste ResultSet."; t[324] = "Unexpected command status: {0}."; t[325] = "Status do comando inesperado: {0}."; t[330] = "Heuristic commit/rollback not supported. forget xid={0}"; t[331] = "Efetivação/Cancelamento heurístico não é suportado. forget xid={0}"; t[334] = "Not on the insert row."; t[335] = "Não está inserindo um registro."; t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; t[337] = "Este objeto SQLXML já foi inicializado, então você não pode manipulá-lo depois."; t[344] = "Server SQLState: {0}"; t[345] = "SQLState: {0}"; t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[349] = "O parâmetro do servidor standard_conforming_strings foi definido como {0}. 
O driver JDBC espera que seja on ou off."; t[360] = "The driver currently does not support COPY operations."; t[361] = "O driver atualmente não suporta operações COPY."; t[364] = "The array index is out of range: {0}, number of elements: {1}."; t[365] = "O índice da matriz está fora do intervalo: {0}, número de elementos: {1}."; t[374] = "suspend/resume not implemented"; t[375] = "suspender/recomeçar não está implementado"; t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; t[379] = "Não está implementado: efetivada da primeira fase deve ser executada utilizando a mesma conexão que foi utilizada para iniciá-la"; t[380] = "Error during one-phase commit. commit xid={0}"; t[381] = "Erro durante efetivação de uma fase. commit xid={0}"; t[398] = "Cannot call cancelRowUpdates() when on the insert row."; t[399] = "Não pode chamar cancelRowUpdates() quando estiver inserindo registro."; t[400] = "Cannot reference a savepoint after it has been released."; t[401] = "Não pode referenciar um savepoint após ele ser descartado."; t[402] = "You must specify at least one column value to insert a row."; t[403] = "Você deve especificar pelo menos uma coluna para inserir um registro."; t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[405] = "Não foi possível determinar um valor para MaxIndexKeys por causa de falta de dados no catálogo do sistema."; t[410] = "commit called before end. commit xid={0}, state={1}"; t[411] = "commit executado antes do end. 
commit xid={0}, state={1}"; t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[413] = "Sequência UTF-8 ilegal: valor final está fora do intervalo: {0}"; t[414] = "{0} function takes two or three arguments."; t[415] = "função {0} recebe dois ou três argumentos."; t[428] = "Unable to convert DOMResult SQLXML data to a string."; t[429] = "Não foi possível converter dado SQLXML do DOMResult para uma cadeia de caracteres."; t[434] = "Unable to decode xml data."; t[435] = "Não foi possível decodificar dado xml."; t[440] = "Unexpected error writing large object to database."; t[441] = "Erro inesperado ao escrever objeto grande no banco de dados."; t[442] = "Zero bytes may not occur in string parameters."; t[443] = "Zero bytes não podem ocorrer em parâmetros de cadeia de caracteres."; t[444] = "A result was returned when none was expected."; t[445] = "Um resultado foi retornado quando nenhum era esperado."; t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[451] = "ResultSet não é atualizável. A consulta que gerou esse conjunto de resultados deve selecionar somente uma tabela, e deve selecionar todas as chaves primárias daquela tabela. Veja a especificação na API do JDBC 2.1, seção 5.6 para obter mais detalhes."; t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[455] = "Tamanho de mensagem de ligação {0} é muito longo. 
Isso pode ser causado por especificações de tamanho incorretas ou muito grandes nos parâmetros do InputStream."; t[460] = "Statement has been closed."; t[461] = "Comando foi fechado."; t[462] = "No value specified for parameter {0}."; t[463] = "Nenhum valor especificado para parâmetro {0}."; t[468] = "The array index is out of range: {0}"; t[469] = "O índice da matriz está fora do intervalo: {0}"; t[474] = "Unable to bind parameter values for statement."; t[475] = "Não foi possível ligar valores de parâmetro ao comando."; t[476] = "Can''t refresh the insert row."; t[477] = "Não pode renovar um registro inserido."; t[480] = "No primary key found for table {0}."; t[481] = "Nenhuma chave primária foi encontrada para tabela {0}."; t[482] = "Cannot change transaction isolation level in the middle of a transaction."; t[483] = "Não pode mudar nível de isolamento da transação no meio de uma transação."; t[498] = "Provided InputStream failed."; t[499] = "InputStream fornecido falhou."; t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[501] = "O índice de parâmetro está fora do intervalo: {0}, número de parâmetros: {1}."; t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[503] = "O parâmetro do servidor DateStyle foi alterado para {0}. O driver JDBC requer que o DateStyle começe com ISO para operação normal."; t[508] = "Connection attempt timed out."; t[509] = "Tentativa de conexão falhou."; t[512] = "Internal Query: {0}"; t[513] = "Consulta Interna: {0}"; t[514] = "Error preparing transaction. prepare xid={0}"; t[515] = "Erro ao preparar transação. prepare xid={0}"; t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[519] = "O tipo de autenticação {0} não é suportado. 
Verifique se você configurou o arquivo pg_hba.conf incluindo a subrede ou endereço IP do cliente, e se está utilizando o esquema de autenticação suportado pelo driver."; t[526] = "Interval {0} not yet implemented"; t[527] = "Intervalo {0} ainda não foi implementado"; t[532] = "Conversion of interval failed"; t[533] = "Conversão de interval falhou"; t[540] = "Query timeout must be a value greater than or equals to 0."; t[541] = "Tempo de espera da consulta deve ser um valor maior ou igual a 0."; t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[543] = "Conexão foi fechada automaticamente porque uma nova conexão foi aberta pelo mesmo PooledConnection ou o PooledConnection foi fechado."; t[544] = "ResultSet not positioned properly, perhaps you need to call next."; t[545] = "ResultSet não está posicionado corretamente, talvez você precise chamar next."; t[546] = "Prepare called before end. prepare xid={0}, state={1}"; t[547] = "Prepare executado antes do end. prepare xid={0}, state={1}"; t[548] = "Invalid UUID data."; t[549] = "dado UUID é inválido."; t[550] = "This statement has been closed."; t[551] = "Este comando foi fechado."; t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; t[553] = "Não pode inferir um tipo SQL a ser usado para uma instância de {0}. Use setObject() com um valor de Types explícito para especificar o tipo a ser usado."; t[554] = "Cannot call updateRow() when on the insert row."; t[555] = "Não pode chamar updateRow() quando estiver inserindo registro."; t[562] = "Detail: {0}"; t[563] = "Detalhe: {0}"; t[566] = "Cannot call deleteRow() when on the insert row."; t[567] = "Não pode chamar deleteRow() quando estiver inserindo registro."; t[568] = "Currently positioned before the start of the ResultSet. 
You cannot call deleteRow() here."; t[569] = "Posicionado antes do início do ResultSet. Você não pode chamar deleteRow() aqui."; t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[577] = "Sequência UTF-8 ilegal: valor final é um valor suplementar: {0}"; t[578] = "Unknown Response Type {0}."; t[579] = "Tipo de Resposta Desconhecido {0}."; t[582] = "Unsupported value for stringtype parameter: {0}"; t[583] = "Valor do parâmetro stringtype não é suportado: {0}"; t[584] = "Conversion to type {0} failed: {1}."; t[585] = "Conversão para tipo {0} falhou: {1}."; t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; t[587] = "Este objeto SQLXML não foi inicializado, então você não pode recuperar dados dele."; t[600] = "Unable to load the class {0} responsible for the datatype {1}"; t[601] = "Não foi possível carregar a classe {0} responsável pelo tipo de dado {1}"; t[604] = "The fastpath function {0} is unknown."; t[605] = "A função do fastpath {0} é desconhecida."; t[608] = "Malformed function or procedure escape syntax at offset {0}."; t[609] = "Sintaxe de escape mal formada da função ou do procedimento no deslocamento {0}."; t[612] = "Provided Reader failed."; t[613] = "Reader fornecido falhou."; t[614] = "Maximum number of rows must be a value grater than or equal to 0."; t[615] = "Número máximo de registros deve ser um valor maior ou igual a 0."; t[616] = "Failed to create object for: {0}."; t[617] = "Falhou ao criar objeto para: {0}."; t[620] = "Conversion of money failed."; t[621] = "Conversão de money falhou."; t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[623] = "Fim de entrada prematuro, eram esperados {0} bytes, mas somente {1} foram lidos."; t[626] = "An unexpected result was returned by a query."; t[627] = "Um resultado inesperado foi retornado pela consulta."; t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. 
xid={0}, currentXid={1}, state={2}, flags={3}"; t[645] = "Intercalação de transação não está implementado. xid={0}, currentXid={1}, state={2}, flags={3}"; t[646] = "An error occurred while setting up the SSL connection."; t[647] = "Um erro ocorreu ao estabelecer uma conexão SSL."; t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[655] = "Sequência UTF-8 ilegal: {0} bytes utilizados para codificar um valor de {1} bytes: {2}"; t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; t[657] = "Não está implementado: Prepare deve ser executado utilizando a mesma conexão que iniciou a transação. currentXid={0}, prepare xid={1}"; t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[659] = "A classe SSLSocketFactory forneceu {0} que não pôde ser instanciado."; t[662] = "Failed to convert binary xml data to encoding: {0}."; t[663] = "Falhou ao converter dados xml binários para codificação: {0}."; t[670] = "Position: {0}"; t[671] = "Posição: {0}"; t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[677] = "Local: Arquivo: {0}, Rotina: {1}, Linha: {2}"; t[684] = "Cannot tell if path is open or closed: {0}."; t[685] = "Não pode dizer se caminho está aberto ou fechado: {0}."; t[690] = "Unable to create StAXResult for SQLXML"; t[691] = "Não foi possível criar StAXResult para SQLXML"; t[700] = "Cannot convert an instance of {0} to type {1}"; t[701] = "Não pode converter uma instância de {0} para tipo {1}"; t[710] = "{0} function takes four and only four argument."; t[711] = "função {0} recebe somente quatro argumentos."; t[716] = "Error disabling autocommit"; t[717] = "Erro ao desabilitar autocommit"; t[718] = "Interrupted while attempting to connect."; t[719] = "Interrompido ao tentar se conectar."; t[722] = "Your security policy has prevented the connection from being attempted. 
You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; t[723] = "Sua política de segurança impediu que a conexão pudesse ser estabelecida. Você provavelmente precisa conceder permissão em java.net.SocketPermission para a máquina e a porta do servidor de banco de dados que você deseja se conectar."; t[734] = "No function outputs were registered."; t[735] = "Nenhum saída de função foi registrada."; t[736] = "{0} function takes one and only one argument."; t[737] = "função {0} recebe somente um argumento."; t[744] = "This ResultSet is closed."; t[745] = "Este ResultSet está fechado."; t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[747] = "Caracter inválido foi encontrado. Isso é mais comumente causado por dado armazenado que contém caracteres que são inválidos para a codificação que foi criado o banco de dados. 
O exemplo mais comum disso é armazenar dados de 8 bits em um banco de dados SQL_ASCII."; t[752] = "GSS Authentication failed"; t[753] = "Autenticação GSS falhou"; t[754] = "Ran out of memory retrieving query results."; t[755] = "Memória insuficiente ao recuperar resultados da consulta."; t[756] = "Returning autogenerated keys is not supported."; t[757] = "Retorno de chaves geradas automaticamente não é suportado."; t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[761] = "Operação requer um ResultSet rolável, mas este ResultSet é FORWARD_ONLY (somente para frente)."; t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[763] = "Uma função foi executada e o parâmetro de retorno {0} era do tipo {1} contudo tipo {2} foi registrado."; t[764] = "Unable to find server array type for provided name {0}."; t[765] = "Não foi possível encontrar tipo matriz para nome fornecido {0}."; t[768] = "Unknown ResultSet holdability setting: {0}."; t[769] = "Definição de durabilidade do ResultSet desconhecida: {0}."; t[772] = "Transaction isolation level {0} not supported."; t[773] = "Nível de isolamento da transação {0} não é suportado."; t[774] = "Zero bytes may not occur in identifiers."; t[775] = "Zero bytes não podem ocorrer em identificadores."; t[776] = "No results were returned by the query."; t[777] = "Nenhum resultado foi retornado pela consulta."; t[778] = "A CallableStatement was executed with nothing returned."; t[779] = "Uma função foi executada e nada foi retornado."; t[780] = "wasNull cannot be call before fetching a result."; t[781] = "wasNull não pode ser chamado antes de obter um resultado."; t[784] = "Returning autogenerated keys by column index is not supported."; t[785] = "Retorno de chaves geradas automaticamente por índice de coluna não é suportado."; t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... 
'}' to declare one."; t[787] = "Este comando não declara um parâmetro de saída. Utilize '{' ?= chamada ... '}' para declarar um)"; t[788] = "Can''t use relative move methods while on the insert row."; t[789] = "Não pode utilizar métodos de movimentação relativos enquanto estiver inserindo registro."; t[790] = "A CallableStatement was executed with an invalid number of parameters"; t[791] = "Uma função foi executada com um número inválido de parâmetros"; t[792] = "Connection is busy with another transaction"; t[793] = "Conexão está ocupada com outra transação"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 397) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 395) + 1) << 1; for (;;) { idx += incr; if (idx >= 794) idx -= 794; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 794 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 794); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 794 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,456
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_tr.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_tr extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[794]; t[0] = ""; t[1] = "Project-Id-Version: jdbc-tr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-31 21:47+0200\nLast-Translator: Devrim GÜNDÜZ <devrim@gunduz.org>\nLanguage-Team: Turkish <pgsql-tr-genel@PostgreSQL.org>\nLanguage: tr\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.3.1\nX-Poedit-Language: Turkish\nX-Poedit-Country: TURKEY\n"; t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[3] = "Desteklenmiyor: 2nd phase commit, atıl bir bağlantıdan başlatılmalıdır. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[4] = "DataSource has been closed."; t[5] = "DataSource kapatıldı."; t[8] = "Invalid flags {0}"; t[9] = "Geçersiz seçenekler {0}"; t[18] = "Where: {0}"; t[19] = "Where: {0}"; t[24] = "Unknown XML Source class: {0}"; t[25] = "Bilinmeyen XML Kaynak Sınıfı: {0}"; t[26] = "The connection attempt failed."; t[27] = "Bağlantı denemesi başarısız oldu."; t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[29] = "Şu an ResultSet sonucundan sonra konumlandı. 
deleteRow() burada çağırabilirsiniz."; t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; t[33] = "PreparedStatement ile sorgu satırı alan sorgu yöntemleri kullanılamaz."; t[36] = "Multiple ResultSets were returned by the query."; t[37] = "Sorgu tarafından birden fazla ResultSet getirildi."; t[50] = "Too many update results were returned."; t[51] = "Çok fazla güncelleme sonucu döndürüldü."; t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[59] = "Geçersiz UTF-8 çoklu bayt karakteri: ilk bayt {0}: {1}"; t[66] = "The column name {0} was not found in this ResultSet."; t[67] = "Bu ResultSet içinde {0} sütun adı bulunamadı."; t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; t[71] = "Fastpath call {0} - Integer beklenirken hiçbir sonuç getirilmedi."; t[74] = "Protocol error. Session setup failed."; t[75] = "Protokol hatası. Oturum kurulumu başarısız oldu."; t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[77] = "CallableStatement bildirildi ancak registerOutParameter(1, < bir tip>) tanıtımı yapılmadı."; t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[79] = "Eş zamanlama CONCUR_READ_ONLY olan ResultSet''ler değiştirilemez"; t[90] = "LOB positioning offsets start at 1."; t[91] = "LOB bağlangıç adresi 1Den başlıyor"; t[92] = "Internal Position: {0}"; t[93] = "Internal Position: {0}"; t[96] = "free() was called on this LOB previously"; t[97] = "Bu LOB'da free() daha önce çağırıldı"; t[100] = "Cannot change transaction read-only property in the middle of a transaction."; t[101] = "Transaction ortasında geçerli transactionun read-only özellği değiştirilemez."; t[102] = "The JVM claims not to support the {0} encoding."; t[103] = "JVM, {0} dil kodlamasını desteklememektedir."; t[108] = "{0} function doesn''t take any argument."; t[109] = "{0} fonksiyonu parametre almaz."; t[112] = "xid must not be null"; t[113] = 
"xid null olamaz"; t[114] = "Connection has been closed."; t[115] = "Bağlantı kapatıldı."; t[122] = "The server does not support SSL."; t[123] = "Sunucu SSL desteklemiyor."; t[124] = "Custom type maps are not supported."; t[125] = "Özel tip eşleştirmeleri desteklenmiyor."; t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[141] = "Geçersiz UTF-8 çoklu bayt karakteri: {0}/{1} baytı 10xxxxxx değildir: {2}"; t[148] = "Hint: {0}"; t[149] = "İpucu: {0}"; t[152] = "Unable to find name datatype in the system catalogs."; t[153] = "Sistem kataloglarında name veri tipi bulunamıyor."; t[156] = "Unsupported Types value: {0}"; t[157] = "Geçersiz Types değeri: {0}"; t[158] = "Unknown type {0}."; t[159] = "Bilinmeyen tip {0}."; t[166] = "{0} function takes two and only two arguments."; t[167] = "{0} fonksiyonunu sadece iki parametre alabilir."; t[170] = "Finalizing a Connection that was never closed:"; t[171] = "Kapatılmamış bağlantı sonlandırılıyor."; t[180] = "The maximum field size must be a value greater than or equal to 0."; t[181] = "En büyük alan boyutu sıfır ya da sıfırdan büyük bir değer olmalı."; t[186] = "Redshift LOBs can only index to: {0}"; t[187] = "Redshift LOB göstergeleri sadece {0} referans edebilir"; t[194] = "Method {0} is not yet implemented."; t[195] = "{0} yöntemi henüz kodlanmadı."; t[198] = "Error loading default settings from driverconfig.properties"; t[199] = "driverconfig.properties dosyasından varsayılan ayarları yükleme hatası"; t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; t[201] = "CallableStatement çalıştırılmadan sonuçlar ondan alınamaz."; t[202] = "Large Objects may not be used in auto-commit mode."; t[203] = "Auto-commit biçimde large object kullanılamaz."; t[208] = "Expected command status BEGIN, got {0}."; t[209] = "BEGIN komut durumunu beklenirken {0} alındı."; t[218] = "Invalid fetch direction constant: {0}."; t[219] = "Getirme yönü değişmezi geçersiz: {0}."; 
t[222] = "{0} function takes three and only three arguments."; t[223] = "{0} fonksiyonunu sadece üç parametre alabilir."; t[226] = "This SQLXML object has already been freed."; t[227] = "Bu SQLXML nesnesi zaten boşaltılmış."; t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[229] = "ResultSet, sonuçların ilk kaydından önce veya son kaydından sonra olduğu için güncelleme yapılamamaktadır."; t[230] = "The JVM claims not to support the encoding: {0}"; t[231] = "JVM, {0} dil kodlamasını desteklememektedir."; t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[233] = "{0} tipinde parametre tanıtıldı, ancak {1} (sqltype={2}) tipinde geri getirmek için çağrı yapıldı."; t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; t[235] = "Hazırlanmış transaction rollback hatası. rollback xid={0}, preparedXid={1}, currentXid={2}"; t[240] = "Cannot establish a savepoint in auto-commit mode."; t[241] = "Auto-commit biçimde savepoint oluşturulamıyor."; t[242] = "Cannot retrieve the id of a named savepoint."; t[243] = "Adlandırılmış savepointin id değerine erişilemiyor."; t[244] = "The column index is out of range: {0}, number of columns: {1}."; t[245] = "Sütun gçstergesi kapsam dışıdır: {0}, sütun sayısı: {1}."; t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[251] = "Sıradışı bir durum sürücünün hata vermesine sebep oldu. 
Lütfen bu durumu geliştiricilere bildirin."; t[260] = "Cannot cast an instance of {0} to type {1}"; t[261] = "{0} tipi {1} tipine dönüştürülemiyor"; t[264] = "Unknown Types value."; t[265] = "Geçersiz Types değeri."; t[266] = "Invalid stream length {0}."; t[267] = "Geçersiz akım uzunluğu {0}."; t[272] = "Cannot retrieve the name of an unnamed savepoint."; t[273] = "Adı verilmemiş savepointin id değerine erişilemiyor."; t[274] = "Unable to translate data into the desired encoding."; t[275] = "Veri, istenilen dil kodlamasına çevrilemiyor."; t[276] = "Expected an EOF from server, got: {0}"; t[277] = "Sunucudan EOF beklendi; ama {0} alındı."; t[278] = "Bad value for type {0} : {1}"; t[279] = "{0} veri tipi için geçersiz değer : {1}"; t[280] = "The server requested password-based authentication, but no password was provided."; t[281] = "Sunucu şifre tabanlı yetkilendirme istedi; ancak bir şifre sağlanmadı."; t[286] = "Unable to create SAXResult for SQLXML."; t[287] = "SQLXML için SAXResult yaratılamadı."; t[292] = "Error during recover"; t[293] = "Kurtarma sırasında hata"; t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[295] = "start çağırımı olmadan end çağırılmıştır. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[297] = "Large objectlerin temizlenmesi 8.3 ve sonraki sürümlerde kodlanmıştır."; t[298] = "This PooledConnection has already been closed."; t[299] = "Geçerli PooledConnection zaten önceden kapatıldı."; t[302] = "ClientInfo property not supported."; t[303] = "Clientinfo property'si desteklenememktedir."; t[306] = "Fetch size must be a value greater to or equal to 0."; t[307] = "Fetch boyutu sıfır veya daha büyük bir değer olmalıdır."; t[312] = "A connection could not be made using the requested protocol {0}."; t[313] = "İstenilen protokol ile bağlantı kurulamadı {0}"; t[318] = "Unknown XML Result class: {0}"; t[319] = "Bilinmeyen XML Sonuç sınıfı: {0}."; t[322] = "There are no rows in this ResultSet."; t[323] = "Bu ResultSet içinde kayıt bulunamadı."; t[324] = "Unexpected command status: {0}."; t[325] = "Beklenmeyen komut durumu: {0}."; t[330] = "Heuristic commit/rollback not supported. forget xid={0}"; t[331] = "Heuristic commit/rollback desteklenmiyor. forget xid={0}"; t[334] = "Not on the insert row."; t[335] = "Insert kaydı değil."; t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; t[337] = "Bu SQLXML nesnesi daha önceden ilklendirilmiştir; o yüzden daha fazla müdahale edilemez."; t[344] = "Server SQLState: {0}"; t[345] = "Sunucu SQLState: {0}"; t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[349] = "İstemcinin client_standard_conforming_strings parametresi {0} olarak raporlandı. 
JDBC sürücüsü on ya da off olarak bekliyordu."; t[360] = "The driver currently does not support COPY operations."; t[361] = "Bu sunucu şu aşamada COPY işlemleri desteklememktedir."; t[364] = "The array index is out of range: {0}, number of elements: {1}."; t[365] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}."; t[374] = "suspend/resume not implemented"; t[375] = "suspend/resume desteklenmiyor"; t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; t[379] = "Desteklenmiyor: one-phase commit, işlevinde başlatan ve bitiren bağlantı aynı olmalıdır"; t[380] = "Error during one-phase commit. commit xid={0}"; t[381] = "One-phase commit sırasında hata. commit xid={0}"; t[398] = "Cannot call cancelRowUpdates() when on the insert row."; t[399] = "Insert edilmiş kaydın üzerindeyken cancelRowUpdates() çağırılamaz."; t[400] = "Cannot reference a savepoint after it has been released."; t[401] = "Bırakıldıktan sonra savepoint referans edilemez."; t[402] = "You must specify at least one column value to insert a row."; t[403] = "Bir satır eklemek için en az bir sütun değerini belirtmelisiniz."; t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[405] = "Sistem kataloğu olmadığından MaxIndexKeys değerini tespit edilememektedir."; t[410] = "commit called before end. commit xid={0}, state={1}"; t[411] = "commit, sondan önce çağırıldı. 
commit xid={0}, state={1}"; t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[413] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer sıra dışıdır: {0}"; t[414] = "{0} function takes two or three arguments."; t[415] = "{0} fonksiyonu yalnız iki veya üç argüman alabilir."; t[428] = "Unable to convert DOMResult SQLXML data to a string."; t[429] = "DOMResult SQLXML verisini diziye dönüştürülemedi."; t[434] = "Unable to decode xml data."; t[435] = "XML verisinin kodu çözülemedi."; t[440] = "Unexpected error writing large object to database."; t[441] = "Large object veritabanına yazılırken beklenmeyan hata."; t[442] = "Zero bytes may not occur in string parameters."; t[443] = "String parametrelerinde sıfır bayt olamaz."; t[444] = "A result was returned when none was expected."; t[445] = "Hiçbir sonuç kebklenimezken sonuç getirildi."; t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[451] = "ResultSet değiştirilemez. Bu sonucu üreten sorgu tek bir tablodan sorgulamalı ve tablonun tüm primary key alanları belirtmelidir. Daha fazla bilgi için bk. JDBC 2.1 API Specification, section 5.6."; t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[455] = "Bind mesaj uzunluğu ({0}) fazla uzun. 
Bu durum InputStream yalnış uzunluk belirtimlerden kaynaklanabilir."; t[460] = "Statement has been closed."; t[461] = "Komut kapatıldı."; t[462] = "No value specified for parameter {0}."; t[463] = "{0} parametresi için hiç bir değer belirtilmedi."; t[468] = "The array index is out of range: {0}"; t[469] = "Dizi göstergesi kapsam dışıdır: {0}"; t[474] = "Unable to bind parameter values for statement."; t[475] = "Komut için parametre değerlei bağlanamadı."; t[476] = "Can''t refresh the insert row."; t[477] = "Inser satırı yenilenemiyor."; t[480] = "No primary key found for table {0}."; t[481] = "{0} tablosunda primary key yok."; t[482] = "Cannot change transaction isolation level in the middle of a transaction."; t[483] = "Transaction ortasında geçerli transactionun transaction isolation level özellği değiştirilemez."; t[498] = "Provided InputStream failed."; t[499] = "Sağlanmış InputStream başarısız."; t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[501] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}."; t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[503] = "Sunucunun DateStyle parametresi {0} olarak değiştirildi. JDBC sürücüsü doğru işlemesi için DateStyle tanımının ISO işle başlamasını gerekir."; t[508] = "Connection attempt timed out."; t[509] = "Bağlantı denemesi zaman aşımına uğradı."; t[512] = "Internal Query: {0}"; t[513] = "Internal Query: {0}"; t[514] = "Error preparing transaction. prepare xid={0}"; t[515] = "Transaction hazırlama hatası. prepare xid={0}"; t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[519] = "{0} yetkinlendirme tipi desteklenmemektedir. 
pg_hba.conf dosyanızı istemcinin IP adresini ya da subnetini içerecek şekilde ayarlayıp ayarlamadığınızı ve sürücü tarafından desteklenen yetkilendirme yöntemlerinden birisini kullanıp kullanmadığını kontrol ediniz."; t[526] = "Interval {0} not yet implemented"; t[527] = "{0} aralığı henüz kodlanmadı."; t[532] = "Conversion of interval failed"; t[533] = "Interval dönüştürmesi başarısız."; t[540] = "Query timeout must be a value greater than or equals to 0."; t[541] = "Sorgu zaman aşımı değer sıfır veya sıfırdan büyük bir sayı olmalıdır."; t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[543] = "PooledConnection kapatıldığı için veya aynı PooledConnection için yeni bir bağlantı açıldığı için geçerli bağlantı otomatik kapatıldı."; t[544] = "ResultSet not positioned properly, perhaps you need to call next."; t[545] = "ResultSet doğru konumlanmamıştır, next işlemi çağırmanız gerekir."; t[546] = "Prepare called before end. prepare xid={0}, state={1}"; t[547] = "Sondan önce prepare çağırılmış. prepare xid={0}, state={1}"; t[548] = "Invalid UUID data."; t[549] = "Geçersiz UUID verisi."; t[550] = "This statement has been closed."; t[551] = "Bu komut kapatıldı."; t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; t[553] = "{0}''nin örneği ile kullanılacak SQL tip bulunamadı. Kullanılacak tip belirtmek için kesin Types değerleri ile setObject() kullanın."; t[554] = "Cannot call updateRow() when on the insert row."; t[555] = "Insert kaydı üzerinde updateRow() çağırılamaz."; t[562] = "Detail: {0}"; t[563] = "Ayrıntı: {0}"; t[566] = "Cannot call deleteRow() when on the insert row."; t[567] = "Insert kaydı üzerinde deleteRow() çağırılamaz."; t[568] = "Currently positioned before the start of the ResultSet. 
You cannot call deleteRow() here."; t[569] = "Şu an ResultSet başlangcıından önce konumlandı. deleteRow() burada çağırabilirsiniz."; t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[577] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer yapay bir değerdir: {0}"; t[578] = "Unknown Response Type {0}."; t[579] = "Bilinmeyen yanıt tipi {0}"; t[582] = "Unsupported value for stringtype parameter: {0}"; t[583] = "strinftype parametresi için destekleneyen değer: {0}"; t[584] = "Conversion to type {0} failed: {1}."; t[585] = "{0} veri tipine dönüştürme hatası: {1}."; t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; t[587] = "Bu SQLXML nesnesi ilklendirilmemiş; o yüzden ondan veri alamazsınız."; t[600] = "Unable to load the class {0} responsible for the datatype {1}"; t[601] = "{1} veri tipinden sorumlu {0} sınıfı yüklenemedi"; t[604] = "The fastpath function {0} is unknown."; t[605] = "{0} fastpath fonksiyonu bilinmemektedir."; t[608] = "Malformed function or procedure escape syntax at offset {0}."; t[609] = "{0} adresinde fonksiyon veya yordamda kaçış söz dizimi geçersiz."; t[612] = "Provided Reader failed."; t[613] = "Sağlanmış InputStream başarısız."; t[614] = "Maximum number of rows must be a value grater than or equal to 0."; t[615] = "En büyük getirilecek satır sayısı sıfırdan büyük olmalıdır."; t[616] = "Failed to create object for: {0}."; t[617] = "{0} için nesne oluşturma hatası."; t[620] = "Conversion of money failed."; t[621] = "Money dönüştürmesi başarısız."; t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[623] = "Giriş akımında beklenmeyen dosya sonu, {0} bayt beklenirken sadece {1} bayt alındı."; t[626] = "An unexpected result was returned by a query."; t[627] = "Sorgu beklenmeyen bir sonuç döndürdü."; t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. 
xid={0}, currentXid={1}, state={2}, flags={3}"; t[645] = "Transaction interleaving desteklenmiyor. xid={0}, currentXid={1}, state={2}, flags={3}"; t[646] = "An error occurred while setting up the SSL connection."; t[647] = "SSL bağlantısı ayarlanırken bir hata oluştu."; t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[655] = "Geçersiz UTF-8 çoklu bayt karakteri: {0} bayt, {1} bayt değeri kodlamak için kullanılmış: {2}"; t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; t[657] = "Desteklenmiyor: Prepare, transaction başlatran bağlantı tarafından çağırmalıdır. currentXid={0}, prepare xid={1}"; t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[659] = "SSLSocketFactory {0} ile örneklenmedi."; t[662] = "Failed to convert binary xml data to encoding: {0}."; t[663] = "xml verisinin şu dil kodlamasına çevirilmesi başarısız oldu: {0}"; t[670] = "Position: {0}"; t[671] = "Position: {0}"; t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[677] = "Yer: Dosya: {0}, Yordam: {1}, Satır: {2}"; t[684] = "Cannot tell if path is open or closed: {0}."; t[685] = "Pathın açık mı kapalı olduğunu tespit edilemiyor: {0}."; t[690] = "Unable to create StAXResult for SQLXML"; t[691] = "SQLXML için StAXResult yaratılamadı"; t[700] = "Cannot convert an instance of {0} to type {1}"; t[701] = "{0} instance, {1} tipine dönüştürülemiyor"; t[710] = "{0} function takes four and only four argument."; t[711] = "{0} fonksiyonunu yalnız dört parametre alabilir."; t[718] = "Interrupted while attempting to connect."; t[719] = "Bağlanırken kesildi."; t[722] = "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; t[723] = "Güvenlik politikanız bağlantının kurulmasını engelledi. 
java.net.SocketPermission'a veritabanına ve de bağlanacağı porta bağlantı izni vermelisiniz."; t[734] = "No function outputs were registered."; t[735] = "Hiçbir fonksiyon çıktısı kaydedilmedi."; t[736] = "{0} function takes one and only one argument."; t[737] = "{0} fonksiyonunu yalnız tek bir parametre alabilir."; t[744] = "This ResultSet is closed."; t[745] = "ResultSet kapalıdır."; t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[747] = "Geçersiz karakterler bulunmuştur. Bunun sebebi, verilerde veritabanın desteklediği dil kodlamadaki karakterlerin dışında bir karaktere rastlamasıdır. Bunun en yaygın örneği 8 bitlik veriyi SQL_ASCII veritabanında saklamasıdır."; t[752] = "Error disabling autocommit"; t[753] = "autocommit'i devre dışı bırakma sırasında hata"; t[754] = "Ran out of memory retrieving query results."; t[755] = "Sorgu sonuçları alınırken bellek yetersiz."; t[756] = "Returning autogenerated keys is not supported."; t[757] = "Otomatik üretilen değerlerin getirilmesi desteklenememktedir."; t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[761] = "İşlem, kaydırılabilen ResultSet gerektirir, ancak bu ResultSet FORWARD_ONLYdir."; t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[763] = "CallableStatement çalıştırıldı, ancak {2} tipi kaydedilmesine rağmen döndürme parametresi {0} ve tipi {1} idi."; t[764] = "Unable to find server array type for provided name {0}."; t[765] = "Belirtilen {0} adı için sunucu array tipi bulunamadı."; t[768] = "Unknown ResultSet holdability setting: {0}."; t[769] = "ResultSet tutabilme ayarı geçersiz: {0}."; t[772] = "Transaction isolation level {0} not supported."; t[773] = "Transaction isolation level 
{0} desteklenmiyor."; t[774] = "Zero bytes may not occur in identifiers."; t[775] = "Belirteçlerde sıfır bayt olamaz."; t[776] = "No results were returned by the query."; t[777] = "Sorgudan hiç bir sonuç dönmedi."; t[778] = "A CallableStatement was executed with nothing returned."; t[779] = "CallableStatement çalıştırma sonucunda veri getirilmedi."; t[780] = "wasNull cannot be call before fetching a result."; t[781] = "wasNull sonuç çekmeden önce çağırılamaz."; t[784] = "Returning autogenerated keys by column index is not supported."; t[785] = "Kolonların indexlenmesi ile otomatik olarak oluşturulan anahtarların döndürülmesi desteklenmiyor."; t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[787] = "Bu komut OUT parametresi bildirmemektedir. Bildirmek için '{' ?= call ... '}' kullanın."; t[788] = "Can''t use relative move methods while on the insert row."; t[789] = "Insert kaydı üzerinde relative move method kullanılamaz."; t[790] = "A CallableStatement was executed with an invalid number of parameters"; t[791] = "CallableStatement geçersiz sayıda parametre ile çalıştırıldı."; t[792] = "Connection is busy with another transaction"; t[793] = "Bağlantı, başka bir transaction tarafından meşgul ediliyor"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 397) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 395) + 1) << 1; for (;;) { idx += incr; if (idx >= 794) idx -= 794; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 794 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { 
return (idx < 794); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 794 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,457
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_ja.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_ja extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[1426]; t[0] = ""; t[1] = "Project-Id-Version: head-ja\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2018-07-23 11:10+0900\nLast-Translator: Kyotaro Horiguchi <horiguchi.kyotaro@lab.ntt.co.jp>\nLanguage-Team: Redshift <z-saito@guitar.ocn.ne.jp>\nLanguage: ja_JP\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.4\n"; t[2] = "Method {0} is not yet implemented."; t[3] = "{0} メソッドはまだ実装されていません。"; t[10] = "Got {0} error responses to single copy cancel request"; t[11] = "一つのコピー中断要求にたいして {0} 個のエラー応答が返されました"; t[20] = "The array index is out of range: {0}, number of elements: {1}."; t[21] = "配列インデックスが範囲外です: {0} 、要素の数: {1}"; t[26] = "Tried to obtain lock while already holding it"; t[27] = "すでに取得中のロックを取得しようとしました"; t[28] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. 
xid={0}, currentXid={1}, state={2}, flags={3}"; t[29] = "不正なプロトコル状態が要求されました。Transaction interleaving を試みましたが実装されていません。xid={0}, currentXid={1}, state={2}, flags={3}"; t[34] = "Unsupported property name: {0}"; t[35] = "サポートされていないプロパティ名: {0}"; t[36] = "Unsupported Types value: {0}"; t[37] = "サポートされない Types の値: {0}."; t[44] = "The hostname {0} could not be verified by hostnameverifier {1}."; t[45] = "ホスト名 {0} は、hostnameverifier {1} で検証できませんでした。"; t[52] = "Invalid UUID data."; t[53] = "不正なUUIDデータです。"; t[54] = "{0} parameter value must be an integer but was: {1}"; t[55] = "パラメータ {0} の値は整数でなければなりませんが指定された値は {1} でした"; t[56] = "Copying from database failed: {0}"; t[57] = "データベースからのコピーに失敗しました: {0}"; t[58] = "Requested CopyDual but got {0}"; t[59] = "CopyDualを要求しましたが {0} が返却されました。"; t[64] = "Multiple ResultSets were returned by the query."; t[65] = "クエリの実行により、複数のResultSetが返されました。"; t[76] = "Too many update results were returned."; t[77] = "返却された更新結果が多すぎます。"; t[84] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[85] = "システムカタログにデータがないため MaxIndexKeys の値を決定できません。"; t[90] = "Database connection failed when starting copy"; t[91] = "コピー開始時のデータベース接続に失敗しました"; t[94] = "Unknown XML Result class: {0}"; t[95] = "未知のXML結果クラス: {0}"; t[100] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[101] = "サーバのstandard_conforming_stringsパラメータは、{0}であると報告されました。JDBCドライバは、on または off を想定しています。"; t[102] = "Batch entry {0} {1} was aborted: {2} Call getNextException to see other errors in the batch."; t[103] = "バッチ {0} {1} はアボートしました: {2} このバッチの他のエラーは getNextException を呼び出すことで確認できます。"; t[104] = "Protocol error. 
Session setup failed."; t[105] = "プロトコルエラー。セッションは準備できませんでした。"; t[106] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; t[107] = "このSQLXMLオブジェクトは初期化されてなかったため、そこからデータを取得できません。"; t[116] = "Bad value for type {0} : {1}"; t[117] = "型 {0} に対する不正な値 : {1}"; t[120] = "A CallableStatement was executed with an invalid number of parameters"; t[121] = "CallableStatement は不正な数のパラメータで実行されました。"; t[124] = "Error preparing transaction. prepare xid={0}"; t[125] = "トランザクションの準備エラー。prepare xid={0}"; t[126] = "Can''t use relative move methods while on the insert row."; t[127] = "行挿入中に相対移動メソッドは使えません。"; t[130] = "Failed to create object for: {0}."; t[131] = "{0} のオブジェクトの生成に失敗しました。"; t[138] = "Cannot change transaction read-only property in the middle of a transaction."; t[139] = "トランザクションの中で read-only プロパティは変更できません。"; t[154] = "{0} function takes three and only three arguments."; t[155] = "{0} 関数はちょうど3個の引数を取ります。"; t[158] = "One-phase commit called for xid {0} but connection was prepared with xid {1}"; t[159] = "単相コミットが xid {0} に対してよびだされましたが、コネクションは xid {1} と関連付けられています"; t[160] = "Validating connection."; t[161] = "コネクションを検証しています"; t[166] = "This replication stream has been closed."; t[167] = "このレプリケーション接続は既にクローズされています。"; t[168] = "An error occurred while trying to get the socket timeout."; t[169] = "ソケットタイムアウト取得中にエラーが発生しました。"; t[170] = "Conversion of money failed."; t[171] = "貨幣金額の変換に失敗しました。"; t[172] = "Provided Reader failed."; t[173] = "渡された Reader で異常が発生しました。"; t[174] = "tried to call end without corresponding start call. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[175] = "対応する start の呼び出しなしで、end を呼び出しました。state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[178] = "Got CopyBothResponse from server during an active {0}"; t[179] = "{0} を実行中のサーバから CopyOutResponse を受け取りました"; t[186] = "Unknown ResultSet holdability setting: {0}."; t[187] = "ResultSet の holdability に対する未知の設定値です: {0}"; t[188] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[189] = "実装されていません: 第二フェーズの COMMIT は、待機接続で使わなくてはなりません。xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[190] = "Invalid server SCRAM signature"; t[191] = "不正なサーバSCRAM署名です"; t[192] = "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation."; t[193] = "サーバの client_encoding パラメータが {0} に変わりました。JDBCドライバが正しく動作するためには、 client_encoding は UTF8 である必要があります。"; t[198] = "Detail: {0}"; t[199] = "詳細: {0}"; t[200] = "Unexpected packet type during copy: {0}"; t[201] = "コピー中の想定外のパケット型です: {0}"; t[206] = "Transaction isolation level {0} not supported."; t[207] = "トランザクション分離レベル{0} はサポートされていません。"; t[210] = "The server requested password-based authentication, but no password was provided."; t[211] = "サーバはパスワード・ベースの認証を要求しましたが、パスワードが渡されませんでした。"; t[214] = "Interrupted while attempting to connect."; t[215] = "接続試行中に割り込みがありました。"; t[216] = "Fetch size must be a value greater to or equal to 0."; t[217] = "フェッチサイズは、0または、より大きな値でなくてはなりません。"; t[228] = "Added parameters index out of range: {0}, number of columns: {1}."; t[229] = "パラメータ・インデックスは範囲外です: {0} , カラム数: {1}"; t[230] = "Could not decrypt SSL key file {0}."; t[231] = "SSL keyファイル {0} を復号できませんでした。"; t[242] = "Could not initialize SSL context."; t[243] = "SSLコンテクストを初期化できませんでした。"; t[244] = "{0} function takes one and only one argument."; t[245] = "{0} 関数はちょうど1個の引数を取ります。"; t[248] = "Parameter of type {0} was registered, but 
call to get{1} (sqltype={2}) was made."; t[249] = "{0} 型のパラメータが登録されましたが、get{1} (sqltype={2}) が呼び出されました。"; t[258] = "Conversion of interval failed"; t[259] = "時間間隔の変換に失敗しました。"; t[262] = "xid must not be null"; t[263] = "xidはnullではいけません。"; t[264] = "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; t[265] = "セキュリティ・ポリシーにより、接続が妨げられました。おそらく、接続先のデータベースサーバのホストとポートに対して java.net.SocketPermission の connect 権限を許可する必要があります。"; t[270] = "ClientInfo property not supported."; t[271] = "ClientInfo プロパティはサポートされていません。"; t[272] = "LOB positioning offsets start at 1."; t[273] = "LOB 位置指定のオフセット値は 1 以上です。"; t[276] = "Tried to write to an inactive copy operation"; t[277] = "実行中ではないコピー操作に書き込もうとしました"; t[278] = "suspend/resume not implemented"; t[279] = "停止/再開 は実装されていません。"; t[290] = "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active."; t[291] = "トランザクション制御メソッド setAutoCommit(true), commit, rollback, setSavePoint は、XAトランザクションが有効である間は利用できません。"; t[292] = "Unable to find server array type for provided name {0}."; t[293] = "指定された名前 {0} のサーバ配列型はありません。"; t[300] = "Statement has been closed."; t[301] = "ステートメントはクローズされました。"; t[302] = "The fastpath function {0} is unknown."; t[303] = "{0} は未知の fastpath 関数です。"; t[306] = "The server''s DateStyle parameter was changed to {0}. 
The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[307] = "サーバのDateStyleパラメータは、{0} に変わりました。JDBCドライバが正しく動作するためには、DateStyle が ISO で始まる値である必要があります。"; t[308] = "Invalid flags {0}"; t[309] = "不正なフラグ {0}"; t[324] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[325] = "CallableStatementは宣言されましたが、registerOutParameter(1, <some type>) は呼び出されませんでした。"; t[328] = "Cannot commit when autoCommit is enabled."; t[329] = "autoCommit有効時に、明示的なコミットはできません。"; t[330] = "Database connection failed when writing to copy"; t[331] = "コピーへの書き込み中にデータベース接続で異常が発生しました"; t[334] = "Hint: {0}"; t[335] = "ヒント: {0}"; t[336] = "Interval {0} not yet implemented"; t[337] = "時間間隔 {0} は実装されていません"; t[338] = "No X509TrustManager found"; t[339] = "X509TrustManager が見つかりません"; t[346] = "No results were returned by the query."; t[347] = "クエリは結果を返却しませんでした。"; t[354] = "Heuristic commit/rollback not supported. forget xid={0}"; t[355] = "ヒューリスティック commit/rollback はサポートされません。forget xid={0}"; t[362] = "Fastpath call {0} - No result was returned or wrong size while expecting an integer."; t[363] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されないかまたは間違った大きさでした。"; t[364] = "Cannot cast an instance of {0} to type {1}"; t[365] = "{0} のインスタンスは {1} 型へキャストできません"; t[366] = "ResultSet not positioned properly, perhaps you need to call next."; t[367] = "適切な位置にいない ResultSetです。おそらく、nextを呼ぶ必要があります。"; t[372] = "Cannot establish a savepoint in auto-commit mode."; t[373] = "自動コミットモードでsavepointを作成できません。"; t[374] = "Prepare called before end. 
prepare xid={0}, state={1}"; t[375] = "end より前に prepare が呼ばれました prepare xid={0}, state={1}"; t[382] = "You must specify at least one column value to insert a row."; t[383] = "行挿入には、最低でも1つの列の値が必要です。"; t[388] = "Query timeout must be a value greater than or equals to 0."; t[389] = "クエリタイムアウトは、0またはより大きな値でなくてはなりません。"; t[394] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[395] = "渡された SSLSocketFactoryクラス {0} はインスタンス化できませんでした。"; t[396] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[397] = "パラメータのインデックスが範囲外です: {0} , パラメータ数: {1}"; t[400] = "This ResultSet is closed."; t[401] = "この ResultSet はクローズされています。"; t[402] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[403] = "開始位置より前もしくは終了位置より後ろであるため、ResultSetを更新することができません。"; t[404] = "SSL error: {0}"; t[405] = "SSL エラー: {0}"; t[408] = "The column name {0} was not found in this ResultSet."; t[409] = "この ResultSet に列名 {0} ありません。"; t[412] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[413] = "認証タイプ {0} はサポートされません。pg_hba.confでクライアントのIPアドレスまたはサブネットの指定があり、そのエントリでこのドライバがサポートする認証機構を使うように設定されていることを確認してください。"; t[440] = "The driver currently does not support COPY operations."; t[441] = "ドライバはコピー操作をサポートしていません。"; t[442] = "This statement has been closed."; t[443] = "このステートメントはクローズされています。"; t[444] = "Object is too large to send over the protocol."; t[445] = "オブジェクトが大きすぎてこのプロトコルでは送信できません。"; t[448] = "oid type {0} not known and not a number"; t[449] = "OID型 {0} は未知でかつ数値でもありません"; t[452] = "No hstore extension installed."; t[453] = "hstore 拡張がインストールされてません。"; t[454] = "Currently positioned after the end of the ResultSet. 
You cannot call deleteRow() here."; t[455] = "ResultSet の最後尾より後ろにいるため、deleteRow() を呼ぶことはできません。"; t[462] = "The column index is out of range: {0}, number of columns: {1}."; t[463] = "列インデックスは範囲外です: {0} , 列の数: {1}"; t[468] = "Got CopyInResponse from server during an active {0}"; t[469] = "{0} を実行中のサーバから CopyInResponse を受け取りました"; t[474] = "Fastpath call {0} - No result was returned and we expected a numeric."; t[475] = "Fastpath 呼び出し {0} - numeric を想定していましたが、結果は返却されませんでした。"; t[482] = "An error occurred while setting up the SSL connection."; t[483] = "SSL接続のセットアップ中に、エラーが起こりました。"; t[484] = "Could not open SSL certificate file {0}."; t[485] = "SSL証明書ファイル {0} を開けませんでした。"; t[490] = "free() was called on this LOB previously"; t[491] = "このLOBに対して free() はすでに呼び出し済みです"; t[492] = "Finalizing a Connection that was never closed:"; t[493] = "クローズされていないコネクションの終了処理を行います: "; t[494] = "Unsupported properties: {0}"; t[495] = "サポートされないプロパティ: {0}"; t[498] = "Interrupted while waiting to obtain lock on database connection"; t[499] = "データベース接続のロック待ちの最中に割り込みがありました"; t[504] = "The HostnameVerifier class provided {0} could not be instantiated."; t[505] = "与えれた HostnameVerifier クラス {0} はインスタンス化できませんした。"; t[506] = "Unable to create SAXResult for SQLXML."; t[507] = "SQLXMLに対するSAXResultを生成できません。"; t[510] = "The server does not support SSL."; t[511] = "サーバはSSLをサポートしていません。"; t[516] = "Got CopyData without an active copy operation"; t[517] = "実行中のコピー操作がないにもかかわらず CopyData を受け取りました"; t[518] = "Error during one-phase commit. 
commit xid={0}"; t[519] = "単一フェーズのCOMMITの処理中のエラー commit xid={0}"; t[522] = "Network timeout must be a value greater than or equal to 0."; t[523] = "ネットワークタイムアウトは、0またはより大きな値でなくてはなりません。"; t[532] = "Unsupported type conversion to {1}."; t[533] = "{1} への型変換はサポートされていません。"; t[534] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[535] = "入力ストリームが途中で終了しました、{0} バイトを読み込もうとしましたが、 {1} バイトしかありませんでした。"; t[536] = "Zero bytes may not occur in string parameters."; t[537] = "バイト値0を文字列ラメータに含めることはできません。"; t[538] = "This connection has been closed."; t[539] = "このコネクションは既にクローズされています。"; t[540] = "Cannot call deleteRow() when on the insert row."; t[541] = "行挿入時に deleteRow() を呼び出せません。"; t[544] = "Unable to bind parameter values for statement."; t[545] = "ステートメントのパラメータ値をバインドできませんでした。"; t[552] = "Cannot convert an instance of {0} to type {1}"; t[553] = "{0} のインスタンスは {1} 型に変換できません"; t[554] = "Conversion to type {0} failed: {1}."; t[555] = "{0} への型変換に失敗しました: {1}"; t[556] = "Error loading default settings from driverconfig.properties"; t[557] = "driverconfig.properties からの初期設定ロード中のエラー"; t[558] = "Expected command status BEGIN, got {0}."; t[559] = "BEGINコマンドステータスを想定しましたが、{0} が返却されました。"; t[564] = "An unexpected result was returned by a query."; t[565] = "クエリが想定外の結果を返却しました。"; t[568] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[569] = "何らかの異常によりドライバが動作できません。この例外を報告して下さい。"; t[576] = "One or more ClientInfo failed."; t[577] = "1つ以上の ClinentInfo で問題が発生しました。"; t[578] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[579] = "場所: ファイル: {0}, ルーチン: {1},行: {2}"; t[582] = "Unknown type {0}."; t[583] = "未知の型 {0}."; t[590] = "This SQLXML object has already been freed."; t[591] = "このSQLXMLオブジェクトはすでに解放されています。"; t[594] = "Unexpected copydata from server for {0}"; t[595] = "{0} を実行中のサーバからのあり得ない CopyData"; t[596] = "{0} function takes two or three arguments."; t[597] = "{0} 関数は2個、または3個の引数を取ります。"; t[602] = "Connection to {0} refused. 
Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections."; t[603] = "{0} への接続が拒絶されました。ホスト名とポート番号が正しいことと、postmaster がTCP/IP接続を受け付けていることを確認してください。"; t[612] = "Unsupported binary encoding of {0}."; t[613] = "{0} 型に対するサポートされないバイナリエンコーディング。"; t[616] = "Returning autogenerated keys is not supported."; t[617] = "自動生成キーを返すことはサポートされていません。"; t[620] = "Provided InputStream failed."; t[621] = "渡された InputStream で異常が発生しました。"; t[626] = "No IOException expected from StringBuffer or StringBuilder"; t[627] = "StringBuffer または StringBuilder からの IOException は想定されていません"; t[638] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; t[639] = "実装されていません: 単一フェーズのCOMMITは、開始時と同じ接続で発行されなければなりません。"; t[640] = "Cannot reference a savepoint after it has been released."; t[641] = "解放された savepoint は参照できません。"; t[642] = "Ran out of memory retrieving query results."; t[643] = "クエリの結果取得中にメモリ不足が起きました。"; t[654] = "No primary key found for table {0}."; t[655] = "テーブル {0} には主キーがありません。"; t[658] = "Error during recover"; t[659] = "recover 処理中のエラー"; t[666] = "This copy stream is closed."; t[667] = "このコピーストリームはクローズされています。"; t[668] = "Could not open SSL root certificate file {0}."; t[669] = "SSLルート証明書ファイル {0} をオープンできませんでした。"; t[676] = "Invalid sslmode value: {0}"; t[677] = "不正な sslmode 値: {0}"; t[678] = "Cannot tell if path is open or closed: {0}."; t[679] = "経路が開いているか、閉じているか判別できません: {0}"; t[682] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[683] = "不正なUTF-8シーケンス: {1} バイトの値のエンコードに{0} バイト使用しています: {2}"; t[684] = "Unknown XML Source class: {0}"; t[685] = "未知のXMLソースクラス: {0}"; t[686] = "Internal Query: {0}"; t[687] = "内部クエリ: {0}"; t[702] = "Could not find a java cryptographic algorithm: {0}."; t[703] = "javaの暗号化アルゴリズム {0} を見つけることができませんでした。"; t[706] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been 
closed."; t[707] = "同じ PooledConnection に対して新しい接続をオープンしたか、この PooledConnection がクローズされたため、接続が自動的にクローズされました。"; t[708] = "Invalid fetch direction constant: {0}."; t[709] = "不正なフェッチ方向の定数です: {0}"; t[714] = "Can''t use query methods that take a query string on a PreparedStatement."; t[715] = "PreparedStatement でクエリ文字列を取るクエリメソッドは使えません。"; t[716] = "SCRAM authentication failed, server returned error: {0}"; t[717] = "スクラム認証が失敗しました、サーバはエラーを返却しました: {0}"; t[722] = "Invalid elements {0}"; t[723] = "不正な要素です: {0}"; t[738] = "Not on the insert row."; t[739] = "挿入行上にいません。"; t[740] = "Unable to load the class {0} responsible for the datatype {1}"; t[741] = "データ型 {1} に対応するクラス{0} をロードできません。"; t[752] = "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."; t[753] = "javaの暗号化アルゴリズムを見つけることができませんでした。X.509 CertificateFactory は利用できません。"; t[756] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; t[757] = "{0} のインスタンスに対して使うべきSQL型を推測できません。明示的な Types 引数をとる setObject() で使うべき型を指定してください。"; t[760] = "Invalid server-first-message: {0}"; t[761] = "不正な server-first-message: {0}"; t[762] = "No value specified for parameter {0}."; t[763] = "パラメータ {0} に値が設定されてません。"; t[766] = "Fastpath call {0} - No result was returned and we expected an integer."; t[767] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されませんでした。"; t[774] = "Unable to create StAXResult for SQLXML"; t[775] = "SQLXMLに対するStAXResultを生成できません。"; t[798] = "CommandComplete expected COPY but got: "; t[799] = "CommandComplete はCOPYを想定しましたが、次の結果が返却されました:"; t[800] = "Enter SSL password: "; t[801] = "SSLパスワード入力: "; t[802] = "Failed to convert binary xml data to encoding: {0}."; t[803] = "バイナリxmlデータのエンコード: {0} への変換に失敗しました。"; t[804] = "No SCRAM mechanism(s) advertised by the server"; t[805] = "サーバは SCRAM認証機構を広告していません"; t[818] = "Custom type maps are not supported."; t[819] = "カスタム型マップはサポートされません。"; t[822] = "Illegal UTF-8 sequence: final 
value is a surrogate value: {0}"; t[823] = "不正なUTF-8シーケンス: 変換後の値がサロゲート値です: {0}"; t[824] = "The SocketFactory class provided {0} could not be instantiated."; t[825] = "渡された SocketFactoryクラス {0} はインスタンス化できませんでした。"; t[832] = "Large Objects may not be used in auto-commit mode."; t[833] = "ラージオブジェクトは、自動コミットモードで使うことができません。"; t[834] = "Fastpath call {0} - No result was returned or wrong size while expecting a long."; t[835] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されないかまたは間違った大きさでした。"; t[844] = "Invalid stream length {0}."; t[845] = "不正なストリーム長 {0}。"; t[850] = "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----."; t[851] = "プロパティ sslfactoryarg の先頭はプリフィクス file:, classpath:, env:, sys: もしくは -----BEGIN CERTIFICATE----- のいずれかでなければなりません。"; t[852] = "Can''t use executeWithFlags(int) on a Statement."; t[853] = "executeWithFlags(int) は Statement インスタンスでは使えません。"; t[856] = "Cannot retrieve the id of a named savepoint."; t[857] = "名前付き savepoint の id は取得できません。"; t[860] = "Could not read password for SSL key file by callbackhandler {0}."; t[861] = "callbackhandler {0} で、SSL keyファイルを読めませんでした。"; t[874] = "Tried to break lock on database connection"; t[875] = "データベース接続のロックを破壊しようとしました"; t[878] = "Unexpected error writing large object to database."; t[879] = "データベースへのラージオブジェクト書き込み中に想定外のエラーが起きました。"; t[880] = "Expected an EOF from server, got: {0}"; t[881] = "サーバからの EOF を期待していましたが、{0} が送られてきました"; t[886] = "Could not read SSL root certificate file {0}."; t[887] = "SSLルート証明書ファイル {0} を読めませんでした。"; t[888] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; t[889] = "このSQLXMLオブジェクトは既に初期化済みであるため、これ以上操作できません。"; t[896] = "The array index is out of range: {0}"; t[897] = "配列インデックスが範囲外です: {0}"; t[898] = "Unable to set network timeout."; t[899] = "ネットワークタイムアウトが設定できません。"; t[900] = "{0} function takes four and only four argument."; t[901] = "{0} 関数はちょうど4個の引数を取ります。"; t[904] = "Unable to decode xml 
data."; t[905] = "xmlデータをデコードできません。"; t[916] = "Bad value for type timestamp/date/time: {1}"; t[917] = "timestamp/date/time 型に対する不正な値: {1}"; t[928] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[929] = "不正なUTF-8シーケンス: 変換後の値が範囲外です: {0}"; t[932] = "Unable to parse the count in command completion tag: {0}."; t[933] = "コマンド完了タグのカウントをパースできません: {0}"; t[942] = "Read from copy failed."; t[943] = "コピーストリームからの読み取りに失敗しました。"; t[944] = "Maximum number of rows must be a value grater than or equal to 0."; t[945] = "行数の制限値は 0またはより大きな値でなくてはなりません。"; t[958] = "The password callback class provided {0} could not be instantiated."; t[959] = "渡されたパスワードコールバッククラス {0} はインスタンス化できませんでした。"; t[960] = "Returning autogenerated keys by column index is not supported."; t[961] = "列インデックスで自動生成キーを返すことはサポートされていません。"; t[966] = "Properties for the driver contains a non-string value for the key "; t[967] = "このドライバのプロパティでは以下のキーに対して文字列ではない値が設定されています: "; t[974] = "Database connection failed when canceling copy operation"; t[975] = "コピー操作中断のためのデータベース接続に失敗しました"; t[976] = "DataSource has been closed."; t[977] = "データソースはクローズされました。"; t[996] = "Unable to get network timeout."; t[997] = "ネットワークタイムアウトが取得できません。"; t[1000] = "A CallableStatement was executed with nothing returned."; t[1001] = "CallableStatement が実行されましたがなにも返却されませんでした。"; t[1002] = "Can''t refresh the insert row."; t[1003] = "挿入行を再フェッチすることはできません。"; t[1004] = "Could not find a server with specified targetServerType: {0}"; t[1005] = "指定された targetServerType のサーバーが見つかりません: {0}"; t[1006] = "This PooledConnection has already been closed."; t[1007] = "この PooledConnectionは、すでに閉じられています。"; t[1010] = "Cannot call cancelRowUpdates() when on the insert row."; t[1011] = "行挿入時に cancelRowUpdates() を呼び出せません。"; t[1012] = "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}"; t[1013] = "すでにプリペアされているトランザクションをプリペアしようとしました、プリペアされている xid={0}, プリペアしようとした xid={1}"; t[1018] = "CopyIn copy direction can't receive data"; t[1019] = "コピー方向 
CopyIn はデータを受信できません"; t[1024] = "conversion to {0} from {1} not supported"; t[1025] = "{1} から {0} への変換はサポートされていません。"; t[1030] = "An error occurred reading the certificate"; t[1031] = "証明書の読み込み中にエラーが起きました"; t[1032] = "Invalid or unsupported by client SCRAM mechanisms"; t[1033] = "不正であるかクライアントのSCRAM機構でサポートされていません"; t[1034] = "Malformed function or procedure escape syntax at offset {0}."; t[1035] = "関数またはプロシージャの間違ったエスケープ構文が位置{0}で見つかりました。"; t[1038] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[1039] = "バインドメッセージ長 {0} は長すぎます。InputStreamのパラメータにとても大きな長さ、あるいは不正確な長さが設定されている可能性があります。"; t[1050] = "Cannot change transaction isolation level in the middle of a transaction."; t[1051] = "トランザクションの中でトランザクション分離レベルは変更できません。"; t[1058] = "Internal Position: {0}"; t[1059] = "内部位置: {0}"; t[1062] = "No function outputs were registered."; t[1063] = "関数出力は登録されていません。"; t[1072] = "Unexpected packet type during replication: {0}"; t[1073] = "レプリケーション中に想定外のパケット型: {0}"; t[1076] = "Error disabling autocommit"; t[1077] = "自動コミットの無効化処理中のエラー"; t[1080] = "Requested CopyOut but got {0}"; t[1081] = "CopyOut を要求しましたが {0} が返却されました"; t[1084] = "Error rolling back prepared transaction. 
rollback xid={0}, preparedXid={1}, currentXid={2}"; t[1085] = "プリペアドトランザクションのロールバック中のエラー rollback xid={0}, preparedXid={1}, currentXid={2}"; t[1086] = "Database connection failed when ending copy"; t[1087] = "コピー操作の終了中にデータベース接続で異常が発生しました"; t[1090] = "Unsupported value for stringtype parameter: {0}"; t[1091] = "サポートされないstringtypeパラメータ値です: {0}"; t[1094] = "The sslfactoryarg property may not be empty."; t[1095] = "プロパティ sslfactoryarg は空であってはなりません。"; t[1102] = "Loading the SSL root certificate {0} into a TrustManager failed."; t[1103] = "SSLルート証明書 {0} をTrustManagerへ読み込めませんでした。"; t[1104] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[1105] = "不正なUTF-8シーケンス: 先頭バイトが {0}: {1}"; t[1116] = "The environment variable containing the server's SSL certificate must not be empty."; t[1117] = "サーバのSSL証明書を指定する環境変数は空であってはなりません。"; t[1118] = "Connection attempt timed out."; t[1119] = "接続試行がタイムアウトしました。"; t[1130] = "Cannot write to copy a byte of value {0}"; t[1131] = "バイト値{0}はコピーストリームへの書き込みはできません"; t[1132] = "Connection has been closed."; t[1133] = "接続はクローズされました。"; t[1136] = "Could not read password for SSL key file, console is not available."; t[1137] = "SSL keyファイルのパスワードを読めませんでした。コンソールは利用できません。"; t[1140] = "The JVM claims not to support the encoding: {0}"; t[1141] = "JVMでサポートされないエンコーディングです: {0}"; t[1146] = "Unexpected command status: {0}."; t[1147] = "想定外のコマンドステータス: {0}。"; t[1154] = "Cannot rollback when autoCommit is enabled."; t[1155] = "autoCommit有効時に、明示的なロールバックはできません。"; t[1158] = "Not implemented: Prepare must be issued using the same connection that started the transaction. 
currentXid={0}, prepare xid={1}"; t[1159] = "実装されていません: Prepareは、トランザクションを開始したものと同じコネクションで発行しなくてはなりません。currentXid={0}, prepare xid={1}"; t[1162] = "The connection attempt failed."; t[1163] = "接続試行は失敗しました。"; t[1166] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[1167] = "不正なUTF-8シーケンス: {1} バイトのシーケンス中 {0} バイト目が、10xxxxxx ではありません: {2}"; t[1178] = "A connection could not be made using the requested protocol {0}."; t[1179] = "要求されたプロトコル {0} で接続することができませんでした。"; t[1182] = "The system property containing the server's SSL certificate must not be empty."; t[1183] = "サーバーのSSL証明書を指定するシステムプロパティは空であってはなりません。"; t[1188] = "Cannot call updateRow() when on the insert row."; t[1189] = "挿入行上では updateRow() を呼び出すことができません。"; t[1192] = "Fastpath call {0} - No result was returned and we expected a long."; t[1193] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されませんでした。"; t[1198] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[1199] = "ラージオブジェクトの切り詰めは、バージョン8.3 以降のサーバでのみ実装されています。"; t[1200] = "Cannot convert the column of type {0} to requested type {1}."; t[1201] = "{0}型のカラムの値を指定の型 {1} に変換できませんでした。"; t[1204] = "Requested CopyIn but got {0}"; t[1205] = "CopyIn を要求しましたが {0} が返却されました"; t[1206] = "Cannot cast to boolean: \"{0}\""; t[1207] = "boolean へのキャストはできません: \"{0}\""; t[1212] = "Invalid server-final-message: {0}"; t[1213] = "不正な server-final-message: {0}."; t[1214] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[1215] = "このステートメントは、OUTパラメータを宣言していません。'{' ?= call ... 
'}' を使って宣言して下さい。"; t[1218] = "Cannot truncate LOB to a negative length."; t[1219] = "LOBを負の長さに切り詰めることはできません。"; t[1220] = "Zero bytes may not occur in identifiers."; t[1221] = "バイト値0を識別子に含めることはできません。"; t[1222] = "Unable to convert DOMResult SQLXML data to a string."; t[1223] = "DOMResult SQLXMLデータを文字列に変換することができません。"; t[1224] = "Missing expected error response to copy cancel request"; t[1225] = "予期していたコピーの中断要求へのエラー応答がありませんでした"; t[1234] = "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)"; t[1235] = "SCRAM認証はこのドライバではサポートされません。JDK8 以降かつ pgjdbc 42.2.0 以降(\".jre\"のバージョンではありません)が必要です。"; t[1240] = "Tried to end inactive copy"; t[1241] = "実行中ではないコピー操作を終了しようとしました"; t[1246] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[1247] = "CallableStatement 関数が実行され、出力パラメータ {0} は {1} 型 でした。しかし、{2} 型 が登録されました。"; t[1250] = "Failed to setup DataSource."; t[1251] = "データソースのセットアップに失敗しました。"; t[1252] = "Loading the SSL certificate {0} into a KeyManager failed."; t[1253] = "SSL証明書 {0} をKeyManagerへ読み込めませんでした。"; t[1254] = "Could not read SSL key file {0}."; t[1255] = "SSL keyファイル {0} を読めませんでした。"; t[1258] = "Tried to read from inactive copy"; t[1259] = "実行中ではないコピーから読み取ろうとしました"; t[1260] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; t[1261] = "ResultSetは更新不可です。この結果セットを生成したクエリは、ただ一つのテーブルを選択して、そのテーブルの全ての主キーを選択する必要があります。詳細に関しては JDBC 2.1 API仕様、章 5.6 を参照して下さい。"; t[1264] = "A result was returned when none was expected."; t[1265] = "ないはずの結果が返却されました。"; t[1266] = "Tried to cancel an inactive copy operation"; t[1267] = "実行中ではないコピー操作の中断を試みました"; t[1268] = "Server SQLState: {0}"; t[1269] = "サーバ SQLState: {0}"; t[1272] = "Unable to find keywords in the system catalogs."; t[1273] = "キーワードはシステムカタログにありません。"; t[1276] = "Connection is busy with another transaction"; t[1277] = "接続は、別のトランザクションを処理中です"; t[1280] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[1281] = "CONCUR_READ_ONLYに設定されている ResultSet は更新できません。"; t[1296] = "commit called before end. commit xid={0}, state={1}"; t[1297] = "end の前に COMMIT を呼びました commit xid={0}, state={1}"; t[1308] = "Redshift LOBs can only index to: {0}"; t[1309] = "Redshift LOB 上の位置指定は最大 {0} までです"; t[1310] = "Where: {0}"; t[1311] = "場所: {0}"; t[1312] = "Unable to find name datatype in the system catalogs."; t[1313] = "name データ型がシステムカタログにありません。"; t[1314] = "Invalid targetServerType value: {0}"; t[1315] = "不正な targetServerType 値です。{0}."; t[1318] = "Cannot retrieve the name of an unnamed savepoint."; t[1319] = "無名 savepoint の名前は取得できません。"; t[1320] = "Error committing prepared transaction. 
commit xid={0}, preparedXid={1}, currentXid={2}"; t[1321] = "プリペアドトランザクションの COMMIT 処理中のエラー。commit xid={0}, preparedXid={1}, currentXid={2}"; t[1324] = "Invalid timeout ({0}<0)."; t[1325] = "不正なタイムアウト値 ({0}<0)。"; t[1328] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[1329] = "操作は、スクロール可能なResultSetを必要としますが、このResultSetは、 FORWARD_ONLYです。"; t[1330] = "Results cannot be retrieved from a CallableStatement before it is executed."; t[1331] = "実行前の CallableStatement から結果の取得はできません。"; t[1332] = "wasNull cannot be call before fetching a result."; t[1333] = "wasNullは、結果フェッチ前に呼び出せません。"; t[1336] = "{0} function doesn''t take any argument."; t[1337] = "{0} 関数は引数を取りません。"; t[1344] = "Unknown Response Type {0}."; t[1345] = "未知の応答タイプ {0} です。"; t[1346] = "The JVM claims not to support the {0} encoding."; t[1347] = "JVMは、エンコーディング {0} をサポートしません。"; t[1348] = "{0} function takes two and only two arguments."; t[1349] = "{0} 関数はちょうど2個の引数を取ります。"; t[1350] = "The maximum field size must be a value greater than or equal to 0."; t[1351] = "最大の項目サイズは、0またはより大きな値でなくてはなりません。"; t[1352] = "Received CommandComplete ''{0}'' without an active copy operation"; t[1353] = "実行中のコピー操作がないにもかかわらず CommandComplete ''{0}'' を受信しました"; t[1354] = "Unable to translate data into the desired encoding."; t[1355] = "データを指定されたエンコーディングに変換することができません。"; t[1368] = "Got CopyOutResponse from server during an active {0}"; t[1369] = "{0} を実行中のサーバから CopyOutResponse を受け取りました"; t[1370] = "Failed to set ClientInfo property: {0}"; t[1371] = "ClientInfo のプロパティの設定に失敗しました: {0}"; t[1372] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."; t[1373] = "不正な文字データが見つかりました。これはデータベース作成時の文字セットに対して不正な文字を含むデータが格納されているために起きている可能性が高いです。最も一般的な例は、SQL_ASCIIデータベースに8bitデータが保存されている場合です。"; t[1374] = "Unknown Types value."; t[1375] = "未知の Types の値です。"; t[1376] = " (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)"; t[1377] = "(pgjdbc: server-encoding として {0} を自動検出しました、メッセージが読めない場合はデータベースログおよび host, port, dbname, user, password, pg_dba.conf を確認してください)"; t[1386] = "GSS Authentication failed"; t[1387] = "GSS認証は失敗しました。"; t[1390] = "An error occurred while trying to reset the socket timeout."; t[1391] = "ソケットタイムアウトのリセット中にエラーが発生しました。"; t[1392] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[1393] = "RsultSet の開始点より前にいるため、deleteRow() を呼ぶことはできません。"; t[1394] = "Current connection does not have an associated xid. prepare xid={0}"; t[1395] = "この接続は xid と関連付けられていません。プリペア xid={0}"; t[1408] = "An I/O error occurred while sending to the backend."; t[1409] = "バックエンドへの送信中に、入出力エラーが起こりました。"; t[1416] = "One-phase commit with unknown xid. 
commit xid={0}, currentXid={1}"; t[1417] = "未知の xid の単相コミット。 コミットxid={0}, 現在のxid={1}"; t[1420] = "Position: {0}"; t[1421] = "位置: {0}"; t[1422] = "There are no rows in this ResultSet."; t[1423] = "このResultSetに行がありません。"; t[1424] = "Database connection failed when reading from copy"; t[1425] = "コピーからの読み取り中にデータベース接続で異常が発生しました"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 713) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 711) + 1) << 1; for (;;) { idx += incr; if (idx >= 1426) idx -= 1426; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 1426 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 1426); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 1426 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,458
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_nl.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_nl extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[36]; t[0] = ""; t[1] = "Project-Id-Version: Redshift JDBC Driver 2.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-11 23:55-0700\nLast-Translator: Arnout Kuiper <ajkuiper@wxs.nl>\nLanguage-Team: Dutch <ajkuiper@wxs.nl>\nLanguage: nl\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; t[2] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[3] = "Iets ongewoons is opgetreden, wat deze driver doet falen. Rapporteer deze fout AUB: {0}"; t[8] = "Unknown Types value."; t[9] = "Onbekende Types waarde."; t[12] = "Fastpath call {0} - No result was returned and we expected an integer."; t[13] = "Fastpath aanroep {0} - Geen resultaat werd teruggegeven, terwijl we een integer verwacht hadden."; t[20] = "The fastpath function {0} is unknown."; t[21] = "De fastpath functie {0} is onbekend."; t[22] = "No results were returned by the query."; t[23] = "Geen resultaten werden teruggegeven door de query."; t[26] = "An unexpected result was returned by a query."; t[27] = "Een onverwacht resultaat werd teruggegeven door een query"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 18) << 1; java.lang.Object found = table[idx]; if (found != null && msgid.equals(found)) return table[idx + 1]; return null; } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 36 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 36); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; 
while (idx < 36 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,459
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_de.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_de extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[794]; t[0] = ""; t[1] = "Project-Id-Version: head-de\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-09-12 14:22+0200\nLast-Translator: Andre Bialojahn <ab.spamnews@freenet.de>\nLanguage-Team: Deutsch\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.0.2\nX-Poedit-Language: German\nX-Poedit-Country: GERMANY\n"; t[4] = "DataSource has been closed."; t[5] = "Die Datenquelle wurde geschlossen."; t[18] = "Where: {0}"; t[19] = "Wobei: {0}"; t[26] = "The connection attempt failed."; t[27] = "Der Verbindungsversuch schlug fehl."; t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[29] = "Die augenblickliche Position ist hinter dem Ende des ResultSets. Dort kann ''deleteRow()'' nicht aufgerufen werden."; t[36] = "Multiple ResultSets were returned by the query."; t[37] = "Die Abfrage ergab mehrere ResultSets."; t[50] = "Too many update results were returned."; t[51] = "Zu viele Updateergebnisse wurden zurückgegeben."; t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[59] = "Ungültige UTF-8-Sequenz: das erste Byte ist {0}: {1}"; t[66] = "The column name {0} was not found in this ResultSet."; t[67] = "Der Spaltenname {0} wurde in diesem ResultSet nicht gefunden."; t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; t[71] = "Der Fastpath-Aufruf {0} gab kein Ergebnis zurück, jedoch wurde ein Integer erwartet."; t[74] = "Protocol error. Session setup failed."; t[75] = "Protokollfehler. 
Die Sitzung konnte nicht gestartet werden."; t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[77] = "Ein CallableStatement wurde deklariert, aber kein Aufruf von ''registerOutParameter(1, <some type>)'' erfolgte."; t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[79] = "ResultSets, deren Zugriffsart CONCUR_READ_ONLY ist, können nicht aktualisiert werden."; t[90] = "LOB positioning offsets start at 1."; t[91] = "Positionsoffsets für LOBs beginnen bei 1."; t[92] = "Internal Position: {0}"; t[93] = "Interne Position: {0}"; t[96] = "free() was called on this LOB previously"; t[97] = "free() wurde bereits für dieses LOB aufgerufen."; t[100] = "Cannot change transaction read-only property in the middle of a transaction."; t[101] = "Die Nur-Lesen-Eigenschaft einer Transaktion kann nicht während der Transaktion verändert werden."; t[102] = "The JVM claims not to support the {0} encoding."; t[103] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen."; t[108] = "{0} function doesn''t take any argument."; t[109] = "Die {0}-Funktion akzeptiert kein Argument."; t[112] = "xid must not be null"; t[113] = "Die xid darf nicht null sein."; t[114] = "Connection has been closed."; t[115] = "Die Verbindung wurde geschlossen."; t[122] = "The server does not support SSL."; t[123] = "Der Server unterstützt SSL nicht."; t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[141] = "Ungültige UTF-8-Sequenz: Byte {0} der {1} Bytesequenz ist nicht 10xxxxxx: {2}"; t[148] = "Hint: {0}"; t[149] = "Hinweis: {0}"; t[152] = "Unable to find name datatype in the system catalogs."; t[153] = "In den Systemkatalogen konnte der Namensdatentyp nicht gefunden werden."; t[156] = "Unsupported Types value: {0}"; t[157] = "Unbekannter Typ: {0}."; t[158] = "Unknown type {0}."; t[159] = "Unbekannter Typ {0}."; t[166] = "{0} function takes two and only two arguments."; t[167] 
= "Die {0}-Funktion erwartet genau zwei Argumente."; t[170] = "Finalizing a Connection that was never closed:"; t[171] = "Eine Connection wurde finalisiert, die nie geschlossen wurde:"; t[180] = "The maximum field size must be a value greater than or equal to 0."; t[181] = "Die maximale Feldgröße muss ein Wert größer oder gleich Null sein."; t[186] = "Redshift LOBs can only index to: {0}"; t[187] = "LOBs in Redshift können nur auf {0} verweisen."; t[194] = "Method {0} is not yet implemented."; t[195] = "Die Methode {0} ist noch nicht implementiert."; t[198] = "Error loading default settings from driverconfig.properties"; t[199] = "Fehler beim Laden der Voreinstellungen aus driverconfig.properties"; t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; t[201] = "Ergebnisse können nicht von einem CallableStatement abgerufen werden, bevor es ausgeführt wurde."; t[202] = "Large Objects may not be used in auto-commit mode."; t[203] = "LargeObjects (LOB) dürfen im Modus ''auto-commit'' nicht verwendet werden."; t[208] = "Expected command status BEGIN, got {0}."; t[209] = "Statt des erwarteten Befehlsstatus BEGIN, wurde {0} empfangen."; t[218] = "Invalid fetch direction constant: {0}."; t[219] = "Unzulässige Richtungskonstante bei fetch: {0}."; t[222] = "{0} function takes three and only three arguments."; t[223] = "Die {0}-Funktion erwartet genau drei Argumente."; t[226] = "Error during recover"; t[227] = "Beim Wiederherstellen trat ein Fehler auf."; t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[229] = "Das ResultSet kann nicht aktualisiert werden, da es entweder vor oder nach dem Ende der Ergebnisse ist."; t[230] = "The JVM claims not to support the encoding: {0}"; t[231] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen."; t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[233] = "Ein Parameter des Typs {0} 
wurde registriert, jedoch erfolgte ein Aufruf get{1} (sqltype={2})."; t[240] = "Cannot establish a savepoint in auto-commit mode."; t[241] = "Ein Rettungspunkt kann im Modus ''auto-commit'' nicht erstellt werden."; t[242] = "Cannot retrieve the id of a named savepoint."; t[243] = "Die ID eines benamten Rettungspunktes kann nicht ermittelt werden."; t[244] = "The column index is out of range: {0}, number of columns: {1}."; t[245] = "Der Spaltenindex {0} ist außerhalb des gültigen Bereichs. Anzahl Spalten: {1}."; t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[251] = "Etwas Ungewöhnliches ist passiert, das den Treiber fehlschlagen ließ. Bitte teilen Sie diesen Fehler mit."; t[260] = "Cannot cast an instance of {0} to type {1}"; t[261] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich."; t[264] = "Unknown Types value."; t[265] = "Unbekannter Typ."; t[266] = "Invalid stream length {0}."; t[267] = "Ungültige Länge des Datenstroms: {0}."; t[272] = "Cannot retrieve the name of an unnamed savepoint."; t[273] = "Der Name eines namenlosen Rettungpunktes kann nicht ermittelt werden."; t[274] = "Unable to translate data into the desired encoding."; t[275] = "Die Daten konnten nicht in die gewünschte Kodierung gewandelt werden."; t[276] = "Expected an EOF from server, got: {0}"; t[277] = "Vom Server wurde ein EOF erwartet, jedoch {0} gelesen."; t[278] = "Bad value for type {0} : {1}"; t[279] = "Unzulässiger Wert für den Typ {0} : {1}."; t[280] = "The server requested password-based authentication, but no password was provided."; t[281] = "Der Server verlangt passwortbasierte Authentifizierung, jedoch wurde kein Passwort angegeben."; t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[297] = "Das Abschneiden großer Objekte ist nur in Versionen nach 8.3 implementiert."; t[298] = "This PooledConnection has already been closed."; t[299] = "Diese PooledConnection ist 
bereits geschlossen worden."; t[302] = "ClientInfo property not supported."; t[303] = "Die ClientInfo-Eigenschaft ist nicht unterstützt."; t[306] = "Fetch size must be a value greater to or equal to 0."; t[307] = "Die Fetch-Größe muss ein Wert größer oder gleich Null sein."; t[312] = "A connection could not be made using the requested protocol {0}."; t[313] = "Es konnte keine Verbindung unter Verwendung des Protokolls {0} hergestellt werden."; t[322] = "There are no rows in this ResultSet."; t[323] = "Es gibt keine Zeilen in diesem ResultSet."; t[324] = "Unexpected command status: {0}."; t[325] = "Unerwarteter Befehlsstatus: {0}."; t[334] = "Not on the insert row."; t[335] = "Nicht in der Einfügezeile."; t[344] = "Server SQLState: {0}"; t[345] = "Server SQLState: {0}"; t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[349] = "Der standard_conforming_strings Parameter des Servers steht auf {0}. Der JDBC-Treiber erwartete on oder off."; t[360] = "The driver currently does not support COPY operations."; t[361] = "Der Treiber unterstützt derzeit keine COPY-Operationen."; t[364] = "The array index is out of range: {0}, number of elements: {1}."; t[365] = "Der Arrayindex {0} ist außerhalb des gültigen Bereichs. 
Vorhandene Elemente: {1}."; t[374] = "suspend/resume not implemented"; t[375] = "Anhalten/Fortsetzen ist nicht implementiert."; t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; t[379] = "Nicht implementiert: Die einphasige Bestätigung muss über die selbe Verbindung abgewickelt werden, die verwendet wurde, um sie zu beginnen."; t[398] = "Cannot call cancelRowUpdates() when on the insert row."; t[399] = "''cancelRowUpdates()'' kann in der Einfügezeile nicht aufgerufen werden."; t[400] = "Cannot reference a savepoint after it has been released."; t[401] = "Ein Rettungspunkt kann nicht angesprochen werden, nach dem er entfernt wurde."; t[402] = "You must specify at least one column value to insert a row."; t[403] = "Sie müssen mindestens einen Spaltenwert angeben, um eine Zeile einzufügen."; t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[405] = "Es konnte kein Wert für MaxIndexKeys gefunden werden, da die Systemkatalogdaten fehlen."; t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[413] = "Ungültige UTF-8-Sequenz: Der letzte Wert ist außerhalb des zulässigen Bereichs: {0}"; t[414] = "{0} function takes two or three arguments."; t[415] = "Die {0}-Funktion erwartet zwei oder drei Argumente."; t[440] = "Unexpected error writing large object to database."; t[441] = "Beim Schreiben eines LargeObjects (LOB) in die Datenbank trat ein unerwarteter Fehler auf."; t[442] = "Zero bytes may not occur in string parameters."; t[443] = "Stringparameter dürfen keine Nullbytes enthalten."; t[444] = "A result was returned when none was expected."; t[445] = "Die Anweisung lieferte ein Ergebnis obwohl keines erwartet wurde."; t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; t[451] = "Das ResultSet kann nicht aktualisiert werden. Die Abfrage, die es erzeugte, darf nur eine Tabelle und muss darin alle Primärschlüssel auswählen. Siehe JDBC 2.1 API-Spezifikation, Abschnitt 5.6 für mehr Details."; t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[455] = "Die Nachrichtenlänge {0} ist zu groß. Das kann von sehr großen oder inkorrekten Längenangaben eines InputStream-Parameters herrühren."; t[460] = "Statement has been closed."; t[461] = "Die Anweisung wurde geschlossen."; t[462] = "No value specified for parameter {0}."; t[463] = "Für den Parameter {0} wurde kein Wert angegeben."; t[468] = "The array index is out of range: {0}"; t[469] = "Der Arrayindex ist außerhalb des gültigen Bereichs: {0}."; t[474] = "Unable to bind parameter values for statement."; t[475] = "Der Anweisung konnten keine Parameterwerte zugewiesen werden."; t[476] = "Can''t refresh the insert row."; t[477] = "Die Einfügezeile kann nicht aufgefrischt werden."; t[480] = "No primary key found for table {0}."; t[481] = "Für die Tabelle {0} konnte kein Primärschlüssel gefunden werden."; t[482] = "Cannot change transaction isolation level in the middle of a transaction."; t[483] = "Die Transaktions-Trennungsstufe kann nicht während einer Transaktion verändert werden."; t[498] = "Provided InputStream failed."; t[499] = "Der bereitgestellte InputStream scheiterte."; t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[501] = "Der Parameterindex {0} ist außerhalb des gültigen Bereichs. Es gibt {1} Parameter."; t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[503] = "Der Parameter ''Date Style'' wurde auf dem Server auf {0} verändert. 
Der JDBC-Treiber setzt für korrekte Funktion voraus, dass ''Date Style'' mit ''ISO'' beginnt."; t[508] = "Connection attempt timed out."; t[509] = "Keine Verbindung innerhalb des Zeitintervalls möglich."; t[512] = "Internal Query: {0}"; t[513] = "Interne Abfrage: {0}"; t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[519] = "Der Authentifizierungstyp {0} wird nicht unterstützt. Stellen Sie sicher, dass die Datei ''pg_hba.conf'' die IP-Adresse oder das Subnetz des Clients enthält und dass der Client ein Authentifizierungsschema nutzt, das vom Treiber unterstützt wird."; t[526] = "Interval {0} not yet implemented"; t[527] = "Intervall {0} ist noch nicht implementiert."; t[532] = "Conversion of interval failed"; t[533] = "Die Umwandlung eines Intervalls schlug fehl."; t[540] = "Query timeout must be a value greater than or equals to 0."; t[541] = "Das Abfragetimeout muss ein Wert größer oder gleich Null sein."; t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[543] = "Die Verbindung wurde automatisch geschlossen, da entweder eine neue Verbindung für die gleiche PooledConnection geöffnet wurde, oder die PooledConnection geschlossen worden ist.."; t[544] = "ResultSet not positioned properly, perhaps you need to call next."; t[545] = "Das ResultSet ist nicht richtig positioniert. Eventuell muss ''next'' aufgerufen werden."; t[550] = "This statement has been closed."; t[551] = "Die Anweisung wurde geschlossen."; t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; t[553] = "Der in SQL für eine Instanz von {0} zu verwendende Datentyp kann nicht abgeleitet werden. 
Benutzen Sie ''setObject()'' mit einem expliziten Typ, um ihn festzulegen."; t[554] = "Cannot call updateRow() when on the insert row."; t[555] = "''updateRow()'' kann in der Einfügezeile nicht aufgerufen werden."; t[562] = "Detail: {0}"; t[563] = "Detail: {0}"; t[566] = "Cannot call deleteRow() when on the insert row."; t[567] = "''deleteRow()'' kann in der Einfügezeile nicht aufgerufen werden."; t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[569] = "Die augenblickliche Position ist vor dem Beginn des ResultSets. Dort kann ''deleteRow()'' nicht aufgerufen werden."; t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[577] = "Ungültige UTF-8-Sequenz: der letzte Wert ist ein Ersatzwert: {0}"; t[578] = "Unknown Response Type {0}."; t[579] = "Die Antwort weist einen unbekannten Typ auf: {0}."; t[582] = "Unsupported value for stringtype parameter: {0}"; t[583] = "Nichtunterstützter Wert für den Stringparameter: {0}"; t[584] = "Conversion to type {0} failed: {1}."; t[585] = "Die Umwandlung in den Typ {0} schlug fehl: {1}."; t[586] = "Conversion of money failed."; t[587] = "Die Umwandlung eines Währungsbetrags schlug fehl."; t[600] = "Unable to load the class {0} responsible for the datatype {1}"; t[601] = "Die für den Datentyp {1} verantwortliche Klasse {0} konnte nicht geladen werden."; t[604] = "The fastpath function {0} is unknown."; t[605] = "Die Fastpath-Funktion {0} ist unbekannt."; t[608] = "Malformed function or procedure escape syntax at offset {0}."; t[609] = "Unzulässige Syntax für ein Funktions- oder Prozedur-Escape an Offset {0}."; t[612] = "Provided Reader failed."; t[613] = "Der bereitgestellte Reader scheiterte."; t[614] = "Maximum number of rows must be a value grater than or equal to 0."; t[615] = "Die maximale Zeilenzahl muss ein Wert größer oder gleich Null sein."; t[616] = "Failed to create object for: {0}."; t[617] = "Erstellung des Objektes schlug fehl für: {0}."; 
t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[623] = "Vorzeitiges Ende des Eingabedatenstroms. Es wurden {0} Bytes erwartet, jedoch nur {1} gelesen."; t[626] = "An unexpected result was returned by a query."; t[627] = "Eine Abfrage lieferte ein unerwartetes Resultat."; t[646] = "An error occurred while setting up the SSL connection."; t[647] = "Beim Aufbau der SSL-Verbindung trat ein Fehler auf."; t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[655] = "Ungültige UTF-8-Sequenz: {0} Bytes wurden verwendet um einen {1} Bytewert zu kodieren: {2}"; t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[659] = "Die von {0} bereitgestellte SSLSocketFactory-Klasse konnte nicht instanziiert werden."; t[670] = "Position: {0}"; t[671] = "Position: {0}"; t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[677] = "Ort: Datei: {0}, Routine: {1}, Zeile: {2}."; t[684] = "Cannot tell if path is open or closed: {0}."; t[685] = "Es konnte nicht ermittelt werden, ob der Pfad offen oder geschlossen ist: {0}."; t[700] = "Cannot convert an instance of {0} to type {1}"; t[701] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich."; t[710] = "{0} function takes four and only four argument."; t[711] = "Die {0}-Funktion erwartet genau vier Argumente."; t[718] = "Interrupted while attempting to connect."; t[719] = "Beim Verbindungsversuch trat eine Unterbrechung auf."; t[722] = "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; t[723] = "Ihre Sicherheitsrichtlinie hat den Versuch des Verbindungsaufbaus verhindert. 
Sie müssen wahrscheinlich der Verbindung zum Datenbankrechner java.net.SocketPermission gewähren, um den Rechner auf dem gewählten Port zu erreichen."; t[736] = "{0} function takes one and only one argument."; t[737] = "Die {0}-Funktion erwartet nur genau ein Argument."; t[744] = "This ResultSet is closed."; t[745] = "Dieses ResultSet ist geschlossen."; t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[747] = "Ungültige Zeichendaten. Das ist höchstwahrscheinlich von in der Datenbank gespeicherten Zeichen hervorgerufen, die in einer anderen Kodierung vorliegen, als die, in der die Datenbank erstellt wurde. Das häufigste Beispiel dafür ist es, 8Bit-Daten in SQL_ASCII-Datenbanken abzulegen."; t[752] = "Error disabling autocommit"; t[753] = "Fehler beim Abschalten von Autocommit."; t[754] = "Ran out of memory retrieving query results."; t[755] = "Nicht genügend Speicher beim Abholen der Abfrageergebnisse."; t[756] = "Returning autogenerated keys is not supported."; t[757] = "Die Rückgabe automatisch generierter Schlüssel wird nicht unterstützt,"; t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[761] = "Die Operation erfordert ein scrollbares ResultSet, dieses jedoch ist FORWARD_ONLY."; t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[763] = "Eine CallableStatement-Funktion wurde ausgeführt und der Rückgabewert {0} war vom Typ {1}. 
Jedoch wurde der Typ {2} dafür registriert."; t[768] = "Unknown ResultSet holdability setting: {0}."; t[769] = "Unbekannte Einstellung für die Haltbarkeit des ResultSets: {0}."; t[772] = "Transaction isolation level {0} not supported."; t[773] = "Die Transaktions-Trennungsstufe {0} ist nicht unterstützt."; t[774] = "Zero bytes may not occur in identifiers."; t[775] = "Nullbytes dürfen in Bezeichnern nicht vorkommen."; t[776] = "No results were returned by the query."; t[777] = "Die Abfrage lieferte kein Ergebnis."; t[778] = "A CallableStatement was executed with nothing returned."; t[779] = "Ein CallableStatement wurde ausgeführt ohne etwas zurückzugeben."; t[780] = "wasNull cannot be call before fetching a result."; t[781] = "wasNull kann nicht aufgerufen werden, bevor ein Ergebnis abgefragt wurde."; t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[787] = "Diese Anweisung deklariert keinen OUT-Parameter. Benutzen Sie '{' ?= call ... 
'}' um das zu tun."; t[788] = "Can''t use relative move methods while on the insert row."; t[789] = "Relative Bewegungen können in der Einfügezeile nicht durchgeführt werden."; t[790] = "A CallableStatement was executed with an invalid number of parameters"; t[791] = "Ein CallableStatement wurde mit einer falschen Anzahl Parameter ausgeführt."; t[792] = "Connection is busy with another transaction"; t[793] = "Die Verbindung ist derzeit mit einer anderen Transaktion beschäftigt."; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 397) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 395) + 1) << 1; for (;;) { idx += incr; if (idx >= 794) idx -= 794; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 794 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 794); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 794 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,460
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_ru.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_ru extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[538]; t[0] = ""; t[1] = "Project-Id-Version: JDBC Driver for Redshift 1.x.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2016-01-07 15:09+0300\nLast-Translator: Vladimir Sitnikov <sitnikov.vladimir@gmail.com>\nLanguage-Team: pgsql-rus <pgsql-rus@yahoogroups.com>\nLanguage: ru_RU\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.7\n"; t[4] = "Server SQLState: {0}"; t[5] = "SQLState сервера: {0}"; t[14] = "suspend/resume not implemented"; t[15] = "Операции XA suspend/resume не реализованы"; t[18] = "The array index is out of range: {0}"; t[19] = "Индекс массива вне диапазона: {0}"; t[28] = "This PooledConnection has already been closed."; t[29] = "Это соединение уже было закрыто"; t[30] = "Malformed function or procedure escape syntax at offset {0}."; t[31] = "Невозможно разобрать SQL команду. Ошибка на позиции {0}"; t[32] = "The column index is out of range: {0}, number of columns: {1}."; t[33] = "Индекс колонки вне диапазона: {0}. Допустимые значения: 1..{1}"; t[34] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[35] = "Раннее завершение входного потока, ожидалось байт: {0}, но считано только {1}"; t[44] = "An I/O error occurred while sending to the backend."; t[45] = "Ошибка ввода/ввывода при отправке бэкенду"; t[46] = "Prepare called before end. prepare xid={0}, state={1}"; t[47] = "Вызов prepare должен происходить только после вызова end. 
prepare xid={0}, state={1}"; t[48] = "Transaction isolation level {0} not supported."; t[49] = "Уровень изоляции транзакций {0} не поддерживается."; t[50] = "Could not find a server with specified targetServerType: {0}"; t[51] = "Не удалось найти сервер с указанным значением targetServerType: {0}"; t[52] = "Conversion of interval failed"; t[53] = "Невозможно обработать RedshiftInterval: {0}"; t[54] = "The array index is out of range: {0}, number of elements: {1}."; t[55] = "Индекс массива вне диапазона: {0}. Допустимые значения: 1..{1}"; t[62] = "Unsupported value for stringtype parameter: {0}"; t[63] = "Неподдерживаемое значение для параметра stringtype: {0}"; t[72] = "Invalid stream length {0}."; t[73] = "Неверная длина потока {0}."; t[80] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; t[81] = "Ошибка при откате подготовленной транзакции. rollback xid={0}, preparedXid={1}, currentXid={2}"; t[84] = "The driver currently does not support COPY operations."; t[85] = "Драйвер в данный момент не поддерживате операции COPY."; t[94] = "DataSource has been closed."; t[95] = "DataSource закрыт."; t[96] = "Cannot write to copy a byte of value {0}"; t[97] = "Значение byte должно быть в диапазоне 0..255, переданное значение: {0}"; t[98] = "Fastpath call {0} - No result was returned and we expected a long."; t[99] = "Вызов fastpath {0} ничего не вернул, а ожидалось long"; t[100] = "Connection attempt timed out."; t[101] = "Закончилось время ожидания"; t[102] = "Detail: {0}"; t[103] = "Подробности: {0}"; t[104] = "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections."; t[105] = "Подсоединение по адресу {0} отклонено. Проверьте что хост и порт указаны правильно и что postmaster принимает TCP/IP-подсоединения."; t[108] = "This statement has been closed."; t[109] = "Этот Sstatement был закрыт."; t[110] = "Error committing prepared transaction. 
commit xid={0}, preparedXid={1}, currentXid={2}"; t[111] = "Ошибка при фиксации подготовленной транзакции. commit xid={0}, preparedXid={1}, currentXid={2}"; t[114] = "Position: {0}"; t[115] = "Позиция: {0}"; t[116] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; t[117] = "В каком соединении транзакцию начинали, в таком и вызывайте prepare. По-другому не работает. currentXid={0}, prepare xid={1}"; t[118] = "The connection attempt failed."; t[119] = "Ошибка при попытке подсоединения."; t[120] = "Unexpected copydata from server for {0}"; t[121] = "Неожиданный статус команды COPY: {0}"; t[124] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[125] = "Неверная последовательность UTF-8: начальное значеие {0}: {1}"; t[128] = "This ResultSet is closed."; t[129] = "ResultSet закрыт."; t[142] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[143] = "Духфазная фиксация работает только, если соединение неактивно (state=idle и транзакцция отсутствует). commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[146] = "Too many update results were returned."; t[147] = "Возвращено слишком много результатов обновления."; t[148] = "An error occurred reading the certificate"; t[149] = "Ошибка при чтении сертификата"; t[160] = "Unknown type {0}."; t[161] = "Неизвестный тип {0}."; t[172] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[173] = "Неверная последовательность UTF-8: {0} bytes used to encode a {1} byte value: {2}"; t[182] = "Protocol error. Session setup failed."; t[183] = "Ошибка протокола. 
Установление сессии не удалось."; t[184] = "Connection has been closed."; t[185] = "Это соединение уже было закрыто"; t[188] = "This copy stream is closed."; t[189] = "Поток уже был закрыт"; t[196] = "Statement has been closed."; t[197] = "Statement закрыт."; t[200] = "Failed to set ClientInfo property: {0}"; t[201] = "Невозможно установить свойство ClientInfo: {0}"; t[204] = "Where: {0}"; t[205] = "Где: {0}"; t[212] = "Expected command status BEGIN, got {0}."; t[213] = "Ожидался статус команды BEGIN, но получен {0}"; t[216] = "The HostnameVerifier class provided {0} could not be instantiated."; t[217] = "Невозможно создать HostnameVerifier с помощью указанного класса {0}"; t[220] = "Unsupported properties: {0}"; t[221] = "Указанные свойства не поддерживаются: {0}"; t[222] = "Failed to create object for: {0}."; t[223] = "Ошибка при создании объект для: {0}."; t[230] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[231] = "Случилось что-то необычное, что заставило драйвер произвести ошибку. Пожалуйста сообщите это исключение."; t[236] = "Finalizing a Connection that was never closed:"; t[237] = "Соединение «утекло». Проверьте, что в коде приложения вызывается connection.close(). Далее следует стектрейс того места, где создавалось проблемное соединение"; t[238] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[239] = "Найдены неверные символьные данные. Причиной этого скорее всего являются хранимые данные содержащие символы не соответствующие набору символов базы. 
Типичным примером этого является хранение 8-битных данных в базе SQL_ASCII."; t[252] = "Unable to create SAXResult for SQLXML."; t[253] = "Невозможно создать SAXResult для SQLXML"; t[260] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[261] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}"; t[266] = "No IOException expected from StringBuffer or StringBuilder"; t[267] = "Что-то пошло не так: из классов StringBuffer и StringBuilder исключений не ожидалось"; t[280] = "Interrupted while waiting to obtain lock on database connection"; t[281] = "Ожидание COPY блокировки прервано получением interrupt"; t[284] = "Zero bytes may not occur in identifiers."; t[285] = "Символ с кодом 0 в идентификаторах не допустим"; t[286] = "There are no rows in this ResultSet."; t[287] = "Невозможно удалить строку, т.к. в текущем ResultSet’е строк вообще нет"; t[288] = "Expected an EOF from server, got: {0}"; t[289] = "Неожиданный ответ от сервера. Ожидалось окончание потока, получен байт {0}"; t[304] = "No results were returned by the query."; t[305] = "Запрос не вернул результатов."; t[306] = "Invalid targetServerType value: {0}"; t[307] = "Неверное значение targetServerType: {0}"; t[310] = "Requested CopyOut but got {0}"; t[311] = "Ожидался ответ CopyOut, а получен {0}"; t[318] = "Invalid flags {0}"; t[319] = "Неверные флаги {0}"; t[324] = "Unsupported Types value: {0}"; t[325] = "Неподдерживаемый java.sql.Types тип: {0}"; t[326] = "Invalid timeout ({0}<0)."; t[327] = "Значение таймаута должно быть неотрицательным: {0}"; t[328] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[329] = "Невозможно завершить транзакцию, т.к. транзакция не была начата. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[350] = "A result was returned when none was expected."; t[351] = "Результат возвращён когда его не ожидалось."; t[352] = "Unsupported binary encoding of {0}."; t[353] = "Бинарная передача не поддерживается для типа {0}"; t[354] = "Zero bytes may not occur in string parameters."; t[355] = "Байт с кодом 0 не может втречаться в строковых параметрах"; t[360] = "Requested CopyIn but got {0}"; t[361] = "Ожидался ответ CopyIn, а получен {0}"; t[364] = "Error during one-phase commit. commit xid={0}"; t[365] = "Ошибка при однофазной фиксации транзакции. commit xid={0}"; t[372] = "Unable to bind parameter values for statement."; t[373] = "Не в состоянии ассоциировать значения параметров для команды (RedshiftBindException)"; t[374] = "Interrupted while attempting to connect."; t[375] = "Подключение прервано получаением interrupt"; t[380] = "An unexpected result was returned by a query."; t[381] = "Запрос вернул неожиданный результат."; t[384] = "Method {0} is not yet implemented."; t[385] = "Метод {0} ещё не реализован"; t[386] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[387] = "Местонахождение: Файл {0}, Процедура: {1}, Строка: {2}"; t[388] = "The server does not support SSL."; t[389] = "Сервер не поддерживает SSL."; t[392] = "The password callback class provided {0} could not be instantiated."; t[393] = "Невозможно создать password callback с помощью указанного класса {0}"; t[396] = "Unknown Types value."; t[397] = "Неизвестное значение Types."; t[400] = "Unknown Response Type {0}."; t[401] = "Неизвестный тип ответа {0}."; t[406] = "commit called before end. commit xid={0}, state={1}"; t[407] = "Операция commit должна вызываться только после операции end. 
commit xid={0}, state={1}"; t[420] = "An error occurred while setting up the SSL connection."; t[421] = "Ошибка при установке SSL-подсоединения."; t[424] = "Invalid sslmode value: {0}"; t[425] = "Неверное значение sslmode: {0}"; t[436] = "Copying from database failed: {0}"; t[437] = "Ошибка при обработке ответа команды COPY: {0}"; t[438] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[439] = "Неверная последовательность UTF-8: финальное значение вне области допустимых: {0}"; t[442] = "Error preparing transaction. prepare xid={0}"; t[443] = "Ошибка при выполнении prepare для транзакции {0}"; t[450] = "A connection could not be made using the requested protocol {0}."; t[451] = "Невозможно установить соединение с помощью протокола {0}"; t[460] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; t[461] = "Чередование транзакций в одном соединении не поддерживается. Предыдущую транзакцию нужно завершить xid={0}, currentXid={1}, state={2}, flags={3}"; t[462] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[463] = "Неверная последовательность UTF-8: финальное значение является surrogate значением: {0}"; t[466] = "The column name {0} was not found in this ResultSet."; t[467] = "Колонки {0} не найдено в этом ResultSet’’е."; t[468] = "oid type {0} not known and not a number"; t[469] = "Oid {0} не известен или не является числом"; t[476] = "Hint: {0}"; t[477] = "Подсказка: {0}"; t[478] = "Unsupported property name: {0}"; t[479] = "Свойство {0} не поддерживается"; t[480] = "Ran out of memory retrieving query results."; t[481] = "Недостаточно памяти для обработки результатов запроса. 
Попробуйте увеличить -Xmx или проверьте размеры обрабатываемых данных"; t[484] = "Interval {0} not yet implemented"; t[485] = "Интеврвал {0} ещё не реализован"; t[486] = "This connection has been closed."; t[487] = "Соединение уже было закрыто"; t[488] = "The SocketFactory class provided {0} could not be instantiated."; t[489] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}"; t[490] = "This SQLXML object has already been freed."; t[491] = "Этот объект SQLXML уже был закрыт"; t[494] = "Unexpected command status: {0}."; t[495] = "Неожиданный статус команды: {0}."; t[502] = "Large Objects may not be used in auto-commit mode."; t[503] = "Большие объекты не могут использоваться в режиме авто-подтверждения (auto-commit)."; t[504] = "Conversion of money failed."; t[505] = "Ошибка при преобразовании типа money."; t[512] = "No value specified for parameter {0}."; t[513] = "Не указано значение для параметра {0}."; t[514] = "The server requested password-based authentication, but no password was provided."; t[515] = "Сервер запросил парольную аутентификацию, но пароль не был указан."; t[518] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[519] = "Неверная последовательность UTF-8: байт {0} из {1} не подходит к маске 10xxxxxx: {2}"; t[522] = "Conversion to type {0} failed: {1}."; t[523] = "Ошибка при преобразовании к типу {0}: {1}"; t[528] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[529] = "Тип аутентификации {0} не поддерживается. Проверьте если вы сконфигурировали файл pg_hba.conf чтобы включить IP-адреса клиентов или подсеть. Также удостовертесь что он использует схему аутентификации поддерживаемую драйвером."; t[534] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[535] = "Индекс параметра вне диапазона: {0}. 
Допустимые значения: 1..{1}"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 269) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 267) + 1) << 1; for (;;) { idx += incr; if (idx >= 538) idx -= 538; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 538 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 538); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 538 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,461
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_pl.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_pl extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[346]; t[0] = ""; t[1] = "Project-Id-Version: head-pl\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-05-22 03:01+0200\nLast-Translator: Jarosław Jan Pyszny <jarek@pyszny.net>\nLanguage-Team: <pl@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.10\nPlural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"; t[2] = "The driver currently does not support COPY operations."; t[3] = "Sterownik nie obsługuje aktualnie operacji COPY."; t[4] = "Internal Query: {0}"; t[5] = "Wewnętrzne Zapytanie: {0}"; t[6] = "There are no rows in this ResultSet."; t[7] = "Nie ma żadnych wierszy w tym ResultSet."; t[8] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[9] = "Znaleziono nieprawidłowy znak. Najprawdopodobniej jest to spowodowane przechowywaniem w bazie znaków, które nie pasują do zestawu znaków wybranego podczas tworzenia bazy danych. 
Najczęstszy przykład to przechowywanie 8-bitowych znaków w bazie o kodowaniu SQL_ASCII."; t[12] = "Fastpath call {0} - No result was returned and we expected an integer."; t[13] = "Wywołanie fastpath {0} - Nie otrzymano żadnego wyniku, a oczekiwano liczby całkowitej."; t[14] = "An error occurred while setting up the SSL connection."; t[15] = "Wystąpił błąd podczas ustanawiania połączenia SSL."; t[20] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[21] = "Funkcja CallableStatement została zadeklarowana, ale nie wywołano registerOutParameter (1, <jakiś typ>)."; t[24] = "Unexpected command status: {0}."; t[25] = "Nieoczekiwany status komendy: {0}."; t[32] = "A connection could not be made using the requested protocol {0}."; t[33] = "Nie można było nawiązać połączenia stosując żądany protokołu {0}."; t[38] = "Bad value for type {0} : {1}"; t[39] = "Zła wartość dla typu {0}: {1}"; t[40] = "Not on the insert row."; t[41] = "Nie na wstawianym rekordzie."; t[42] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[43] = "Przedwczesny koniec strumienia wejściowego, oczekiwano {0} bajtów, odczytano tylko {1}."; t[48] = "Unknown type {0}."; t[49] = "Nieznany typ {0}."; t[52] = "The server does not support SSL."; t[53] = "Serwer nie obsługuje SSL."; t[60] = "Cannot call updateRow() when on the insert row."; t[61] = "Nie można wywołać updateRow() na wstawianym rekordzie."; t[62] = "Where: {0}"; t[63] = "Gdzie: {0}"; t[72] = "Cannot call cancelRowUpdates() when on the insert row."; t[73] = "Nie można wywołać cancelRowUpdates() na wstawianym rekordzie."; t[82] = "Server SQLState: {0}"; t[83] = "Serwer SQLState: {0}"; t[92] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[93] = "ResultSet nie jest modyfikowalny (not updateable). 
Zapytanie, które zwróciło ten wynik musi dotyczyć tylko jednej tabeli oraz musi pobierać wszystkie klucze główne tej tabeli. Zobacz Specyfikację JDBC 2.1 API, rozdział 5.6, by uzyskać więcej szczegółów."; t[102] = "Cannot tell if path is open or closed: {0}."; t[103] = "Nie można stwierdzić, czy ścieżka jest otwarta czy zamknięta: {0}."; t[108] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[109] = "Indeks parametru jest poza zakresem: {0}, liczba parametrów: {1}."; t[110] = "Unsupported Types value: {0}"; t[111] = "Nieznana wartość Types: {0}"; t[112] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[113] = "Aktualna pozycja za końcem ResultSet. Nie można wywołać deleteRow()."; t[114] = "This ResultSet is closed."; t[115] = "Ten ResultSet jest zamknięty."; t[120] = "Conversion of interval failed"; t[121] = "Konwersja typu interval nie powiodła się"; t[122] = "Unable to load the class {0} responsible for the datatype {1}"; t[123] = "Nie jest możliwe załadowanie klasy {0} odpowiedzialnej za typ danych {1}"; t[138] = "Error loading default settings from driverconfig.properties"; t[139] = "Błąd podczas wczytywania ustawień domyślnych z driverconfig.properties"; t[142] = "The array index is out of range: {0}"; t[143] = "Indeks tablicy jest poza zakresem: {0}"; t[146] = "Unknown Types value."; t[147] = "Nieznana wartość Types."; t[154] = "The maximum field size must be a value greater than or equal to 0."; t[155] = "Maksymalny rozmiar pola musi być wartością dodatnią lub 0."; t[168] = "Detail: {0}"; t[169] = "Szczegóły: {0}"; t[170] = "Unknown Response Type {0}."; t[171] = "Nieznany typ odpowiedzi {0}."; t[172] = "Maximum number of rows must be a value grater than or equal to 0."; t[173] = "Maksymalna liczba rekordów musi być wartością dodatnią lub 0."; t[184] = "Query timeout must be a value greater than or equals to 0."; t[185] = "Timeout zapytania musi być wartością dodatnią lub 0."; t[186] = 
"Too many update results were returned."; t[187] = "Zapytanie nie zwróciło żadnych wyników."; t[190] = "The connection attempt failed."; t[191] = "Próba nawiązania połączenia nie powiodła się."; t[198] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[199] = "Połączenie zostało zamknięte automatycznie, ponieważ nowe połączenie zostało otwarte dla tego samego PooledConnection lub PooledConnection zostało zamknięte."; t[204] = "Protocol error. Session setup failed."; t[205] = "Błąd protokołu. Nie udało się utworzyć sesji."; t[206] = "This PooledConnection has already been closed."; t[207] = "To PooledConnection zostało już zamknięte."; t[208] = "DataSource has been closed."; t[209] = "DataSource zostało zamknięte."; t[212] = "Method {0} is not yet implemented."; t[213] = "Metoda {0}nie jest jeszcze obsługiwana."; t[216] = "Hint: {0}"; t[217] = "Wskazówka: {0}"; t[218] = "No value specified for parameter {0}."; t[219] = "Nie podano wartości dla parametru {0}."; t[222] = "Position: {0}"; t[223] = "Pozycja: {0}"; t[226] = "Cannot call deleteRow() when on the insert row."; t[227] = "Nie można wywołać deleteRow() na wstawianym rekordzie."; t[240] = "Conversion of money failed."; t[241] = "Konwersja typu money nie powiodła się."; t[244] = "Internal Position: {0}"; t[245] = "Wewnętrzna Pozycja: {0}"; t[248] = "Connection has been closed."; t[249] = "Połączenie zostało zamknięte."; t[254] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[255] = "Aktualna pozycja przed początkiem ResultSet. 
Nie można wywołać deleteRow()."; t[258] = "Failed to create object for: {0}."; t[259] = "Nie powiodło się utworzenie obiektu dla: {0}."; t[262] = "Fetch size must be a value greater to or equal to 0."; t[263] = "Rozmiar pobierania musi być wartością dodatnią lub 0."; t[270] = "No results were returned by the query."; t[271] = "Zapytanie nie zwróciło żadnych wyników."; t[276] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[277] = "Uwierzytelnienie typu {0} nie jest obsługiwane. Upewnij się, że skonfigurowałeś plik pg_hba.conf tak, że zawiera on adres IP lub podsieć klienta oraz że użyta metoda uwierzytelnienia jest wspierana przez ten sterownik."; t[280] = "Conversion to type {0} failed: {1}."; t[281] = "Konwersja do typu {0} nie powiodła się: {1}."; t[282] = "A result was returned when none was expected."; t[283] = "Zwrócono wynik zapytania, choć nie był on oczekiwany."; t[292] = "Transaction isolation level {0} not supported."; t[293] = "Poziom izolacji transakcji {0} nie jest obsługiwany."; t[306] = "ResultSet not positioned properly, perhaps you need to call next."; t[307] = "Zła pozycja w ResultSet, może musisz wywołać next."; t[308] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[309] = "Lokalizacja: Plik: {0}, Procedura: {1}, Linia: {2}"; t[314] = "An unexpected result was returned by a query."; t[315] = "Zapytanie zwróciło nieoczekiwany wynik."; t[316] = "The column index is out of range: {0}, number of columns: {1}."; t[317] = "Indeks kolumny jest poza zakresem: {0}, liczba kolumn: {1}."; t[318] = "Expected command status BEGIN, got {0}."; t[319] = "Spodziewano się statusu komendy BEGIN, otrzymano {0}."; t[320] = "The fastpath function {0} is unknown."; t[321] = "Funkcja fastpath {0} jest nieznana."; t[324] = "The server requested password-based authentication, but no password was 
provided."; t[325] = "Serwer zażądał uwierzytelnienia opartego na haśle, ale żadne hasło nie zostało dostarczone."; t[332] = "The array index is out of range: {0}, number of elements: {1}."; t[333] = "Indeks tablicy jest poza zakresem: {0}, liczba elementów: {1}."; t[338] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[339] = "Coś niezwykłego spowodowało pad sterownika. Proszę, zgłoś ten wyjątek."; t[342] = "Zero bytes may not occur in string parameters."; t[343] = "Zerowe bajty nie mogą pojawiać się w parametrach typu łańcuch znakowy."; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 173) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 171) + 1) << 1; for (;;) { idx += incr; if (idx >= 346) idx -= 346; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 346 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 346); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 346 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,462
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_it.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_it extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[794]; t[0] = ""; t[1] = "Project-Id-Version: Redshift JDBC Driver 2.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2006-06-23 17:25+0200\nLast-Translator: Giuseppe Sacco <eppesuig@debian.org>\nLanguage-Team: Italian <tp@lists.linux.it>\nLanguage: it\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; t[4] = "DataSource has been closed."; t[5] = "Questo «DataSource» è stato chiuso."; t[18] = "Where: {0}"; t[19] = "Dove: {0}"; t[26] = "The connection attempt failed."; t[27] = "Il tentativo di connessione è fallito."; t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[29] = "La posizione attuale è successiva alla fine del ResultSet. Non è possibile invocare «deleteRow()» qui."; t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; t[33] = "Non si possono utilizzare i metodi \"query\" che hanno come argomento una stringa nel caso di «PreparedStatement»."; t[36] = "Multiple ResultSets were returned by the query."; t[37] = "La query ha restituito «ResultSet» multipli."; t[50] = "Too many update results were returned."; t[51] = "Sono stati restituiti troppi aggiornamenti."; t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[59] = "Sequenza UTF-8 illegale: il byte iniziale è {0}: {1}"; t[66] = "The column name {0} was not found in this ResultSet."; t[67] = "Colonna denominata «{0}» non è presente in questo «ResultSet»."; t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; t[71] = "Chiamata Fastpath «{0}»: Nessun risultato restituito mentre ci si aspettava un intero."; t[74] = "Protocol error. Session setup failed."; t[75] = "Errore di protocollo. 
Impostazione della sessione fallita."; t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[77] = "È stato definito un «CallableStatement» ma non è stato invocato il metodo «registerOutParameter(1, <tipo>)»."; t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[79] = "I «ResultSet» in modalità CONCUR_READ_ONLY non possono essere aggiornati."; t[90] = "LOB positioning offsets start at 1."; t[91] = "L''offset per la posizione dei LOB comincia da 1."; t[92] = "Internal Position: {0}"; t[93] = "Posizione interna: {0}"; t[100] = "Cannot change transaction read-only property in the middle of a transaction."; t[101] = "Non è possibile modificare la proprietà «read-only» delle transazioni nel mezzo di una transazione."; t[102] = "The JVM claims not to support the {0} encoding."; t[103] = "La JVM sostiene di non supportare la codifica {0}."; t[108] = "{0} function doesn''t take any argument."; t[109] = "Il metodo «{0}» non accetta argomenti."; t[112] = "xid must not be null"; t[113] = "xid non può essere NULL"; t[114] = "Connection has been closed."; t[115] = "Questo «Connection» è stato chiuso."; t[122] = "The server does not support SSL."; t[123] = "Il server non supporta SSL."; t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[141] = "Sequenza UTF-8 illegale: il byte {0} di una sequenza di {1} byte non è 10xxxxxx: {2}"; t[148] = "Hint: {0}"; t[149] = "Suggerimento: {0}"; t[152] = "Unable to find name datatype in the system catalogs."; t[153] = "Non è possibile trovare il datatype «name» nel catalogo di sistema."; t[156] = "Unsupported Types value: {0}"; t[157] = "Valore di tipo «{0}» non supportato."; t[158] = "Unknown type {0}."; t[159] = "Tipo sconosciuto {0}."; t[166] = "{0} function takes two and only two arguments."; t[167] = "Il metodo «{0}» accetta due e solo due argomenti."; t[170] = "Finalizing a Connection that was never closed:"; t[171] = 
"Finalizzazione di una «Connection» che non è stata chiusa."; t[186] = "Redshift LOBs can only index to: {0}"; t[187] = "Il massimo valore per l''indice dei LOB di Redshift è {0}. "; t[194] = "Method {0} is not yet implemented."; t[195] = "Il metodo «{0}» non è stato ancora implementato."; t[198] = "Error loading default settings from driverconfig.properties"; t[199] = "Si è verificato un errore caricando le impostazioni predefinite da «driverconfig.properties»."; t[202] = "Large Objects may not be used in auto-commit mode."; t[203] = "Non è possibile impostare i «Large Object» in modalità «auto-commit»."; t[208] = "Expected command status BEGIN, got {0}."; t[209] = "Lo stato del comando avrebbe dovuto essere BEGIN, mentre invece è {0}."; t[218] = "Invalid fetch direction constant: {0}."; t[219] = "Costante per la direzione dell''estrazione non valida: {0}."; t[222] = "{0} function takes three and only three arguments."; t[223] = "Il metodo «{0}» accetta tre e solo tre argomenti."; t[226] = "Error during recover"; t[227] = "Errore durante il ripristino"; t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[229] = "Non è possibile aggiornare il «ResultSet» perché la posizione attuale è precedente all''inizio o successiva alla file dei risultati."; t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[233] = "È stato definito il parametro di tipo «{0}», ma poi è stato invocato il metodo «get{1}()» (sqltype={2})."; t[240] = "Cannot establish a savepoint in auto-commit mode."; t[241] = "Non è possibile impostare i punti di ripristino in modalità «auto-commit»."; t[242] = "Cannot retrieve the id of a named savepoint."; t[243] = "Non è possibile trovare l''id del punto di ripristino indicato."; t[244] = "The column index is out of range: {0}, number of columns: {1}."; t[245] = "Indice di colonna, {0}, è maggiore del numero di colonne {1}."; t[250] = "Something unusual has 
occurred to cause the driver to fail. Please report this exception."; t[251] = "Qualcosa di insolito si è verificato causando il fallimento del driver. Per favore riferire all''autore del driver questa eccezione."; t[260] = "Cannot cast an instance of {0} to type {1}"; t[261] = "Non è possibile fare il cast di una istanza di «{0}» al tipo «{1}»."; t[264] = "Unknown Types value."; t[265] = "Valore di tipo sconosciuto."; t[266] = "Invalid stream length {0}."; t[267] = "La dimensione specificata, {0}, per lo «stream» non è valida."; t[272] = "Cannot retrieve the name of an unnamed savepoint."; t[273] = "Non è possibile trovare il nome di un punto di ripristino anonimo."; t[274] = "Unable to translate data into the desired encoding."; t[275] = "Impossibile tradurre i dati nella codifica richiesta."; t[276] = "Expected an EOF from server, got: {0}"; t[277] = "Ricevuto dal server «{0}» mentre era atteso un EOF"; t[278] = "Bad value for type {0} : {1}"; t[279] = "Il valore «{1}» non è adeguato al tipo «{0}»."; t[280] = "The server requested password-based authentication, but no password was provided."; t[281] = "Il server ha richiesto l''autenticazione con password, ma tale password non è stata fornita."; t[298] = "This PooledConnection has already been closed."; t[299] = "Questo «PooledConnection» è stato chiuso."; t[306] = "Fetch size must be a value greater to or equal to 0."; t[307] = "La dimensione dell''area di «fetch» deve essere maggiore o eguale a 0."; t[312] = "A connection could not be made using the requested protocol {0}."; t[313] = "Non è stato possibile attivare la connessione utilizzando il protocollo richiesto {0}."; t[322] = "There are no rows in this ResultSet."; t[323] = "Non ci sono righe in questo «ResultSet»."; t[324] = "Unexpected command status: {0}."; t[325] = "Stato del comando non previsto: {0}."; t[334] = "Not on the insert row."; t[335] = "Non si è in una nuova riga."; t[344] = "Server SQLState: {0}"; t[345] = "SQLState del server: {0}"; 
t[360] = "The driver currently does not support COPY operations."; t[361] = "Il driver non supporta al momento l''operazione «COPY»."; t[364] = "The array index is out of range: {0}, number of elements: {1}."; t[365] = "L''indice dell''array è fuori intervallo: {0}, numero di elementi: {1}."; t[374] = "suspend/resume not implemented"; t[375] = "«suspend»/«resume» non implementato"; t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; t[379] = "Non implementato: il commit \"one-phase\" deve essere invocato sulla stessa connessione che ha iniziato la transazione."; t[398] = "Cannot call cancelRowUpdates() when on the insert row."; t[399] = "Non è possibile invocare «cancelRowUpdates()» durante l''inserimento di una riga."; t[400] = "Cannot reference a savepoint after it has been released."; t[401] = "Non è possibile utilizzare un punto di ripristino successivamente al suo rilascio."; t[402] = "You must specify at least one column value to insert a row."; t[403] = "Per inserire un record si deve specificare almeno il valore di una colonna."; t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[405] = "Non è possibile trovare il valore di «MaxIndexKeys» nel catalogo si sistema."; t[412] = "The JVM claims not to support the encoding: {0}"; t[413] = "La JVM sostiene di non supportare la codifica: {0}."; t[414] = "{0} function takes two or three arguments."; t[415] = "Il metodo «{0}» accetta due o tre argomenti."; t[440] = "Unexpected error writing large object to database."; t[441] = "Errore inatteso inviando un «large object» al database."; t[442] = "Zero bytes may not occur in string parameters."; t[443] = "Byte con valore zero non possono essere contenuti nei parametri stringa."; t[444] = "A result was returned when none was expected."; t[445] = "È stato restituito un valore nonostante non ne fosse atteso nessuno."; t[450] = "ResultSet is not updateable. 
The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[451] = "Il «ResultSet» non è aggiornabile. La query che lo genera deve selezionare una sola tabella e deve selezionarne tutti i campi che ne compongono la chiave primaria. Si vedano le specifiche dell''API JDBC 2.1, sezione 5.6, per ulteriori dettagli."; t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[455] = "Il messaggio di «bind» è troppo lungo ({0}). Questo può essere causato da una dimensione eccessiva o non corretta dei parametri dell''«InputStream»."; t[460] = "Statement has been closed."; t[461] = "Questo «Statement» è stato chiuso."; t[462] = "No value specified for parameter {0}."; t[463] = "Nessun valore specificato come parametro {0}."; t[468] = "The array index is out of range: {0}"; t[469] = "Indice di colonna fuori dall''intervallo ammissibile: {0}"; t[474] = "Unable to bind parameter values for statement."; t[475] = "Impossibile fare il «bind» dei valori passati come parametri per lo statement."; t[476] = "Can''t refresh the insert row."; t[477] = "Non è possibile aggiornare la riga in inserimento."; t[480] = "No primary key found for table {0}."; t[481] = "Non è stata trovata la chiave primaria della tabella «{0}»."; t[482] = "Cannot change transaction isolation level in the middle of a transaction."; t[483] = "Non è possibile cambiare il livello di isolamento delle transazioni nel mezzo di una transazione."; t[498] = "Provided InputStream failed."; t[499] = "L''«InputStream» fornito è fallito."; t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[501] = "Il parametro indice è fuori intervallo: {0}, numero di elementi: {1}."; t[502] = "The server''s DateStyle parameter was changed to {0}. 
The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[503] = "Il parametro del server «DateStyle» è stato cambiato in {0}. Il driver JDBC richiede che «DateStyle» cominci con «ISO» per un corretto funzionamento."; t[508] = "Connection attempt timed out."; t[509] = "Il tentativo di connessione è scaduto."; t[512] = "Internal Query: {0}"; t[513] = "Query interna: {0}"; t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[519] = "L''autenticazione di tipo {0} non è supportata. Verificare che nel file di configurazione pg_hba.conf sia presente l''indirizzo IP o la sottorete del client, e che lo schema di autenticazione utilizzato sia supportato dal driver."; t[526] = "Interval {0} not yet implemented"; t[527] = "L''intervallo «{0}» non è stato ancora implementato."; t[532] = "Conversion of interval failed"; t[533] = "Fallita la conversione di un «interval»."; t[540] = "Query timeout must be a value greater than or equals to 0."; t[541] = "Il timeout relativo alle query deve essere maggiore o eguale a 0."; t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[543] = "La «Connection» è stata chiusa automaticamente perché una nuova l''ha sostituita nello stesso «PooledConnection», oppure il «PooledConnection» è stato chiuso."; t[544] = "ResultSet not positioned properly, perhaps you need to call next."; t[545] = "Il «ResultSet» non è correttamente posizionato; forse è necessario invocare «next()»."; t[550] = "This statement has been closed."; t[551] = "Questo statement è stato chiuso."; t[552] = "Can''t infer the SQL type to use for an instance of {0}. 
Use setObject() with an explicit Types value to specify the type to use."; t[553] = "Non è possibile identificare il tipo SQL da usare per l''istanza di tipo «{0}». Usare «setObject()» specificando esplicitamente il tipo da usare per questo valore."; t[554] = "Cannot call updateRow() when on the insert row."; t[555] = "Non è possibile invocare «updateRow()» durante l''inserimento di una riga."; t[562] = "Detail: {0}"; t[563] = "Dettaglio: {0}"; t[566] = "Cannot call deleteRow() when on the insert row."; t[567] = "Non è possibile invocare «deleteRow()» durante l''inserimento di una riga."; t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[569] = "La posizione attuale è precedente all''inizio del ResultSet. Non è possibile invocare «deleteRow()» qui."; t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[577] = "Sequenza UTF-8 illegale: il valore è finale è un surrogato: {0}"; t[578] = "Unknown Response Type {0}."; t[579] = "Risposta di tipo sconosciuto {0}."; t[582] = "Unsupported value for stringtype parameter: {0}"; t[583] = "Il valore per il parametro di tipo string «{0}» non è supportato."; t[584] = "Conversion to type {0} failed: {1}."; t[585] = "Conversione al tipo {0} fallita: {1}."; t[586] = "Conversion of money failed."; t[587] = "Fallita la conversione di un «money»."; t[600] = "Unable to load the class {0} responsible for the datatype {1}"; t[601] = "Non è possibile caricare la class «{0}» per gestire il tipo «{1}»."; t[604] = "The fastpath function {0} is unknown."; t[605] = "La funzione fastpath «{0}» è sconosciuta."; t[608] = "Malformed function or procedure escape syntax at offset {0}."; t[609] = "Sequenza di escape definita erroneamente nella funzione o procedura all''offset {0}."; t[612] = "Provided Reader failed."; t[613] = "Il «Reader» fornito è fallito."; t[614] = "Maximum number of rows must be a value grater than or equal to 0."; t[615] = "Il numero massimo di righe 
deve essere maggiore o eguale a 0."; t[616] = "Failed to create object for: {0}."; t[617] = "Fallita la creazione dell''oggetto per: {0}."; t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[623] = "Il flusso di input è stato interrotto, sono arrivati {1} byte al posto dei {0} attesi."; t[626] = "An unexpected result was returned by a query."; t[627] = "Un risultato inaspettato è stato ricevuto dalla query."; t[646] = "An error occurred while setting up the SSL connection."; t[647] = "Si è verificato un errore impostando la connessione SSL."; t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[655] = "Sequenza UTF-8 illegale: {0} byte utilizzati per codificare un valore di {1} byte: {2}"; t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[659] = "La classe «SSLSocketFactory» specificata, «{0}», non può essere istanziata."; t[670] = "Position: {0}"; t[671] = "Posizione: {0}"; t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[677] = "Individuazione: file: \"{0}\", routine: {1}, linea: {2}"; t[684] = "Cannot tell if path is open or closed: {0}."; t[685] = "Impossibile stabilire se il percorso è aperto o chiuso: {0}."; t[700] = "Cannot convert an instance of {0} to type {1}"; t[701] = "Non è possibile convertire una istanza di «{0}» nel tipo «{1}»"; t[710] = "{0} function takes four and only four argument."; t[711] = "Il metodo «{0}» accetta quattro e solo quattro argomenti."; t[718] = "Interrupted while attempting to connect."; t[719] = "Si è verificata una interruzione durante il tentativo di connessione."; t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[723] = "Sequenza UTF-8 illegale: il valore finale è fuori dall''intervallo permesso: {0}"; t[736] = "{0} function takes one and only one argument."; t[737] = "Il metodo «{0}» accetta un ed un solo argomento."; t[744] = "This ResultSet is closed."; t[745] = "Questo «ResultSet» è chiuso."; 
t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; t[747] = "Sono stati trovati caratteri non validi tra i dati. Molto probabilmente sono stati memorizzati dei caratteri che non sono validi per la codifica dei caratteri impostata alla creazione del database. Il caso più diffuso è quello nel quale si memorizzano caratteri a 8bit in un database con codifica SQL_ASCII."; t[750] = "An I/O error occurred while sending to the backend."; t[751] = "Si è verificato un errore di I/O nella spedizione di dati al server."; t[754] = "Ran out of memory retrieving query results."; t[755] = "Fine memoria scaricando i risultati della query."; t[756] = "Returning autogenerated keys is not supported."; t[757] = "La restituzione di chiavi autogenerate non è supportata."; t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[761] = "L''operazione richiete un «ResultSet» scorribile mentre questo è «FORWARD_ONLY»."; t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[763] = "È stato eseguito un «CallableStatement» ma il parametro in uscita «{0}» era di tipo «{1}» al posto di «{2}», che era stato dichiarato."; t[768] = "Unknown ResultSet holdability setting: {0}."; t[769] = "Il parametro «holdability» per il «ResultSet» è sconosciuto: {0}."; t[772] = "Transaction isolation level {0} not supported."; t[773] = "Il livello di isolamento delle transazioni «{0}» non è supportato."; t[776] = "No results were returned by the query."; t[777] = "Nessun risultato è stato restituito dalla query."; t[778] = "A CallableStatement was executed with nothing returned."; t[779] = "Un «CallableStatement» è stato eseguito senza produrre alcun risultato. 
"; t[780] = "The maximum field size must be a value greater than or equal to 0."; t[781] = "La dimensione massima del campo deve essere maggiore o eguale a 0."; t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[787] = "Questo statement non dichiara il parametro in uscita. Usare «{ ?= call ... }» per farlo."; t[788] = "Can''t use relative move methods while on the insert row."; t[789] = "Non è possibile utilizzare gli spostamenti relativi durante l''inserimento di una riga."; t[792] = "Connection is busy with another transaction"; t[793] = "La connessione è utilizzata da un''altra transazione"; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 397) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 395) + 1) << 1; for (;;) { idx += incr; if (idx >= 794) idx -= 794; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 794 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 794); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 794 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,463
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/translation/messages_sr.java
/* Automatically generated by GNU msgfmt. Do not modify! */ package com.amazon.redshift.translation; public class messages_sr extends java.util.ResourceBundle { private static final java.lang.String[] table; static { java.lang.String[] t = new java.lang.String[794]; t[0] = ""; t[1] = "Project-Id-Version: Redshift 1.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-26 11:13+0100\nLast-Translator: Bojan Škaljac <skaljac (at) gmail.com>\nLanguage-Team: Srpski <skaljac@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Serbian\nX-Poedit-Country: YUGOSLAVIA\n"; t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[3] = "Nije implementirano: Dvofazni commit mora biti izdat uz korištenje besposlene konekcije. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; t[4] = "DataSource has been closed."; t[5] = "DataSource je zatvoren."; t[8] = "Invalid flags {0}"; t[9] = "Nevažeće zastavice {0}"; t[18] = "Where: {0}"; t[19] = "Gde: {0}"; t[24] = "Unknown XML Source class: {0}"; t[25] = "Nepoznata XML ulazna klasa: {0}"; t[26] = "The connection attempt failed."; t[27] = "Pokušaj konektovanja propao."; t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; t[29] = "Trenutna pozicija posle kraja ResultSet-a. 
Ne možete pozvati deleteRow() na toj poziciji."; t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; t[33] = "Ne možete da koristite metode za upit koji uzimaju string iz upita u PreparedStatement-u."; t[36] = "Multiple ResultSets were returned by the query."; t[37] = "Višestruki ResultSet-vi su vraćeni od strane upita."; t[50] = "Too many update results were returned."; t[51] = "Previše rezultata za ažuriranje je vraćeno."; t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; t[59] = "Ilegalna UTF-8 sekvenca: inicijalni bajt je {0}: {1}"; t[66] = "The column name {0} was not found in this ResultSet."; t[67] = "Ime kolone {0} nije pronadjeno u ResultSet."; t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; t[71] = "Fastpath poziv {0} - Nikakav rezultat nije vraćen a očekivan je integer."; t[74] = "Protocol error. Session setup failed."; t[75] = "Greška protokola. Zakazivanje sesije propalo."; t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."; t[77] = "CallableStatement jedeklarisan ali nije bilo poziva registerOutParameter (1, <neki_tip>)."; t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; t[79] = "ResultSets sa osobinom CONCUR_READ_ONLY ne moeže biti ažuriran."; t[90] = "LOB positioning offsets start at 1."; t[91] = "LOB pozicija ofset počinje kod 1."; t[92] = "Internal Position: {0}"; t[93] = "Interna pozicija: {0}"; t[96] = "free() was called on this LOB previously"; t[97] = "free() je pozvan na ovom LOB-u prethodno"; t[100] = "Cannot change transaction read-only property in the middle of a transaction."; t[101] = "Nije moguće izmeniti read-only osobinu transakcije u sred izvršavanja transakcije."; t[102] = "The JVM claims not to support the {0} encoding."; t[103] = "JVM tvrdi da ne podržava {0} encoding."; t[108] = "{0} function doesn''t take any argument."; t[109] = "Funkcija {0} nema parametara."; t[112] = 
"xid must not be null"; t[113] = "xid ne sme biti null"; t[114] = "Connection has been closed."; t[115] = "Konekcija je već zatvorena."; t[122] = "The server does not support SSL."; t[123] = "Server ne podržava SSL."; t[124] = "Custom type maps are not supported."; t[125] = "Mape sa korisnički definisanim tipovima nisu podržane."; t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; t[141] = "Ilegalna UTF-8 sekvenca: bajt {0} od {1} bajtova sekvence nije 10xxxxxx: {2}"; t[148] = "Hint: {0}"; t[149] = "Nagovest: {0}"; t[152] = "Unable to find name datatype in the system catalogs."; t[153] = "Nije moguće pronaći ime tipa podatka u sistemskom katalogu."; t[156] = "Unsupported Types value: {0}"; t[157] = "Za tip nije podržana vrednost: {0}"; t[158] = "Unknown type {0}."; t[159] = "Nepoznat tip {0}."; t[166] = "{0} function takes two and only two arguments."; t[167] = "Funkcija {0} prima dva i samo dva parametra."; t[170] = "Finalizing a Connection that was never closed:"; t[171] = "Dovršavanje konekcije koja nikada nije zatvorena:"; t[180] = "The maximum field size must be a value greater than or equal to 0."; t[181] = "Maksimalna vrednost veličine polja mora biti vrednost veća ili jednaka 0."; t[186] = "Redshift LOBs can only index to: {0}"; t[187] = "Redshift LOB mogu jedino da označavaju: {0}"; t[194] = "Method {0} is not yet implemented."; t[195] = "Metod {0} nije još impelemtiran."; t[198] = "Error loading default settings from driverconfig.properties"; t[199] = "Greška u čitanju standardnih podešavanja iz driverconfig.properties"; t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; t[201] = "Razultat nemože da se primi iz CallableStatement pre nego što se on izvrši."; t[202] = "Large Objects may not be used in auto-commit mode."; t[203] = "Veliki objekti (Large Object) se nemogu koristiti u auto-commit modu."; t[208] = "Expected command status BEGIN, got {0}."; t[209] = "Očekivan status komande 
je BEGIN, a dobijeno je {0}."; t[218] = "Invalid fetch direction constant: {0}."; t[219] = "Pogrešna konstanta za direkciju donošenja: {0}."; t[222] = "{0} function takes three and only three arguments."; t[223] = "Funkcija {0} prima tri i samo tri parametra."; t[226] = "This SQLXML object has already been freed."; t[227] = "Ovaj SQLXML je već obrisan."; t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; t[229] = "Nije moguće ažurirati ResultSet zato što je ili početak ili kraj rezultata."; t[230] = "The JVM claims not to support the encoding: {0}"; t[231] = "JVM tvrdi da ne podržava encoding: {0}"; t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; t[233] = "Parametar tipa {0} je registrovan,ali poziv za get{1} (sql tip={2}) je izvršen."; t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; t[235] = "Greška prilikom povratka na prethodo pripremljenu transakciju. rollback xid={0}, preparedXid={1}, currentXid={2}"; t[240] = "Cannot establish a savepoint in auto-commit mode."; t[241] = "U auto-commit modu nije moguće podešavanje tački snimanja."; t[242] = "Cannot retrieve the id of a named savepoint."; t[243] = "Nije moguće primiti id imena tačke snimanja."; t[244] = "The column index is out of range: {0}, number of columns: {1}."; t[245] = "Indeks kolone van osega: {0}, broj kolona: {1}."; t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; t[251] = "Nešto neobično se dogodilo i drajver je zakazao. 
Molim prijavite ovaj izuzetak."; t[260] = "Cannot cast an instance of {0} to type {1}"; t[261] = "Nije moguće kastovati instancu {0} u tip {1}"; t[264] = "Unknown Types value."; t[265] = "Nepoznata vrednost za Types."; t[266] = "Invalid stream length {0}."; t[267] = "Nevažeća dužina toka {0}."; t[272] = "Cannot retrieve the name of an unnamed savepoint."; t[273] = "Nije moguće izvaditi ime tačke snimanja koja nema ime."; t[274] = "Unable to translate data into the desired encoding."; t[275] = "Nije moguće prevesti podatke u odabrani encoding format."; t[276] = "Expected an EOF from server, got: {0}"; t[277] = "Očekivan EOF od servera, a dobijeno: {0}"; t[278] = "Bad value for type {0} : {1}"; t[279] = "Pogrešna vrednost za tip {0} : {1}"; t[280] = "The server requested password-based authentication, but no password was provided."; t[281] = "Server zahteva autentifikaciju baziranu na šifri, ali šifra nije prosleđena."; t[286] = "Unable to create SAXResult for SQLXML."; t[287] = "Nije moguće kreirati SAXResult za SQLXML."; t[292] = "Error during recover"; t[293] = "Greška prilikom oporavljanja."; t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[295] = "Pokušaj pozivanja kraja pre odgovarajućeg početka. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; t[297] = "Skraćivanje velikih objekata je implementirano samo u 8.3 i novijim serverima."; t[298] = "This PooledConnection has already been closed."; t[299] = "PooledConnection je već zatvoren."; t[302] = "ClientInfo property not supported."; t[303] = "ClientInfo property nije podržan."; t[306] = "Fetch size must be a value greater to or equal to 0."; t[307] = "Doneta veličina mora biti vrednost veća ili jednaka 0."; t[312] = "A connection could not be made using the requested protocol {0}."; t[313] = "Konekciju nije moguće kreirati uz pomoć protokola {0}."; t[318] = "Unknown XML Result class: {0}"; t[319] = "nepoznata XML klasa rezultata: {0}"; t[322] = "There are no rows in this ResultSet."; t[323] = "U ResultSet-u nema redova."; t[324] = "Unexpected command status: {0}."; t[325] = "Neočekivan komandni status: {0}."; t[330] = "Heuristic commit/rollback not supported. forget xid={0}"; t[331] = "Heuristički commit/rollback nije podržan. forget xid={0}"; t[334] = "Not on the insert row."; t[335] = "Nije mod ubacivanja redova."; t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; t[337] = "SQLXML objekat je već inicijalizovan, tako da ga nije moguće dodatno menjati."; t[344] = "Server SQLState: {0}"; t[345] = "SQLState servera: {0}"; t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; t[349] = "Serverov standard_conforming_strings parametar javlja {0}. 
JDBC drajver ocekuje on ili off."; t[360] = "The driver currently does not support COPY operations."; t[361] = "Drajver trenutno ne podržava COPY operacije."; t[364] = "The array index is out of range: {0}, number of elements: {1}."; t[365] = "Indeks niza je van opsega: {0}, broj elemenata: {1}."; t[374] = "suspend/resume not implemented"; t[375] = "obustavljanje/nastavljanje nije implementirano."; t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; t[379] = "Nije implementirano: Commit iz jedne faze mora biti izdat uz korištenje iste konekcije koja je korištena za startovanje."; t[380] = "Error during one-phase commit. commit xid={0}"; t[381] = "Kreška prilikom commit-a iz jedne faze. commit xid={0}"; t[398] = "Cannot call cancelRowUpdates() when on the insert row."; t[399] = "Nije moguće pozvati cancelRowUpdates() prilikom ubacivanja redova."; t[400] = "Cannot reference a savepoint after it has been released."; t[401] = "Nije moguće referenciranje tačke snimanja nakon njenog oslobađanja."; t[402] = "You must specify at least one column value to insert a row."; t[403] = "Morate specificirati barem jednu vrednost za kolonu da bi ste ubacili red."; t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; t[405] = "Nije moguće odrediti vrednost za MaxIndexKezs zbog nedostatka podataka u sistemskom katalogu."; t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; t[413] = "Ilegalna UTF-8 sekvenca: finalna vrednost je van opsega: {0}"; t[414] = "{0} function takes two or three arguments."; t[415] = "Funkcija {0} prima dva ili tri parametra."; t[428] = "Unable to convert DOMResult SQLXML data to a string."; t[429] = "Nije moguće konvertovati DOMResult SQLXML podatke u string."; t[434] = "Unable to decode xml data."; t[435] = "Neuspešno dekodiranje XML podataka."; t[440] = "Unexpected error writing large object to database."; t[441] = "Neočekivana greška 
prilikom upisa velikog objekta u bazu podataka."; t[442] = "Zero bytes may not occur in string parameters."; t[443] = "Nula bajtovji se ne smeju pojavljivati u string parametrima."; t[444] = "A result was returned when none was expected."; t[445] = "Rezultat vraćen ali nikakav rezultat nije očekivan."; t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; t[451] = "ResultSet nije moguće ažurirati. Upit koji je generisao ovaj razultat mora selektoati jedino tabelu,i mora selektovati sve primrne ključeve iz te tabele. Pogledajte API specifikaciju za JDBC 2.1, sekciju 5.6 za više detalja."; t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; t[455] = "Dužina vezivne poruke {0} prevelika. Ovo je možda rezultat veoma velike ili pogrešne dužine specifikacije za InputStream parametre."; t[460] = "Statement has been closed."; t[461] = "Statemen je već zatvoren."; t[462] = "No value specified for parameter {0}."; t[463] = "Nije zadata vrednost za parametar {0}."; t[468] = "The array index is out of range: {0}"; t[469] = "Indeks niza je van opsega: {0}"; t[474] = "Unable to bind parameter values for statement."; t[475] = "Nije moguće naći vrednost vezivnog parametra za izjavu (statement)."; t[476] = "Can''t refresh the insert row."; t[477] = "Nije moguće osvežiti ubačeni red."; t[480] = "No primary key found for table {0}."; t[481] = "Nije pronađen ključ za tabelu {0}."; t[482] = "Cannot change transaction isolation level in the middle of a transaction."; t[483] = "Nije moguće izmeniti nivo izolacije transakcije u sred izvršavanja transakcije."; t[498] = "Provided InputStream failed."; t[499] = "Pribaljeni InputStream zakazao."; t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; t[501] = 
"Index parametra je van opsega: {0}, broj parametara je: {1}."; t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; t[503] = "Serverov DataStyle parametar promenjen u {0}. JDBC zahteva da DateStyle počinje sa ISO za uspešno završavanje operacije."; t[508] = "Connection attempt timed out."; t[509] = "Isteklo je vreme za pokušaj konektovanja."; t[512] = "Internal Query: {0}"; t[513] = "Interni upit: {0}"; t[514] = "Error preparing transaction. prepare xid={0}"; t[515] = "Greška u pripremanju transakcije. prepare xid={0}"; t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; t[519] = "Tip autentifikacije {0} nije podržan. Proverite dali imate podešen pg_hba.conf fajl koji uključuje klijentovu IP adresu ili podmrežu, i da ta mreža koristi šemu autentifikacije koja je podržana od strane ovog drajvera."; t[526] = "Interval {0} not yet implemented"; t[527] = "Interval {0} još nije implementiran."; t[532] = "Conversion of interval failed"; t[533] = "Konverzija intervala propala."; t[540] = "Query timeout must be a value greater than or equals to 0."; t[541] = "Tajm-aut mora biti vrednost veća ili jednaka 0."; t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; t[543] = "Konekcija je zatvorena automatski zato što je nova konekcija otvorena za isti PooledConnection ili je PooledConnection zatvoren."; t[544] = "ResultSet not positioned properly, perhaps you need to call next."; t[545] = "ResultSet nije pravilno pozicioniran, možda je potrebno da pozovete next."; t[546] = "Prepare called before end. prepare xid={0}, state={1}"; t[547] = "Pripremanje poziva pre kraja. 
prepare xid={0}, state={1}"; t[548] = "Invalid UUID data."; t[549] = "Nevažeća UUID podatak."; t[550] = "This statement has been closed."; t[551] = "Statement je zatvoren."; t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; t[553] = "Nije moguće zaključiti SQL tip koji bi se koristio sa instancom {0}. Koristite setObject() sa zadatim eksplicitnim tipom vrednosti."; t[554] = "Cannot call updateRow() when on the insert row."; t[555] = "Nije moguće pozvati updateRow() prilikom ubacivanja redova."; t[562] = "Detail: {0}"; t[563] = "Detalji: {0}"; t[566] = "Cannot call deleteRow() when on the insert row."; t[567] = "Nije moguće pozvati deleteRow() prilikom ubacivanja redova."; t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; t[569] = "Trenutna pozicija pre početka ResultSet-a. Ne možete pozvati deleteRow() na toj poziciji."; t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; t[577] = "Ilegalna UTF-8 sekvenca: finalna vrednost je zamena vrednosti: {0}"; t[578] = "Unknown Response Type {0}."; t[579] = "Nepoznat tip odziva {0}."; t[582] = "Unsupported value for stringtype parameter: {0}"; t[583] = "Vrednost za parametar tipa string nije podržana: {0}"; t[584] = "Conversion to type {0} failed: {1}."; t[585] = "Konverzija u tip {0} propala: {1}."; t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; t[587] = "SQLXML objekat nije inicijalizovan tako da nije moguće preuzimati podatke iz njega."; t[600] = "Unable to load the class {0} responsible for the datatype {1}"; t[601] = "Nije moguće učitati kalsu {0} odgovornu za tip podataka {1}"; t[604] = "The fastpath function {0} is unknown."; t[605] = "Fastpath funkcija {0} je nepoznata."; t[608] = "Malformed function or procedure escape syntax at offset {0}."; t[609] = "Pogrešna sintaksa u funkciji ili proceduri na poziciji 
{0}."; t[612] = "Provided Reader failed."; t[613] = "Pribavljeni čitač (Reader) zakazao."; t[614] = "Maximum number of rows must be a value grater than or equal to 0."; t[615] = "Maksimalni broj redova mora biti vrednosti veće ili jednake 0."; t[616] = "Failed to create object for: {0}."; t[617] = "Propao pokušaj kreiranja objekta za: {0}."; t[620] = "Conversion of money failed."; t[621] = "Konverzija novca (money) propala."; t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; t[623] = "Prevremen završetak ulaznog toka podataka,očekivano {0} bajtova, a pročitano samo {1}."; t[626] = "An unexpected result was returned by a query."; t[627] = "Nepredviđen rezultat je vraćen od strane upita."; t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; t[645] = "Preplitanje transakcija nije implementirano. xid={0}, currentXid={1}, state={2}, flags={3}"; t[646] = "An error occurred while setting up the SSL connection."; t[647] = "Greška se dogodila prilikom podešavanja SSL konekcije."; t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; t[655] = "Ilegalna UTF-8 sekvenca: {0} bytes used to encode a {1} byte value: {2}"; t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; t[657] = "Nije implementirano: Spremanje mora biti pozvano uz korišćenje iste konekcije koja se koristi za startovanje transakcije. 
currentXid={0}, prepare xid={1}"; t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; t[659] = "SSLSocketFactory klasa koju pruža {0} se nemože instancirati."; t[662] = "Failed to convert binary xml data to encoding: {0}."; t[663] = "Neuspešno konvertovanje binarnih XML podataka u kodnu stranu: {0}."; t[670] = "Position: {0}"; t[671] = "Pozicija: {0}"; t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; t[677] = "Lokacija: Fajl: {0}, Rutina: {1}, Linija: {2}"; t[684] = "Cannot tell if path is open or closed: {0}."; t[685] = "Nije moguće utvrditi dali je putanja otvorena ili zatvorena: {0}."; t[690] = "Unable to create StAXResult for SQLXML"; t[691] = "Nije moguće kreirati StAXResult za SQLXML"; t[700] = "Cannot convert an instance of {0} to type {1}"; t[701] = "Nije moguće konvertovati instancu {0} u tip {1}"; t[710] = "{0} function takes four and only four argument."; t[711] = "Funkcija {0} prima četiri i samo četiri parametra."; t[718] = "Interrupted while attempting to connect."; t[719] = "Prekinut pokušaj konektovanja."; t[722] = "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; t[723] = "Sigurnosna podešavanja su sprečila konekciju. Verovatno je potrebno da dozvolite konekciju klasi java.net.SocketPermission na bazu na serveru."; t[734] = "No function outputs were registered."; t[735] = "Nije registrovan nikakv izlaz iz funkcije."; t[736] = "{0} function takes one and only one argument."; t[737] = "Funkcija {0} prima jedan i samo jedan parametar."; t[744] = "This ResultSet is closed."; t[745] = "ResultSet je zatvoren."; t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."; t[747] = "Pronađeni su nevažeći karakter podaci. Uzrok je najverovatnije to što pohranjeni podaci sadrže karaktere koji su nevažeći u setu karaktera sa kojima je baza kreirana. Npr. Čuvanje 8bit podataka u SQL_ASCII bazi podataka."; t[752] = "Error disabling autocommit"; t[753] = "Greška u isključivanju autokomita"; t[754] = "Ran out of memory retrieving query results."; t[755] = "Nestalo je memorije prilikom preuzimanja rezultata upita."; t[756] = "Returning autogenerated keys is not supported."; t[757] = "Vraćanje autogenerisanih ključeva nije podržano."; t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; t[761] = "Operacija zahteva skrolabilan ResultSet,ali ovaj ResultSet je FORWARD_ONLY."; t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; t[763] = "CallableStatement funkcija je izvršena dok je izlazni parametar {0} tipa {1} a tip {2} je registrovan kao izlazni parametar."; t[764] = "Unable to find server array type for provided name {0}."; t[765] = "Neuspešno nalaženje liste servera za zadato ime {0}."; t[768] = "Unknown ResultSet holdability setting: {0}."; t[769] = "Nepoznata ResultSet podešavanja za mogućnost držanja (holdability): {0}."; t[772] = "Transaction isolation level {0} not supported."; t[773] = "Nivo izolacije transakcije {0} nije podržan."; t[774] = "Zero bytes may not occur in identifiers."; t[775] = "Nula bajtovji se ne smeju pojavljivati u identifikatorima."; t[776] = "No results were returned by the query."; t[777] = "Nikakav rezultat nije vraćen od strane upita."; t[778] = "A CallableStatement was executed with nothing returned."; t[779] = "CallableStatement je izvršen ali ništa nije vrećeno kao rezultat."; t[780] = "wasNull cannot be call before fetching a result."; t[781] = "wasNull nemože biti pozvan pre zahvatanja rezultata."; t[784] = 
"Returning autogenerated keys by column index is not supported."; t[785] = "Vraćanje autogenerisanih ključeva po kloloni nije podržano."; t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; t[787] = "Izraz ne deklariše izlazni parametar. Koristite '{' ?= poziv ... '}' za deklarisanje."; t[788] = "Can''t use relative move methods while on the insert row."; t[789] = "Ne može se koristiti metod relativnog pomeranja prilikom ubacivanja redova."; t[790] = "A CallableStatement was executed with an invalid number of parameters"; t[791] = "CallableStatement je izvršen sa nevažećim brojem parametara"; t[792] = "Connection is busy with another transaction"; t[793] = "Konekcija je zauzeta sa drugom transakciom."; table = t; } public java.lang.Object handleGetObject (java.lang.String msgid) throws java.util.MissingResourceException { int hash_val = msgid.hashCode() & 0x7fffffff; int idx = (hash_val % 397) << 1; { java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } int incr = ((hash_val % 395) + 1) << 1; for (;;) { idx += incr; if (idx >= 794) idx -= 794; java.lang.Object found = table[idx]; if (found == null) return null; if (msgid.equals(found)) return table[idx + 1]; } } public java.util.Enumeration getKeys () { return new java.util.Enumeration() { private int idx = 0; { while (idx < 794 && table[idx] == null) idx += 2; } public boolean hasMoreElements () { return (idx < 794); } public java.lang.Object nextElement () { java.lang.Object key = table[idx]; do idx += 2; while (idx < 794 && table[idx] == null); return key; } }; } public java.util.ResourceBundle getParent () { return parent; } }
8,464
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds/RedshiftPooledConnection.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group.
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.ds;

import com.amazon.redshift.RedshiftConnection;
import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.LinkedList;
import java.util.List;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.PooledConnection;
import javax.sql.StatementEventListener;

/**
 * Redshift implementation of the PooledConnection interface. This shouldn't be used directly, as
 * the pooling client should just interact with the ConnectionPool instead.
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 * @author Csaba Nagy (ncsaba@yahoo.com)
 * @see com.amazon.redshift.ds.RedshiftConnectionPoolDataSource
 */
public class RedshiftPooledConnection implements PooledConnection {
  // Listeners notified of close / fatal-error events on handed-out connections.
  private final List<ConnectionEventListener> listeners = new LinkedList<ConnectionEventListener>();
  // The physical connection; set to null once this PooledConnection is closed.
  private Connection con;
  // Handler behind the most recently handed-out Connection proxy (only one may be active).
  private ConnectionHandler last;
  private final boolean autoCommit;
  private final boolean isXA;

  /**
   * Creates a new PooledConnection representing the specified physical connection.
   *
   * @param con connection
   * @param autoCommit whether to autocommit
   * @param isXA whether connection is a XA connection
   */
  public RedshiftPooledConnection(Connection con, boolean autoCommit, boolean isXA) {
    this.con = con;
    this.autoCommit = autoCommit;
    this.isXA = isXA;
  }

  public RedshiftPooledConnection(Connection con, boolean autoCommit) {
    this(con, autoCommit, false);
  }

  /**
   * Adds a listener for close or fatal error events on the connection handed out to a client.
   */
  @Override
  public void addConnectionEventListener(ConnectionEventListener connectionEventListener) {
    listeners.add(connectionEventListener);
  }

  /**
   * Removes a listener for close or fatal error events on the connection handed out to a client.
   */
  @Override
  public void removeConnectionEventListener(ConnectionEventListener connectionEventListener) {
    listeners.remove(connectionEventListener);
  }

  /**
   * Closes the physical database connection represented by this PooledConnection. If any client has
   * a connection based on this PooledConnection, it is forcibly closed as well.
   */
  @Override
  public void close() throws SQLException {
    if (RedshiftLogger.isEnable())
      ((BaseConnection) con).getLogger().logFunction(true);

    if (last != null) {
      // Invalidate the outstanding client handle before touching the physical connection.
      last.close();
      if (!con.isClosed()) {
        if (!con.getAutoCommit()) {
          try {
            con.rollback();
          } catch (SQLException ignored) {
          }
        }
      }
    }
    try {
      if (RedshiftLogger.isEnable())
        ((BaseConnection) con).getLogger().logFunction(false);
      con.close();
    } finally {
      // Mark this PooledConnection closed even if the physical close failed.
      con = null;
    }
  }

  /**
   * Gets a handle for a client to use. This is a wrapper around the physical connection, so the
   * client can call close and it will just return the connection to the pool without really closing
   * the physical connection.
   *
   * <p>
   * According to the JDBC 2.0 Optional Package spec (6.2.3), only one client may have an active
   * handle to the connection at a time, so if there is a previous handle active when this is
   * called, the previous one is forcibly closed and its work rolled back.
   * </p>
   */
  @Override
  public Connection getConnection() throws SQLException {
    if (con == null) {
      // Before throwing the exception, let's notify the registered listeners about the error
      RedshiftException sqlException =
          new RedshiftException(GT.tr("This PooledConnection has already been closed."),
              RedshiftState.CONNECTION_DOES_NOT_EXIST);
      fireConnectionFatalError(sqlException);
      throw sqlException;
    }

    if (RedshiftLogger.isEnable())
      ((BaseConnection) con).getLogger().logFunction(true);

    // If any error occurs while opening a new connection, the listeners
    // have to be notified. This gives a chance to connection pools to
    // eliminate bad pooled connections.
    try {
      // Only one connection can be open at a time from this PooledConnection. See JDBC 2.0 Optional
      // Package spec section 6.2.3
      if (last != null) {
        // Forcibly invalidates the previous handle; its handler flags itself "automatic"
        // so the old client gets a descriptive error on the next call.
        last.close();
        if (!con.getAutoCommit()) {
          try {
            con.rollback();
          } catch (SQLException ignored) {
          }
        }
        con.clearWarnings();
      }
      /*
       * In XA-mode, autocommit is handled in RedshiftXAConnection, because it depends on whether an
       * XA-transaction is open or not
       */
      if (!isXA) {
        con.setAutoCommit(autoCommit);
      }
    } catch (SQLException sqlException) {
      fireConnectionFatalError(sqlException);
      // Re-point the stack trace at this call site before rethrowing.
      throw (SQLException) sqlException.fillInStackTrace();
    }
    ConnectionHandler handler = new ConnectionHandler(con);
    last = handler;

    Connection proxyCon = (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
        new Class[]{Connection.class, RedshiftConnection.class}, handler);
    last.setProxy(proxyCon);

    if (RedshiftLogger.isEnable())
      ((BaseConnection) con).getLogger().logFunction(false, proxyCon);

    return proxyCon;
  }

  /**
   * Used to fire a connection closed event to all listeners.
   */
  void fireConnectionClosed() {
    ConnectionEvent evt = null;
    // Copy the listener list so the listener can remove itself during this method call
    ConnectionEventListener[] local = listeners.toArray(new ConnectionEventListener[0]);
    for (ConnectionEventListener listener : local) {
      if (evt == null) {
        // Event is created lazily: no listeners, no allocation.
        evt = createConnectionEvent(null);
      }
      listener.connectionClosed(evt);
    }
  }

  /**
   * Used to fire a connection error event to all listeners.
   */
  void fireConnectionFatalError(SQLException e) {
    ConnectionEvent evt = null;
    // Copy the listener list so the listener can remove itself during this method call
    ConnectionEventListener[] local = listeners.toArray(new ConnectionEventListener[0]);
    for (ConnectionEventListener listener : local) {
      if (evt == null) {
        evt = createConnectionEvent(e);
      }
      listener.connectionErrorOccurred(evt);
    }
  }

  protected ConnectionEvent createConnectionEvent(SQLException e) {
    return new ConnectionEvent(this, e);
  }

  // Classes we consider fatal. SQLState class (first two chars) or full-state prefixes.
  private static String[] fatalClasses = {
      "08", // connection error
      "53", // insufficient resources

      // nb: not just "57" as that includes query cancel which is nonfatal
      "57P01", // admin shutdown
      "57P02", // crash shutdown
      "57P03", // cannot connect now

      "58", // system error (backend)
      "60", // system error (driver)
      "99", // unexpected error
      "F0", // configuration file error (backend)
      "XX", // internal error (backend)
  };

  // Returns true when the given SQLState should be treated as fatal to the connection.
  private static boolean isFatalState(String state) {
    if (state == null) {
      // no info, assume fatal
      return true;
    }
    if (state.length() < 2) {
      // no class info, assume fatal
      return true;
    }

    for (String fatalClass : fatalClasses) {
      if (state.startsWith(fatalClass)) {
        return true; // fatal
      }
    }

    return false;
  }

  /**
   * Fires a connection error event, but only if we think the exception is fatal.
   *
   * @param e the SQLException to consider
   */
  private void fireConnectionError(SQLException e) {
    if (!isFatalState(e.getSQLState())) {
      return;
    }

    fireConnectionFatalError(e);
  }

  /**
   * Instead of declaring a class implementing Connection, which would have to be updated for every
   * JDK rev, use a dynamic proxy to handle all calls through the Connection interface. This is the
   * part that requires JDK 1.3 or higher, though JDK 1.2 could be supported with a 3rd-party proxy
   * package.
   */
  private class ConnectionHandler implements InvocationHandler {
    private Connection con;
    private Connection proxy; // the Connection the client is currently using, which is a proxy
    // True when this handle was invalidated because a newer handle was opened
    // (or the PooledConnection itself was closed); selects the error message below.
    private boolean automatic = false;

    ConnectionHandler(Connection con) {
      this.con = con;
    }

    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      final String methodName = method.getName();
      // From Object
      if (method.getDeclaringClass() == Object.class) {
        if (methodName.equals("toString")) {
          return "Pooled connection wrapping physical connection " + con;
        }
        if (methodName.equals("equals")) {
          return proxy == args[0];
        }
        if (methodName.equals("hashCode")) {
          return System.identityHashCode(proxy);
        }
        try {
          return method.invoke(con, args);
        } catch (InvocationTargetException e) {
          // Unwrap so callers see the underlying exception, not the reflection wrapper.
          throw e.getTargetException();
        }
      }

      // All the rest is from the Connection or RedshiftConnection interface
      if (methodName.equals("isClosed")) {
        return con == null || con.isClosed();
      }
      if (methodName.equals("close")) {
        // we are already closed and a double close
        // is not an error.
        if (con == null) {
          return null;
        }

        SQLException ex = null;
        if (!con.isClosed()) {
          // Roll back uncommitted work before returning the connection to the pool
          // (XA transactions are handled elsewhere, so skip in XA mode).
          if (!isXA && !con.getAutoCommit()) {
            try {
              con.rollback();
            } catch (SQLException e) {
              ex = e;
            }
          }
          con.clearWarnings();
        }
        con = null;
        this.proxy = null;
        last = null;
        fireConnectionClosed();
        if (ex != null) {
          throw ex;
        }
        return null;
      }
      if (con == null || con.isClosed()) {
        throw new RedshiftException(automatic
            ? GT.tr(
                "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.")
            : GT.tr("Connection has been closed."),
            RedshiftState.CONNECTION_DOES_NOT_EXIST);
      }

      // From here on in, we invoke via reflection, catch exceptions,
      // and check if they're fatal before rethrowing.
      try {
        // Statement-producing methods return a proxy so getConnection() on the
        // statement yields the pooled proxy rather than the physical connection.
        if (methodName.equals("createStatement")) {
          Statement st = (Statement) method.invoke(con, args);
          return Proxy.newProxyInstance(getClass().getClassLoader(),
              new Class[]{Statement.class, com.amazon.redshift.RedshiftStatement.class},
              new StatementHandler(this, st));
        } else if (methodName.equals("prepareCall")) {
          Statement st = (Statement) method.invoke(con, args);
          return Proxy.newProxyInstance(getClass().getClassLoader(),
              new Class[]{CallableStatement.class, com.amazon.redshift.RedshiftStatement.class},
              new StatementHandler(this, st));
        } else if (methodName.equals("prepareStatement")) {
          Statement st = (Statement) method.invoke(con, args);
          return Proxy.newProxyInstance(getClass().getClassLoader(),
              new Class[]{PreparedStatement.class, com.amazon.redshift.RedshiftStatement.class},
              new StatementHandler(this, st));
        } else {
          return method.invoke(con, args);
        }
      } catch (final InvocationTargetException ite) {
        final Throwable te = ite.getTargetException();
        if (te instanceof SQLException) {
          fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
        }
        throw te;
      }
    }

    Connection getProxy() {
      return proxy;
    }

    void setProxy(Connection proxy) {
      this.proxy = proxy;
    }

    public void close() {
      if (con != null) {
        automatic = true;
      }
      con = null;
      proxy = null;
      // No close event fired here: see JDBC 2.0 Optional Package spec section 6.3
    }

    public boolean isClosed() {
      return con == null;
    }
  }

  /**
   * <p>Instead of declaring classes implementing Statement, PreparedStatement, and CallableStatement,
   * which would have to be updated for every JDK rev, use a dynamic proxy to handle all calls
   * through the Statement interfaces. This is the part that requires JDK 1.3 or higher, though JDK
   * 1.2 could be supported with a 3rd-party proxy package.</p>
   *
   * <p>The StatementHandler is required in order to return the proper Connection proxy for the
   * getConnection method.</p>
   */
  private class StatementHandler implements InvocationHandler {
    private ConnectionHandler con;
    private Statement st;

    StatementHandler(ConnectionHandler con, Statement st) {
      this.con = con;
      this.st = st;
    }

    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      final String methodName = method.getName();
      // From Object
      if (method.getDeclaringClass() == Object.class) {
        if (methodName.equals("toString")) {
          return "Pooled statement wrapping physical statement " + st;
        }
        if (methodName.equals("hashCode")) {
          return System.identityHashCode(proxy);
        }
        if (methodName.equals("equals")) {
          return proxy == args[0];
        }
        return method.invoke(st, args);
      }

      // All the rest is from the Statement interface
      if (methodName.equals("isClosed")) {
        return st == null || st.isClosed();
      }
      if (methodName.equals("close")) {
        if (st == null || st.isClosed()) {
          return null;
        }
        con = null;
        final Statement oldSt = st;
        st = null;
        oldSt.close();
        return null;
      }
      if (st == null || st.isClosed()) {
        throw new RedshiftException(GT.tr("Statement has been closed."),
            RedshiftState.OBJECT_NOT_IN_STATE);
      }
      if (methodName.equals("getConnection")) {
        return con.getProxy(); // the proxied connection, not a physical connection
      }

      // Delegate the call to the proxied Statement.
      try {
        return method.invoke(st, args);
      } catch (final InvocationTargetException ite) {
        final Throwable te = ite.getTargetException();
        if (te instanceof SQLException) {
          fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
        }
        throw te;
      }
    }
  }

  @Override
  public void removeStatementEventListener(StatementEventListener listener) {
  }

  @Override
  public void addStatementEventListener(StatementEventListener listener) {
  }
}
8,465
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds/RedshiftPoolingDataSource.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.ds;

import com.amazon.redshift.ds.common.BaseDataSource;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;

import java.sql.Connection;
import java.sql.SQLException;
import java.util.Stack;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.naming.NamingException;
import javax.naming.Reference;
import javax.naming.StringRefAddr;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.DataSource;
import javax.sql.PooledConnection;

/**
 * DataSource which uses connection pooling. <span style="color: red;">Don't use this if your
 * server/middleware vendor provides a connection pooling implementation which interfaces with the
 * Redshift ConnectionPoolDataSource implementation!</span> This class is provided as a
 * convenience, but the JDBC Driver is really not supposed to handle the connection pooling
 * algorithm. Instead, the server or middleware product is supposed to handle the mechanics of
 * connection pooling, and use the Redshift implementation of ConnectionPoolDataSource to provide
 * the connections to pool.
 *
 * <p>
 * If you're sure you want to use this, then you must set the properties dataSourceName,
 * databaseName, user, and password (if required for the user). The settings for serverName,
 * portNumber, initialConnections, and maxConnections are optional. Note that <i>only connections
 * for the default user will be pooled!</i> Connections for other users will be normal non-pooled
 * connections, and will not count against the maximum pool size limit.
 * </p>
 *
 * <p>
 * If you put this DataSource in JNDI, and access it from different JVMs (or otherwise load this
 * class from different ClassLoaders), you'll end up with one pool per ClassLoader or VM. This is
 * another area where a server-specific implementation may provide advanced features, such as using
 * a single pool across all VMs in a cluster.
 * </p>
 *
 * <p>
 * This implementation supports JDK 1.5 and higher.
 * </p>
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 *
 * @deprecated Since 42.0.0, instead of this class you should use a fully featured connection pool
 *             like HikariCP, vibur-dbcp, commons-dbcp, c3p0, etc.
 */
@Deprecated
public class RedshiftPoolingDataSource extends BaseDataSource implements DataSource {
  // Global registry of named pools, one entry per distinct dataSourceName.
  protected static ConcurrentMap<String, RedshiftPoolingDataSource> dataSources =
      new ConcurrentHashMap<String, RedshiftPoolingDataSource>();

  public static RedshiftPoolingDataSource getDataSource(String name) {
    return dataSources.get(name);
  }

  // Additional Data Source properties
  protected String dataSourceName; // Must be protected for subclasses to sync updates to it
  private int initialConnections = 0;
  private int maxConnections = 0;
  // State variables
  private boolean initialized = false;
  // Pool state; guarded by 'lock'. 'available' becomes null once the pool is closed.
  private Stack<PooledConnection> available = new Stack<PooledConnection>();
  private Stack<PooledConnection> used = new Stack<PooledConnection>();
  private Object lock = new Object();
  private RedshiftConnectionPoolDataSource source;

  /**
   * Gets a description of this DataSource.
   */
  public String getDescription() {
    // Close the quote around the name (the original string left it unbalanced).
    return "Pooling DataSource '" + dataSourceName + "' from "
        + com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME;
  }

  /**
   * Rejects property changes once the pool has been used.
   *
   * @throws IllegalStateException if {@link #initialize()} has already run
   */
  private void assertNotInitialized() {
    if (initialized) {
      throw new IllegalStateException(
          "Cannot set Data Source properties after DataSource has been used");
    }
  }

  /**
   * Ensures the DataSource properties are not changed after the DataSource has been used.
   *
   * @throws IllegalStateException The Server Name cannot be changed after the DataSource has been
   *         used.
   */
  public void setServerName(String serverName) {
    assertNotInitialized();
    super.setServerName(serverName);
  }

  /**
   * Ensures the DataSource properties are not changed after the DataSource has been used.
   *
   * @throws IllegalStateException The Database Name cannot be changed after the DataSource has been
   *         used.
   */
  public void setDatabaseName(String databaseName) {
    assertNotInitialized();
    super.setDatabaseName(databaseName);
  }

  /**
   * Ensures the DataSource properties are not changed after the DataSource has been used.
   *
   * @throws IllegalStateException The User cannot be changed after the DataSource has been used.
   */
  public void setUser(String user) {
    assertNotInitialized();
    super.setUser(user);
  }

  /**
   * Ensures the DataSource properties are not changed after the DataSource has been used.
   *
   * @throws IllegalStateException The Password cannot be changed after the DataSource has been
   *         used.
   */
  public void setPassword(String password) {
    assertNotInitialized();
    super.setPassword(password);
  }

  /**
   * Ensures the DataSource properties are not changed after the DataSource has been used.
   *
   * @throws IllegalStateException The Port Number cannot be changed after the DataSource has been
   *         used.
   */
  public void setPortNumber(int portNumber) {
    assertNotInitialized();
    super.setPortNumber(portNumber);
  }

  /**
   * Gets the number of connections that will be created when this DataSource is initialized. If you
   * do not call initialize explicitly, it will be initialized the first time a connection is drawn
   * from it.
   *
   * @return number of connections that will be created when this DataSource is initialized
   */
  public int getInitialConnections() {
    return initialConnections;
  }

  /**
   * Sets the number of connections that will be created when this DataSource is initialized. If you
   * do not call initialize explicitly, it will be initialized the first time a connection is drawn
   * from it.
   *
   * @param initialConnections number of initial connections
   * @throws IllegalStateException The Initial Connections cannot be changed after the DataSource
   *         has been used.
   */
  public void setInitialConnections(int initialConnections) {
    assertNotInitialized();
    this.initialConnections = initialConnections;
  }

  /**
   * Gets the maximum number of connections that the pool will allow. If a request comes in and this
   * many connections are in use, the request will block until a connection is available. Note that
   * connections for a user other than the default user will not be pooled and don't count against
   * this limit.
   *
   * @return The maximum number of pooled connection allowed, or 0 for no maximum.
   */
  public int getMaxConnections() {
    return maxConnections;
  }

  /**
   * Sets the maximum number of connections that the pool will allow. If a request comes in and this
   * many connections are in use, the request will block until a connection is available. Note that
   * connections for a user other than the default user will not be pooled and don't count against
   * this limit.
   *
   * @param maxConnections The maximum number of pooled connection to allow, or 0 for no maximum.
   * @throws IllegalStateException The Maximum Connections cannot be changed after the DataSource
   *         has been used.
   */
  public void setMaxConnections(int maxConnections) {
    assertNotInitialized();
    this.maxConnections = maxConnections;
  }

  /**
   * Gets the name of this DataSource. This uniquely identifies the DataSource. You cannot use more
   * than one DataSource in the same VM with the same name.
   *
   * @return name of this DataSource
   */
  public String getDataSourceName() {
    return dataSourceName;
  }

  /**
   * Sets the name of this DataSource. This is required, and uniquely identifies the DataSource. You
   * cannot create or use more than one DataSource in the same VM with the same name.
   *
   * @param dataSourceName datasource name; must not be null
   * @throws IllegalStateException The Data Source Name cannot be changed after the DataSource has
   *         been used.
   * @throws IllegalArgumentException Another PoolingDataSource with the same dataSourceName already
   *         exists.
   * @throws NullPointerException if dataSourceName is null
   */
  public void setDataSourceName(String dataSourceName) {
    assertNotInitialized();
    if (dataSourceName == null) {
      // ConcurrentHashMap forbids null keys; fail here with a clear message instead of
      // an anonymous NPE from putIfAbsent below.
      throw new NullPointerException("dataSourceName must not be null");
    }
    if (dataSourceName.equals(this.dataSourceName)) {
      return;
    }
    // Register atomically so two threads cannot claim the same name.
    RedshiftPoolingDataSource previous = dataSources.putIfAbsent(dataSourceName, this);
    if (previous != null) {
      throw new IllegalArgumentException(
          "DataSource with name '" + dataSourceName + "' already exists!");
    }
    if (this.dataSourceName != null) {
      dataSources.remove(this.dataSourceName);
    }
    this.dataSourceName = dataSourceName;
  }

  /**
   * Initializes this DataSource. If the initialConnections is greater than zero, that number of
   * connections will be created. After this method is called, the DataSource properties cannot be
   * changed. If you do not call this explicitly, it will be called the first time you get a
   * connection from the DataSource.
   *
   * @throws SQLException Occurs when the initialConnections is greater than zero, but the
   *         DataSource is not able to create enough physical connections.
   */
  public void initialize() throws SQLException {
    synchronized (lock) {
      source = createConnectionPool();
      try {
        source.initializeFrom(this);
      } catch (Exception e) {
        throw new RedshiftException(GT.tr("Failed to setup DataSource."),
            RedshiftState.UNEXPECTED_ERROR, e);
      }
      while (available.size() < initialConnections) {
        available.push(source.getPooledConnection());
      }
      initialized = true;
    }
  }

  protected boolean isInitialized() {
    return initialized;
  }

  /**
   * Creates the appropriate ConnectionPool to use for this DataSource.
   *
   * @return appropriate ConnectionPool to use for this DataSource
   */
  protected RedshiftConnectionPoolDataSource createConnectionPool() {
    return new RedshiftConnectionPoolDataSource();
  }

  /**
   * Gets a <b>non-pooled</b> connection, unless the user and password are the same as the default
   * values for this connection pool.
   *
   * @return A pooled connection.
   * @throws SQLException Occurs when no pooled connection is available, and a new physical
   *         connection cannot be created.
   */
  public Connection getConnection(String user, String password) throws SQLException {
    // If this is for the default user/password, use a pooled connection
    if (user == null || (user.equals(getUser()) && ((password == null && getPassword() == null)
        || (password != null && password.equals(getPassword()))))) {
      return getConnection();
    }
    // Otherwise, use a non-pooled connection
    if (!initialized) {
      initialize();
    }
    return super.getConnection(user, password);
  }

  /**
   * Gets a connection from the connection pool.
   *
   * @return A pooled connection.
   * @throws SQLException Occurs when no pooled connection is available, and a new physical
   *         connection cannot be created.
   */
  public Connection getConnection() throws SQLException {
    if (!initialized) {
      initialize();
    }
    return getPooledConnection();
  }

  /**
   * Closes this DataSource, and all the pooled connections, whether in use or not.
   */
  public void close() {
    synchronized (lock) {
      while (!available.isEmpty()) {
        PooledConnection pci = available.pop();
        try {
          pci.close();
        } catch (SQLException ignored) {
          // Best effort: keep closing the remaining pooled connections.
        }
      }
      available = null;
      while (!used.isEmpty()) {
        PooledConnection pci = used.pop();
        pci.removeConnectionEventListener(connectionEventListener);
        try {
          pci.close();
        } catch (SQLException ignored) {
          // Best effort: keep closing the remaining pooled connections.
        }
      }
      used = null;
    }
    removeStoredDataSource();
  }

  protected void removeStoredDataSource() {
    dataSources.remove(dataSourceName);
  }

  protected void addDataSource(String dataSourceName) {
    dataSources.put(dataSourceName, this);
  }

  /**
   * Gets a connection from the pool. Will get an available one if present, or create a new one if
   * under the max limit. Will block if all used and a new one would exceed the max.
   *
   * @throws SQLException if the pool is closed, a physical connection cannot be created, or the
   *         waiting thread is interrupted
   */
  private Connection getPooledConnection() throws SQLException {
    PooledConnection pc = null;
    synchronized (lock) {
      if (available == null) {
        throw new RedshiftException(GT.tr("DataSource has been closed."),
            RedshiftState.CONNECTION_DOES_NOT_EXIST);
      }
      while (true) {
        if (!available.isEmpty()) {
          pc = available.pop();
          used.push(pc);
          break;
        }
        if (maxConnections == 0 || used.size() < maxConnections) {
          pc = source.getPooledConnection();
          used.push(pc);
          break;
        } else {
          try {
            // Wake up every second at a minimum
            lock.wait(1000L);
          } catch (InterruptedException e) {
            // Don't swallow the interrupt (the original made waiting threads
            // un-interruptible): restore the flag and give up.
            Thread.currentThread().interrupt();
            throw new RedshiftException(
                GT.tr("Interrupted while waiting for a free connection."),
                RedshiftState.UNEXPECTED_ERROR, e);
          }
        }
      }
    }
    pc.addConnectionEventListener(connectionEventListener);
    return pc.getConnection();
  }

  /**
   * Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection.
   * This is the only way connections are marked as unused.
   */
  private ConnectionEventListener connectionEventListener = new ConnectionEventListener() {
    public void connectionClosed(ConnectionEvent event) {
      ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
      synchronized (lock) {
        if (available == null) {
          return; // DataSource has been closed
        }
        boolean removed = used.remove(event.getSource());
        if (removed) {
          available.push((PooledConnection) event.getSource());
          // There's now a new connection available
          lock.notify();
        } else {
          // a connection error occurred
        }
      }
    }

    /**
     * This is only called for fatal errors, where the physical connection is useless afterward and
     * should be removed from the pool.
     */
    public void connectionErrorOccurred(ConnectionEvent event) {
      ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
      synchronized (lock) {
        if (available == null) {
          return; // DataSource has been closed
        }
        used.remove(event.getSource());
        // We're now at least 1 connection under the max
        lock.notify();
      }
    }
  };

  /**
   * Adds custom properties for this DataSource to the properties defined in the superclass.
   */
  public Reference getReference() throws NamingException {
    Reference ref = super.getReference();
    ref.add(new StringRefAddr("dataSourceName", dataSourceName));
    if (initialConnections > 0) {
      ref.add(new StringRefAddr("initialConnections", Integer.toString(initialConnections)));
    }
    if (maxConnections > 0) {
      ref.add(new StringRefAddr("maxConnections", Integer.toString(maxConnections)));
    }
    return ref;
  }

  public boolean isWrapperFor(Class<?> iface) throws SQLException {
    return iface.isAssignableFrom(getClass());
  }

  public <T> T unwrap(Class<T> iface) throws SQLException {
    if (iface.isAssignableFrom(getClass())) {
      return iface.cast(this);
    }
    throw new SQLException("Cannot unwrap to " + iface.getName());
  }
}
8,466
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds/RedshiftConnectionPoolDataSource.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.ds;

import com.amazon.redshift.ds.common.BaseDataSource;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.sql.SQLException;
import javax.sql.ConnectionPoolDataSource;
import javax.sql.DataSource;
import javax.sql.PooledConnection;

/**
 * Redshift implementation of {@link ConnectionPoolDataSource}, the factory that an app server or
 * middleware connection pool uses to obtain physical connections. Prefer a vendor-supplied pool
 * that consumes this class over the driver's own RedshiftPoolingDataSource: the server may offer
 * advanced features such as reusing one Connection across all EJBs in a transaction.
 *
 * <p>
 * To use this ConnectionPoolDataSource you must set the databaseName property; serverName,
 * portNumber, user, and password are optional (all declared in the superclass).
 * </p>
 *
 * <p>
 * This implementation supports JDK 1.3 and higher.
 * </p>
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 */
public class RedshiftConnectionPoolDataSource extends BaseDataSource
    implements DataSource, ConnectionPoolDataSource, Serializable {
  // Initial autoCommit mode applied to every connection this factory hands out.
  private boolean defaultAutoCommit = true;

  /**
   * Gets a description of this DataSource.
   */
  public String getDescription() {
    return "ConnectionPoolDataSource from " + com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME;
  }

  /**
   * Opens a physical connection and wraps it for pooling by the app server or middleware.
   *
   * @throws java.sql.SQLException Occurs when the physical database connection cannot be
   *         established.
   */
  public PooledConnection getPooledConnection() throws SQLException {
    java.sql.Connection physical = getConnection();
    return new RedshiftPooledConnection(physical, defaultAutoCommit);
  }

  /**
   * Opens a physical connection for the given credentials and wraps it for pooling by the app
   * server or middleware.
   *
   * @throws java.sql.SQLException Occurs when the physical database connection cannot be
   *         established.
   */
  public PooledConnection getPooledConnection(String user, String password) throws SQLException {
    java.sql.Connection physical = getConnection(user, password);
    return new RedshiftPooledConnection(physical, defaultAutoCommit);
  }

  /**
   * Reports whether connections supplied by this pool start with autoCommit enabled. The default
   * is {@code true}.
   *
   * @return true if connections supplied by this pool will have autoCommit
   */
  public boolean isDefaultAutoCommit() {
    return defaultAutoCommit;
  }

  /**
   * Chooses whether connections supplied by this pool start with autoCommit enabled. The default
   * is {@code true}.
   *
   * @param defaultAutoCommit whether connections supplied by this pool will have autoCommit
   */
  public void setDefaultAutoCommit(boolean defaultAutoCommit) {
    this.defaultAutoCommit = defaultAutoCommit;
  }

  // Custom serialization: superclass state first, then this class's single flag.
  private void writeObject(ObjectOutputStream out) throws IOException {
    writeBaseObject(out);
    out.writeBoolean(defaultAutoCommit);
  }

  // Must read fields in exactly the order writeObject emitted them.
  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    readBaseObject(in);
    defaultAutoCommit = in.readBoolean();
  }

  public boolean isWrapperFor(Class<?> iface) throws SQLException {
    return iface.isAssignableFrom(getClass());
  }

  public <T> T unwrap(Class<T> iface) throws SQLException {
    if (!iface.isAssignableFrom(getClass())) {
      throw new SQLException("Cannot unwrap to " + iface.getName());
    }
    return iface.cast(this);
  }
}
8,467
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds/RedshiftSimpleDataSource.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.ds;

import com.amazon.redshift.ds.common.BaseDataSource;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.sql.SQLException;

import javax.sql.DataSource;

/**
 * A plain DataSource that performs no connection pooling. Before use, the
 * {@code databaseName} property must be set; {@code serverName}, {@code portNumber},
 * {@code user}, and {@code password} are optional. All of these properties are declared
 * on the superclass.
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 */
public class RedshiftSimpleDataSource extends BaseDataSource implements DataSource, Serializable {

  /**
   * Gets a description of this DataSource.
   */
  public String getDescription() {
    return "Non-Pooling DataSource from " + com.amazon.redshift.util.DriverInfo.DRIVER_FULL_NAME;
  }

  // Serialization delegates entirely to the superclass; this class adds no state of its own.
  private void writeObject(ObjectOutputStream out) throws IOException {
    writeBaseObject(out);
  }

  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    readBaseObject(in);
  }

  public boolean isWrapperFor(Class<?> iface) throws SQLException {
    return iface.isAssignableFrom(getClass());
  }

  public <T> T unwrap(Class<T> iface) throws SQLException {
    // Guard clause: fail fast when the requested interface does not match.
    if (!iface.isAssignableFrom(getClass())) {
      throw new SQLException("Cannot unwrap to " + iface.getName());
    }
    return iface.cast(this);
  }
}
8,468
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds/common/RedshiftObjectFactory.java
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.ds.common;

import com.amazon.redshift.ds.RedshiftConnectionPoolDataSource;
import com.amazon.redshift.ds.RedshiftPoolingDataSource;
import com.amazon.redshift.ds.RedshiftSimpleDataSource;

import java.util.Hashtable;

import javax.naming.Context;
import javax.naming.Name;
import javax.naming.RefAddr;
import javax.naming.Reference;
import javax.naming.spi.ObjectFactory;

/**
 * Returns a DataSource-ish thing based on a JNDI reference. In the case of a SimpleDataSource or
 * ConnectionPool, a new instance is created each time, as there is no connection state to maintain.
 * In the case of a PoolingDataSource, the same DataSource will be returned for every invocation
 * within the same VM/ClassLoader, so that the state of the connections in the pool will be
 * consistent.
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 */
public class RedshiftObjectFactory implements ObjectFactory {
  /**
   * Dereferences a Redshift DataSource. Other types of references are ignored.
   *
   * @param obj the (possibly null) object, normally a {@link Reference}, to turn into a
   *        DataSource; non-Reference objects are ignored (null is returned)
   * @param name ignored
   * @param nameCtx ignored
   * @param environment ignored
   * @return a DataSource instance matching the reference's class name, or null if the
   *         reference is not one this factory recognizes
   * @throws Exception per the {@link ObjectFactory} contract
   */
  public Object getObjectInstance(Object obj, Name name, Context nameCtx,
      Hashtable<?, ?> environment) throws Exception {
    // Per the ObjectFactory contract, return null for objects this factory cannot handle
    // rather than failing with a ClassCastException.
    if (!(obj instanceof Reference)) {
      return null;
    }
    Reference ref = (Reference) obj;
    String className = ref.getClassName();
    // Old names are here for those who still use them
    // Fix: the simple-data-source class name was previously corrupted
    // ("RedddddddshiftSimpleDataSource"), so valid references never matched.
    if (className.equals("com.amazon.redshift.ds.RedshiftSimpleDataSource")
        || className.equals("com.amazon.redshift.jdbc2.optional.SimpleDataSource")
        || className.equals("com.amazon.redshift.jdbc3.Jdbc3SimpleDataSource")) {
      return loadSimpleDataSource(ref);
    } else if (className.equals("com.amazon.redshift.ds.RedshiftConnectionPoolDataSource")
        || className.equals("com.amazon.redshift.jdbc2.optional.ConnectionPool")
        || className.equals("com.amazon.redshift.jdbc3.Jdbc3ConnectionPool")) {
      return loadConnectionPool(ref);
    } else if (className.equals("com.amazon.redshift.ds.RedshiftPoolingDataSource")
        || className.equals("com.amazon.redshift.jdbc2.optional.PoolingDataSource")
        || className.equals("com.amazon.redshift.jdbc3.Jdbc3PoolingDataSource")) {
      return loadPoolingDataSource(ref);
    } else {
      return null;
    }
  }

  // Builds (or re-uses) the VM-wide pooling data source registered under "dataSourceName".
  private Object loadPoolingDataSource(Reference ref) {
    // If DataSource exists, return it
    String name = getProperty(ref, "dataSourceName");
    RedshiftPoolingDataSource pds = RedshiftPoolingDataSource.getDataSource(name);
    if (pds != null) {
      return pds;
    }
    // Otherwise, create a new one
    pds = new RedshiftPoolingDataSource();
    pds.setDataSourceName(name);
    loadBaseDataSource(pds, ref);
    String min = getProperty(ref, "initialConnections");
    if (min != null) {
      pds.setInitialConnections(Integer.parseInt(min));
    }
    String max = getProperty(ref, "maxConnections");
    if (max != null) {
      pds.setMaxConnections(Integer.parseInt(max));
    }
    return pds;
  }

  // A fresh, stateless instance each time — no pooling state to preserve.
  private Object loadSimpleDataSource(Reference ref) {
    RedshiftSimpleDataSource ds = new RedshiftSimpleDataSource();
    return loadBaseDataSource(ds, ref);
  }

  // A fresh ConnectionPoolDataSource each time; the app server supplies the actual pool.
  private Object loadConnectionPool(Reference ref) {
    RedshiftConnectionPoolDataSource cp = new RedshiftConnectionPoolDataSource();
    return loadBaseDataSource(cp, ref);
  }

  /**
   * Populates the common BaseDataSource properties from the JNDI reference.
   *
   * @param ds the data source to configure
   * @param ref the JNDI reference carrying the property values
   * @return the configured data source (same instance as {@code ds})
   */
  protected Object loadBaseDataSource(BaseDataSource ds, Reference ref) {
    ds.setFromReference(ref);
    return ds;
  }

  /**
   * Reads a single string-valued address from the reference.
   *
   * @param ref the JNDI reference
   * @param s the address type to look up
   * @return the address content as a String, or null when the address is absent
   */
  protected String getProperty(Reference ref, String s) {
    RefAddr addr = ref.get(s);
    if (addr == null) {
      return null;
    }
    return (String) addr.getContent();
  }
}
8,469
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/ds/common/BaseDataSource.java
/* * Copyright (c) 2004, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.ds.common; import com.amazon.redshift.RedshiftProperty; import com.amazon.redshift.jdbc.AutoSave; import com.amazon.redshift.jdbc.PreferQueryMode; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.ExpressionProperties; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import com.amazon.redshift.util.URLCoder; import com.amazon.redshift.core.BaseConnection; import com.amazon.redshift.util.RedshiftProperties; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.PrintWriter; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.util.Arrays; import java.util.Properties; import javax.naming.NamingException; import javax.naming.RefAddr; import javax.naming.Reference; import javax.naming.Referenceable; import javax.naming.StringRefAddr; import javax.sql.CommonDataSource; /** * Base class for data sources and related classes. * * @author Aaron Mulder (ammulder@chariotsolutions.com) */ public abstract class BaseDataSource implements CommonDataSource, Referenceable { // Standard properties, defined in the JDBC 2.0 Optional Package spec private String[] serverNames = new String[] {"localhost"}; private String databaseName = ""; private String user; private String password; private int[] portNumbers = new int[] {0}; // Map for all other properties private Properties properties = new RedshiftProperties(); /* * Ensure the driver is loaded as JDBC Driver might be invisible to Java's ServiceLoader. 
* Usually, {@code Class.forName(...)} is not required as {@link DriverManager} detects JDBC drivers * via {@code META-INF/services/java.sql.Driver} entries. However there might be cases when the driver * is located at the application level classloader, thus it might be required to perform manual * registration of the driver. */ static { try { Class.forName("com.amazon.redshift.Driver"); } catch (ClassNotFoundException e) { throw new IllegalStateException( "BaseDataSource is unable to load com.amazon.redshift.Driver. Please check if you have proper Redshift JDBC Driver jar on the classpath", e); } } /** * Gets a connection to the Redshift database. The database is identified by the DataSource * properties serverName, databaseName, and portNumber. The user to connect as is identified by * the DataSource properties user and password. * * @return A valid database connection. * @throws SQLException Occurs when the database connection cannot be established. */ public Connection getConnection() throws SQLException { return getConnection(user, password); } /** * Gets a connection to the Redshift database. The database is identified by the DataSource * properties serverName, databaseName, and portNumber. The user to connect as is identified by * the arguments user and password, which override the DataSource properties by the same name. * * @param user user * @param password password * @return A valid database connection. * @throws SQLException Occurs when the database connection cannot be established. 
*/ public Connection getConnection(String user, String password) throws SQLException { try { Connection con = DriverManager.getConnection(getUrl(), user, password); if (RedshiftLogger.isEnable()) { ((BaseConnection)con).getLogger().log(LogLevel.DEBUG, "Created a {0} for {1} at {2}", new Object[] {getDescription(), user, RedshiftLogger.maskSecureInfoInUrl(getUrl())}); } if (RedshiftLogger.isEnable()) ((BaseConnection)con).getLogger().logFunction(false, con); return con; } catch (SQLException e) { if (RedshiftLogger.isEnable()) RedshiftLogger.getDriverLogger().logError(e); throw e; } } /** * This implementation don't use a LogWriter. */ @Override public PrintWriter getLogWriter() { return null; } /** * This implementation don't use a LogWriter. * * @param printWriter Not used */ @Override public void setLogWriter(PrintWriter printWriter) { // NOOP } /** * Gets the name of the host the Redshift database is running on. * * @return name of the host the Redshift database is running on * @deprecated use {@link #getServerNames()} */ @Deprecated public String getServerName() { return serverNames[0]; } /** * Gets the name of the host(s) the Redshift database is running on. * * @return name of the host(s) the Redshift database is running on */ public String[] getServerNames() { return serverNames; } /** * Sets the name of the host the Redshift database is running on. If this is changed, it will * only affect future calls to getConnection. The default value is {@code localhost}. * * @param serverName name of the host the Redshift database is running on * @deprecated use {@link #setServerNames(String[])} */ @Deprecated public void setServerName(String serverName) { this.setServerNames(new String[] { serverName }); } /** * Sets the name of the host(s) the Redshift database is running on. If this is changed, it will * only affect future calls to getConnection. The default value is {@code localhost}. 
* * @param serverNames name of the host(s) the Redshift database is running on */ public void setServerNames(String[] serverNames) { if (serverNames == null || serverNames.length == 0) { this.serverNames = new String[] {"localhost"}; } else { serverNames = Arrays.copyOf(serverNames, serverNames.length); for (int i = 0; i < serverNames.length; i++) { if (serverNames[i] == null || serverNames[i].equals("")) { serverNames[i] = "localhost"; } } this.serverNames = serverNames; } } /** * Gets the name of the Redshift database, running on the server identified by the serverName * property. * * @return name of the Redshift database */ public String getDatabaseName() { return databaseName; } /** * Sets the name of the Redshift database, running on the server identified by the serverName * property. If this is changed, it will only affect future calls to getConnection. * * @param databaseName name of the Redshift database */ public void setDatabaseName(String databaseName) { this.databaseName = databaseName; } /** * Gets a description of this DataSource-ish thing. Must be customized by subclasses. * * @return description of this DataSource-ish thing */ public abstract String getDescription(); /** * Gets the user to connect as by default. If this is not specified, you must use the * getConnection method which takes a user and password as parameters. * * @return user to connect as by default */ public String getUser() { return user; } /** * Added for backward compatibility. * * @return user to connect as by default */ public String getUserID() { return getUser(); } /** * Sets the user to connect as by default. If this is not specified, you must use the * getConnection method which takes a user and password as parameters. If this is changed, it will * only affect future calls to getConnection. * * @param user user to connect as by default */ public void setUser(String user) { this.user = user; } /** * Added for backward compatibility. 
* * @param id user to connect as by default */ public void setUserID(String id) { setUser(id); } /** * Gets the password to connect with by default. If this is not specified but a password is needed * to log in, you must use the getConnection method which takes a user and password as parameters. * * @return password to connect with by default */ public String getPassword() { return password; } /** * Sets the password to connect with by default. If this is not specified but a password is needed * to log in, you must use the getConnection method which takes a user and password as parameters. * If this is changed, it will only affect future calls to getConnection. * * @param password password to connect with by default */ public void setPassword(String password) { this.password = password; } /** * Gets the port which the Redshift server is listening on for TCP/IP connections. * * @return The port, or 0 if the default port will be used. * @deprecated use {@link #getPortNumbers()} */ @Deprecated public int getPortNumber() { if (portNumbers == null || portNumbers.length == 0) { return 0; } return portNumbers[0]; } /** * Gets the port(s) which the Redshift server is listening on for TCP/IP connections. * * @return The port(s), or 0 if the default port will be used. */ public int[] getPortNumbers() { return portNumbers; } /** * Sets the port which the Redshift server is listening on for TCP/IP connections. Be sure the * -i flag is passed to postmaster when Redshift is started. If this is not set, or set to 0, * the default port will be used. * * @param portNumber port which the Redshift server is listening on for TCP/IP * @deprecated use {@link #setPortNumbers(int[])} */ @Deprecated public void setPortNumber(int portNumber) { setPortNumbers(new int[] { portNumber }); } /** * Sets the port(s) which the Redshift server is listening on for TCP/IP connections. Be sure the * -i flag is passed to postmaster when Redshift is started. 
If this is not set, or set to 0, * the default port will be used. * * @param portNumbers port(s) which the Redshift server is listening on for TCP/IP */ public void setPortNumbers(int[] portNumbers) { if (portNumbers == null || portNumbers.length == 0) { portNumbers = new int[] { 0 }; } this.portNumbers = Arrays.copyOf(portNumbers, portNumbers.length); } /** * @return command line options for this connection */ public String getOptions() { return RedshiftProperty.OPTIONS.get(properties); } /** * Set command line options for this connection * * @param options string to set options to */ public void setOptions(String options) { RedshiftProperty.OPTIONS.set(properties, options); } /** * @return login timeout * @see RedshiftProperty#LOGIN_TIMEOUT */ @Override public int getLoginTimeout() { return RedshiftProperty.LOGIN_TIMEOUT.getIntNoCheck(properties); } /** * @param loginTimeout login timeout * @see RedshiftProperty#LOGIN_TIMEOUT */ @Override public void setLoginTimeout(int loginTimeout) { RedshiftProperty.LOGIN_TIMEOUT.set(properties, loginTimeout); } /** * @return connect timeout * @see RedshiftProperty#CONNECT_TIMEOUT */ public int getConnectTimeout() { return RedshiftProperty.CONNECT_TIMEOUT.getIntNoCheck(properties); } /** * @param connectTimeout connect timeout * @see RedshiftProperty#CONNECT_TIMEOUT */ public void setConnectTimeout(int connectTimeout) { RedshiftProperty.CONNECT_TIMEOUT.set(properties, connectTimeout); } /** * @return protocol version * @see RedshiftProperty#PROTOCOL_VERSION */ public int getProtocolVersion() { if (!RedshiftProperty.PROTOCOL_VERSION.isPresent(properties)) { return 0; } else { return RedshiftProperty.PROTOCOL_VERSION.getIntNoCheck(properties); } } /** * @param protocolVersion protocol version * @see RedshiftProperty#PROTOCOL_VERSION */ public void setProtocolVersion(int protocolVersion) { if (protocolVersion == 0) { RedshiftProperty.PROTOCOL_VERSION.set(properties, null); } else { 
RedshiftProperty.PROTOCOL_VERSION.set(properties, protocolVersion); } } /** * @return receive buffer size * @see RedshiftProperty#RECEIVE_BUFFER_SIZE */ public int getReceiveBufferSize() { return RedshiftProperty.RECEIVE_BUFFER_SIZE.getIntNoCheck(properties); } /** * @param nbytes receive buffer size * @see RedshiftProperty#RECEIVE_BUFFER_SIZE */ public void setReceiveBufferSize(int nbytes) { RedshiftProperty.RECEIVE_BUFFER_SIZE.set(properties, nbytes); } /** * @return send buffer size * @see RedshiftProperty#SEND_BUFFER_SIZE */ public int getSendBufferSize() { return RedshiftProperty.SEND_BUFFER_SIZE.getIntNoCheck(properties); } /** * @param nbytes send buffer size * @see RedshiftProperty#SEND_BUFFER_SIZE */ public void setSendBufferSize(int nbytes) { RedshiftProperty.SEND_BUFFER_SIZE.set(properties, nbytes); } /** * @param count prepare threshold * @see RedshiftProperty#PREPARE_THRESHOLD */ public void setPrepareThreshold(int count) { RedshiftProperty.PREPARE_THRESHOLD.set(properties, count); } /** * @return prepare threshold * @see RedshiftProperty#PREPARE_THRESHOLD */ public int getPrepareThreshold() { return RedshiftProperty.PREPARE_THRESHOLD.getIntNoCheck(properties); } /** * @return prepared statement cache size (number of statements per connection) * @see RedshiftProperty#PREPARED_STATEMENT_CACHE_QUERIES */ public int getPreparedStatementCacheQueries() { return RedshiftProperty.PREPARED_STATEMENT_CACHE_QUERIES.getIntNoCheck(properties); } /** * @param cacheSize prepared statement cache size (number of statements per connection) * @see RedshiftProperty#PREPARED_STATEMENT_CACHE_QUERIES */ public void setPreparedStatementCacheQueries(int cacheSize) { RedshiftProperty.PREPARED_STATEMENT_CACHE_QUERIES.set(properties, cacheSize); } /** * @return prepared statement cache size (number of megabytes per connection) * @see RedshiftProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB */ public int getPreparedStatementCacheSizeMiB() { return 
RedshiftProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getIntNoCheck(properties); } /** * @param cacheSize statement cache size (number of megabytes per connection) * @see RedshiftProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB */ public void setPreparedStatementCacheSizeMiB(int cacheSize) { RedshiftProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.set(properties, cacheSize); } /** * @return database metadata cache fields size (number of fields cached per connection) * @see RedshiftProperty#DATABASE_METADATA_CACHE_FIELDS */ public int getDatabaseMetadataCacheFields() { return RedshiftProperty.DATABASE_METADATA_CACHE_FIELDS.getIntNoCheck(properties); } /** * @param cacheSize database metadata cache fields size (number of fields cached per connection) * @see RedshiftProperty#DATABASE_METADATA_CACHE_FIELDS */ public void setDatabaseMetadataCacheFields(int cacheSize) { RedshiftProperty.DATABASE_METADATA_CACHE_FIELDS.set(properties, cacheSize); } /** * @return database metadata cache fields size (number of megabytes per connection) * @see RedshiftProperty#DATABASE_METADATA_CACHE_FIELDS_MIB */ public int getDatabaseMetadataCacheFieldsMiB() { return RedshiftProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getIntNoCheck(properties); } /** * @param cacheSize database metadata cache fields size (number of megabytes per connection) * @see RedshiftProperty#DATABASE_METADATA_CACHE_FIELDS_MIB */ public void setDatabaseMetadataCacheFieldsMiB(int cacheSize) { RedshiftProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(properties, cacheSize); } /** * @param fetchSize default fetch size * @see RedshiftProperty#DEFAULT_ROW_FETCH_SIZE */ public void setDefaultRowFetchSize(int fetchSize) { RedshiftProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize); } /** * @return default fetch size * @see RedshiftProperty#DEFAULT_ROW_FETCH_SIZE */ public int getDefaultRowFetchSize() { return RedshiftProperty.DEFAULT_ROW_FETCH_SIZE.getIntNoCheck(properties); } /** * @param unknownLength unknown length * @see 
RedshiftProperty#UNKNOWN_LENGTH */ public void setUnknownLength(int unknownLength) { RedshiftProperty.UNKNOWN_LENGTH.set(properties, unknownLength); } /** * @return unknown length * @see RedshiftProperty#UNKNOWN_LENGTH */ public int getUnknownLength() { return RedshiftProperty.UNKNOWN_LENGTH.getIntNoCheck(properties); } /** * @param seconds socket timeout * @see RedshiftProperty#SOCKET_TIMEOUT */ public void setSocketTimeout(int seconds) { RedshiftProperty.SOCKET_TIMEOUT.set(properties, seconds); } /** * @return socket timeout * @see RedshiftProperty#SOCKET_TIMEOUT */ public int getSocketTimeout() { return RedshiftProperty.SOCKET_TIMEOUT.getIntNoCheck(properties); } /** * @param seconds timeout that is used for sending cancel command * @see RedshiftProperty#CANCEL_SIGNAL_TIMEOUT */ public void setCancelSignalTimeout(int seconds) { RedshiftProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds); } /** * @return timeout that is used for sending cancel command in seconds * @see RedshiftProperty#CANCEL_SIGNAL_TIMEOUT */ public int getCancelSignalTimeout() { return RedshiftProperty.CANCEL_SIGNAL_TIMEOUT.getIntNoCheck(properties); } /** * @param enabled if SSL is enabled * @see RedshiftProperty#SSL */ public void setSsl(boolean enabled) { if (enabled) { RedshiftProperty.SSL.set(properties, true); } else { RedshiftProperty.SSL.set(properties, false); } } /** * @return true if SSL is enabled * @see RedshiftProperty#SSL */ public boolean getSsl() { // "true" if "ssl" is set but empty return RedshiftProperty.SSL.getBoolean(properties) || "".equals(RedshiftProperty.SSL.get(properties)); } /** * @param classname SSL factory class name * @see RedshiftProperty#SSL_FACTORY */ public void setSslfactory(String classname) { RedshiftProperty.SSL_FACTORY.set(properties, classname); } /** * @return SSL factory class name * @see RedshiftProperty#SSL_FACTORY */ public String getSslfactory() { return RedshiftProperty.SSL_FACTORY.get(properties); } /** * @return SSL mode * @see 
RedshiftProperty#SSL_MODE */ public String getSslMode() { return RedshiftProperty.SSL_MODE.get(properties); } /** * @param mode SSL mode * @see RedshiftProperty#SSL_MODE */ public void setSslMode(String mode) { RedshiftProperty.SSL_MODE.set(properties, mode); } /** * @return SSL mode * @see RedshiftProperty#SSL_FACTORY_ARG */ public String getSslFactoryArg() { return RedshiftProperty.SSL_FACTORY_ARG.get(properties); } /** * @param arg argument forwarded to SSL factory * @see RedshiftProperty#SSL_FACTORY_ARG */ public void setSslFactoryArg(String arg) { RedshiftProperty.SSL_FACTORY_ARG.set(properties, arg); } /** * @return argument forwarded to SSL factory * @see RedshiftProperty#SSL_HOSTNAME_VERIFIER */ public String getSslHostnameVerifier() { return RedshiftProperty.SSL_HOSTNAME_VERIFIER.get(properties); } /** * @param className SSL hostname verifier * @see RedshiftProperty#SSL_HOSTNAME_VERIFIER */ public void setSslHostnameVerifier(String className) { RedshiftProperty.SSL_HOSTNAME_VERIFIER.set(properties, className); } /** * @return className SSL hostname verifier * @see RedshiftProperty#SSL_CERT */ public String getSslCert() { return RedshiftProperty.SSL_CERT.get(properties); } /** * @param file SSL certificate * @see RedshiftProperty#SSL_CERT */ public void setSslCert(String file) { RedshiftProperty.SSL_CERT.set(properties, file); } /** * @return SSL certificate * @see RedshiftProperty#SSL_KEY */ public String getSslKey() { return RedshiftProperty.SSL_KEY.get(properties); } /** * @param file SSL key * @see RedshiftProperty#SSL_KEY */ public void setSslKey(String file) { RedshiftProperty.SSL_KEY.set(properties, file); } /** * @return SSL root certificate * @see RedshiftProperty#SSL_ROOT_CERT */ public String getSslRootCert() { return RedshiftProperty.SSL_ROOT_CERT.get(properties); } /** * @param file SSL root certificate * @see RedshiftProperty#SSL_ROOT_CERT */ public void setSslRootCert(String file) { RedshiftProperty.SSL_ROOT_CERT.set(properties, file); } /** 
* @return SSL password * @see RedshiftProperty#SSL_PASSWORD */ public String getSslPassword() { return RedshiftProperty.SSL_PASSWORD.get(properties); } /** * @param password SSL password * @see RedshiftProperty#SSL_PASSWORD */ public void setSslPassword(String password) { RedshiftProperty.SSL_PASSWORD.set(properties, password); } /** * @return SSL password callback * @see RedshiftProperty#SSL_PASSWORD_CALLBACK */ public String getSslPasswordCallback() { return RedshiftProperty.SSL_PASSWORD_CALLBACK.get(properties); } /** * @param className SSL password callback class name * @see RedshiftProperty#SSL_PASSWORD_CALLBACK */ public void setSslPasswordCallback(String className) { RedshiftProperty.SSL_PASSWORD_CALLBACK.set(properties, className); } /** * @param applicationName application name * @see RedshiftProperty#APPLICATION_NAME */ public void setApplicationName(String applicationName) { RedshiftProperty.APPLICATION_NAME.set(properties, applicationName); } /** * @return application name * @see RedshiftProperty#APPLICATION_NAME */ public String getApplicationName() { return RedshiftProperty.APPLICATION_NAME.get(properties); } /** * @param targetServerType target server type * @see RedshiftProperty#TARGET_SERVER_TYPE */ public void setTargetServerType(String targetServerType) { RedshiftProperty.TARGET_SERVER_TYPE.set(properties, targetServerType); } /** * @return target server type * @see RedshiftProperty#TARGET_SERVER_TYPE */ public String getTargetServerType() { return RedshiftProperty.TARGET_SERVER_TYPE.get(properties); } /** * @param loadBalanceHosts load balance hosts * @see RedshiftProperty#LOAD_BALANCE_HOSTS */ public void setLoadBalanceHosts(boolean loadBalanceHosts) { RedshiftProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts); } /** * @return load balance hosts * @see RedshiftProperty#LOAD_BALANCE_HOSTS */ public boolean getLoadBalanceHosts() { return RedshiftProperty.LOAD_BALANCE_HOSTS.isPresent(properties); } /** * @param hostRecheckSeconds host 
recheck seconds * @see RedshiftProperty#HOST_RECHECK_SECONDS */ public void setHostRecheckSeconds(int hostRecheckSeconds) { RedshiftProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds); } /** * @return host recheck seconds * @see RedshiftProperty#HOST_RECHECK_SECONDS */ public int getHostRecheckSeconds() { return RedshiftProperty.HOST_RECHECK_SECONDS.getIntNoCheck(properties); } /** * @param enabled if TCP keep alive should be enabled * @see RedshiftProperty#TCP_KEEP_ALIVE */ public void setTcpKeepAlive(boolean enabled) { RedshiftProperty.TCP_KEEP_ALIVE.set(properties, enabled); } /** * @return true if TCP keep alive is enabled * @see RedshiftProperty#TCP_KEEP_ALIVE */ public boolean getTcpKeepAlive() { return RedshiftProperty.TCP_KEEP_ALIVE.getBoolean(properties); } /** * @param enabled if binary transfer should be enabled * @see RedshiftProperty#BINARY_TRANSFER */ public void setBinaryTransfer(boolean enabled) { RedshiftProperty.BINARY_TRANSFER.set(properties, enabled); } /** * @return true if binary transfer is enabled * @see RedshiftProperty#BINARY_TRANSFER */ public boolean getBinaryTransfer() { return RedshiftProperty.BINARY_TRANSFER.getBoolean(properties); } /** * @param oidList list of OIDs that are allowed to use binary transfer * @see RedshiftProperty#BINARY_TRANSFER_ENABLE */ public void setBinaryTransferEnable(String oidList) { RedshiftProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList); } /** * @return list of OIDs that are allowed to use binary transfer * @see RedshiftProperty#BINARY_TRANSFER_ENABLE */ public String getBinaryTransferEnable() { return RedshiftProperty.BINARY_TRANSFER_ENABLE.get(properties); } /** * @param oidList list of OIDs that are not allowed to use binary transfer * @see RedshiftProperty#BINARY_TRANSFER_DISABLE */ public void setBinaryTransferDisable(String oidList) { RedshiftProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList); } /** * @return list of OIDs that are not allowed to use binary transfer * @see 
RedshiftProperty#BINARY_TRANSFER_DISABLE */ public String getBinaryTransferDisable() { return RedshiftProperty.BINARY_TRANSFER_DISABLE.get(properties); } /** * @return string type * @see RedshiftProperty#STRING_TYPE */ public String getStringType() { return RedshiftProperty.STRING_TYPE.get(properties); } /** * @param stringType string type * @see RedshiftProperty#STRING_TYPE */ public void setStringType(String stringType) { RedshiftProperty.STRING_TYPE.set(properties, stringType); } /** * @return true if column sanitizer is disabled * @see RedshiftProperty#DISABLE_COLUMN_SANITISER */ public boolean isColumnSanitiserDisabled() { return RedshiftProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties); } /** * @return true if column sanitizer is disabled * @see RedshiftProperty#DISABLE_COLUMN_SANITISER */ public boolean getDisableColumnSanitiser() { return RedshiftProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties); } /** * @param disableColumnSanitiser if column sanitizer should be disabled * @see RedshiftProperty#DISABLE_COLUMN_SANITISER */ public void setDisableColumnSanitiser(boolean disableColumnSanitiser) { RedshiftProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser); } /** * @return current schema * @see RedshiftProperty#CURRENT_SCHEMA */ public String getCurrentSchema() { return RedshiftProperty.CURRENT_SCHEMA.get(properties); } /** * @param currentSchema current schema * @see RedshiftProperty#CURRENT_SCHEMA */ public void setCurrentSchema(String currentSchema) { RedshiftProperty.CURRENT_SCHEMA.set(properties, currentSchema); } /** * @return true if connection is readonly * @see RedshiftProperty#READ_ONLY */ public boolean getReadOnly() { return RedshiftProperty.READ_ONLY.getBoolean(properties); } /** * @param readOnly if connection should be readonly * @see RedshiftProperty#READ_ONLY */ public void setReadOnly(boolean readOnly) { RedshiftProperty.READ_ONLY.set(properties, readOnly); } /** * @return The behavior when set read only * 
@see RedshiftProperty#READ_ONLY_MODE */ public String getReadOnlyMode() { return RedshiftProperty.READ_ONLY_MODE.get(properties); } /** * @param mode the behavior when set read only * @see RedshiftProperty#READ_ONLY_MODE */ public void setReadOnlyMode(String mode) { RedshiftProperty.READ_ONLY_MODE.set(properties, mode); } /** * @return true if driver should log unclosed connections * @see RedshiftProperty#LOG_UNCLOSED_CONNECTIONS */ public boolean getLogUnclosedConnections() { return RedshiftProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(properties); } /** * @param enabled true if driver should log unclosed connections * @see RedshiftProperty#LOG_UNCLOSED_CONNECTIONS */ public void setLogUnclosedConnections(boolean enabled) { RedshiftProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled); } /** * @return true if driver should log include detail in server error messages * @see RedshiftProperty#LOG_SERVER_ERROR_DETAIL */ public boolean getLogServerErrorDetail() { return RedshiftProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(properties); } /** * @param enabled true if driver should include detail in server error messages * @see RedshiftProperty#LOG_SERVER_ERROR_DETAIL */ public void setLogServerErrorDetail(boolean enabled) { RedshiftProperty.LOG_SERVER_ERROR_DETAIL.set(properties, enabled); } /** * @return assumed minimal server version * @see RedshiftProperty#ASSUME_MIN_SERVER_VERSION */ public String getAssumeMinServerVersion() { return RedshiftProperty.ASSUME_MIN_SERVER_VERSION.get(properties); } /** * @param minVersion assumed minimal server version * @see RedshiftProperty#ASSUME_MIN_SERVER_VERSION */ public void setAssumeMinServerVersion(String minVersion) { RedshiftProperty.ASSUME_MIN_SERVER_VERSION.set(properties, minVersion); } /** * @return JAAS application name * @see RedshiftProperty#JAAS_APPLICATION_NAME */ public String getJaasApplicationName() { return RedshiftProperty.JAAS_APPLICATION_NAME.get(properties); } /** * @param name JAAS application name * @see 
RedshiftProperty#JAAS_APPLICATION_NAME */ public void setJaasApplicationName(String name) { RedshiftProperty.JAAS_APPLICATION_NAME.set(properties, name); } /** * @return true if perform JAAS login before GSS authentication * @see RedshiftProperty#JAAS_LOGIN */ public boolean getJaasLogin() { return RedshiftProperty.JAAS_LOGIN.getBoolean(properties); } /** * @param doLogin true if perform JAAS login before GSS authentication * @see RedshiftProperty#JAAS_LOGIN */ public void setJaasLogin(boolean doLogin) { RedshiftProperty.JAAS_LOGIN.set(properties, doLogin); } /** * @return Kerberos server name * @see RedshiftProperty#KERBEROS_SERVER_NAME */ public String getKerberosServerName() { return RedshiftProperty.KERBEROS_SERVER_NAME.get(properties); } /** * @param serverName Kerberos server name * @see RedshiftProperty#KERBEROS_SERVER_NAME */ public void setKerberosServerName(String serverName) { RedshiftProperty.KERBEROS_SERVER_NAME.set(properties, serverName); } /** * @return true if use SPNEGO * @see RedshiftProperty#USE_SPNEGO */ public boolean getUseSpNego() { return RedshiftProperty.USE_SPNEGO.getBoolean(properties); } /** * @param use true if use SPNEGO * @see RedshiftProperty#USE_SPNEGO */ public void setUseSpNego(boolean use) { RedshiftProperty.USE_SPNEGO.set(properties, use); } /** * @return GSS mode: auto, sspi, or gssapi * @see RedshiftProperty#GSS_LIB */ public String getGssLib() { return RedshiftProperty.GSS_LIB.get(properties); } /** * @param lib GSS mode: auto, sspi, or gssapi * @see RedshiftProperty#GSS_LIB */ public void setGssLib(String lib) { RedshiftProperty.GSS_LIB.set(properties, lib); } /** * @return SSPI service class * @see RedshiftProperty#SSPI_SERVICE_CLASS */ public String getSspiServiceClass() { return RedshiftProperty.SSPI_SERVICE_CLASS.get(properties); } /** * @param serviceClass SSPI service class * @see RedshiftProperty#SSPI_SERVICE_CLASS */ public void setSspiServiceClass(String serviceClass) { 
RedshiftProperty.SSPI_SERVICE_CLASS.set(properties, serviceClass); } /** * @return if connection allows encoding changes * @see RedshiftProperty#ALLOW_ENCODING_CHANGES */ public boolean getAllowEncodingChanges() { return RedshiftProperty.ALLOW_ENCODING_CHANGES.getBoolean(properties); } /** * @param allow if connection allows encoding changes * @see RedshiftProperty#ALLOW_ENCODING_CHANGES */ public void setAllowEncodingChanges(boolean allow) { RedshiftProperty.ALLOW_ENCODING_CHANGES.set(properties, allow); } /** * @return socket factory class name * @see RedshiftProperty#SOCKET_FACTORY */ public String getSocketFactory() { return RedshiftProperty.SOCKET_FACTORY.get(properties); } /** * @param socketFactoryClassName socket factory class name * @see RedshiftProperty#SOCKET_FACTORY */ public void setSocketFactory(String socketFactoryClassName) { RedshiftProperty.SOCKET_FACTORY.set(properties, socketFactoryClassName); } /** * @return socket factory argument * @see RedshiftProperty#SOCKET_FACTORY_ARG */ public String getSocketFactoryArg() { return RedshiftProperty.SOCKET_FACTORY_ARG.get(properties); } /** * @param socketFactoryArg socket factory argument * @see RedshiftProperty#SOCKET_FACTORY_ARG */ public void setSocketFactoryArg(String socketFactoryArg) { RedshiftProperty.SOCKET_FACTORY_ARG.set(properties, socketFactoryArg); } /** * @param replication set to 'database' for logical replication or 'true' for physical replication * @see RedshiftProperty#REPLICATION */ public void setReplication(String replication) { RedshiftProperty.REPLICATION.set(properties, replication); } /** * @return 'select', "callIfNoReturn', or 'call' * @see RedshiftProperty#ESCAPE_SYNTAX_CALL_MODE */ public String getEscapeSyntaxCallMode() { return RedshiftProperty.ESCAPE_SYNTAX_CALL_MODE.get(properties); } /** * @param callMode the call mode to use for JDBC escape call syntax * @see RedshiftProperty#ESCAPE_SYNTAX_CALL_MODE */ public void setEscapeSyntaxCallMode(String callMode) { 
RedshiftProperty.ESCAPE_SYNTAX_CALL_MODE.set(properties, callMode); } /** * @return null, 'database', or 'true * @see RedshiftProperty#REPLICATION */ public String getReplication() { return RedshiftProperty.REPLICATION.get(properties); } /** * @return Log Level of the JDBC Driver * @see RedshiftProperty#LOG_LEVEL */ public String getLogLevel() { return RedshiftProperty.LOG_LEVEL.get(properties); } /** * Added for backward compatibility. * * @param level Log Level of the JDBC Driver * @see RedshiftProperty#LOG_LEVEL */ public void setLogLevel(String level) { RedshiftProperty.LOG_LEVEL.set(properties, level); } public String getLogDirectory() { ExpressionProperties exprProps = new ExpressionProperties(properties, System.getProperties()); return RedshiftProperty.LOG_PATH.get(exprProps); } /** * Added for backward compatibility. * * @param logDirectory output directory of the Logger. */ public void setLogDirectory(String logDirectory) { RedshiftProperty.LOG_PATH.set(properties, logDirectory); } /** * Generates a {@link DriverManager} URL from the other properties supplied. 
* * @return {@link DriverManager} URL from the other properties supplied */ public String getUrl() { StringBuilder url = new StringBuilder(100); url.append("jdbc:redshift://"); for (int i = 0; i < serverNames.length; i++) { if (i > 0) { url.append(","); } url.append(serverNames[i]); if (portNumbers != null && portNumbers.length >= i && portNumbers[i] != 0) { url.append(":").append(portNumbers[i]); } } url.append("/").append(URLCoder.encode(databaseName)); StringBuilder query = new StringBuilder(100); for (RedshiftProperty property : RedshiftProperty.values()) { if (property.isPresent(properties)) { if (query.length() != 0) { query.append("&"); } query.append(property.getName()); query.append("="); query.append(URLCoder.encode(property.get(properties))); } } if (query.length() > 0) { url.append("?"); url.append(query); } return url.toString(); } /** * Generates a {@link DriverManager} URL from the other properties supplied. * * @return {@link DriverManager} URL from the other properties supplied */ public String getURL() { return getUrl(); } /** * Sets properties from a {@link DriverManager} URL. * * @param url properties to set */ public void setUrl(String url) throws RedshiftException { Properties p = com.amazon.redshift.Driver.parseURL(url, null); if (p == null) { throw new IllegalArgumentException("URL invalid " + url); } for (RedshiftProperty property : RedshiftProperty.values()) { if (!this.properties.containsKey(property.getName())) { setProperty(property, property.get(p)); } } } /** * Sets properties from a {@link DriverManager} URL. * Added to follow convention used in other DBMS. 
* * @param url properties to set */ public void setURL(String url) throws RedshiftException { setUrl(url); } public String getProperty(String name) throws SQLException { RedshiftProperty pgProperty = RedshiftProperty.forName(name); if (pgProperty != null) { return getProperty(pgProperty); } else { throw new RedshiftException(GT.tr("Unsupported property name: {0}", name), RedshiftState.INVALID_PARAMETER_VALUE); } } public void setProperty(String name, String value) throws SQLException { RedshiftProperty pgProperty = RedshiftProperty.forName(name); if (pgProperty != null) { setProperty(pgProperty, value); } else { throw new RedshiftException(GT.tr("Unsupported property name: {0}", name), RedshiftState.INVALID_PARAMETER_VALUE); } } public String getProperty(RedshiftProperty property) { return property.get(properties); } public void setProperty(RedshiftProperty property, String value) { if (value == null) { return; } switch (property) { case HOST: setServerNames(value.split(",")); break; case PORT: String[] ps = value.split(","); int[] ports = new int[ps.length]; for (int i = 0 ; i < ps.length; i++) { try { ports[i] = Integer.parseInt(ps[i]); } catch (NumberFormatException e) { ports[i] = 0; } } setPortNumbers(ports); break; case DBNAME: setDatabaseName(value); break; case USER: case UID: setUser(value); break; case PASSWORD: case PWD: setPassword(value); break; default: properties.setProperty(property.getName(), value); } } /** * Generates a reference using the appropriate object factory. 
* * @return reference using the appropriate object factory */ protected Reference createReference() { return new Reference(getClass().getName(), RedshiftObjectFactory.class.getName(), null); } public Reference getReference() throws NamingException { Reference ref = createReference(); StringBuilder serverString = new StringBuilder(); for (int i = 0; i < serverNames.length; i++) { if (i > 0) { serverString.append(","); } String serverName = serverNames[i]; serverString.append(serverName); } ref.add(new StringRefAddr("serverName", serverString.toString())); StringBuilder portString = new StringBuilder(); for (int i = 0; i < portNumbers.length; i++) { if (i > 0) { portString.append(","); } int p = portNumbers[i]; portString.append(Integer.toString(p)); } ref.add(new StringRefAddr("portNumber", portString.toString())); ref.add(new StringRefAddr("databaseName", databaseName)); if (user != null) { ref.add(new StringRefAddr("user", user)); } if (password != null) { ref.add(new StringRefAddr("password", password)); } for (RedshiftProperty property : RedshiftProperty.values()) { if (property.isPresent(properties)) { ref.add(new StringRefAddr(property.getName(), property.get(properties))); } } return ref; } public void setFromReference(Reference ref) { databaseName = getReferenceProperty(ref, "databaseName"); String portNumberString = getReferenceProperty(ref, "portNumber"); if (portNumberString != null) { String[] ps = portNumberString.split(","); int[] ports = new int[ps.length]; for (int i = 0; i < ps.length; i++) { try { ports[i] = Integer.parseInt(ps[i]); } catch (NumberFormatException e) { ports[i] = 0; } } setPortNumbers(ports); } else { setPortNumbers(null); } setServerNames(getReferenceProperty(ref, "serverName").split(",")); for (RedshiftProperty property : RedshiftProperty.values()) { setProperty(property, getReferenceProperty(ref, property.getName())); } } private static String getReferenceProperty(Reference ref, String propertyName) { RefAddr addr = 
ref.get(propertyName); if (addr == null) { return null; } return (String) addr.getContent(); } protected void writeBaseObject(ObjectOutputStream out) throws IOException { out.writeObject(serverNames); out.writeObject(databaseName); out.writeObject(user); out.writeObject(password); out.writeObject(portNumbers); out.writeObject(properties); } protected void readBaseObject(ObjectInputStream in) throws IOException, ClassNotFoundException { serverNames = (String[]) in.readObject(); databaseName = (String) in.readObject(); user = (String) in.readObject(); password = (String) in.readObject(); portNumbers = (int[]) in.readObject(); properties = (Properties) in.readObject(); } public void initializeFrom(BaseDataSource source) throws IOException, ClassNotFoundException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); ObjectOutputStream oos = new ObjectOutputStream(baos); source.writeBaseObject(oos); oos.close(); ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); ObjectInputStream ois = new ObjectInputStream(bais); readBaseObject(ois); } /** * @return preferred query execution mode * @see RedshiftProperty#PREFER_QUERY_MODE */ public PreferQueryMode getPreferQueryMode() { return PreferQueryMode.of(RedshiftProperty.PREFER_QUERY_MODE.get(properties)); } /** * @param preferQueryMode extended, simple, extendedForPrepared, or extendedCacheEverything * @see RedshiftProperty#PREFER_QUERY_MODE */ public void setPreferQueryMode(PreferQueryMode preferQueryMode) { RedshiftProperty.PREFER_QUERY_MODE.set(properties, preferQueryMode.value()); } /** * @return connection configuration regarding automatic per-query savepoints * @see RedshiftProperty#AUTOSAVE */ public AutoSave getAutosave() { return AutoSave.of(RedshiftProperty.AUTOSAVE.get(properties)); } /** * @param autoSave connection configuration regarding automatic per-query savepoints * @see RedshiftProperty#AUTOSAVE */ public void setAutosave(AutoSave autoSave) { 
RedshiftProperty.AUTOSAVE.set(properties, autoSave.value()); } /** * @return connection configuration regarding throwing exception from commit if database rolls back the transaction * @see RedshiftProperty#RAISE_EXCEPTION_ON_SILENT_ROLLBACK */ public boolean isRaiseExceptionOnSilentRollback() { return RedshiftProperty.RAISE_EXCEPTION_ON_SILENT_ROLLBACK.getBoolean(properties); } /** * @param raiseExceptionOnSilentRollback if the database should throw exception if commit silently rolls back * @see RedshiftProperty#RAISE_EXCEPTION_ON_SILENT_ROLLBACK */ public void setRaiseExceptionOnSilentRollback(boolean raiseExceptionOnSilentRollback) { RedshiftProperty.RAISE_EXCEPTION_ON_SILENT_ROLLBACK.set(properties, raiseExceptionOnSilentRollback); } /** * see RedshiftProperty#CLEANUP_SAVEPOINTS * * @return boolean indicating property set */ public boolean getCleanupSavepoints() { return RedshiftProperty.CLEANUP_SAVEPOINTS.getBoolean(properties); } /** * see RedshiftProperty#CLEANUP_SAVEPOINTS * * @param cleanupSavepoints will cleanup savepoints after a successful transaction */ public void setCleanupSavepoints(boolean cleanupSavepoints) { RedshiftProperty.CLEANUP_SAVEPOINTS.set(properties, cleanupSavepoints); } /** * @return boolean indicating property is enabled or not. * @see RedshiftProperty#REWRITE_BATCHED_INSERTS */ public boolean getReWriteBatchedInserts() { return RedshiftProperty.REWRITE_BATCHED_INSERTS.getBoolean(properties); } /** * @param reWrite boolean value to set the property in the properties collection * @see RedshiftProperty#REWRITE_BATCHED_INSERTS */ public void setReWriteBatchedInserts(boolean reWrite) { RedshiftProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite); } /** * @return boolean indicating property is enabled or not. 
* @see RedshiftProperty#HIDE_UNPRIVILEGED_OBJECTS */ public boolean getHideUnprivilegedObjects() { return RedshiftProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(properties); } /** * @param hideUnprivileged boolean value to set the property in the properties collection * @see RedshiftProperty#HIDE_UNPRIVILEGED_OBJECTS */ public void setHideUnprivilegedObjects(boolean hideUnprivileged) { RedshiftProperty.HIDE_UNPRIVILEGED_OBJECTS.set(properties, hideUnprivileged); } public String getMaxResultBuffer() { return RedshiftProperty.MAX_RESULT_BUFFER.get(properties); } public void setMaxResultBuffer(String maxResultBuffer) { RedshiftProperty.MAX_RESULT_BUFFER.set(properties, maxResultBuffer); } //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.1" public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException { // java.util.logging.logger is not used in Redshift JDBC throw new SQLFeatureNotSupportedException ("java.util.logging is not used"); } //JCP! endif /* * Alias methods below, these are to help with ease-of-use with other database tools / frameworks * which expect normal java bean getters / setters to exist for the property names. 
*/ public boolean isSsl() { return getSsl(); } public String getSslfactoryarg() { return getSslFactoryArg(); } public void setSslfactoryarg(final String arg) { setSslFactoryArg(arg); } public String getSslcert() { return getSslCert(); } public void setSslcert(final String file) { setSslCert(file); } public String getSslmode() { return getSslMode(); } public void setSslmode(final String mode) { setSslMode(mode); } public String getSslhostnameverifier() { return getSslHostnameVerifier(); } public void setSslhostnameverifier(final String className) { setSslHostnameVerifier(className); } public String getSslkey() { return getSslKey(); } public void setSslkey(final String file) { setSslKey(file); } public String getSslrootcert() { return getSslRootCert(); } public void setSslrootcert(final String file) { setSslRootCert(file); } public String getSslpasswordcallback() { return getSslPasswordCallback(); } public void setSslpasswordcallback(final String className) { setSslPasswordCallback(className); } public String getSslpassword() { return getSslPassword(); } public void setSslpassword(final String sslpassword) { setSslPassword(sslpassword); } public int getRecvBufferSize() { return getReceiveBufferSize(); } public void setRecvBufferSize(final int nbytes) { setReceiveBufferSize(nbytes); } public boolean isAllowEncodingChanges() { return getAllowEncodingChanges(); } public boolean isLogUnclosedConnections() { return getLogUnclosedConnections(); } public boolean isTcpKeepAlive() { return getTcpKeepAlive(); } public boolean isReadOnly() { return getReadOnly(); } public boolean isDisableColumnSanitiser() { return getDisableColumnSanitiser(); } public boolean isLoadBalanceHosts() { return getLoadBalanceHosts(); } public boolean isCleanupSavePoints() { return getCleanupSavepoints(); } public void setCleanupSavePoints(final boolean cleanupSavepoints) { setCleanupSavepoints(cleanupSavepoints); } public boolean isReWriteBatchedInserts() { return getReWriteBatchedInserts(); } }
8,470
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jre7
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/jre7/sasl/ScramAuthenticator.java
/* * Copyright (c) 2017, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.jre7.sasl; import com.amazon.redshift.core.RedshiftStream; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import com.ongres.scram.client.ScramClient; import com.ongres.scram.client.ScramSession; import com.ongres.scram.common.exception.ScramException; import com.ongres.scram.common.exception.ScramInvalidServerSignatureException; import com.ongres.scram.common.exception.ScramParseException; import com.ongres.scram.common.exception.ScramServerErrorException; import com.ongres.scram.common.stringprep.StringPreparations; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; public class ScramAuthenticator { private final String user; private final String password; private final RedshiftStream pgStream; private ScramClient scramClient; private ScramSession scramSession; private ScramSession.ServerFirstProcessor serverFirstProcessor; private ScramSession.ClientFinalProcessor clientFinalProcessor; private interface BodySender { void sendBody(RedshiftStream pgStream) throws IOException; } private void sendAuthenticationMessage(int bodyLength, BodySender bodySender) throws IOException { pgStream.sendChar('p'); pgStream.sendInteger4(Integer.SIZE / Byte.SIZE + bodyLength); bodySender.sendBody(pgStream); pgStream.flush(); } public ScramAuthenticator(String user, String password, RedshiftStream pgStream) { this.user = user; this.password = password; this.pgStream = pgStream; } public void processServerMechanismsAndInit() throws IOException, RedshiftException { List<String> mechanisms = new ArrayList<>(); do { mechanisms.add(pgStream.receiveString()); } while (pgStream.peekChar() != 0); int c = pgStream.receiveChar(); assert c == 0; if (mechanisms.size() < 1) { throw new 
RedshiftException( GT.tr("No SCRAM mechanism(s) advertised by the server"), RedshiftState.CONNECTION_REJECTED ); } try { scramClient = ScramClient .channelBinding(ScramClient.ChannelBinding.NO) .stringPreparation(StringPreparations.NO_PREPARATION) .selectMechanismBasedOnServerAdvertised(mechanisms.toArray(new String[]{})) .setup(); } catch (IllegalArgumentException e) { throw new RedshiftException( GT.tr("Invalid or unsupported by client SCRAM mechanisms", e), RedshiftState.CONNECTION_REJECTED ); } /* if (logger.isLoggable(Level.FINEST)) { logger.log(Level.FINEST, " Using SCRAM mechanism {0}", scramClient.getScramMechanism().getName()); } */ scramSession = scramClient.scramSession("*"); // Real username is ignored by server, uses startup one } public void sendScramClientFirstMessage() throws IOException { String clientFirstMessage = scramSession.clientFirstMessage(); // logger.log(LogLevel.DEBUG, " FE=> SASLInitialResponse( {0} )", clientFirstMessage); String scramMechanismName = scramClient.getScramMechanism().getName(); final byte[] scramMechanismNameBytes = scramMechanismName.getBytes(StandardCharsets.UTF_8); final byte[] clientFirstMessageBytes = clientFirstMessage.getBytes(StandardCharsets.UTF_8); sendAuthenticationMessage( (scramMechanismNameBytes.length + 1) + 4 + clientFirstMessageBytes.length, new BodySender() { @Override public void sendBody(RedshiftStream pgStream) throws IOException { pgStream.send(scramMechanismNameBytes); pgStream.sendChar(0); // List terminated in '\0' pgStream.sendInteger4(clientFirstMessageBytes.length); pgStream.send(clientFirstMessageBytes); } } ); } public void processServerFirstMessage(int length) throws IOException, RedshiftException { String serverFirstMessage = pgStream.receiveString(length); // logger.log(Level.FINEST, " <=BE AuthenticationSASLContinue( {0} )", serverFirstMessage); try { serverFirstProcessor = scramSession.receiveServerFirstMessage(serverFirstMessage); } catch (ScramException e) { throw new 
RedshiftException( GT.tr("Invalid server-first-message: {0}", serverFirstMessage), RedshiftState.CONNECTION_REJECTED, e ); } /* if (logger.isLoggable(Level.FINEST)) { logger.log(Level.FINEST, " <=BE AuthenticationSASLContinue(salt={0}, iterations={1})", new Object[] { serverFirstProcessor.getSalt(), serverFirstProcessor.getIteration() } ); } */ clientFinalProcessor = serverFirstProcessor.clientFinalProcessor(password); String clientFinalMessage = clientFinalProcessor.clientFinalMessage(); // logger.log(Level.FINEST, " FE=> SASLResponse( {0} )", clientFinalMessage); final byte[] clientFinalMessageBytes = clientFinalMessage.getBytes(StandardCharsets.UTF_8); sendAuthenticationMessage( clientFinalMessageBytes.length, new BodySender() { @Override public void sendBody(RedshiftStream pgStream) throws IOException { pgStream.send(clientFinalMessageBytes); } } ); } public void verifyServerSignature(int length) throws IOException, RedshiftException { String serverFinalMessage = pgStream.receiveString(length); // logger.log(Level.FINEST, " <=BE AuthenticationSASLFinal( {0} )", serverFinalMessage); try { clientFinalProcessor.receiveServerFinalMessage(serverFinalMessage); } catch (ScramParseException e) { throw new RedshiftException( GT.tr("Invalid server-final-message: {0}", serverFinalMessage), RedshiftState.CONNECTION_REJECTED, e ); } catch (ScramServerErrorException e) { throw new RedshiftException( GT.tr("SCRAM authentication failed, server returned error: {0}", e.getError().getErrorMessage()), RedshiftState.CONNECTION_REJECTED, e ); } catch (ScramInvalidServerSignatureException e) { throw new RedshiftException( GT.tr("Invalid server SCRAM signature"), RedshiftState.CONNECTION_REJECTED, e ); } } }
8,471
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/osgi/RedshiftDataSourceFactory.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.osgi; import com.amazon.redshift.ds.common.BaseDataSource; import com.amazon.redshift.jdbc2.optional.ConnectionPool; import com.amazon.redshift.jdbc2.optional.PoolingDataSource; import com.amazon.redshift.jdbc2.optional.SimpleDataSource; import com.amazon.redshift.util.GT; import com.amazon.redshift.util.RedshiftException; import com.amazon.redshift.util.RedshiftState; import com.amazon.redshift.xa.RedshiftXADataSource; import org.osgi.service.jdbc.DataSourceFactory; import java.sql.SQLException; import java.util.Map.Entry; import java.util.Properties; import javax.sql.ConnectionPoolDataSource; import javax.sql.DataSource; import javax.sql.XADataSource; /** * This factory service is designed to be used in OSGi Enterprise environments to create and * configure JDBC data-sources. */ public class RedshiftDataSourceFactory implements DataSourceFactory { /** * A class that removes properties as they are used (without modifying the supplied initial * Properties). 
*/ private static class SingleUseProperties extends Properties { private static final long serialVersionUID = 1L; SingleUseProperties(Properties initialProperties) { super(); if (initialProperties != null) { putAll(initialProperties); } } @Override public String getProperty(String key) { String value = super.getProperty(key); remove(key); return value; } } private void configureBaseDataSource(BaseDataSource ds, Properties props) throws SQLException { if (props.containsKey(JDBC_URL)) { ds.setUrl(props.getProperty(JDBC_URL)); } if (props.containsKey(JDBC_SERVER_NAME)) { ds.setServerName(props.getProperty(JDBC_SERVER_NAME)); } if (props.containsKey(JDBC_PORT_NUMBER)) { ds.setPortNumber(Integer.parseInt(props.getProperty(JDBC_PORT_NUMBER))); } if (props.containsKey(JDBC_DATABASE_NAME)) { ds.setDatabaseName(props.getProperty(JDBC_DATABASE_NAME)); } if (props.containsKey(JDBC_USER)) { ds.setUser(props.getProperty(JDBC_USER)); } if (props.containsKey(JDBC_PASSWORD)) { ds.setPassword(props.getProperty(JDBC_PASSWORD)); } for (Entry<Object, Object> entry : props.entrySet()) { ds.setProperty((String) entry.getKey(), (String) entry.getValue()); } } public java.sql.Driver createDriver(Properties props) throws SQLException { if (props != null && !props.isEmpty()) { throw new RedshiftException(GT.tr("Unsupported properties: {0}", props.stringPropertyNames()), RedshiftState.INVALID_PARAMETER_VALUE); } return new com.amazon.redshift.Driver(); } private DataSource createPoolingDataSource(Properties props) throws SQLException { PoolingDataSource dataSource = new PoolingDataSource(); if (props.containsKey(JDBC_INITIAL_POOL_SIZE)) { dataSource.setInitialConnections(Integer.parseInt(props.getProperty(JDBC_INITIAL_POOL_SIZE))); } if (props.containsKey(JDBC_MAX_POOL_SIZE)) { dataSource.setMaxConnections(Integer.parseInt(props.getProperty(JDBC_MAX_POOL_SIZE))); } if (props.containsKey(JDBC_DATASOURCE_NAME)) { dataSource.setDataSourceName(props.getProperty(JDBC_DATASOURCE_NAME)); } 
configureBaseDataSource(dataSource, props); return dataSource; } private DataSource createSimpleDataSource(Properties props) throws SQLException { SimpleDataSource dataSource = new SimpleDataSource(); configureBaseDataSource(dataSource, props); return dataSource; } /** * Will create and return either a {@link SimpleDataSource} or a {@link PoolingDataSource} * depending on the presence in the supplied properties of any pool-related property (eg.: {@code * JDBC_INITIAL_POOL_SIZE} or {@code JDBC_MAX_POOL_SIZE}). */ public DataSource createDataSource(Properties props) throws SQLException { props = new SingleUseProperties(props); if (props.containsKey(JDBC_INITIAL_POOL_SIZE) || props.containsKey(JDBC_MIN_POOL_SIZE) || props.containsKey(JDBC_MAX_POOL_SIZE) || props.containsKey(JDBC_MAX_IDLE_TIME) || props.containsKey(JDBC_MAX_STATEMENTS)) { return createPoolingDataSource(props); } else { return createSimpleDataSource(props); } } public ConnectionPoolDataSource createConnectionPoolDataSource(Properties props) throws SQLException { props = new SingleUseProperties(props); ConnectionPool dataSource = new ConnectionPool(); configureBaseDataSource(dataSource, props); return dataSource; } public XADataSource createXADataSource(Properties props) throws SQLException { props = new SingleUseProperties(props); RedshiftXADataSource dataSource = new RedshiftXADataSource(); configureBaseDataSource(dataSource, props); return dataSource; } }
8,472
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/osgi/RedshiftBundleActivator.java
/* * Copyright (c) 2003, PostgreSQL Global Development Group * See the LICENSE file in the project root for more information. */ package com.amazon.redshift.osgi; import com.amazon.redshift.Driver; import org.osgi.framework.BundleActivator; import org.osgi.framework.BundleContext; import org.osgi.framework.ServiceRegistration; import org.osgi.service.jdbc.DataSourceFactory; import java.util.Dictionary; import java.util.Hashtable; /** * This class is an OSGi Bundle Activator and should only be used internally by the OSGi Framework. */ public class RedshiftBundleActivator implements BundleActivator { private ServiceRegistration<?> registration; public void start(BundleContext context) throws Exception { Dictionary<String, Object> properties = new Hashtable<String, Object>(); properties.put(DataSourceFactory.OSGI_JDBC_DRIVER_CLASS, Driver.class.getName()); properties.put(DataSourceFactory.OSGI_JDBC_DRIVER_NAME, com.amazon.redshift.util.DriverInfo.DRIVER_NAME); properties.put(DataSourceFactory.OSGI_JDBC_DRIVER_VERSION, com.amazon.redshift.util.DriverInfo.DRIVER_VERSION); try { registration = context.registerService(DataSourceFactory.class.getName(), new RedshiftDataSourceFactory(), properties); } catch (NoClassDefFoundError e) { String msg = e.getMessage(); if (msg != null && msg.contains("org/osgi/service/jdbc/DataSourceFactory")) { if (!Boolean.getBoolean("rsjdbc.osgi.debug")) { return; } new IllegalArgumentException("Unable to load DataSourceFactory. " + "Will ignore DataSourceFactory registration. If you need one, " + "ensure org.osgi.enterprise is on the classpath", e).printStackTrace(); // just ignore. Assume OSGi-enterprise is not loaded return; } throw e; } } public void stop(BundleContext context) throws Exception { if (registration != null) { registration.unregister(); registration = null; } if (Driver.isRegistered()) { Driver.deregister(); } } }
8,473
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/CommonCredentialsProvider.java
/** * Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * This file is licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. A copy of * the License is located at * * http://aws.amazon.com/apache2.0/ * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.amazon.redshift.plugin; import com.amazon.redshift.INativePlugin; import com.amazon.redshift.NativeTokenHolder; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.util.RedshiftException; import org.apache.commons.logging.LogFactory; import java.io.IOException; import java.net.URL; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.Map; import static com.amazonaws.util.StringUtils.isNullOrEmpty; public abstract class CommonCredentialsProvider extends IdpCredentialsProvider implements INativePlugin { private static final Map<String, NativeTokenHolder> m_cache = new HashMap<String, NativeTokenHolder>(); protected Boolean m_disableCache = true; private NativeTokenHolder m_lastRefreshCredentials; // Used when cache is disabled /** * Log properties file name. */ private static final String LOG_PROPERTIES_FILE_NAME = "log-factory.properties"; /** * Log properties file path. */ private static final String LOG_PROPERTIES_FILE_PATH = "META-INF/services/org.apache.commons.logging.LogFactory"; /** * A custom context class loader which allows us to control which LogFactory is loaded. * Our CUSTOM_LOG_FACTORY_CLASS will divert any wire logging to NoOpLogger to suppress wire * messages being logged. 
*/ private static final ClassLoader CONTEXT_CLASS_LOADER = new ClassLoader(CommonCredentialsProvider.class.getClassLoader()) { @Override public Class<?> loadClass(String name) throws ClassNotFoundException { Class<?> clazz = getParent().loadClass(name); return clazz; } @Override public Enumeration<URL> getResources(String name) throws IOException { if (LogFactory.FACTORY_PROPERTIES.equals(name)) { // make sure to not load any other commons-logging.properties files return Collections.enumeration(Collections.emptyList()); } return super.getResources(name); } @Override public URL getResource(String name) { if (LOG_PROPERTIES_FILE_PATH.equals(name)) { return CommonCredentialsProvider.class.getResource(LOG_PROPERTIES_FILE_NAME); } return super.getResource(name); } }; @Override public void addParameter(String key, String value) { if (RedshiftLogger.isEnable()) m_log.logDebug("add parameter key: {0}", key); } @Override public void setLogger(RedshiftLogger log) { m_log = log; } @Override public NativeTokenHolder getCredentials() throws RedshiftException { NativeTokenHolder credentials = null; if (!m_disableCache) { String key = getCacheKey(); credentials = m_cache.get(key); } if (credentials == null || credentials.isExpired()) { if (RedshiftLogger.isEnable()) { if (m_disableCache) { m_log.logInfo("Auth token Cache disabled : fetching new token"); } else { m_log.logInfo("Auth token Cache enabled - No auth token found from cache : fetching new token"); } } synchronized (this) { refresh(); if (m_disableCache) { credentials = m_lastRefreshCredentials; m_lastRefreshCredentials = null; } } } else { credentials.setRefresh(false); if (RedshiftLogger.isEnable()) m_log.logInfo("Auth token found from cache"); } if (!m_disableCache) { credentials = m_cache.get(getCacheKey()); } if (credentials == null) { m_log.logError("No credentials found"); throw new RedshiftException("There was an error during authentication."); } return credentials; } protected abstract NativeTokenHolder 
getAuthToken() throws IOException; @Override public void refresh() throws RedshiftException { // Get the current thread and set the context loader with our custom load class method. Thread currentThread = Thread.currentThread(); ClassLoader cl = currentThread.getContextClassLoader(); Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER); try { NativeTokenHolder authTokenHolder = getAuthToken(); authTokenHolder.setRefresh(true); if (!m_disableCache) m_cache.put(getCacheKey(), authTokenHolder); else m_lastRefreshCredentials = authTokenHolder; } catch (IOException ex) { if(RedshiftLogger.isEnable()) m_log.log(LogLevel.ERROR, ex, "IOException while refreshing token"); throw new RedshiftException(!isNullOrEmpty(ex.getMessage()) ? ex.getMessage() : "There was an error during authentication.", ex); } catch (Exception ex) { if (RedshiftLogger.isEnable()) m_log.log(LogLevel.ERROR, ex, "Exception while refreshing token"); throw new RedshiftException("There was an error during authentication.", ex); } finally { currentThread.setContextClassLoader(cl); } } @Override public String getIdpToken() throws RedshiftException { // Get the current thread and set the context loader with our custom load class method. Thread currentThread = Thread.currentThread(); ClassLoader cl = currentThread.getContextClassLoader(); Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER); try { NativeTokenHolder authTokenHolder = getAuthToken(); return authTokenHolder.getAccessToken(); } catch (IOException ex) { if(RedshiftLogger.isEnable()) m_log.log(LogLevel.ERROR, ex, "IOException during getIdpToken"); throw new RedshiftException(!isNullOrEmpty(ex.getMessage()) ? 
ex.getMessage() : "There was an error during authentication.", ex); } catch (Exception ex) { if (RedshiftLogger.isEnable()) m_log.log(LogLevel.ERROR, ex, "Exception during getIdpToken"); throw new RedshiftException("There was an error during authentication.", ex); } finally { currentThread.setContextClassLoader(cl); } } @Override public String getPluginSpecificCacheKey() { // This needs to be overridden this in each derived plugin. return ""; } @Override public String getCacheKey() { return getPluginSpecificCacheKey(); } }
8,474
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BrowserAzureOAuth2CredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.httpserver.RequestHandler;
import com.amazon.redshift.plugin.httpserver.Server;
import com.amazon.redshift.plugin.utils.RandomStateUtil;
import com.amazonaws.util.json.Jackson;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.http.HttpHeaders;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;

import java.awt.*;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

import static com.amazon.redshift.plugin.httpserver.RequestHandler.REDSHIFT_PATH;
import static com.amazon.redshift.plugin.utils.CheckUtils.*;
import static com.amazon.redshift.plugin.utils.ResponseUtils.findParameter;

/**
 * Class to get JWT Token from any IDP using OAuth 2.0 API.
 *
 * Flow: start a local HTTP listener, open the system browser on the Azure AD
 * /authorize endpoint, receive the authorization code on the listener, then
 * exchange the code at the /token endpoint and extract the access_token.
 */
public class BrowserAzureOAuth2CredentialsProvider extends JwtCredentialsProvider
{
    /**
     * Key for setting timeout for IDP response.
     */
    public static final String KEY_IDP_RESPONSE_TIMEOUT = "idp_response_timeout";

    /**
     * Key for setting the port number for listening.
     */
    public static final String KEY_LISTEN_PORT = "listen_port";

    /**
     * Key for setting idp tenant.
     */
    public static final String KEY_IDP_TENANT = "idp_tenant";

    /**
     * Key for setting client ID.
     */
    public static final String KEY_CLIENT_ID = "client_id";

    /**
     * Key for setting Scope.
     */
    public static final String KEY_SCOPE = "scope"; // "api://" + m_clientId + "/User.Read"

    /**
     * Key for setting state.
     */
    public static final String OAUTH_STATE_PARAMETER_NAME = "state";

    /**
     * Key for setting redirect URI.
     */
    public static final String OAUTH_REDIRECT_PARAMETER_NAME = "redirect_uri";

    /**
     * Key for setting code.
     */
    public static final String OAUTH_IDP_CODE_PARAMETER_NAME = "code";

    /**
     * Key for setting client ID.
     */
    public static final String OAUTH_CLIENT_ID_PARAMETER_NAME = "client_id";

    /**
     * Key for setting OAUTH response type.
     */
    public static final String OAUTH_RESPONSE_TYPE_PARAMETER_NAME = "response_type";

    /**
     * Key for setting requested token type.
     */
    public static final String OAUTH_REQUESTED_TOKEN_TYPE_PARAMETER_NAME = "requested_token_type";

    /**
     * Key for setting grant type.
     */
    public static final String OAUTH_GRANT_TYPE_PARAMETER_NAME = "grant_type";

    /**
     * Key for setting scope.
     */
    public static final String OAUTH_SCOPE_PARAMETER_NAME = "scope";

    /**
     * Key for setting resource.
     */
    public static final String OAUTH_RESOURCE_PARAMETER_NAME = "resource";

    /**
     * Key for setting response mode.
     */
    public static final String OAUTH_RESPONSE_MODE_PARAMETER_NAME = "response_mode";

    /**
     * String containing Microsoft IDP host.
     */
    private static final String MICROSOFT_IDP_HOST = "login.microsoftonline.com";

    /**
     * String containing HTTPS.
     */
    private static final String CURRENT_INTERACTION_SCHEMA = "https";

    /**
     * IDP tenant variable.
     */
    private String m_idp_tenant;

    /**
     * Client ID variable.
     */
    private String m_clientId;

    /**
     * Application Scope variable.
     */
    private String m_scope = "";

    /**
     * Default timeout for IDP response, in seconds.
     */
    private int m_idp_response_timeout = 120;

    /**
     * Default port for local server; 0 means pick a random free port.
     */
    private int m_listen_port = 0;

    /**
     * Redirect URI variable, built from the actually-bound listener port.
     */
    private String redirectUri;

    /**
     * Validates the mandatory connection properties before starting the flow.
     *
     * @throws IOException when a required property is missing or invalid
     */
    private void checkRequiredParameters() throws IOException
    {
        checkMissingAndThrows(m_idp_tenant, KEY_IDP_TENANT);
        checkMissingAndThrows(m_clientId, KEY_CLIENT_ID);
        checkAndThrowsWithMessage(
            m_idp_response_timeout < 10,
            KEY_IDP_RESPONSE_TIMEOUT + " should be 10 seconds or greater.");
        checkInvalidAndThrows(
            m_listen_port != 0 && (m_listen_port < 1 || m_listen_port > 65535),
            KEY_LISTEN_PORT);
    }

    /**
     * Overridden method to grab the JWT Response. Used in base class to refresh temporary credentials.
     *
     * @return Base64 encoded JWT Response string
     * @throws IOException indicating the error
     */
    @Override
    protected String getJwtAssertion() throws IOException
    {
        try
        {
            checkRequiredParameters();

            if (m_listen_port == 0)
            {
                m_log.logDebug("Listen port set to 0. Will pick random port");
            }

            String token = fetchAuthorizationToken();
            String content = fetchJwtResponse(token);
            String jwtAssertion = extractJwtAssertion(content);

            return jwtAssertion;
        }
        catch (InternalPluginException | URISyntaxException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(ex);
            // Wrap any exception to be compatible with JwtCredentialsProvider API
            throw new IOException(ex);
        }
    }

    /**
     * Overwritten method to grab the field parameters from JDBC connection string. This method calls the base class'
     * addParameter method and adds to it new specific parameters.
     *
     * @param key   parameter key passed to JDBC
     * @param value parameter value associated with the given key
     */
    @Override
    public void addParameter(String key, String value)
    {
        if (RedshiftLogger.isEnable())
            m_log.logDebug("key: {0}", key);

        switch (key)
        {
            case KEY_IDP_TENANT:
                m_idp_tenant = value;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_idp_tenant: {0}", m_idp_tenant);
                break;
            case KEY_CLIENT_ID:
                m_clientId = value;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_clientId: {0}", m_clientId);
                break;
            case KEY_SCOPE:
                m_scope = value;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_scope: {0}", m_scope);
                break;
            case KEY_IDP_RESPONSE_TIMEOUT:
                m_idp_response_timeout = Integer.parseInt(value);
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_idp_response_timeout: {0}", m_idp_response_timeout);
                break;
            case KEY_LISTEN_PORT:
                m_listen_port = Integer.parseInt(value);
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_listen_port: {0}", m_listen_port);
                break;
            default:
                super.addParameter(key, value);
        }
    }

    @Override
    public String getPluginSpecificCacheKey()
    {
        // Tenant + client id uniquely identify this provider configuration.
        return ((m_idp_tenant != null) ? m_idp_tenant : "")
            + ((m_clientId != null) ? m_clientId : "");
    }

    /**
     * First authentication phase:
     * <ol>
     * <li> Set the state in order to check if the incoming request belongs to the current authentication process.</li>
     * <li> Start the Socket Server at the {@linkplain BrowserAzureOAuth2CredentialsProvider#m_listen_port} port.</li>
     * <li> Open the default browser with the link asking a User to enter the credentials.</li>
     * <li> Retrieve the JWT Assertion string from the response. Decode it, format, validate and return.</li>
     * </ol>
     *
     * @return Authorization token
     */
    private String fetchAuthorizationToken() throws IOException, URISyntaxException
    {
        final String state = RandomStateUtil.generateRandomState();
        RequestHandler requestHandler =
            new RequestHandler(new Function<List<NameValuePair>, Object>()
            {
                @Override
                public Object apply(List<NameValuePair> nameValuePairs)
                {
                    // Reject responses that do not echo back our anti-CSRF state value.
                    String incomingState =
                        findParameter(OAUTH_STATE_PARAMETER_NAME, nameValuePairs);
                    if (!state.equals(incomingState))
                    {
                        return new InternalPluginException(
                            "Incoming state " + incomingState +
                                " does not match the outgoing state " + state);
                    }
                    String code = findParameter(OAUTH_IDP_CODE_PARAMETER_NAME, nameValuePairs);
                    if (isNullOrEmpty(code))
                    {
                        return new InternalPluginException("No valid code found");
                    }
                    return code;
                }
            });
        Server server =
            new Server(m_listen_port, requestHandler, Duration.ofSeconds(m_idp_response_timeout), m_log);
        server.listen();

        // The listener may have been bound to a random free port (m_listen_port == 0),
        // so build the redirect URI from the port actually in use.
        int localPort = server.getLocalPort();
        this.redirectUri = "http://localhost:" + localPort + REDSHIFT_PATH;

        try
        {
            if (RedshiftLogger.isEnable())
                // FIX: log the actually-bound port, not m_listen_port (which is 0 when random).
                m_log.log(LogLevel.DEBUG,
                    String.format("Listening for connection on port %d", localPort));
            openBrowser(state);
            server.waitForResult();
        }
        catch (URISyntaxException | IOException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(ex);
            server.stop();
            throw ex;
        }

        Object result = requestHandler.getResult();

        if (result instanceof InternalPluginException)
        {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("Error occurred while fetching JWT assertion: {0}", result);
            throw (InternalPluginException) result;
        }
        if (result instanceof String)
        {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.DEBUG, "Got authorization token of length={0}",
                    ((String) result).length());
            return (String) result;
        }

        if (RedshiftLogger.isEnable())
            m_log.logDebug("result: {0}", result);

        throw new InternalPluginException("Fail to login during timeout.");
    }

    /**
     * Initiates the request to the IDP and gets the response body
     *
     * @param token authorization token
     * @return Response body of the incoming response
     * @throws IOException indicating the error
     */
    private String fetchJwtResponse(String token) throws IOException
    {
        HttpPost post = createAuthorizationRequest(token);
        try (
            CloseableHttpClient client = getHttpClient();
            CloseableHttpResponse resp = client.execute(post))
        {
            String content = EntityUtils.toString(resp.getEntity());

            if (RedshiftLogger.isEnable())
            {
                // Mask token values before they hit the driver log.
                String maskedContent =
                    content.replaceAll(getRegexForJsonKey("access_token"), "$1***masked***\"");
                maskedContent =
                    maskedContent.replaceAll(getRegexForJsonKey("id_token"), "$1***masked***\"");
                m_log.log(LogLevel.DEBUG, "fetchJwtResponse https response:" + maskedContent);
            }

            checkAndThrowsWithMessage(
                resp.getStatusLine().getStatusCode() != 200,
                "Unexpected response: " + resp.getStatusLine().getReasonPhrase());
            return content;
        }
        catch (GeneralSecurityException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex.getMessage(), ex);
            throw new InternalPluginException(ex);
        }
    }

    /**
     * Get Base 64 encoded JWT assertion from the response body
     *
     * @param content response body
     * @return string containing Base 64 encoded JWT assertion
     */
    private String extractJwtAssertion(String content)
    {
        String encodedJwtAssertion;
        JsonNode accessTokenField = Jackson.jsonNodeOf(content).findValue("access_token");
        checkAndThrowsWithMessage(accessTokenField == null, "Failed to find access_token");
        encodedJwtAssertion = accessTokenField.textValue();
        checkAndThrowsWithMessage(
            isNullOrEmpty(encodedJwtAssertion),
            "Invalid access_token value.");
        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG, "Successfully got JWT assertion");
        return encodedJwtAssertion;
    }

    /**
     * Populates request URI and parameters.
     *
     * @param authorizationCode authorization authorizationCode
     * @return object containing the request data
     * @throws IOException indicating the error
     */
    private HttpPost createAuthorizationRequest(String authorizationCode) throws IOException
    {
        URIBuilder builder = new URIBuilder().setScheme(CURRENT_INTERACTION_SCHEMA)
            .setHost(MICROSOFT_IDP_HOST)
            .setPath("/" + m_idp_tenant + "/oauth2/v2.0/token");

        String tokenRequestUrl = builder.toString();
        String scope = "openid " + m_scope;
        validateURL(tokenRequestUrl);
        HttpPost post = new HttpPost(tokenRequestUrl);
        final List<BasicNameValuePair> parameters = new ArrayList<>();
        parameters.add(new BasicNameValuePair(OAUTH_IDP_CODE_PARAMETER_NAME, authorizationCode));
        /*
        parameters.add(
            new BasicNameValuePair(
                OAUTH_REQUESTED_TOKEN_TYPE_PARAMETER_NAME,
                "urn:ietf:params:oauth:token-type:jwt"));
        */
        parameters.add(new BasicNameValuePair(OAUTH_RESPONSE_TYPE_PARAMETER_NAME, "token"));
        parameters
            .add(new BasicNameValuePair(OAUTH_GRANT_TYPE_PARAMETER_NAME, "authorization_code"));
        parameters.add(new BasicNameValuePair(OAUTH_SCOPE_PARAMETER_NAME, scope));
        // parameters.add(new BasicNameValuePair(OAUTH_RESOURCE_PARAMETER_NAME, m_clientId));
        parameters.add(new BasicNameValuePair(OAUTH_CLIENT_ID_PARAMETER_NAME, m_clientId));
        parameters.add(new BasicNameValuePair(OAUTH_REDIRECT_PARAMETER_NAME, redirectUri));

        post.addHeader(
            HttpHeaders.CONTENT_TYPE,
            ContentType.APPLICATION_FORM_URLENCODED.toString());
        post.addHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_JSON.toString());
        post.setEntity(new UrlEncodedFormEntity(parameters, StandardCharsets.UTF_8));

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format(
                    "Request token URI: \n%s\nredirectUri:%s", tokenRequestUrl, redirectUri)
            );

        return post;
    }

    /**
     * Opens the default browser with the authorization request to the IDP.
     *
     * @param state anti-CSRF state value echoed back by the IdP
     * @throws URISyntaxException when the authorize URL cannot be built
     * @throws IOException indicating the error
     */
    private void openBrowser(String state) throws URISyntaxException, IOException
    {
        String scope = "openid " + m_scope;
        URIBuilder builder = new URIBuilder().setScheme(CURRENT_INTERACTION_SCHEMA)
            .setHost(MICROSOFT_IDP_HOST)
            .setPath("/" + m_idp_tenant + "/oauth2/v2.0/authorize")
            .addParameter(OAUTH_SCOPE_PARAMETER_NAME, scope)
            .addParameter(OAUTH_RESPONSE_TYPE_PARAMETER_NAME, "code")
            .addParameter(OAUTH_RESPONSE_MODE_PARAMETER_NAME, "form_post")
            .addParameter(OAUTH_CLIENT_ID_PARAMETER_NAME, m_clientId)
            .addParameter(OAUTH_REDIRECT_PARAMETER_NAME, redirectUri)
            .addParameter(OAUTH_STATE_PARAMETER_NAME, state);
        URI authorizeRequestUrl;
        authorizeRequestUrl = builder.build();
        validateURL(authorizeRequestUrl.toString());
        Desktop.getDesktop().browse(authorizeRequestUrl);

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format("Authorization code request URI: \n%s", authorizeRequestUrl.toString()));
    }
}
8,475
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BasicJwtCredentialsProvider.java
package com.amazon.redshift.plugin;

import java.io.IOException;

/**
 * A basic JWT credential provider class. This class can be changed and implemented to work with
 * any desired JWT service provider.
 *
 * The JWT is supplied verbatim through the {@code webIdentityToken} connection property and
 * handed unchanged to the {@link JwtCredentialsProvider} base class, which exchanges it for
 * temporary credentials.
 */
public class BasicJwtCredentialsProvider extends JwtCredentialsProvider
{
    /** Connection-property key carrying the raw JWT. */
    private static final String WEB_IDENTITY_TOKEN_PROPERTY = "webIdentityToken";

    /** Raw JWT supplied by the caller; null until the property is seen. */
    private String webIdentityToken;

    /**
     * Optional default constructor.
     */
    public BasicJwtCredentialsProvider()
    {
        // The token arrives ready-made from the caller, so caching adds nothing.
        m_disableCache = true;
    }

    @Override
    public String getPluginSpecificCacheKey()
    {
        // The token itself is the only configuration, so it doubles as the cache key.
        return webIdentityToken;
    }

    /**
     * Captures the {@code webIdentityToken} property and delegates every key to the base class,
     * which handles all other documented Redshift JDBC connection properties.
     *
     * @param key   property name from the JDBC connection string
     * @param value property value for the given key
     */
    @Override
    public void addParameter(String key, String value)
    {
        super.addParameter(key, value);
        if (WEB_IDENTITY_TOKEN_PROPERTY.equalsIgnoreCase(key))
        {
            webIdentityToken = value;
        }
    }

    /**
     * Returns the JWT string exactly as supplied by the caller. The parent class uses it to
     * obtain temporary credentials. A specific deployment could decode the token here and act
     * on custom claims before returning it.
     *
     * @return the JWT string
     * @throws IOException when the required {@code webIdentityToken} property was never set
     */
    @Override
    protected String getJwtAssertion() throws IOException
    {
        if (isNullOrEmpty(webIdentityToken))
        {
            throw new IOException("Missing required property: " + WEB_IDENTITY_TOKEN_PROPERTY);
        }
        return webIdentityToken;
    }
}
8,476
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/SamlCredentialsProvider.java
package com.amazon.redshift.plugin; import com.amazonaws.ClientConfiguration; import com.amazonaws.SdkClientException; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.AnonymousAWSCredentials; import com.amazonaws.auth.BasicSessionCredentials; import com.amazonaws.services.securitytoken.AWSSecurityTokenService; import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; import com.amazonaws.services.securitytoken.model.AssumeRoleWithSAMLRequest; import com.amazonaws.services.securitytoken.model.AssumeRoleWithSAMLResult; import com.amazonaws.services.securitytoken.model.Credentials; import com.amazonaws.util.StringUtils; import com.amazon.redshift.CredentialsHolder; import com.amazon.redshift.CredentialsHolder.IamMetadata; import com.amazon.redshift.IPlugin; import com.amazon.redshift.RedshiftProperty; import com.amazon.redshift.core.IamHelper; import com.amazon.redshift.httpclient.log.IamCustomLogFactory; import com.amazon.redshift.logger.LogLevel; import com.amazon.redshift.logger.RedshiftLogger; import com.amazon.redshift.plugin.utils.RequestUtils; import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.MalformedURLException; import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.Date; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import javax.xml.xpath.XPath; import javax.xml.xpath.XPathConstants; import javax.xml.xpath.XPathExpressionException; import javax.xml.xpath.XPathFactory; import 
org.apache.commons.codec.binary.Base64; import org.apache.commons.logging.LogFactory; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; public abstract class SamlCredentialsProvider extends IdpCredentialsProvider implements IPlugin { protected static final String KEY_IDP_HOST = "idp_host"; private static final String KEY_IDP_PORT = "idp_port"; private static final String KEY_DURATION = "duration"; private static final String KEY_PREFERRED_ROLE = "preferred_role"; protected String m_userName; protected String m_password; protected String m_idpHost; protected int m_idpPort = 443; protected int m_duration; protected String m_preferredRole; protected String m_dbUser; protected String m_dbGroups; protected String m_dbGroupsFilter; protected Boolean m_forceLowercase; protected Boolean m_autoCreate; protected String m_stsEndpoint; protected String m_region; protected Boolean m_disableCache = false; protected Boolean m_groupFederation = false; private static Map<String, CredentialsHolder> m_cache = new HashMap<String, CredentialsHolder>(); private CredentialsHolder m_lastRefreshCredentials; // Used when cache is disable. /** * The custom log factory class. */ private static final Class<?> CUSTOM_LOG_FACTORY_CLASS = IamCustomLogFactory.class; /** * Log properties file name. */ private static final String LOG_PROPERTIES_FILE_NAME = "log-factory.properties"; /** * Log properties file path. */ private static final String LOG_PROPERTIES_FILE_PATH = "META-INF/services/org.apache.commons.logging.LogFactory"; /** * A custom context class loader which allows us to control which LogFactory is loaded. * Our CUSTOM_LOG_FACTORY_CLASS will divert any wire logging to NoOpLogger to suppress wire * messages being logged. 
*/ private static final ClassLoader CONTEXT_CLASS_LOADER = new ClassLoader( SamlCredentialsProvider.class.getClassLoader()) { @Override public Class<?> loadClass(String name) throws ClassNotFoundException { Class<?> clazz = getParent().loadClass(name); if (org.apache.commons.logging.LogFactory.class.isAssignableFrom(clazz)) { return CUSTOM_LOG_FACTORY_CLASS; } return clazz; } @Override public Enumeration<URL> getResources(String name) throws IOException { if (LogFactory.FACTORY_PROPERTIES.equals(name)) { // make sure not load any other commons-logging.properties files return Collections.enumeration(Collections.<URL>emptyList()); } return super.getResources(name); } @Override public URL getResource(String name) { if (LOG_PROPERTIES_FILE_PATH.equals(name)) { return SamlCredentialsProvider.class.getResource(LOG_PROPERTIES_FILE_NAME); } return super.getResource(name); } }; protected abstract String getSamlAssertion() throws IOException; @Override public void addParameter(String key, String value) { if (RedshiftLogger.isEnable()) m_log.logDebug("key: {0}", key); if (RedshiftProperty.UID.getName().equalsIgnoreCase(key) || RedshiftProperty.USER.getName().equalsIgnoreCase(key)) { m_userName = value; } else if (RedshiftProperty.PWD.getName().equalsIgnoreCase(key) || RedshiftProperty.PASSWORD.getName().equalsIgnoreCase(key)) { m_password = value; } else if (KEY_IDP_HOST.equalsIgnoreCase(key)) { m_idpHost = value; } else if (KEY_IDP_PORT.equalsIgnoreCase(key)) { m_idpPort = Integer.parseInt(value); } else if (KEY_DURATION.equalsIgnoreCase(key)) { m_duration = Integer.parseInt(value); } else if (KEY_PREFERRED_ROLE.equalsIgnoreCase(key)) { m_preferredRole = value; } else if (KEY_SSL_INSECURE.equalsIgnoreCase(key)) { m_sslInsecure = Boolean.parseBoolean(value); } else if (RedshiftProperty.DB_USER.getName().equalsIgnoreCase(key)) { m_dbUser = value; } else if (RedshiftProperty.DB_GROUPS.getName().equalsIgnoreCase(key)) { m_dbGroups = value; } else if 
(RedshiftProperty.DB_GROUPS_FILTER.getName().equalsIgnoreCase(key)) { m_dbGroupsFilter = value; } else if (RedshiftProperty.FORCE_LOWERCASE.getName().equalsIgnoreCase(key)) { m_forceLowercase = Boolean.valueOf(value); } else if (RedshiftProperty.USER_AUTOCREATE.getName().equalsIgnoreCase(key)) { m_autoCreate = Boolean.valueOf(value); } else if (RedshiftProperty.AWS_REGION.getName().equalsIgnoreCase(key)) { m_region = value; } else if (RedshiftProperty.STS_ENDPOINT_URL.getName().equalsIgnoreCase(key)) { m_stsEndpoint = value; } else if (RedshiftProperty.IAM_DISABLE_CACHE.getName().equalsIgnoreCase(key)) { m_disableCache = Boolean.valueOf(value); } } @Override public void setLogger(RedshiftLogger log) { m_log = log; } @Override public int getSubType() { return IamHelper.SAML_PLUGIN; } @Override public CredentialsHolder getCredentials() { CredentialsHolder credentials = null; if(!m_disableCache) { String key = getCacheKey(); credentials = m_cache.get(key); } if (credentials == null || credentials.isExpired()) { if(RedshiftLogger.isEnable()) m_log.logInfo("SAML getCredentials NOT from cache"); synchronized(this) { refresh(); if(m_disableCache) { credentials = m_lastRefreshCredentials; m_lastRefreshCredentials = null; } } } else { credentials.setRefresh(false); if(RedshiftLogger.isEnable()) m_log.logInfo("SAML getCredentials from cache"); } if(!m_disableCache) { // if the SAML response has dbUser argument, it will be picked up at this point. credentials = m_cache.get(getCacheKey()); } // if dbUser argument has been passed in the connection string, add it to metadata. 
if (!StringUtils.isNullOrEmpty(m_dbUser)) { credentials.getThisMetadata().setDbUser(this.m_dbUser); } if (credentials == null) { throw new SdkClientException("Unable to load AWS credentials from ADFS"); } if(RedshiftLogger.isEnable()) { Date now = new Date(); m_log.logInfo(now + ": Using entry for SamlCredentialsProvider.getCredentials cache with expiration " + credentials.getExpiration()); } return credentials; } @Override public void refresh() { // Get the current thread and set the context loader with our custom load class method. Thread currentThread = Thread.currentThread(); ClassLoader cl = currentThread.getContextClassLoader(); Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER); try { String samlAssertion = getSamlAssertion(); if (RedshiftLogger.isEnable()) m_log.logDebug("SamlCredentialsProvider: Received SAML assertion of length={0}", samlAssertion != null ? samlAssertion.length() : -1); final Pattern SAML_PROVIDER_PATTERN = Pattern.compile("arn:aws[-a-z]*:iam::\\d*:saml-provider/\\S+"); final Pattern ROLE_PATTERN = Pattern.compile("arn:aws[-a-z]*:iam::\\d*:role/\\S+"); Document doc = parse(Base64.decodeBase64(samlAssertion)); XPath xPath = XPathFactory.newInstance().newXPath(); String expression = "//*[local-name()='Attribute'][@Name='https://aws.amazon.com/SAML/Attributes/Role']/*[local-name()='AttributeValue']/text()"; NodeList nodeList = (NodeList) xPath.compile(expression) .evaluate(doc, XPathConstants.NODESET); Map<String, String> roles = new HashMap<String, String>(); if (nodeList != null) { for (int i = 0; i < nodeList.getLength(); ++i) { Node node = nodeList.item(i); String value = node.getNodeValue(); String[] arns = value.split(","); if (arns.length >= 2) { String provider = null; String role = null; for (String arn : arns) { Matcher providerMatcher = SAML_PROVIDER_PATTERN.matcher(arn); if (providerMatcher.find()) { provider = providerMatcher.group(0); continue; } Matcher roleMatcher = ROLE_PATTERN.matcher(arn); if 
(roleMatcher.find()) { role = roleMatcher.group(0); } } if (!StringUtils.isNullOrEmpty(role) && !StringUtils.isNullOrEmpty(provider)) { roles.put(role, provider); } } } } if (roles.isEmpty()) { throw new SdkClientException("No role found in SamlAssertion: " + samlAssertion); } String roleArn; String principal; if (m_preferredRole != null) { roleArn = m_preferredRole; principal = roles.get(m_preferredRole); if (principal == null) { throw new SdkClientException("Preferred role not found in SamlAssertion: " + samlAssertion); } } else { Map.Entry<String, String> entry = roles.entrySet().iterator().next(); roleArn = entry.getKey(); principal = entry.getValue(); } AssumeRoleWithSAMLRequest samlRequest = new AssumeRoleWithSAMLRequest(); samlRequest.setSAMLAssertion(samlAssertion); samlRequest.setRoleArn(roleArn); samlRequest.setPrincipalArn(principal); if (m_duration > 0) { samlRequest.setDurationSeconds(m_duration); } AWSCredentialsProvider p = new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()); AWSSecurityTokenServiceClientBuilder builder = AWSSecurityTokenServiceClientBuilder.standard(); ClientConfiguration config = null; builder.withClientConfiguration(config); AWSSecurityTokenService stsSvc = RequestUtils.buildSts(m_stsEndpoint, m_region, builder, p, m_log); AssumeRoleWithSAMLResult result = stsSvc.assumeRoleWithSAML(samlRequest); Credentials cred = result.getCredentials(); Date expiration = cred.getExpiration(); AWSCredentials c = new BasicSessionCredentials(cred.getAccessKeyId(), cred.getSecretAccessKey(), cred.getSessionToken()); CredentialsHolder credentials = CredentialsHolder.newInstance(c, expiration); credentials.setMetadata(readMetadata(doc)); credentials.setRefresh(true); if(!m_disableCache) m_cache.put(getCacheKey(), credentials); else m_lastRefreshCredentials = credentials; } catch (IOException e) { if (RedshiftLogger.isEnable()) m_log.logError(e); throw new SdkClientException("SAML error: " + e.getMessage(), e); } catch (SAXException e) { if 
(RedshiftLogger.isEnable()) m_log.logError(e); throw new SdkClientException("SAML error: " + e.getMessage(), e); } catch (ParserConfigurationException e) { if (RedshiftLogger.isEnable()) m_log.logError(e); throw new SdkClientException("SAML error: " + e.getMessage(), e); } catch (XPathExpressionException e) { if (RedshiftLogger.isEnable()) m_log.logError(e); throw new SdkClientException("SAML error: " + e.getMessage(), e); } catch (Exception e) { if (RedshiftLogger.isEnable()) m_log.logError(e); throw new SdkClientException("SAML error: " + e.getMessage(), e); } finally { currentThread.setContextClassLoader(cl); } } @Override public String getPluginSpecificCacheKey() { // Override this in each derived plugin such as Azure, Browser, Okta, Ping etc. return ""; } @Override public String getIdpToken() { String samlAssertion = null; // Get the current thread and set the context loader with our custom load class method. Thread currentThread = Thread.currentThread(); ClassLoader cl = currentThread.getContextClassLoader(); Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER); try { samlAssertion = getSamlAssertion(); if (RedshiftLogger.isEnable()) m_log.logDebug("SamlCredentialsProvider: Got SAML assertion of " + "length={0}", samlAssertion != null ? 
samlAssertion.length() : -1); } catch (IOException e) { if (RedshiftLogger.isEnable()) m_log.logError(e); throw new SdkClientException("SAML error: " + e.getMessage(), e); } catch (Exception e) { if (RedshiftLogger.isEnable()) m_log.logError(e); throw new SdkClientException("SAML error: " + e.getMessage(), e); } finally { currentThread.setContextClassLoader(cl); } return samlAssertion; } @Override public void setGroupFederation(boolean groupFederation) { m_groupFederation = groupFederation; } @Override public String getCacheKey() { String pluginSpecificKey = getPluginSpecificCacheKey(); return m_userName + m_password + m_idpHost + m_idpPort + m_duration + m_preferredRole + pluginSpecificKey; } private IamMetadata readMetadata(Document doc) throws XPathExpressionException { IamMetadata metadata = new IamMetadata(); XPath xPath = XPathFactory.newInstance().newXPath(); List<String> attributeValues = GetSAMLAttributeValues(xPath, doc, "https://redshift.amazon.com/SAML/Attributes/AllowDbUserOverride"); if (!attributeValues.isEmpty()) { metadata.setAllowDbUserOverride(Boolean.valueOf(attributeValues.get(0))); } attributeValues = GetSAMLAttributeValues(xPath, doc, "https://redshift.amazon.com/SAML/Attributes/DbUser"); if (!attributeValues.isEmpty()) { metadata.setSamlDbUser(attributeValues.get(0)); } else { attributeValues = GetSAMLAttributeValues(xPath, doc, "https://aws.amazon.com/SAML/Attributes/RoleSessionName"); if (!attributeValues.isEmpty()) { metadata.setSamlDbUser(attributeValues.get(0)); } } attributeValues = GetSAMLAttributeValues(xPath, doc, "https://redshift.amazon.com/SAML/Attributes/AutoCreate"); if (!attributeValues.isEmpty()) { metadata.setAutoCreate(Boolean.valueOf(attributeValues.get(0))); } attributeValues = GetSAMLAttributeValues(xPath, doc, "https://redshift.amazon.com/SAML/Attributes/DbGroups"); if (!attributeValues.isEmpty()) { attributeValues = filterOutGroups(attributeValues); if (!attributeValues.isEmpty()) { StringBuilder sb = new 
StringBuilder(); for (String value : attributeValues) { if (sb.length() > 0) { sb.append(','); } sb.append(value); } metadata.setDbGroups(sb.toString()); } } attributeValues = GetSAMLAttributeValues(xPath, doc, "https://redshift.amazon.com/SAML/Attributes/ForceLowercase"); if (!attributeValues.isEmpty()) { metadata.setForceLowercase(Boolean.valueOf(attributeValues.get(0))); } return metadata; } /** * Method removes all groups from given lists matching {@link m_dbGroupsFilter} * regex. * @param attributeValues in * @return attributeValues filtered */ private List<String> filterOutGroups(List<String> attributeValues) { if ( m_dbGroupsFilter != null ) { final Pattern groupsFilter = Pattern.compile(m_dbGroupsFilter); List<String> ret = new ArrayList<>(); for (String attributeValue : attributeValues) { m_log.logDebug("Check group {0} with regexp {1}", attributeValue, m_dbGroupsFilter); if (!groupsFilter.matcher(attributeValue).matches()) { m_log.logDebug("Add {0} to dbgroups", attributeValue); ret.add(attributeValue); } } return ret; } else { return attributeValues; } } private static Document parse(byte[] samlAssertion) throws IOException, SAXException, ParserConfigurationException { DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); factory.setXIncludeAware(false); factory.setExpandEntityReferences(false); factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false); factory.setFeature("http://xml.org/sax/features/external-general-entities", false); DocumentBuilder db = factory.newDocumentBuilder(); return db.parse(new ByteArrayInputStream(samlAssertion)); } private static List<String> GetSAMLAttributeValues(XPath xPath, Document doc, String attributeName) throws XPathExpressionException { String expression = String.format("//Attribute[@Name='%s']/AttributeValue/text()", attributeName); NodeList nodeList = (NodeList) 
xPath.compile(expression).evaluate(doc, XPathConstants.NODESET); if (null == nodeList || nodeList.getLength() == 0) { return Collections.emptyList(); } List<String> attributeValues = new ArrayList<String>(nodeList.getLength()); for (int i = 0; i < nodeList.getLength(); ++i) { Node node = nodeList.item(i); attributeValues.add(node.getNodeValue()); } return attributeValues; } protected List<String> getInputTagsfromHTML(String body) { Set<String> distinctInputTags = new HashSet<>(); List<String> inputTags = new ArrayList<String>(); Pattern inputTagPattern = Pattern.compile("<input(.+?)/>", Pattern.DOTALL); Matcher inputTagMatcher = inputTagPattern.matcher(body); while (inputTagMatcher.find()) { String tag = inputTagMatcher.group(0); String tagNameLower = getValueByKey(tag, "name").toLowerCase(); if (!tagNameLower.isEmpty() && distinctInputTags.add(tagNameLower)) { inputTags.add(tag); } } return inputTags; } protected String getFormAction(String body) { Pattern pattern = Pattern.compile("<form.*?action=\"([^\"]+)\""); Matcher m = pattern.matcher(body); if (m.find()) { return escapeHtmlEntity(m.group(1)); } return null; } protected String getValueByKey(String input, String key) { Pattern keyValuePattern = Pattern.compile("(" + Pattern.quote(key) + ")\\s*=\\s*\"(.*?)\""); Matcher keyValueMatcher = keyValuePattern.matcher(input); if (keyValueMatcher.find()) { return escapeHtmlEntity(keyValueMatcher.group(2)); } return ""; } protected String getValueByKeyWithoutQuotesAndValueInSingleQuote(String input, String key) { Pattern keyValuePattern = Pattern.compile("(" + key + ")\\s*=\\s*'(.*?)'"); Matcher keyValueMatcher = keyValuePattern.matcher(input); if (keyValueMatcher.find()) { return escapeHtmlEntity(keyValueMatcher.group(2)); } return ""; } /** * Escape certain HTML entities for the given input string. * * @param html The string to escape. * @return The string with the special HTML entities escaped. 
*/ protected String escapeHtmlEntity(String html) { StringBuilder sb = new StringBuilder(html.length()); int i = 0; int length = html.length(); while (i < length) { char c = html.charAt(i); if (c != '&') { sb.append(c); i++; continue; } if (html.startsWith("&amp;", i)) { sb.append('&'); i += 5; } else if (html.startsWith("&apos;", i)) { sb.append('\''); i += 6; } else if (html.startsWith("&quot;", i)) { sb.append('"'); i += 6; } else if (html.startsWith("&lt;", i)) { sb.append('<'); i += 4; } else if (html.startsWith("&gt;", i)) { sb.append('>'); i += 4; } else { sb.append(c); ++i; } } return sb.toString(); } protected void checkRequiredParameters() throws IOException { if (StringUtils.isNullOrEmpty(m_userName)) { throw new IOException("Missing required property: " + RedshiftProperty.USER.getName()); } if (StringUtils.isNullOrEmpty(m_password)) { throw new IOException("Missing required property: " + RedshiftProperty.PASSWORD.getName()); } if (StringUtils.isNullOrEmpty(m_idpHost)) { throw new IOException("Missing required property: " + KEY_IDP_HOST); } } protected boolean isText(String inputTag) { String typeVal = getValueByKey(inputTag, "type"); if(typeVal == null || typeVal.length() == 0) { typeVal = getValueByKeyWithoutQuotesAndValueInSingleQuote(inputTag, "type"); } return "text".equals(typeVal); } protected boolean isPassword(String inputTag) { String typeVal = getValueByKey(inputTag, "type"); if(typeVal == null || typeVal.length() == 0) { typeVal = getValueByKeyWithoutQuotesAndValueInSingleQuote(inputTag, "type"); } return "password".equals(typeVal); } }
8,477
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/PingCredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazonaws.SdkClientException;
import com.amazonaws.util.IOUtils;
import com.amazonaws.util.StringUtils;
import java.io.IOException;
import java.net.URI;
import java.net.URLEncoder;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.http.HttpEntity;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;

import static java.lang.String.format;

/**
 * SAML credentials provider for PingFederate (Ping Identity).
 *
 * Performs a form-based login against the Ping SSO endpoint: it GETs the
 * login page, scrapes the HTML form fields, fills in username/password,
 * POSTs the form back, and extracts the SAMLResponse value from the result.
 */
public class PingCredentialsProvider extends SamlCredentialsProvider
{
    // Extracts the SAMLResponse hidden-input value from the post-login page.
    private static final Pattern SAML_PATTERN =
        Pattern.compile("SAMLResponse\\W+value=\"([^\"]+)\"");

    /**
     * Property for specifying partner SpId.
     */
    private static final String KEY_PARTNER_SPID = "partner_spid";

    /**
     * String to hold value of partner SpId.
     */
    protected String m_partnerSpId;

    /**
     * Captures the Ping-specific {@code partner_spid} property; all other
     * keys are delegated to the SAML base class.
     *
     * @param key   connection-property name (case-insensitive).
     * @param value connection-property value.
     */
    @Override
    public void addParameter(String key, String value)
    {
        super.addParameter(key, value);
        if (KEY_PARTNER_SPID.equalsIgnoreCase(key))
        {
            m_partnerSpId = value;
        }
    }

    /**
     * @return the partner SpId (or "") as the plugin-specific cache-key part.
     */
    @Override
    public String getPluginSpecificCacheKey()
    {
        return ((m_partnerSpId != null) ? m_partnerSpId : "") ;
    }

    /**
     * Logs into PingFederate and returns the base64 SAML assertion.
     *
     * Flow: GET the startSSO page, locate the username/password inputs among
     * the form's input tags (carrying all other hidden fields through
     * unchanged), POST the filled form, then regex-extract the SAMLResponse.
     *
     * @return the SAMLResponse value scraped from the post-login page.
     * @throws IOException on HTTP failure, unparseable login form, duplicate
     *                     password fields, or missing SAMLResponse.
     */
    @Override
    protected String getSamlAssertion() throws IOException
    {
        checkRequiredParameters();

        // If no value was specified for m_partnerSpid use the AWS default.
        if (StringUtils.isNullOrEmpty(m_partnerSpId))
        {
            // Already URL-encoded form of "urn:amazon:webservices".
            m_partnerSpId = "urn%3Aamazon%3Awebservices";
        }
        else
        {
            // Ensure that the string is properly encoded.
            m_partnerSpId = URLEncoder.encode(m_partnerSpId, "UTF-8");
        }

        String uri = "https://" + m_idpHost + ':' + m_idpPort +
            "/idp/startSSO.ping?PartnerSpId=" + m_partnerSpId;
        CloseableHttpClient client = null;
        List<NameValuePair> parameters = new ArrayList<NameValuePair>(5);

        try
        {
            CloseableHttpResponse resp;

            if (RedshiftLogger.isEnable())
                m_log.logDebug("uri: {0}", uri);

            validateURL(uri);
            client = getHttpClient();
            HttpGet get = new HttpGet(uri);
            resp = client.execute(get);
            if (resp.getStatusLine().getStatusCode() != 200)
            {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.DEBUG, "getSamlAssertion https response:" + EntityUtils.toString(resp.getEntity()));
                throw new IOException(
                    "Failed send request: " + resp.getStatusLine().getReasonPhrase());
            }

            HttpEntity entity = resp.getEntity();
            String body = EntityUtils.toString(entity);

            // username/pass are filled in from connection properties;
            // password_tag remembers which tag matched, for conflict reporting.
            BasicNameValuePair username = null;
            BasicNameValuePair pass = null;
            String password_tag = null;

            if (RedshiftLogger.isEnable())
                m_log.logDebug("body: {0}", body);

            for (String inputTag : getInputTagsfromHTML(body))
            {
                String name = getValueByKey(inputTag, "name");
                String id = getValueByKey(inputTag, "id");
                String value = getValueByKey(inputTag, "value");

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("name: {0} , id: {1}", name, id);

                // First text input whose name/id looks like a username field.
                if (username == null
                    && (("username".equals(id))
                        || ("pf.username".equals(id))
                        || ("username".equals(name))
                        || ("pf.username".equals(name))
                        )
                    && isText(inputTag))
                {
                    username = new BasicNameValuePair(name, m_userName);
                }
                else if (("pf.pass".equals(name) || name.contains("pass") ) && isPassword(inputTag))
                {
                    // A second matching password field is ambiguous — abort.
                    if (pass != null)
                    {
                        if (RedshiftLogger.isEnable())
                        {
                            m_log.log(LogLevel.DEBUG, format("pass field: %s " +
                                "has conflict with field: %s", password_tag, inputTag));
                            m_log.log(LogLevel.DEBUG, body);
                        }
                        throw new IOException("Duplicate password fields on " +
                            "login page.");
                    }

                    password_tag = inputTag;
                    pass = new BasicNameValuePair(name, m_password);
                }
                else if (!StringUtils.isNullOrEmpty(name))
                {
                    // Pass every other named field (hidden inputs etc.) through.
                    parameters.add(new BasicNameValuePair(name, value));
                }
            }

            // Fallback: looser matching for the username field (email-style logins).
            if( username == null )
            {
                for (String inputTag : getInputTagsfromHTML(body))
                {
                    String name = getValueByKey(inputTag, "name");
                    if (RedshiftLogger.isEnable())
                    {
                        m_log.log(LogLevel.DEBUG, format("inputTag: %s " +
                            "has name with field: %s", inputTag, name));
                    }

                    if (("email".equals(name)
                         || name.contains("user")
                         || name.contains("email"))
                        && isText(inputTag))
                    {
                        username = new BasicNameValuePair(name, m_userName);
                    }
                }
            }

            if (username == null || pass == null)
            {
                boolean noUserName = (username == null);
                boolean noPass = (pass == null);
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.DEBUG, body);
                throw new IOException("Failed to parse login form. noUserName = " + noUserName + " noPass=" + noPass);
            }

            parameters.add(username);
            parameters.add(pass);

            // Relative form actions are resolved against the IdP host.
            String action = getFormAction(body);
            if (!StringUtils.isNullOrEmpty(action) && action.startsWith("/"))
            {
                uri = "https://" + m_idpHost + ':' + m_idpPort + action;
            }

            if (RedshiftLogger.isEnable())
                m_log.logDebug("action uri: {0}", uri);

            validateURL(uri);
            HttpPost post = new HttpPost(uri);
            post.setEntity(new UrlEncodedFormEntity(parameters));
            resp = client.execute(post);
            if (resp.getStatusLine().getStatusCode() != 200)
            {
                throw new IOException(
                    "Failed send request: " + resp.getStatusLine().getReasonPhrase());
            }

            String content = EntityUtils.toString(resp.getEntity());
            Matcher matcher = SAML_PATTERN.matcher(content);
            if (!matcher.find())
            {
                throw new IOException("Failed to retrieve SAMLAssertion.");
            }

            return matcher.group(1);
        }
        catch (GeneralSecurityException e)
        {
            throw new SdkClientException("Failed create SSLContext.", e);
        }
        finally
        {
            IOUtils.closeQuietly(client, null);
        }
    }
}
8,478
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/InternalPluginException.java
package com.amazon.redshift.plugin;

/**
 * All plugin exceptional state.
 * <p>
 * At the end would be wrapped into {@link java.io.IOException} for API compatibility reason.
 */
public class InternalPluginException extends RuntimeException
{
    // RuntimeException is Serializable; pin the serial form explicitly so
    // future field changes don't silently break serialized compatibility.
    private static final long serialVersionUID = 1L;

    /**
     * Constructor.
     *
     * @param message Error message.
     */
    public InternalPluginException(String message)
    {
        super(message);
    }

    /**
     * Constructor.
     *
     * @param message Error message.
     * @param cause   Throwable object.
     */
    public InternalPluginException(String message, Throwable cause)
    {
        super(message, cause);
    }

    /**
     * Constructor.
     *
     * @param cause Throwable object.
     */
    public InternalPluginException(Throwable cause)
    {
        super(cause);
    }

    /**
     * Wrap Exception in this class.
     *
     * @param ex Exception object.
     * @return instance of this class with {@code ex} as its cause.
     */
    public static InternalPluginException wrap(Exception ex)
    {
        return new InternalPluginException(ex);
    }
}
8,479
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BasicNativeSamlCredentialsProvider.java
package com.amazon.redshift.plugin; import com.amazon.redshift.INativePlugin; import java.io.IOException; /** * A Basic Native SAML credential provider class. This class can be changed and implemented to work with * any desired SAML service provider. */ public class BasicNativeSamlCredentialsProvider extends BrowserSamlCredentialsProvider { /** * Here we are defining a new connection property key called "saml_assertion". This property * will be specific to the BasicNativeSamlCredentialsProvider and will be used to provide some * information through the connection string. * <p> * This means that a user wanting to use this credential provider may include the following in * the connection string: * <p> * <code> * jdbc:redshift:iam://[host]:[port]/[database]?saml_assertion=[value] * </code> * <p> * If your implementation requires user input through the connection string, this is how you * can define the connection property name. You can add as many new connection properties as * needed following the same pattern: * <p> * <code> * public static final String PROPERTY_NAME = "key_name"; * </code> * <p> * The restrictions on "key_name" are: * <p> * - The name must be unique. It can not match any existing connection property key name in * the Redshift JDBC driver. The connection property names are case-insensitive, so even * if the case does not match what is found in the documentation, it is not allowed. * <p> * - The key name may not have any spaces. * <p> * - The key name may only contain the characters [a-z]|[A-Z] or underscore '_'. * */ public static final String KEY_SAML_ASSERTION = "saml_assertion"; /** * This field will store the value given with the associated connection property key. * <p> * If you are adding additional connection property keys, you will need to define additional * fields to hold those values. */ private String samlAssertion; /** * Optional default constructor. 
*/ public BasicNativeSamlCredentialsProvider() { } /** * This method is used to get the values associated with different connection string properties. * <p> * We override it in this custom credentials provider to add a check for any additional * connection properties that were added, which are not included in the existing Redshift JDBC * driver. It allows us to store these values using the appropriate fields as mentioned above. * <p> * For any new connection property keys added to this class, add an if-condition to check, if * the current key matches the connection property key, store the value associated with the key * in the appropriate field. * <p> * If no new connection property keys are required, you may leave the implementation blank and * simply return a call to the parent class implementation. * <p> * Please see the example below. * * @param key A string representing the connection property key. * @param value The value associated with the connection property key. */ @Override public void addParameter(String key, String value) { // The parent class will take care of setting up all other connection properties which are // mentioned in the Redshift JDBC driver documentation. super.addParameter(key, value); // Add if-condition checks for any connection properties which are specific to your // implementation of this custom SAML credentials provider. if (KEY_SAML_ASSERTION.equalsIgnoreCase(key)) { samlAssertion = value; } } /** * This method needs to return the SAML assertion string returned by the specific SAML provider * being used for this implementation. How you get this string will depend on the specific SAML * provider you are using. * <p> * This will be used by the SamlCredentialsProvider parent class to get the temporary credentials. * * @return The SAML assertion string. * @throws IOException no error as such. It's an overridden method. 
*/ @Override protected String getSamlAssertion() throws IOException { /* * If you wish to make a connection property required, you can check that the associated * field has been populated, and if not, throw an IOException. * if (StringUtils.isNullOrEmpty(samlAssertion)) * { * throw new IOException("Missing required property: " + KEY_SAML_ASSERTION); * } */ return samlAssertion; } }
8,480
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BrowserIdcAuthPlugin.java
/**
 * Copyright 2010-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * <p>
 * This file is licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License. A copy of
 * the License is located at
 * <p>
 * http://aws.amazon.com/apache2.0/
 * <p>
 * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package com.amazon.redshift.plugin;

import com.amazon.redshift.NativeTokenHolder;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazonaws.services.ssooidc.AWSSSOOIDC;
import com.amazonaws.services.ssooidc.AWSSSOOIDCClientBuilder;
import com.amazonaws.services.ssooidc.model.*;
import com.amazonaws.util.StringUtils;

import java.awt.*;
import java.io.IOException;
import java.net.URI;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import static com.amazonaws.util.StringUtils.isNullOrEmpty;

/**
 * Class to get IdC Token from AWS Identity Center (IdC).
 * <p>
 * Implements the OAuth 2.0 device authorization grant: the plugin registers a
 * client with IAM Identity Center, starts a device authorization, opens the
 * verification URI in the user's default browser, and then polls the service
 * for a token until the user completes authentication in the browser or the
 * configured response timeout elapses.
 */
public class BrowserIdcAuthPlugin extends CommonCredentialsProvider {

    /**
     * Key for setting AWS access portal start URL.
     */
    private static final String KEY_START_URL = "start_url";

    /**
     * Key for setting IdC client display name
     */
    private static final String KEY_IDC_CLIENT_DISPLAY_NAME = "idc_client_display_name";

    /**
     * Key for setting IdC region
     */
    private static final String KEY_IDC_REGION = "idc_region";

    /**
     * Key for setting IdC browser auth timeout value
     */
    private static final String KEY_IDC_RESPONSE_TIMEOUT = "idc_response_timeout";

    private static final String M_CLIENT_TYPE = "public";
    private static final String M_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:device_code";
    private static final String M_SCOPE = "redshift:connect";

    // Client registrations cached by "<displayName>:<region>" and reused until
    // clientSecretExpiresAt passes. ConcurrentHashMap because this cache is
    // static and multiple connections may authenticate concurrently (the
    // original HashMap was not thread-safe). A lost race merely re-registers.
    private static final Map<String, RegisterClientResult> m_register_client_cache =
            new ConcurrentHashMap<String, RegisterClientResult>();

    /**
     * The default time in seconds for which the client must wait between attempts when polling for a session
     * It is used if auth server doesn't provide any value for {@code interval} in start device authorization response
     */
    public final int REQUEST_CREATE_TOKEN_DEFAULT_INTERVAL = 1;

    // Fallback token lifetime used when the server response carries no expiresIn value.
    public final int DEFAULT_IDC_TOKEN_EXPIRY_IN_SEC = 900;

    protected AWSSSOOIDC m_sdk_client;

    private String m_idcRegion;
    private String m_startUrl;
    private String m_idcClientDisplayName = RedshiftProperty.IDC_CLIENT_DISPLAY_NAME.getDefaultValue();
    private int m_idcResponseTimeout = 120; // seconds; overridable via idc_response_timeout

    public BrowserIdcAuthPlugin() {
    }

    public BrowserIdcAuthPlugin(AWSSSOOIDC client) {
        m_sdk_client = client;
    }

    /**
     * Overridden method to grab the field parameters from JDBC connection string or extended params provided by user.
     * This method calls the base class' addParameter method and adds to it new specific parameters.
     *
     * @param key parameter key passed to JDBC driver
     * @param value parameter value associated with the given key
     */
    @Override
    public void addParameter(String key, String value) {
        switch (key) {
            case KEY_START_URL:
                m_startUrl = value;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("Setting start_url: {0}", m_startUrl);
                break;
            case KEY_IDC_REGION:
                m_idcRegion = value;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("Setting idc_region: {0}", m_idcRegion);
                break;
            case KEY_IDC_CLIENT_DISPLAY_NAME:
                if (!StringUtils.isNullOrEmpty(value))
                    m_idcClientDisplayName = value;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("Setting idc_client_display_name: {0}", m_idcClientDisplayName);
                break;
            case KEY_IDC_RESPONSE_TIMEOUT:
                if (!StringUtils.isNullOrEmpty(value)) {
                    // NOTE(review): a non-numeric value will throw NumberFormatException to the caller.
                    int timeout = Integer.parseInt(value);
                    if (timeout >= 10) { // minimum allowed timeout value is 10 secs (>= so that 10 itself is accepted)
                        m_idcResponseTimeout = timeout;
                        if (RedshiftLogger.isEnable())
                            m_log.logDebug("Setting idc_response_timeout: {0}", m_idcResponseTimeout);
                    } else { // below the minimum: keep the default timeout value
                        if (RedshiftLogger.isEnable())
                            m_log.logDebug("Setting idc_response_timeout={0}; provided value={1}", m_idcResponseTimeout, timeout);
                    }
                }
                break;
            default:
                super.addParameter(key, value);
        }
    }

    /**
     * @return The cache key against which the idc token holder is stored, specific to this plugin
     */
    @Override
    public String getPluginSpecificCacheKey() {
        return ((m_startUrl != null) ? m_startUrl : "");
    }

    /**
     * Overridden method to obtain the auth token from plugin specific implementation
     *
     * @return {@link NativeTokenHolder} A wrapper containing auth token and its expiration time information
     * @throws IOException indicating the error
     */
    @Override
    protected NativeTokenHolder getAuthToken() throws IOException {
        return getIdcToken();
    }

    /**
     * Plugin implementation method to grab the IdC token from AWS IAM Identity Center.
     *
     * @return {@link NativeTokenHolder} A wrapper containing IdC token and its expiration time information
     * @throws IOException indicating the error
     */
    protected NativeTokenHolder getIdcToken() throws IOException {
        try {
            checkRequiredParameters();
            m_sdk_client = AWSSSOOIDCClientBuilder.standard().withRegion(m_idcRegion).build();

            RegisterClientResult registerClientResult =
                    getRegisterClientResult(m_idcClientDisplayName, M_CLIENT_TYPE);

            StartDeviceAuthorizationResult startDeviceAuthorizationResult =
                    getStartDeviceAuthorizationResult(registerClientResult.getClientId(),
                            registerClientResult.getClientSecret(), m_startUrl);

            openBrowser(startDeviceAuthorizationResult.getVerificationUriComplete());

            CreateTokenResult createTokenResult = fetchTokenResult(registerClientResult,
                    startDeviceAuthorizationResult, M_GRANT_TYPE, M_SCOPE);

            return processCreateTokenResult(createTokenResult);
        } catch (InternalPluginException ex) {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex, "InternalPluginException in getIdcToken");
            // Wrap any exception to be compatible with CommonCredentialsProvider API
            throw new IOException(ex.getMessage(), ex);
        }
    }

    // Validates that both mandatory connection parameters were supplied before
    // any network call is attempted.
    private void checkRequiredParameters() throws InternalPluginException {
        if (isNullOrEmpty(m_startUrl)) {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("IdC authentication failed: start_url needs to be provided in connection params");
            throw new InternalPluginException("IdC authentication failed: The start URL must be included in the connection parameters.");
        }
        if (isNullOrEmpty(m_idcRegion)) {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("IdC authentication failed: idc_region needs to be provided in connection params");
            throw new InternalPluginException("IdC authentication failed: The IdC region must be included in the connection parameters.");
        }
    }

    /**
     * Registers a client with IAM Identity Center. This allows clients to initiate device authorization.
     * The output is persisted for reuse through many authentication requests.
     *
     * @param clientName The friendly name of the client
     * @param clientType The type of client. The service supports only {@code public} as a client type
     * @return {@link RegisterClientResult} Client registration result containing {@code clientId} and {@code clientSecret} required for device authorization
     * @throws IOException if an error occurs during the involved API call
     */
    protected RegisterClientResult getRegisterClientResult(String clientName, String clientType) throws IOException {
        String registerClientCacheKey = clientName + ":" + m_idcRegion;

        RegisterClientResult cachedRegisterClientResult = m_register_client_cache.get(registerClientCacheKey);
        if (isCachedRegisterClientResultValid(cachedRegisterClientResult)) {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("Using cached register client result");
            return cachedRegisterClientResult;
        }

        RegisterClientRequest registerClientRequest = new RegisterClientRequest();
        registerClientRequest.withClientName(clientName);
        registerClientRequest.withClientType(clientType);
        registerClientRequest.withScopes(M_SCOPE);

        RegisterClientResult registerClientResult = null;
        try {
            registerClientResult = m_sdk_client.registerClient(registerClientRequest);
            if (RedshiftLogger.isEnable())
                m_log.logDebug("registerClient response code: {0}",
                        registerClientResult.getSdkHttpMetadata().getHttpStatusCode());
        } catch (InternalServerException ex) {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex, "Error: Unexpected server error while registering client;");
            throw new IOException("IdC authentication failed : An error occurred during the request.", ex);
        } catch (Exception ex) {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex, "Error: Unexpected register client error;");
            throw new IOException("IdC authentication failed : There was an error during authentication.", ex);
        }

        m_register_client_cache.put(registerClientCacheKey, registerClientResult);
        return registerClientResult;
    }

    // A cached registration is reusable only while its client secret has not expired
    // (clientSecretExpiresAt is in epoch seconds; compare in millis).
    private boolean isCachedRegisterClientResultValid(RegisterClientResult cachedRegisterClientResult) {
        if (cachedRegisterClientResult == null || cachedRegisterClientResult.getClientSecretExpiresAt() == null) {
            return false;
        }
        return System.currentTimeMillis() < cachedRegisterClientResult.getClientSecretExpiresAt() * 1000;
    }

    /**
     * Initiates device authorization by requesting a pair of verification codes from the IAM Identity Center
     *
     * @param clientId The unique identifier string for the client that is registered with IAM Identity Center.
     * @param clientSecret A secret string that is generated for the client.
     * @param startUrl The URL for the AWS access portal
     * @return {@link StartDeviceAuthorizationResult} Device Authorization result containing {@code deviceCode} for creating token
     * @throws IOException if an error occurs during the involved API call
     */
    protected StartDeviceAuthorizationResult getStartDeviceAuthorizationResult(String clientId, String clientSecret, String startUrl) throws IOException {
        StartDeviceAuthorizationRequest startDeviceAuthorizationRequest = new StartDeviceAuthorizationRequest();
        startDeviceAuthorizationRequest.withClientId(clientId);
        startDeviceAuthorizationRequest.withClientSecret(clientSecret);
        startDeviceAuthorizationRequest.withStartUrl(startUrl);

        StartDeviceAuthorizationResult startDeviceAuthorizationResult = null;
        try {
            startDeviceAuthorizationResult = m_sdk_client.startDeviceAuthorization(startDeviceAuthorizationRequest);
            if (RedshiftLogger.isEnable())
                m_log.logDebug("startDeviceAuthorization response code: {0}",
                        startDeviceAuthorizationResult.getSdkHttpMetadata().getHttpStatusCode());
        } catch (SlowDownException ex) {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex, "Error: Too frequent requests made by client;");
            throw new IOException("IdC authentication failed : Requests to the IdC service are too frequent.", ex);
        } catch (InternalServerException ex) {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex, "Error: Server error in start device authorization;");
            throw new IOException("IdC authentication failed : An error occurred during the request.", ex);
        } catch (Exception ex) {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex, "Error: Unexpected error in start device authorization;");
            throw new IOException("IdC authentication failed : There was an error during authentication.", ex);
        }

        return startDeviceAuthorizationResult;
    }

    // Opens the complete verification URI in the user's default browser so they
    // can approve the device authorization.
    protected void openBrowser(String verificationUri) throws IOException {
        validateURL(verificationUri);
        Desktop.getDesktop().browse(URI.create(verificationUri));
        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG, String.format("Authorization code request URI: \n%s", verificationUri));
    }

    /**
     * Creates and returns an access token for the authorized client.
     * The access token issued will be used to fetch short-term credentials for the assigned roles in the AWS account.
     *
     * @param clientId The unique identifier string for each client
     * @param clientSecret A secret string generated for the client
     * @param deviceCode Used only when calling this API for the device code grant type. This short-term code is used to identify this authentication attempt
     * @param grantType Supports grant types for the device code request
     * @param scope The list of scopes that is defined by the client. Upon authorization, this list is used to restrict permissions when granting an access token
     * @return {@link CreateTokenResult} Create token result containing IdC token
     */
    protected CreateTokenResult getCreateTokenResult(String clientId, String clientSecret, String deviceCode, String grantType, String... scope) {
        CreateTokenRequest createTokenRequest = new CreateTokenRequest();
        createTokenRequest.withClientId(clientId);
        createTokenRequest.withClientSecret(clientSecret);
        createTokenRequest.withDeviceCode(deviceCode);
        createTokenRequest.withGrantType(grantType);
        createTokenRequest.withScope(scope);
        return m_sdk_client.createToken(createTokenRequest);
    }

    /**
     * Polls IAM Identity Center for the access token until the user finishes the
     * browser flow or {@code m_idcResponseTimeout} elapses, sleeping the
     * server-suggested (or default) interval between attempts.
     *
     * @param registerClientResult the client registration used for the token request
     * @param startDeviceAuthorizationResult device authorization carrying the device code and polling interval
     * @param grantType OAuth grant type for the token request
     * @param scope scope requested for the token
     * @return the successful {@link CreateTokenResult}
     * @throws IOException on timeout, interruption, or any service error
     */
    protected CreateTokenResult fetchTokenResult(RegisterClientResult registerClientResult, StartDeviceAuthorizationResult startDeviceAuthorizationResult, String grantType, String scope) throws IOException {
        long pollingEndTime = System.currentTimeMillis() + m_idcResponseTimeout * 1000L;

        int pollingIntervalInSec = REQUEST_CREATE_TOKEN_DEFAULT_INTERVAL;
        if (startDeviceAuthorizationResult.getInterval() != null && startDeviceAuthorizationResult.getInterval() > 0) {
            pollingIntervalInSec = startDeviceAuthorizationResult.getInterval(); // min wait time between attempts
        }

        // poll for create token with pollingIntervalInSec wait time between each attempt until pollingEndTime
        while (System.currentTimeMillis() < pollingEndTime) {
            CreateTokenResult createTokenResult = null;
            boolean authorizationPending = false;
            try {
                createTokenResult = getCreateTokenResult(registerClientResult.getClientId(),
                        registerClientResult.getClientSecret(),
                        startDeviceAuthorizationResult.getDeviceCode(), grantType, scope);
            } catch (AuthorizationPendingException ex) {
                // Expected while the user has not yet finished the browser flow; keep polling.
                authorizationPending = true;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("Browser authorization pending from user");
            } catch (SlowDownException ex) {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.ERROR, ex, "Error: Too frequent createToken requests made by client;");
                throw new IOException("IdC authentication failed : Requests to the IdC service are too frequent.", ex);
            } catch (AccessDeniedException ex) {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.ERROR, ex, "Error: Access denied, please ensure app assignment is done for the user;");
                throw new IOException("IdC authentication failed : You don't have sufficient permission to perform the action.", ex);
            } catch (InternalServerException ex) {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.ERROR, ex, "Error: Server error in creating token;");
                throw new IOException("IdC authentication failed : An error occurred during the request.", ex);
            } catch (Exception ex) {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.ERROR, ex, "Error: Unexpected error in create token;");
                throw new IOException("IdC authentication failed : There was an error during authentication.", ex);
            }

            if (!authorizationPending) {
                // Null-check BEFORE dereferencing (the original logged the HTTP status
                // first, which would NPE on a null result), and throw the "couldn't be
                // created" error outside the try so the generic catch cannot mask it.
                if (createTokenResult != null && createTokenResult.getAccessToken() != null) {
                    if (RedshiftLogger.isEnable())
                        m_log.logDebug("createToken response code: {0}",
                                createTokenResult.getSdkHttpMetadata().getHttpStatusCode());
                    return createTokenResult;
                }
                // auth server sent a non exception response without valid token, so throw error
                if (RedshiftLogger.isEnable())
                    m_log.logError("Failed to fetch an IdC access token");
                throw new IOException("IdC authentication failed : The credential token couldn't be created.");
            }

            try {
                Thread.sleep(pollingIntervalInSec * 1000L);
            } catch (InterruptedException ex) {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.ERROR, ex, "Thread interrupted during sleep");
                // Restore the interrupt status so callers can observe the cancellation,
                // and abort instead of spinning on an already-interrupted thread.
                Thread.currentThread().interrupt();
                throw new IOException("IdC authentication failed : The request was interrupted.", ex);
            }
        }

        if (RedshiftLogger.isEnable())
            m_log.logError("Error: Request timed out while waiting for user authentication in the browser");
        throw new IOException("IdC authentication failed : The request timed out. Authentication wasn't completed.");
    }

    /**
     * Converts a successful token response into a {@link NativeTokenHolder},
     * computing the expiration from the server-provided lifetime or the
     * {@code DEFAULT_IDC_TOKEN_EXPIRY_IN_SEC} fallback.
     *
     * @param createTokenResult successful create-token response
     * @return wrapper holding the IdC access token and its expiry
     * @throws IOException declared for API compatibility with callers
     */
    protected NativeTokenHolder processCreateTokenResult(CreateTokenResult createTokenResult) throws IOException {
        String idcToken = createTokenResult.getAccessToken();

        int expiresInSecs;
        if (createTokenResult.getExpiresIn() != null && createTokenResult.getExpiresIn() > 0) {
            expiresInSecs = createTokenResult.getExpiresIn();
        } else {
            expiresInSecs = DEFAULT_IDC_TOKEN_EXPIRY_IN_SEC;
        }
        Date expiration = new Date(System.currentTimeMillis() + expiresInSecs * 1000L);

        return NativeTokenHolder.newInstance(idcToken, expiration);
    }
}
8,481
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/AzureCredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazonaws.SdkClientException;
import com.amazonaws.util.IOUtils;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.json.Jackson;
import com.fasterxml.jackson.databind.JsonNode;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import org.apache.commons.codec.binary.Base64;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;

import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.List;

/**
 * Class to get SAML Response from Microsoft Azure using OAuth 2.0 API
 */
public class AzureCredentialsProvider extends SamlCredentialsProvider {

    /**
     * String containing "idp_tenant" as a parameter key.
     */
    private static final String KEY_IDP_TENANT = "idp_tenant";

    /**
     * String containing "client_secret" as a parameter key.
     */
    private static final String KEY_CLIENT_SECRET = "client_secret";

    /**
     * String containing "client_id" as a parameter key.
     */
    private static final String KEY_CLIENT_ID = "client_id";

    /**
     * The value of parameter idp_tenant.
     */
    private String m_idpTenant;

    /**
     * The value of parameter client_secret.
     */
    private String m_clientSecret;

    /**
     * The value of parameter client_id.
     */
    private String m_clientId;

    /**
     * Required method to grab the SAML Response. Used in base class to refresh temporary credentials.
     *
     * @return Base64 encoded SAML Response string
     * @throws IOException throws error when missing required parameters or unable to access IDP host.
     */
    protected String getSamlAssertion() throws IOException {
        /*
         * idp_tenant, client_secret, and client_id are all required parameters to be able to
         * authenticate with Microsoft Azure.
         *
         * user and password are also required and need to be set to the username and password of
         * the Microsoft Azure account that is logging in.
         */
        if (StringUtils.isNullOrEmpty(m_idpTenant)) {
            throw new IOException("Missing required property: " + KEY_IDP_TENANT);
        } else if (StringUtils.isNullOrEmpty(m_userName)) {
            throw new IOException(
                    "Missing required property: " + RedshiftProperty.UID.getName() + " or "
                            + RedshiftProperty.USER.getName());
        } else if (StringUtils.isNullOrEmpty(m_password)) {
            throw new IOException(
                    "Missing required property: " + RedshiftProperty.PWD.getName() + " or "
                            + RedshiftProperty.PASSWORD.getName());
        } else if (StringUtils.isNullOrEmpty(m_clientSecret)) {
            throw new IOException("Missing required property: " + KEY_CLIENT_SECRET);
        } else if (StringUtils.isNullOrEmpty(m_clientId)) {
            throw new IOException("Missing required property: " + KEY_CLIENT_ID);
        }

        return azureOauthBasedAuthentication();
    }

    /**
     * Overridden method to grab the field parameters from JDBC connection string. This method calls
     * the base class addParameter method and adds to it Azure specific parameters.
     *
     * @param key parameter key passed to JDBC
     * @param value parameter value associated with the given key
     */
    @Override
    public void addParameter(String key, String value) {
        if (RedshiftLogger.isEnable())
            m_log.logDebug("key: {0}", key);

        if (KEY_IDP_TENANT.equalsIgnoreCase(key)) {
            m_idpTenant = value;
        } else if (KEY_CLIENT_SECRET.equalsIgnoreCase(key)) {
            m_clientSecret = value;
        } else if (KEY_CLIENT_ID.equalsIgnoreCase(key)) {
            m_clientId = value;
        } else {
            super.addParameter(key, value);
        }
    }

    @Override
    public String getPluginSpecificCacheKey() {
        return ((m_idpTenant != null) ? m_idpTenant : "")
                + ((m_clientId != null) ? m_clientId : "")
                + ((m_clientSecret != null) ? m_clientSecret : "");
    }

    /**
     * Method to initiate a POST request to grab the SAML Assertion from Microsoft Azure and convert
     * it to a SAML Response.
     *
     * @return Base64 encoded SAML Response string
     * @throws IOException when the HTTP exchange fails or the response is unusable
     * @throws SdkClientException when the SSL context cannot be created
     */
    private String azureOauthBasedAuthentication() throws IOException, SdkClientException {
        // endpoint to connect with Microsoft Azure to get SAML Assertion token
        String uri = "https://login.microsoftonline.com/" + m_idpTenant + "/oauth2/token";

        if (RedshiftLogger.isEnable())
            m_log.logDebug("uri: {0}", uri);

        validateURL(uri);

        CloseableHttpClient client = null;
        CloseableHttpResponse resp = null;
        try {
            client = getHttpClient();
            HttpPost post = new HttpPost(uri);

            // required parameters to pass in POST body
            List<NameValuePair> parameters = new ArrayList<NameValuePair>(7);
            parameters.add(new BasicNameValuePair("grant_type", "password"));
            parameters.add(
                    new BasicNameValuePair(
                            "requested_token_type",
                            "urn:ietf:params:oauth:token-type:saml2"));
            parameters.add(new BasicNameValuePair("username", m_userName));
            parameters.add(new BasicNameValuePair("password", m_password));
            parameters.add(new BasicNameValuePair(KEY_CLIENT_SECRET, m_clientSecret));
            parameters.add(new BasicNameValuePair(KEY_CLIENT_ID, m_clientId));
            parameters.add(new BasicNameValuePair("resource", m_clientId));

            // headers to pass with POST request
            post.addHeader("Content-Type", "application/x-www-form-urlencoded");
            post.addHeader("Accept", "application/json");
            // Use the StandardCharsets constant instead of Charset.forName("UTF-8")
            // (no lookup, no possible UnsupportedCharsetException).
            post.setEntity(new UrlEncodedFormEntity(parameters, StandardCharsets.UTF_8));

            resp = client.execute(post);
            String content = EntityUtils.toString(resp.getEntity());
            JsonNode entityJson = Jackson.jsonNodeOf(content);

            // if we don't receive a 200 response, throw an error saying we failed to authenticate
            // with Azure
            if (resp.getStatusLine().getStatusCode() != 200) {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.DEBUG, "azureOauthBasedAuthentication https response: " + content);

                String errorMessage =
                        "Authentication failed on the Azure server. Please check the tenant, user, password, client secret, and client id.";
                JsonNode errorDescriptionNode = entityJson.findValue("error_description");
                if (errorDescriptionNode != null && !StringUtils.isNullOrEmpty(errorDescriptionNode.textValue())) {
                    String errorDescription = errorDescriptionNode.textValue().replaceAll("\r\n", " ");
                    JsonNode errorCodeNode = entityJson.findValue("error");
                    if (errorCodeNode != null && !StringUtils.isNullOrEmpty(errorCodeNode.textValue())) {
                        errorMessage = errorCodeNode.textValue() + ": " + errorDescription;
                    } else {
                        errorMessage = "Unexpected response: " + errorDescription;
                    }
                }
                throw new IOException(errorMessage);
            }

            if (RedshiftLogger.isEnable()) {
                // Mask token material before logging the raw response body.
                String maskedContent = content.replaceAll(getRegexForJsonKey("access_token"), "$1***masked***\"");
                maskedContent = maskedContent.replaceAll(getRegexForJsonKey("refresh_token"), "$1***masked***\"");
                m_log.log(LogLevel.DEBUG, "content:" + maskedContent);
            }

            // parse the JSON response to grab access_token field which contains Base64 encoded SAML
            // Assertion and decode it
            JsonNode accessTokenField = entityJson.findValue("access_token");
            String encodedSamlAssertion;
            if (accessTokenField != null) {
                encodedSamlAssertion = accessTokenField.textValue();
                if (StringUtils.isNullOrEmpty(encodedSamlAssertion)) {
                    throw new IOException("Invalid Azure access_token response");
                }
            } else {
                throw new IOException("Failed to find Azure access_token");
            }

            // decode the SAML Assertion to a String to add XML tags to form a SAML Response
            String samlAssertion = new String(Base64.decodeBase64(encodedSamlAssertion), StandardCharsets.UTF_8);

            /*
             * SAML Response is required to be sent to base class. We need to provide a minimum of:
             * 1) samlp:Response XML tag with xmlns:samlp protocol value
             * 2) samlp:Status XML tag and samlp:StatusCode XML tag with Value indicating Success
             * 3) followed by Signed SAML Assertion
             */
            StringBuilder sb = new StringBuilder();
            sb.append("<samlp:Response xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\">");
            sb.append("<samlp:Status>");
            sb.append("<samlp:StatusCode Value=\"urn:oasis:names:tc:SAML:2.0:status:Success\"/>");
            sb.append("</samlp:Status>");
            sb.append(samlAssertion);
            sb.append("</samlp:Response>");

            // re-encode the SAML Response in Base64 and return this to the base class.
            // Encode the bytes explicitly as UTF-8: the original bare getBytes() used the
            // platform default charset, making the result platform-dependent.
            return new String(Base64.encodeBase64(sb.toString().getBytes(StandardCharsets.UTF_8)));
        } catch (GeneralSecurityException e) {
            // failed to get HttpClient and thus cannot continue so throw an error.
            throw new SdkClientException("Failed to create SSLContext", e);
        } finally {
            // close out closable resp and client. This does not throw any errors.
            IOUtils.closeQuietly(resp, null);
            IOUtils.closeQuietly(client, null);
        }
    }
}
8,482
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/IdpTokenAuthPlugin.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.NativeTokenHolder;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazonaws.util.StringUtils;

import java.io.IOException;
import java.util.Date;

/**
 * A basic credential provider class.
 * This plugin class allows clients to directly provide any auth token that is handled by Redshift.
 */
public class IdpTokenAuthPlugin extends CommonCredentialsProvider {

    private static final String KEY_TOKEN = "token";
    private static final String KEY_TOKEN_TYPE = "token_type";

    // Lifetime assumed for the client-supplied token, since the client gives no expiry.
    private static final int DEFAULT_IDP_TOKEN_EXPIRY_IN_SEC = 900;

    // Values captured from the connection parameters via addParameter().
    private String m_token;
    private String m_tokenType;

    public IdpTokenAuthPlugin() {
    }

    /**
     * Returns the auth token that the client supplied through the connection parameters,
     * wrapped with a fixed expiration of {@code DEFAULT_IDP_TOKEN_EXPIRY_IN_SEC} seconds
     * from now.
     *
     * @return {@link NativeTokenHolder} A wrapper containing auth token and its expiration time information
     * @throws IOException indicating that some required parameter is missing.
     */
    @Override
    protected NativeTokenHolder getAuthToken() throws IOException {
        checkRequiredParameters();
        long expiresAtMillis = System.currentTimeMillis() + DEFAULT_IDP_TOKEN_EXPIRY_IN_SEC * 1000L;
        return NativeTokenHolder.newInstance(m_token, new Date(expiresAtMillis));
    }

    // Fails fast when either of the two mandatory connection parameters is absent.
    private void checkRequiredParameters() throws IOException {
        if (StringUtils.isNullOrEmpty(m_token)) {
            throw new IOException("IdC authentication failed: The token must be included in the connection parameters.");
        }
        if (StringUtils.isNullOrEmpty(m_tokenType)) {
            throw new IOException("IdC authentication failed: The token type must be included in the connection parameters.");
        }
    }

    /**
     * Captures the {@code token} and {@code token_type} connection parameters; every
     * key is also forwarded to the base class first, preserving its handling.
     *
     * @param key parameter key passed to the JDBC driver
     * @param value parameter value associated with the given key
     */
    @Override
    public void addParameter(String key, String value) {
        // Let the base class record any parameters it understands.
        super.addParameter(key, value);

        if (KEY_TOKEN.equalsIgnoreCase(key)) {
            m_token = value;
            if (RedshiftLogger.isEnable()) {
                // Log only the length — never the token value itself.
                m_log.logDebug("Setting token of length={0}", m_token.length());
            }
            return;
        }

        if (KEY_TOKEN_TYPE.equalsIgnoreCase(key)) {
            m_tokenType = value;
            if (RedshiftLogger.isEnable()) {
                m_log.logDebug("Setting token_type: {0}", m_tokenType);
            }
        }
    }
}
8,483
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/AdfsCredentialsProvider.java
package com.amazon.redshift.plugin;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazonaws.SdkClientException;
import com.amazonaws.util.IOUtils;
import com.amazonaws.util.StringUtils;

/**
 * SAML credentials provider for Microsoft AD FS. Obtains a SAML assertion either via
 * Windows Integrated Authentication (when no username/password is supplied, Windows only)
 * or via form-based authentication against the AD FS IdP-initiated sign-on page.
 */
public class AdfsCredentialsProvider extends SamlCredentialsProvider
{
    /** Extracts the Base64-encoded SAMLResponse value from the AD FS login response HTML. */
    private static final Pattern SAML_PATTERN =
        Pattern.compile("SAMLResponse\\W+value=\"([^\"]+)\"");

    /**
     * Property for specifying loginToRp.
     */
    private static final String KEY_LOGINTORP = "loginToRp";

    /**
     * String to hold value of loginToRp.
     */
    protected String m_loginToRp = "urn:amazon:webservices";

    /**
     * Captures the plugin-specific {@code loginToRp} connection parameter after delegating
     * common handling to the base class.
     */
    @Override
    public void addParameter(String key, String value)
    {
        super.addParameter(key, value);

        if (KEY_LOGINTORP.equalsIgnoreCase(key))
        {
            m_loginToRp = value;

            // Bug fix: the original format string "m_loginToRp: " had no {0} placeholder,
            // so the configured value was never written to the log.
            if (RedshiftLogger.isEnable())
                m_log.logDebug("m_loginToRp: {0}", m_loginToRp);
        }
    }

    /** Cache key component distinguishing credentials obtained for different relying parties. */
    @Override
    public String getPluginSpecificCacheKey()
    {
        return ((m_loginToRp != null) ? m_loginToRp : "");
    }

    /**
     * Obtains the SAML assertion from AD FS.
     *
     * @return Base64-encoded SAML assertion string
     * @throws IOException if {@code idp_host} is missing or the exchange fails
     */
    protected String getSamlAssertion() throws IOException
    {
        if (StringUtils.isNullOrEmpty(m_idpHost))
        {
            throw new IOException("Missing required property: " + KEY_IDP_HOST);
        }

        // No credentials supplied -> fall back to Windows Integrated Authentication.
        if (StringUtils.isNullOrEmpty(m_userName) || StringUtils.isNullOrEmpty(m_password))
        {
            return windowsIntegratedAuthentication();
        }

        return formBasedAuthentication();
    }

    /**
     * Runs the bundled adfs.exe helper to perform Windows Integrated Authentication and
     * returns the SAML assertion it prints to stdout. Windows only.
     */
    private String windowsIntegratedAuthentication()
    {
        String osName = System.getProperty("os.name").toLowerCase(Locale.getDefault());
        if (!osName.contains("windows"))
        {
            throw new SdkClientException("WIA only support Windows platform.");
        }

        InputStream is = null;
        OutputStream os = null;
        File file = null;
        try
        {
            file = extractExecutable();
            String[] cmd = new String[3];
            cmd[0] = file.getAbsolutePath();
            cmd[1] = "https://" + m_idpHost + ':' + m_idpPort
                    + "/adfs/ls/IdpInitiatedSignOn.aspx?loginToRp=" + m_loginToRp;
            cmd[2] = String.valueOf(Boolean.getBoolean("adfs.insecure"));

            if (RedshiftLogger.isEnable())
                m_log.logDebug("Command: {0}:{1}:{2}", cmd[0], cmd[1], cmd[2]);

            validateURL(cmd[1]);
            Process process = Runtime.getRuntime().exec(cmd);
            is = process.getInputStream();
            os = process.getOutputStream();
            String samlAssertion = IOUtils.toString(is);
            int code = process.waitFor();
            if (code != 0)
            {
                throw new SdkClientException("Failed execute adfs command, return: " + code);
            }
            return samlAssertion;
        }
        catch (InterruptedException e)
        {
            throw new SdkClientException("Failed execute adfs command.", e);
        }
        catch (IOException e)
        {
            throw new SdkClientException("Failed execute adfs command.", e);
        }
        finally
        {
            IOUtils.closeQuietly(is, null);
            IOUtils.closeQuietly(os, null);
            // Temp executable cleanup; fall back to delete-on-exit if the file is still open.
            if (file != null && !file.delete())
            {
                file.deleteOnExit();
            }
        }
    }

    /**
     * Performs form-based authentication: GETs the IdP-initiated sign-on page, fills in the
     * username/password form fields, POSTs them back, and extracts the SAMLResponse value.
     *
     * @return Base64-encoded SAML assertion string
     * @throws IOException if any request fails or no SAMLResponse is found
     */
    private String formBasedAuthentication() throws IOException
    {
        String uri = "https://" + m_idpHost + ':' + m_idpPort
                + "/adfs/ls/IdpInitiatedSignOn.aspx?loginToRp=" + m_loginToRp;

        CloseableHttpClient client = null;
        try
        {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("uri: {0}", uri);

            validateURL(uri);
            client = getHttpClient();
            HttpGet get = new HttpGet(uri);
            CloseableHttpResponse resp = client.execute(get);
            if (resp.getStatusLine().getStatusCode() != 200)
            {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.DEBUG,
                        "formBasedAuthentication https response:" + EntityUtils.toString(resp.getEntity()));
                throw new IOException(
                        "Failed send request: " + resp.getStatusLine().getReasonPhrase());
            }

            String body = EntityUtils.toString(resp.getEntity());

            if (RedshiftLogger.isEnable())
                m_log.logDebug("body: {0}", body);

            // Replay every hidden form field; substitute the user's credentials into the
            // username/password inputs and keep any non-empty authmethod selection.
            List<NameValuePair> parameters = new ArrayList<NameValuePair>();
            for (String inputTag : getInputTagsfromHTML(body))
            {
                String name = getValueByKey(inputTag, "name");
                String value = getValueByKey(inputTag, "value");
                String nameLower = name.toLowerCase();

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("name: {0}", name);

                if (nameLower.contains("username"))
                {
                    parameters.add(new BasicNameValuePair(name, m_userName));
                }
                else if (nameLower.contains("authmethod"))
                {
                    if (!value.isEmpty())
                    {
                        parameters.add(new BasicNameValuePair(name, value));
                    }
                }
                else if (nameLower.contains("password"))
                {
                    parameters.add(new BasicNameValuePair(name, m_password));
                }
                else if (!name.isEmpty())
                {
                    parameters.add(new BasicNameValuePair(name, value));
                }
            }

            // A relative form action means the POST target lives on the same IdP host.
            String action = getFormAction(body);
            if (!StringUtils.isNullOrEmpty(action) && action.startsWith("/"))
            {
                uri = "https://" + m_idpHost + ':' + m_idpPort + action;
            }

            if (RedshiftLogger.isEnable())
                m_log.logDebug("action uri: {0}", uri);

            validateURL(uri);
            HttpPost post = new HttpPost(uri);
            post.setEntity(new UrlEncodedFormEntity(parameters));
            resp = client.execute(post);
            if (resp.getStatusLine().getStatusCode() != 200)
            {
                throw new IOException(
                        "Failed send request: " + resp.getStatusLine().getReasonPhrase());
            }

            String content = EntityUtils.toString(resp.getEntity());
            Matcher matcher = SAML_PATTERN.matcher(content);
            if (!matcher.find())
            {
                throw new IOException("Failed to login ADFS.");
            }

            return matcher.group(1);
        }
        catch (GeneralSecurityException e)
        {
            throw new SdkClientException("Failed create SSLContext.", e);
        }
        finally
        {
            IOUtils.closeQuietly(client, null);
        }
    }

    /**
     * Copies the bundled adfs.exe resource to a temp file so it can be executed.
     *
     * @return the temp file holding the executable
     * @throws IOException if the resource cannot be copied
     */
    private File extractExecutable() throws IOException
    {
        File file = File.createTempFile("adfs", ".exe");
        InputStream is = null;
        OutputStream os = null;
        try
        {
            is = AdfsCredentialsProvider.class.getResourceAsStream("adfs.exe");
            os = new FileOutputStream(file);
            IOUtils.copy(is, os);
        }
        finally
        {
            IOUtils.closeQuietly(is, null);
            IOUtils.closeQuietly(os, null);
        }
        return file;
    }
}
8,484
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BrowserSamlCredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.httpserver.RequestHandler;
import com.amazon.redshift.plugin.httpserver.Server;
import org.apache.http.NameValuePair;

import java.awt.*;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.time.Duration;
import java.util.List;
import java.util.function.Function;
import java.util.regex.Matcher;

import static com.amazon.redshift.plugin.utils.CheckUtils.*;
import static com.amazon.redshift.plugin.utils.ResponseUtils.findParameter;

/**
 * Class to get SAML Assertion from a web service which is able to produce
 * SAML assertion by requesting the specific URL.
 */
public class BrowserSamlCredentialsProvider extends SamlCredentialsProvider
{
    /**
     * String containing "login_url" as a parameter key.
     */
    public static final String KEY_LOGIN_URL = "login_url";

    /**
     * String containing "idp_response_timeout" as a parameter key.
     */
    public static final String KEY_IDP_RESPONSE_TIMEOUT = "idp_response_timeout";

    /**
     * String containing "listen_port" as a parameter key.
     */
    public static final String KEY_LISTEN_PORT = "listen_port";

    /**
     * String containing "SAMLResponse" as a parameter key.
     */
    private static final String SAML_RESPONSE_PARAM_NAME = "SAMLResponse";

    /**
     * The value of parameter login_url.
     */
    private String m_login_url;

    /**
     * The value of parameter idp_response_timeout in seconds.
     */
    private int m_idp_response_timeout = 120;

    /**
     * The value of parameter listen_port.
     */
    private int m_listen_port = 7890;

    /**
     * Overridden method to grab the SAML Response. Used in base class to refresh
     * temporary credentials.
     *
     * @return Base64 encoded SAML Response string
     * @throws IOException as part of common API; means parameters are not set or have invalid values
     */
    @Override
    protected String getSamlAssertion() throws IOException
    {
        try
        {
            checkMissingAndThrows(m_login_url, KEY_LOGIN_URL);
            checkAndThrowsWithMessage(
                m_idp_response_timeout < 10,
                KEY_IDP_RESPONSE_TIMEOUT + " should be 10 seconds or greater.");
            checkInvalidAndThrows((m_listen_port < 1 || m_listen_port > 65535), KEY_LISTEN_PORT);
            validateURL(m_login_url);
            return authenticate();
        }
        catch (InternalPluginException ex)
        {
            // Wrap any exception to be compatible with SamlCredentialsProvider API
            throw new IOException(ex);
        }
    }

    /**
     * Overwritten method to grab the field parameters from JDBC connection string.
     * This method calls the base class' addParameter method and adds to it new
     * specific parameters.
     *
     * @param key   parameter key passed to JDBC
     * @param value parameter value associated with the given key
     */
    @Override
    public void addParameter(String key, String value)
    {
        if (RedshiftLogger.isEnable())
            m_log.logDebug("key: {0}", key);

        switch (key)
        {
            case KEY_LISTEN_PORT:
                m_listen_port = Integer.parseInt(value);

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_listen_port: {0}", m_listen_port);

                break;
            case KEY_LOGIN_URL:
                m_login_url = value;

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_login_url: {0}", m_login_url);

                break;
            case KEY_IDP_RESPONSE_TIMEOUT:
                m_idp_response_timeout = Integer.parseInt(value);

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_idp_response_timeout: {0}", m_idp_response_timeout);

                break;
            default:
                super.addParameter(key, value);
        }
    }

    /** Cache key component distinguishing credentials obtained via different login URLs. */
    @Override
    public String getPluginSpecificCacheKey()
    {
        return ((m_login_url != null) ? m_login_url : "");
    }

    /**
     * Authentication consists of:
     * <ol>
     * <li> Start the Socket Server on the port {@link BrowserSamlCredentialsProvider#m_listen_port}.</li>
     * <li> Open the default browser with the link asking a User to enter the credentials.</li>
     * <li> Retrieve the SAML Assertion string from the response.</li>
     * </ol>
     *
     * @return Base64 encoded SAML Assertion string
     * @throws IOException indicating the error
     */
    private String authenticate() throws IOException
    {
        RequestHandler requestHandler =
            new RequestHandler(new Function<List<NameValuePair>, Object>()
            {
                @Override
                public Object apply(List<NameValuePair> nameValuePairs)
                {
                    if (RedshiftLogger.isEnable())
                    {
                        for (NameValuePair pair : nameValuePairs)
                        {
                            // Never log the SAML response value itself, only its presence.
                            if (pair.getName().equals(SAML_RESPONSE_PARAM_NAME))
                            {
                                m_log.logDebug("nameValuePair:name= {0}", SAML_RESPONSE_PARAM_NAME);
                            }
                            else
                            {
                                m_log.logDebug("nameValuePair: {0}", pair);
                            }
                        }
                    }
                    return findParameter(SAML_RESPONSE_PARAM_NAME, nameValuePairs);
                }
            });
        Server server = new Server(
            m_listen_port, requestHandler, Duration.ofSeconds(m_idp_response_timeout), m_log);
        server.listen();

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format("Listening for connection on port %d", m_listen_port));

        try
        {
            openBrowser();
            server.waitForResult();
        }
        catch (IOException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(ex);

            server.stop();
            throw ex;
        }

        // Bug fix: the original called server.waitForResult() a second time here, after the
        // wait in the try block had already completed - a redundant duplicate blocking call.

        Object result = requestHandler.getResult();

        if (result instanceof InternalPluginException)
        {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("Error occurred while fetching SAML assertion: {0}", result);
            throw (InternalPluginException) result;
        }
        if (result instanceof String)
        {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.DEBUG, "Got SAML assertion of length={0}",
                    ((String) result).length());
            return (String) result;
        }

        if (RedshiftLogger.isEnable())
            m_log.logDebug("result: {0}", result);

        throw new InternalPluginException("Fail to login during timeout.");
    }

    /**
     * Opens the default browser with the authorization request to the web service.
     *
     * @throws IOException in case of error
     */
    private void openBrowser() throws IOException
    {
        URI authorizeRequestUrl = URI.create(m_login_url);

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format("SSO URI: \n%s", authorizeRequestUrl.toString()));

        Desktop.getDesktop().browse(authorizeRequestUrl);
    }
}
8,485
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BrowserAzureCredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.httpserver.RequestHandler;
import com.amazon.redshift.plugin.httpserver.Server;
import com.amazon.redshift.plugin.utils.RandomStateUtil;
import com.amazonaws.util.json.Jackson;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.commons.codec.binary.Base64;
import org.apache.http.HttpHeaders;
import org.apache.http.NameValuePair;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;

import java.awt.*;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

import static com.amazon.redshift.plugin.httpserver.RequestHandler.REDSHIFT_PATH;
import static com.amazon.redshift.plugin.utils.CheckUtils.*;
import static com.amazon.redshift.plugin.utils.ResponseUtils.findParameter;
import static com.amazonaws.util.StringUtils.isNullOrEmpty;
import static org.apache.commons.codec.binary.StringUtils.newStringUtf8;

/**
 * Class to get SAML Token from any IDP using OAuth 2.0 API.
 *
 * Flow: open the browser for the user to authenticate against Azure AD, receive the
 * authorization code on a local HTTP listener, exchange the code for a SAML2 access
 * token at the token endpoint, then decode and wrap that assertion in a minimal
 * samlp:Response envelope for the base class.
 */
public class BrowserAzureCredentialsProvider extends SamlCredentialsProvider
{
    /**
     * Key for setting timeout for IDP response.
     */
    public static final String KEY_IDP_RESPONSE_TIMEOUT = "idp_response_timeout";

    /**
     * Key for setting the port number for listening.
     */
    public static final String KEY_LISTEN_PORT = "listen_port";

    /**
     * Key for setting idp tenant.
     */
    public static final String KEY_IDP_TENANT = "idp_tenant";

    /**
     * Key for setting client ID.
     */
    public static final String KEY_CLIENT_ID = "client_id";

    /**
     * Key for setting state.
     */
    public static final String OAUTH_STATE_PARAMETER_NAME = "state";

    /**
     * Key for setting redirect URI.
     */
    public static final String OAUTH_REDIRECT_PARAMETER_NAME = "redirect_uri";

    /**
     * Key for setting code.
     */
    public static final String OAUTH_IDP_CODE_PARAMETER_NAME = "code";

    /**
     * Key for setting client ID.
     */
    public static final String OAUTH_CLIENT_ID_PARAMETER_NAME = "client_id";

    /**
     * Key for setting OAUTH response type.
     */
    public static final String OAUTH_RESPONSE_TYPE_PARAMETER_NAME = "response_type";

    /**
     * Key for setting requested token type.
     */
    public static final String OAUTH_REQUESTED_TOKEN_TYPE_PARAMETER_NAME = "requested_token_type";

    /**
     * Key for setting grant type.
     */
    public static final String OAUTH_GRANT_TYPE_PARAMETER_NAME = "grant_type";

    /**
     * Key for setting scope.
     */
    public static final String OAUTH_SCOPE_PARAMETER_NAME = "scope";

    /**
     * Key for setting resource.
     */
    public static final String OAUTH_RESOURCE_PARAMETER_NAME = "resource";

    /**
     * Key for setting response mode.
     */
    public static final String OAUTH_RESPONSE_MODE_PARAMETER_NAME = "response_mode";

    /**
     * String containing Microsoft IDP host.
     */
    private static final String MICROSOFT_IDP_HOST = "login.microsoftonline.com";

    /**
     * String containing HTTPS.
     */
    private static final String CURRENT_INTERACTION_SCHEMA = "https";

    /**
     * IDP tenant variable.
     */
    private String m_idp_tenant;

    /**
     * Client ID variable.
     */
    private String m_clientId;

    /**
     * Default timeout for IDP response, in seconds.
     */
    private int m_idp_response_timeout = 120;

    /**
     * Default port for local server. 0 means the server picks a random free port.
     */
    private int m_listen_port = 0;

    /**
     * Redirect URI variable; built from the actual local listener port in
     * fetchAuthorizationToken() and reused by createAuthorizationRequest().
     */
    private String redirectUri;

    /**
     * Overridden method to grab the SAML Response.
     * Used in base class to refresh temporary credentials.
     *
     * @return Base64 encoded SAML Response string
     * @throws IOException indicating the error
     */
    @Override
    protected String getSamlAssertion() throws IOException
    {
        try
        {
            checkMissingAndThrows(m_idp_tenant, KEY_IDP_TENANT);
            checkMissingAndThrows(m_clientId, KEY_CLIENT_ID);
            checkAndThrowsWithMessage(
                m_idp_response_timeout < 10,
                KEY_IDP_RESPONSE_TIMEOUT + " should be 10 seconds or greater.");
            // Port 0 is allowed (random port); otherwise it must be a valid TCP port.
            checkInvalidAndThrows(
                m_listen_port != 0 && (m_listen_port < 1 || m_listen_port > 65535),
                KEY_LISTEN_PORT);
            if (m_listen_port == 0)
            {
                m_log.logDebug("Listen port set to 0. Will pick random port");
            }
            String token = fetchAuthorizationToken();
            String content = fetchSamlResponse(token);
            String samlAssertion = extractSamlAssertion(content);
            return wrapAndEncodeAssertion(samlAssertion);
        }
        catch (InternalPluginException | URISyntaxException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(ex);

            // Wrap any exception to be compatible with SamlCredentialsProvider API
            throw new IOException(ex);
        }
    }

    /**
     * Overwritten method to grab the field parameters from JDBC connection string.
     * This method calls the base class' addParameter method and adds to it new
     * specific parameters.
     *
     * @param key   parameter key passed to JDBC
     * @param value parameter value associated with the given key
     */
    @Override
    public void addParameter(String key, String value)
    {
        if (RedshiftLogger.isEnable())
            m_log.logDebug("key: {0}", key);

        switch (key)
        {
            case KEY_IDP_TENANT:
                m_idp_tenant = value;

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_idp_tenant: {0}", m_idp_tenant);

                break;
            case KEY_CLIENT_ID:
                m_clientId = value;

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_clientId: {0}", m_clientId);

                break;
            case KEY_IDP_RESPONSE_TIMEOUT:
                m_idp_response_timeout = Integer.parseInt(value);

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_idp_response_timeout: {0}", m_idp_response_timeout);

                break;
            case KEY_LISTEN_PORT:
                m_listen_port = Integer.parseInt(value);

                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_listen_port: {0}", m_listen_port);

                break;
            default:
                super.addParameter(key, value);
        }
    }

    /** Cache key component combining the tenant and client id for this provider. */
    @Override
    public String getPluginSpecificCacheKey()
    {
        return ((m_idp_tenant != null) ? m_idp_tenant : "")
            + ((m_clientId != null) ? m_clientId : "");
    }

    /**
     * First authentication phase:
     * <ol>
     * <li> Set the state in order to check if the incoming request belongs to the current authentication process.</li>
     * <li> Start the Socket Server at the {@linkplain BrowserAzureCredentialsProvider#m_listen_port} port.</li>
     * <li> Open the default browser with the link asking a User to enter the credentials.</li>
     * <li> Retrieve the SAML Assertion string from the response. Decode it, format, validate and return.</li>
     * </ol>
     *
     * @return Authorization token
     */
    private String fetchAuthorizationToken() throws IOException, URISyntaxException
    {
        // Random state guards against responses that do not belong to this login attempt.
        final String state = RandomStateUtil.generateRandomState();
        RequestHandler requestHandler =
            new RequestHandler(new Function<List<NameValuePair>, Object>()
            {
                @Override
                public Object apply(List<NameValuePair> nameValuePairs)
                {
                    String incomingState =
                        findParameter(OAUTH_STATE_PARAMETER_NAME, nameValuePairs);
                    if (!state.equals(incomingState))
                    {
                        return new InternalPluginException(
                            "Incoming state " + incomingState +
                                " does not match the outgoing state " + state);
                    }
                    String code = findParameter(OAUTH_IDP_CODE_PARAMETER_NAME, nameValuePairs);
                    if (isNullOrEmpty(code))
                    {
                        return new InternalPluginException("No valid code found");
                    }
                    return code;
                }
            });
        Server server = new Server(
            m_listen_port, requestHandler, Duration.ofSeconds(m_idp_response_timeout), m_log);
        server.listen();

        // Ask the server for the port it actually bound (relevant when m_listen_port is 0)
        // and derive the redirect URI the IDP will call back on.
        int localPort = server.getLocalPort();
        this.redirectUri = "http://localhost:" + localPort + REDSHIFT_PATH;

        try
        {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.DEBUG,
                    String.format("Listening for connection on port %d", m_listen_port));

            openBrowser(state);
            server.waitForResult();
        }
        catch (URISyntaxException | IOException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(ex);

            server.stop();
            throw ex;
        }

        Object result = requestHandler.getResult();

        if (result instanceof InternalPluginException)
        {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("Error occurred while fetching SAML assertion: {0}", result);
            throw (InternalPluginException) result;
        }
        if (result instanceof String)
        {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.DEBUG, "Got authorization token of length={0}",
                    ((String) result).length());
            return (String) result;
        }

        if (RedshiftLogger.isEnable())
            m_log.logDebug("result: {0}", result);

        throw new InternalPluginException("Fail to login during timeout.");
    }

    /**
     * SAML Response is required to be sent to base class. We need to provide a minimum of:
     * 1) samlp:Response XML tag with xmlns:samlp protocol value
     * 2) samlp:Status XML tag and samlpStatusCode XML tag with Value indicating Success
     * 3) followed by Signed SAML Assertion
     */
    private String wrapAndEncodeAssertion(String samlAssertion)
    {
        String samlAssertionString =
            "<samlp:Response xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\">" +
                "<samlp:Status><samlp:StatusCode Value=\"urn:oasis:names:tc:SAML:2.0:status:Success\"/>" +
                "</samlp:Status>" + samlAssertion + "</samlp:Response>";

        return newStringUtf8(Base64.encodeBase64(samlAssertionString.getBytes()));
    }

    /**
     * Initiates the request to the IDP and gets the response body.
     *
     * @param token authorization token
     * @return Response body of the incoming response
     * @throws IOException indicating the error
     */
    private String fetchSamlResponse(String token) throws IOException
    {
        HttpPost post = createAuthorizationRequest(token);
        try (
            CloseableHttpClient client = getHttpClient();
            CloseableHttpResponse resp = client.execute(post))
        {
            String content = EntityUtils.toString(resp.getEntity());

            if (RedshiftLogger.isEnable())
            {
                // Mask all token values before logging the raw token-endpoint response.
                String maskedContent = content.replaceAll(
                    getRegexForJsonKey("access_token"), "$1***masked***\"");
                maskedContent = maskedContent.replaceAll(
                    getRegexForJsonKey("refresh_token"), "$1***masked***\"");
                maskedContent = maskedContent.replaceAll(
                    getRegexForJsonKey("id_token"), "$1***masked***\"");
                m_log.log(LogLevel.DEBUG, "fetchSamlResponse https response:" + maskedContent);
            }

            checkAndThrowsWithMessage(
                resp.getStatusLine().getStatusCode() != 200,
                "Unexpected response: " + resp.getStatusLine().getReasonPhrase());

            return content;
        }
        catch (GeneralSecurityException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.log(LogLevel.ERROR, ex.getMessage(), ex);

            throw new InternalPluginException(ex);
        }
    }

    /**
     * Get Base 64 encoded saml assertion from the response body.
     *
     * @param content response body
     * @return string containing Base 64 encoded saml assetion
     */
    private String extractSamlAssertion(String content)
    {
        String encodedSamlAssertion;
        JsonNode accessTokenField = Jackson.jsonNodeOf(content).findValue("access_token");
        checkAndThrowsWithMessage(accessTokenField == null, "Failed to find access_token");
        encodedSamlAssertion = accessTokenField.textValue();
        checkAndThrowsWithMessage(
            isNullOrEmpty(encodedSamlAssertion), "Invalid access_token value.");

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                "Successfully got SAML assertion of length={0}", encodedSamlAssertion.length());

        return newStringUtf8(Base64.decodeBase64(encodedSamlAssertion));
    }

    /**
     * Populates request URI and parameters for the token-exchange POST.
     *
     * @param authorizationCode authorization authorizationCode
     * @return object containing the request data
     * @throws IOException if the token URL fails validation
     */
    private HttpPost createAuthorizationRequest(String authorizationCode) throws IOException
    {
        URIBuilder builder = new URIBuilder().setScheme(CURRENT_INTERACTION_SCHEMA)
            .setHost(MICROSOFT_IDP_HOST)
            .setPath("/" + m_idp_tenant + "/oauth2/token");

        String tokenRequestUrl = builder.toString();
        validateURL(tokenRequestUrl);
        HttpPost post = new HttpPost(tokenRequestUrl);
        final List<BasicNameValuePair> parameters = new ArrayList<>();
        parameters.add(new BasicNameValuePair(OAUTH_IDP_CODE_PARAMETER_NAME, authorizationCode));
        // Request that the code be exchanged for a SAML2 token rather than a plain JWT.
        parameters.add(
            new BasicNameValuePair(
                OAUTH_REQUESTED_TOKEN_TYPE_PARAMETER_NAME,
                "urn:ietf:params:oauth:token-type:saml2"));
        parameters
            .add(new BasicNameValuePair(OAUTH_GRANT_TYPE_PARAMETER_NAME, "authorization_code"));
        parameters.add(new BasicNameValuePair(OAUTH_SCOPE_PARAMETER_NAME, "openid"));
        parameters.add(new BasicNameValuePair(OAUTH_RESOURCE_PARAMETER_NAME, m_clientId));
        parameters.add(new BasicNameValuePair(OAUTH_CLIENT_ID_PARAMETER_NAME, m_clientId));
        parameters.add(new BasicNameValuePair(OAUTH_REDIRECT_PARAMETER_NAME, redirectUri));

        post.addHeader(
            HttpHeaders.CONTENT_TYPE,
            ContentType.APPLICATION_FORM_URLENCODED.toString());
        post.addHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_JSON.toString());
        post.setEntity(new UrlEncodedFormEntity(parameters, StandardCharsets.UTF_8));

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format(
                    "Request token URI: \n%s\nredirectUri:%s", tokenRequestUrl, redirectUri));

        return post;
    }

    /**
     * Opens the default browser with the authorization request to the IDP.
     *
     * @param state opaque anti-forgery value echoed back by the IDP
     * @throws IOException indicating the error
     */
    private void openBrowser(String state) throws URISyntaxException, IOException
    {
        URIBuilder builder = new URIBuilder().setScheme(CURRENT_INTERACTION_SCHEMA)
            .setHost(MICROSOFT_IDP_HOST)
            .setPath("/" + m_idp_tenant + "/oauth2/authorize")
            .addParameter(OAUTH_SCOPE_PARAMETER_NAME, "openid")
            .addParameter(OAUTH_RESPONSE_TYPE_PARAMETER_NAME, "code")
            .addParameter(OAUTH_RESPONSE_MODE_PARAMETER_NAME, "form_post")
            .addParameter(OAUTH_CLIENT_ID_PARAMETER_NAME, m_clientId)
            .addParameter(OAUTH_REDIRECT_PARAMETER_NAME, redirectUri)
            .addParameter(OAUTH_STATE_PARAMETER_NAME, state);
        URI authorizeRequestUrl;
        authorizeRequestUrl = builder.build();
        validateURL(authorizeRequestUrl.toString());
        Desktop.getDesktop().browse(authorizeRequestUrl);

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format("Authorization code request URI: \n%s",
                    authorizeRequestUrl.toString()));
    }
}
8,486
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/IdpCredentialsProvider.java
package com.amazon.redshift.plugin;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.security.GeneralSecurityException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;

import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.client.LaxRedirectStrategy;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.ssl.NonValidatingFactory;

/**
 * Base class shared by the IdP credential-provider plugins. Supplies a preconfigured
 * HTTP client, HTTPS URL validation, and a helper for masking JSON token values in logs.
 */
abstract class IdpCredentialsProvider
{
    /** Connection-parameter key that disables server certificate validation. */
    protected static final String KEY_SSL_INSECURE = "ssl_insecure";

    /** When true, the HTTP client trusts any server certificate and skips hostname checks. */
    protected boolean m_sslInsecure;

    /** Accepts only https:// URLs made of a restricted, known-safe character set. */
    protected static final Pattern IAM_URL_PATTERN =
        Pattern.compile("^(https)://[-a-zA-Z0-9+&@#/%?=~_!:,.']*[-a-zA-Z0-9+&@#/%=~_']");

    /** Same as IAM_URL_PATTERN but for http:// URLs. */
    protected static final Pattern IAM_HTTP_URL_PATTERN =
        Pattern.compile("^(http)://[-a-zA-Z0-9+&@#/%?=~_!:,.']*[-a-zA-Z0-9+&@#/%=~_']");

    protected RedshiftLogger m_log;

    /**
     * Builds an HTTP client with 60s connect/socket timeouts, standard cookie handling,
     * lax redirects, and system-property proxy support. When {@link #m_sslInsecure} is set,
     * certificate and hostname verification are disabled (TLSv1.2 with a non-validating
     * trust manager).
     *
     * @return a configured {@link CloseableHttpClient}; the caller is responsible for closing it
     * @throws GeneralSecurityException if the insecure SSL context cannot be initialized
     */
    protected CloseableHttpClient getHttpClient() throws GeneralSecurityException
    {
        RequestConfig rc = RequestConfig.custom()
            .setSocketTimeout(60000)
            .setConnectTimeout(60000)
            .setExpectContinueEnabled(false)
            .setCookieSpec(CookieSpecs.STANDARD)
            .build();

        HttpClientBuilder builder = HttpClients.custom()
            .setDefaultRequestConfig(rc)
            .setRedirectStrategy(new LaxRedirectStrategy())
            .useSystemProperties(); // this is needed for proxy setting using system properties.

        if (m_sslInsecure)
        {
            SSLContext ctx = SSLContext.getInstance("TLSv1.2");
            TrustManager[] tma = new TrustManager[]{new NonValidatingFactory.NonValidatingTM()};
            ctx.init(null, tma, null);
            SSLSocketFactory factory = ctx.getSocketFactory();

            SSLConnectionSocketFactory sf = new SSLConnectionSocketFactory(
                factory,
                new NoopHostnameVerifier());

            builder.setSSLSocketFactory(sf);
        }

        return builder.build();
    }

    /**
     * Validates that the given string is an https URL matching {@link #IAM_URL_PATTERN}.
     *
     * @param paramString candidate URL
     * @throws IOException if the URL is malformed, not https, or fails the pattern check
     */
    protected void validateURL(String paramString) throws IOException
    {
        URI authorizeRequestUrl = URI.create(paramString);
        String error = "Invalid url:" + paramString;

        if (RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format("URI: \n%s", authorizeRequestUrl.toString()));

        try
        {
            if (!authorizeRequestUrl.toURL().getProtocol().equalsIgnoreCase("https"))
            {
                // Consistency fix: guard the log call like every other call site; an
                // unguarded m_log would NPE when logging is disabled and mask the IOException.
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.ERROR, error);

                throw new IOException(error);
            }

            Matcher matcher = IAM_URL_PATTERN.matcher(paramString);
            if (!matcher.find())
            {
                if (RedshiftLogger.isEnable())
                    m_log.log(LogLevel.ERROR, "Pattern matching failed:" + error);

                throw new IOException("Pattern matching failed:" + error);
            }
        }
        catch (MalformedURLException e)
        {
            throw new IOException(error + " " + e.getMessage(), e);
        }
    }

    /**
     * Builds a regex that matches a JSON string field with the given key, capturing the
     * prefix up to the opening quote of the value so the value can be replaced (masked).
     */
    protected static String getRegexForJsonKey(String keyName)
    {
        return "(\"" + keyName + "\"\\s*:\\s*\")[^\"]*\"";
    }
}
8,487
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/JwtCredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.INativePlugin;
import com.amazon.redshift.NativeTokenHolder;
import com.amazon.redshift.core.IamHelper;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.RedshiftException;

import java.io.IOException;
import java.net.URL;
import java.util.Collections;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.LogFactory;

/**
 * Base class for JWT-based native IdP plugins. Subclasses implement
 * {@link #getJwtAssertion()}; this class adds a JVM-wide token cache
 * (keyed by {@link #getCacheKey()}) and wraps assertion retrieval in a
 * custom context class loader that suppresses commons-logging configuration.
 */
public abstract class JwtCredentialsProvider extends IdpCredentialsProvider implements INativePlugin
{
    private static final String KEY_PROVIDER_NAME = "providerName";

    /** When true, bypass the static cache and keep only the last refreshed token. */
    protected Boolean m_disableCache = false;

    // Optional parameters
    private String m_providerName;

    // NOTE: static, therefore shared by every provider instance in the JVM.
    private static Map<String, NativeTokenHolder> m_cache = new HashMap<String, NativeTokenHolder>();
    private NativeTokenHolder m_lastRefreshCredentials; // Used when cache is disable.

    /**
     * The custom log factory class.
     */
    // private static final Class<?> CUSTOM_LOG_FACTORY_CLASS = JwtCredentialsProvider.class;

    /**
     * Log properties file name.
     */
    private static final String LOG_PROPERTIES_FILE_NAME = "log-factory.properties";

    /**
     * Log properties file path.
     */
    private static final String LOG_PROPERTIES_FILE_PATH = "META-INF/services/org.apache.commons.logging.LogFactory";

    /**
     * A custom context class loader which allows us to control which LogFactory is loaded.
     * Our CUSTOM_LOG_FACTORY_CLASS will divert any wire logging to NoOpLogger to suppress wire
     * messages being logged.
     */
    private static final ClassLoader CONTEXT_CLASS_LOADER = new ClassLoader(
        JwtCredentialsProvider.class.getClassLoader())
    {
        @Override
        public Class<?> loadClass(String name) throws ClassNotFoundException
        {
            Class<?> clazz = getParent().loadClass(name);
            /* if (org.apache.commons.logging.LogFactory.class.isAssignableFrom(clazz))
            {
                return CUSTOM_LOG_FACTORY_CLASS;
            } */
            return clazz;
        }

        @Override
        public Enumeration<URL> getResources(String name) throws IOException
        {
            if (LogFactory.FACTORY_PROPERTIES.equals(name))
            {
                // make sure not load any other commons-logging.properties files
                return Collections.enumeration(Collections.<URL>emptyList());
            }
            return super.getResources(name);
        }

        @Override
        public URL getResource(String name)
        {
            if (LOG_PROPERTIES_FILE_PATH.equals(name))
            {
                // Redirect the commons-logging service lookup to our bundled properties.
                return JwtCredentialsProvider.class.getResource(LOG_PROPERTIES_FILE_NAME);
            }
            return super.getResource(name);
        }
    };

    /**
     * Receives connection-string properties. Recognized keys (case-insensitive):
     * the IAM disable-cache property, "providerName" and "ssl_insecure";
     * unknown keys are silently ignored.
     */
    @Override
    public void addParameter(String key, String value)
    {
        if (RedshiftLogger.isEnable())
            m_log.logDebug("key: {0}", key);

        if (RedshiftProperty.IAM_DISABLE_CACHE.getName().equalsIgnoreCase(key))
        {
            m_disableCache = Boolean.valueOf(value);
        }
        else if (KEY_PROVIDER_NAME.equalsIgnoreCase(key))
        {
            m_providerName = value;
        }
        else if (KEY_SSL_INSECURE.equalsIgnoreCase(key))
        {
            m_sslInsecure = Boolean.parseBoolean(value);
        }
    }

    /** Null-safe emptiness check for parameter values. */
    protected boolean isNullOrEmpty(String val)
    {
        return (val == null || val.length() == 0);
    }

    @Override
    public void setLogger(RedshiftLogger log)
    {
        m_log = log;
    }

    /**
     * Returns a (possibly cached) token holder. On cache miss or expiry it
     * calls {@link #refresh()} under a lock; with caching enabled the fresh
     * token is then re-read from the static cache.
     *
     * @throws RedshiftException if no credentials could be obtained
     */
    @Override
    public NativeTokenHolder getCredentials() throws RedshiftException
    {
        NativeTokenHolder credentials = null;

        if(!m_disableCache)
        {
            String key = getCacheKey();
            credentials = m_cache.get(key);
        }

        if (credentials == null || credentials.isExpired())
        {
            if(RedshiftLogger.isEnable())
                m_log.logInfo("JWT getCredentials NOT from cache");

            synchronized(this)
            {
                refresh();

                if(m_disableCache)
                {
                    // refresh() parked the token in m_lastRefreshCredentials; hand it over once.
                    credentials = m_lastRefreshCredentials;
                    m_lastRefreshCredentials = null;
                }
            }
        }
        else
        {
            credentials.setRefresh(false);
            if(RedshiftLogger.isEnable())
                m_log.logInfo("JWT getCredentials from cache");
        }

        if(!m_disableCache)
        {
            credentials = m_cache.get(getCacheKey());
        }

        if (credentials == null)
        {
            throw new RedshiftException("Unable to get IDP credentials");
        }

        return credentials;
    }

    /**
     * Retrieves the raw JWT assertion from the concrete IdP.
     *
     * @return the JWT assertion string
     * @throws IOException on retrieval failure
     */
    protected abstract String getJwtAssertion() throws IOException;

    /**
     * Fetches a fresh JWT assertion and stores it (cache or last-refresh slot)
     * with a 15-minute default expiration. The thread's context class loader is
     * swapped to {@link #CONTEXT_CLASS_LOADER} for the duration and restored in
     * the finally block.
     *
     * @throws RedshiftException wrapping any retrieval error
     */
    @Override
    public void refresh() throws RedshiftException
    {
        // Get the current thread and set the context loader with our custom load class method.
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER);

        try
        {
            String jwt = getJwtAssertion();

            if (RedshiftLogger.isEnable())
                m_log.logDebug("JwtCredentialsProvider: refreshed JWT assertion of length={0}",
                    jwt != null ? jwt.length() : -1);

            // Default expiration until server sends actual expirations
            Date expiration = new Date(System.currentTimeMillis() + 15 * 60 * 1000);
            NativeTokenHolder credentials = NativeTokenHolder.newInstance(jwt, expiration);
            credentials.setRefresh(true);

            if(!m_disableCache)
                m_cache.put(getCacheKey(), credentials);
            else
                m_lastRefreshCredentials = credentials;
        }
        catch (Exception e)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(e);
            throw new RedshiftException("JWT error: " + e.getMessage(), e);
        }
        finally
        {
            currentThread.setContextClassLoader(cl);
        }
    }

    @Override
    public String getPluginSpecificCacheKey()
    {
        // Override this in each derived plugin.
        return "";
    }

    /**
     * Fetches a JWT assertion directly (no caching), with the same context
     * class loader swap as {@link #refresh()}.
     *
     * @throws RedshiftException wrapping any retrieval error
     */
    @Override
    public String getIdpToken() throws RedshiftException
    {
        String jwt = null;

        // Get the current thread and set the context loader with our custom load class method.
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER);

        try
        {
            jwt = getJwtAssertion();

            if (RedshiftLogger.isEnable())
                m_log.logDebug("JwtCredentialsProvider: got JWT asssertion of length={0}",
                    jwt != null ? jwt.length() : -1);
        }
        catch (Exception e)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(e);
            throw new RedshiftException("JWT error: " + e.getMessage(), e);
        }
        finally
        {
            currentThread.setContextClassLoader(cl);
        }

        return jwt;
    }

    /** Cache key; currently just the plugin-specific part. */
    @Override
    public String getCacheKey()
    {
        String pluginSpecificKey = getPluginSpecificCacheKey();
        return pluginSpecificKey;
    }
}
8,488
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BasicSamlCredentialsProvider.java
package com.amazon.redshift.plugin; import java.io.IOException; /** * A basic SAML credential provider class. This class can be changed and implemented to work with * any desired SAML service provider. */ public class BasicSamlCredentialsProvider extends SamlCredentialsProvider { /** * Here we are defining a new connection property key called "saml_assertion". This property * will be specific to the BasicSamlCredentialsProvider and will be used to provide some * information through the connection string. * <p> * This means that a user wanting to use this credential provider may include the following in * the connection string: * <p> * <code> * jdbc:redshift:iam://[host]:[port]/[database]?saml_assertion=[value] * </code> * <p> * If your implementation requires user input through the connection string, this is how you * can define the connection property name. You can add as many new connection properties as * needed following the same pattern: * <p> * <code> * public static final String PROPERTY_NAME = "key_name"; * </code> * <p> * The restrictions on "key_name" are: * <p> * - The name must be unique. It can not match any existing connection property key name in * the Redshift JDBC driver. The connection property names are case-insensitive, so even * if the case does not match what is found in the documentation, it is not allowed. * <p> * - The key name may not have any spaces. * <p> * - The key name may only contain the characters [a-z]|[A-Z] or underscore '_'. * */ public static final String KEY_SAML_ASSERTION = "saml_assertion"; /** * This field will store the value given with the associated connection property key. * <p> * If you are adding additional connection property keys, you will need to define additional * fields to hold those values. */ private String samlAssertion; /** * Optional default constructor. */ public BasicSamlCredentialsProvider() { } /** * This method is used to get the values associated with different connection string properties. 
* <p> * We override it in this custom credentials provider to add a check for any additional * connection properties that were added, which are not included in the existing Redshift JDBC * driver. It allows us to store these values using the appropriate fields as mentioned above. * <p> * For any new connection property keys added to this class, add an if-condition to check, if * the current key matches the connection property key, store the value associated with the key * in the appropriate field. * <p> * If no new connection property keys are required, you may leave the implementation blank and * simply return a call to the parent class implementation. * <p> * Please see the example below. * * @param key A string representing the connection property key. * @param value The value associated with the connection property key. */ @Override public void addParameter(String key, String value) { // The parent class will take care of setting up all other connection properties which are // mentioned in the Redshift JDBC driver documentation. super.addParameter(key, value); // Add if-condition checks for any connection properties which are specific to your // implementation of this custom SAML credentials provider. if (KEY_SAML_ASSERTION.equalsIgnoreCase(key)) { samlAssertion = value; } } /** * This method needs to return the SAML assertion string returned by the specific SAML provider * being used for this implementation. How you get this string will depend on the specific SAML * provider you are using. * <p> * This will be used by the SamlCredentialsProvider parent class to get the temporary credentials. * * @return The SAML assertion string. * @throws IOException no error as such. It's an overridden method. */ @Override protected String getSamlAssertion() throws IOException { /* * If you wish to make a connection property required, you can check that the associated * field has been populated, and if not, throw an IOException. 
* if (StringUtils.isNullOrEmpty(samlAssertion)) * { * throw new IOException("Missing required property: " + KEY_SAML_ASSERTION); * } */ return samlAssertion; } }
8,489
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/BrowserOktaSAMLCredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.INativePlugin;
import com.amazon.redshift.NativeTokenHolder;
import com.amazon.redshift.core.IamHelper;
import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.plugin.httpserver.RequestHandler;
import com.amazon.redshift.plugin.httpserver.Server;
import com.amazon.redshift.util.RedshiftException;

import org.apache.commons.logging.LogFactory;
import org.apache.http.NameValuePair;

import java.awt.*;
import java.io.IOException;
import java.net.URI;
import java.net.URL;
import java.time.Duration;
import java.util.*;
import java.util.List;
import java.util.function.Function;

import static com.amazon.redshift.plugin.utils.CheckUtils.*;
import static com.amazon.redshift.plugin.utils.ResponseUtils.findParameter;

/**
 * Browser-based Okta SAML plugin: opens the IdP login URL in the user's
 * default browser, runs a local HTTP listener to capture the SAMLResponse
 * POST-back, and caches the resulting assertion.
 */
public class BrowserOktaSAMLCredentialsProvider extends IdpCredentialsProvider implements INativePlugin
{
    /**
     * The value of parameter login_url
     */
    private String m_login_url;

    /**
     * String containing "login_url" as a parameter key.
     */
    public static final String KEY_LOGIN_URL = "login_url";

    /**
     * String containing "idp_response_timeout" as a parameter key.
     */
    public static final String KEY_IDP_RESPONSE_TIMEOUT = "idp_response_timeout";

    /**
     * String containing "listen_port" as a parameter key.
     */
    public static final String KEY_LISTEN_PORT = "listen_port";

    /**
     * String containing "SAMLResponse" as a browser response Key
     */
    private static final String SAML_RESPONSE_PARAM_NAME = "SAMLResponse";

    /**
     * The value of parameter idp_response_timeout in seconds.
     */
    private int m_idp_response_timeout = 120;

    /**
     * The value of parameter listen_port
     */
    private int m_listen_port = 7890;

    /**
     * The value of expiry time (minutes) applied to a freshly fetched assertion.
     */
    private int EXPIRY_TIME = 5;

    // NOTE: static, therefore shared by every provider instance in the JVM.
    private static Map<String, NativeTokenHolder> m_cache = new HashMap<String, NativeTokenHolder>();
    private NativeTokenHolder m_lastRefreshCredentials; // Used when cache is disable.

    /** When true, bypass the static cache. NOTE(review): no parameter sets this here — confirm. */
    protected Boolean m_disableCache = false;

    /**
     * The custom log factory class.
     */
    // private static final Class<?> CUSTOM_LOG_FACTORY_CLASS = JwtCredentialsProvider.class;

    /**
     * Log properties file name.
     */
    private static final String LOG_PROPERTIES_FILE_NAME = "log-factory.properties";

    /**
     * Log properties file path.
     */
    private static final String LOG_PROPERTIES_FILE_PATH = "META-INF/services/org.apache.commons.logging.LogFactory";

    /**
     * A custom context class loader which allows us to control which LogFactory is loaded.
     * Our CUSTOM_LOG_FACTORY_CLASS will divert any wire logging to NoOpLogger to suppress wire
     * messages being logged.
     */
    private static final ClassLoader CONTEXT_CLASS_LOADER = new ClassLoader(
        BrowserOktaSAMLCredentialsProvider.class.getClassLoader())
    {
        @Override
        public Class<?> loadClass(String name) throws ClassNotFoundException
        {
            Class<?> clazz = getParent().loadClass(name);
            return clazz;
        }

        @Override
        public Enumeration<URL> getResources(String name) throws IOException
        {
            if (LogFactory.FACTORY_PROPERTIES.equals(name))
            {
                // make sure not load any other commons-logging.properties files
                return Collections.enumeration(Collections.<URL>emptyList());
            }
            return super.getResources(name);
        }

        @Override
        public URL getResource(String name)
        {
            if (LOG_PROPERTIES_FILE_PATH.equals(name))
            {
                return BrowserOktaSAMLCredentialsProvider.class.getResource(LOG_PROPERTIES_FILE_NAME);
            }
            return super.getResource(name);
        }
    };

    /**
     * Receives connection-string properties; recognizes listen_port, login_url
     * and idp_response_timeout (case-sensitive switch). Other keys are ignored.
     */
    @Override
    public void addParameter(String key, String value)
    {
        switch (key)
        {
            case KEY_LISTEN_PORT:
                m_listen_port = Integer.parseInt(value);
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_listen_port: {0}", m_listen_port);
                break;
            case KEY_LOGIN_URL:
                m_login_url = value;
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_login_url: {0}", m_login_url);
                break;
            case KEY_IDP_RESPONSE_TIMEOUT:
                m_idp_response_timeout = Integer.parseInt(value);
                if (RedshiftLogger.isEnable())
                    m_log.logDebug("m_idp_response_timeout: {0}", m_idp_response_timeout);
                break;
        }
    }

    @Override
    public void setLogger(RedshiftLogger log)
    {
        m_log = log;
    }

    /** Cache-key component: the login URL (empty string when unset). */
    @Override
    public String getPluginSpecificCacheKey()
    {
        return ((m_login_url != null) ? m_login_url : "");
    }

    /**
     * Runs the full browser flow once and returns the SAML assertion without
     * caching it. Swaps the thread's context class loader for the duration.
     *
     * @throws RedshiftException wrapping any failure in the flow
     */
    @Override
    public String getIdpToken() throws RedshiftException
    {
        String saml = null;

        // Get the current thread and set the context loader with our custom load class method.
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER);

        try
        {
            saml = getSamlAssertion();

            if (RedshiftLogger.isEnable())
                m_log.logDebug("BrowserOktaSAMLCredentialsProvider: got SAML token");
        }
        catch (Exception e)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(e);
            throw new RedshiftException("SAML error: " + e.getMessage(), e);
        }
        finally
        {
            currentThread.setContextClassLoader(cl);
        }

        return saml;
    }

    @Override
    public String getCacheKey()
    {
        String pluginSpecificKey = getPluginSpecificCacheKey();
        return pluginSpecificKey;
    }

    /**
     * Returns a (possibly cached) token holder; on miss/expiry triggers
     * {@link #refresh()} under a lock. Mirrors JwtCredentialsProvider's logic.
     *
     * @throws RedshiftException if no credentials could be obtained
     */
    @Override
    public NativeTokenHolder getCredentials() throws RedshiftException
    {
        NativeTokenHolder credentials = null;

        if(!m_disableCache)
        {
            String key = getCacheKey();
            credentials = m_cache.get(key);
        }

        if (credentials == null || credentials.isExpired())
        {
            if(RedshiftLogger.isEnable())
                m_log.logInfo("SAML getCredentials NOT from cache");

            synchronized(this)
            {
                refresh();

                if(m_disableCache)
                {
                    credentials = m_lastRefreshCredentials;
                    m_lastRefreshCredentials = null;
                }
            }
        }
        else
        {
            credentials.setRefresh(false);
            if(RedshiftLogger.isEnable())
                m_log.logInfo("SAML getCredentials from cache");
        }

        if(!m_disableCache)
        {
            credentials = m_cache.get(getCacheKey());
        }

        if (credentials == null)
        {
            throw new RedshiftException("Unable to get IDP credentials");
        }

        return credentials;
    }

    /**
     * Runs the browser flow and stores the assertion with an EXPIRY_TIME-minute
     * default expiration, under the custom context class loader.
     *
     * @throws RedshiftException wrapping any failure
     */
    @Override
    public void refresh() throws RedshiftException
    {
        // Get the current thread and set the context loader with our custom load class method.
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        Thread.currentThread().setContextClassLoader(CONTEXT_CLASS_LOADER);

        try
        {
            String saml = getSamlAssertion();

            if (RedshiftLogger.isEnable())
                m_log.logDebug("BrowserOktaSAMLCredentialsProvider: refreshed SAML assertion token");

            // Default expiration until server sends actual expirations
            Date expiration = new Date(System.currentTimeMillis() + EXPIRY_TIME * 60 * 1000);
            NativeTokenHolder credentials = NativeTokenHolder.newInstance(saml, expiration);
            credentials.setRefresh(true);

            if(!m_disableCache)
                m_cache.put(getCacheKey(), credentials);
            else
                m_lastRefreshCredentials = credentials;
        }
        catch (Exception e)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(e);
            throw new RedshiftException("SAML error: " + e.getMessage(), e);
        }
        finally
        {
            currentThread.setContextClassLoader(cl);
        }
    }

    /**
     * Validates configuration (login_url present and https; timeout >= 10s;
     * port in 1..65535) then runs {@link #authenticate()}.
     *
     * @return Base64 encoded SAML assertion string
     * @throws IOException wrapping plugin validation or flow errors
     */
    protected String getSamlAssertion() throws IOException
    {
        try
        {
            checkMissingAndThrows(m_login_url, KEY_LOGIN_URL);
            checkAndThrowsWithMessage(
                m_idp_response_timeout < 10,
                KEY_IDP_RESPONSE_TIMEOUT + " should be 10 seconds or greater.");
            checkInvalidAndThrows((m_listen_port < 1 || m_listen_port > 65535), KEY_LISTEN_PORT);
            validateURL(m_login_url);
            return authenticate();
        }
        catch (InternalPluginException ex)
        {
            // Wrap any exception to be compatible with SamlCredentialsProvider API
            throw new IOException(ex);
        }
    }

    /**
     * Authentication consists of:
     * <ol>
     * <li> Start the Socket Server on the port {@link BrowserOktaSAMLCredentialsProvider#m_listen_port}.</li>
     * <li> Open the default browser with the link asking a User to enter the credentials.</li>
     * <li> Retrieve the SAML Assertion string from the response.</li>
     * </ol>
     *
     * @return Base64 encoded SAML Assertion string
     * @throws IOException indicating the error
     */
    private String authenticate() throws IOException
    {
        // Handler extracts the SAMLResponse parameter from the browser's POST-back.
        RequestHandler requestHandler =
            new RequestHandler(new Function<List<NameValuePair>, Object>()
            {
                @Override
                public Object apply(List<NameValuePair> nameValuePairs)
                {
                    if (RedshiftLogger.isEnable())
                    {
                        for (NameValuePair pair : nameValuePairs)
                        {
                            if (pair.getName().equals(SAML_RESPONSE_PARAM_NAME))
                            {
                                // Log only the key, never the assertion value.
                                m_log.logDebug("nameValuePair:name= {0}", SAML_RESPONSE_PARAM_NAME);
                            }
                            else
                            {
                                m_log.logDebug("nameValuePair: {0}", pair);
                            }
                        }
                    }
                    return findParameter(SAML_RESPONSE_PARAM_NAME, nameValuePairs);
                }
            });

        Server server = new Server(m_listen_port, requestHandler, Duration.ofSeconds(m_idp_response_timeout), m_log);
        server.listen();

        if(RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG, String.format("Listening for connection on port %d", m_listen_port));

        try
        {
            openBrowser();
            server.waitForResult();
        }
        catch (IOException ex)
        {
            if (RedshiftLogger.isEnable())
                m_log.logError(ex);
            server.stop();
            throw ex;
        }

        // NOTE(review): waitForResult() is invoked a second time here, after the
        // try block already called it — looks redundant; confirm against Server's contract.
        server.waitForResult();

        Object result = requestHandler.getResult();

        if (result instanceof InternalPluginException)
        {
            if (RedshiftLogger.isEnable())
                m_log.logDebug("Error occurred while fetching SAML assertion: {0}", result);
            throw (InternalPluginException) result;
        }
        if (result instanceof String)
        {
            if(RedshiftLogger.isEnable())
                m_log.log(LogLevel.DEBUG, "Got SAML assertion of length={0}", ((String) result).length());
            return (String) result;
        }

        if (RedshiftLogger.isEnable())
            m_log.logDebug("result: {0}", result);

        throw new InternalPluginException("Fail to login during timeout.");
    }

    /**
     * Opens the default browser with the authorization request to the web service.
     *
     * @throws IOException in case of error
     */
    private void openBrowser() throws IOException
    {
        URI authorizeRequestUrl = URI.create(m_login_url);

        if(RedshiftLogger.isEnable())
            m_log.log(LogLevel.DEBUG,
                String.format("SSO URI: \n%s", authorizeRequestUrl)
            );

        validateURL(authorizeRequestUrl.toString());
        Desktop.getDesktop().browse(authorizeRequestUrl);
    }
}
8,490
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/OktaCredentialsProvider.java
package com.amazon.redshift.plugin;

import com.amazon.redshift.logger.LogLevel;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazonaws.SdkClientException;
import com.amazonaws.util.IOUtils;
import com.amazonaws.util.StringUtils;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.io.StringWriter;
import java.net.URLEncoder;
import java.security.GeneralSecurityException;
import java.util.HashMap;
import java.util.Map;

import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

/**
 * Programmatic (non-browser) Okta SAML provider: authenticates with
 * username/password against Okta's /api/v1/authn endpoint, then fetches the
 * AWS app page with the one-time session token and scrapes the SAMLResponse
 * from the returned HTML form.
 */
public class OktaCredentialsProvider extends SamlCredentialsProvider
{
    // Connection-string keys. Note KEY_APP_URL maps to the "app_id" property.
    private static final String KEY_APP_URL = "app_id";
    private static final String KEY_APP_NAME = "app_name";

    /** Okta application id (required). */
    protected String m_app_id;
    /** Okta application name; defaults to "amazon_aws" when unset. */
    protected String m_app_name;

    /**
     * Captures app_id/app_name in addition to the parent's standard properties.
     */
    @Override
    public void addParameter(String key, String value)
    {
        super.addParameter(key, value);

        if (KEY_APP_URL.equalsIgnoreCase(key))
        {
            m_app_id = value;
        }
        if (KEY_APP_NAME.equalsIgnoreCase(key))
        {
            m_app_name = value;
        }
    }

    /** Cache-key component: concatenation of app id and app name (empty when unset). */
    @Override
    public String getPluginSpecificCacheKey()
    {
        return ((m_app_id != null) ? m_app_id : "")
            + ((m_app_name != null) ? m_app_name : "");
    }

    /**
     * Full Okta flow: validate parameters, authenticate for a session token,
     * then retrieve the SAML assertion. The HTTP client is always closed.
     *
     * @return Base64-encoded SAML assertion string
     * @throws IOException on any authentication/retrieval failure
     */
    @Override
    protected String getSamlAssertion() throws IOException
    {
        checkRequiredParameters();

        if (StringUtils.isNullOrEmpty(m_app_id))
        {
            throw new IOException("Missing required property: " + KEY_APP_URL);
        }

        CloseableHttpClient httpClient = null;

        try
        {
            httpClient = getHttpClient();
            String strOktaSessionToken = oktaAuthentication(httpClient);
            return handleSamlAssertion(httpClient, strOktaSessionToken);
        }
        catch (GeneralSecurityException e)
        {
            throw new SdkClientException("Failed create SSLContext.", e);
        }
        finally
        {
            IOUtils.closeQuietly(httpClient, null);
        }
    }

    /**
     * Authenticates users credentials via Okta, return Okta session token.
     */
    private String oktaAuthentication(CloseableHttpClient httpClient) throws IOException
    {
        CloseableHttpResponse responseAuthenticate = null;

        try
        {
            ObjectMapper mapper = new ObjectMapper();

            //HTTP Post request to Okta API for session token
            String uri = "https://" + m_idpHost + "/api/v1/authn";

            if (RedshiftLogger.isEnable())
                m_log.logDebug("uri: {0}", uri);

            validateURL(uri);
            HttpPost httpost = new HttpPost(uri);
            httpost.addHeader("Accept", "application/json");
            httpost.addHeader("Content-Type", "application/json");
            httpost.addHeader("Cache-Control", "no-cache");

            //construction of JSON request
            Map<String,String> creds = new HashMap<String,String>();
            creds.put("username", m_userName);
            creds.put("password", m_password);
            StringWriter writer = new StringWriter();
            mapper.writeValue(writer, creds);
            StringEntity entity = new StringEntity(writer.toString(), "UTF-8");
            entity.setContentType("application/json");
            httpost.setEntity(entity);
            responseAuthenticate = httpClient.execute(httpost);

            String content = EntityUtils.toString(responseAuthenticate.getEntity());

            if(RedshiftLogger.isEnable())
            {
                // Mask secret-bearing JSON fields before the body ever reaches the log.
                String maskedContent = content.replaceAll(getRegexForJsonKey("sessionToken"), "$1***masked***\"");
                maskedContent = maskedContent.replaceAll(getRegexForJsonKey("id"), "$1***masked***\"");
                maskedContent = maskedContent.replaceAll(getRegexForJsonKey("passwordChanged"), "$1***masked***\"");
                m_log.log(LogLevel.DEBUG, "oktaAuthentication https response:" + maskedContent);
            }

            StatusLine statusLine = responseAuthenticate.getStatusLine();
            int requestStatus = statusLine.getStatusCode();

            if (requestStatus != 200)
            {
                throw new IOException(statusLine.getReasonPhrase());
            }

            //Retrieve and parse the Okta response for session token
            JsonNode json = mapper.readTree(content);
            if ("SUCCESS".equals(json.get("status").asText()))
            {
                return json.get("sessionToken").asText();
            }

            throw new IOException("No session token in the response.");
        }
        finally
        {
            IOUtils.closeQuietly(responseAuthenticate, null);
        }
    }

    /**
     * Retrieves SAML assertion from Okta containing AWS roles.
     */
    private String handleSamlAssertion(CloseableHttpClient httpClient, String oktaSessionToken) throws IOException
    {
        // If no value was specified for m_app_name, use the current default.
        if (StringUtils.isNullOrEmpty(m_app_name))
        {
            m_app_name = "amazon_aws";
        }
        else
        {
            // Ensure that the string is properly encoded.
            m_app_name = URLEncoder.encode(m_app_name, "UTF-8");
        }

        String oktaAWSAppUrl = "https://" + m_idpHost + "/home/" + m_app_name + "/" + m_app_id;
        String oktaAWSAppUrlWithToken = oktaAWSAppUrl + "?onetimetoken=" + oktaSessionToken;

        if (RedshiftLogger.isEnable())
            m_log.logDebug("oktaAWSAppUrl: {0}", oktaAWSAppUrl);

        validateURL(oktaAWSAppUrlWithToken);
        HttpGet httpget = new HttpGet(oktaAWSAppUrlWithToken);
        CloseableHttpResponse responseSAML = httpClient.execute(httpget);
        int requestStatus = responseSAML.getStatusLine().getStatusCode();

        if (requestStatus != 200)
        {
            throw new RuntimeException("Failed : HTTP error code : "
                + responseSAML.getStatusLine().getStatusCode()
                + " : Reason : "
                + responseSAML.getStatusLine().getReasonPhrase());
        }

        String body = EntityUtils.toString(responseSAML.getEntity());

        if (RedshiftLogger.isEnable())
            m_log.logDebug("body: {0}", sanitizeResponseBody(body));

        // Scan the HTML form inputs for the SAMLResponse field (helpers are
        // inherited from SamlCredentialsProvider).
        for (String inputTags : getInputTagsfromHTML(body))
        {
            String name = getValueByKey(inputTags, "name");
            String value = getValueByKey(inputTags, "value");

            if (RedshiftLogger.isEnable())
                m_log.logDebug("name: {0}", name);

            if ("SAMLResponse".equalsIgnoreCase(name))
            {
                // Undo HTML entity encoding of the Base64 payload.
                return value.replace("&#x2b;", "+").replace("&#x3d;", "=");
            }
        }

        throw new IOException("Failed to retrieve SAMLAssertion.");
    }

    /**
     * Masks the SAMLResponse input value in an HTML body so it can be logged safely.
     */
    private String sanitizeResponseBody(String response)
    {
        Document document = Jsoup.parse(response);
        // find input tag with name as SAMLResponse
        Element samlElement = document.selectFirst("input[name=SAMLResponse]");
        if(samlElement != null)
        {
            samlElement.val("***masked***"); // mask the value attribute for this element
            return document.toString();
        }
        return response;
    }
}
8,491
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/utils/RandomStateUtil.java
package com.amazon.redshift.plugin.utils; import java.util.Random; /** * Random state string generating util. */ public class RandomStateUtil { /** * Length of the random state string. */ private static final int DEFAULT_STATE_STRING_LENGTH = 10; /** * Generates random state string 10 char length. * * @return generated randomly. */ public static String generateRandomState() { return generateRandomString(); } /** * @return string generated randomly. */ private static String generateRandomString() { Random random = new Random(System.currentTimeMillis()); StringBuilder buffer = new StringBuilder(DEFAULT_STATE_STRING_LENGTH); for (int i = 0; i < DEFAULT_STATE_STRING_LENGTH; i++) { buffer.append((char) (random.nextInt(26) + 'a')); } return buffer.toString(); } }
8,492
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/utils/ResponseUtils.java
package com.amazon.redshift.plugin;

import org.apache.http.NameValuePair;

import java.util.List;

/**
 * Http Request/Response utils.
 */
public class ResponseUtils
{
    // Static-only helper; never instantiated.
    private ResponseUtils()
    {
    }

    /**
     * Looks up a parameter by exact name in an http request/response
     * {@link NameValuePair} list.
     *
     * @param name name of the parameter to look up
     * @param list parameters to search
     * @return the value of the first matching pair, or null when absent
     */
    public static String findParameter(String name, List<NameValuePair> list)
    {
        return list.stream()
            .filter(pair -> name.equals(pair.getName()))
            .map(NameValuePair::getValue)
            .findFirst()
            .orElse(null);
    }
}
8,493
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/utils/RequestUtils.java
package com.amazon.redshift.plugin.utils;

import com.amazon.redshift.logger.RedshiftLogger;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder;

import java.net.URL;
import java.util.Date;

/**
 * Http Request utils.
 * Helpers for building AWS STS clients (honoring JVM proxy system properties
 * and optional custom endpoints) and for checking credential expiry.
 */
public class RequestUtils
{
    // Static-only helper; never instantiated.
    private RequestUtils()
    {
    }

    /**
     * Builds an STS client. When a custom https endpoint is supplied it is used
     * verbatim (with a null signing region); otherwise the given region is set
     * on the builder. Proxy settings from JVM system properties are applied
     * when http.useProxy is true.
     *
     * @param stsEndpoint optional custom STS endpoint (must be https), may be null/empty
     * @param region      AWS region used when no custom endpoint is given
     * @param builder     STS client builder to configure
     * @param p           credentials provider for the client
     * @param log         driver logger
     * @return configured STS client
     * @throws Exception if the custom endpoint is not an https URL
     */
    public static AWSSecurityTokenService buildSts(String stsEndpoint,
                                                   String region,
                                                   AWSSecurityTokenServiceClientBuilder builder,
                                                   AWSCredentialsProvider p,
                                                   RedshiftLogger log)
        throws Exception
    {
        AWSSecurityTokenService stsSvc;

        ClientConfiguration clientConfig = getProxyClientConfig(log);
        if (clientConfig != null)
        {
            builder.setClientConfiguration(clientConfig);
        }

        if (isCustomStsEndpointUrl(stsEndpoint))
        {
            // Custom endpoint: no signing region is supplied (null).
            EndpointConfiguration endpointConfiguration = new EndpointConfiguration(stsEndpoint, null);
            stsSvc = builder
                .withCredentials(p)
                .withEndpointConfiguration(endpointConfiguration)
                .build();
        }
        else
        {
            builder.setRegion(region);
            stsSvc = builder.withCredentials(p).build();
        }

        return stsSvc;
    }

    /**
     * Reads the JVM proxy system properties (http.useProxy, https.proxyHost,
     * https.proxyPort, http.nonProxyHosts) and builds a matching
     * ClientConfiguration.
     *
     * @param log driver logger
     * @return proxy-aware configuration, or null when http.useProxy is not true
     */
    public static ClientConfiguration getProxyClientConfig(RedshiftLogger log)
    {
        boolean useProxy = false;
        ClientConfiguration clientConfig = null;

        try
        {
            String useProxyStr = System.getProperty("http.useProxy");
            if(useProxyStr != null)
            {
                useProxy = Boolean.parseBoolean(useProxyStr);
            }
        }
        catch(Exception ex)
        {
            // Ignore — reading the property failed; proceed without a proxy.
            if (RedshiftLogger.isEnable())
                log.logError(ex);
        }

        if (useProxy)
        {
            clientConfig = new ClientConfiguration();
            String proxyHost = System.getProperty("https.proxyHost");
            String proxyPort = System.getProperty("https.proxyPort");
            String nonProxyHosts = System.getProperty("http.nonProxyHosts");

            if (proxyHost != null)
                clientConfig.setProxyHost(proxyHost);
            if (nonProxyHosts != null)
                clientConfig.setNonProxyHosts(nonProxyHosts);
            if (proxyPort != null)
                clientConfig.setProxyPort(Integer.parseInt(proxyPort));

            if (RedshiftLogger.isEnable())
                log.logDebug(
                    String.format("useProxy: %s proxyHost: %s proxyPort:%s nonProxyHosts:%s",
                        useProxy, proxyHost, proxyPort, nonProxyHosts));
        }
        else
        {
            if (RedshiftLogger.isEnable())
                log.logDebug(
                    String.format("useProxy: %s", useProxy));
        }

        return clientConfig;
    }

    /**
     * @return true when a non-empty https endpoint is given; false when
     *         null/empty; throws for any non-https protocol.
     */
    private static boolean isCustomStsEndpointUrl(String stsEndpoint) throws Exception
    {
        boolean isCustomStsEndPoint = false;

        if(stsEndpoint != null && !stsEndpoint.isEmpty())
        {
            URL aUrl = new URL(stsEndpoint);
            String protocol = aUrl.getProtocol();
            if(protocol != null && protocol.equals("https"))
            {
                isCustomStsEndPoint = true;
            }
            else
            {
                throw new Exception("Only https STS URL is supported:" + stsEndpoint);
            }
        }

        return isCustomStsEndPoint;
    }

    /*
     * Checks expiry for credential.
     * Note that this method returns true (i.e. credential is "expired") 1 minute before actual expiry time - This
     * arbitrary buffer has been added to accommodate corner cases and allow enough time for retries if implemented.
     *
     * Returns true (i.e. credential is "expired") if expiry time is null.
     */
    public static boolean isCredentialExpired(Date expiryTime)
    {
        // We preemptively conclude the credential as expired 1 minute before actual expiry.
        return expiryTime==null || expiryTime.before(new Date(System.currentTimeMillis() + 1000 * 60));
    }
}
8,494
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/utils/CheckUtils.java
package com.amazon.redshift.plugin.utils;

import com.amazon.redshift.plugin.InternalPluginException;

import java.io.IOException;

import static com.amazonaws.util.StringUtils.isNullOrEmpty;

/**
 * Validation helpers for plugin parameter checks. Each method throws an
 * {@link InternalPluginException} when its condition indicates a bad parameter.
 */
public class CheckUtils
{
    private CheckUtils()
    {
        // Static helpers only; never instantiated.
    }

    /**
     * Fails when a required property is null or empty.
     *
     * @param parameter     the property value to inspect
     * @param parameterName the property name used in the error message
     * @throws InternalPluginException when {@code parameter} is missing
     */
    public static void checkMissingAndThrows(String parameter, String parameterName)
        throws InternalPluginException
    {
        if (!isNullOrEmpty(parameter))
        {
            return;
        }
        throw new InternalPluginException("Missing required property: " + parameterName);
    }

    /**
     * Fails when the supplied invalid-value condition holds.
     *
     * @param condition     true when the property value is invalid
     * @param parameterName the property name used in the error message
     * @throws InternalPluginException when {@code condition} is true
     */
    public static void checkInvalidAndThrows(boolean condition, String parameterName)
        throws InternalPluginException
    {
        if (!condition)
        {
            return;
        }
        throw new InternalPluginException("Invalid property value: " + parameterName);
    }

    /**
     * Fails with a caller-supplied message when the condition holds.
     *
     * @param condition true when the check failed
     * @param message   the exception message to raise
     * @throws InternalPluginException when {@code condition} is true
     */
    public static void checkAndThrowsWithMessage(boolean condition, String message)
        throws InternalPluginException
    {
        if (!condition)
        {
            return;
        }
        throw new InternalPluginException(message);
    }
}
8,495
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/httpserver/ValidHttpRequestHandler.java
package com.amazon.redshift.plugin.httpserver; import org.apache.http.*; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.protocol.HttpContext; import org.apache.http.protocol.HttpRequestHandler; import java.io.IOException; import java.nio.charset.StandardCharsets; /** * Return valid HTML for all requests. */ public class ValidHttpRequestHandler implements HttpRequestHandler { private static final String VALID_RESPONSE = "<!DOCTYPE html><html><body>" + "<p style=\"font: italic bold 30px Arial,sans-serif; background-color: #fff;" + "color:#202c2d;text-shadow:0 1px #808d93,-1px 0 #cdd2d5,-3px 4px #cdd2d5;\">" + "Thank you for using Amazon Redshift! You can now close this window.</p>" + "</body></html>"; @Override public void handle(HttpRequest request, HttpResponse response, HttpContext context) throws HttpException, IOException { response.setEntity(new StringEntity(VALID_RESPONSE, StandardCharsets.UTF_8)); response.setHeader(HttpHeaders.CONTENT_TYPE, ContentType.TEXT_HTML.withCharset(StandardCharsets.UTF_8).toString()); response.setStatusCode(HttpStatus.SC_OK); } }
8,496
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/httpserver/RequestHandler.java
package com.amazon.redshift.plugin.httpserver; import org.apache.http.*; import org.apache.http.client.utils.URLEncodedUtils; import org.apache.http.message.BasicHttpEntityEnclosingRequest; import org.apache.http.protocol.HttpContext; import org.apache.http.protocol.HttpRequestHandler; import java.io.IOException; import java.util.List; import java.util.function.Function; /** * Post http request handler. * Responsible on showing "Complete request" page. */ public class RequestHandler implements HttpRequestHandler { /** * String containing the path. */ public static final String REDSHIFT_PATH = "/redshift/"; /** * String containing the supported Rest API. */ private static final String SUPPORTED_METHOD = "POST"; /** * Instance of Function. */ private final Function<List<NameValuePair>, Object> m_requestProcessLogic; /** * Instance of HttpRequestHandler for invalid requests. */ private final HttpRequestHandler m_invalidRequestHandler; /** * Instance of HttpRequestHandler for valid requests. */ private final HttpRequestHandler m_validRequestHandler; /** * Result object. */ private Object m_result; /** * Constructor. * * @param requestProcessLogic Function with List of NameValuePair. */ public RequestHandler(Function<List<NameValuePair>, Object> requestProcessLogic) { this.m_requestProcessLogic = requestProcessLogic; this.m_invalidRequestHandler = new InvalidHttpRequestHandler(); this.m_validRequestHandler = new ValidHttpRequestHandler(); } @Override public void handle(HttpRequest request, HttpResponse response, HttpContext context) throws HttpException, IOException { if (isRequestValid(request)) { m_result = m_requestProcessLogic.apply( URLEncodedUtils.parse(((BasicHttpEntityEnclosingRequest) request).getEntity())); m_validRequestHandler.handle(request, response, context); } else { m_invalidRequestHandler.handle(request, response, context); } } /** * Check METHOD and path. 
* * @param request {@linkplain HttpRequest} */ private boolean isRequestValid(HttpRequest request) { RequestLine requestLine = request.getRequestLine(); if (!SUPPORTED_METHOD.equalsIgnoreCase(requestLine.getMethod())) { return false; } return requestLine.getUri().startsWith(REDSHIFT_PATH); } /** * @return the result object. */ public Object getResult() { return m_result; } /** * @return true if result is not null. */ public boolean hasResult() { return m_result != null; } }
8,497
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/httpserver/InternalServerException.java
package com.amazon.redshift.plugin.httpserver;

import com.amazon.redshift.plugin.InternalPluginException;

/**
 * Wrapper exception for http server errors.
 * <p>
 * Thread can`t throw any checked exceptions from run(), so it needs to be wrapped into RuntimeException.
 */
public class InternalServerException extends InternalPluginException
{
    /**
     * Constructor.
     *
     * @param cause Throwable object preserved as the root cause.
     */
    public InternalServerException(Throwable cause)
    {
        super(cause);
    }

    /**
     * Convenience factory that wraps an arbitrary exception in this type.
     *
     * @param exceptionToWrap Exception object.
     * @return instance of this class carrying {@code exceptionToWrap} as its cause.
     */
    public static InternalServerException wrap(Exception exceptionToWrap)
    {
        return new InternalServerException(exceptionToWrap);
    }
}
8,498
0
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin
Create_ds/amazon-redshift-jdbc-driver/src/main/java/com/amazon/redshift/plugin/httpserver/InvalidHttpRequestHandler.java
package com.amazon.redshift.plugin.httpserver; import org.apache.http.*; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.protocol.HttpContext; import org.apache.http.protocol.HttpRequestHandler; import java.io.IOException; import java.nio.charset.StandardCharsets; /** * Return invalid HTML for all requests. */ public class InvalidHttpRequestHandler implements HttpRequestHandler { private static final String INVALID_RESPONSE = "<!DOCTYPE html><html><body><p>The request could not be understood by the server!</p></body></html>"; @Override public void handle(HttpRequest request, HttpResponse response, HttpContext context) throws HttpException, IOException { response.setEntity(new StringEntity(INVALID_RESPONSE, StandardCharsets.UTF_8)); response.setHeader( HttpHeaders.CONTENT_TYPE, ContentType.TEXT_HTML.withCharset(StandardCharsets.UTF_8).toString()); response.setStatusCode(HttpStatus.SC_BAD_REQUEST); } }
8,499