index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/SessionManager.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;


/**
 * Implements a session manager to refresh the session id.
 *
 * {@link AzkabanClient} needs this class to periodically refresh
 * the session id when the current session has expired. Please refer
 * to {@link AzkabanClient#refreshSession}.
 */
public interface SessionManager {

  /**
   * Get a session id using the Azkaban authentication mechanism.
   *
   * @return session id
   * @throws AzkabanClientException if a session cannot be obtained from Azkaban
   */
  String fetchSession() throws AzkabanClientException;
}
3,400
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanClientException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.service.modules.orchestration; import java.io.IOException; /** * Exception raised by {@link AzkabanClient}. */ public class AzkabanClientException extends IOException { private static final long serialVersionUID = 11324144L; public AzkabanClientException(String message, Throwable e) { super(message, e); } public AzkabanClientException(String message) { super(message); } }
3,401
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanAjaxAPIClient.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import java.io.File;
import java.io.IOException;
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Map;
import java.util.Random;

import org.apache.commons.codec.EncoderException;
import org.apache.commons.codec.net.URLCodec;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.entity.mime.MultipartEntityBuilder;
import org.apache.http.impl.client.BasicCookieStore;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.ssl.SSLContextBuilder;
import org.apache.http.ssl.TrustStrategy;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
import com.google.common.collect.Maps;

import lombok.Cleanup;
import lombok.extern.slf4j.Slf4j;


/**
 * Static helper around Azkaban's AJAX API: authentication, project create / replace / delete,
 * permission and proxy-user management, zip upload, scheduling and execution.
 *
 * This format of azkaban client is obsolete. Please use {@link AzkabanClient} as the new alternative.
 */
// NOTE: the class javadoc must precede the annotations; previously it was placed between
// @Deprecated and the class declaration, where javadoc tooling does not attach it.
@Slf4j
@Deprecated
public class AzkabanAjaxAPIClient {
  private static final Splitter SPLIT_ON_COMMA = Splitter.on(",").omitEmptyStrings().trimResults();

  // TODO: Ensure GET call urls do not grow too big
  private static final int LOW_NETWORK_TRAFFIC_BEGIN_HOUR = 17;
  private static final int LOW_NETWORK_TRAFFIC_END_HOUR = 22;
  private static final int JOB_START_DELAY_MINUTES = 5;
  private static final long MILLISECONDS_IN_HOUR = 60 * 60 * 1000;
  private static final URLCodec codec = new URLCodec();

  /** Utility class; not meant to be instantiated. */
  private AzkabanAjaxAPIClient() {
  }

  /***
   * Authenticate a user and obtain a session.id from response. Once a session.id has been obtained,
   * until the session expires, this id can be used to do any API requests with a proper permission granted.
   * A session expires if user log's out, changes machine, browser or location, if Azkaban is restarted,
   * or if the session expires. The default session timeout is 24 hours (one day). User can re-login irrespective
   * of whether the session has expired or not. For the same user, a new session will always override the old one.
   * @param username Username.
   * @param password Password.
   * @param azkabanServerUrl Azkaban Server Url.
   * @return Session Id.
   * @throws IOException
   * @throws EncoderException
   */
  public static String authenticateAndGetSessionId(String username, String password, String azkabanServerUrl)
      throws IOException, EncoderException {
    // Create post request
    Map<String, String> params = Maps.newHashMap();
    params.put("action", "login");
    params.put("username", username);
    // Password is URL-encoded here because preparePostRequest does not encode parameter values.
    params.put("password", codec.encode(password));

    return executePostRequest(preparePostRequest(azkabanServerUrl, null, params)).get("session.id");
  }

  /***
   * Get project.id for a Project Name.
   * @param sessionId Session Id.
   * @param azkabanProjectConfig Azkaban Project Config.
   * @return Project Id.
   * @throws IOException
   */
  public static String getProjectId(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
      throws IOException {
    // Note: Every get call to Azkaban provides a projectId in response, so we have are using fetchProjectFlows call
    // .. because it does not need any additional params other than project name
    Map<String, String> params = Maps.newHashMap();
    params.put("ajax", "fetchprojectflows");
    params.put("project", azkabanProjectConfig.getAzkabanProjectName());

    return executeGetRequest(prepareGetRequest(azkabanProjectConfig.getAzkabanServerUrl() + "/manager", sessionId,
        params)).get("projectId");
  }

  /***
   * Creates an Azkaban project and uploads the zip file. If proxy user and group permissions are specified in
   * Azkaban Project Config, then this method also adds it to the project configuration.
   * @param sessionId Session Id.
   * @param zipFilePath Zip file to upload.
   * @param azkabanProjectConfig Azkaban Project Config.
   * @return Project Id.
   * @throws IOException
   */
  public static String createAzkabanProject(String sessionId, String zipFilePath,
      AzkabanProjectConfig azkabanProjectConfig) throws IOException {
    Map<String, String> params = Maps.newHashMap();
    // NOTE(review): "ajax=executeFlow" looks copy-pasted from executeAzkabanProject; the create
    // action appears to be driven by the "?action=create" query parameter below — confirm against
    // the Azkaban AJAX API before changing.
    params.put("ajax", "executeFlow");
    params.put("name", azkabanProjectConfig.getAzkabanProjectName());
    params.put("description", azkabanProjectConfig.getAzkabanProjectDescription());

    executePostRequest(preparePostRequest(azkabanProjectConfig.getAzkabanServerUrl() + "/manager?action=create",
        sessionId, params));

    // Add proxy user if any
    if (azkabanProjectConfig.getAzkabanUserToProxy().isPresent()) {
      Iterable<String> proxyUsers = SPLIT_ON_COMMA.split(azkabanProjectConfig.getAzkabanUserToProxy().get());
      for (String user : proxyUsers) {
        addProxyUser(sessionId, azkabanProjectConfig.getAzkabanServerUrl(),
            azkabanProjectConfig.getAzkabanProjectName(), user);
      }
    }

    // Add group permissions if any
    // TODO: Support users (not just groups), and different permission types
    // (though we can add users, we only support groups at the moment and award them with admin permissions)
    if (StringUtils.isNotBlank(azkabanProjectConfig.getAzkabanGroupAdminUsers())) {
      String[] groups = StringUtils.split(azkabanProjectConfig.getAzkabanGroupAdminUsers(), ",");
      for (String group : groups) {
        addUserPermission(sessionId, azkabanProjectConfig.getAzkabanServerUrl(),
            azkabanProjectConfig.getAzkabanProjectName(), group, true, true, false, false, false, false);
      }
    }

    // Upload zip file to azkaban and return projectId
    return uploadZipFileToAzkaban(sessionId, azkabanProjectConfig.getAzkabanServerUrl(),
        azkabanProjectConfig.getAzkabanProjectName(), zipFilePath);
  }

  /***
   * Deletes an Azkaban project.
   * @param sessionId Session Id.
   * @param azkabanProjectConfig Azkaban Project Config.
   * @throws IOException
   */
  public static void deleteAzkabanProject(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
      throws IOException {
    Map<String, String> params = Maps.newHashMap();
    params.put("delete", "true");
    params.put("project", azkabanProjectConfig.getAzkabanProjectName());

    executeGetRequest(prepareGetRequest(azkabanProjectConfig.getAzkabanServerUrl() + "/manager", sessionId, params));
  }

  /***
   * Replace an existing Azkaban Project. If proxy user and group permissions are specified in
   * Azkaban Project Config, then this method also adds it to the project configuration.
   * @param sessionId Session Id.
   * @param zipFilePath Zip file to upload.
   * @param azkabanProjectConfig Azkaban Project Config.
   * @return Project Id.
   * @throws IOException
   */
  public static String replaceAzkabanProject(String sessionId, String zipFilePath,
      AzkabanProjectConfig azkabanProjectConfig) throws IOException {
    // Change project description
    changeProjectDescription(sessionId, azkabanProjectConfig.getAzkabanServerUrl(),
        azkabanProjectConfig.getAzkabanProjectName(), azkabanProjectConfig.getAzkabanProjectDescription());

    // Add proxy user if any
    // Note: 1. We cannot remove previous proxy-user because there is no way to read it from Azkaban
    //       2. Adding same proxy user multiple times is a non-issue
    if (azkabanProjectConfig.getAzkabanUserToProxy().isPresent()) {
      Iterable<String> proxyUsers = SPLIT_ON_COMMA.split(azkabanProjectConfig.getAzkabanUserToProxy().get());
      for (String user : proxyUsers) {
        addProxyUser(sessionId, azkabanProjectConfig.getAzkabanServerUrl(),
            azkabanProjectConfig.getAzkabanProjectName(), user);
      }
    }

    // Add group permissions if any
    // TODO: Support users (not just groups), and different permission types
    // Note: 1. We cannot remove previous group-user because there is no way to read it from Azkaban
    //       2. Adding same group-user will return an error message, but we will ignore it
    // (though we can add users, we only support groups at the moment and award them with admin permissions)
    if (StringUtils.isNotBlank(azkabanProjectConfig.getAzkabanGroupAdminUsers())) {
      String[] groups = StringUtils.split(azkabanProjectConfig.getAzkabanGroupAdminUsers(), ",");
      for (String group : groups) {
        try {
          addUserPermission(sessionId, azkabanProjectConfig.getAzkabanServerUrl(),
              azkabanProjectConfig.getAzkabanProjectName(), group, true, true, false, false, false, false);
        } catch (IOException e) {
          // Ignore if group already exists, we cannot list existing groups; so its okay to attempt adding exiting
          // .. groups
          if (!"Group permission already exists.".equalsIgnoreCase(e.getMessage())) {
            throw e;
          }
        }
      }
    }

    // Upload zip file to azkaban and return projectId
    return uploadZipFileToAzkaban(sessionId, azkabanProjectConfig.getAzkabanServerUrl(),
        azkabanProjectConfig.getAzkabanProjectName(), zipFilePath);
  }

  /** Register {@code proxyUser} as a proxy user on the given project (idempotent on Azkaban's side). */
  private static void addProxyUser(String sessionId, String azkabanServerUrl, String azkabanProjectName,
      String proxyUser) throws IOException {
    // Create get request (adding same proxy user multiple times is a non-issue, Azkaban handles it)
    Map<String, String> params = Maps.newHashMap();
    params.put("ajax", "addProxyUser");
    params.put("project", azkabanProjectName);
    params.put("name", proxyUser);

    executeGetRequest(prepareGetRequest(azkabanServerUrl + "/manager", sessionId, params));
  }

  /** Add a user / group permission entry to the given project. */
  private static void addUserPermission(String sessionId, String azkabanServerUrl, String azkabanProjectName,
      String name, boolean isGroup, boolean adminPermission, boolean readPermission, boolean writePermission,
      boolean executePermission, boolean schedulePermission) throws IOException {
    // NOTE: We are not listing the permissions before adding them, because Azkaban in its current state only
    // .. returns user permissions and not group permissions
    // Create get request (adding same normal user permission multiple times will throw an error, but we cannot
    // list whole list of permissions anyways)
    Map<String, String> params = Maps.newHashMap();
    params.put("ajax", "addPermission");
    params.put("project", azkabanProjectName);
    params.put("name", name);
    params.put("group", Boolean.toString(isGroup));
    params.put("permissions[admin]", Boolean.toString(adminPermission));
    params.put("permissions[read]", Boolean.toString(readPermission));
    params.put("permissions[write]", Boolean.toString(writePermission));
    params.put("permissions[execute]", Boolean.toString(executePermission));
    params.put("permissions[schedule]", Boolean.toString(schedulePermission));

    executeGetRequest(prepareGetRequest(azkabanServerUrl + "/manager", sessionId, params));
  }

  /***
   * Schedule the Azkaban Project to run with a schedule.
   * @param sessionId Session Id.
   * @param azkabanProjectId Project Id.
   * @param azkabanProjectConfig Azkaban Project Config.
   * @throws IOException
   */
  public static void scheduleAzkabanProject(String sessionId, String azkabanProjectId,
      AzkabanProjectConfig azkabanProjectConfig) throws IOException {
    Map<String, String> params = Maps.newHashMap();
    params.put("ajax", "scheduleFlow");
    params.put("projectName", azkabanProjectConfig.getAzkabanProjectName());
    params.put("flow", azkabanProjectConfig.getAzkabanProjectFlowName());
    params.put("projectId", azkabanProjectId);
    params.put("scheduleTime", getScheduledTimeInAzkabanFormat(LOW_NETWORK_TRAFFIC_BEGIN_HOUR,
        LOW_NETWORK_TRAFFIC_END_HOUR, JOB_START_DELAY_MINUTES));
    params.put("scheduleDate", getScheduledDateInAzkabanFormat());
    params.put("is_recurring", "off");
    // Run once OR push down schedule (TODO: Enable when push down is finalized)
    // if (azkabanProjectConfig.isScheduled()) {
    //   params.put("is_recurring", "on");
    //   params.put("period", "1d");
    // } else {
    //   params.put("is_recurring", "off");
    // }

    executePostRequest(preparePostRequest(azkabanProjectConfig.getAzkabanServerUrl() + "/schedule", sessionId,
        params));
  }

  /** Change the description text of an existing project. */
  private static void changeProjectDescription(String sessionId, String azkabanServerUrl, String azkabanProjectName,
      String projectDescription) throws IOException {
    String encodedProjectDescription;
    try {
      encodedProjectDescription = new URLCodec().encode(projectDescription);
    } catch (EncoderException e) {
      throw new IOException("Could not encode Azkaban project description", e);
    }

    Map<String, String> params = Maps.newHashMap();
    params.put("ajax", "changeDescription");
    params.put("project", azkabanProjectName);
    params.put("description", encodedProjectDescription);

    executeGetRequest(prepareGetRequest(azkabanServerUrl + "/manager", sessionId, params));
  }

  /***
   * Execute an existing Azkaban project.
   * @param sessionId Session Id.
   * @param azkabanProjectId Project Id.
   * @param azkabanProjectConfig Azkaban Project Config.
   * @throws IOException
   */
  public static void executeAzkabanProject(String sessionId, String azkabanProjectId,
      AzkabanProjectConfig azkabanProjectConfig) throws IOException {
    Map<String, String> params = Maps.newHashMap();
    params.put("ajax", "executeFlow");
    params.put("project", azkabanProjectConfig.getAzkabanProjectName());
    params.put("flow", azkabanProjectConfig.getAzkabanProjectFlowName());

    executePostRequest(preparePostRequest(azkabanProjectConfig.getAzkabanServerUrl() + "/executor", sessionId,
        params));
  }

  /** Build a GET request with session.id and params appended to the query string. */
  private static HttpGet prepareGetRequest(String requestUrl, String sessionId, Map<String, String> params)
      throws IOException {
    // Create get request
    // NOTE(review): parameter values are not URL-encoded here; callers must pass pre-encoded
    // values if they may contain reserved characters — confirm before changing.
    StringBuilder stringEntityBuilder = new StringBuilder();
    stringEntityBuilder.append(String.format("?session.id=%s", sessionId));
    for (Map.Entry<String, String> entry : params.entrySet()) {
      stringEntityBuilder.append(String.format("&%s=%s", entry.getKey(), entry.getValue()));
    }

    return new HttpGet(requestUrl + stringEntityBuilder);
  }

  /** Build a form-encoded POST request carrying session.id and params in the body. */
  private static HttpPost preparePostRequest(String requestUrl, String sessionId, Map<String, String> params)
      throws IOException {
    // Create post request
    HttpPost postRequest = new HttpPost(requestUrl);

    StringBuilder stringEntityBuilder = new StringBuilder();
    stringEntityBuilder.append(String.format("session.id=%s", sessionId));
    for (Map.Entry<String, String> entry : params.entrySet()) {
      if (stringEntityBuilder.length() > 0) {
        stringEntityBuilder.append("&");
      }
      stringEntityBuilder.append(String.format("%s=%s", entry.getKey(), entry.getValue()));
    }

    StringEntity input = new StringEntity(stringEntityBuilder.toString());
    input.setContentType("application/x-www-form-urlencoded");
    postRequest.setEntity(input);
    postRequest.setHeader("X-Requested-With", "XMLHttpRequest");

    return postRequest;
  }

  @VisibleForTesting
  protected static Map<String, String> executeGetRequest(HttpGet getRequest) throws IOException {
    // Make the call, get response
    @Cleanup CloseableHttpClient httpClient = getHttpClient();
    HttpResponse response = httpClient.execute(getRequest);

    return AzkabanClient.handleResponse(response);
  }

  @VisibleForTesting
  protected static Map<String, String> executePostRequest(HttpPost postRequest) throws IOException {
    // Make the call, get response
    @Cleanup CloseableHttpClient httpClient = getHttpClient();
    HttpResponse response = httpClient.execute(postRequest);

    return AzkabanClient.handleResponse(response);
  }

  /** Upload the project zip via a multipart POST; returns the projectId Azkaban reports back. */
  private static String uploadZipFileToAzkaban(String sessionId, String azkabanServerUrl, String azkabanProjectName,
      String jobZipFile) throws IOException {
    // Create post request
    HttpPost postRequest = new HttpPost(azkabanServerUrl + "/manager");
    HttpEntity entity = MultipartEntityBuilder
        .create()
        .addTextBody("session.id", sessionId)
        .addTextBody("ajax", "upload")
        .addBinaryBody("file", new File(jobZipFile), ContentType.create("application/zip"),
            azkabanProjectName + ".zip")
        .addTextBody("project", azkabanProjectName)
        .build();
    postRequest.setEntity(entity);

    // Make the call, get response
    @Cleanup CloseableHttpClient httpClient = getHttpClient();
    HttpResponse response = httpClient.execute(postRequest);

    // Obtaining projectId is hard. Uploading zip file is one avenue to get it from Azkaban
    return AzkabanClient.handleResponse(response).get("projectId");
  }

  /** Build a one-shot HTTP client that trusts self-signed certificates. */
  private static CloseableHttpClient getHttpClient() throws IOException {
    try {
      // Self sign SSL
      SSLContextBuilder builder = new SSLContextBuilder();
      builder.loadTrustMaterial(null, (TrustStrategy) new TrustSelfSignedStrategy());
      SSLConnectionSocketFactory sslsf = new SSLConnectionSocketFactory(builder.build());

      // Create client
      return HttpClients.custom().setSSLSocketFactory(sslsf).setDefaultCookieStore(new BasicCookieStore()).build();
    } catch (NoSuchAlgorithmException | KeyManagementException | KeyStoreException e) {
      throw new IOException("Issue with creating http client", e);
    }
  }

  /***
   * Generate a random scheduled time between specified execution time window in the Azkaban compatible format
   * which is: hh,mm,a,z Eg. ScheduleTime=12,00,PM,PDT
   *
   * @param windowStartHour Window start hour in 24 hr (HH) format (inclusive)
   * @param windowEndHour Window end hour in 24 hr (HH) format (exclusive)
   * @param delayMinutes If current time is within window, then additional delay for bootstrapping if desired
   * @return Scheduled time string of the format hh,mm,a,z
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DMI_RANDOM_USED_ONLY_ONCE",
      justification = "As expected for randomization")
  public static String getScheduledTimeInAzkabanFormat(int windowStartHour, int windowEndHour, int delayMinutes) {
    // Validate
    if (windowStartHour < 0 || windowEndHour > 23 || windowStartHour >= windowEndHour) {
      throw new IllegalArgumentException("Window start should be less than window end, and both should be between "
          + "0 and 23");
    }
    if (delayMinutes < 0 || delayMinutes > 59) {
      throw new IllegalArgumentException("Delay in minutes should be between 0 and 59 (inclusive)");
    }

    // Setup window
    Calendar windowStartTime = Calendar.getInstance();
    windowStartTime.set(Calendar.HOUR_OF_DAY, windowStartHour);
    windowStartTime.set(Calendar.MINUTE, 0);
    windowStartTime.set(Calendar.SECOND, 0);

    Calendar windowEndTime = Calendar.getInstance();
    windowEndTime.set(Calendar.HOUR_OF_DAY, windowEndHour);
    windowEndTime.set(Calendar.MINUTE, 0);
    windowEndTime.set(Calendar.SECOND, 0);

    // Check if current time is between windowStartTime and windowEndTime, then let the execution happen
    // after delayMinutes minutes
    Calendar now = Calendar.getInstance();
    if (now.after(windowStartTime) && now.before(windowEndTime)) {
      // Azkaban takes a few seconds / a minute to bootstrap,
      // so extra few minutes get the first execution to run instantly
      now.add(Calendar.MINUTE, delayMinutes);

      return new SimpleDateFormat("hh,mm,a,z").format(now.getTime());
    }

    // Current time is not between windowStartTime and windowEndTime, so get random execution time for next day
    int allowedSchedulingWindow =
        (int) ((windowEndTime.getTimeInMillis() - windowStartTime.getTimeInMillis()) / MILLISECONDS_IN_HOUR);
    // Fix: previously two Randoms were both seeded with System.currentTimeMillis(); seeded within
    // the same millisecond they yield identical sequences, correlating hour and minute. Use one
    // Random (default seed already incorporates the current time) for both draws.
    Random random = new Random();
    int randomHourInWindow = random.nextInt(allowedSchedulingWindow);
    int randomMinute = random.nextInt(60);

    windowStartTime.add(Calendar.HOUR, randomHourInWindow);
    windowStartTime.set(Calendar.MINUTE, randomMinute);

    return new SimpleDateFormat("hh,mm,a,z").format(windowStartTime.getTime());
  }

  /** Current date in Azkaban's MM/dd/yyyy schedule format, e.g. ScheduleDate=07/22/2014. */
  private static String getScheduledDateInAzkabanFormat() {
    return new SimpleDateFormat("MM/dd/yyyy").format(new Date());
  }
}
3,402
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanProjectConfig.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import java.util.List;
import java.util.Optional;

import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.ToString;

import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.util.ConfigUtils;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;


/***
 * Class to hold Azkaban project specific configs
 */
// NOTE: the class javadoc must precede the annotations; previously it was placed between the
// lombok annotations and the class declaration, where javadoc tooling does not attach it.
@Getter
@ToString
@AllArgsConstructor
@Builder(builderMethodName = "hiddenBuilder")
public class AzkabanProjectConfig {
  private final String azkabanServerUrl;
  private final String azkabanProjectName;
  private final String azkabanProjectDescription;
  private final String azkabanProjectFlowName;
  private final String azkabanGroupAdminUsers;
  private final Optional<String> azkabanUserToProxy;
  private final Optional<List<String>> azkabanZipJarNames;
  private final Optional<String> azkabanZipJarUrlTemplate;
  private final Optional<String> azkabanZipJarVersion;
  private final Optional<List<String>> azkabanZipAdditionalFiles;
  private final Boolean failIfJarNotFound;
  private final JobSpec jobSpec;

  public static final String USER_TO_PROXY = "user.to.proxy";

  /**
   * Build the project config from a {@link JobSpec}, falling back to the default Azkaban
   * project config file for any key the spec does not provide.
   */
  public AzkabanProjectConfig(JobSpec jobSpec) {
    // Extract config objects
    this.jobSpec = jobSpec;
    Config defaultConfig = ConfigFactory.load(ServiceAzkabanConfigKeys.DEFAULT_AZKABAN_PROJECT_CONFIG_FILE);
    Config config = jobSpec.getConfig().withFallback(defaultConfig);

    // Azkaban Infrastructure
    this.azkabanServerUrl = config.getString(ServiceAzkabanConfigKeys.AZKABAN_SERVER_URL_KEY);

    // Azkaban Project Metadata
    this.azkabanProjectName = constructProjectName(jobSpec, config);
    this.azkabanProjectDescription = config.getString(ServiceAzkabanConfigKeys.AZKABAN_PROJECT_DESCRIPTION_KEY);
    this.azkabanProjectFlowName = config.getString(ServiceAzkabanConfigKeys.AZKABAN_PROJECT_FLOW_NAME_KEY);
    this.azkabanGroupAdminUsers =
        ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_GROUP_ADMINS_KEY, "");
    this.azkabanUserToProxy = Optional.ofNullable(
        ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_USER_TO_PROXY_KEY, null));

    // Azkaban Project Zip
    this.azkabanZipJarNames = Optional.ofNullable(
        ConfigUtils.getStringList(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_ZIP_JAR_NAMES_KEY));
    this.azkabanZipJarUrlTemplate = Optional.ofNullable(
        ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_ZIP_JAR_URL_TEMPLATE_KEY, null));
    this.azkabanZipJarVersion = Optional.ofNullable(
        ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_ZIP_JAR_VERSION_KEY, null));
    if (config.hasPath(ServiceAzkabanConfigKeys.AZKABAN_PROJECT_ZIP_ADDITIONAL_FILE_URLS_KEY)
        && StringUtils.isNotBlank(
            config.getString(ServiceAzkabanConfigKeys.AZKABAN_PROJECT_ZIP_ADDITIONAL_FILE_URLS_KEY))) {
      this.azkabanZipAdditionalFiles = Optional.ofNullable(
          ConfigUtils.getStringList(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_ZIP_ADDITIONAL_FILE_URLS_KEY));
    } else {
      this.azkabanZipAdditionalFiles = Optional.empty();
    }
    this.failIfJarNotFound =
        ConfigUtils.getBoolean(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_ZIP_FAIL_IF_JARNOTFOUND_KEY, false);
  }

  /**
   * Derive a deterministic Azkaban project name from an optional configured prefix and the
   * job spec URI (sanitized to Azkaban-legal characters), trimmed to Azkaban's 64-char limit.
   */
  public static String constructProjectName(JobSpec jobSpec, Config config) {
    String projectNamePrefix =
        ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_NAME_PREFIX_KEY, "");
    String projectNamePostfix = null == jobSpec.getUri() ? ""
        : jobSpec.getUri().toString().replaceAll("_", "-").replaceAll("[^A-Za-z0-9\\-]", "_");

    return trimProjectName(String.format("%s_%s", projectNamePrefix, projectNamePostfix));
  }

  /***
   * Get Azkaban project zip file name
   * @return Azkaban project zip file name
   */
  public String getAzkabanProjectZipFilename() {
    return String.format("%s.zip", azkabanProjectName);
  }

  /***
   * Get Azkaban project working directory, generated by prefixing a temp name
   * @return Azkaban project working directory
   */
  public String getWorkDir() {
    return String.format("%s/%s/%s/%s", System.getProperty("user.dir"), "serviceAzkaban", azkabanProjectName,
        System.currentTimeMillis());
  }

  /** Trim a project name to Azkaban's 64-char limit, keeping it deterministic via hashCode. */
  private static String trimProjectName(String projectName) {
    // Azkaban does not support name greater than 64 chars, so limit it to 64 chars
    if (projectName.length() > 64) {
      // We are using string.hashcode() so that for same path the generated project name is same (and hence checking
      // .. for path duplicates is deterministic. Using UUID or currentMillis will produce different shortened path
      // .. for the same path every time)
      int pathHash = projectName.hashCode();
      if (pathHash < 0) {
        // Fix: "pathHash *= -1" overflows for Integer.MIN_VALUE and stays negative, producing an
        // illegal '-' in the name; clamp that single case to MAX_VALUE instead.
        pathHash = (pathHash == Integer.MIN_VALUE) ? Integer.MAX_VALUE : -pathHash;
      }
      projectName = String.format("%s_%s", projectName.substring(0, 53), pathHash);
    }

    return projectName;
  }
}
3,403
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanSpecProducer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Future;

import org.apache.commons.codec.EncoderException;
import org.apache.commons.lang3.StringUtils;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.util.CompletedFuture;
import org.apache.gobblin.util.ConfigUtils;
import org.slf4j.Logger;

import com.google.common.base.Optional;
import com.typesafe.config.Config;

import lombok.extern.slf4j.Slf4j;


/**
 * A {@link SpecProducer} that materializes specs as Azkaban projects: it authenticates against
 * Azkaban once at construction time and then creates, replaces, executes or deletes projects
 * through {@link AzkabanJobHelper} / {@link AzkabanAjaxAPIClient}.
 */
@Slf4j
public class AzkabanSpecProducer implements SpecProducer<Spec>, Closeable {
  // Session Id for GaaS User
  private String _sessionId;
  private Config _config;

  public AzkabanSpecProducer(Config config, Optional<Logger> log) {
    this._config = config;
    try {
      // Initialize Azkaban client / producer and cache credentials
      String azkabanUsername = _config.getString(ServiceAzkabanConfigKeys.AZKABAN_USERNAME_KEY);
      String azkabanPassword = getAzkabanPassword(_config);
      String azkabanServerUrl = _config.getString(ServiceAzkabanConfigKeys.AZKABAN_SERVER_URL_KEY);

      _sessionId = AzkabanAjaxAPIClient.authenticateAndGetSessionId(azkabanUsername, azkabanPassword,
          azkabanServerUrl);
    } catch (IOException | EncoderException e) {
      throw new RuntimeException("Could not authenticate with Azkaban", e);
    }
  }

  /** Resolve the Azkaban password, preferring the JVM system property over the config key. */
  private String getAzkabanPassword(Config config) {
    if (StringUtils.isNotBlank(System.getProperty(ServiceAzkabanConfigKeys.AZKABAN_PASSWORD_SYSTEM_KEY))) {
      return System.getProperty(ServiceAzkabanConfigKeys.AZKABAN_PASSWORD_SYSTEM_KEY);
    }

    return ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PASSWORD_KEY, StringUtils.EMPTY);
  }

  public AzkabanSpecProducer(Config config, Logger log) {
    this(config, Optional.of(log));
  }

  /** Constructor with no logging */
  public AzkabanSpecProducer(Config config) {
    this(config, Optional.<Logger>absent());
  }

  @Override
  public void close() throws IOException {
  }

  @Override
  public Future<?> addSpec(Spec addedSpec) {
    try {
      AzkabanProjectConfig azkabanProjectConfig = new AzkabanProjectConfig((JobSpec) addedSpec);
      boolean azkabanProjectExists = AzkabanJobHelper.isAzkabanJobPresent(_sessionId, azkabanProjectConfig);

      if (azkabanProjectExists) {
        // Project already exists: just execute it
        log.info("Executing Azkaban Project: " + azkabanProjectConfig.getAzkabanProjectName());
        AzkabanJobHelper.executeJob(_sessionId, AzkabanJobHelper.getProjectId(_sessionId, azkabanProjectConfig),
            azkabanProjectConfig);
      } else {
        // Project does not exist (or was deleted): create and schedule it
        log.info("Setting up Azkaban Project: " + azkabanProjectConfig.getAzkabanProjectName());

        // Deleted project also returns true if-project-exists check, so optimistically first create the project
        // .. (it will create project if it was never created or deleted), if project exists it will fail with
        // .. appropriate exception message, catch that and run in replace project mode if force overwrite is
        // .. specified
        try {
          createNewAzkabanProject(_sessionId, azkabanProjectConfig);
        } catch (IOException e) {
          if ("Project already exists.".equalsIgnoreCase(e.getMessage())) {
            if (ConfigUtils.getBoolean(((JobSpec) addedSpec).getConfig(),
                ServiceAzkabanConfigKeys.AZKABAN_PROJECT_OVERWRITE_IF_EXISTS_KEY, false)) {
              log.info("Project already exists for this Spec, but force overwrite specified");
              updateExistingAzkabanProject(_sessionId, azkabanProjectConfig);
            } else {
              log.info(String.format("Azkaban project already exists: " + "%smanager?project=%s",
                  azkabanProjectConfig.getAzkabanServerUrl(), azkabanProjectConfig.getAzkabanProjectName()));
            }
          } else {
            throw e;
          }
        }
      }
    } catch (IOException e) {
      throw new RuntimeException("Issue in setting up Azkaban project.", e);
    }

    return new CompletedFuture<>(_config, null);
  }

  @Override
  public Future<?> updateSpec(Spec updatedSpec) {
    // Re-create project
    AzkabanProjectConfig azkabanProjectConfig = new AzkabanProjectConfig((JobSpec) updatedSpec);
    try {
      updateExistingAzkabanProject(_sessionId, azkabanProjectConfig);
    } catch (IOException e) {
      throw new RuntimeException("Issue in setting up Azkaban project.", e);
    }

    return new CompletedFuture<>(_config, null);
  }

  @Override
  public Future<?> deleteSpec(URI deletedSpecURI, Properties headers) {
    // Delete project
    JobSpec jobSpec = new JobSpec.Builder(deletedSpecURI).build();
    try {
      AzkabanJobHelper.deleteAzkabanJob(_sessionId, new AzkabanProjectConfig(jobSpec));
    } catch (IOException e) {
      throw new RuntimeException("Issue in deleting Azkaban project.", e);
    }

    // Fix: previously this method unconditionally threw UnsupportedOperationException AFTER a
    // successful delete, so callers always saw failure. Return a completed future like
    // addSpec/updateSpec do.
    return new CompletedFuture<>(_config, null);
  }

  @Override
  public Future<? extends List<Spec>> listSpecs() {
    throw new UnsupportedOperationException();
  }

  /** Create the Azkaban project for this config and put it on the low-traffic-window schedule. */
  private void createNewAzkabanProject(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
      throws IOException {
    // Create Azkaban Job
    String azkabanProjectId = AzkabanJobHelper.createAzkabanJob(sessionId, azkabanProjectConfig);

    // Schedule Azkaban Job
    AzkabanJobHelper.scheduleJob(sessionId, azkabanProjectId, azkabanProjectConfig);

    log.info(String.format("Azkaban project created: %smanager?project=%s",
        azkabanProjectConfig.getAzkabanServerUrl(), azkabanProjectConfig.getAzkabanProjectName()));
  }

  /** Replace the zip of an existing project and refresh its schedule. */
  private void updateExistingAzkabanProject(String sessionId, AzkabanProjectConfig azkabanProjectConfig)
      throws IOException {
    log.info(String.format("Updating project: %smanager?project=%s", azkabanProjectConfig.getAzkabanServerUrl(),
        azkabanProjectConfig.getAzkabanProjectName()));

    // Get project Id
    String azkabanProjectId = AzkabanJobHelper.getProjectId(sessionId, azkabanProjectConfig);

    // Replace Azkaban Job
    AzkabanJobHelper.replaceAzkabanJob(sessionId, azkabanProjectId, azkabanProjectConfig);

    // Change schedule
    AzkabanJobHelper.changeJobSchedule(sessionId, azkabanProjectId, azkabanProjectConfig);
  }
}
3,404
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanClient.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

import com.github.rholder.retry.AttemptTimeLimiters;
import com.github.rholder.retry.RetryException;
import com.github.rholder.retry.Retryer;
import com.github.rholder.retry.RetryerBuilder;
import com.github.rholder.retry.StopStrategies;
import com.github.rholder.retry.WaitStrategies;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.io.Closer;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import lombok.Builder;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
import org.apache.http.ssl.SSLContextBuilder;
import org.apache.http.ssl.TrustStrategy;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * A simple http based client that uses Ajax API to communicate with Azkaban server.
 *
 * <p>All public operations are executed through a {@link Retryer} that retries up to
 * three times on {@link InvalidSessionException}, refreshing the session between
 * attempts (see {@link AzkabanMultiCallables}).
 *
 * @see <a href="https://azkaban.github.io/azkaban/docs/latest/#ajax-api">
 *   https://azkaban.github.io/azkaban/docs/latest/#ajax-api
 * </a>
 */
public class AzkabanClient implements Closeable {

  protected final String username;
  protected final String password;
  protected final String url;
  protected final long sessionExpireInMin; // default value is 12h.
  protected SessionManager sessionManager;
  protected String sessionId;
  protected long sessionCreationTime = 0;
  protected CloseableHttpClient httpClient;
  private ExecutorService executorService;
  private Closer closer = Closer.create();
  private Retryer<AzkabanClientStatus> retryer;
  private static Logger log = LoggerFactory.getLogger(AzkabanClient.class);
  private Duration requestTimeout;

  /**
   * Child class should have a different builderMethodName.
   *
   * <p>Any of {@code httpClient}, {@code sessionManager}, {@code executorService} and
   * {@code requestTimeout} may be null; sensible defaults are created lazily. A failure
   * to fetch the initial session is logged but does not fail construction — the session
   * will be (re)fetched on first use via the retry path.
   */
  @Builder
  protected AzkabanClient(String username,
                          String password,
                          String url,
                          long sessionExpireInMin,
                          CloseableHttpClient httpClient,
                          SessionManager sessionManager,
                          ExecutorService executorService,
                          Duration requestTimeout) throws AzkabanClientException {
    this.username = username;
    this.password = password;
    this.url = url;
    this.sessionExpireInMin = sessionExpireInMin;
    this.httpClient = httpClient;
    this.sessionManager = sessionManager;
    this.executorService = executorService;
    this.requestTimeout = ObjectUtils.defaultIfNull(requestTimeout, Duration.ofSeconds(10));

    this.initializeClient();
    this.initializeSessionManager();
    this.initializeExecutorService();

    this.retryer = RetryerBuilder.<AzkabanClientStatus>newBuilder()
        .retryIfExceptionOfType(InvalidSessionException.class)
        .withAttemptTimeLimiter(AttemptTimeLimiters.fixedTimeLimit(this.requestTimeout.toMillis(),
            TimeUnit.MILLISECONDS, this.executorService))
        .withWaitStrategy(WaitStrategies.exponentialWait(60, TimeUnit.SECONDS))
        .withStopStrategy(StopStrategies.stopAfterAttempt(3))
        .build();

    try {
      this.sessionId = this.sessionManager.fetchSession();
    } catch (Exception e) {
      // Best effort: remember the failure (negative creation time) and keep going.
      this.sessionId = null;
      this.sessionCreationTime = -1;
      log.error("Failed to fetch session in constructor due to: ", e);
      return;
    }
    this.sessionCreationTime = System.nanoTime();
  }

  /** Creates the default http client if the caller did not supply one. */
  private void initializeClient() throws AzkabanClientException {
    if (this.httpClient == null) {
      this.httpClient = createHttpClient();
      this.closer.register(this.httpClient);
    }
  }

  /** Creates the default {@link AzkabanSessionManager} if the caller did not supply one. */
  private void initializeSessionManager() {
    if (sessionManager == null) {
      this.sessionManager = new AzkabanSessionManager(this.httpClient, this.url, this.username, this.password);
    }
  }

  /** Creates the default executor (used by the attempt time limiter) if none was supplied. */
  private void initializeExecutorService() {
    // Renamed from the misspelled "intializeExecutorService" (private, no external callers).
    if (this.executorService == null) {
      this.executorService = Executors.newFixedThreadPool(30);
    }
  }

  /**
   * Create a {@link CloseableHttpClient} used to communicate with Azkaban server.
   * To use a differently configured http client, pass one to the builder instead.
   *
   * @return A closeable http client.
   */
  private CloseableHttpClient createHttpClient() throws AzkabanClientException {
    try {
      // SSLSocketFactory using custom TrustStrategy that ignores warnings about untrusted certificates
      // Self sign SSL
      SSLContextBuilder sslcb = new SSLContextBuilder();
      sslcb.loadTrustMaterial(null, (TrustStrategy) new TrustSelfSignedStrategy());
      SSLConnectionSocketFactory sslsf = new SSLConnectionSocketFactory(sslcb.build());

      HttpClientBuilder builder = HttpClientBuilder.create();
      RequestConfig requestConfig = RequestConfig.copy(RequestConfig.DEFAULT)
          .setSocketTimeout((int) this.requestTimeout.toMillis())
          .setConnectTimeout((int) this.requestTimeout.toMillis())
          .setConnectionRequestTimeout((int) this.requestTimeout.toMillis())
          .build();

      builder.disableCookieManagement()
          .useSystemProperties()
          .setDefaultRequestConfig(requestConfig)
          .setConnectionManager(new BasicHttpClientConnectionManager())
          .setSSLSocketFactory(sslsf);

      return builder.build();
    } catch (Exception e) {
      throw new AzkabanClientException("HttpClient cannot be created", e);
    }
  }

  /**
   * When current session expired, use {@link SessionManager} to refresh the session id.
   *
   * @param forceRefresh fetch a new session even if the current one has not expired yet
   */
  void refreshSession(boolean forceRefresh) throws AzkabanClientException {
    Preconditions.checkArgument(this.sessionCreationTime != 0);
    boolean expired = (System.nanoTime() - this.sessionCreationTime)
        > Duration.ofMinutes(this.sessionExpireInMin).toNanos();
    if (expired) {
      log.debug("Session expired. Generating a new session.");
    } else if (forceRefresh) {
      log.info("Force to refresh session. Generating a new session.");
    }
    if (expired || forceRefresh) {
      this.sessionId = this.sessionManager.fetchSession();
      this.sessionCreationTime = System.nanoTime();
    }
  }

  /**
   * Convert a {@link HttpResponse} to a <string, string> map.
   * Put protected modifier here so it is visible to {@link AzkabanAjaxAPIClient}.
   *
   * @param response An http response returned by {@link org.apache.http.client.HttpClient} execution.
   *                 This should be JSON string.
   * @return A map composed by the first level of KV pair of json object
   */
  protected static Map<String, String> handleResponse(HttpResponse response) throws IOException {
    verifyStatusCode(response);
    JsonObject json = getResponseJson(response);
    return getFlatMap(json);
  }

  /** Deserializes the JSON body of {@code response} into an instance of {@code responseClass}. */
  protected static <T> T handleResponse(HttpResponse response, Class<T> responseClass) throws IOException {
    verifyStatusCode(response);
    JsonObject json = getResponseJson(response);
    Gson gson = new Gson();
    return gson.fromJson(json, responseClass);
  }

  /** Reads and consumes the response entity, parses it as JSON and checks for Azkaban-level errors. */
  private static JsonObject getResponseJson(HttpResponse response) throws IOException {
    HttpEntity entity = null;
    String jsonResponseString;
    try {
      entity = response.getEntity();
      jsonResponseString = IOUtils.toString(entity.getContent(), "UTF-8");
      log.debug("Response string: {}", jsonResponseString);
    } catch (Exception e) {
      throw new AzkabanClientException("Cannot convert response to a string", e);
    } finally {
      if (entity != null) {
        // Always release the connection, even when reading the body failed.
        EntityUtils.consume(entity);
      }
    }
    return parseResponse(jsonResponseString);
  }

  /** Fails unless the HTTP status is 200 (OK) or 201 (Created). */
  protected static void verifyStatusCode(HttpResponse response) throws AzkabanClientException {
    int code = response.getStatusLine().getStatusCode();
    if (code != HttpStatus.SC_CREATED && code != HttpStatus.SC_OK) {
      log.error("Failed : HTTP error code : " + response.getStatusLine().getStatusCode());
      throw new AzkabanClientException("Failed : HTTP error code : " + response.getStatusLine().getStatusCode());
    }
  }

  /** Flattens the first level of a json object into a string-to-string map (quotes stripped). */
  static Map<String, String> getFlatMap(JsonObject jsonObject) {
    if (jsonObject == null) {
      return null;
    }
    Map<String, String> responseMap = new HashMap<>();
    for (Map.Entry<String, JsonElement> entry : jsonObject.entrySet()) {
      responseMap.put(entry.getKey(), entry.getValue().toString().replaceAll("\"", ""));
    }
    return responseMap;
  }

  /** Parses the raw response body, returning null on a blank body. */
  static JsonObject parseResponse(String jsonResponseString) throws IOException {
    if (!StringUtils.isNotBlank(jsonResponseString)) {
      return null;
    }
    JsonObject jsonObject = new JsonParser().parse(jsonResponseString).getAsJsonObject();
    handleResponseError(jsonObject);
    return jsonObject;
  }

  /** Translates Azkaban's ad-hoc error markers into the appropriate exception type. */
  private static void handleResponseError(JsonObject jsonObject) throws IOException {
    // Azkaban does not has a standard for error messages tag
    if (null != jsonObject.get(AzkabanClientParams.STATUS) && AzkabanClientParams.ERROR
        .equalsIgnoreCase(jsonObject.get(AzkabanClientParams.STATUS).toString().replaceAll("\"", ""))) {
      String message = (null != jsonObject.get(AzkabanClientParams.MESSAGE))
          ? jsonObject.get(AzkabanClientParams.MESSAGE).toString().replaceAll("\"", "")
          : "Unknown issue";
      if (message.contains("Invalid Session")) {
        // Triggers a session refresh + retry in the retryer.
        throw new InvalidSessionException(message);
      }
      throw new IOException(message);
    }

    if (null != jsonObject.get(AzkabanClientParams.ERROR)) {
      String error = jsonObject.get(AzkabanClientParams.ERROR).toString().replaceAll("\"", "");
      throw new AzkabanClientException(error);
    }
  }

  /**
   * Creates a project.
   *
   * @param projectName project name
   * @param description project description
   *
   * @return A status object indicating if AJAX request is successful.
   */
  public AzkabanClientStatus createProject(String projectName, String description) throws AzkabanClientException {
    AzkabanMultiCallables.CreateProjectCallable callable =
        AzkabanMultiCallables.CreateProjectCallable.builder()
            .client(this)
            .projectName(projectName)
            .description(description)
            .build();

    return runWithRetry(callable, AzkabanClientStatus.class);
  }

  /**
   * Deletes a project. Currently no response message will be returned after finishing
   * the delete operation. Thus success status is always expected.
   *
   * @param projectName project name
   *
   * @return A status object indicating if AJAX request is successful.
   */
  public AzkabanClientStatus deleteProject(String projectName) throws AzkabanClientException {
    AzkabanMultiCallables.DeleteProjectCallable callable =
        AzkabanMultiCallables.DeleteProjectCallable.builder()
            .client(this)
            .projectName(projectName)
            .build();

    return runWithRetry(callable, AzkabanClientStatus.class);
  }

  /**
   * Checks if the project with specified name exists in Azkaban
   */
  public Boolean projectExists(String projectName) throws AzkabanClientException {
    try {
      fetchProjectFlows(projectName);
      return true;
    } catch (AzkabanClientException e) {
      // Azkaban does not return a strongly typed error code, so we are checking the message.
      // Guard against a null cause/message (previously this could NPE and mask the real error).
      Throwable cause = e.getCause();
      String message = (cause != null) ? cause.getMessage() : e.getMessage();
      if (message != null && message.contains("doesn't exist")) {
        return false;
      } else {
        throw e;
      }
    }
  }

  /**
   * Updates a project by uploading a new zip file. Before uploading any project zip files,
   * the project should be created first.
   *
   * @param projectName project name
   * @param zipFile zip file
   *
   * @return A status object indicating if AJAX request is successful.
   */
  public AzkabanClientStatus uploadProjectZip(String projectName, File zipFile) throws AzkabanClientException {
    AzkabanMultiCallables.UploadProjectCallable callable =
        AzkabanMultiCallables.UploadProjectCallable.builder()
            .client(this)
            .projectName(projectName)
            .zipFile(zipFile)
            .build();

    return runWithRetry(callable, AzkabanClientStatus.class);
  }

  /**
   * Execute a flow by providing flow parameters and options. The project and flow should be created first.
   *
   * @param projectName project name
   * @param flowName flow name
   * @param flowOptions flow options
   * @param flowParameters flow parameters
   *
   * @return The status object which contains success status and execution id.
   */
  public AzkabanExecuteFlowStatus executeFlowWithOptions(String projectName, String flowName,
      Map<String, String> flowOptions, Map<String, String> flowParameters) throws AzkabanClientException {
    AzkabanMultiCallables.ExecuteFlowCallable callable =
        AzkabanMultiCallables.ExecuteFlowCallable.builder()
            .client(this)
            .projectName(projectName)
            .flowName(flowName)
            .flowOptions(flowOptions)
            .flowParameters(flowParameters)
            .build();

    return runWithRetry(callable, AzkabanExecuteFlowStatus.class);
  }

  /**
   * Execute a flow with flow parameters. The project and flow should be created first.
   *
   * @param projectName project name
   * @param flowName flow name
   * @param flowParameters flow parameters
   *
   * @return The status object which contains success status and execution id.
   */
  public AzkabanExecuteFlowStatus executeFlow(String projectName, String flowName,
      Map<String, String> flowParameters) throws AzkabanClientException {
    return executeFlowWithOptions(projectName, flowName, null, flowParameters);
  }

  /**
   * Cancel a flow by execution id.
   */
  public AzkabanClientStatus cancelFlow(String execId) throws AzkabanClientException {
    AzkabanMultiCallables.CancelFlowCallable callable =
        AzkabanMultiCallables.CancelFlowCallable.builder()
            .client(this)
            .execId(execId)
            .build();

    return runWithRetry(callable, AzkabanClientStatus.class);
  }

  /**
   * Fetch an execution log.
   */
  public AzkabanClientStatus fetchExecutionLog(String execId, String jobId, long offset, long length,
      OutputStream logStream) throws AzkabanClientException {
    AzkabanMultiCallables.FetchExecLogCallable callable =
        AzkabanMultiCallables.FetchExecLogCallable.builder()
            .client(this)
            .execId(execId)
            .jobId(jobId)
            .offset(offset)
            .length(length)
            .output(logStream)
            .build();

    return runWithRetry(callable, AzkabanClientStatus.class);
  }

  /**
   * Given an execution id, fetches all the detailed information of that execution,
   * including a list of all the job executions.
   *
   * @param execId execution id to be fetched.
   *
   * @return The status object which contains success status and all the detailed
   *         information of that execution.
   */
  public AzkabanFetchExecuteFlowStatus fetchFlowExecution(String execId) throws AzkabanClientException {
    AzkabanMultiCallables.FetchFlowExecCallable callable =
        AzkabanMultiCallables.FetchFlowExecCallable.builder()
            .client(this)
            .execId(execId)
            .build();

    return runWithRetry(callable, AzkabanFetchExecuteFlowStatus.class);
  }

  /**
   * Returns a list of flow ids in a specified project.
   *
   * @param projectName name of the project.
   */
  public AzkabanProjectFlowsStatus fetchProjectFlows(String projectName) throws AzkabanClientException {
    AzkabanMultiCallables.FetchProjectFlowsCallable callable =
        AzkabanMultiCallables.FetchProjectFlowsCallable.builder()
            .client(this)
            .projectName(projectName)
            .build();

    return runWithRetry(callable, AzkabanProjectFlowsStatus.class);
  }

  /**
   * Given a project and user, add that user as a proxy user in the project.
   *
   * @param projectName project name
   * @param proxyUserName proxy user
   *
   * @return A status object indicating if AJAX request is successful.
   */
  public AzkabanClientStatus addProxyUser(String projectName, String proxyUserName) throws AzkabanClientException {
    AzkabanMultiCallables.AddProxyUserCallable callable =
        AzkabanMultiCallables.AddProxyUserCallable.builder()
            .client(this)
            .projectName(projectName)
            .proxyUserName(proxyUserName)
            .build();

    return runWithRetry(callable, AzkabanClientStatus.class);
  }

  /**
   * Get the list of proxy users for a given project.
   *
   * @param projectName project name
   *
   * @return {@link AzkabanGetProxyUsersStatus} containing the response map.
   *         The response should have a key "proxyUsers"
   *         which will be in the format "[user1, user2, user3]"
   */
  public AzkabanGetProxyUsersStatus getProxyUsers(String projectName) throws AzkabanClientException {
    AzkabanMultiCallables.GetProxyUserCallable callable =
        AzkabanMultiCallables.GetProxyUserCallable.builder()
            .client(this)
            .projectName(projectName)
            .build();

    return runWithRetry(callable, AzkabanGetProxyUsersStatus.class);
  }

  /**
   * Runs a callable through the retryer and narrows the returned status to {@code cls}.
   *
   * <p>The raw {@code Callable} is kept because the individual callables are declared with
   * heterogeneous status subtypes; the unchecked retryer invocation is suppressed at this
   * smallest possible scope.
   */
  @SuppressWarnings("unchecked")
  private <T> T runWithRetry(Callable callable, Class<T> cls) throws AzkabanClientException {
    try {
      AzkabanClientStatus status = this.retryer.call(callable);
      if (cls.isAssignableFrom(status.getClass())) {
        // Class.cast is the type-safe equivalent of the previous unchecked (T) cast.
        return cls.cast(status);
      }
      throw new AzkabanClientException(String.format("Unexpected response type, expected: %s actual: %s",
          cls, status.getClass()));
    } catch (ExecutionException e) {
      Throwables.propagateIfPossible(e.getCause(), AzkabanClientException.class);
      // Bug fix: a checked, non-Azkaban cause used to fall through to an
      // UnreachableStatementException that dropped the original exception entirely.
      throw new AzkabanClientException("ExecutionException occurred ", e);
    } catch (RetryException e) {
      throw new AzkabanClientException("RetryException occurred ", e);
    }
  }

  @Override
  public void close() throws IOException {
    this.closer.close();
  }
}
3,405
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/InvalidSessionException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

/**
 * Used by {@link AzkabanClient} to indicate current session is invalid.
 *
 * <p>The client's retryer retries on this exception type specifically, so throwing it
 * (rather than a plain {@link AzkabanClientException}) triggers a session refresh.
 */
public class InvalidSessionException extends AzkabanClientException {
  // Explicit serialVersionUID, consistent with the parent AzkabanClientException,
  // so serialization compatibility does not depend on compiler-generated defaults.
  private static final long serialVersionUID = 11324145L;

  /**
   * @param message human-readable detail (usually Azkaban's "Invalid Session" message)
   * @param e underlying cause
   */
  public InvalidSessionException(String message, Exception e) {
    super(message, e);
  }

  /** @param message human-readable detail */
  public InvalidSessionException(String message) {
    super(message);
  }
}
3,406
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/service/modules/orchestration/AzkabanClientParams.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.service.modules.orchestration;

/**
 * A collection of attributes used by {@link AzkabanClient} to form an HTTP request,
 * and parse the HTTP response. More details can be found at
 * {@linktourl https://azkaban.github.io/azkaban/docs/latest/#ajax-api}
 */
public class AzkabanClientParams {
  public static final String ACTION = "action";
  public static final String USERNAME = "username";
  public static final String PASSWORD = "password";
  public static final String SESSION_ID = "session.id";
  public static final String NAME = "name";
  public static final String DELETE = "delete";
  public static final String DESCRIPTION = "description";
  public static final String PROJECT = "project";
  public static final String FLOW = "flow";
  public static final String AJAX = "ajax";
  public static final String CONCURRENT_OPTION = "concurrentOption";
  public static final String MESSAGE = "message";
  public static final String STATUS = "status";
  public static final String ERROR = "error";
  public static final String EXECID = "execid";
  public static final String JOBID = "jobId";
  public static final String DATA = "data";
  public static final String OFFSET = "offset";
  public static final String LENGTH = "length";

  // Constants holder: prevent instantiation (the implicit public constructor served no purpose).
  private AzkabanClientParams() {
  }
}
3,407
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management/retention/DatasetCleanerJob.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import java.io.IOException;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

import azkaban.jobExecutor.AbstractJob;
import azkaban.utils.Props;


/**
 * Job to run {@link org.apache.gobblin.data.management.retention.DatasetCleaner} in Azkaban or Hadoop.
 */
public class DatasetCleanerJob extends AbstractJob implements Tool {

  private Configuration conf;
  // Null when constructed via the id-only constructor (command-line/ToolRunner path);
  // run() is a no-op in that case until run(String[]) instantiates a configured job.
  private DatasetCleaner datasetCleaner;

  public static void main(String[] args) throws Exception {
    // Bug fix: propagate the Tool exit code. Previously the return value of
    // ToolRunner.run was discarded, so the process exited 0 even on usage errors.
    System.exit(ToolRunner.run(new DatasetCleanerJob(DatasetCleanerJob.class.getName()), args));
  }

  public DatasetCleanerJob(String id) throws Exception {
    super(id, Logger.getLogger(DatasetCleanerJob.class));
  }

  public DatasetCleanerJob(String id, Properties props) throws IOException {
    super(id, Logger.getLogger(DatasetCleanerJob.class));
    this.conf = new Configuration();
    this.datasetCleaner = new DatasetCleaner(FileSystem.get(getConf()), props);
  }

  public DatasetCleanerJob(String id, Props props) throws IOException {
    super(id, Logger.getLogger(DatasetCleanerJob.class));
    this.conf = new Configuration();
    this.datasetCleaner = new DatasetCleaner(FileSystem.get(getConf()), props.toProperties());
  }

  /** Runs the configured cleaner, always closing it afterwards. */
  @Override
  public void run() throws Exception {
    if (this.datasetCleaner != null) {
      try {
        this.datasetCleaner.clean();
      } finally {
        this.datasetCleaner.close();
      }
    }
  }

  /**
   * {@link Tool} entry point: first argument is the properties file to load.
   *
   * @return 0 on success, 1 on usage error
   */
  @Override
  public int run(String[] args) throws Exception {
    if (args.length < 1) {
      // Usage errors belong on stderr, not stdout.
      System.err.println("Must provide properties file as first argument.");
      return 1;
    }
    Props props = new Props(null, args[0]);
    new DatasetCleanerJob(DatasetCleanerJob.class.getName(), props).run();
    return 0;
  }

  @Override
  public void setConf(Configuration configuration) {
    this.conf = configuration;
  }

  @Override
  public Configuration getConf() {
    return this.conf;
  }
}
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management/retention/Avro2OrcStaleDatasetCleaner.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.retention;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.log4j.Logger;

import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import azkaban.jobExecutor.AbstractJob;

import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset;
import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDatasetFinder;
import org.apache.gobblin.data.management.conversion.hive.events.EventConstants;
import org.apache.gobblin.data.management.conversion.hive.validation.ValidationJob;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.copy.hive.HiveUtils;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.ConfigUtils;


/**
 * Azkaban job that removes stale sibling directories left behind by Avro-to-ORC conversion:
 * for every registered partition whose leaf directory is a unix-timestamp, any sibling
 * directory that is not the registered location and is older than the grace period is deleted.
 */
public class Avro2OrcStaleDatasetCleaner extends AbstractJob {
  // Bug fix (copy-paste): the logger was previously created with ValidationJob.class,
  // mislabeling every log line from this job.
  private static final Logger log = Logger.getLogger(Avro2OrcStaleDatasetCleaner.class);
  private static final String HIVE_PARTITION_DELETION_GRACE_TIME_IN_DAYS = "hive.partition.deletion.graceTime.inDays";
  private static final String DEFAULT_HIVE_PARTITION_DELETION_GRACE_TIME_IN_DAYS = "2";
  // Millisecond unix timestamps are 13 digits long (was a local UPPER_SNAKE variable).
  private static final int TIME_STAMP_LENGTH = 13;
  private final MetricContext metricContext;
  private final EventSubmitter eventSubmitter;
  private final ConvertibleHiveDatasetFinder datasetFinder;
  private static final String HIVE_DATASET_CONFIG_AVRO_PREFIX = "hive.conversion.avro";
  private final FileSystem fs;
  private final long graceTimeInMillis;

  public Avro2OrcStaleDatasetCleaner(String jobId, Properties props) throws IOException {
    super(jobId, log);
    props.setProperty(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, HIVE_DATASET_CONFIG_AVRO_PREFIX);
    this.graceTimeInMillis = TimeUnit.DAYS.toMillis(Long.parseLong(props
        .getProperty(HIVE_PARTITION_DELETION_GRACE_TIME_IN_DAYS, DEFAULT_HIVE_PARTITION_DELETION_GRACE_TIME_IN_DAYS)));
    Config config = ConfigFactory.parseProperties(props);
    this.fs = FileSystem.newInstance(new Configuration());
    // NOTE(review): ValidationJob.class here looks copy-pasted as well, but changing it
    // would rename emitted metrics downstream — TODO confirm before fixing.
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), ValidationJob.class);
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, EventConstants.CONVERSION_NAMESPACE).build();
    this.datasetFinder = new ConvertibleHiveDatasetFinder(this.fs, props, this.eventSubmitter);
  }

  @Override
  public void run() throws Exception {
    Iterator<HiveDataset> iterator = this.datasetFinder.getDatasetsIterator();
    while (iterator.hasNext()) {
      ConvertibleHiveDataset hiveDataset = (ConvertibleHiveDataset) iterator.next();
      try (AutoReturnableObject<IMetaStoreClient> client = hiveDataset.getClientPool().getClient()) {
        Set<Partition> sourcePartitions =
            new HashSet<>(HiveUtils.getPartitions(client.get(), hiveDataset.getTable(), Optional.<String>absent()));

        // Only partitions whose leaf directory name is a millisecond timestamp participate;
        // for each, delete stale sibling directories that are not the registered location.
        sourcePartitions.parallelStream().filter(partition -> isUnixTimeStamp(partition.getDataLocation().getName()))
            .forEach(partition -> {
              Arrays.stream(listFiles(partition.getDataLocation().getParent())).filter(
                  fileStatus -> !fileStatus.getPath().toString()
                      .equalsIgnoreCase(partition.getDataLocation().toString())).forEach(fileStatus -> {
                deletePath(fileStatus, this.graceTimeInMillis, true);
              });
            });
      }
    }
  }

  /** Lists a directory, logging and returning an empty array on failure (best effort). */
  private FileStatus[] listFiles(Path path) {
    try {
      return this.fs.listStatus(path);
    } catch (IOException e) {
      // Typo fix in the log message ("Unalbe" -> "Unable").
      log.error("Unable to list files for directory " + path, e);
      return new FileStatus[0];
    }
  }

  /**
   * Deletes the given path if it is older than the grace period.
   * Deletion failures are logged, not propagated, since this is a best-effort cleanup.
   */
  private void deletePath(FileStatus fileStatus, long graceTimeInMillis, boolean recursively) {
    long modificationTime = fileStatus.getModificationTime();
    long currentTime = System.currentTimeMillis();
    if ((currentTime - modificationTime) < 0) {
      log.error("Modification time cannot be greater than current time: " + fileStatus.getPath());
      return;
    }
    if ((currentTime - modificationTime) < graceTimeInMillis) {
      log.info("Modification time is still within grace time for deletion: " + fileStatus.getPath());
      return;
    }
    try {
      this.fs.delete(fileStatus.getPath(), recursively);
      log.info("Deleted path " + fileStatus.getPath());
    } catch (IOException e) {
      log.error("Unable to delete directory " + fileStatus.getPath(), e);
    }
  }

  /**
   * Check if a given string is a valid unixTimeStamp (13 digits, parseable as a long).
   */
  private static boolean isUnixTimeStamp(String timeStamp) {
    if (timeStamp.length() != TIME_STAMP_LENGTH) {
      return false;
    }
    try {
      Long.parseLong(timeStamp);
      return true;
    } catch (NumberFormatException e) {
      return false;
    }
  }
}
3,409
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management/trash/TrashCollectorJob.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.data.management.trash;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

import azkaban.jobExecutor.AbstractJob;
import azkaban.utils.Props;


/**
 * Job to run {@link Trash} cleanup in Azkaban or Hadoop.
 */
public class TrashCollectorJob extends AbstractJob implements Tool {

  private Configuration conf;
  private Trash trash;

  public static void main(String[] args) throws Exception {
    // FIX: propagate the tool's exit status; previously the return value of ToolRunner.run was
    // discarded, so a failed run still exited with code 0.
    System.exit(ToolRunner.run(new TrashCollectorJob(TrashCollectorJob.class.getName()), args));
  }

  /** Constructor used by {@link ToolRunner}; conf and trash are initialized in {@link #run(String[])}. */
  public TrashCollectorJob(String id) {
    super(id, Logger.getLogger(TrashCollectorJob.class));
  }

  /**
   * Constructor used when running as an Azkaban job.
   *
   * @param id job id.
   * @param props job properties containing trash configuration.
   * @throws IOException if the {@link Trash} instance cannot be created.
   */
  public TrashCollectorJob(String id, Props props) throws IOException {
    super(id, Logger.getLogger(TrashCollectorJob.class));
    this.conf = new Configuration();
    this.trash = createTrash(props);
  }

  /** Builds the {@link Trash} instance from {@link #getConf()} and the supplied properties. */
  Trash createTrash(Props props) throws IOException {
    return TrashFactory.createTrash(FileSystem.get(getConf()), props.toProperties());
  }

  /**
   * Move a path to trash. The absolute path of the input path will be replicated under the trash directory.
   * @param fs {@link org.apache.hadoop.fs.FileSystem} where path and trash exist.
   * @param path {@link org.apache.hadoop.fs.FileSystem} path to move to trash.
   * @param props {@link java.util.Properties} containing trash configuration.
   * @return true if move to trash was done successfully.
   * @throws IOException
   */
  public static boolean moveToTrash(FileSystem fs, Path path, Props props) throws IOException {
    return TrashFactory.createTrash(fs, props.toProperties()).moveToTrash(path);
  }

  /**
   * {@link Tool} entry point: expects the properties file path as the first argument.
   *
   * @return 0 on success, 1 when the properties file argument is missing.
   */
  @Override
  public int run(String[] args) throws Exception {
    if (args.length < 1) {
      System.out.println("Must provide properties file as first argument.");
      return 1;
    }

    Props props = new Props(null, args[0]);
    new TrashCollectorJob(TrashCollectorJob.class.getName(), props).run();
    return 0;
  }

  @Override
  public void setConf(Configuration configuration) {
    this.conf = configuration;
  }

  @Override
  public Configuration getConf() {
    return this.conf;
  }

  /** Creates a snapshot of current trash contents, then purges snapshots past retention. */
  @Override
  public void run() throws Exception {
    // trash is null when the Tool-style single-arg constructor was used without run(String[]).
    if (this.trash != null) {
      this.trash.createTrashSnapshot();
      this.trash.purgeTrashSnapshots();
    }
  }
}
3,410
0
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management/conversion/hive
Create_ds/gobblin/gobblin-modules/gobblin-azkaban/src/main/java/org/apache/gobblin/data/management/conversion/hive/validation/ValidationJob.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.data.management.conversion.hive.validation; import org.apache.gobblin.config.client.ConfigClient; import org.apache.gobblin.config.client.api.VersionStabilityPolicy; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.data.management.conversion.hive.task.HiveConverterUtils; import org.apache.gobblin.util.PathUtils; import java.io.IOException; import java.io.InputStreamReader; import java.net.URI; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.log4j.Logger; import org.joda.time.DateTime; import org.slf4j.LoggerFactory; import azkaban.jobExecutor.AbstractJob; import com.google.common.base.Charsets; import com.google.common.base.Enums; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.google.common.base.Throwables; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.io.Closer; import com.google.common.util.concurrent.UncheckedExecutionException; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset; import org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDatasetFinder; import org.apache.gobblin.data.management.conversion.hive.events.EventConstants; import org.apache.gobblin.data.management.conversion.hive.provider.HiveUnitUpdateProvider; import org.apache.gobblin.data.management.conversion.hive.provider.UpdateNotFoundException; import org.apache.gobblin.data.management.conversion.hive.provider.UpdateProviderFactory; import org.apache.gobblin.data.management.conversion.hive.query.HiveValidationQueryGenerator; import org.apache.gobblin.data.management.conversion.hive.source.HiveSource; import org.apache.gobblin.data.management.copy.hive.HiveDataset; import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder; import org.apache.gobblin.data.management.copy.hive.HiveUtils; import org.apache.gobblin.hive.HiveMetastoreClientPool; import 
org.apache.gobblin.hive.HiveSerDeWrapper; import org.apache.gobblin.util.HiveJdbcConnector; import org.apache.gobblin.instrumented.Instrumented; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.event.EventSubmitter; import org.apache.gobblin.util.AutoReturnableObject; import org.apache.gobblin.util.ConfigUtils; import org.apache.gobblin.util.ExecutorsUtils; import org.apache.gobblin.util.HadoopUtils; /** * Azkaban job that runs validation of conversion between two Hive tables * * @author Abhishek Tiwari */ public class ValidationJob extends AbstractJob { private static final Logger log = Logger.getLogger(ValidationJob.class); /*** * Validation Job validates the table and / or partitions updated within a specific window. * This window is determined as follows: * Start ($start_time) : CURRENT_TIME - hive.source.maximum.lookbackDays * End ($end_time) : CURRENT_TIME - hive.source.skip.recentThanDays * ie. the resultant window for validation is: $start_time <= window <= $end_time */ private static final String HIVE_SOURCE_SKIP_RECENT_THAN_DAYS_KEY = "hive.source.skip.recentThanDays"; private static final String HIVE_DATASET_CONFIG_AVRO_PREFIX = "hive.conversion.avro"; private static final String DEFAULT_HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS = "3"; private static final String DEFAULT_HIVE_SOURCE_SKIP_RECENT_THAN_DAYS = "1"; private static final String MAX_THREAD_COUNT = "validation.maxThreadCount"; private static final String DEFAULT_MAX_THREAD_COUNT = "50"; private static final String VALIDATION_TYPE_KEY = "hive.validation.type"; private static final String HIVE_VALIDATION_IGNORE_DATA_PATH_IDENTIFIER_KEY = "hive.validation.ignoreDataPathIdentifier"; private static final String DEFAULT_HIVE_VALIDATION_IGNORE_DATA_PATH_IDENTIFIER = org.apache.commons.lang.StringUtils.EMPTY; private static final Splitter COMMA_BASED_SPLITTER = Splitter.on(",").omitEmptyStrings().trimResults(); private static final Splitter EQUALITY_SPLITTER = 
Splitter.on("=").omitEmptyStrings().trimResults();
  private static final Splitter SLASH_SPLITTER = Splitter.on("/").omitEmptyStrings().trimResults();

  private static final String VALIDATION_FILE_FORMAT_KEY = "hive.validation.fileFormat";
  private static final String IS_NESTED_ORC = "hive.validation.isNestedORC";
  private static final String DEFAULT_IS_NESTED_ORC = "false";
  private static final String HIVE_SETTINGS = "hive.settings";
  private static final String DATEPARTITION = "datepartition";
  private static final String DATE_FORMAT = "yyyy-MM-dd-HH";

  public static final String GOBBLIN_CONFIG_TAGS_WHITELIST = "gobblin.config.tags.whitelist";

  private final ValidationType validationType;
  private List<String> ignoreDataPathIdentifierList;
  // Accumulates failures across validation phases; inspected (and thrown) at the end of a phase.
  private final List<Throwable> throwables;
  private final Properties props;
  private final MetricContext metricContext;
  private final EventSubmitter eventSubmitter;
  private final HiveUnitUpdateProvider updateProvider;
  private final ConvertibleHiveDatasetFinder datasetFinder;
  private final long maxLookBackTime;
  private final long skipRecentThanTime;
  private final HiveMetastoreClientPool pool;
  private final FileSystem fs;
  // Pool running the per-table/per-partition Hive validation queries; tracked via `futures`.
  private final ExecutorService exec;
  private final List<Future<Void>> futures;
  private final Boolean isNestedORC;
  private final List<String> hiveSettings;

  protected Optional<String> configStoreUri;

  // Max number of partitions fetched per metastore listPartitions call.
  private static final short maxParts = 1000;

  // Validation results, keyed by table/partition name with a human-readable detail string.
  // Concurrent maps: populated from the query-executor threads, read after shutdown.
  private Map<String, String> successfulConversions;
  private Map<String, String> failedConversions;
  private Map<String, String> warnConversions;
  private Map<String, String> dataValidationFailed;
  private Map<String, String> dataValidationSuccessful;

  /**
   * Builds the validation job: wires metrics/events, the dataset finder, the metastore client pool,
   * the fixed-size query-executor pool, and computes the [maxLookBackTime, skipRecentThanTime]
   * update-time window from the configured day offsets.
   */
  public ValidationJob(String jobId, Properties props) throws IOException {
    super(jobId, log);

    // Set the conversion config prefix for Avro to ORC
    props.setProperty(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, HIVE_DATASET_CONFIG_AVRO_PREFIX);

    Config config = ConfigFactory.parseProperties(props);

    this.props = props;
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), ValidationJob.class);
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, EventConstants.CONVERSION_NAMESPACE).build();
    this.updateProvider = UpdateProviderFactory.create(props);
    this.datasetFinder = new ConvertibleHiveDatasetFinder(getSourceFs(), props, this.eventSubmitter);
    this.fs = FileSystem.get(new Configuration());

    int maxLookBackDays = Integer.parseInt(props.getProperty(HiveSource.HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY,
        DEFAULT_HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS));
    int skipRecentThanDays = Integer.parseInt(props.getProperty(HIVE_SOURCE_SKIP_RECENT_THAN_DAYS_KEY,
        DEFAULT_HIVE_SOURCE_SKIP_RECENT_THAN_DAYS));
    this.maxLookBackTime = new DateTime().minusDays(maxLookBackDays).getMillis();
    this.skipRecentThanTime = new DateTime().minusDays(skipRecentThanDays).getMillis();

    int maxThreadCount = Integer.parseInt(props.getProperty(MAX_THREAD_COUNT, DEFAULT_MAX_THREAD_COUNT));
    this.exec = Executors.newFixedThreadPool(maxThreadCount,
        ExecutorsUtils.newThreadFactory(Optional.of(LoggerFactory.getLogger(ValidationJob.class)),
            Optional.of("getValidationOutputFromHive")));
    this.futures = Lists.newArrayList();

    EventSubmitter.submit(Optional.of(this.eventSubmitter), EventConstants.VALIDATION_SETUP_EVENT);

    this.pool = HiveMetastoreClientPool.get(props,
        Optional.fromNullable(props.getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));

    Preconditions.checkArgument(props.containsKey(VALIDATION_TYPE_KEY), "Missing property " + VALIDATION_TYPE_KEY);
    this.validationType = ValidationType.valueOf(props.getProperty(VALIDATION_TYPE_KEY));
    this.ignoreDataPathIdentifierList = COMMA_BASED_SPLITTER.splitToList(props
        .getProperty(HIVE_VALIDATION_IGNORE_DATA_PATH_IDENTIFIER_KEY,
            DEFAULT_HIVE_VALIDATION_IGNORE_DATA_PATH_IDENTIFIER));
    this.throwables = new ArrayList<>();
    this.isNestedORC = Boolean.parseBoolean(props.getProperty(IS_NESTED_ORC, DEFAULT_IS_NESTED_ORC));
    this.hiveSettings = Splitter.on(";").trimResults().omitEmptyStrings()
        .splitToList(props.getProperty(HIVE_SETTINGS, StringUtils.EMPTY));
  }

  /** Dispatches to the configured validation mode; unrecognized types are a no-op. */
  @Override
  public void run() throws Exception {
    if (this.validationType == ValidationType.COUNT_VALIDATION) {
      runCountValidation();
    } else if (this.validationType == ValidationType.FILE_FORMAT_VALIDATION) {
      runFileFormatValidation();
    }
  }

  /**
   * Validates that partitions are in a given format
   * Partitions to be processed are picked up from the config store which are tagged.
   * Tag can be passed through key GOBBLIN_CONFIG_TAGS_WHITELIST
   * Datasets tagged by the above key will be picked up.
   * PathName will be treated as tableName and ParentPathName will be treated as dbName
   *
   * For example if the dataset uri picked up by is /data/hive/myDb/myTable
   * Then myTable is tableName and myDb is dbName
   */
  private void runFileFormatValidation() throws IOException {
    Preconditions.checkArgument(this.props.containsKey(VALIDATION_FILE_FORMAT_KEY));
    this.configStoreUri =
        StringUtils.isNotBlank(this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI)) ? Optional.of(
            this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_URI)) : Optional.<String>absent();
    // Config store is only consulted when explicitly enabled; otherwise no partitions are selected.
    if (!Boolean.valueOf(this.props.getProperty(ConfigurationKeys.CONFIG_MANAGEMENT_STORE_ENABLED,
        ConfigurationKeys.DEFAULT_CONFIG_MANAGEMENT_STORE_ENABLED))) {
      this.configStoreUri = Optional.<String>absent();
    }
    List<Partition> partitions = new ArrayList<>();
    if (this.configStoreUri.isPresent()) {
      Preconditions.checkArgument(this.props.containsKey(GOBBLIN_CONFIG_TAGS_WHITELIST),
          "Missing required property " + GOBBLIN_CONFIG_TAGS_WHITELIST);
      String tag = this.props.getProperty(GOBBLIN_CONFIG_TAGS_WHITELIST);
      ConfigClient configClient = ConfigClient.createConfigClient(VersionStabilityPolicy.WEAK_LOCAL_STABILITY);
      Path tagUri = PathUtils.mergePaths(new Path(this.configStoreUri.get()), new Path(tag));
      try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
        Collection<URI> importedBy = configClient.getImportedBy(new URI(tagUri.toString()), true);
        for (URI uri : importedBy) {
          // Dataset URI convention: .../<dbName>/<tableName> (see method javadoc above).
          String dbName = new Path(uri).getParent().getName();
          Table table = new Table(client.get().getTable(dbName, new Path(uri).getName()));
          for (org.apache.hadoop.hive.metastore.api.Partition partition : client.get()
              .listPartitions(dbName, table.getTableName(), maxParts)) {
            partitions.add(new Partition(table, partition));
          }
        }
      } catch (Exception e) {
        // Collected rather than thrown so all partitions are still checked before the job fails.
        this.throwables.add(e);
      }
    }
    for (Partition partition : partitions) {
      if (!shouldValidate(partition)) {
        continue;
      }
      String fileFormat = this.props.getProperty(VALIDATION_FILE_FORMAT_KEY);
      Optional<HiveSerDeWrapper.BuiltInHiveSerDe> hiveSerDe =
          Enums.getIfPresent(HiveSerDeWrapper.BuiltInHiveSerDe.class, fileFormat.toUpperCase());
      if (!hiveSerDe.isPresent()) {
        throwables.add(new Throwable("Partition SerDe is either not supported or absent"));
        continue;
      }
      String serdeLib = partition.getTPartition().getSd().getSerdeInfo().getSerializationLib();
      if (!hiveSerDe.get().toString().equalsIgnoreCase(serdeLib)) {
        throwables.add(new Throwable("Partition " + partition.getCompleteName() + " SerDe " + serdeLib
            + " doesn't match with the required SerDe " + hiveSerDe.get().toString()));
      }
    }
    if (!this.throwables.isEmpty()) {
      for (Throwable e : this.throwables) {
        log.error("Failed to validate due to " + e);
      }
      throw new RuntimeException("Validation Job Failed");
    }
  }

  /**
   * Runs count (and data) validation for every convertible dataset: submits one Hive query task per
   * table/partition to the executor, waits (up to 4 hours) for completion, then logs consolidated
   * results and fails the job if any validation failed.
   */
  private void runCountValidation() throws InterruptedException {
    try {
      // Validation results
      this.successfulConversions = Maps.newConcurrentMap();
      this.failedConversions = Maps.newConcurrentMap();
      this.warnConversions = Maps.newConcurrentMap();
      this.dataValidationFailed = Maps.newConcurrentMap();
      this.dataValidationSuccessful = Maps.newConcurrentMap();

      // Find datasets to validate
      Iterator<HiveDataset> iterator = this.datasetFinder.getDatasetsIterator();
      EventSubmitter.submit(Optional.of(this.eventSubmitter), EventConstants.VALIDATION_FIND_HIVE_TABLES_EVENT);

      while (iterator.hasNext()) {
        ConvertibleHiveDataset hiveDataset = (ConvertibleHiveDataset) iterator.next();
        try (AutoReturnableObject<IMetaStoreClient> client = hiveDataset.getClientPool().getClient()) {
          // Validate dataset
          log.info(String.format("Validating dataset: %s", hiveDataset));
          if (hiveDataset.getTable().isPartitioned()) {
            processPartitionedTable(hiveDataset, client);
          } else {
            processNonPartitionedTable(hiveDataset);
          }
        }
      }

      // Wait for all validation queries to finish (hard cap of 4 hours for the whole batch).
      log.info(String.format("Waiting for %d futures to complete", this.futures.size()));
      this.exec.shutdown();
      this.exec.awaitTermination(4, TimeUnit.HOURS);

      boolean oneFutureFailure = false;
      // Check if there were any exceptions
      for (Future<Void> future : this.futures) {
        try {
          future.get();
        } catch (Throwable t) {
          log.error("getValidationOutputFromHive failed", t);
          oneFutureFailure = true;
        }
      }

      // Log validation results:
      // Validation results are consolidated into the successfulConversions and failedConversions
      // These are then converted into log lines in the Azkaban logs as done below
      for (Map.Entry<String, String> successfulConversion : this.successfulConversions.entrySet()) {
        log.info(String.format("Successful conversion: %s [%s]", successfulConversion.getKey(),
            successfulConversion.getValue()));
      }
      for (Map.Entry<String, String> successfulConversion : this.warnConversions.entrySet()) {
        log.warn(String.format("No conversion found for: %s [%s]", successfulConversion.getKey(),
            successfulConversion.getValue()));
      }
      for (Map.Entry<String, String> failedConverion : this.failedConversions.entrySet()) {
        log.error(String.format("Failed conversion: %s [%s]", failedConverion.getKey(), failedConverion.getValue()));
      }
      for (Map.Entry<String, String> success : this.dataValidationSuccessful.entrySet()) {
        log.info(String.format("Data validation successful: %s [%s]", success.getKey(), success.getValue()));
      }
      for (Map.Entry<String, String> failed : this.dataValidationFailed.entrySet()) {
        log.error(String.format("Data validation failed: %s [%s]", failed.getKey(), failed.getValue()));
      }

      // NOTE(review): the message only reports the count-validation failure count even when
      // data-validation failures triggered the throw.
      if (!this.failedConversions.isEmpty() || !this.dataValidationFailed.isEmpty()) {
        throw new RuntimeException(String.format(
            "Validation failed for %s conversions. See previous logs for exact validation failures",
            failedConversions.size()));
      }
      if (oneFutureFailure) {
        throw new RuntimeException("At least one hive ddl failed. Check previous logs");
      }
    } catch (IOException e) {
      Throwables.propagate(e);
    }
  }

  /***
   * Validate a {@link Table} if it was updated recently by checking if its update time
   * lies between the maxLookBackTime and skipRecentThanTime window.
   * @param hiveDataset {@link ConvertibleHiveDataset} containing {@link Table} info.
   * @throws IOException Issue in validating {@link HiveDataset}
   */
  private void processNonPartitionedTable(final ConvertibleHiveDataset hiveDataset) throws IOException {
    try {
      // Validate table
      final long updateTime = this.updateProvider.getUpdateTime(hiveDataset.getTable());
      log.info(String.format("Validating table: %s", hiveDataset.getTable()));

      for (final String format : hiveDataset.getDestFormats()) {
        Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional =
            hiveDataset.getConversionConfigForFormat(format);
        if (conversionConfigOptional.isPresent()) {
          ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
          String orcTableName = conversionConfig.getDestinationTableName();
          String orcTableDatabase = conversionConfig.getDestinationDbName();
          Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta =
              HiveConverterUtils.getDestinationTableMeta(orcTableDatabase, orcTableName, this.props);

          // Generate validation queries
          final List<String> validationQueries = HiveValidationQueryGenerator
              .generateCountValidationQueries(hiveDataset, Optional.<Partition>absent(), conversionConfig);
          final List<String> dataValidationQueries = Lists.newArrayList(HiveValidationQueryGenerator
              .generateDataValidationQuery(hiveDataset.getTable().getTableName(),
                  hiveDataset.getTable().getDbName(), destinationMeta.getKey().get(),
                  Optional.<Partition>absent(), this.isNestedORC));

          // Queries run asynchronously on `exec`; results are folded into the report maps.
          this.futures.add(this.exec.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
              // Execute validation queries
              log.debug(String.format("Going to execute queries: %s for format: %s", validationQueries, format));
              List<Long> rowCounts = ValidationJob.this.getValidationOutputFromHive(validationQueries);
              log.debug(String.format("Going to execute queries: %s for format: %s", dataValidationQueries, format));
              List<Long> rowDataValidatedCount =
                  ValidationJob.this.getValidationOutputFromHive(dataValidationQueries);
              // Validate and populate report
              validateAndPopulateReport(hiveDataset.getTable().getCompleteName(), updateTime, rowCounts,
                  rowDataValidatedCount);
              return null;
            }
          }));
        } else {
          log.warn(String.format("No config found for format: %s So skipping table: %s for this format", format,
              hiveDataset.getTable().getCompleteName()));
        }
      }
    } catch (UncheckedExecutionException e) {
      log.warn(String.format("Not validating table: %s %s", hiveDataset.getTable().getCompleteName(),
          e.getMessage()));
    } catch (UpdateNotFoundException e) {
      log.warn(String.format("Not validating table: %s as update time was not found. %s",
          hiveDataset.getTable().getCompleteName(), e.getMessage()));
    }
  }

  /***
   * Validate all {@link Partition}s for a {@link Table} if it was updated recently by checking if its update time
   * lies between the maxLookBackTime and skipRecentThanTime window.
   * @param hiveDataset {@link HiveDataset} containing {@link Table} and {@link Partition} info.
   * @param client {@link IMetaStoreClient} to query Hive.
* @throws IOException Issue in validating {@link HiveDataset} */ private void processPartitionedTable(ConvertibleHiveDataset hiveDataset, AutoReturnableObject<IMetaStoreClient> client) throws IOException { // Get partitions for the table List<Partition> sourcePartitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(), Optional.<String> absent()); for (final String format : hiveDataset.getDestFormats()) { Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset.getConversionConfigForFormat(format); if (conversionConfigOptional.isPresent()) { // Get conversion config ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get(); String orcTableName = conversionConfig.getDestinationTableName(); String orcTableDatabase = conversionConfig.getDestinationDbName(); Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = HiveConverterUtils.getDestinationTableMeta(orcTableDatabase, orcTableName, this.props); // Validate each partition for (final Partition sourcePartition : sourcePartitions) { try { final long updateTime = this.updateProvider.getUpdateTime(sourcePartition); if (shouldValidate(sourcePartition)) { log.info(String.format("Validating partition: %s", sourcePartition.getCompleteName())); // Generate validation queries final List<String> countValidationQueries = HiveValidationQueryGenerator.generateCountValidationQueries(hiveDataset, Optional.of(sourcePartition), conversionConfig); final List<String> dataValidationQueries = Lists.newArrayList(HiveValidationQueryGenerator.generateDataValidationQuery(hiveDataset.getTable().getTableName(), hiveDataset.getTable() .getDbName(), destinationMeta.getKey().get(), Optional.of(sourcePartition), this.isNestedORC)); this.futures.add(this.exec.submit(new Callable<Void>() { @Override public Void call() throws Exception { // Execute validation queries log.debug(String.format("Going to execute count validation 
queries queries: %s for format: %s " + "and partition %s", countValidationQueries, format, sourcePartition.getCompleteName())); List<Long> rowCounts = ValidationJob.this.getValidationOutputFromHive(countValidationQueries); log.debug(String.format("Going to execute data validation queries: %s for format: %s and partition %s", dataValidationQueries, format, sourcePartition.getCompleteName())); List<Long> rowDataValidatedCount = ValidationJob.this.getValidationOutputFromHive(dataValidationQueries); // Validate and populate report validateAndPopulateReport(sourcePartition.getCompleteName(), updateTime, rowCounts, rowDataValidatedCount); return null; } })); } else { log.debug(String.format("Not validating partition: %s as updateTime: %s is not in range of max look back: %s " + "and skip recent than: %s", sourcePartition.getCompleteName(), updateTime, this.maxLookBackTime, this.skipRecentThanTime)); } } catch (UncheckedExecutionException e) { log.warn( String.format("Not validating partition: %s %s", sourcePartition.getCompleteName(), e.getMessage())); } catch (UpdateNotFoundException e) { log.warn(String.format("Not validating partition: %s as update time was not found. %s", sourcePartition.getCompleteName(), e.getMessage())); } } } else { log.info(String.format("No conversion config found for format %s. Ignoring data validation", format)); } } } /*** * Execute Hive queries using {@link HiveJdbcConnector} and validate results. * @param queries Queries to execute. 
*/ @SuppressWarnings("unused") private List<Long> getValidationOutputFromHiveJdbc(List<String> queries) throws IOException { if (null == queries || queries.size() == 0) { log.warn("No queries specified to be executed"); return Collections.emptyList(); } Statement statement = null; List<Long> rowCounts = Lists.newArrayList(); Closer closer = Closer.create(); try { HiveJdbcConnector hiveJdbcConnector = HiveJdbcConnector.newConnectorWithProps(props); statement = hiveJdbcConnector.getConnection().createStatement(); for (String query : queries) { log.info("Executing query: " + query); boolean result = statement.execute(query); if (result) { ResultSet resultSet = statement.getResultSet(); if (resultSet.next()) { rowCounts.add(resultSet.getLong(1)); } } else { log.warn("Query output for: " + query + " : " + result); } } } catch (SQLException e) { throw new RuntimeException(e); } finally { try { closer.close(); } catch (Exception e) { log.warn("Could not close HiveJdbcConnector", e); } if (null != statement) { try { statement.close(); } catch (SQLException e) { log.warn("Could not close Hive statement", e); } } } return rowCounts; } /*** * Execute Hive queries using {@link HiveJdbcConnector} and validate results. * @param queries Queries to execute. 
*/ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE", justification = "Temporary fix") private List<Long> getValidationOutputFromHive(List<String> queries) throws IOException { if (null == queries || queries.size() == 0) { log.warn("No queries specified to be executed"); return Collections.emptyList(); } List<Long> rowCounts = Lists.newArrayList(); Closer closer = Closer.create(); try { HiveJdbcConnector hiveJdbcConnector = closer.register(HiveJdbcConnector.newConnectorWithProps(props)); for (String query : queries) { String hiveOutput = "hiveConversionValidationOutput_" + UUID.randomUUID().toString(); Path hiveTempDir = new Path("/tmp" + Path.SEPARATOR + hiveOutput); query = "INSERT OVERWRITE DIRECTORY '" + hiveTempDir + "' " + query; log.info("Executing query: " + query); try { if (this.hiveSettings.size() > 0) { hiveJdbcConnector.executeStatements(this.hiveSettings.toArray(new String[this.hiveSettings.size()])); } hiveJdbcConnector.executeStatements("SET hive.exec.compress.output=false","SET hive.auto.convert.join=false", query); FileStatus[] fileStatusList = this.fs.listStatus(hiveTempDir); List<FileStatus> files = new ArrayList<>(); for (FileStatus fileStatus : fileStatusList) { if (fileStatus.isFile()) { files.add(fileStatus); } } if (files.size() > 1) { log.warn("Found more than one output file. Should have been one."); } else if (files.size() == 0) { log.warn("Found no output file. 
Should have been one."); } else { String theString = IOUtils.toString(new InputStreamReader(this.fs.open(files.get(0).getPath()), Charsets.UTF_8)); log.info("Found row count: " + theString.trim()); if (StringUtils.isBlank(theString.trim())) { rowCounts.add(0l); } else { try { rowCounts.add(Long.parseLong(theString.trim())); } catch (NumberFormatException e) { throw new RuntimeException("Could not parse Hive output: " + theString.trim(), e); } } } } finally { if (this.fs.exists(hiveTempDir)) { log.debug("Deleting temp dir: " + hiveTempDir); this.fs.delete(hiveTempDir, true); } } } } catch (SQLException e) { log.warn("Execution failed for query set " + queries.toString(), e); } finally { try { closer.close(); } catch (Exception e) { log.warn("Could not close HiveJdbcConnector", e); } } return rowCounts; } private void validateAndPopulateReport(String datasetIdentifier, long conversionInstance, List<Long> rowCounts, List<Long> rowDataValidatedCount) { if (null == rowCounts || rowCounts.size() == 0) { this.warnConversions.put(String.format("Dataset: %s Instance: %s", datasetIdentifier, conversionInstance), "No conversion details found"); this.eventSubmitter.submit(EventConstants.VALIDATION_NOOP_EVENT, ImmutableMap.of("datasetUrn", datasetIdentifier)); return; } if (null == rowDataValidatedCount || rowDataValidatedCount.size() == 0) { this.warnConversions.put(String.format("Dataset: %s Instance: %s", datasetIdentifier, conversionInstance), "No conversion details found"); this.eventSubmitter.submit(EventConstants.VALIDATION_NOOP_EVENT, ImmutableMap.of("datasetUrn", datasetIdentifier)); return; } long rowCountCached = -1; boolean isFirst = true; for (Long rowCount : rowCounts) { // First is always source partition / table (refer HiveValidationQueryGenerator) if (isFirst) { rowCountCached = rowCount; isFirst = false; continue; } // Row count validation if (rowCount != rowCountCached) { if (rowCount == 0) { this.warnConversions.put(String.format("Dataset: %s Instance: %s", 
datasetIdentifier, conversionInstance), "Row counts found 0, may be the conversion is delayed."); this.eventSubmitter.submit(EventConstants.VALIDATION_NOOP_EVENT, ImmutableMap.of("datasetUrn", datasetIdentifier)); } else { this.failedConversions.put(String.format("Dataset: %s Instance: %s", datasetIdentifier, conversionInstance), String.format("Row counts did not match across all conversions. Row count expected: %d, Row count got: %d", rowCountCached, rowCount)); this.eventSubmitter.submit(EventConstants.VALIDATION_FAILED_EVENT, ImmutableMap.of("datasetUrn", datasetIdentifier)); return; } } else { this.successfulConversions.put(String.format("Dataset: %s Instance: %s", datasetIdentifier, conversionInstance), String.format("Row counts matched across all conversions. Row count expected: %d, Row count got: %d", rowCountCached, rowCount)); this.eventSubmitter.submit(EventConstants.VALIDATION_SUCCESSFUL_EVENT, ImmutableMap.of("datasetUrn", datasetIdentifier)); } } // Data count validation if (rowCountCached == rowDataValidatedCount.get(0)) { this.dataValidationSuccessful.put(String.format("Dataset: %s Instance: %s", datasetIdentifier, conversionInstance), "Common rows matched expected value. Expected: " + rowCountCached + " Found: " + rowDataValidatedCount); } else { this.dataValidationFailed.put(String.format("Dataset: %s Instance: %s", datasetIdentifier, conversionInstance), "Common rows did not match expected value. Expected: " + rowCountCached + " Found: " + rowDataValidatedCount); } } /*** * Get source {@link FileSystem} * @return Source {@link FileSystem} * @throws IOException Issue in fetching {@link FileSystem} */ private static FileSystem getSourceFs() throws IOException { return FileSystem.get(HadoopUtils.newConfiguration()); } /** * Determine if the {@link Table} or {@link Partition} should be validated by checking if its create time * lies between maxLookBackTime and skipRecentThanTime window. 
*/ private boolean shouldValidate(Partition partition) { for (String pathToken : this.ignoreDataPathIdentifierList) { if (partition.getDataLocation().toString().toLowerCase().contains(pathToken.toLowerCase())) { log.info("Skipping partition " + partition.getCompleteName() + " containing invalid token " + pathToken .toLowerCase()); return false; } } try { long createTime = getPartitionCreateTime(partition.getName()); boolean withinTimeWindow = new DateTime(createTime).isAfter(this.maxLookBackTime) && new DateTime(createTime) .isBefore(this.skipRecentThanTime); if (!withinTimeWindow) { log.info("Skipping partition " + partition.getCompleteName() + " as create time " + new DateTime(createTime) .toString() + " is not within validation time window "); } else { log.info("Validating partition " + partition.getCompleteName()); return withinTimeWindow; } } catch (ParseException e) { Throwables.propagate(e); } return false; } public static Long getPartitionCreateTime(String partitionName) throws ParseException { String dateString = null; for (String st : SLASH_SPLITTER.splitToList(partitionName)) { if (st.startsWith(DATEPARTITION)) { dateString = EQUALITY_SPLITTER.splitToList(st).get(1); } } Preconditions.checkNotNull(dateString, "Unable to get partition date"); DateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT); return dateFormat.parse(dateString).getTime(); } } enum ValidationType { COUNT_VALIDATION, FILE_FORMAT_VALIDATION; ValidationType() { } }
3,411
0
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/test/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/test/java/org/apache/gobblin/metrics/influxdb/TestInfluxDB.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metrics.influxdb; import org.apache.gobblin.metrics.test.TimestampedValue; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import org.influxdb.InfluxDB; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; import org.influxdb.dto.Pong; import org.influxdb.dto.Query; import org.influxdb.dto.QueryResult; import com.google.common.collect.Maps; /** * A test implementation of {@link org.influxdb.InfluxDB}. 
* * @author Lorand Bendig * */ public class TestInfluxDB implements InfluxDB { private final Map<String, TimestampedValue> data = Maps.newHashMap(); @Override public InfluxDB setLogLevel(LogLevel logLevel) { // Nothing to do return this; } @Override public InfluxDB enableBatch(int actions, int flushDuration, TimeUnit flushDurationTimeUnit) { // Nothing to do return this; } @Override public void disableBatch() { // Nothing to do } @Override public Pong ping() { // Nothing to do return null; } @Override public String version() { // Nothing to do return null; } @Override public void write(String database, String retentionPolicy, Point point) { BatchPoints batchPoints = BatchPoints.database(database).retentionPolicy(retentionPolicy).build(); batchPoints.point(point); this.write(batchPoints); } @Override public void write(BatchPoints batchPoints) { for (Point point : batchPoints.getPoints()) { write(point.lineProtocol()); } } @Override public QueryResult query(Query query) { // Nothing to do return null; } @Override public QueryResult query(Query query, TimeUnit timeUnit) { // Nothing to do return null; } @Override public void createDatabase(String name) { // Nothing to do } @Override public void deleteDatabase(String name) { // Nothing to do } @Override public List<String> describeDatabases() { // Nothing to do return null; } @Override public void setConnectTimeout(long connectTimeout, TimeUnit timeUnit) { // Nothing to do } @Override public void setReadTimeout(long readTimeout, TimeUnit timeUnit) { // Nothing to do } @Override public void setWriteTimeout(long writeTimeout, TimeUnit timeUnit) { // Nothing to do } /** * Get a metric with a given name. 
* * @param name metric name * @return a {@link org.apache.gobblin.metrics.test.TimestampedValue} */ public TimestampedValue getMetric(String name) { return this.data.get(name); } private void write(String lineProtocol) { String[] split = lineProtocol.split(" "); String key = split[0]; String value = split[1].substring(split[1].indexOf('=') + 1, split[1].length()); long timestamp = Long.valueOf(split[2]) / 1000000l; data.put(key, new TimestampedValue(timestamp, value)); } }
3,412
0
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/test/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/test/java/org/apache/gobblin/metrics/influxdb/InfluxDBEventReporterTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metrics.influxdb; import org.apache.gobblin.metrics.GobblinTrackingEvent; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.event.EventSubmitter; import org.apache.gobblin.metrics.event.JobEvent; import org.apache.gobblin.metrics.event.MultiPartEvent; import org.apache.gobblin.metrics.event.TaskEvent; import org.apache.gobblin.metrics.test.TimestampedValue; import java.io.IOException; import java.util.Map; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.common.collect.Maps; /** * Test for InfluxDBReporter using a mock backend ({@link TestInfluxDB}) * * @author Lorand Bendig * */ @Test(groups = { "gobblin.metrics" }) public class InfluxDBEventReporterTest { private TestInfluxDB influxDB = new TestInfluxDB(); private InfluxDBPusher influxDBPusher; private static String DEFAULT_URL = "http://localhost:8086"; private static String DEFAULT_USERNAME = "user"; private static String DEFAULT_PASSWORD = "password"; private static String DEFAULT_DATABASE = "default"; private static String NAMESPACE = "gobblin.metrics.test"; @BeforeClass public void 
setUp() throws IOException { InfluxDBConnectionType connectionType = Mockito.mock(InfluxDBConnectionType.class); Mockito.when(connectionType.createConnection(DEFAULT_URL, DEFAULT_USERNAME, DEFAULT_PASSWORD)).thenReturn(influxDB); this.influxDBPusher = new InfluxDBPusher.Builder(DEFAULT_URL, DEFAULT_USERNAME, DEFAULT_PASSWORD, DEFAULT_DATABASE, connectionType) .build(); } private InfluxDBEventReporter.BuilderImpl getBuilder(MetricContext metricContext) { return InfluxDBEventReporter.Factory.forContext(metricContext).withInfluxDBPusher(influxDBPusher); } @Test public void testSimpleEvent() throws IOException { try ( MetricContext metricContext = MetricContext.builder(this.getClass().getCanonicalName() + ".testInfluxDBReporter1").build(); InfluxDBEventReporter influxEventReporter = getBuilder(metricContext).build();) { Map<String, String> metadata = Maps.newHashMap(); metadata.put(JobEvent.METADATA_JOB_ID, "job1"); metadata.put(TaskEvent.METADATA_TASK_ID, "task1"); metricContext.submitEvent(GobblinTrackingEvent.newBuilder() .setName(JobEvent.TASKS_SUBMITTED) .setNamespace(NAMESPACE) .setMetadata(metadata).build()); try { Thread.sleep(100); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } influxEventReporter.report(); try { Thread.sleep(100); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } TimestampedValue retrievedEvent = influxDB.getMetric( "gobblin.metrics.job1.task1.events.TasksSubmitted"); Assert.assertEquals(retrievedEvent.getValue(), "0.0"); Assert.assertTrue(retrievedEvent.getTimestamp() <= (System.currentTimeMillis())); } } @Test public void testMultiPartEvent() throws IOException { try ( MetricContext metricContext = MetricContext.builder(this.getClass().getCanonicalName() + ".testInfluxDBReporter2").build(); InfluxDBEventReporter influxEventReporter = getBuilder(metricContext).build();) { Map<String, String> metadata = Maps.newHashMap(); metadata.put(JobEvent.METADATA_JOB_ID, "job2"); 
metadata.put(TaskEvent.METADATA_TASK_ID, "task2"); metadata.put(EventSubmitter.EVENT_TYPE, "JobStateEvent"); metadata.put(JobEvent.METADATA_JOB_START_TIME, "1457736710521"); metadata.put(JobEvent.METADATA_JOB_END_TIME, "1457736710734"); metadata.put(JobEvent.METADATA_JOB_LAUNCHED_TASKS, "3"); metadata.put(JobEvent.METADATA_JOB_COMPLETED_TASKS, "2"); metadata.put(JobEvent.METADATA_JOB_STATE, "FAILED"); metricContext.submitEvent(GobblinTrackingEvent.newBuilder() .setName(MultiPartEvent.JOBSTATE_EVENT.getEventName()) .setNamespace(NAMESPACE) .setMetadata(metadata).build()); try { Thread.sleep(100); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } influxEventReporter.report(); try { Thread.sleep(100); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } String prefix = "gobblin.metrics.job2.task2.events.JobStateEvent"; Assert.assertEquals(influxDB.getMetric(prefix + ".jobBeginTime").getValue(), "1457736710521.0"); Assert.assertEquals(influxDB.getMetric(prefix + ".jobEndTime").getValue(), "1457736710734.0"); Assert.assertEquals(influxDB.getMetric(prefix + ".jobLaunchedTasks").getValue(), "3.0"); Assert.assertEquals(influxDB.getMetric(prefix + ".jobCompletedTasks").getValue(), "2.0"); Assert.assertEquals(influxDB.getMetric(prefix + ".jobState").getValue(), "\"FAILED\""); } } }
3,413
0
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/test/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/test/java/org/apache/gobblin/metrics/influxdb/InfluxDBReporterTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metrics.influxdb; import static org.apache.gobblin.metrics.test.TestConstants.CONTEXT_NAME; import static org.apache.gobblin.metrics.test.TestConstants.COUNTER; import static org.apache.gobblin.metrics.test.TestConstants.GAUGE; import static org.apache.gobblin.metrics.test.TestConstants.HISTOGRAM; import static org.apache.gobblin.metrics.test.TestConstants.METER; import static org.apache.gobblin.metrics.test.TestConstants.METRIC_PREFIX; import static org.apache.gobblin.metrics.test.TestConstants.TIMER; import org.apache.gobblin.metrics.ContextAwareGauge; import org.apache.gobblin.metrics.Measurements; import org.apache.gobblin.metrics.MetricContext; import org.apache.gobblin.metrics.Tag; import java.io.IOException; import java.util.Properties; import java.util.TreeMap; import java.util.concurrent.TimeUnit; import org.mockito.Mockito; import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.codahale.metrics.Counter; import com.codahale.metrics.Gauge; import com.codahale.metrics.Histogram; import com.codahale.metrics.Meter; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; /** * Test for 
InfluxDBReporter using a mock backend ({@link TestInfluxDB}) * * @author Lorand Bendig * */ @Test(groups = { "gobblin.metrics" }) public class InfluxDBReporterTest { private TestInfluxDB influxDB = new TestInfluxDB(); private InfluxDBPusher influxDBPusher; private static String DEFAULT_URL = "http://localhost:8086"; private static String DEFAULT_USERNAME = "user"; private static String DEFAULT_PASSWORD = "password"; private static String DEFAULT_DATABASE = "default"; @BeforeClass public void setUp() throws IOException { InfluxDBConnectionType connectionType = Mockito.mock(InfluxDBConnectionType.class); Mockito.when(connectionType.createConnection(DEFAULT_URL, DEFAULT_USERNAME, DEFAULT_PASSWORD)).thenReturn(influxDB); this.influxDBPusher = new InfluxDBPusher.Builder(DEFAULT_URL, DEFAULT_USERNAME, DEFAULT_PASSWORD, DEFAULT_DATABASE, connectionType) .build(); } @Test public void testWithoutTags() throws IOException { try ( MetricContext metricContext = MetricContext.builder(this.getClass().getCanonicalName() + ".testInfluxDBReporter").build(); InfluxDBReporter influxDBReporter = InfluxDBReporter.Factory.newBuilder() .withInfluxDBPusher(influxDBPusher) .withMetricContextName(CONTEXT_NAME) .build(new Properties());) { ContextAwareGauge<Long> contextAwareGauge = metricContext.newContextAwareGauge("com.linkedin.example.gauge", new Gauge<Long>() { @Override public Long getValue() { return 1000l; } }); metricContext.register(MetricRegistry.name(METRIC_PREFIX, GAUGE), contextAwareGauge); Counter counter = metricContext.counter(MetricRegistry.name(METRIC_PREFIX, COUNTER)); Meter meter = metricContext.meter(MetricRegistry.name(METRIC_PREFIX, METER)); Histogram histogram = metricContext.histogram(MetricRegistry.name(METRIC_PREFIX, HISTOGRAM)); Timer timer = metricContext.timer(MetricRegistry.name(METRIC_PREFIX, TIMER)); counter.inc(3l); meter.mark(1l); meter.mark(2l); meter.mark(3l); histogram.update(1); histogram.update(1); histogram.update(2); timer.update(1, 
TimeUnit.SECONDS); timer.update(2, TimeUnit.SECONDS); timer.update(3, TimeUnit.SECONDS); influxDBReporter.report(metricContext.getGauges(), metricContext.getCounters(), metricContext.getHistograms(), metricContext.getMeters(), metricContext.getTimers(), metricContext.getTagMap()); //InfluxDB converts all values to float64 internally Assert.assertEquals(getMetricValue(COUNTER, Measurements.COUNT), Float.toString(3f)); Assert.assertEquals(getMetricValue(GAUGE, null), Float.toString(1000l)); Assert.assertTrue(getMetricTimestamp(GAUGE, null) <= System.currentTimeMillis()); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_75TH), Float.toString(2f)); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_98TH), Float.toString(2f)); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_99TH), Float.toString(2f)); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_999TH), Float.toString(2f)); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.COUNT), Float.toString(3f)); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.MIN), Float.toString(1f)); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.MAX), Float.toString(2f)); Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.MEDIAN), Float.toString(1f)); Assert.assertTrue(Double.valueOf(getMetricValue(HISTOGRAM, Measurements.MEAN)) > 1f); Assert.assertTrue(Double.valueOf(getMetricValue(HISTOGRAM, Measurements.STDDEV)) < 0.5f); Assert.assertEquals(getMetricValue(METER, Measurements.RATE_1MIN), Float.toString(0f)); Assert.assertEquals(getMetricValue(METER, Measurements.RATE_5MIN), Float.toString(0f)); Assert.assertEquals(getMetricValue(METER, Measurements.COUNT), Float.toString(6f)); Assert.assertTrue(Double.valueOf(getMetricValue(METER, Measurements.MEAN_RATE)) > 0f); Assert.assertEquals(getMetricValue(TIMER, Measurements.RATE_1MIN), Float.toString(0f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.RATE_5MIN), 
Float.toString(0f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_75TH), Float.toString(3000f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_98TH), Float.toString(3000f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_99TH), Float.toString(3000f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_999TH), Float.toString(3000f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.COUNT), Float.toString(3f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.MIN), Float.toString(1000f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.MAX), Float.toString(3000f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.MEAN), Float.toString(2000f)); Assert.assertEquals(getMetricValue(TIMER, Measurements.MEDIAN), Float.toString(2000f)); Assert.assertTrue(Double.valueOf(getMetricValue(TIMER, Measurements.MEAN_RATE)) > 0f); Assert.assertTrue(Double.valueOf(getMetricValue(TIMER, Measurements.STDDEV)) > 0f); } } @Test public void testWithTags() throws IOException { try ( MetricContext metricContext = MetricContext.builder(this.getClass().getCanonicalName() + ".testGraphiteReporter") .addTag(new Tag<String>("taskId", "task_testjob_123")) .addTag(new Tag<String>("forkBranchName", "fork_1")).build(); InfluxDBReporter influxDBReporter = InfluxDBReporter.Factory.newBuilder() .withInfluxDBPusher(influxDBPusher) .withMetricContextName(CONTEXT_NAME) .build(new Properties());) { Counter counter = metricContext.counter(MetricRegistry.name(METRIC_PREFIX, COUNTER)); counter.inc(5l); influxDBReporter.report(new TreeMap<String, Gauge>(), metricContext.getCounters(), new TreeMap<String, Histogram>(), new TreeMap<String, Meter>(), new TreeMap<String, Timer>(), metricContext.getTagMap()); //InfluxDB converts all values to float64 internally Assert.assertEquals(getMetricValue("task_testjob_123.fork_1." 
+ METRIC_PREFIX, COUNTER, Measurements.COUNT), Float.toString(5f)); } } private String getMetricValue(String metric, Measurements key) { return getMetricValue(METRIC_PREFIX, metric, key); } private String getMetricValue(String metricPrefix, String metric, Measurements key) { String metricKey = (key == null) ? MetricRegistry.name(CONTEXT_NAME, metricPrefix, metric) : MetricRegistry.name(CONTEXT_NAME, metricPrefix, metric, key.getName()); return influxDB.getMetric(metricKey).getValue(); } private long getMetricTimestamp(String metric, Measurements key) { String metricKey = (key == null) ? MetricRegistry.name(CONTEXT_NAME, METRIC_PREFIX, metric) : MetricRegistry.name(CONTEXT_NAME, METRIC_PREFIX, metric, key.getName()); return influxDB.getMetric(metricKey).getTimestamp(); } }
3,414
0
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics/influxdb/InfluxDBReporter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metrics.influxdb;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.SortedMap;
import java.util.concurrent.TimeUnit;

import org.influxdb.dto.Point;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Counter;
import com.codahale.metrics.Counting;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Metered;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.Measurements;
import org.apache.gobblin.metrics.reporter.ConfiguredScheduledReporter;
import org.apache.gobblin.util.ConfigUtils;

import static org.apache.gobblin.metrics.Measurements.*;


/**
 * InfluxDB reporter for metrics
 *
 * @author Lorand Bendig
 *
 */
public class InfluxDBReporter extends ConfiguredScheduledReporter {

  // Sink that batches points and writes them to InfluxDB; either injected via the
  // builder or created from the builder's connection parameters.
  private final InfluxDBPusher influxDBPusher;

  private static final Logger LOGGER = LoggerFactory.getLogger(InfluxDBReporter.class);

  public InfluxDBReporter(Builder<?> builder, Config config) {
    super(builder, config);
    if (builder.influxDBPusher.isPresent()) {
      this.influxDBPusher = builder.influxDBPusher.get();
    } else {
      this.influxDBPusher =
          new InfluxDBPusher.Builder(builder.url, builder.username, builder.password, builder.database,
              builder.connectionType).build();
    }
  }

  /**
   * A static factory class for obtaining new {@link org.apache.gobblin.metrics.influxdb.InfluxDBReporter.Builder}s
   *
   * @see org.apache.gobblin.metrics.influxdb.InfluxDBReporter.Builder
   */
  public static class Factory {
    public static BuilderImpl newBuilder() {
      return new BuilderImpl();
    }
  }

  public static class BuilderImpl extends Builder<BuilderImpl> {
    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  /**
   * Builder for {@link InfluxDBReporter}. Defaults to no filter, reporting rates in seconds and times in
   * milliseconds using TCP sending type
   */
  public static abstract class Builder<T extends ConfiguredScheduledReporter.Builder<T>>
      extends ConfiguredScheduledReporter.Builder<T> {

    protected MetricFilter filter;
    protected String url;
    protected String username;
    protected String password;
    protected String database;
    protected InfluxDBConnectionType connectionType;
    protected Optional<InfluxDBPusher> influxDBPusher;

    protected Builder() {
      super();
      this.name = "InfluxDBReporter";
      this.influxDBPusher = Optional.absent();
      this.filter = MetricFilter.ALL;
      this.connectionType = InfluxDBConnectionType.TCP;
    }

    /**
     * Set {@link org.apache.gobblin.metrics.influxdb.InfluxDBPusher} to use.
     */
    public T withInfluxDBPusher(InfluxDBPusher pusher) {
      this.influxDBPusher = Optional.of(pusher);
      return self();
    }

    /**
     * Set connection parameters for the {@link org.apache.gobblin.metrics.influxdb.InfluxDBPusher} creation
     */
    public T withConnection(String url, String username, String password, String database) {
      this.url = url;
      this.username = username;
      this.password = password;
      this.database = database;
      return self();
    }

    /**
     * Set {@link org.apache.gobblin.metrics.influxdb.InfluxDBConnectionType} to use.
     */
    public T withConnectionType(InfluxDBConnectionType connectionType) {
      this.connectionType = connectionType;
      return self();
    }

    /**
     * Only report metrics which match the given filter.
     *
     * @param filter a {@link MetricFilter}
     * @return {@code this}
     */
    public T filter(MetricFilter filter) {
      this.filter = filter;
      return self();
    }

    /**
     * Builds and returns {@link InfluxDBReporter}.
     *
     * @return InfluxDBReporter
     */
    public InfluxDBReporter build(Properties props) throws IOException {
      return new InfluxDBReporter(this, ConfigUtils.propertiesToConfig(props,
          Optional.of(ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX)));
    }
  }

  /**
   * Converts every registered metric into timestamped InfluxDB {@link Point}s and pushes
   * them as a single batch. One timestamp (now, in millis) is shared by all points of a
   * report cycle. A failed push is logged, not rethrown, so the scheduled reporter keeps
   * running.
   */
  @Override
  protected void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
      SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers,
      Map<String, Object> tags) {

    String prefix = getMetricNamePrefix(tags);
    long timestamp = System.currentTimeMillis();
    List<Point> points = Lists.newArrayList();
    try {
      for (Map.Entry<String, Gauge> gauge : gauges.entrySet()) {
        reportGauge(points, prefix, gauge.getKey(), gauge.getValue(), timestamp);
      }

      for (Map.Entry<String, Counter> counter : counters.entrySet()) {
        reportCounter(points, prefix, counter.getKey(), counter.getValue(), timestamp);
      }

      for (Map.Entry<String, Histogram> histogram : histograms.entrySet()) {
        reportHistogram(points, prefix, histogram.getKey(), histogram.getValue(), timestamp);
      }

      for (Map.Entry<String, Meter> meter : meters.entrySet()) {
        reportMetered(points, prefix, meter.getKey(), meter.getValue(), timestamp);
      }

      for (Map.Entry<String, Timer> timer : timers.entrySet()) {
        reportTimer(points, prefix, timer.getKey(), timer.getValue(), timestamp);
      }
      influxDBPusher.push(points);
    } catch (IOException ioe) {
      LOGGER.error("Error sending metrics to InfluxDB", ioe);
    }
  }

  // Gauge values are pushed as-is (no rate/duration conversion); the value may be any
  // type a Gauge can hold, so the Object-typed buildMetricAsPoint overload is used.
  private void reportGauge(List<Point> points, String prefix, String name, Gauge gauge, long timestamp)
      throws IOException {
    String metricName = getKey(prefix, name);
    points.add(buildMetricAsPoint(metricName, gauge.getValue(), timestamp));
  }

  // Counts are reported under "<name>.count" and never duration-converted.
  private void reportCounter(List<Point> points, String prefix, String name, Counting counter, long timestamp)
      throws IOException {
    String metricName = getKey(prefix, name, COUNT.getName());
    points.add(buildMetricAsPoint(metricName, counter.getCount(), false, timestamp));
  }

  // A histogram is its count plus the snapshot quantiles (no duration conversion).
  private void reportHistogram(List<Point> points, String prefix, String name, Histogram histogram, long timestamp)
      throws IOException {
    reportCounter(points, prefix, name, histogram, timestamp);
    reportSnapshot(points, prefix, name, histogram.getSnapshot(), timestamp, false);
  }

  // A timer is its snapshot (duration-converted) plus its meter-style rates.
  private void reportTimer(List<Point> points, String prefix, String name, Timer timer, long timestamp)
      throws IOException {
    reportSnapshot(points, prefix, name, timer.getSnapshot(), timestamp, true);
    reportMetered(points, prefix, name, timer, timestamp);
  }

  /**
   * Emits min/max/mean/stddev/median and the standard percentiles of a {@link Snapshot},
   * each as its own point. When {@code convertDuration} is set the raw values are run
   * through the reporter's duration conversion (timer snapshots are in nanos).
   */
  private void reportSnapshot(List<Point> points, String prefix, String name, Snapshot snapshot, long timestamp,
      boolean convertDuration) throws IOException {
    String baseMetricName = getKey(prefix, name);
    points.add(buildMetricAsPoint(getKey(baseMetricName, MIN), snapshot.getMin(), convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, MAX), snapshot.getMax(), convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, MEAN), snapshot.getMean(), convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, STDDEV), snapshot.getStdDev(), convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, MEDIAN), snapshot.getMedian(), convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, PERCENTILE_75TH), snapshot.get75thPercentile(),
        convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, PERCENTILE_95TH), snapshot.get95thPercentile(),
        convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, PERCENTILE_98TH), snapshot.get98thPercentile(),
        convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, PERCENTILE_99TH), snapshot.get99thPercentile(),
        convertDuration, timestamp));
    points.add(buildMetricAsPoint(getKey(baseMetricName, PERCENTILE_999TH), snapshot.get999thPercentile(),
        convertDuration, timestamp));
  }

  // Emits the count plus 1/5/15-minute and mean rates; rates go through convertRate.
  private void reportMetered(List<Point> points, String prefix, String name, Metered metered, long timestamp)
      throws IOException {
    reportCounter(points, prefix, name, metered, timestamp);
    String baseMetricName = getKey(prefix, name);
    points.add(buildRateAsPoint(getKey(baseMetricName, RATE_1MIN), metered.getOneMinuteRate(), timestamp));
    points.add(buildRateAsPoint(getKey(baseMetricName, RATE_5MIN), metered.getFiveMinuteRate(), timestamp));
    points.add(buildRateAsPoint(getKey(baseMetricName, RATE_15MIN), metered.getFifteenMinuteRate(), timestamp));
    points.add(buildRateAsPoint(getKey(baseMetricName, MEAN_RATE), metered.getMeanRate(), timestamp));
  }

  // Numeric variant: optionally duration-converts before delegating to the generic builder.
  private Point buildMetricAsPoint(String metricName, Number value, boolean toDuration, long timestamp)
      throws IOException {
    Number metricValue = toDuration ? convertDuration(value.doubleValue()) : value;
    return buildMetricAsPoint(metricName, metricValue, timestamp);
  }

  private Point buildRateAsPoint(String metricName, double value, long timestamp) throws IOException {
    return buildMetricAsPoint(metricName, convertRate(value), timestamp);
  }

  // All points carry a single field named "value" with a millisecond timestamp.
  private Point buildMetricAsPoint(String name, Object value, long timestamp) throws IOException {
    return Point.measurement(name).field("value", value).time(timestamp, TimeUnit.MILLISECONDS).build();
  }

  private String getKey(String baseName, Measurements measurements) {
    return getKey(baseName, measurements.getName());
  }

  // JOINER is inherited from ConfiguredScheduledReporter and defines the name separator.
  private String getKey(String... keys) {
    return JOINER.join(keys);
  }
}
3,415
0
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics/influxdb/InfluxDBEventReporter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metrics.influxdb;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.TimeUnit;

import org.influxdb.dto.Point;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.primitives.Doubles;

import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.MultiPartEvent;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.reporter.EventReporter;

import static org.apache.gobblin.metrics.event.TimingEvent.METADATA_DURATION;


/**
 *
 * {@link org.apache.gobblin.metrics.reporter.EventReporter} that emits {@link org.apache.gobblin.metrics.GobblinTrackingEvent} events
 * as timestamped name - value pairs to InfluxDB
 *
 * @author Lorand Bendig
 *
 */
public class InfluxDBEventReporter extends EventReporter {

  private final InfluxDBPusher influxDBPusher;

  // Value reported for events that carry no usable metadata value.
  // Fixed typo: was EMTPY_VALUE (private constant, so the rename is interface-safe).
  private static final double EMPTY_VALUE = 0d;

  private static final Logger LOGGER = LoggerFactory.getLogger(InfluxDBEventReporter.class);

  public InfluxDBEventReporter(Builder<?> builder) throws IOException {
    super(builder);
    if (builder.influxDBPusher.isPresent()) {
      this.influxDBPusher = builder.influxDBPusher.get();
    } else {
      this.influxDBPusher =
          new InfluxDBPusher.Builder(builder.url, builder.username, builder.password, builder.database,
              builder.connectionType).build();
    }
  }

  /**
   * Drains the event queue, pushing each event to InfluxDB. A push failure aborts the
   * drain (remaining events stay queued) and is logged rather than rethrown.
   */
  @Override
  public void reportEventQueue(Queue<GobblinTrackingEvent> queue) {
    GobblinTrackingEvent nextEvent;
    try {
      while (null != (nextEvent = queue.poll())) {
        pushEvent(nextEvent);
      }
    } catch (IOException e) {
      LOGGER.error("Error sending event to InfluxDB", e);
    }
  }

  /**
   * Extracts the event and its metadata from {@link GobblinTrackingEvent} and creates
   * timestamped name value pairs
   *
   * @param event {@link GobblinTrackingEvent} to be reported
   * @throws IOException
   */
  private void pushEvent(GobblinTrackingEvent event) throws IOException {
    Map<String, String> metadata = event.getMetadata();
    String name = getMetricName(metadata, event.getName());
    long timestamp = event.getTimestamp();
    MultiPartEvent multiPartEvent = MultiPartEvent.getEvent(metadata.get(EventSubmitter.EVENT_TYPE));
    if (multiPartEvent == null) {
      // Single-part event: report just its name with a placeholder value.
      influxDBPusher.push(buildEventAsPoint(name, EMPTY_VALUE, timestamp));
    } else {
      // Multi-part event: each metadata field becomes its own point, batched together.
      List<Point> points = Lists.newArrayList();
      for (String field : multiPartEvent.getMetadataFields()) {
        Point point = buildEventAsPoint(JOINER.join(name, field), convertValue(field, metadata.get(field)), timestamp);
        points.add(point);
      }
      influxDBPusher.push(points);
    }
  }

  /**
   * Converts the event value taken from the metadata to a double (the default type).
   * A missing value is reported as {@code EMPTY_VALUE}; a duration field is converted
   * from milliseconds via the reporter's duration conversion; any other non-numeric
   * value falls back to its original string form. Metadata entries are emitted as
   * distinct events (see {@link MultiPartEvent}).
   *
   * @param field {@link GobblinTrackingEvent} metadata key
   * @param value {@link GobblinTrackingEvent} metadata value
   * @return The converted event value
   */
  private Object convertValue(String field, String value) {
    if (value == null) {
      return EMPTY_VALUE;
    }
    if (METADATA_DURATION.equals(field)) {
      return convertDuration(TimeUnit.MILLISECONDS.toNanos(Long.parseLong(value)));
    } else {
      Double doubleValue = Doubles.tryParse(value);
      return (doubleValue == null) ? value : doubleValue;
    }
  }

  /**
   * Returns a new {@link InfluxDBEventReporter.Builder} for {@link InfluxDBEventReporter}.
   * Will automatically add all Context tags to the reporter.
   *
   * @param context the {@link org.apache.gobblin.metrics.MetricContext} to report
   * @return InfluxDBEventReporter builder
   * @deprecated this method is bugged. Use {@link InfluxDBEventReporter.Factory#forContext} instead.
   */
  @Deprecated
  public static Builder<? extends Builder> forContext(MetricContext context) {
    return new BuilderImpl(context);
  }

  public static class BuilderImpl extends Builder<BuilderImpl> {

    private BuilderImpl(MetricContext context) {
      super(context);
    }

    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  public static class Factory {
    /**
     * Returns a new {@link InfluxDBEventReporter.Builder} for {@link InfluxDBEventReporter}.
     * Will automatically add all Context tags to the reporter.
     *
     * @param context the {@link org.apache.gobblin.metrics.MetricContext} to report
     * @return InfluxDBEventReporter builder
     */
    public static BuilderImpl forContext(MetricContext context) {
      return new BuilderImpl(context);
    }
  }

  /**
   * Builder for {@link InfluxDBEventReporter}.
   * Defaults to no filter, reporting rates in seconds and times in milliseconds using TCP connection
   */
  public static abstract class Builder<T extends EventReporter.Builder<T>> extends EventReporter.Builder<T> {

    protected String url;
    protected String username;
    protected String password;
    protected String database;
    protected InfluxDBConnectionType connectionType;
    protected Optional<InfluxDBPusher> influxDBPusher;

    protected Builder(MetricContext context) {
      super(context);
      this.influxDBPusher = Optional.absent();
      this.connectionType = InfluxDBConnectionType.TCP;
    }

    /**
     * Set {@link org.apache.gobblin.metrics.influxdb.InfluxDBPusher} to use.
     */
    public T withInfluxDBPusher(InfluxDBPusher pusher) {
      this.influxDBPusher = Optional.of(pusher);
      return self();
    }

    /**
     * Set connection parameters for the {@link org.apache.gobblin.metrics.influxdb.InfluxDBPusher} creation
     */
    public T withConnection(String url, String username, String password, String database) {
      this.url = url;
      this.username = username;
      this.password = password;
      this.database = database;
      return self();
    }

    /**
     * Set {@link org.apache.gobblin.metrics.influxdb.InfluxDBConnectionType} to use.
     */
    public T withConnectionType(InfluxDBConnectionType connectionType) {
      this.connectionType = connectionType;
      return self();
    }

    /**
     * Builds and returns {@link InfluxDBEventReporter}.
     *
     * @return InfluxDBEventReporter
     */
    public InfluxDBEventReporter build() throws IOException {
      return new InfluxDBEventReporter(this);
    }
  }

  // All event points carry a single field named "value" with a millisecond timestamp.
  private Point buildEventAsPoint(String name, Object value, long timestamp) throws IOException {
    return Point.measurement(name).field("value", value).time(timestamp, TimeUnit.MILLISECONDS).build();
  }
}
3,416
0
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics/influxdb/InfluxDBConnectionType.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metrics.influxdb; import org.influxdb.InfluxDB; import org.influxdb.InfluxDBFactory; /** * Connection types used by {@link InfluxDBReporter} and {@link InfluxDBEventReporter}. * * @author Lorand Bendig * */ public enum InfluxDBConnectionType { TCP { @Override public InfluxDB createConnection(String url, String username, String password) { return InfluxDBFactory.connect(url, username, password); } }; // UDP will be added once InfluxDB-java will support it public abstract InfluxDB createConnection(String url, String username, String password); }
3,417
0
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics
Create_ds/gobblin/gobblin-modules/gobblin-metrics-influxdb/src/main/java/org/apache/gobblin/metrics/influxdb/InfluxDBPusher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metrics.influxdb;

import java.util.List;
import java.util.concurrent.TimeUnit;

import org.influxdb.InfluxDB;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;


/**
 * Establishes a connection to InfluxDB and pushes {@link Point}s.
 *
 * @author Lorand Bendig
 *
 */
public class InfluxDBPusher {

  /** Live client connection, created by the {@link Builder}. */
  private final InfluxDB influxDB;

  /** Target database every batch is written to. */
  private final String database;

  private InfluxDBPusher(Builder builder) {
    this.influxDB = builder.influxDB;
    this.database = builder.database;
  }

  /**
   * Builder that opens the InfluxDB connection up front and lets callers tune its
   * timeouts before constructing the pusher.
   */
  public static class Builder {
    private final InfluxDB influxDB;
    private final String database;

    public Builder(String url, String username, String password, String database,
        InfluxDBConnectionType connectionType) {
      this.influxDB = connectionType.createConnection(url, username, password);
      this.database = database;
    }

    /**
     * Set the connection timeout for InfluxDB
     * @param connectTimeout
     * @param timeUnit
     * @return
     */
    public Builder withConnectTimeout(long connectTimeout, TimeUnit timeUnit) {
      influxDB.setConnectTimeout(connectTimeout, timeUnit);
      return this;
    }

    /**
     * Set the writer timeout for the InfluxDB connection
     * @param writeTimeout
     * @param timeUnit
     * @return
     */
    public Builder withWriteTimeout(long writeTimeout, TimeUnit timeUnit) {
      influxDB.setWriteTimeout(writeTimeout, timeUnit);
      return this;
    }

    public InfluxDBPusher build() {
      return new InfluxDBPusher(this);
    }
  }

  /**
   * Push a single Point
   * @param point the {@link Point} to report
   */
  public void push(Point point) {
    influxDB.write(BatchPoints.database(database).point(point).build());
  }

  /**
   * Push multiple points at once.
   * @param points list of {@link Point}s to report
   */
  public void push(List<Point> points) {
    BatchPoints.Builder batch = BatchPoints.database(database);
    for (Point next : points) {
      batch.point(next);
    }
    influxDB.write(batch.build());
  }
}
0
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/test/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/test/java/org/apache/gobblin/converter/parquet/JsonIntermediateToParquetGroupConverterTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter.parquet;

import java.io.InputStreamReader;
import java.lang.reflect.Type;

import org.apache.parquet.example.data.Group;
import org.apache.parquet.schema.MessageType;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.google.gson.reflect.TypeToken;

import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.source.workunit.Extract;

import static org.testng.Assert.assertEquals;


/**
 * Data-driven tests for {@link JsonIntermediateToParquetGroupConverter}: each named case
 * in the JSON resource supplies an input schema/record plus the expected parquet schema
 * and record string forms.
 */
@Test(groups = {"gobblin.converter"})
public class JsonIntermediateToParquetGroupConverterTest {
  private static final String RESOURCE_PATH = "/converter/JsonIntermediateToParquetConverter.json";
  private static JsonObject testCases;
  private static WorkUnitState workUnit;
  private static JsonIntermediateToParquetGroupConverter parquetConverter;

  @BeforeClass
  public static void setUp() {
    // Load all test cases once; each test picks its case by name.
    Type listType = new TypeToken<JsonObject>() {
    }.getType();
    Gson gson = new Gson();
    JsonObject testData = gson.fromJson(
        new InputStreamReader(JsonIntermediateToParquetGroupConverter.class.getResourceAsStream(RESOURCE_PATH)),
        listType);

    testCases = testData.getAsJsonObject();
    SourceState source = new SourceState();
    workUnit = new WorkUnitState(
        source.createWorkUnit(source.createExtract(Extract.TableType.SNAPSHOT_ONLY, "test_namespace", "test_table")));
  }

  /**
   * Runs one named case end-to-end: converts the schema and the record, then compares
   * both (whitespace-insensitively) against the expected strings from the resource.
   */
  private void testCase(String testCaseName) throws SchemaConversionException, DataConversionException {
    JsonObject test = testCases.get(testCaseName).getAsJsonObject();
    parquetConverter = new JsonIntermediateToParquetGroupConverter();

    MessageType schema = parquetConverter.convertSchema(test.get("schema").getAsJsonArray(), workUnit);
    Group record = parquetConverter.convertRecord(schema, test.get("record").getAsJsonObject(), workUnit)
        .iterator().next();

    assertEqualsIgnoreSpaces(schema.toString(), test.get("expectedSchema").getAsString());
    assertEqualsIgnoreSpaces(record.toString(), test.get("expectedRecord").getAsString());
  }

  @Test(expectedExceptions = RuntimeException.class,
      expectedExceptionsMessageRegExp = "Symbol .* does not belong to set \\[.*?\\]")
  public void testEnumTypeBelongsToEnumSet() throws Exception {
    // Mutate a copy of the "enum" case so the original fixture stays intact for other tests.
    JsonObject test = deepCopy(testCases.get("enum").getAsJsonObject(), JsonObject.class);
    parquetConverter = new JsonIntermediateToParquetGroupConverter();

    MessageType schema = parquetConverter.convertSchema(test.get("schema").getAsJsonArray(), workUnit);
    JsonObject jsonRecord = test.get("record").getAsJsonObject();
    jsonRecord.addProperty("some_enum", "HELL");

    parquetConverter.convertRecord(schema, jsonRecord, workUnit).iterator().next();
  }

  @Test
  public void testPrimitiveTypes() throws Exception {
    testCase("simplePrimitiveTypes");
  }

  @Test
  public void testArrayType() throws Exception {
    testCase("array");
  }

  @Test
  public void testEnumTypeWithNullableTrue() throws Exception {
    testCase("enum");
  }

  @Test
  public void testEnumTypeWithNullableFalse() throws Exception {
    testCase("enum1");
  }

  @Test
  public void testRecordType() throws Exception {
    testCase("record");
  }

  @Test
  public void testMapType() throws Exception {
    testCase("map");
  }

  @Test
  public void testNullValueInOptionalField() throws Exception {
    testCase("nullValueInOptionalField");
  }

  // Compares two strings ignoring all whitespace and normalizing newlines.
  private void assertEqualsIgnoreSpaces(String actual, String expected) {
    assertEquals(actual.replaceAll("\\n", ";").replaceAll("\\s|\\t", ""),
        expected.replaceAll("\\n", ";").replaceAll("\\s|\\t", ""));
  }

  /**
   * Deep-copies an object via a Gson round trip.
   *
   * Fix: the previous version caught {@code Exception}, printed the stack trace and
   * returned {@code null}, which masked the real failure behind a later NPE. Gson
   * throws unchecked exceptions, so letting them propagate fails the test with the
   * actual cause.
   */
  public <T> T deepCopy(T object, Class<T> type) {
    Gson gson = new Gson();
    return gson.fromJson(gson.toJson(object, type), type);
  }
}
3,419
0
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/test/java/org/apache/gobblin/writer/TestConstants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.writer; import org.apache.parquet.example.data.Group; import org.apache.parquet.example.data.simple.SimpleGroup; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.OriginalType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Types; import org.apache.gobblin.parquet.writer.test.TestConstantsBase; import org.apache.gobblin.test.TestRecord; public class TestConstants extends TestConstantsBase<Group> { public static final MessageType PARQUET_SCHEMA = Types.buildMessage() .addFields( Types.required(PrimitiveType.PrimitiveTypeName.BINARY).as(OriginalType.UTF8) .named(TestConstants.PAYLOAD_FIELD_NAME), Types.required(PrimitiveType.PrimitiveTypeName.INT32).named(TestConstants.PARTITION_FIELD_NAME), Types.required(PrimitiveType.PrimitiveTypeName.INT64).named(TestConstants.SEQUENCE_FIELD_NAME)) .named("Data"); @Override public Group convertToParquetGroup(TestRecord record) { Group group = new SimpleGroup(PARQUET_SCHEMA); group.add(PAYLOAD_FIELD_NAME, record.getPayload()); group.add(SEQUENCE_FIELD_NAME, Long.valueOf(record.getSequence())); group.add(PARTITION_FIELD_NAME, record.getPartition()); return group; } }
3,420
0
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/test/java/org/apache/gobblin/writer/ParquetHdfsDataWriterTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.avro.AvroParquetReader;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.api.InitContext;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.proto.ProtoParquetReader;
import org.apache.parquet.schema.MessageType;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.parquet.writer.ParquetRecordFormat;
import org.apache.gobblin.parquet.writer.test.ParquetHdfsDataWriterTestBase;
import org.apache.gobblin.test.TestRecord;
import org.apache.gobblin.test.proto.TestRecordProtos;


/**
 * Round-trip tests for the parquet HDFS data writer, reading the written files back
 * through the GROUP, PROTOBUF and AVRO parquet readers.
 *
 * Changes from the previous version: the three hand-rolled try/finally reader-close
 * blocks (which reported close failures via {@code System.out.println}) are replaced
 * with try-with-resources, and the read-support class is static since it keeps no
 * enclosing-instance state.
 */
@Test(groups = {"gobblin.writer"})
@Slf4j
public class ParquetHdfsDataWriterTest extends ParquetHdfsDataWriterTestBase {

  public ParquetHdfsDataWriterTest() {
    super(new TestConstants());
  }

  @Override
  protected DataWriterBuilder getDataWriterBuilder() {
    return new ParquetDataWriterBuilder();
  }

  @BeforeMethod
  public void setUp() throws Exception {
    super.setUp();
  }

  /**
   * Dispatches to the reader matching the record format the file was written with.
   */
  @Override
  protected List<TestRecord> readParquetRecordsFromFile(File outputFile, ParquetRecordFormat format)
      throws IOException {
    switch (format) {
      case GROUP: {
        return readParquetFilesGroup(outputFile);
      }
      case PROTOBUF: {
        return readParquetFilesProto(outputFile);
      }
      case AVRO: {
        return readParquetFilesAvro(outputFile);
      }
      default:
        throw new RuntimeException(format + " is not supported");
    }
  }

  private List<TestRecord> readParquetFilesAvro(File outputFile) throws IOException {
    List<TestRecord> records = new ArrayList<>();
    // try-with-resources closes the reader on every path; close failures now surface
    // instead of being printed and swallowed.
    try (ParquetReader<org.apache.gobblin.test.avro.TestRecord> reader =
        new AvroParquetReader<>(new Path(outputFile.toString()))) {
      for (org.apache.gobblin.test.avro.TestRecord value = reader.read(); value != null; value = reader.read()) {
        records.add(new TestRecord(value.getPartition(), value.getSequence(), value.getPayload()));
      }
    }
    return records;
  }

  protected List<TestRecord> readParquetFilesProto(File outputFile) throws IOException {
    List<TestRecord> records = new ArrayList<>();
    try (ParquetReader<TestRecordProtos.TestRecordOrBuilder> reader =
        new ProtoParquetReader<>(new Path(outputFile.toString()))) {
      for (TestRecordProtos.TestRecordOrBuilder value = reader.read(); value != null; value = reader.read()) {
        records.add(new TestRecord(value.getPartition(), value.getSequence(), value.getPayload()));
      }
    }
    return records;
  }

  protected List<TestRecord> readParquetFilesGroup(File outputFile) throws IOException {
    List<Group> records = new ArrayList<>();
    try (ParquetReader<Group> reader =
        new ParquetReader<>(new Path(outputFile.toString()), new SimpleReadSupport())) {
      for (Group value = reader.read(); value != null; value = reader.read()) {
        records.add(value);
      }
    }
    return records.stream().map(value -> new TestRecord(
        value.getInteger(TestConstants.PARTITION_FIELD_NAME, 0),
        value.getLong(TestConstants.SEQUENCE_FIELD_NAME, 0),
        value.getString(TestConstants.PAYLOAD_FIELD_NAME, 0)
    )).collect(Collectors.toList());
  }

  @Test
  public void testWrite() throws Exception {
    super.testWrite();
  }

  /**
   * Returns the schema object the writer needs for each record format.
   */
  @Override
  protected Object getSchema(ParquetRecordFormat format) {
    switch (format) {
      case GROUP: {
        return TestConstants.PARQUET_SCHEMA;
      }
      case PROTOBUF: {
        return TestRecordProtos.TestRecord.class;
      }
      case AVRO: {
        return org.apache.gobblin.test.avro.TestRecord.getClassSchema();
      }
      default:
        throw new RuntimeException(format.name() + " is not implemented");
    }
  }

  @AfterClass
  public void tearDown() throws IOException {
    super.tearDown();
  }

  // Static: holds no reference to the enclosing test instance (avoids a hidden
  // enclosing-instance field in a non-static inner class).
  static class SimpleReadSupport extends ReadSupport<Group> {
    @Override
    public RecordMaterializer<Group> prepareForRead(Configuration conf, Map<String, String> metaData,
        MessageType schema, ReadContext context) {
      return new GroupRecordConverter(schema);
    }

    @Override
    public ReadContext init(InitContext context) {
      return new ReadContext(context.getFileSchema());
    }
  }
}
3,421
0
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin/converter/parquet/ParquetGroup.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter.parquet;

import java.util.ArrayList;
import java.util.List;

import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.BinaryValue;
import org.apache.parquet.example.data.simple.BooleanValue;
import org.apache.parquet.example.data.simple.DoubleValue;
import org.apache.parquet.example.data.simple.FloatValue;
import org.apache.parquet.example.data.simple.Int96Value;
import org.apache.parquet.example.data.simple.IntegerValue;
import org.apache.parquet.example.data.simple.LongValue;
import org.apache.parquet.example.data.simple.NanoTime;
import org.apache.parquet.example.data.simple.Primitive;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.io.api.RecordConsumer;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;

import static org.apache.parquet.schema.Type.Repetition.REPEATED;


/**
 * Custom implementation of {@link Group} that supports adding an {@link Object} of type
 * {@link Primitive} or {@link Group}. Also provides methods to add a {@link Primitive} or
 * a {@link Group} keyed by a field's {@link String} name when its index is not known.
 *
 * @author tilakpatidar
 */
public class ParquetGroup extends Group {

  private final GroupType schema;
  // data[i] holds the value(s) written so far for field i of the schema.
  // A REPEATED field may accumulate multiple entries; any other field holds at most one.
  private final List<Object>[] data;

  @SuppressWarnings("unchecked")
  public ParquetGroup(GroupType schema) {
    this.schema = schema;
    // Generic array creation is illegal in Java, hence the raw List[] (scoped suppression above).
    this.data = new List[schema.getFields().size()];
    for (int i = 0; i < schema.getFieldCount(); ++i) {
      this.data[i] = new ArrayList<>();
    }
  }

  public String toString() {
    return this.toString("");
  }

  /**
   * Renders the group as an indented, human-readable tree; nested groups recurse
   * with two extra spaces of indentation.
   */
  public String toString(String indent) {
    StringBuilder result = new StringBuilder();
    int i = 0;
    for (Type field : this.schema.getFields()) {
      String name = field.getName();
      List<Object> values = this.data[i];
      for (Object value : values) {
        result.append(indent).append(name);
        if (value == null) {
          result.append(": NULL\n");
        } else if (value instanceof Group) {
          result.append("\n").append(((ParquetGroup) value).toString(indent + "  "));
        } else {
          result.append(": ").append(value.toString()).append("\n");
        }
      }
      i++;
    }
    return result.toString();
  }

  @Override
  public Group addGroup(int fieldIndex) {
    ParquetGroup g = new ParquetGroup(this.schema.getType(fieldIndex).asGroupType());
    this.data[fieldIndex].add(g);
    return g;
  }

  public Group getGroup(int fieldIndex, int index) {
    return (Group) this.getValue(fieldIndex, index);
  }

  /**
   * Returns the index-th value recorded for the given field, raising a descriptive
   * {@link RuntimeException} when either the field or the element is out of range.
   */
  private Object getValue(int fieldIndex, int index) {
    List<Object> list;
    try {
      list = this.data[fieldIndex];
    } catch (IndexOutOfBoundsException e) {
      throw new RuntimeException(
          "not found " + fieldIndex + "(" + this.schema.getFieldName(fieldIndex) + ") in group:\n" + this);
    }
    try {
      return list.get(index);
    } catch (IndexOutOfBoundsException e) {
      throw new RuntimeException(
          "not found " + fieldIndex + "(" + this.schema.getFieldName(fieldIndex) + ") element number " + index
              + " in group:\n" + this);
    }
  }

  /**
   * Appends a primitive value to a field; a non-REPEATED field may only receive one value.
   */
  public void add(int fieldIndex, Primitive value) {
    Type type = this.schema.getType(fieldIndex);
    List<Object> list = this.data[fieldIndex];
    if (!type.isRepetition(REPEATED) && !list.isEmpty()) {
      throw new IllegalStateException(
          "field " + fieldIndex + " (" + type.getName() + ") can not have more than one value: " + list);
    } else {
      list.add(value);
    }
  }

  public int getFieldRepetitionCount(int fieldIndex) {
    List<Object> list = this.data[fieldIndex];
    return list == null ? 0 : list.size();
  }

  public String getValueToString(int fieldIndex, int index) {
    return String.valueOf(this.getValue(fieldIndex, index));
  }

  public String getString(int fieldIndex, int index) {
    return ((BinaryValue) this.getValue(fieldIndex, index)).getString();
  }

  public int getInteger(int fieldIndex, int index) {
    return ((IntegerValue) this.getValue(fieldIndex, index)).getInteger();
  }

  @Override
  public long getLong(int fieldIndex, int index) {
    return ((LongValue) this.getValue(fieldIndex, index)).getLong();
  }

  @Override
  public double getDouble(int fieldIndex, int index) {
    return ((DoubleValue) this.getValue(fieldIndex, index)).getDouble();
  }

  @Override
  public float getFloat(int fieldIndex, int index) {
    return ((FloatValue) this.getValue(fieldIndex, index)).getFloat();
  }

  public boolean getBoolean(int fieldIndex, int index) {
    return ((BooleanValue) this.getValue(fieldIndex, index)).getBoolean();
  }

  public Binary getBinary(int fieldIndex, int index) {
    return ((BinaryValue) this.getValue(fieldIndex, index)).getBinary();
  }

  public Binary getInt96(int fieldIndex, int index) {
    return ((Int96Value) this.getValue(fieldIndex, index)).getInt96();
  }

  public void add(int fieldIndex, int value) {
    this.add(fieldIndex, new IntegerValue(value));
  }

  public void add(int fieldIndex, long value) {
    this.add(fieldIndex, new LongValue(value));
  }

  public void add(int fieldIndex, String value) {
    this.add(fieldIndex, new BinaryValue(Binary.fromString(value)));
  }

  public void add(int fieldIndex, NanoTime value) {
    this.add(fieldIndex, value.toInt96());
  }

  public void add(int fieldIndex, boolean value) {
    this.add(fieldIndex, new BooleanValue(value));
  }

  /**
   * Adds a {@link Binary} value, wrapping it according to the field's declared
   * primitive type (BINARY or INT96 only).
   */
  public void add(int fieldIndex, Binary value) {
    switch (this.getType().getType(fieldIndex).asPrimitiveType().getPrimitiveTypeName()) {
      case BINARY:
        this.add(fieldIndex, new BinaryValue(value));
        break;
      case INT96:
        this.add(fieldIndex, new Int96Value(value));
        break;
      default:
        throw new UnsupportedOperationException(
            this.getType().asPrimitiveType().getName() + " not supported for Binary");
    }
  }

  public void add(int fieldIndex, float value) {
    this.add(fieldIndex, new FloatValue(value));
  }

  public void add(int fieldIndex, double value) {
    this.add(fieldIndex, new DoubleValue(value));
  }

  @Override
  public void add(int fieldIndex, Group value) {
    this.data[fieldIndex].add(value);
  }

  public GroupType getType() {
    return this.schema;
  }

  public void writeValue(int field, int index, RecordConsumer recordConsumer) {
    ((Primitive) this.getValue(field, index)).writeValue(recordConsumer);
  }

  /**
   * Add any object of {@link PrimitiveType} or {@link Group} type with a String key.
   * @param key field name in this group's schema
   * @param object a {@link Group} or a {@link Primitive} value
   */
  public void add(String key, Object object) {
    int fieldIndex = getIndex(key);
    // Fix: accept any Group subtype here. The previous exact-class check
    // (object.getClass() == ParquetGroup.class) let other Group implementations
    // fall through to the Primitive cast below and fail with ClassCastException.
    if (object instanceof Group) {
      this.addGroup(key, (Group) object);
    } else {
      this.add(fieldIndex, (Primitive) object);
    }
  }

  private int getIndex(String key) {
    return getType().getFieldIndex(key);
  }

  /**
   * Add a {@link Group} given a String key.
   * @param key field name in this group's schema
   * @param object the group value to store
   */
  private void addGroup(String key, Group object) {
    int fieldIndex = getIndex(key);
    // asGroupType() is invoked purely for validation: it throws if the named
    // field is not actually declared as a group in the schema.
    this.schema.getType(fieldIndex).asGroupType();
    this.data[fieldIndex].add(object);
  }
}
3,422
0
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin/converter/parquet/JsonElementConversionFactory.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter.parquet;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.BinaryValue;
import org.apache.parquet.example.data.simple.BooleanValue;
import org.apache.parquet.example.data.simple.DoubleValue;
import org.apache.parquet.example.data.simple.FloatValue;
import org.apache.parquet.example.data.simple.IntegerValue;
import org.apache.parquet.example.data.simple.LongValue;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types;

import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;

import org.apache.gobblin.converter.parquet.JsonSchema.*;

import static org.apache.gobblin.converter.parquet.JsonSchema.*;
import static org.apache.gobblin.converter.parquet.JsonSchema.InputType.STRING;
import static org.apache.gobblin.converter.parquet.JsonElementConversionFactory.RecordConverter.RecordType.CHILD;
import static org.apache.parquet.schema.OriginalType.UTF8;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT32;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64;
import static org.apache.parquet.schema.Type.Repetition.OPTIONAL;
import static org.apache.parquet.schema.Type.Repetition.REPEATED;
import static org.apache.parquet.schema.Type.Repetition.REQUIRED;


/**
 * <p>
 * Creates a JsonElement to Parquet converter for all supported data types.
 * </p>
 *
 * @author tilakpatidar
 *
 */
public class JsonElementConversionFactory {

  /**
   * Use to create a converter for a single field from a parquetSchema.
   *
   * @param schema JSON schema describing the field
   * @param repeated - Is the {@link Type} repeated in the parent {@link Group}
   * @return a converter for the field's declared {@link InputType}
   */
  public static JsonElementConverter getConverter(JsonSchema schema, boolean repeated) {
    InputType fieldType = schema.getInputType();
    switch (fieldType) {
      case INT:
        return new IntConverter(schema, repeated);

      case LONG:
        return new LongConverter(schema, repeated);

      case FLOAT:
        return new FloatConverter(schema, repeated);

      case DOUBLE:
        return new DoubleConverter(schema, repeated);

      case BOOLEAN:
        return new BooleanConverter(schema, repeated);

      case STRING:
        return new StringConverter(schema, repeated);

      case ARRAY:
        return new ArrayConverter(schema);

      case ENUM:
        return new EnumConverter(schema);

      case RECORD:
        return new RecordConverter(schema);

      case MAP:
        return new MapConverter(schema);

      // DATE and TIMESTAMP are carried through as UTF8 strings.
      case DATE:
      case TIMESTAMP:
        return new StringConverter(schema, repeated);

      default:
        throw new UnsupportedOperationException(fieldType + " is unsupported");
    }
  }

  /**
   * Converts a JsonElement into a supported ParquetType
   * @author tilakpatidar
   *
   */
  public static abstract class JsonElementConverter {
    protected final JsonSchema jsonSchema;

    protected JsonElementConverter(JsonSchema schema) {
      this.jsonSchema = schema;
    }

    /**
     * Convert value to a parquet type and perform null check.
     * @param value JSON value to convert
     * @return Parquet safe type, or null for a nullable field holding JSON null
     */
    public Object convert(JsonElement value) {
      if (value.isJsonNull()) {
        if (this.jsonSchema.isNullable()) {
          return null;
        }
        throw new RuntimeException(
            "Field: " + this.jsonSchema.getColumnName() + " is not nullable and contains a null value");
      }
      return convertField(value);
    }

    /**
     * Returns a {@link Type} parquet schema
     * @return
     */
    abstract public Type schema();

    /**
     * Convert JsonElement to Parquet type
     * @param value
     * @return
     */
    abstract Object convertField(JsonElement value);
  }

  /**
   * Converts a {@link JsonSchema} to a {@link PrimitiveType}
   */
  public static abstract class PrimitiveConverter extends JsonElementConverter {
    protected final boolean repeated;
    private PrimitiveTypeName outputType;
    protected Type schema;

    /**
     * @param jsonSchema field schema
     * @param repeated whether the field repeats in its parent group
     * @param outputType target parquet primitive type
     */
    public PrimitiveConverter(JsonSchema jsonSchema, boolean repeated, PrimitiveTypeName outputType) {
      super(jsonSchema);
      this.repeated = repeated;
      this.outputType = outputType;
      this.schema = buildSchema();
    }

    protected Type buildSchema() {
      return new PrimitiveType(this.repeated ? REPEATED : optionalOrRequired(this.jsonSchema), this.outputType,
          this.jsonSchema.getColumnName());
    }

    @Override
    public Type schema() {
      return this.schema;
    }
  }

  /**
   * Converts {@link JsonSchema} having collection of elements of {@link InputType} into a {@link GroupType}.
   */
  public static abstract class CollectionConverter extends JsonElementConverter {
    protected InputType elementType;
    protected JsonElementConverter elementConverter;
    protected Type schema;

    public CollectionConverter(JsonSchema collectionSchema, InputType elementType, boolean repeated) {
      super(collectionSchema);
      this.elementType = elementType;
      this.elementConverter = getConverter(getElementSchema(), repeated);
      this.schema = buildSchema();
    }

    @Override
    public Type schema() {
      return this.schema;
    }

    /**
     * Prepare a {@link JsonSchema} for the elements in a collection.
     * @return
     */
    abstract JsonSchema getElementSchema();

    abstract Type buildSchema();
  }

  public static class IntConverter extends PrimitiveConverter {

    public IntConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, INT32);
    }

    @Override
    IntegerValue convertField(JsonElement value) {
      return new IntegerValue(value.getAsInt());
    }
  }

  public static class LongConverter extends PrimitiveConverter {

    public LongConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, INT64);
    }

    @Override
    LongValue convertField(JsonElement value) {
      return new LongValue(value.getAsLong());
    }
  }

  public static class FloatConverter extends PrimitiveConverter {

    public FloatConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, PrimitiveTypeName.FLOAT);
    }

    @Override
    FloatValue convertField(JsonElement value) {
      return new FloatValue(value.getAsFloat());
    }
  }

  public static class DoubleConverter extends PrimitiveConverter {

    public DoubleConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, PrimitiveTypeName.DOUBLE);
    }

    @Override
    DoubleValue convertField(JsonElement value) {
      return new DoubleValue(value.getAsDouble());
    }
  }

  public static class BooleanConverter extends PrimitiveConverter {

    public BooleanConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, PrimitiveTypeName.BOOLEAN);
    }

    @Override
    BooleanValue convertField(JsonElement value) {
      return new BooleanValue(value.getAsBoolean());
    }
  }

  public static class StringConverter extends PrimitiveConverter {

    public StringConverter(JsonSchema schema, boolean repeated) {
      super(schema, repeated, BINARY);
      // Re-assignment is redundant (the super constructor already dispatched to the
      // overridden buildSchema with all fields set) but kept for clarity of intent.
      this.schema = buildSchema();
    }

    @Override
    BinaryValue convertField(JsonElement value) {
      return new BinaryValue(Binary.fromString(value.getAsString()));
    }

    @Override
    protected Type buildSchema() {
      String columnName = this.jsonSchema.getColumnName();
      if (this.repeated) {
        return Types.repeated(BINARY).as(UTF8).named(columnName);
      }
      switch (optionalOrRequired(this.jsonSchema)) {
        case OPTIONAL:
          return Types.optional(BINARY).as(UTF8).named(columnName);
        case REQUIRED:
          return Types.required(BINARY).as(UTF8).named(columnName);
        default:
          throw new RuntimeException("Unsupported Repetition type");
      }
    }
  }

  /**
   * Maps a field's nullability to the corresponding parquet repetition.
   */
  public static Type.Repetition optionalOrRequired(JsonSchema jsonBaseSchema) {
    return jsonBaseSchema.isNullable() ? OPTIONAL : REQUIRED;
  }

  public static class ArrayConverter extends CollectionConverter {

    public ArrayConverter(JsonSchema arraySchema) {
      super(arraySchema, arraySchema.getElementTypeUsingKey(ARRAY_ITEMS_KEY), true);
    }

    @Override
    Object convertField(JsonElement value) {
      ParquetGroup array = new ParquetGroup((GroupType) schema());
      JsonElementConverter converter = this.elementConverter;
      for (JsonElement elem : (JsonArray) value) {
        array.add(ARRAY_KEY, converter.convert(elem));
      }
      return array;
    }

    @Override
    protected Type buildSchema() {
      List<Type> fields = new ArrayList<>();
      fields.add(0, this.elementConverter.schema());
      return new GroupType(optionalOrRequired(jsonSchema), this.jsonSchema.getColumnName(), fields);
    }

    @Override
    JsonSchema getElementSchema() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(this.elementType, true);
      jsonSchema.setColumnName(ARRAY_KEY);
      return jsonSchema;
    }
  }

  public static class EnumConverter extends CollectionConverter {
    private final HashSet<String> symbols = new HashSet<>();

    public EnumConverter(JsonSchema enumSchema) {
      super(enumSchema, STRING, false);
      JsonArray symbolsArray = enumSchema.getSymbols();
      symbolsArray.forEach(e -> symbols.add(e.getAsString()));
    }

    @Override
    Object convertField(JsonElement value) {
      // Accept either a declared symbol or, when the field is nullable, a JSON null.
      if (symbols.contains(value.getAsString()) || (this.jsonSchema.isNullable() && value.isJsonNull())) {
        return this.elementConverter.convert(value);
      }
      throw new RuntimeException("Symbol " + value.getAsString() + " does not belong to set " + symbols.toString());
    }

    @Override
    protected Type buildSchema() {
      return this.elementConverter.schema();
    }

    @Override
    JsonSchema getElementSchema() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(STRING, this.jsonSchema.isNullable());
      jsonSchema.setColumnName(this.jsonSchema.getColumnName());
      return jsonSchema;
    }
  }

  public static class RecordConverter extends JsonElementConverter {
    private final HashMap<String, JsonElementConverter> converters;
    private final RecordType recordType;
    private final Type schema;

    public enum RecordType {
      ROOT, CHILD
    }

    public RecordConverter(JsonSchema recordSchema) {
      this(recordSchema, CHILD);
    }

    public RecordConverter(JsonSchema recordSchema, RecordType recordType) {
      super(recordSchema);
      this.converters = new HashMap<>();
      this.recordType = recordType;
      this.schema = buildSchema();
    }

    @Override
    Object convertField(JsonElement value) {
      ParquetGroup r1 = new ParquetGroup((GroupType) schema());
      JsonObject inputRecord = value.getAsJsonObject();
      for (Map.Entry<String, JsonElement> entry : inputRecord.entrySet()) {
        String key = entry.getKey();
        JsonElementConverter converter = this.converters.get(key);
        Object convertedValue = converter.convert(entry.getValue());
        boolean valueIsNull = convertedValue == null;
        Type.Repetition repetition = optionalOrRequired(converter.jsonSchema);
        // OPTIONAL fields are simply omitted when their value is null.
        if (valueIsNull && repetition.equals(OPTIONAL)) {
          continue;
        }
        r1.add(key, convertedValue);
      }
      return r1;
    }

    /**
     * Builds the parquet schema for this record and, as a side effect, populates the
     * per-column converter map used by {@link #convertField(JsonElement)}.
     */
    private Type buildSchema() {
      JsonArray inputSchema = this.jsonSchema.getDataTypeValues();
      List<Type> parquetTypes = new ArrayList<>();
      for (JsonElement element : inputSchema) {
        JsonObject map = (JsonObject) element;
        JsonSchema elementSchema = new JsonSchema(map);
        String columnName = elementSchema.getColumnName();
        JsonElementConverter converter = JsonElementConversionFactory.getConverter(elementSchema, false);
        Type schemaType = converter.schema();
        this.converters.put(columnName, converter);
        parquetTypes.add(schemaType);
      }
      String docName = this.jsonSchema.getColumnName();
      switch (recordType) {
        case ROOT:
          return new MessageType(docName, parquetTypes);
        case CHILD:
          return new GroupType(optionalOrRequired(this.jsonSchema), docName, parquetTypes);
        default:
          throw new RuntimeException("Unsupported Record type");
      }
    }

    @Override
    public Type schema() {
      return this.schema;
    }
  }

  public static class MapConverter extends CollectionConverter {

    public MapConverter(JsonSchema mapSchema) {
      super(mapSchema, mapSchema.getElementTypeUsingKey(MAP_ITEMS_KEY), false);
    }

    @Override
    Object convertField(JsonElement value) {
      ParquetGroup mapGroup = new ParquetGroup((GroupType) schema());
      JsonElementConverter converter = this.elementConverter;
      JsonObject map = (JsonObject) value;
      for (Map.Entry<String, JsonElement> entry : map.entrySet()) {
        ParquetGroup entrySet = (ParquetGroup) mapGroup.addGroup(MAP_KEY);
        entrySet.add(MAP_KEY_COLUMN_NAME, entry.getKey());
        entrySet.add(MAP_VALUE_COLUMN_NAME, converter.convert(entry.getValue()));
      }
      return mapGroup;
    }

    @Override
    protected Type buildSchema() {
      JsonElementConverter keyConverter = getKeyConverter();
      GroupType mapGroup =
          Types.repeatedGroup().addFields(keyConverter.schema(), this.elementConverter.schema()).named(MAP_KEY)
              .asGroupType();
      String columnName = this.jsonSchema.getColumnName();
      switch (optionalOrRequired(this.jsonSchema)) {
        case OPTIONAL:
          return Types.optionalGroup().addFields(mapGroup).named(columnName).asGroupType();
        case REQUIRED:
          return Types.requiredGroup().addFields(mapGroup).named(columnName).asGroupType();
        default:
          // Fix: previously returned null here, deferring the failure to a confusing
          // NPE later; fail fast like StringConverter does.
          throw new RuntimeException("Unsupported Repetition type");
      }
    }

    @Override
    JsonSchema getElementSchema() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(this.elementType, false);
      jsonSchema.setColumnName(MAP_VALUE_COLUMN_NAME);
      return jsonSchema;
    }

    /**
     * Map keys are always required UTF8 strings.
     */
    public JsonElementConverter getKeyConverter() {
      JsonSchema jsonSchema = JsonSchema.buildBaseSchema(STRING, false);
      jsonSchema.setColumnName(MAP_KEY_COLUMN_NAME);
      return getConverter(jsonSchema, false);
    }
  }
}
3,423
0
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin/converter/parquet/JsonIntermediateToParquetGroupConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter.parquet; import org.apache.parquet.example.data.Group; import org.apache.parquet.schema.MessageType; import com.google.gson.JsonArray; import com.google.gson.JsonObject; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.converter.Converter; import org.apache.gobblin.converter.DataConversionException; import org.apache.gobblin.converter.SchemaConversionException; import org.apache.gobblin.converter.SingleRecordIterable; import org.apache.gobblin.converter.parquet.JsonElementConversionFactory.RecordConverter; import static org.apache.gobblin.converter.parquet.JsonElementConversionFactory.RecordConverter.RecordType.ROOT; /** * A converter to Convert JsonIntermediate to Parquet * @author tilakpatidar */ public class JsonIntermediateToParquetGroupConverter extends Converter<JsonArray, MessageType, JsonObject, Group> { private RecordConverter recordConverter; @Override public MessageType convertSchema(JsonArray inputSchema, WorkUnitState workUnit) throws SchemaConversionException { String fieldName = workUnit.getExtract().getTable(); JsonSchema jsonSchema = new JsonSchema(inputSchema); jsonSchema.setColumnName(fieldName); recordConverter 
= new RecordConverter(jsonSchema, ROOT); return (MessageType) recordConverter.schema(); } @Override public Iterable<Group> convertRecord(MessageType outputSchema, JsonObject inputRecord, WorkUnitState workUnit) throws DataConversionException { return new SingleRecordIterable<>((Group) recordConverter.convert(inputRecord)); } }
3,424
0
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-parquet-apache/src/main/java/org/apache/gobblin/writer/ParquetDataWriterBuilder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.api.WriteSupport;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.proto.ProtoParquetWriter;
import org.apache.parquet.schema.MessageType;

import com.google.protobuf.Message;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.parquet.writer.AbstractParquetDataWriterBuilder;
import org.apache.gobblin.parquet.writer.ParquetWriterConfiguration;
import org.apache.gobblin.parquet.writer.ParquetWriterShim;


/**
 * Builds a {@link ParquetWriterShim} around the Apache parquet-mr writer matching the
 * configured record format (example Group, Avro, or Protobuf).
 */
@Slf4j
public class ParquetDataWriterBuilder<S,D> extends AbstractParquetDataWriterBuilder<S,D> {
  /**
   * Build a version-specific {@link ParquetWriter} for given {@link ParquetWriterConfiguration}
   * @param writerConfiguration writer settings (codec, block/page sizes, record format, staging path)
   * @return a shim that delegates write/close to the underlying format-specific writer
   * @throws IOException if the underlying writer cannot be created
   */
  @Override
  public ParquetWriterShim getVersionSpecificWriter(ParquetWriterConfiguration writerConfiguration)
      throws IOException {

    CompressionCodecName codecName = CompressionCodecName.fromConf(writerConfiguration.getCodecName());
    ParquetProperties.WriterVersion writerVersion = ParquetProperties.WriterVersion
        .fromString(writerConfiguration.getWriterVersion());

    Configuration conf = new Configuration();
    ParquetWriter versionSpecificWriter = null;
    // `this.schema` is interpreted differently per format below: a parquet MessageType
    // for GROUP, an Avro Schema for AVRO, and a protobuf Message class for PROTOBUF.
    switch (writerConfiguration.getRecordFormat()) {
      case GROUP: {
        // GroupWriteSupport reads the schema back out of the Hadoop Configuration.
        GroupWriteSupport.setSchema((MessageType) this.schema, conf);
        WriteSupport support = new GroupWriteSupport();
        // NOTE(review): this positional ParquetWriter constructor is deprecated in newer
        // parquet-mr releases in favor of the builder API — confirm against the pinned version.
        versionSpecificWriter = new ParquetWriter<Group>(
            writerConfiguration.getAbsoluteStagingFile(),
            support,
            codecName,
            writerConfiguration.getBlockSize(),
            writerConfiguration.getPageSize(),
            writerConfiguration.getDictPageSize(),
            writerConfiguration.isDictionaryEnabled(),
            writerConfiguration.isValidate(),
            writerVersion,
            conf);
        break;
      }
      case AVRO: {
        versionSpecificWriter = new AvroParquetWriter(
            writerConfiguration.getAbsoluteStagingFile(),
            (Schema) this.schema,
            codecName,
            writerConfiguration.getBlockSize(),
            writerConfiguration.getPageSize(),
            writerConfiguration.isDictionaryEnabled(),
            conf);
        break;
      }
      case PROTOBUF: {
        versionSpecificWriter = new ProtoParquetWriter(
            writerConfiguration.getAbsoluteStagingFile(),
            (Class<? extends Message>) this.schema,
            codecName,
            writerConfiguration.getBlockSize(),
            writerConfiguration.getPageSize(),
            writerConfiguration.isDictionaryEnabled(),
            writerConfiguration.isValidate());
        break;
      }
      default: throw new RuntimeException("Record format not supported");
    }
    ParquetWriter finalVersionSpecificWriter = versionSpecificWriter;

    // Thin adapter: forwards write/close to whichever writer was built above.
    return new ParquetWriterShim() {
      @Override
      public void write(Object record)
          throws IOException {
        finalVersionSpecificWriter.write(record);
      }

      @Override
      public void close()
          throws IOException {
        finalVersionSpecificWriter.close();
      }
    };
  }
}
3,425
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin/converter/EnvelopedRecordWithMetadataToRecordWithMetadataTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;

import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.type.RecordWithMetadata;

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.Assert;
import org.testng.annotations.Test;


/**
 * Tests for {@link EnvelopedRecordWithMetadataToRecordWithMetadata}: unwrapping an
 * enveloped record back into the inner record plus its metadata.
 */
@Test
public class EnvelopedRecordWithMetadataToRecordWithMetadataTest {

  @Test
  public void testSuccessWithRecord() throws DataConversionException, IOException {
    ObjectMapper mapper = new ObjectMapper();
    String payload = "abracadabra";

    // Assemble an envelope carrying both the record ("r") and its metadata ("rMd").
    Metadata recordMeta = new Metadata();
    recordMeta.getRecordMetadata().put("test1", "test2");

    HashMap<String, Object> envelope = new HashMap<>();
    envelope.put("r", payload);
    envelope.put("rMd", recordMeta);

    JsonNode envelopeJson = mapper.valueToTree(envelope);
    RecordWithMetadata<byte[]> wrapped = new RecordWithMetadata<>(envelopeJson.toString().getBytes(), null);

    EnvelopedRecordWithMetadataToRecordWithMetadata converter =
        new EnvelopedRecordWithMetadataToRecordWithMetadata();
    Iterator<RecordWithMetadata<?>> results = converter.convertRecord(null, wrapped, null).iterator();

    Assert.assertTrue(results.hasNext());
    RecordWithMetadata<?> unwrapped = results.next();

    // The inner record and its record-level metadata must survive the round trip.
    Assert.assertEquals(unwrapped.getRecord(), payload);
    Assert.assertEquals(unwrapped.getMetadata().getRecordMetadata().get("test1"), "test2");
  }

  @Test(expectedExceptions = DataConversionException.class, expectedExceptionsMessageRegExp = "Input data does not have record.")
  public void testFailureWithoutRecord() throws DataConversionException, IOException {
    ObjectMapper mapper = new ObjectMapper();

    // Envelope with metadata only — no "r" entry — must be rejected.
    Metadata recordMeta = new Metadata();
    recordMeta.getRecordMetadata().put("test1", "test2");

    HashMap<String, Object> envelope = new HashMap<>();
    envelope.put("rMd", recordMeta);

    JsonNode envelopeJson = mapper.valueToTree(envelope);
    RecordWithMetadata<byte[]> wrapped = new RecordWithMetadata<>(envelopeJson.toString().getBytes(), null);

    new EnvelopedRecordWithMetadataToRecordWithMetadata().convertRecord(null, wrapped, null);
  }
}
3,426
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin/converter/RecordWithMetadataToEnvelopedRecordWithMetadataTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Iterator;

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.type.RecordWithMetadata;


/**
 * Unit tests for {@link RecordWithMetadataToEnvelopedRecordWithMetadata}, covering the four
 * record payload shapes the converter handles: a JSON tree, a plain String, a byte[] the
 * converter can infer is printable UTF-8, and an opaque binary byte[].
 */
@Test
public class RecordWithMetadataToEnvelopedRecordWithMetadataTest {

  /**
   * A JSON-tree payload should be embedded as an object under "r", with the global-metadata id
   * under "mId", record metadata under "rMd", and the original content type moved into
   * innerContentType while contentType becomes the envelope type.
   */
  @Test
  public void testSuccessWithJson()
      throws SchemaConversionException, DataConversionException, IOException {
    final String innerContentType = "randomJsonRecord";
    ObjectMapper objectMapper = new ObjectMapper();

    RecordWithMetadataToEnvelopedRecordWithMetadata converter =
        new RecordWithMetadataToEnvelopedRecordWithMetadata();
    converter.convertSchema("", null);

    // Build Test Record
    HashMap<String, String> map = new HashMap<>();
    map.put("test", "test");
    map.put("value", "value");
    JsonNode jsonElement = objectMapper.valueToTree(map);
    Metadata md = new Metadata();
    md.getGlobalMetadata().setDatasetUrn("my-dataset");
    md.getGlobalMetadata().setContentType(innerContentType);
    md.getRecordMetadata().put("foo", "bar");
    RecordWithMetadata<JsonNode> record = new RecordWithMetadata(jsonElement, md);

    // Convert it
    Iterator<RecordWithMetadata<byte[]>> recordWithMetadataIterator =
        converter.convertRecord("", record, null).iterator();
    RecordWithMetadata recordWithMetadata = recordWithMetadataIterator.next();

    // Verify it
    JsonNode parsedElement =
        objectMapper.readValue((byte[]) recordWithMetadata.getRecord(), JsonNode.class);
    Assert.assertEquals(parsedElement.get("mId").getTextValue(),
        record.getMetadata().getGlobalMetadata().getId());
    Assert.assertEquals(parsedElement.get("r"), jsonElement);
    Assert.assertEquals(parsedElement.get("rMd").get("foo").getTextValue(), "bar");
    Assert
        .assertEquals(recordWithMetadata.getMetadata().getGlobalMetadata().getContentType(),
            "lnkd+recordWithMetadata");
    Assert.assertEquals(recordWithMetadata.getMetadata().getGlobalMetadata().getInnerContentType(),
        innerContentType);
  }

  /**
   * A String payload should be written as a JSON text value under "r"; with no original
   * content type, innerContentType stays null.
   */
  @Test
  public void testSuccessWithString() throws DataConversionException, IOException {
    ObjectMapper objectMapper = new ObjectMapper();
    String innerRecord = "abracadabra";

    RecordWithMetadataToEnvelopedRecordWithMetadata converter =
        new RecordWithMetadataToEnvelopedRecordWithMetadata();
    RecordWithMetadata<String> record = new RecordWithMetadata<>(innerRecord, new Metadata());

    Iterator<RecordWithMetadata<byte[]>> recordWithMetadataIterator =
        converter.convertRecord("", record, null).iterator();
    RecordWithMetadata recordWithMetadata = recordWithMetadataIterator.next();

    JsonNode parsedElement =
        objectMapper.readValue((byte[]) recordWithMetadata.getRecord(), JsonNode.class);
    Assert.assertEquals(parsedElement.get("mId").getTextValue(),
        record.getMetadata().getGlobalMetadata().getId());
    Assert.assertEquals(parsedElement.get("r").getTextValue(), innerRecord);
    Assert
        .assertEquals(recordWithMetadata.getMetadata().getGlobalMetadata().getContentType(),
            "lnkd+recordWithMetadata");
    Assert.assertNull(recordWithMetadata.getMetadata().getGlobalMetadata().getInnerContentType());
  }

  /**
   * A byte[] payload whose metadata marks it as base64 transfer-encoded should be treated as
   * printable UTF-8 and embedded verbatim as a string, not re-encoded.
   */
  @Test
  public void testSuccessWithInferredPrintableByteArray()
      throws DataConversionException, IOException {
    ObjectMapper objectMapper = new ObjectMapper();
    byte[] record = "abrac\\adabra".getBytes(StandardCharsets.UTF_8);
    Metadata md = new Metadata();
    md.getGlobalMetadata().setContentType("application/binary");
    md.getGlobalMetadata().addTransferEncoding("base64");

    RecordWithMetadataToEnvelopedRecordWithMetadata converter =
        new RecordWithMetadataToEnvelopedRecordWithMetadata();

    Iterator<RecordWithMetadata<byte[]>> recordWithMetadataIterator =
        converter.convertRecord("", new RecordWithMetadata<>(record, md), null).iterator();
    RecordWithMetadata recordWithMetadata = recordWithMetadataIterator.next();

    JsonNode parsedElement =
        objectMapper.readValue((byte[]) recordWithMetadata.getRecord(), JsonNode.class);
    Assert.assertEquals(parsedElement.get("r").getTextValue(),
        new String(record, StandardCharsets.UTF_8));
  }

  /**
   * A byte[] payload with no printable hint should be serialized by the object mapper
   * (base64 for byte arrays): "aaaa" -> "YWFhYQ==".
   */
  @Test
  public void testSuccessWithBinary() throws DataConversionException, IOException {
    ObjectMapper objectMapper = new ObjectMapper();
    byte[] record = "aaaa".getBytes(StandardCharsets.UTF_8);
    Metadata md = new Metadata();
    md.getGlobalMetadata().setContentType("application/binary");

    RecordWithMetadataToEnvelopedRecordWithMetadata converter =
        new RecordWithMetadataToEnvelopedRecordWithMetadata();

    Iterator<RecordWithMetadata<byte[]>> recordWithMetadataIterator =
        converter.convertRecord("", new RecordWithMetadata<>(record, md), null).iterator();
    RecordWithMetadata recordWithMetadata = recordWithMetadataIterator.next();

    JsonNode parsedElement =
        objectMapper.readValue((byte[]) recordWithMetadata.getRecord(), JsonNode.class);
    Assert.assertEquals(parsedElement.get("r").getTextValue(), "YWFhYQ==");
  }
}
3,427
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin/converter/MetadataConverterWrapperTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.type.RecordWithMetadata;


/**
 * Unit tests for {@link MetadataConverterWrapper}: verifies the wrapper delegates record
 * conversion to the inner converter while carrying the input's metadata through to every
 * output record, and that plain (non-RecordWithMetadata) inputs are also accepted.
 */
public class MetadataConverterWrapperTest {

  /** When the inner converter emits zero records, the wrapper must emit zero records too. */
  @Test
  public void testConvertsMetadataNoOutput() throws DataConversionException {
    DummyConverter dummyConverter = new DummyConverter(0);
    MetadataConverterWrapper<String, String, String, String> wrapper =
        new MetadataConverterWrapper<>(dummyConverter);

    Iterable<RecordWithMetadata<String>> records =
        wrapper.convertRecord("foo", new RecordWithMetadata<String>("bar", buildMetadata(1)),
            new WorkUnitState());
    Assert.assertFalse(records.iterator().hasNext());
  }

  /**
   * When the inner converter fans one record out into several, each output record must carry
   * the metadata of the input record it came from.
   */
  @Test
  public void testConvertsMetadataMultiOutput() throws DataConversionException {
    final int numRecordsToReturn = 2;
    DummyConverter dummyConverter = new DummyConverter(numRecordsToReturn);
    MetadataConverterWrapper<String, String, String, String> wrapper =
        new MetadataConverterWrapper<>(dummyConverter);

    Iterable<RecordWithMetadata<String>> records1 =
        wrapper.convertRecord("foo", new RecordWithMetadata<String>("bar", buildMetadata(1)),
            new WorkUnitState());
    Iterable<RecordWithMetadata<String>> records2 =
        wrapper.convertRecord("foo", new RecordWithMetadata<String>("baz", buildMetadata(2)),
            new WorkUnitState());

    Iterator<RecordWithMetadata<String>> record1It = records1.iterator();
    Iterator<RecordWithMetadata<String>> record2It = records2.iterator();
    for (int i = 0; i < numRecordsToReturn; i++) {
      RecordWithMetadata<String> record1 = record1It.next();
      Assert.assertEquals(record1.getRecord(), "converted" + String.valueOf(i));
      Assert.assertEquals(record1.getMetadata().getGlobalMetadata().getDatasetUrn(),
          "dataset-id:1");

      RecordWithMetadata<String> record2 = record2It.next();
      Assert.assertEquals(record2.getRecord(), "converted" + String.valueOf(i));
      Assert.assertEquals(record2.getMetadata().getGlobalMetadata().getDatasetUrn(),
          "dataset-id:2");
    }
  }

  /**
   * Raw records (not wrapped in RecordWithMetadata) are accepted; the wrapper attaches
   * fresh empty metadata to the output.
   */
  @Test
  public void testAcceptsRawRecords() throws DataConversionException {
    final int numRecordsToReturn = 1;
    DummyConverter dummyConverter = new DummyConverter(numRecordsToReturn);
    MetadataConverterWrapper<String, String, String, String> wrapper =
        new MetadataConverterWrapper<>(dummyConverter);

    Iterable<RecordWithMetadata<String>> records =
        wrapper.convertRecord("foo", "bar", new WorkUnitState());
    Iterator<RecordWithMetadata<String>> recordsIt = records.iterator();
    RecordWithMetadata<String> record = recordsIt.next();
    Assert.assertFalse(recordsIt.hasNext());
    Assert.assertEquals(record.getRecord(), "converted0");
    Assert.assertEquals(record.getMetadata().getGlobalMetadata().getId(), "0");
  }

  // Builds metadata whose dataset URN encodes the given id, so tests can tell which input
  // record a given output's metadata came from.
  private Metadata buildMetadata(int id) {
    Metadata md = new Metadata();
    md.getGlobalMetadata().setDatasetUrn("dataset-id:" + String.valueOf(id));
    return md;
  }

  /**
   * Inner converter stub: ignores its input and emits a configurable number of
   * "converted&lt;i&gt;" records per call.
   */
  private static class DummyConverter extends Converter<String, String, String, String> {
    private final int numRecordsToReturn;

    DummyConverter(int numRecordsToReturn) {
      this.numRecordsToReturn = numRecordsToReturn;
    }

    @Override
    public String convertSchema(String inputSchema, WorkUnitState workUnit)
        throws SchemaConversionException {
      return "";
    }

    @Override
    public Iterable<String> convertRecord(String outputSchema, String inputRecord,
        WorkUnitState workUnit)
        throws DataConversionException {
      List<String> records = new ArrayList<>();
      for (int i = 0; i < numRecordsToReturn; i++) {
        records.add(String.format("converted%d", i));
      }
      return records;
    }
  }
}
3,428
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin/metadata/GlobalMetadataCollectorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata;

import java.util.Set;

import org.testng.Assert;
import org.testng.annotations.Test;

import org.apache.gobblin.metadata.types.GlobalMetadata;


/**
 * Unit tests for {@link GlobalMetadataCollector}: merging incoming records with configured
 * defaults, null handling, de-duplication of already-seen records, and LRU-style eviction
 * when a cache-size limit is set. In these tests processMetadata() appears to return the
 * merged record when it is new and null when it was already cached.
 */
public class GlobalMetadataCollectorTest {
  private static final String CONTENT_TYPE = "foo";

  /** An incoming record should be merged on top of the collector's default metadata. */
  @Test
  public void testMergesWithDefaults() {
    final String DATASET_URN = "foo";

    GlobalMetadata defaultMetadata = new GlobalMetadata();
    defaultMetadata.setDatasetUrn(DATASET_URN);

    // -1 means no cache-size limit (see evictsRecordsLRUBased for the limited case)
    GlobalMetadataCollector collector = new GlobalMetadataCollector(defaultMetadata, -1);

    GlobalMetadata metadataRecord = new GlobalMetadata();
    metadataRecord.setContentType(CONTENT_TYPE);

    GlobalMetadata mergedRecord = collector.processMetadata(metadataRecord);
    Assert.assertEquals(mergedRecord.getDatasetUrn(), DATASET_URN);
    Assert.assertEquals(mergedRecord.getContentType(), CONTENT_TYPE);

    Assert.assertEquals(1, collector.getMetadataRecords().size());
    Assert.assertTrue(collector.getMetadataRecords().contains(mergedRecord),
        "Expected merged record to exist in metadata cache");
  }

  /** A null record yields null with no defaults, or the defaults themselves when configured. */
  @Test
  public void handlesNullRecord() {
    // If no defaults exist
    {
      GlobalMetadataCollector collectorWithNoDefaults = new GlobalMetadataCollector(-1);
      GlobalMetadata newRecord = collectorWithNoDefaults.processMetadata(null);
      Assert.assertNull(newRecord);
      Assert.assertEquals(collectorWithNoDefaults.getMetadataRecords().size(), 0);
    }

    // With defaults
    {
      GlobalMetadata defaults = buildMetadataWithUrn("DEFAULT");
      GlobalMetadataCollector collectorWithDefaults = new GlobalMetadataCollector(defaults, -1);
      GlobalMetadata newRecord = collectorWithDefaults.processMetadata(null);
      Assert.assertEquals(newRecord, defaults);
      Assert.assertEquals(collectorWithDefaults.getMetadataRecords().size(), 1);
    }
  }

  /** With no defaults configured, the incoming record passes through unchanged. */
  @Test
  public void handlesNullDefaults() {
    GlobalMetadataCollector collectorWithNoDefaults = new GlobalMetadataCollector(-1);
    GlobalMetadata record = buildMetadataWithContentType(CONTENT_TYPE);
    GlobalMetadata newRecord = collectorWithNoDefaults.processMetadata(record);

    Assert.assertEquals(newRecord, record);
    Assert.assertEquals(collectorWithNoDefaults.getMetadataRecords().size(), 1);
  }

  /** Processing an equal record twice returns null the second time and caches only once. */
  @Test
  public void testDoesNotStoreRecordTwice() {
    GlobalMetadata defaults = buildMetadataWithUrn("DEFAULT");
    GlobalMetadataCollector collectorWithDefaults = new GlobalMetadataCollector(defaults, -1);

    GlobalMetadata r1 = buildMetadataWithContentType(CONTENT_TYPE);
    GlobalMetadata newRecord = collectorWithDefaults.processMetadata(r1);
    Assert.assertNotNull(newRecord);

    GlobalMetadata r2 = buildMetadataWithContentType(CONTENT_TYPE);
    newRecord = collectorWithDefaults.processMetadata(r2);
    Assert.assertNull(newRecord);

    Assert.assertEquals(collectorWithDefaults.getMetadataRecords().size(), 1);
  }

  /** With a cache limit of 2, re-touching r1 keeps it alive so r2 is evicted when r3 arrives. */
  @Test
  public void evictsRecordsLRUBased() {
    GlobalMetadata r1 = buildMetadataWithContentType(CONTENT_TYPE + "_1");
    GlobalMetadata r1_1 = buildMetadataWithContentType(r1.getContentType());
    GlobalMetadata r2 = buildMetadataWithContentType(CONTENT_TYPE + "_2");
    GlobalMetadata r3 = buildMetadataWithContentType(CONTENT_TYPE + "_3");

    GlobalMetadataCollector collector = new GlobalMetadataCollector(2);

    GlobalMetadata newRecord;

    newRecord = collector.processMetadata(r1);
    Assert.assertNotNull(newRecord);

    newRecord = collector.processMetadata(r2);
    Assert.assertNotNull(newRecord);

    newRecord = collector.processMetadata(r1_1);
    Assert.assertNull(newRecord);

    // r2 should be evicted as r1 was more recently seen
    newRecord = collector.processMetadata(r3);
    Assert.assertNotNull(newRecord);

    Set<GlobalMetadata> cachedRecords = collector.getMetadataRecords();
    Assert.assertEquals(cachedRecords.size(), 2);
    Assert.assertTrue(cachedRecords.contains(r1));
    Assert.assertTrue(cachedRecords.contains(r3));
  }

  // Helper: metadata with only a dataset URN set.
  private GlobalMetadata buildMetadataWithUrn(String urn) {
    GlobalMetadata metadata = new GlobalMetadata();
    metadata.setDatasetUrn(urn);
    return metadata;
  }

  // Helper: metadata with only a content type set.
  private GlobalMetadata buildMetadataWithContentType(String contentType) {
    GlobalMetadata metadata = new GlobalMetadata();
    metadata.setContentType(contentType);
    return metadata;
  }
}
3,429
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/test/java/org/apache/gobblin/metadata/types/GlobalMetadataTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata.types;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.List;

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;


/**
 * Unit tests for {@link GlobalMetadata}: merging via addAll(), JSON round-tripping,
 * content-derived ids, and the immutability switch (markImmutable()).
 */
public class GlobalMetadataTest {

  /** addAll() merges file metadata from both sources and de-duplicates transfer encodings. */
  @Test
  public void testMerge() {
    GlobalMetadata m = new GlobalMetadata();
    m.setFileMetadata("file1", "key1", "val1");
    m.addTransferEncoding("gzip");

    GlobalMetadata m2 = new GlobalMetadata();
    m2.setFileMetadata("file2", "key1", "val2");
    m2.addTransferEncoding("gzip");

    GlobalMetadata merged = new GlobalMetadata();
    merged.addAll(m);
    merged.addAll(m2);

    Assert.assertEquals(merged.getFileMetadata("file1", "key1"), "val1");
    Assert.assertEquals(merged.getFileMetadata("file2", "key1"), "val2");

    List<String> mergedEncoding = merged.getTransferEncoding();
    Assert.assertEquals(mergedEncoding.size(), 1);
    Assert.assertEquals(mergedEncoding.get(0), "gzip");
  }

  /** Serialized form is a JSON object with exactly three children: file, dataset, id. */
  @Test
  public void testToJson() throws IOException {
    GlobalMetadata m = new GlobalMetadata();
    m.addTransferEncoding("foo");
    m.addTransferEncoding("bar");

    byte[] utf8 = m.toJsonUtf8();
    String parsed = new String(utf8, StandardCharsets.UTF_8);
    JsonNode root = new ObjectMapper().readTree(parsed);

    Assert.assertTrue(root.isObject());

    Iterator<JsonNode> children = root.getElements();
    int numChildren = 0;
    while (children.hasNext()) {
      children.next();
      numChildren++;
    }
    Assert.assertEquals(numChildren, 3, "expected only 3 child nodes - file, dataset, id");
    Assert.assertEquals(root.get("file").size(), 0, "expected no children in file node");
    Assert.assertTrue(root.get("id").isTextual(), "expected ID to be textual");

    JsonNode transferEncoding = root.get("dataset").get("Transfer-Encoding");
    Assert.assertEquals(transferEncoding.size(), m.getTransferEncoding().size());
    for (int i = 0; i < m.getTransferEncoding().size(); i++) {
      Assert.assertEquals(transferEncoding.get(i).getTextValue(), m.getTransferEncoding().get(i));
    }
  }

  /** fromJson() restores dataset-level transfer encodings and per-file metadata. */
  @Test
  public void testFromJson() throws IOException {
    String serialized =
        "{\"dataset\":{\"Transfer-Encoding\":[\"gzip\",\"aes_rotating\"]},\"file\":{\"part.task_hello-world_1488584636479_1.json.gzip.aes_rotating\":{\"exists\":\"true\"}}}";
    GlobalMetadata md = GlobalMetadata.fromJson(serialized);

    List<String> transferEncoding = md.getTransferEncoding();
    Assert.assertEquals(transferEncoding, ImmutableList.of("gzip", "aes_rotating"));

    Assert.assertEquals(
        md.getFileMetadata("part.task_hello-world_1488584636479_1.json.gzip.aes_rotating",
            "exists"), "true");
  }

  /** Ids are content-derived: equal content gives equal ids; changing content changes the id. */
  @Test
  public void testGetId() {
    GlobalMetadata m1 = buildMetadata();
    GlobalMetadata m2 = buildMetadata();

    Assert.assertEquals(m1.getId(), m2.getId());

    m1.addTransferEncoding("baz");
    Assert.assertNotEquals(m1.getId(), m2.getId());
  }

  /** After markImmutable(), mutating dataset-level fields must throw. */
  @Test(expectedExceptions = UnsupportedOperationException.class)
  public void testImmutableGlobal() {
    GlobalMetadata md = new GlobalMetadata();
    md.setDatasetUrn("Hello");
    md.markImmutable();

    Assert.assertEquals(md.getDatasetUrn(), "Hello");
    md.setDatasetUrn("World"); // should throw
  }

  /** After markImmutable(), mutating file-level metadata must throw. */
  @Test(expectedExceptions = UnsupportedOperationException.class)
  public void testImmutableDataset() {
    GlobalMetadata md = new GlobalMetadata();
    md.setFileMetadata("file1", "key1", "val1");
    md.markImmutable();

    Assert.assertEquals(md.getFileMetadata("file1", "key1"), "val1");
    md.setFileMetadata("file1", "key1", "val2"); // should throw
  }

  // Common fixture: metadata with two transfer encodings.
  protected GlobalMetadata buildMetadata() {
    GlobalMetadata m = new GlobalMetadata();
    m.addTransferEncoding("foo");
    m.addTransferEncoding("bar");
    return m;
  }
}
3,430
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/converter/EnvelopedRecordWithMetadataToRecordWithMetadata.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.type.RecordWithMetadata;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;


/**
 * A converter that takes an enveloped {@link RecordWithMetadata} (a JSON object produced by the
 * matching enveloping converter) and unwraps it. The envelope is expected to carry the record
 * payload as a string under the "r" field and, optionally, record-level metadata under
 * "rMd"/"recordMetadata". A missing "r" field is a {@link DataConversionException}.
 */
public class EnvelopedRecordWithMetadataToRecordWithMetadata
    extends Converter<String, Object, RecordWithMetadata<byte[]>, RecordWithMetadata<?>> {

  private static final String RECORD_KEY = "r";
  private static final String METADATA_KEY = "rMd";
  private static final String METADATA_RECORD_KEY = "recordMetadata";

  // Tolerate extra fields in the envelope rather than failing deserialization.
  private static final ObjectMapper objectMapper =
      new ObjectMapper().configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
  private static final JsonFactory jsonFactory = new JsonFactory();

  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    // The unwrapped record carries no schema of its own.
    return "";
  }

  @Override
  public Iterable<RecordWithMetadata<?>> convertRecord(Object outputSchema,
      RecordWithMetadata<byte[]> inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try (JsonParser envelopeParser = jsonFactory.createJsonParser(inputRecord.getRecord())) {
      envelopeParser.setCodec(objectMapper);
      JsonNode envelope = envelopeParser.readValueAsTree();

      // The payload is mandatory; anything without it is malformed input.
      if (!envelope.has(RECORD_KEY)) {
        throw new DataConversionException("Input data does not have record.");
      }
      String payload = envelope.get(RECORD_KEY).getTextValue();

      return Collections.singleton(new RecordWithMetadata<>(payload, extractMetadata(envelope)));
    } catch (IOException e) {
      throw new DataConversionException(e);
    }
  }

  /** Builds record metadata from the optional "rMd"/"recordMetadata" node of the envelope. */
  private static Metadata extractMetadata(JsonNode envelope) throws IOException {
    Metadata metadata = new Metadata();
    if (envelope.has(METADATA_KEY) && envelope.get(METADATA_KEY).has(METADATA_RECORD_KEY)) {
      metadata.getRecordMetadata()
          .putAll(objectMapper.readValue(envelope.get(METADATA_KEY).get(METADATA_RECORD_KEY),
              Map.class));
    }
    return metadata;
  }
}
3,431
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/converter/BytesToRecordWithMetadataConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter; import java.util.Collections; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.metadata.types.Metadata; import org.apache.gobblin.type.RecordWithMetadata; /** * A converter that takes an array of bytes and convert it to {@link RecordWithMetadata} * where record will be array of bytes and Metadata will be empty initialization */ public class BytesToRecordWithMetadataConverter extends Converter<Object, Object, byte[], RecordWithMetadata<?>> { @Override public Object convertSchema(Object inputSchema, WorkUnitState workUnit) throws SchemaConversionException { return inputSchema; } @Override public Iterable<RecordWithMetadata<?>> convertRecord(Object outputSchema, byte[] inputRecord, WorkUnitState workUnit) throws DataConversionException { return Collections.singleton(new RecordWithMetadata<>(inputRecord, new Metadata())); } }
3,432
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/converter/MetadataConverterWrapper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.io.IOException;

import com.google.common.base.Function;
import com.google.common.collect.Iterables;

import javax.annotation.Nullable;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.type.RecordWithMetadata;


/**
 * Wraps a given converter to accept a RecordWithMetadata with a given data type, convert the
 * underlying record using the wrapped converter, and pass the metadata through. Plain (unwrapped)
 * records are also accepted and get fresh empty metadata attached.
 *
 * Subclasses may override {@link #convertMetadata(Metadata)} to transform the metadata as it
 * flows through.
 */
public class MetadataConverterWrapper<SI, SO, DI, DO> extends Converter<SI, SO, Object, RecordWithMetadata<DO>> {
  private final Converter<SI, SO, DI, DO> innerConverter;

  /**
   * @param innerConverter converter that handles the underlying record conversion
   */
  public MetadataConverterWrapper(Converter<SI, SO, DI, DO> innerConverter) {
    this.innerConverter = innerConverter;
  }

  @Override
  public Converter<SI, SO, Object, RecordWithMetadata<DO>> init(WorkUnitState workUnit) {
    super.init(workUnit);
    innerConverter.init(workUnit);
    return this;
  }

  @Override
  public void close() throws IOException {
    innerConverter.close();
  }

  @Override
  public SO convertSchema(SI inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    // Schema handling is fully delegated to the wrapped converter.
    return innerConverter.convertSchema(inputSchema, workUnit);
  }

  @Override
  public Iterable<RecordWithMetadata<DO>> convertRecord(SO outputSchema, Object inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    // Bug fix: route the metadata through convertMetadata(). Previously the hook was defined
    // but never invoked, so subclass overrides were silently ignored. The default
    // implementation returns the metadata unchanged, so existing behavior is preserved.
    final Metadata metadata = convertMetadata(getMetadataFromRecord(inputRecord));
    final DI innerRecord = getRecordFromRecord(inputRecord);
    Iterable<DO> outputRecords = innerConverter.convertRecord(outputSchema, innerRecord, workUnit);

    // Attach the (possibly transformed) input metadata to every record the inner converter emits.
    return Iterables.transform(outputRecords, new Function<DO, RecordWithMetadata<DO>>() {
      @Nullable
      @Override
      public RecordWithMetadata<DO> apply(@Nullable DO input) {
        return new RecordWithMetadata<>(input, metadata);
      }
    });
  }

  // Unwraps the inner record when the input is a RecordWithMetadata; otherwise the input itself
  // is assumed to already be the inner record type.
  @SuppressWarnings("unchecked")
  private DI getRecordFromRecord(Object inputRecord) {
    if (inputRecord instanceof RecordWithMetadata<?>) {
      Object uncastedRecord = ((RecordWithMetadata) inputRecord).getRecord();
      return (DI) uncastedRecord;
    } else {
      return (DI) inputRecord;
    }
  }

  // Pulls metadata off a wrapped record; plain records get a fresh empty Metadata.
  private Metadata getMetadataFromRecord(Object inputRecord) {
    if (inputRecord instanceof RecordWithMetadata<?>) {
      return ((RecordWithMetadata) inputRecord).getMetadata();
    } else {
      return new Metadata();
    }
  }

  @Override
  public State getFinalState() {
    return innerConverter.getFinalState();
  }

  /**
   * Intended to be overridden by converters who want to manipulate metadata as it flows through.
   * The default implementation passes the metadata through unchanged.
   *
   * @param metadata metadata attached to (or created for) the incoming record
   * @return the metadata to attach to every output record
   */
  protected Metadata convertMetadata(Metadata metadata) {
    return metadata;
  }
}
3,433
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/converter/RecordWithMetadataToEnvelopedRecordWithMetadata.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Collections;

import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.map.ObjectMapper;

import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.types.GlobalMetadata;
import org.apache.gobblin.type.ContentTypeUtils;
import org.apache.gobblin.type.RecordWithMetadata;


/**
 * A converter that takes a {@link RecordWithMetadata} and serializes it using the following format:
 * {mId: "global metadata id", "rMd": recordMetadata, "r": record}
 *
 * The converter will also change the contentType in globalMetadata to lnkd+recordWithMetadata and record the
 * original contentType inside an inner-content-type header.
 *
 * The output of this converter is a valid UTF8-string encoded as a byte[].
 *
 * Note that this should be the last step in a converter chain - if global metadata is changed, its ID may change
 * as well which would lead to us embedding an incorrect metadata ID in the record.
 */
public class RecordWithMetadataToEnvelopedRecordWithMetadata
    extends Converter<Object, String, RecordWithMetadata<?>, RecordWithMetadata<byte[]>> {

  private static final ObjectMapper objectMapper = new ObjectMapper();
  private static final JsonFactory jsonFactory = new JsonFactory();
  // Envelope content type written into the global metadata of every output record.
  private static final String CONTENT_TYPE = "lnkd+recordWithMetadata";

  @Override
  public String convertSchema(Object inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    // The envelope has no schema of its own.
    return "";
  }

  @Override
  public Iterable<RecordWithMetadata<byte[]>> convertRecord(String outputSchema,
      RecordWithMetadata<?> inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    try {
      // Order matters: the metadata must be updated (and frozen) BEFORE writeHeaders() runs,
      // so that the "mId" field reflects the final metadata content.
      updateRecordMetadata(inputRecord);

      ByteArrayOutputStream bOs = new ByteArrayOutputStream(512);
      try (JsonGenerator generator =
          jsonFactory.createJsonGenerator(bOs, JsonEncoding.UTF8).setCodec(objectMapper)) {
        generator.writeStartObject();
        writeHeaders(inputRecord, generator);
        writeRecord(inputRecord, generator);
        generator.writeEndObject();
      }

      return Collections
          .singleton(new RecordWithMetadata<byte[]>(bOs.toByteArray(), inputRecord.getMetadata()));
    } catch (IOException e) {
      throw new DataConversionException(e);
    }
  }

  // Writes the "r" field. Printable byte[] payloads are emitted verbatim as a UTF-8 string;
  // everything else is serialized by the object mapper.
  private void writeRecord(RecordWithMetadata<?> inputRecord, JsonGenerator generator)
      throws IOException {
    if (shouldInterpretRecordAsUtf8ByteArray(inputRecord)) {
      generator.writeFieldName("r");
      byte[] bytes = (byte[]) inputRecord.getRecord();
      generator.writeUTF8String(bytes, 0, bytes.length);
    } else {
      generator.writeObjectField("r", inputRecord.getRecord());
    }
  }

  // Writes the "mId" header and, when present, the record-level metadata under "rMd".
  private void writeHeaders(RecordWithMetadata<?> inputRecord, JsonGenerator generator)
      throws IOException {
    generator.writeStringField("mId", inputRecord.getMetadata().getGlobalMetadata().getId());
    if (!inputRecord.getMetadata().getRecordMetadata().isEmpty()) {
      generator.writeObjectField("rMd", inputRecord.getMetadata().getRecordMetadata());
    }
  }

  // Moves the original content type into innerContentType, stamps the envelope content type,
  // and freezes the global metadata so later stages can't invalidate the embedded id.
  private void updateRecordMetadata(RecordWithMetadata<?> inputRecord) {
    GlobalMetadata md = inputRecord.getMetadata().getGlobalMetadata();
    String origContentType = md.getContentType();
    if (origContentType != null) {
      md.setInnerContentType(origContentType);
    }
    md.setContentType(CONTENT_TYPE);
    md.markImmutable();
  }

  // A byte[] payload is embedded as text only when the metadata suggests it is printable.
  private boolean shouldInterpretRecordAsUtf8ByteArray(RecordWithMetadata<?> inputRecord) {
    if (inputRecord.getRecord() instanceof byte[]) {
      return ContentTypeUtils.getInstance().inferPrintableFromMetadata(inputRecord.getMetadata());
    }

    return false;
  }
}
3,434
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/converter/RecordWithMetadataSchemaRegistrationConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter;

import java.util.Collections;
import java.util.Properties;

import org.apache.avro.Schema;

import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metadata.types.Metadata;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistryFactory;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.type.RecordWithMetadata;

import com.google.common.base.Preconditions;


/**
 * A converter that takes {@link RecordWithMetadata},
 * tries to register the Avro Schema with KafkaSchemaRegistry
 * and returns a {@link RecordWithMetadata} with schemaId inside Metadata.
 *
 * The registration happens once in {@link #convertSchema}; the resulting id is then stamped
 * into the record metadata of every converted record under the "Schema-Id" key.
 */
public class RecordWithMetadataSchemaRegistrationConverter
    extends Converter<String, String, RecordWithMetadata<?>, RecordWithMetadata<?>> {

  private static final String SCHEMA_ID_KEY = "Schema-Id";
  private static final String CONTENT_TYPE = "application/avro";

  // Cached after the first successful registration; reused for all records.
  private String schemaId;

  @Override
  public String convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    // Schema.parse(String) is deprecated; Schema.Parser is the supported replacement with
    // identical semantics for a single self-contained schema string.
    Schema schema = new Schema.Parser().parse(inputSchema);
    if (null == schemaId) {
      try {
        schemaId = getSchemaId(workUnit.getProperties(), schema);
      } catch (SchemaRegistryException e) {
        throw new SchemaConversionException(e);
      }
    }
    return schema.toString();
  }

  /**
   * Registers the schema with the Kafka Avro schema registry configured in the job properties.
   *
   * @return the registry-assigned schema id
   * @throws SchemaRegistryException if registration fails
   */
  private static String getSchemaId(Properties properties, Schema schema)
      throws SchemaRegistryException {
    KafkaAvroSchemaRegistry kafkaAvroSchemaRegistry =
        (KafkaAvroSchemaRegistry) new KafkaAvroSchemaRegistryFactory().create(properties);
    return kafkaAvroSchemaRegistry.register(schema);
  }

  @Override
  public Iterable<RecordWithMetadata<?>> convertRecord(String outputSchema,
      RecordWithMetadata<?> inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    // convertSchema() must have run (and registered the schema) before any record arrives.
    Preconditions.checkNotNull(schemaId);

    Metadata metadata = inputRecord.getMetadata();
    metadata.getGlobalMetadata().setContentType(CONTENT_TYPE);
    metadata.getRecordMetadata().put(SCHEMA_ID_KEY, schemaId);

    return Collections.singleton(new RecordWithMetadata<>(inputRecord.getRecord(), metadata));
  }
}
3,435
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/type/ContentTypeUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.type;

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.metadata.types.Metadata;

/**
 * Utilities to work with MIME content-types.
 */
@Slf4j
public class ContentTypeUtils {
  private static final ContentTypeUtils INSTANCE = new ContentTypeUtils();

  /** Returns the process-wide singleton. */
  public static ContentTypeUtils getInstance() {
    return INSTANCE;
  }

  // Maps content-type / transfer-encoding names to charset names ("BINARY" for non-text data).
  private ConcurrentHashMap<String, String> knownCharsets;

  /**
   * Check which character set a given content-type corresponds to.
   * @param contentType Content-type to check
   * @return Charset the mimetype represents. "BINARY" if binary data.
   */
  public String getCharset(String contentType) {
    String charSet = knownCharsets.get(contentType);
    if (charSet != null) {
      return charSet;
    }

    // Special cases: text/* and structured-syntax suffixes are treated as UTF-8.
    if (contentType.startsWith("text/") || contentType.endsWith("+json") || contentType.endsWith("+xml")) {
      return "UTF-8";
    }

    return "BINARY";
  }

  /**
   * Heuristic to infer if content is printable from metadata: the outermost transfer-encoding
   * wins; otherwise the content-type decides.
   */
  public boolean inferPrintableFromMetadata(Metadata md) {
    String inferredCharset = "BINARY";
    List<String> transferEncoding = md.getGlobalMetadata().getTransferEncoding();
    if (transferEncoding != null) {
      // The last entry is the most recently applied (outermost) encoding.
      inferredCharset = getCharset(transferEncoding.get(transferEncoding.size() - 1));
    } else if (md.getGlobalMetadata().getContentType() != null) {
      inferredCharset = getCharset(md.getGlobalMetadata().getContentType());
    }

    return inferredCharset.equals("UTF-8");
  }

  /**
   * Register a new contentType to charSet mapping.
   * @param contentType Content-type to register
   * @param charSet charSet associated with the content-type
   */
  public void registerCharsetMapping(String contentType, String charSet) {
    // BUG FIX: ConcurrentHashMap.contains(Object) is the legacy method that tests *values*,
    // not keys, so the duplicate-registration warning never fired for keys. Use containsKey().
    if (knownCharsets.containsKey(contentType)) {
      // BUG FIX: the format string had a {} placeholder but no argument was passed.
      log.warn("{} is already registered; re-registering", contentType);
    }

    knownCharsets.put(contentType, charSet);
  }

  private ContentTypeUtils() {
    knownCharsets = new ConcurrentHashMap<>();
    knownCharsets.put("base64", "UTF-8");
    knownCharsets.put("aes_rotating", "UTF-8");
    knownCharsets.put("gzip", "BINARY");
    knownCharsets.put("application/xml", "UTF-8");
    knownCharsets.put("application/json", "UTF-8");
  }
}
3,436
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/type/RecordWithMetadata.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.type; import lombok.AllArgsConstructor; import lombok.Getter; import org.apache.gobblin.metadata.types.Metadata; /** * A holder for records with metadata */ @Getter @AllArgsConstructor public class RecordWithMetadata<D> { public static final String RECORD_NAME = "recordName"; private final D record; private final Metadata metadata; }
3,437
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/type/SerializedRecordWithMetadata.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.type; import org.apache.gobblin.metadata.types.Metadata; /** * A holder for serialized records with Metadata. */ public class SerializedRecordWithMetadata extends RecordWithMetadata<byte[]> { public SerializedRecordWithMetadata(byte[] record, Metadata metadata) { super(record, metadata); } }
3,438
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/GlobalMetadataCollector.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata;

import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Set;

import com.google.common.base.Preconditions;

import org.apache.gobblin.metadata.types.GlobalMetadata;

/**
 * This class collects metadata records, optionally merging them with a set of default metadata. It also
 * keeps track of all of the merged records so they can be published at a later date.
 */
public class GlobalMetadataCollector {
  /** Sentinel cache size meaning "never evict". */
  public static final int UNLIMITED_SIZE = -1;

  // LinkedHashSet preserves insertion order, which is what the eviction logic
  // in addRecordAndEvictIfNecessary relies on to drop the oldest entry first.
  private final LinkedHashSet<GlobalMetadata> metadataRecords;
  private final GlobalMetadata defaultMetadata;
  private final int cacheSize;
  // Id of the most recently processed record; used as a fast-path duplicate check.
  private String lastSeenMetadataId;

  /**
   * Initialize a MetdataCollector with the given cache size.
   * @param cacheSize You can pass the value -1 to have an unlimited cache size.
   */
  public GlobalMetadataCollector(int cacheSize) {
    this(null, cacheSize);
  }

  /**
   * Initialize a MetadataCollector with some default metadata to merge incoming records with.
   * (Eg: a dataset-URN or a set of Transfer-Encodings).
   *
   * @param defaultMetadata defaults merged into every processed record; may be null
   * @param cacheSize maximum number of distinct records to retain, or -1 for unlimited
   */
  public GlobalMetadataCollector(GlobalMetadata defaultMetadata, int cacheSize) {
    Preconditions.checkArgument(cacheSize == -1 || cacheSize > 0, "cacheSize must be -1 or greater than 0");

    this.defaultMetadata = defaultMetadata;
    this.cacheSize = cacheSize;
    this.lastSeenMetadataId = "";
    this.metadataRecords = new LinkedHashSet<>();
  }

  /**
   * Process a metadata record, merging it with default metadata.
   * <p>
   * If the combined (metadata + defaultMetadata) record is not present in the Collector's cache,
   * then the new metadata record will be stored in cache and returned. The oldest record in the cache will be evicted
   * if necessary.
   * <p>>
   * If the new record already exists in the cache, then the LRU time will be updated but this method will return null.
   */
  public synchronized GlobalMetadata processMetadata(GlobalMetadata metadata) {
    GlobalMetadata recordToAdd = getRecordToAdd(metadata);
    if (recordToAdd != null) {
      boolean isNew = addRecordAndEvictIfNecessary(recordToAdd);
      return isNew ? recordToAdd : null;
    }

    return null;
  }

  /**
   * Return a Set of all merged metadata records in the cache. The set is immutable.
   */
  public Set<GlobalMetadata> getMetadataRecords() {
    return Collections.unmodifiableSet(metadataRecords);
  }

  // Inserts the record at the "newest" end of the LinkedHashSet and evicts the oldest
  // entry when the cache is over capacity. Returns true iff the record was not already cached.
  private boolean addRecordAndEvictIfNecessary(GlobalMetadata recordToAdd) {
    // First remove the element from the HashSet if it's already in there to reset
    // the 'LRU' piece; then add it back in
    boolean isNew = !metadataRecords.remove(recordToAdd);
    metadataRecords.add(recordToAdd);

    // Now remove the first element (which should be the oldest) from the list
    // if we've exceeded the cache size
    if (cacheSize != -1 && metadataRecords.size() > cacheSize) {
      Iterator<GlobalMetadata> recordIt = metadataRecords.iterator();
      recordIt.next(); // Remove the oldest element - don't care what it is
      recordIt.remove();
    }

    return isNew;
  }

  // Resolves which record should be inserted: the defaults for a null input, null for an
  // immediate repeat of the last record, otherwise the input merged with the defaults.
  // NOTE(review): mergeWithDefaults mutates the caller-supplied record in place — confirm
  // callers do not rely on the argument staying unchanged.
  private GlobalMetadata getRecordToAdd(GlobalMetadata metadata) {
    if (metadata == null) {
      return defaultMetadata;
    }

    // Optimization - we know this record already has been seen, so don't
    // merge with defaults
    if (metadata.getId().equals(lastSeenMetadataId)) {
      return null;
    }

    lastSeenMetadataId = metadata.getId();
    if (defaultMetadata != null) {
      metadata.mergeWithDefaults(defaultMetadata);
    }

    return metadata;
  }
}
3,439
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/types/StaticStringMetadataMerger.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metadata.types; import org.apache.gobblin.metadata.MetadataMerger; import org.apache.gobblin.writer.FsWriterMetrics; /** * Metadata 'Merger' that is pre-provisioned with a static string and can not * actually merge data. This is to support backwards-compatible uses cases where a * user specifies metadata in job config and does _not_ want to publish * any other pipeline-produced metadata. */ public class StaticStringMetadataMerger implements MetadataMerger<String> { private final String metadata; public StaticStringMetadataMerger(String staticMetadata) { this.metadata = staticMetadata; } @Override public void update(String metadata) { /* * Since we don't know what format the string is in, we also don't know how to * merge anything. */ throw new UnsupportedOperationException("Do not know how to merge with other strings!"); } @Override public void update(FsWriterMetrics metrics) { throw new UnsupportedOperationException("Do not know how to merge FsWriterMetrics"); } @Override public String getMergedMetadata() { return metadata; } }
3,440
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/types/GlobalMetadataJsonMerger.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata.types;

import java.io.IOException;

import org.apache.gobblin.metadata.MetadataMerger;
import org.apache.gobblin.writer.FsWriterMetrics;

/**
 * Merges a set of GlobalMetadata objects that have been serialized as JSON together to
 * create a final output.
 */
public class GlobalMetadataJsonMerger implements MetadataMerger<String> {
  // Accumulator for everything merged so far; starts empty.
  private GlobalMetadata mergedMetadata;

  public GlobalMetadataJsonMerger() {
    mergedMetadata = new GlobalMetadata();
  }

  /**
   * Parses {@code metadata} as a JSON-serialized {@link GlobalMetadata} and folds all of
   * its keys into the accumulated result.
   * @throws IllegalArgumentException if the string is not valid GlobalMetadata JSON
   */
  @Override
  public void update(String metadata) {
    try {
      GlobalMetadata parsedMetadata = GlobalMetadata.fromJson(metadata);
      mergedMetadata.addAll(parsedMetadata);
    } catch (IOException e) {
      throw new IllegalArgumentException("Error parsing metadata", e);
    }
  }

  /**
   * Adds per-file record counts from writer metrics into the accumulated metadata and
   * updates the dataset-level record/file totals.
   */
  @Override
  public void update(FsWriterMetrics metrics) {
    long numRecords = mergedMetadata.getNumRecords();
    int numFiles = mergedMetadata.getNumFiles();

    for (FsWriterMetrics.FileInfo fileInfo: metrics.getFileInfos()) {
      numRecords += fileInfo.getNumRecords();
      numFiles += 1;
      // Record the per-file count as file-level metadata, keyed by file name.
      mergedMetadata.setFileMetadata(fileInfo.getFileName(), GlobalMetadata.NUM_RECORDS_KEY,
          Long.valueOf(fileInfo.getNumRecords()));
    }

    mergedMetadata.setNumRecords(numRecords);
    mergedMetadata.setNumOutputFiles(numFiles);
  }

  /**
   * Serializes the accumulated metadata as a JSON string.
   */
  @Override
  public String getMergedMetadata() {
    try {
      return mergedMetadata.toJson();
    } catch (IOException e) {
      // Serialization targets an in-memory writer, so IOException is not expected here.
      throw new AssertionError("Unexpected IOException serializing to JSON", e);
    }
  }
}
3,441
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/types/Metadata.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metadata.types; import java.util.HashMap; import java.util.Map; /** * Represents a collection of global and record-level metadata that can be * attached to a record as it flows through a Gobblin pipeline. */ public class Metadata { private GlobalMetadata globalMetadata; private Map<String, Object> recordMetadata; public Metadata() { globalMetadata = new GlobalMetadata(); recordMetadata = new HashMap<>(); } public GlobalMetadata getGlobalMetadata() { return globalMetadata; } public Map<String, Object> getRecordMetadata() { return recordMetadata; } }
3,442
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/types/GlobalMetadata.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata.types;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.StringWriter;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.xml.bind.DatatypeConverter;

/**
 * Represents metadata for a pipeline. There are two 'levels' of metadata - one that is global to an entire
 * dataset, and one that is applicable to each file present in a dataset.
 */
public class GlobalMetadata {
  private static final Logger log = LoggerFactory.getLogger(GlobalMetadata.class);
  private static final ObjectMapper objectMapper = new ObjectMapper();
  private static final JsonFactory jsonFactory = new JsonFactory();
  // Id used for a metadata object with no dataset-level or file-level keys.
  private static final String EMPTY_ID = "0";

  // Dataset-wide key/value pairs.
  @JsonProperty("dataset")
  private final Map<String, Object> datasetLevel;

  // Per-file key/value pairs, keyed by file name.
  @JsonProperty("file")
  private final Map<String, Map<String, Object>> fileLevel;

  // Lazily computed content-based id; reset to null by every mutation.
  @JsonProperty("id")
  private String cachedId;

  // Not serialized; once set, all mutators throw UnsupportedOperationException.
  private transient boolean markedImmutable;

  public final static String DATASET_URN_KEY = "Dataset-URN";
  public final static String TRANSFER_ENCODING_KEY = "Transfer-Encoding";
  public final static String CONTENT_TYPE_KEY = "Content-Type";
  public final static String INNER_CONTENT_TYPE_KEY = "Inner-Content-Type";
  public final static String NUM_RECORDS_KEY = "Num-Records";
  public final static String NUM_FILES_KEY = "Num-Files";

  /**
   * Create a new, empty, metadata descriptor.
   */
  public GlobalMetadata() {
    this.datasetLevel = new ConcurrentHashMap<>();
    this.fileLevel = new ConcurrentHashMap<>();
    this.markedImmutable = false;
  }

  /**
   * Mark the metadata as immutable. Once this flag is set all attempts to modify the object
   * will fail with {@link UnsupportedOperationException}.
   */
  public void markImmutable() {
    this.markedImmutable = true;
  }

  /** Whether {@link #markImmutable()} has been called. */
  public boolean isImmutable() {
    return this.markedImmutable;
  }

  /**
   * Create a new GlobalMetadata object from its serialized representation.
   * @throws IOException If the JSON string cannot be parsed.
   */
  public static GlobalMetadata fromJson(String json)
      throws IOException {
    return objectMapper.readValue(json, GlobalMetadata.class);
  }

  /**
   * Merge another GlobalMetadata object into this one. All keys from 'other' will be placed into
   * this object, replacing any already existing keys.
   * @param other Metadata object to add
   */
  public void addAll(GlobalMetadata other) {
    throwIfImmutable();

    datasetLevel.putAll(other.datasetLevel);
    for (Map.Entry<String, Map<String, Object>> e : other.fileLevel.entrySet()) {
      // Copy each file-level map so the two objects don't share mutable state.
      Map<String, Object> val = new ConcurrentHashMap<>();
      val.putAll(e.getValue());
      fileLevel.put(e.getKey(), val);
    }

    cachedId = null;
  }

  /**
   * Merge default settings into this object. Logic is very similar to addAll(), but Transfer-Encoding gets
   * special treatment; the 'default' transfer-encoding settings are appended to any transfer-encoding
   * already set (vs simply overwriting them).
   */
  public void mergeWithDefaults(GlobalMetadata defaults) {
    List<String> defaultTransferEncoding = defaults.getTransferEncoding();
    List<String> myEncoding = getTransferEncoding();
    if (defaultTransferEncoding != null) {
      if (myEncoding == null) {
        setDatasetMetadata(TRANSFER_ENCODING_KEY, defaultTransferEncoding);
      } else {
        // Existing encodings come first, defaults are appended after them.
        List<String> combinedEncoding = new ArrayList<>();
        combinedEncoding.addAll(myEncoding);
        combinedEncoding.addAll(defaultTransferEncoding);
        setDatasetMetadata(TRANSFER_ENCODING_KEY, combinedEncoding);
      }
    }

    // All other default keys only fill gaps; existing values are never overwritten.
    for (Map.Entry<String, Object> entry : defaults.datasetLevel.entrySet()) {
      if (!datasetLevel.containsKey(entry.getKey())) {
        setDatasetMetadata(entry.getKey(), entry.getValue());
      }
    }
  }

  /**
   * Serialize as a UTF8 encoded JSON string.
   */
  public byte[] toJsonUtf8() {
    try {
      ByteArrayOutputStream bOs = new ByteArrayOutputStream(512);

      try (JsonGenerator generator = jsonFactory.createJsonGenerator(bOs, JsonEncoding.UTF8)
          .setCodec(objectMapper)) {
        toJsonUtf8(generator);
      }

      return bOs.toByteArray();
    } catch (IOException e) {
      // The stream is in-memory, so IOException is not expected in practice.
      throw new RuntimeException("Unexpected IOException serializing to ByteArray", e);
    }
  }

  /**
   * Serialize as a String
   */
  public String toJson()
      throws IOException {
    StringWriter writer = new StringWriter();
    try (JsonGenerator generator = jsonFactory.createJsonGenerator(writer)
        .setCodec(objectMapper)) {
      toJsonUtf8(generator);
    }

    return writer.toString();
  }

  // Writes the full object (id + body) to the generator.
  protected void toJsonUtf8(JsonGenerator generator)
      throws IOException {
    generator.writeStartObject();
    generator.writeStringField("id", getId());
    bodyToJsonUtf8(generator);
    generator.writeEndObject();
    generator.flush();
  }

  /**
   * Write this object out to an existing JSON stream
   */
  protected void bodyToJsonUtf8(JsonGenerator generator)
      throws IOException {
    generator.writeObjectField("dataset", datasetLevel);
    generator.writeObjectFieldStart("file");
    for (Map.Entry<String, Map<String, Object>> entry : fileLevel.entrySet()) {
      generator.writeObjectField(entry.getKey(), entry.getValue());
    }
    generator.writeEndObject();
  }

  // Dataset-level metadata

  /**
   * Convenience method to retrieve the Dataset-URN dataset-level property.
   */
  public String getDatasetUrn() {
    return (String) datasetLevel.get(DATASET_URN_KEY);
  }

  /**
   * Convenience method to set the Dataset-URN property.
   */
  public void setDatasetUrn(String urn) {
    setDatasetMetadata(DATASET_URN_KEY, urn);
  }

  /**
   * Convenience method to set the Content-Type property
   */
  public void setContentType(String contentType) {
    setDatasetMetadata(CONTENT_TYPE_KEY, contentType);
  }

  /**
   * Convenience method to retrieve the Content-Type property
   * @return the content type, or null if unset
   */
  public String getContentType() {
    return (String)getDatasetMetadata(CONTENT_TYPE_KEY);
  }

  /**
   * Convenience method to set the Inner-Content-Type property
   */
  public void setInnerContentType(String innerContentType) {
    setDatasetMetadata(INNER_CONTENT_TYPE_KEY, innerContentType);
  }

  /**
   * Convenience method to retrieve the Inner-Content-Type property
   */
  public String getInnerContentType() {
    return (String)getDatasetMetadata(INNER_CONTENT_TYPE_KEY);
  }

  /**
   * Convenience method to set the number of files in the dataset
   */
  public void setNumOutputFiles(int numFiles) {
    setDatasetMetadata(NUM_FILES_KEY, numFiles);
  }

  /**
   * Convenience method to set the number of records in the dataset
   */
  public void setNumRecords(long numRecords) {
    setDatasetMetadata(NUM_RECORDS_KEY, numRecords);
  }

  /**
   * Get an arbitrary dataset-level metadata key
   */
  public Object getDatasetMetadata(String key) {
    return datasetLevel.get(key);
  }

  /**
   * Set an arbitrary dataset-level metadata key
   */
  public void setDatasetMetadata(String key, Object val) {
    throwIfImmutable();
    datasetLevel.put(key, val);
    cachedId = null;
  }

  /**
   * Convenience method to retrieve the transfer-encodings that have been applied to the dataset
   */
  @SuppressWarnings("unchecked")
  public List<String> getTransferEncoding() {
    return (List<String>) getDatasetMetadata(TRANSFER_ENCODING_KEY);
  }

  /**
   * Convenience method to add a new transfer-encoding to a dataset
   */
  public synchronized void addTransferEncoding(String encoding) {
    throwIfImmutable();
    List<String> encodings = getTransferEncoding();
    if (encodings == null) {
      encodings = new ArrayList<>();
    }
    // NOTE(review): when the list came from deserialized JSON this mutates it in place —
    // confirm the stored list is always mutable.
    encodings.add(encoding);
    setDatasetMetadata(TRANSFER_ENCODING_KEY, encodings);
  }

  /** Number of records in the dataset, or 0 if unset. */
  public long getNumRecords() {
    // When reading from JSON, Jackson could parse as an int so we need to use a more generic type
    Number numRecords = (Number)getDatasetMetadata(NUM_RECORDS_KEY);
    return (numRecords != null) ? numRecords.longValue() : 0L;
  }

  /** Number of output files in the dataset, or 0 if unset. */
  public int getNumFiles() {
    Integer numFiles = (Integer)getDatasetMetadata(NUM_FILES_KEY);
    return (numFiles != null) ? numFiles : 0;
  }

  // File-level metadata

  /**
   * Get an arbitrary file-level metadata key
   */
  public Object getFileMetadata(String file, String key) {
    Map<String, Object> fileKeys = fileLevel.get(file);
    if (fileKeys == null) {
      return null;
    }

    return fileKeys.get(key);
  }

  /**
   * Set an arbitrary file-level metadata key
   */
  public void setFileMetadata(String file, String key, Object val) {
    throwIfImmutable();
    Map<String, Object> fileKeys = fileLevel.get(file);
    if (fileKeys == null) {
      fileKeys = new ConcurrentHashMap<>();
      fileLevel.put(file, fileKeys);
    }

    fileKeys.put(key, val);
    cachedId = null;
  }

  // Equality is defined by the content-derived id, not object identity.
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    GlobalMetadata that = (GlobalMetadata) o;

    return this.getId().equals(that.getId());
  }

  @Override
  public int hashCode() {
    return getId().hashCode();
  }

  /**
   * Content-based identifier: the MD5 of the body's JSON serialization, hex-encoded.
   * Cached until the next mutation.
   * NOTE(review): the JSON reflects ConcurrentHashMap iteration order, which is not
   * guaranteed to be identical for logically-equal maps — confirm ids are stable across
   * independently built instances.
   */
  public String getId() {
    if (cachedId != null) {
      return cachedId;
    }

    if (datasetLevel.size() == 0 && fileLevel.size() == 0) {
      cachedId = EMPTY_ID;
      return cachedId;
    }

    try {
      // ID is calculated by serializing body to JSON and then taking that hash
      ByteArrayOutputStream bOs = new ByteArrayOutputStream(512);
      MessageDigest md5Digest = MessageDigest.getInstance("MD5");

      try (JsonGenerator generator = jsonFactory.createJsonGenerator(bOs, JsonEncoding.UTF8).setCodec(objectMapper)) {
        generator.writeStartObject();
        bodyToJsonUtf8(generator);
        generator.writeEndObject();
      }

      byte[] digestBytes = md5Digest.digest(bOs.toByteArray());
      cachedId = DatatypeConverter.printHexBinary(digestBytes);
      return cachedId;
    } catch (IOException|NoSuchAlgorithmException e) {
      throw new RuntimeException("Unexpected exception generating id", e);
    }
  }

  /** True when no dataset-level or file-level keys are set. */
  public boolean isEmpty() {
    return getId().equals(EMPTY_ID);
  }

  // Shared guard for all mutators once markImmutable() has been called.
  private void throwIfImmutable() {
    if (this.markedImmutable) {
      throw new UnsupportedOperationException("Metadata is marked as immutable -- cannot modify");
    }
  }
}
3,443
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/provider/SimpleConfigMetadataProvider.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metadata.provider; import org.apache.hadoop.fs.Path; import org.apache.gobblin.annotation.Alpha; import org.apache.gobblin.metadata.types.GlobalMetadata; import lombok.RequiredArgsConstructor; /** * Simple implementation of {@link DatasetAwareMetadataProvider}, which directly * reads global permission from config. */ @Alpha @RequiredArgsConstructor public class SimpleConfigMetadataProvider extends DatasetAwareFsMetadataProvider { private final String permission; @Override public GlobalMetadata getGlobalMetadataForDataset(String datasetUrn) { GlobalMetadata defaultMetadata = new GlobalMetadata(); defaultMetadata.setDatasetUrn(datasetUrn); PermissionMetadataParser.setPermission(defaultMetadata, permission); return defaultMetadata; } @Override public String datasetUrnAtPath(Path path) { return path.toString(); } }
3,444
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/provider/PermissionMetadataParser.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metadata.provider; import org.apache.gobblin.metadata.types.GlobalMetadata; /** * Parses permission information from a given {@link GlobalMetadata}. */ public class PermissionMetadataParser { private final static String PERMISSION_KEY = "Permission"; private final static String GROUP_OWNER = "GroupOwner"; public static void setPermission(GlobalMetadata metadata, String permission) { metadata.setDatasetMetadata(PERMISSION_KEY, permission); } public static String getPermission(GlobalMetadata metadata) { return (String) metadata.getDatasetMetadata(PERMISSION_KEY); } public static void setGroupOwner(GlobalMetadata metadata, String groupOwner) { metadata.setDatasetMetadata(GROUP_OWNER, groupOwner); } public static String getGroupOwner(GlobalMetadata metadata) { return (String) metadata.getDatasetMetadata(GROUP_OWNER); } }
3,445
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/provider/DatasetAwareMetadataProviderFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metadata.provider; import com.typesafe.config.Config; /** * A factory that creates {@link DatasetAwareMetadataProvider} from {@link Config}. */ interface DatasetAwareMetadataProviderFactory { DatasetAwareMetadataProvider createMetadataProvider(Config config); }
3,446
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/provider/MetadataAwareFileSystem.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata.provider;

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;

import com.typesafe.config.Config;

import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.filesystem.FileSystemInstrumentation;
import org.apache.gobblin.util.filesystem.FileSystemInstrumentationFactory;
import org.apache.gobblin.util.filesystem.FileSystemKey;

import lombok.extern.slf4j.Slf4j;

/**
 * Extends {@link FileSystemInstrumentation} and is metadata aware when setting permissions and owners.
 *
 * <p>When a {@link DatasetAwareFsMetadataProvider} is configured, permission and group-owner
 * values recorded in the dataset's {@link org.apache.gobblin.metadata.types.GlobalMetadata}
 * override those requested by the caller; otherwise the caller-supplied values are used.
 */
@Slf4j
public class MetadataAwareFileSystem extends FileSystemInstrumentation {
  /** Config key whose value is the alias of the {@link DatasetAwareMetadataProviderFactory} to use. */
  public static final String METADATA_PROVIDER_ALIAS = "metadataProviderAlias";

  /**
   * Factory that wraps a {@link FileSystem} in a {@link MetadataAwareFileSystem} when a metadata
   * provider alias is configured; otherwise returns the filesystem unchanged.
   */
  public static class Factory<S extends ScopeType<S>> extends FileSystemInstrumentationFactory<S> {
    @Override
    public FileSystem instrumentFileSystem(FileSystem fs, SharedResourcesBroker<S> broker,
        ConfigView<S, FileSystemKey> config) {
      Config metaConfig = config.getConfig();
      String metadataProviderAlias = ConfigUtils.getString(metaConfig, METADATA_PROVIDER_ALIAS, "");
      log.info("Metadata provider alias is: " + metadataProviderAlias);
      if (!metadataProviderAlias.isEmpty()) {
        DatasetAwareFsMetadataProvider metadataProvider = null;
        try {
          // Resolve the factory by alias, instantiate it reflectively and build the provider.
          metadataProvider = (DatasetAwareFsMetadataProvider) new ClassAliasResolver<>(DatasetAwareMetadataProviderFactory.class)
              .resolveClass(metadataProviderAlias).newInstance().createMetadataProvider(metaConfig);
        } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
          // Deliberately non-fatal: fall through and return the undecorated filesystem below.
          log.error("Failed to create metadataProvider.", e);
        }
        if (metadataProvider != null) {
          return new MetadataAwareFileSystem(fs, metadataProvider);
        }
      }
      log.warn("No valid {} found. Will use filesystem {}.", METADATA_PROVIDER_ALIAS, fs.getClass().getName());
      return fs;
    }
  }

  private final DatasetAwareFsMetadataProvider metadataProvider;

  /**
   * @param underlying the filesystem being decorated
   * @param provider supplies dataset-level metadata for paths; may be null, in which case
   *        caller-supplied permissions/owners are used as-is
   */
  public MetadataAwareFileSystem(FileSystem underlying, DatasetAwareFsMetadataProvider provider) {
    super(underlying);
    this.metadataProvider = provider;
  }

  @Override
  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    FsPermission realPermission = getPermAtPathFromMetadataIfPresent(permission, f);
    return super.mkdirs(f, realPermission);
  }

  @Override
  public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
      short replication, long blockSize, Progressable progress) throws IOException {
    FsPermission realPermission = getPermAtPathFromMetadataIfPresent(permission, f);
    return super.create(f, realPermission, overwrite, bufferSize, replication, blockSize, progress);
  }

  @Override
  public void setPermission(Path f, final FsPermission permission) throws IOException {
    super.setPermission(f, getPermAtPathFromMetadataIfPresent(permission, f));
  }

  @Override
  public void setOwner(Path f, String user, String group) throws IOException {
    super.setOwner(f, user, getGroupAtPathFromMetadataIfPresent(group, f));
  }

  /**
   * Returns the group owner recorded in the dataset metadata for {@code path}, falling back to
   * {@code defaultGroup} when no provider is configured or the metadata has no "GroupOwner" entry.
   * The null check on the metadata value prevents propagating a null group to the underlying fs.
   */
  private String getGroupAtPathFromMetadataIfPresent(String defaultGroup, Path path) {
    if (this.metadataProvider != null) {
      String group = PermissionMetadataParser.getGroupOwner(metadataProvider.getGlobalMetadataForDatasetAtPath(path));
      if (group != null) {
        return group;
      }
    }
    return defaultGroup;
  }

  /**
   * Returns the permission recorded in the dataset metadata for {@code path}, falling back to
   * {@code defaultPermission} when no provider is configured or the metadata has no "Permission"
   * entry. The null check prevents a NullPointerException from Short.parseShort(null, ...) that
   * would previously occur when the metadata lacked the permission key.
   */
  private FsPermission getPermAtPathFromMetadataIfPresent(FsPermission defaultPermission, Path path) {
    if (this.metadataProvider != null) {
      String permission =
          PermissionMetadataParser.getPermission(metadataProvider.getGlobalMetadataForDatasetAtPath(path));
      if (permission != null) {
        return new FsPermission(Short.parseShort(permission, ConfigurationKeys.PERMISSION_PARSING_RADIX));
      }
    }
    return defaultPermission;
  }
}
3,447
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/provider/SimpleMetadataProviderFactory.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.metadata.provider; import com.typesafe.config.Config; import org.apache.gobblin.annotation.Alias; import org.apache.gobblin.util.ConfigUtils; import lombok.extern.slf4j.Slf4j; /** * Simple {@link DatasetAwareMetadataProviderFactory} that uses a user-defined permission and creates * {@link SimpleConfigMetadataProvider}. */ @Slf4j @Alias(value = "SimpleMetadataProvider") public class SimpleMetadataProviderFactory implements DatasetAwareMetadataProviderFactory { /** * Permission defined in config that will be used for all paths. */ public static final String ALL_PERMISSOIN = "allPermission"; @Override public DatasetAwareMetadataProvider createMetadataProvider(Config metaConfig) { String permission = ConfigUtils.getString(metaConfig, ALL_PERMISSOIN, ""); log.info("User defined permission is: " + permission); return new SimpleConfigMetadataProvider(permission); } }
3,448
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/provider/DatasetAwareFsMetadataProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata.provider;

import org.apache.hadoop.fs.Path;

import org.apache.gobblin.metadata.types.GlobalMetadata;

/**
 * Filesystem-based {@link DatasetAwareMetadataProvider}: implementations know how to map a
 * {@link Path} to a dataset urn, which then keys the lookup of {@link GlobalMetadata}.
 */
public abstract class DatasetAwareFsMetadataProvider implements DatasetAwareMetadataProvider {

  /**
   * Resolves the dataset urn that the given path belongs to.
   *
   * @param path a filesystem path inside some dataset
   * @return the urn identifying that dataset
   */
  public abstract String datasetUrnAtPath(Path path);

  /**
   * Convenience lookup: resolves the urn for {@code path} and delegates to
   * {@link #getGlobalMetadataForDataset(String)}.
   */
  public GlobalMetadata getGlobalMetadataForDatasetAtPath(Path path) {
    String urn = datasetUrnAtPath(path);
    return getGlobalMetadataForDataset(urn);
  }
}
3,449
0
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata
Create_ds/gobblin/gobblin-modules/gobblin-metadata/src/main/java/org/apache/gobblin/metadata/provider/DatasetAwareMetadataProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.metadata.provider;

import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.metadata.types.GlobalMetadata;

/**
 * Provides dataset-aware {@link GlobalMetadata}.
 *
 * <p>Implementations map a dataset urn to the metadata recorded for that dataset
 * (e.g. permission and ownership information).
 */
@Alpha
public interface DatasetAwareMetadataProvider {
  /**
   * @param datasetUrnSource for retrieving dataset urn.
   * @return returns a given dataset's related {@link GlobalMetadata}.
   */
  GlobalMetadata getGlobalMetadataForDataset(String datasetUrnSource);
}
3,450
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/converter/jdbc/AvroToJdbcEntryConverterTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter.jdbc;

import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Type;
import java.net.URISyntaxException;
import java.sql.Connection;
import java.sql.Date;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.Maps;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonSerializationContext;
import com.google.gson.JsonSerializer;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.publisher.JdbcPublisher;
import org.apache.gobblin.writer.Destination.DestinationType;
import org.apache.gobblin.writer.commands.JdbcWriterCommands;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/**
 * Unit tests for {@code AvroToJdbcEntryConverter}, covering: Avro date/time fields mapped to
 * JDBC DATE/TIME/TIMESTAMP types, Avro-to-JDBC field name remapping, and flattening of nested
 * (and union-typed) Avro records into flat JDBC columns. Avro schemas and data files are loaded
 * from test resources under /converter.
 */
@Test(groups = {"gobblin.converter"})
public class AvroToJdbcEntryConverterTest {

  /**
   * Verifies that fields declared in CONVERTER_AVRO_JDBC_DATE_FIELDS are converted to the
   * corresponding JDBC date/time types while all other fields become VARCHAR.
   */
  @Test
  public void testDateConversion() throws IOException, SchemaConversionException, SQLException {
    final String db = "db";
    final String table = "users";

    // Date-typed columns the (mocked) writer would report for db.users.
    Map<String, JdbcType> dateColums = new HashMap<>();
    dateColums.put("date_of_birth", JdbcType.DATE);
    dateColums.put("last_modified", JdbcType.TIME);
    dateColums.put("created", JdbcType.TIMESTAMP);

    JdbcWriterCommands mockWriterCommands = mock(JdbcWriterCommands.class);
    when(mockWriterCommands.retrieveDateColumns(db, table)).thenReturn(dateColums);

    JdbcWriterCommandsFactory factory = mock(JdbcWriterCommandsFactory.class);
    when(factory.newInstance(any(State.class), any(Connection.class))).thenReturn(mockWriterCommands);

    // Expected JDBC schema produced from /converter/fieldPickInput.avsc.
    List<JdbcEntryMetaDatum> jdbcEntryMetaData = new ArrayList<>();
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("name", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("favorite_number", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("favorite_color", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("date_of_birth", JdbcType.DATE));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("last_modified", JdbcType.TIME));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("created", JdbcType.TIMESTAMP));
    JdbcEntrySchema expected = new JdbcEntrySchema(jdbcEntryMetaData);

    Schema inputSchema = new Schema.Parser().parse(getClass().getResourceAsStream("/converter/fieldPickInput.avsc"));
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.appendToListProp(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, table);
    AvroToJdbcEntryConverter converter = new AvroToJdbcEntryConverter(workUnitState);

    // Date-field mapping is passed to the converter as a JSON-serialized map.
    Map<String, JdbcType> dateColumnMapping = Maps.newHashMap();
    dateColumnMapping.put("date_of_birth", JdbcType.DATE);
    dateColumnMapping.put("last_modified", JdbcType.TIME);
    dateColumnMapping.put("created", JdbcType.TIMESTAMP);
    workUnitState.appendToListProp(AvroToJdbcEntryConverter.CONVERTER_AVRO_JDBC_DATE_FIELDS,
        new Gson().toJson(dateColumnMapping));

    JdbcEntrySchema actual = converter.convertSchema(inputSchema, workUnitState);

    Assert.assertEquals(expected, actual);
  }

  /**
   * Verifies that Avro camelCase field names are renamed to the snake_case JDBC column names
   * supplied via CONVERTER_AVRO_JDBC_ENTRY_FIELDS_PAIRS; unmapped fields keep their names.
   */
  @Test
  public void testFieldNameConversion() throws IOException, SchemaConversionException, SQLException {
    Map<String, JdbcType> dateColums = new HashMap<>();
    dateColums.put("last_updated", JdbcType.TIMESTAMP);

    final String db = "db";
    final String table = "users";
    JdbcWriterCommands mockWriterCommands = mock(JdbcWriterCommands.class);
    when(mockWriterCommands.retrieveDateColumns(db, table)).thenReturn(dateColums);

    JdbcWriterCommandsFactory factory = mock(JdbcWriterCommandsFactory.class);
    when(factory.newInstance(any(State.class), any(Connection.class))).thenReturn(mockWriterCommands);

    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.appendToListProp(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, table);
    // Avro field name -> JDBC column name pairs, passed as JSON.
    String fieldPairJson =
        "{\"userId\":\"user_id\" , \"memberId\":\"member_id\" , \"businessUnit\":\"business_unit\", \"geoRegion\":\"geo_region\", \"superRegion\":\"super_region\", \"subRegion\":\"sub_region\"}";
    workUnitState.appendToListProp(ConfigurationKeys.CONVERTER_AVRO_JDBC_ENTRY_FIELDS_PAIRS, fieldPairJson);
    workUnitState.appendToListProp(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, DestinationType.MYSQL.name());

    AvroToJdbcEntryConverter converter = new AvroToJdbcEntryConverter(workUnitState);

    Schema inputSchema = new Schema.Parser().parse(getClass().getResourceAsStream("/converter/user.avsc"));

    // Expected JDBC schema: renamed columns plus pass-through columns from user.avsc.
    List<JdbcEntryMetaDatum> jdbcEntryMetaData = new ArrayList<>();
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("user_id", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("member_id", JdbcType.BIGINT));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("business_unit", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("level", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("geo_region", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("super_region", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("sub_region", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("currency", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("segment", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("vertical", JdbcType.VARCHAR));
    JdbcEntrySchema expected = new JdbcEntrySchema(jdbcEntryMetaData);

    // No date fields in this test; the mapping is intentionally empty.
    Map<String, JdbcType> dateColumnMapping = Maps.newHashMap();
    workUnitState.appendToListProp(AvroToJdbcEntryConverter.CONVERTER_AVRO_JDBC_DATE_FIELDS,
        new Gson().toJson(dateColumnMapping));

    JdbcEntrySchema actual = converter.convertSchema(inputSchema, workUnitState);

    Assert.assertEquals(expected, actual);
  }

  /**
   * Verifies schema and record flattening of nested Avro records (including a union of records):
   * nested fields become columns named by joining the nesting path with underscores. Converted
   * records are serialized to JSON (date/time values as epoch millis) and compared against the
   * golden file /converter/pickfields_nested_with_union.json.
   */
  @Test
  public void testFlattening() throws IOException, SchemaConversionException, SQLException, URISyntaxException,
      DataConversionException {
    final String db = "db";
    final String table = "users";

    Map<String, JdbcType> dateColums = new HashMap<>();
    dateColums.put("date_of_birth", JdbcType.DATE);
    dateColums.put("last_modified", JdbcType.TIME);
    dateColums.put("created", JdbcType.TIMESTAMP);

    JdbcWriterCommands mockWriterCommands = mock(JdbcWriterCommands.class);
    when(mockWriterCommands.retrieveDateColumns(db, table)).thenReturn(dateColums);

    JdbcWriterCommandsFactory factory = mock(JdbcWriterCommandsFactory.class);
    when(factory.newInstance(any(State.class), any(Connection.class))).thenReturn(mockWriterCommands);

    // Expected flattened schema: top-level fields plus nested fields joined with '_'.
    List<JdbcEntryMetaDatum> jdbcEntryMetaData = new ArrayList<>();
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("name", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("favorite_number", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("favorite_color", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("date_of_birth", JdbcType.DATE));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("last_modified", JdbcType.TIME));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("created", JdbcType.TIMESTAMP));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("nested1_nested1_string", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("nested1_nested1_int", JdbcType.INTEGER));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("nested1_nested2_union_nested2_string", JdbcType.VARCHAR));
    jdbcEntryMetaData.add(new JdbcEntryMetaDatum("nested1_nested2_union_nested2_int", JdbcType.INTEGER));
    JdbcEntrySchema expected = new JdbcEntrySchema(jdbcEntryMetaData);

    Schema inputSchema =
        new Schema.Parser().parse(getClass().getResourceAsStream("/converter/pickfields_nested_with_union.avsc"));
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.appendToListProp(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, table);
    AvroToJdbcEntryConverter converter = new AvroToJdbcEntryConverter(workUnitState);

    Map<String, JdbcType> dateColumnMapping = Maps.newHashMap();
    dateColumnMapping.put("date_of_birth", JdbcType.DATE);
    dateColumnMapping.put("last_modified", JdbcType.TIME);
    dateColumnMapping.put("created", JdbcType.TIMESTAMP);
    workUnitState.appendToListProp(AvroToJdbcEntryConverter.CONVERTER_AVRO_JDBC_DATE_FIELDS,
        new Gson().toJson(dateColumnMapping));

    JdbcEntrySchema actualSchema = converter.convertSchema(inputSchema, workUnitState);
    Assert.assertEquals(expected, actualSchema);

    // Convert every record in the Avro data file and collect the flattened JDBC entries.
    try (DataFileReader<GenericRecord> srcDataFileReader =
        new DataFileReader<GenericRecord>(new File(getClass().getResource(
            "/converter/pickfields_nested_with_union.avro").toURI()), new GenericDatumReader<GenericRecord>(
            inputSchema))) {
      List<JdbcEntryData> entries = new ArrayList<>();
      while (srcDataFileReader.hasNext()) {
        JdbcEntryData actualData =
            converter.convertRecord(actualSchema, srcDataFileReader.next(), workUnitState).iterator().next();
        entries.add(actualData);
      }

      // Serializer for a single datum: nulls kept, java.sql date/time types as epoch millis,
      // everything else via toString(). This must match how the golden JSON was produced.
      final JsonSerializer<JdbcEntryDatum> datumSer = new JsonSerializer<JdbcEntryDatum>() {
        @Override
        public JsonElement serialize(JdbcEntryDatum datum, Type typeOfSrc, JsonSerializationContext context) {
          JsonObject jso = new JsonObject();
          if (datum.getVal() == null) {
            jso.add(datum.getColumnName(), null);
            return jso;
          }

          if (datum.getVal() instanceof Date) {
            jso.addProperty(datum.getColumnName(), ((Date) datum.getVal()).getTime());
          } else if (datum.getVal() instanceof Timestamp) {
            jso.addProperty(datum.getColumnName(), ((Timestamp) datum.getVal()).getTime());
          } else if (datum.getVal() instanceof Time) {
            jso.addProperty(datum.getColumnName(), ((Time) datum.getVal()).getTime());
          } else {
            jso.addProperty(datum.getColumnName(), datum.getVal().toString());
          }
          return jso;
        }
      };

      // Serializer for a whole entry: an array of per-datum objects, in iteration order.
      JsonSerializer<JdbcEntryData> serializer = new JsonSerializer<JdbcEntryData>() {
        @Override
        public JsonElement serialize(JdbcEntryData src, Type typeOfSrc, JsonSerializationContext context) {
          JsonArray arr = new JsonArray();
          for (JdbcEntryDatum datum : src) {
            arr.add(datumSer.serialize(datum, datum.getClass(), context));
          }
          return arr;
        }
      };

      Gson gson = new GsonBuilder().registerTypeAdapter(JdbcEntryData.class, serializer).serializeNulls().create();

      JsonElement actualSerialized = gson.toJsonTree(entries);
      JsonElement expectedSerialized =
          new JsonParser().parse(new InputStreamReader(getClass().getResourceAsStream(
              "/converter/pickfields_nested_with_union.json")));

      Assert.assertEquals(actualSerialized, expectedSerialized);
    }

    converter.close();
  }
}
3,451
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source/TimestampWatermarkTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source; import org.testng.Assert; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.watermark.TimestampWatermark; import org.apache.gobblin.source.jdbc.MysqlExtractor; import org.apache.gobblin.source.jdbc.SqlServerExtractor; /** * Complementary tests for {@link TimestampWatermark} */ public class TimestampWatermarkTest { private static final long WATERMARK_VALUE = 20141029133015L; private static final String COLUMN = "my_column"; private static final String OPERATOR = ">="; private TimestampWatermark tsWatermark; private final String watermarkFormat = "yyyyMMddHHmmss"; private final WorkUnitState workunitState = new WorkUnitState(); @BeforeClass public void setUpBeforeClass() throws Exception { this.tsWatermark = new TimestampWatermark(COLUMN, this.watermarkFormat); this.workunitState.setId(""); } @Test public void testGetWatermarkConditionMySql() throws Exception { MysqlExtractor extractor = new MysqlExtractor(this.workunitState); Assert.assertEquals(this.tsWatermark.getWatermarkCondition(extractor, WATERMARK_VALUE, OPERATOR), COLUMN + " " + 
OPERATOR + " '2014-10-29 13:30:15'"); } @Test public void testGetWatermarkConditionSqlServer() throws Exception { SqlServerExtractor extractor = new SqlServerExtractor(this.workunitState); Assert.assertEquals(this.tsWatermark.getWatermarkCondition(extractor, WATERMARK_VALUE, OPERATOR), COLUMN + " " + OPERATOR + " '2014-10-29 13:30:15'"); } }
3,452
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source/jdbc/MockJdbcColumn.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; import lombok.AllArgsConstructor; import lombok.Getter; @Getter @AllArgsConstructor public class MockJdbcColumn { private final String columnName; private final String value; private final int type; }
3,453
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source/jdbc/PostgresqlExtractorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.jdbc;

import java.sql.ResultSet;
import java.sql.Types;
import java.util.List;

import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;
import com.mockrunner.mock.jdbc.MockResultSet;

import static org.testng.Assert.assertEquals;

/**
 * Unit tests for {@code PostgresqlExtractor}: LIMIT-clause construction/removal/parsing and
 * the hour/date/timestamp predicate formats Postgres expects.
 */
@Test(groups = {"gobblin.source.jdbc"})
public class PostgresqlExtractorTest {

  // Columns used to build the mock result set fixture.
  private final static List<MockJdbcColumn> COLUMNS = ImmutableList
      .of(new MockJdbcColumn("id", "1", Types.INTEGER), new MockJdbcColumn("name", "name_1", Types.VARCHAR),
          new MockJdbcColumn("age", "20", Types.INTEGER));

  // Sample queries with various LIMIT placements; QUERY_EMPTY/QUERY_REG have no LIMIT.
  private static final String QUERY_1 = "SELECT * FROM x WHERE LIMIT 532";
  private static final String QUERY_2 = "SELECT * FROM x WHERE x.a < 10 LIMIT 50";
  private static final String QUERY_3 = "SELECT * FROM x WHERE x.a < 10 AND x.b = 20 LIMIT 50";
  private static final String QUERY_EMPTY = "";
  private static final String QUERY_REG = "SELECT * FROM x WHERE x.a < 10";

  private CommandOutput<JdbcCommand, ResultSet> output;
  private State state;
  private PostgresqlExtractor postgresqlExtractor;

  @BeforeClass
  public void setup() {
    output = new JdbcCommandOutput();
    try {
      output.put(new JdbcCommand(), buildMockResultSet());
    } catch (Exception e) {
      // hack for test failure: fail loudly from setup since assertEquals of two different
      // strings can never pass.
      assertEquals("PostgresqlExtractorTest: error initializing mock result set", "false");
    }
    state = new WorkUnitState();
    state.setId("id");
    postgresqlExtractor = new PostgresqlExtractor((WorkUnitState) state);
  }

  /** The sampling clause must be a Postgres "limit N" using the extractor's sample count. */
  @Test
  public void testConstructSampleClause() throws Exception {
    String sClause = postgresqlExtractor.constructSampleClause();
    assertEquals(sClause.trim(), (" limit " + postgresqlExtractor.getSampleRecordCount()).trim());
  }

  /** Removing LIMIT must leave a valid WHERE clause terminated with a no-op "1=1" predicate. */
  @Test
  public void testRemoveSampleClauseFromQuery() throws Exception {
    String q1Expected = "SELECT * FROM x WHERE 1=1";
    String q2Expected = "SELECT * FROM x WHERE x.a < 10 AND 1=1";
    String q3Expected = "SELECT * FROM x WHERE x.a < 10 AND x.b = 20 AND 1=1";

    String q1Parsed = postgresqlExtractor.removeSampleClauseFromQuery(QUERY_1);
    String q2Parsed = postgresqlExtractor.removeSampleClauseFromQuery(QUERY_2);
    String q3Parsed = postgresqlExtractor.removeSampleClauseFromQuery(QUERY_3);

    assertEquals(q1Parsed, q1Expected);
    assertEquals(q2Parsed, q2Expected);
    assertEquals(q3Parsed, q3Expected);
  }

  /** Extracting the LIMIT count must return the number, or -1 when no LIMIT is present. */
  @Test
  public void testExractSampleRecordCountFromQuery() throws Exception {
    long res1 = postgresqlExtractor.extractSampleRecordCountFromQuery(QUERY_1);
    long res2 = postgresqlExtractor.extractSampleRecordCountFromQuery(QUERY_2);
    long res3 = postgresqlExtractor.extractSampleRecordCountFromQuery(QUERY_3);
    long res4 = postgresqlExtractor.extractSampleRecordCountFromQuery(QUERY_EMPTY);
    long res5 = postgresqlExtractor.extractSampleRecordCountFromQuery(QUERY_REG);

    assertEquals(res1, (long) 532);
    assertEquals(res2, (long) 50);
    assertEquals(res3, (long) 50);
    assertEquals(res4, (long) -1);
    assertEquals(res5, (long) -1);
  }

  /** Hour predicates must be two-digit, zero-padded, with 24 wrapping to '00'. */
  @Test
  public void testHourPredicateCondition() throws Exception {
    String res1 = postgresqlExtractor.getHourPredicateCondition("my_time", 24L, "h", ">");
    String res2 = postgresqlExtractor.getHourPredicateCondition("my_time", 23L, "HH", ">");
    String res3 = postgresqlExtractor.getHourPredicateCondition("my_time", 2L, "h", ">");

    assertEquals(res1, "my_time > '00'");
    assertEquals(res2, "my_time > '23'");
    assertEquals(res3, "my_time > '02'");
  }

  /** Date watermarks in ddMMyyyy form must render as ISO yyyy-MM-dd. */
  @Test
  public void testDatePredicateCondition() throws Exception {
    String res1 = postgresqlExtractor.getDatePredicateCondition("my_date", 12061992L, "ddMMyyyy", ">");
    assertEquals(res1, "my_date > '1992-06-12'");
  }

  /** Timestamp watermarks must render as 'yyyy-MM-dd HH:mm:ss'. */
  @Test
  public void testTimePredicateCondition() throws Exception {
    String res1 = postgresqlExtractor.getTimestampPredicateCondition("my_date", 12061992080809L, "ddMMyyyyhhmmss", ">");
    assertEquals(res1, "my_date > '1992-06-12 08:08:09'");
  }

  /**
   * Build a mock implementation of Result using Mockito
   */
  private ResultSet buildMockResultSet() throws Exception {
    MockResultSet mrs = new MockResultSet(StringUtils.EMPTY);
    for (MockJdbcColumn column : COLUMNS) {
      mrs.addColumn(column.getColumnName(), ImmutableList.of(column.getValue()));
    }
    return mrs;
  }
}
3,454
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source/jdbc/MockTimestampResultSet.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.jdbc;

import com.mockrunner.mock.jdbc.MockResultSet;

import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.text.SimpleDateFormat;

/**
 * A class that mocks the getTimestamp() behavior of a ResultSet that is returned by
 * mysql-connector-8. This class expects that all entries in the ResultSet are timestamps.
 */
class MockTimestampResultSet extends MockResultSet {

  /** Mirrors the driver's zeroDateTimeBehavior connection property. */
  enum ZeroDateTimeBehavior {
    CONVERT_TO_NULL, ROUND, EXCEPTION
  };

  private ZeroDateTimeBehavior zeroDateTimeBehavior;

  MockTimestampResultSet(String id, String behavior) {
    super(id);
    // The driver's default is to raise an error when a zero timestamp is read.
    this.zeroDateTimeBehavior =
        (behavior == null) ? ZeroDateTimeBehavior.EXCEPTION : ZeroDateTimeBehavior.valueOf(behavior);
  }

  /** A MySQL "zero" timestamp, which has no representation in java.sql.Timestamp. */
  private boolean isZeroTimestamp(String value) {
    return value.startsWith("0000-00-00 00:00:00");
  }

  /**
   * Mimics mysql-connector-8's getTimestamp(); see com.mysql.cj.result.AbstractDateTimeValueFactory.
   * A zero timestamp is rounded, converted to null, or rejected per the configured behavior.
   */
  @Override
  public Timestamp getTimestamp(int columnIndex) throws SQLException {
    String raw = getObject(columnIndex).toString();
    if (!isZeroTimestamp(raw)) {
      return super.getTimestamp(columnIndex);
    }
    switch (this.zeroDateTimeBehavior) {
      case ROUND:
        return Timestamp.valueOf("0001-01-01 00:00:00.0");
      case CONVERT_TO_NULL:
        return null;
      default:
        // EXCEPTION: Timestamp.valueOf rejects an all-zero timestamp, matching the driver's error.
        return Timestamp.valueOf(raw);
    }
  }

  /**
   * Mimics mysql-connector-8's getString(): timestamp columns are rendered in
   * "yyyy-MM-dd HH:mm:ss" form, with zero timestamps passed through literally.
   */
  @Override
  public String getString(int columnIndex) throws SQLException {
    if (this.getMetaData().getColumnType(columnIndex) != Types.TIMESTAMP) {
      return super.getString(columnIndex);
    }
    if (isZeroTimestamp(getObject(columnIndex).toString())) {
      return "0000-00-00 00:00:00";
    }
    return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(getTimestamp(columnIndex));
  }
}
3,455
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source/jdbc/OracleExtractorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.jdbc;

import static org.testng.Assert.assertEquals;
import static org.testng.Assert.fail;

import java.sql.ResultSet;
import java.sql.Types;
import java.util.List;

import org.apache.commons.lang.StringUtils;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableList;
import com.mockrunner.mock.jdbc.MockResultSet;

import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.extract.CommandOutput;

/**
 * Unit tests for {@link OracleExtractor}: sample-clause construction, removal,
 * and extraction of the sample record count from ROWNUM-limited queries.
 */
@Test(groups = { "gobblin.source.jdbc" })
public class OracleExtractorTest {

  private final static List<MockJdbcColumn> COLUMNS = ImmutableList.of(
      new MockJdbcColumn("id", "1", Types.INTEGER),
      new MockJdbcColumn("name", "name_1", Types.VARCHAR),
      new MockJdbcColumn("age", "20", Types.INTEGER));

  // Queries covering ROWNUM at different positions in the WHERE clause.
  private static final String QUERY_1 = "SELECT * FROM x WHERE ROWNUM <= 532";
  private static final String QUERY_2 = "SELECT * FROM x WHERE ROWNUM <= 5 AND x.a < 10";
  private static final String QUERY_3 = "SELECT * FROM x WHERE x.a < 10 AND ROWNUM <= 50";
  private static final String QUERY_4 = "SELECT * FROM x WHERE x.a < 10 AND ROWNUM <= 50 AND x.b = 20";
  private static final String QUERY_EMPTY = "";
  private static final String QUERY_REG = "SELECT * FROM x WHERE x.a < 10";

  private CommandOutput<JdbcCommand, ResultSet> output;
  private WorkUnitState state;
  private OracleExtractor oracleExtractor;

  @BeforeClass
  public void setup() {
    output = new JdbcCommandOutput();
    try {
      output.put(new JdbcCommand(), buildMockResultSet());
    } catch (Exception e) {
      // Abort the test class explicitly instead of relying on a deliberately failing assertEquals.
      fail("OracleExtractorTest: error initializing mock result set: " + e.getMessage());
    }
    state = new WorkUnitState();
    state.setId("id");
    oracleExtractor = new OracleExtractor(state);
  }

  @Test
  public void testConstructSampleClause() {
    String sClause = oracleExtractor.constructSampleClause();
    assertEquals(sClause.trim(), (" rownum <= " + oracleExtractor.getSampleRecordCount()).trim());
  }

  @Test
  public void testRemoveSampleClauseFromQuery() {
    // The ROWNUM predicate is expected to be replaced by the always-true "1=1".
    String q1Expected = "SELECT * FROM x WHERE 1=1";
    String q2Expected = "SELECT * FROM x WHERE 1=1 AND x.a < 10";
    String q3Expected = "SELECT * FROM x WHERE x.a < 10 AND 1=1";
    String q4Expected = "SELECT * FROM x WHERE x.a < 10 AND 1=1 AND x.b = 20";

    String q1Parsed = oracleExtractor.removeSampleClauseFromQuery(QUERY_1);
    String q2Parsed = oracleExtractor.removeSampleClauseFromQuery(QUERY_2);
    String q3Parsed = oracleExtractor.removeSampleClauseFromQuery(QUERY_3);
    String q4Parsed = oracleExtractor.removeSampleClauseFromQuery(QUERY_4);

    assertEquals(q1Parsed, q1Expected);
    assertEquals(q2Parsed, q2Expected);
    assertEquals(q3Parsed, q3Expected);
    assertEquals(q4Parsed, q4Expected);
  }

  // NOTE(review): method name retains the original typo ("Exract") in case it is
  // referenced by name in a testng.xml include list.
  @Test
  public void testExractSampleRecordCountFromQuery() {
    long res1 = oracleExtractor.extractSampleRecordCountFromQuery(QUERY_1);
    long res2 = oracleExtractor.extractSampleRecordCountFromQuery(QUERY_2);
    long res3 = oracleExtractor.extractSampleRecordCountFromQuery(QUERY_3);
    long res4 = oracleExtractor.extractSampleRecordCountFromQuery(QUERY_4);
    long res5 = oracleExtractor.extractSampleRecordCountFromQuery(QUERY_EMPTY);
    long res6 = oracleExtractor.extractSampleRecordCountFromQuery(QUERY_REG);

    assertEquals(res1, (long) 532);
    assertEquals(res2, (long) 5);
    assertEquals(res3, (long) 50);
    assertEquals(res4, (long) 50);
    // Queries with no ROWNUM clause (or empty queries) yield -1.
    assertEquals(res5, (long) -1);
    assertEquals(res6, (long) -1);
  }

  /**
   * Build a mock ResultSet containing one row with the values from {@link #COLUMNS}.
   */
  private ResultSet buildMockResultSet() {
    MockResultSet mrs = new MockResultSet(StringUtils.EMPTY);
    for (MockJdbcColumn column : COLUMNS) {
      mrs.addColumn(column.getColumnName(), ImmutableList.of(column.getValue()));
    }
    return mrs;
  }
}
3,456
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source/jdbc/SqlQueryUtilsTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; import org.testng.Assert; import org.testng.annotations.Test; /** * Unit tests for {@link SqlQueryUtils} */ public class SqlQueryUtilsTest { @Test public void testAddPredicate() { Assert.assertEquals(SqlQueryUtils.addPredicate("SELECT foo FROM bar", "foo != 'blah'"), "SELECT foo FROM bar where (foo != 'blah')"); Assert.assertEquals(SqlQueryUtils.addPredicate("SELECT foo,whereTo FROM bar WHERE whereTo==foo", "foo != 'blah'"), "SELECT foo,whereTo FROM bar WHERE whereTo==foo and (foo != 'blah')"); Assert.assertEquals(SqlQueryUtils.addPredicate("SELECT foo,andThis FROM bar WHERE andThis>foo", "foo != 'blah'"), "SELECT foo,andThis FROM bar WHERE andThis>foo and (foo != 'blah')"); Assert.assertEquals(SqlQueryUtils.addPredicate("SELECT foo FROM bar", null), "SELECT foo FROM bar"); Assert.assertEquals(SqlQueryUtils.addPredicate("SELECT foo FROM bar", ""), "SELECT foo FROM bar"); try { SqlQueryUtils.addPredicate("SELECT foo,foo1 WHERE foo1==foo", "foo != 'blah'"); } catch (IllegalArgumentException e) { Assert.assertTrue(e.toString().contains("'from'")); } try { SqlQueryUtils.addPredicate("SELECT foo,foo1 FROM blah WHERE foo1==foo ORDER by foo", "foo != 'blah'"); } 
catch (IllegalArgumentException e) { Assert.assertTrue(e.toString().contains("'order by'")); } try { SqlQueryUtils.addPredicate("SELECT foo,foo1 FROM blah WHERE foo1==foo GROUP BY foo", "foo != 'blah'"); } catch (IllegalArgumentException e) { Assert.assertTrue(e.toString().contains("'group by'")); } try { SqlQueryUtils.addPredicate("SELECT foo,foo1 FROM blah WHERE foo1==foo HAVING foo1 is null", "foo != 'blah'"); } catch (IllegalArgumentException e) { Assert.assertTrue(e.toString().contains("'having'")); } try { SqlQueryUtils.addPredicate("SELECT foo,foo1 FROM blah WHERE foo1==foo LIMIT 10", "foo != 'blah'"); } catch (IllegalArgumentException e) { Assert.assertTrue(e.toString().contains("'limit'")); } } @Test public void testCastToBoolean() { Assert.assertTrue(SqlQueryUtils.castToBoolean("y")); Assert.assertTrue(SqlQueryUtils.castToBoolean("yes")); Assert.assertTrue(SqlQueryUtils.castToBoolean("t")); Assert.assertTrue(SqlQueryUtils.castToBoolean("true")); Assert.assertTrue(SqlQueryUtils.castToBoolean("1")); Assert.assertFalse(SqlQueryUtils.castToBoolean("n")); Assert.assertFalse(SqlQueryUtils.castToBoolean("no")); Assert.assertFalse(SqlQueryUtils.castToBoolean("f")); Assert.assertFalse(SqlQueryUtils.castToBoolean("false")); Assert.assertFalse(SqlQueryUtils.castToBoolean("0")); Assert.assertTrue(SqlQueryUtils.castToBoolean("YeS")); Assert.assertFalse(SqlQueryUtils.castToBoolean("")); Assert.assertFalse(SqlQueryUtils.castToBoolean("asdfafgsagareg")); } }
3,457
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/source/jdbc/JdbcExtractorTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.jdbc;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.mockrunner.mock.jdbc.MockResultSet;
import com.mockrunner.mock.jdbc.MockResultSetMetaData;

import java.sql.ResultSet;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;

import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.extract.CommandOutput;
import org.testng.Assert;
import org.testng.annotations.Test;

import static org.testng.Assert.*;

/**
 * Unit tests for {@link JdbcExtractor}: data iteration, schema metadata queries,
 * join detection, and mysql zeroDateTimeBehavior handling.
 */
@Test(groups = { "gobblin.source.jdbc" })
public class JdbcExtractorTest {

  private static final List<MockJdbcColumn> COLUMNS =
      ImmutableList.of(new MockJdbcColumn("id", "1", Types.INTEGER),
          new MockJdbcColumn("name", "name_1", Types.VARCHAR),
          new MockJdbcColumn("age", "20", Types.INTEGER));

  private static final String TIME_COLUMN = "time";

  @Test
  public void testGetData() throws Exception {
    CommandOutput<JdbcCommand, ResultSet> output = new JdbcCommandOutput();
    output.put(new JdbcCommand(), buildMockResultSet());

    WorkUnitState state = new WorkUnitState();
    state.setId("id");

    JdbcExtractor jdbcExtractor = new MysqlExtractor(state);

    List<String> columnNames = Lists.newArrayListWithCapacity(COLUMNS.size());
    for (MockJdbcColumn mockJdbcColumn : COLUMNS) {
      columnNames.add(mockJdbcColumn.getColumnName());
    }
    jdbcExtractor.setHeaderRecord(columnNames);

    Iterator<JsonElement> itr = jdbcExtractor.getData(output);

    // Make sure there is an element in the iterator
    assertTrue(itr.hasNext());

    JsonObject obj = itr.next().getAsJsonObject();

    // Verify the columns
    for (MockJdbcColumn column : COLUMNS) {
      assertEquals(obj.get(column.getColumnName()).getAsString(), column.getValue());
    }
  }

  /*
   * Build a mock ResultSet containing one row with the values from COLUMNS.
   */
  private ResultSet buildMockResultSet() throws Exception {
    MockResultSet mrs = new MockResultSet(StringUtils.EMPTY);
    for (MockJdbcColumn column : COLUMNS) {
      mrs.addColumn(column.getColumnName(), ImmutableList.of(column.getValue()));
    }
    return mrs;
  }

  /**
   * Test for the metadata query to see if the check for unsigned int is present
   */
  @Test
  public void testUnsignedInt() throws SchemaException {
    WorkUnitState state = new WorkUnitState();
    state.setId("id");
    MysqlExtractor mysqlExtractor = new MysqlExtractor(state);

    List<Command> commands = mysqlExtractor.getSchemaMetadata("db", "table");
    assertTrue(commands.get(0).getCommandType() == JdbcCommand.JdbcCommandType.QUERY);
    assertTrue(commands.get(0).getParams().get(0).contains("bigint"));
    assertTrue(commands.get(1).getCommandType() == JdbcCommand.JdbcCommandType.QUERYPARAMS);
    assertTrue(!commands.get(1).getParams().get(0).contains("unsigned"));

    // set option to promote unsigned int to bigint
    state.setProp(ConfigurationKeys.SOURCE_QUERYBASED_PROMOTE_UNSIGNED_INT_TO_BIGINT, "true");

    commands = mysqlExtractor.getSchemaMetadata("db", "table");
    assertTrue(commands.get(0).getCommandType() == JdbcCommand.JdbcCommandType.QUERY);
    assertTrue(commands.get(0).getParams().get(0).contains("bigint"));
    assertTrue(commands.get(1).getCommandType() == JdbcCommand.JdbcCommandType.QUERYPARAMS);
    assertTrue(commands.get(1).getParams().get(0).contains("unsigned"));
  }

  // Runs as a test via the class-level @Test annotation.
  public void testHasJoinOperation() {
    boolean result;

    // no space
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a,b");
    Assert.assertTrue(result);

    // has space
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a aliasA , b aliasB");
    Assert.assertTrue(result);
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a , b");
    Assert.assertTrue(result);
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a , b limit 100");
    Assert.assertTrue(result);
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a limit 100");
    Assert.assertFalse(result);
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a , b");
    Assert.assertTrue(result);

    // simple query
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a");
    Assert.assertFalse(result);
    // a comma inside a string literal is not a join
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a where a.id=\"hello,world\"");
    Assert.assertFalse(result);
    result = JdbcExtractor.hasJoinOperation("select a.fromLoc from a where a.id=\"hello,world\" limit 100");
    Assert.assertFalse(result);

    // complex query
    result = JdbcExtractor.hasJoinOperation(
        "select a.fromLoc from (Select dest as fromLoc, id from b) as a, c where a.id < c.id");
    Assert.assertTrue(result);
    result = JdbcExtractor.hasJoinOperation(
        "select a.fromLoc from (Select dest as fromLoc, id from b) as a, c where a.id < c.id limit 10");
    Assert.assertTrue(result);
    result = JdbcExtractor.hasJoinOperation(
        "select a.fromLoc from (Select dest as fromLoc, id from b) as a limit 10");
    Assert.assertFalse(result);
  }

  /**
   * Helper function to build MockTimestampResultSet containing a single timestamp column.
   * @param testCases the list of test cases
   * @param behavior the expected behavior for the MockTimestampResultSet
   * @return a MockTimestampResultSet containing the test cases
   */
  private ResultSet buildMockResultSetForTimeColumn(List<String> testCases, String behavior) {
    MockResultSetMetaData mrsMetaData = new MockResultSetMetaData();
    mrsMetaData.setColumnCount(1);
    mrsMetaData.setColumnName(1, TIME_COLUMN);
    mrsMetaData.setColumnType(1, Types.TIMESTAMP);

    MockTimestampResultSet mrs = new MockTimestampResultSet(StringUtils.EMPTY, behavior);
    mrs.setResultSetMetaData(mrsMetaData);
    mrs.addColumn(TIME_COLUMN, testCases);
    return mrs;
  }

  /**
   * Helper function to test when zeroDateTimeBehavior is set.
   * @param testCases A LinkedHashMap mapping the input timestamp as a string to the expected output.
   *                  We use LinkedHashMap to preserve the order of the inputs/outputs.
   * @param zeroDateTimeBehavior the expected behavior of a zero timestamp. Should be one of these values:
   *                             null, "CONVERT_TO_NULL", "ROUND", "EXCEPTION"
   * @throws Exception
   */
  private void testZeroDateTimeBehavior(LinkedHashMap<String, String> testCases, String zeroDateTimeBehavior)
      throws Exception {
    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setId("id");
    if (zeroDateTimeBehavior != null) {
      workUnitState.setProp(ConfigurationKeys.SOURCE_CONN_PROPERTIES,
          "zeroDateTimeBehavior=" + zeroDateTimeBehavior);
    }

    JdbcExtractor jdbcExtractor = new MysqlExtractor(workUnitState);
    jdbcExtractor.setHeaderRecord(Collections.singletonList(TIME_COLUMN));

    CommandOutput<JdbcCommand, ResultSet> output = new JdbcCommandOutput();
    output.put(new JdbcCommand(),
        buildMockResultSetForTimeColumn(new ArrayList<>(testCases.keySet()), zeroDateTimeBehavior));

    Iterator<JsonElement> dataIterator = jdbcExtractor.getData(output);

    // Make sure there is an element in the iterator
    assertTrue(dataIterator.hasNext());

    // Iterate through the output and verify that they are equal to the expected output
    Iterator<String> expectedIterator = testCases.values().iterator();
    while (dataIterator.hasNext()) {
      JsonElement element = dataIterator.next().getAsJsonObject().get(TIME_COLUMN);
      String expectedString = expectedIterator.next();
      if (element.isJsonNull()) {
        // Use a TestNG assertion rather than the Java 'assert' keyword, which is a
        // no-op unless the JVM runs with -ea and would silently skip this check.
        assertNull(expectedString);
      } else {
        assertEquals(element.getAsString(), expectedString);
      }
    }
  }

  // zeroDateTimeBehavior=CONVERT_TO_NULL
  // Zero timestamps should be converted to null.
  // Other timestamps should be returned formatted as "yyyy-MM-dd HH:mm:ss".
  public void testZeroDateTimeBehaviorConvertToNull() throws Exception {
    LinkedHashMap<String, String> testCases = new LinkedHashMap<>();
    testCases.put("2000-01-01 12:34:56.789", "2000-01-01 12:34:56");
    testCases.put("1999-12-12 13:14:15.16", "1999-12-12 13:14:15");
    testCases.put("0000-00-00 00:00:00.0", null);

    testZeroDateTimeBehavior(testCases, "CONVERT_TO_NULL");
  }

  // zeroDateTimeBehavior=ROUND
  // Zero timestamps should be converted to "0001-01-01 00:00:00".
  // Other timestamps should be returned formatted as "yyyy-MM-dd HH:mm:ss".
  public void testZeroDateTimeBehaviorRound() throws Exception {
    LinkedHashMap<String, String> testCases = new LinkedHashMap<>();
    testCases.put("2000-01-01 12:34:56.789", "2000-01-01 12:34:56");
    testCases.put("1999-12-12 13:14:15.16", "1999-12-12 13:14:15");
    testCases.put("0000-00-00 00:00:00.0", "0001-01-01 00:00:00");

    testZeroDateTimeBehavior(testCases, "ROUND");
  }

  // zeroDateTimeBehavior=EXCEPTION
  // Zero timestamps should cause an exception to be thrown.
  // Other timestamps should be returned formatted as "yyyy-MM-dd HH:mm:ss".
  public void testZeroDateTimeBehaviorException() throws Exception {
    LinkedHashMap<String, String> testThrows = new LinkedHashMap<>();
    testThrows.put("0000-00-00 00:00:00", "this value is irrelevant");
    Assert.assertThrows(() -> testZeroDateTimeBehavior(testThrows, "EXCEPTION"));

    LinkedHashMap<String, String> testPasses = new LinkedHashMap<>();
    testPasses.put("2000-01-01 12:34:56.789", "2000-01-01 12:34:56");
    testZeroDateTimeBehavior(testPasses, "EXCEPTION");
  }

  // zeroDateTimeBehavior is not set.
  // All timestamps should be returned formatted as "yyyy-MM-dd HH:mm:ss"
  public void testZeroDateTimeBehaviorNotSpecified() throws Exception {
    LinkedHashMap<String, String> testCases = new LinkedHashMap<>();
    testCases.put("2000-01-01 12:34:56.789", "2000-01-01 12:34:56");
    testCases.put("1999-12-12 13:14:15.16", "1999-12-12 13:14:15");
    testCases.put("0000-00-00 00:00:00.0", "0000-00-00 00:00:00");

    testZeroDateTimeBehavior(testCases, null);
  }
}
3,458
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/JdbcWriterCommandsTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import static org.mockito.Mockito.*;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcType;
import org.apache.gobblin.writer.commands.MySqlWriterCommands;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.testng.Assert;
import org.testng.annotations.Test;

import com.google.common.collect.ImmutableMap;
import com.sun.rowset.JdbcRowSetImpl;

/**
 * Unit tests for {@link MySqlWriterCommands}: verifies that date/time/timestamp
 * columns are identified and mapped to the corresponding {@link JdbcType}.
 */
@Test(groups = {"gobblin.writer"})
public class JdbcWriterCommandsTest {

  @Test
  public void testMySqlDateTypeRetrieval() throws SQLException {
    Connection conn = mock(Connection.class);
    PreparedStatement pstmt = mock(PreparedStatement.class);
    when(conn.prepareStatement(any(String.class))).thenReturn(pstmt);

    ResultSet rs = createMockResultSet();
    when(pstmt.executeQuery()).thenReturn(rs);

    MySqlWriterCommands writerCommands = new MySqlWriterCommands(new State(), conn, false);
    Map<String, JdbcType> actual = writerCommands.retrieveDateColumns("db", "users");

    ImmutableMap.Builder<String, JdbcType> builder = ImmutableMap.builder();
    builder.put("date_of_birth", JdbcType.DATE);
    builder.put("last_modified", JdbcType.TIME);
    builder.put("created", JdbcType.TIMESTAMP);
    Map<String, JdbcType> expected = builder.build();

    // TestNG's assertEquals takes (actual, expected) — the original call had
    // the arguments swapped, which produces misleading failure messages.
    Assert.assertEquals(actual, expected);
  }

  /**
   * Builds a fake ResultSet over hard-coded (column_name, column_type) rows.
   *
   * NOTE(review): extends com.sun.rowset.JdbcRowSetImpl, a JDK-internal class
   * that is unavailable on JDK 9+ without --add-exports; consider replacing
   * with a Mockito mock or mockrunner's MockResultSet.
   */
  private ResultSet createMockResultSet() {
    final List<Map<String, String>> expected = new ArrayList<>();
    Map<String, String> entry = new HashMap<>();
    entry.put("column_name", "name");
    entry.put("column_type", "varchar");
    expected.add(entry);

    entry = new HashMap<>();
    entry.put("column_name", "favorite_number");
    entry.put("column_type", "varchar");
    expected.add(entry);

    entry = new HashMap<>();
    entry.put("column_name", "favorite_color");
    entry.put("column_type", "varchar");
    expected.add(entry);

    entry = new HashMap<>();
    entry.put("column_name", "date_of_birth");
    entry.put("column_type", "date");
    expected.add(entry);

    entry = new HashMap<>();
    entry.put("column_name", "last_modified");
    entry.put("column_type", "time");
    expected.add(entry);

    entry = new HashMap<>();
    entry.put("column_name", "created");
    entry.put("column_type", "timestamp");
    expected.add(entry);

    return new JdbcRowSetImpl() {
      private Iterator<Map<String, String>> it = expected.iterator();
      private Map<String, String> curr = null;

      @Override
      public boolean first() {
        it = expected.iterator();
        return next();
      }

      @Override
      public boolean next() {
        if (it.hasNext()) {
          curr = it.next();
          return true;
        }
        return false;
      }

      @Override
      public String getString(String columnLabel) throws SQLException {
        if (curr == null) {
          throw new SQLException("NPE on current cursor.");
        }
        String val = curr.get(columnLabel);
        if (val == null) {
          throw new SQLException(columnLabel + " does not exist.");
        }
        return val;
      }
    };
  }
}
3,459
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/TeradataBufferedInserterTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Types;
import java.util.List;

import org.mockito.Mockito;
import org.testng.annotations.Test;

import com.mockrunner.mock.jdbc.MockParameterMetaData;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.writer.commands.JdbcBufferedInserter;
import org.apache.gobblin.writer.commands.TeradataBufferedInserter;

import static org.apache.gobblin.writer.commands.JdbcBufferedInserter.WRITER_JDBC_INSERT_BATCH_SIZE;
import static org.mockito.Mockito.*;

/**
 * Unit tests for {@link TeradataBufferedInserter}: verifies that inserts are
 * buffered and flushed in JDBC batches of the configured size.
 */
@Test(groups = { "gobblin.writer" }, singleThreaded = true)
public class TeradataBufferedInserterTest extends JdbcBufferedInserterTestBase {

  public void testTeradataBufferedInsert() throws SQLException {
    final int colNums = 20;
    final int batchSize = 10;
    final int entryCount = 107;
    final int colSize = 7;

    State state = new State();
    state.setProp(WRITER_JDBC_INSERT_BATCH_SIZE, Integer.toString(batchSize));

    JdbcBufferedInserter inserter = getJdbcBufferedInserter(state, conn);

    MockParameterMetaData mockMetadata = new MockParameterMetaData();
    mockMetadata.setParameterCount(2);
    // Use java.sql.Types constants instead of the raw magic numbers 12 and -5.
    mockMetadata.setParameterType(0, Types.VARCHAR);
    mockMetadata.setParameterType(1, Types.BIGINT);

    PreparedStatement pstmt = Mockito.mock(PreparedStatement.class);
    when(pstmt.getParameterMetaData()).thenReturn(mockMetadata);
    when(pstmt.executeBatch()).thenReturn(new int[] { 1, 1, 1 });
    when(conn.prepareStatement(anyString())).thenReturn(pstmt);

    List<JdbcEntryData> jdbcEntries = createJdbcEntries(colNums, colSize, entryCount);
    for (JdbcEntryData entry : jdbcEntries) {
      inserter.insert(db, table, entry);
    }
    inserter.flush();

    verify(conn, times(2)).prepareStatement(anyString());
    // Every entry is added to a batch exactly once (was hard-coded 107).
    verify(pstmt, times(entryCount)).addBatch();
    // Batches are executed ceil(entryCount / batchSize) times.
    verify(pstmt, times((int) Math.ceil((double) entryCount / batchSize))).executeBatch();
    verify(pstmt, times(entryCount)).clearParameters();
    verify(pstmt, times(colNums * entryCount)).setObject(anyInt(), any());
    reset(pstmt);
  }

  @Override
  protected JdbcBufferedInserter getJdbcBufferedInserter(State state, Connection conn) {
    return new TeradataBufferedInserter(state, conn);
  }
}
3,460
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/MySqlBufferedInserterTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

import org.testng.annotations.Test;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.writer.commands.JdbcBufferedInserter;
import org.apache.gobblin.writer.commands.MySqlBufferedInserter;

import static org.apache.gobblin.writer.commands.JdbcBufferedInserter.WRITER_JDBC_INSERT_BATCH_SIZE;
import static org.apache.gobblin.writer.commands.JdbcBufferedInserter.WRITER_JDBC_MAX_PARAM_SIZE;
import static org.mockito.Mockito.*;

/**
 * Unit tests for {@link MySqlBufferedInserter}: verifies buffered INSERT/REPLACE
 * statement batching and the per-statement parameter-count limit.
 */
@Test(groups = {"gobblin.writer"}, singleThreaded = true)
public class MySqlBufferedInserterTest extends JdbcBufferedInserterTestBase {

  public void testMySqlBufferedInsert() throws SQLException {
    final int colNums = 20;
    final int batchSize = 10;
    final int entryCount = 107;
    final int colSize = 7;
    // 107 entries in batches of 10 -> 11 executions (was hard-coded 11).
    final int expectedExecuteCount = (int) Math.ceil((double) entryCount / batchSize);

    State state = new State();
    state.setProp(WRITER_JDBC_INSERT_BATCH_SIZE, Integer.toString(batchSize));

    MySqlBufferedInserter inserter = new MySqlBufferedInserter(state, conn, false);

    PreparedStatement pstmt = mock(PreparedStatement.class);
    when(conn.prepareStatement(anyString())).thenReturn(pstmt);

    List<JdbcEntryData> jdbcEntries = createJdbcEntries(colNums, colSize, entryCount);
    for (JdbcEntryData entry : jdbcEntries) {
      inserter.insert(db, table, entry);
    }
    inserter.flush();

    verify(conn, times(2)).prepareStatement(matches("INSERT INTO .*"));
    verify(pstmt, times(expectedExecuteCount)).clearParameters();
    verify(pstmt, times(expectedExecuteCount)).execute();
    verify(pstmt, times(colNums * entryCount)).setObject(anyInt(), any());
    reset(pstmt);
  }

  public void testMySqlBufferedReplace() throws SQLException {
    final int colNums = 20;
    final int batchSize = 10;
    final int entryCount = 107;
    final int colSize = 7;
    // Same batching math as the INSERT test (was hard-coded 11).
    final int expectedExecuteCount = (int) Math.ceil((double) entryCount / batchSize);

    State state = new State();
    state.setProp(WRITER_JDBC_INSERT_BATCH_SIZE, Integer.toString(batchSize));

    // replace=true switches the generated statement from INSERT to REPLACE.
    MySqlBufferedInserter inserter = new MySqlBufferedInserter(state, conn, true);

    PreparedStatement pstmt = mock(PreparedStatement.class);
    when(conn.prepareStatement(anyString())).thenReturn(pstmt);

    List<JdbcEntryData> jdbcEntries = createJdbcEntries(colNums, colSize, entryCount);
    for (JdbcEntryData entry : jdbcEntries) {
      inserter.insert(db, table, entry);
    }
    inserter.flush();

    verify(conn, times(2)).prepareStatement(matches("REPLACE INTO .*"));
    verify(pstmt, times(expectedExecuteCount)).clearParameters();
    verify(pstmt, times(expectedExecuteCount)).execute();
    verify(pstmt, times(colNums * entryCount)).setObject(anyInt(), any());
    reset(pstmt);
  }

  public void testMySqlBufferedInsertParamLimit() throws SQLException {
    final int colNums = 50;
    final int batchSize = 10;
    final int entryCount = 107;
    final int colSize = 3;
    final int maxParamSize = 500;

    State state = new State();
    state.setProp(WRITER_JDBC_INSERT_BATCH_SIZE, Integer.toString(batchSize));
    state.setProp(WRITER_JDBC_MAX_PARAM_SIZE, maxParamSize);

    MySqlBufferedInserter inserter = new MySqlBufferedInserter(state, conn, false);

    PreparedStatement pstmt = mock(PreparedStatement.class);
    when(conn.prepareStatement(anyString())).thenReturn(pstmt);

    List<JdbcEntryData> jdbcEntries = createJdbcEntries(colNums, colSize, entryCount);
    for (JdbcEntryData entry : jdbcEntries) {
      inserter.insert(db, table, entry);
    }
    inserter.flush();

    // The parameter limit shrinks the effective batch size below the configured one.
    int expectedBatchSize = maxParamSize / colNums;
    int expectedExecuteCount = entryCount / expectedBatchSize + 1;
    verify(conn, times(2)).prepareStatement(matches("INSERT INTO .*"));
    verify(pstmt, times(expectedExecuteCount)).clearParameters();
    verify(pstmt, times(expectedExecuteCount)).execute();
    verify(pstmt, times(colNums * entryCount)).setObject(anyInt(), any());
    reset(pstmt);
  }

  @Override
  protected JdbcBufferedInserter getJdbcBufferedInserter(State state, Connection conn) {
    return new MySqlBufferedInserter(state, conn, false);
  }
}
3,461
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/PostgresWriterCommandsTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.writer; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import org.apache.gobblin.configuration.State; import org.apache.gobblin.converter.jdbc.JdbcType; import org.apache.gobblin.writer.commands.PostgresWriterCommands; import org.testng.Assert; import org.testng.annotations.Test; import com.google.common.collect.ImmutableMap; import com.sun.rowset.JdbcRowSetImpl; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @Test(groups = {"gobblin.writer"}) public class PostgresWriterCommandsTest { @Test public void testPostgresDateTypeRetrieval() throws SQLException { Connection conn = mock(Connection.class); PreparedStatement pstmt = mock(PreparedStatement.class); when(conn.prepareStatement(any(String.class), any(Integer.class), any(Integer.class))).thenReturn(pstmt); ResultSet rs = createMockResultSet(); when(pstmt.executeQuery()).thenReturn(rs); PostgresWriterCommands writerCommands = new PostgresWriterCommands(new 
State(), conn, false); Map<String, JdbcType> actual = writerCommands.retrieveDateColumns("db", "users"); ImmutableMap.Builder<String, JdbcType> builder = ImmutableMap.builder(); builder.put("date_of_birth", JdbcType.DATE); builder.put("last_modified", JdbcType.TIME); builder.put("created", JdbcType.TIMESTAMP); Map<String, JdbcType> expected = builder.build(); Assert.assertEquals(expected, actual); } private ResultSet createMockResultSet() { final List<Map<String, String>> expected = new ArrayList<>(); Map<String, String> entry = new HashMap<>(); entry.put("column_name", "name"); entry.put("data_type", "varchar"); expected.add(entry); entry = new HashMap<>(); entry.put("column_name", "favorite_number"); entry.put("data_type", "varchar"); expected.add(entry); entry = new HashMap<>(); entry.put("column_name", "favorite_color"); entry.put("data_type", "varchar"); expected.add(entry); entry = new HashMap<>(); entry.put("column_name", "date_of_birth"); entry.put("data_type", "date"); expected.add(entry); entry = new HashMap<>(); entry.put("column_name", "last_modified"); entry.put("data_type", "time without time zone"); expected.add(entry); entry = new HashMap<>(); entry.put("column_name", "created"); entry.put("data_type", "timestamp with time zone"); expected.add(entry); return new JdbcRowSetImpl() { private Iterator<Map<String, String>> it = expected.iterator(); private Map<String, String> curr = null; @Override public boolean first() { it = expected.iterator(); return next(); } @Override public boolean next() { if (it.hasNext()) { curr = it.next(); return true; } return false; } @Override public String getString(String columnLabel) throws SQLException { if (curr == null) { throw new SQLException("NPE on current cursor."); } String val = curr.get(columnLabel); if (val == null) { throw new SQLException(columnLabel + " does not exist."); } return val; } }; } }
3,462
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/JdbcWriterTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.writer; import java.io.IOException; import java.sql.Connection; import java.sql.SQLException; import org.testng.Assert; import org.testng.annotations.Test; import org.apache.gobblin.configuration.State; import org.apache.gobblin.writer.commands.JdbcWriterCommands; import static org.mockito.Mockito.*; @Test(groups = {"gobblin.writer"}) public class JdbcWriterTest { @Test public void writeAndCommitTest() throws SQLException, IOException { final String database = "db"; final String table = "users"; final int writeCount = 25; JdbcWriterCommands writerCommands = mock(JdbcWriterCommands.class); Connection conn = mock(Connection.class); try (JdbcWriter writer = new JdbcWriter(writerCommands, new State(), database, table, conn)) { for(int i = 0; i < writeCount; i++) { writer.write(null); } writer.commit(); Assert.assertEquals(writer.recordsWritten(), writeCount); } verify(writerCommands, times(writeCount)).insert(anyString(), anyString(), any()); verify(conn, times(1)).commit(); verify(conn, never()).rollback(); verify(writerCommands, times(1)).flush(); verify(conn, times(1)).close(); } @Test public void writeFailRollbackTest() throws SQLException, IOException { final String 
database = "db"; final String table = "users"; JdbcWriterCommands writerCommands = mock(JdbcWriterCommands.class); Connection conn = mock(Connection.class); doThrow(RuntimeException.class).when(writerCommands).insert(anyString(), anyString(), any()); JdbcWriter writer = new JdbcWriter(writerCommands, new State(), database, table, conn); try { writer.write(null); Assert.fail("Test case didn't throw Exception."); } catch (RuntimeException e) { Assert.assertTrue(e instanceof RuntimeException); } writer.close(); verify(writerCommands, times(1)).insert(anyString(), anyString(), any()); verify(conn, times(1)).rollback(); verify(conn, never()).commit(); verify(conn, times(1)).close(); Assert.assertEquals(writer.recordsWritten(), 0L); } }
3,463
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/JdbcBufferedInserterTestBase.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.writer; import static org.mockito.Mockito.mock; import java.sql.Connection; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; import org.apache.commons.lang.RandomStringUtils; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import org.apache.gobblin.configuration.State; import org.apache.gobblin.converter.jdbc.JdbcEntryData; import org.apache.gobblin.converter.jdbc.JdbcEntryDatum; import org.apache.gobblin.writer.commands.JdbcBufferedInserter; @Test(groups = {"gobblin.writer"}, singleThreaded=true) public abstract class JdbcBufferedInserterTestBase { protected Connection conn; protected String db = "db"; protected String table = "stg"; protected abstract JdbcBufferedInserter getJdbcBufferedInserter(State state, Connection conn); @BeforeMethod public void setUp() throws Exception { this.conn = mock(Connection.class); } protected List<JdbcEntryData> createJdbcEntries(int colNums, int colSize, int entryCount) { Set<String> colNames = new HashSet<>(); while (colNames.size() < colNums) { String colName = RandomStringUtils.randomAlphabetic(colSize); if 
(colNames.contains(colName)) { continue; } colNames.add(colName); } List<JdbcEntryData> result = new ArrayList<>(); for (int i = 0; i < entryCount; i++) { result.add(createJdbcEntry(colNames, colSize)); } return result; } private JdbcEntryData createJdbcEntry(Collection<String> colNames, int colSize) { List<JdbcEntryDatum> datumList = new ArrayList<>(); for (String colName : colNames) { JdbcEntryDatum datum = new JdbcEntryDatum(colName, RandomStringUtils.randomAlphabetic(colSize)); datumList.add(datum); } return new JdbcEntryData(datumList); } }
3,464
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/JdbcWriterInitializerTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;

import org.apache.commons.lang.StringUtils;
import org.mockito.InOrder;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

import com.google.common.collect.Lists;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.publisher.JdbcPublisher;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.writer.Destination.DestinationType;
import org.apache.gobblin.writer.commands.JdbcWriterCommands;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;
import org.apache.gobblin.writer.initializer.JdbcWriterInitializer;

import static org.mockito.Mockito.*;

/**
 * Unit tests for {@link JdbcWriterInitializer} covering staging-table lifecycle decisions:
 * skipping staging entirely, truncating the destination, reusing a user-supplied staging
 * table (with/without truncation), and creating a temporary staging table.
 * Ordering of calls on the mocked {@link JdbcWriterCommands} is significant (InOrder checks).
 */
@Test(groups = { "gobblin.writer" })
public class JdbcWriterInitializerTest {
  private static final String DB = "db";
  private static final String DEST_TABLE = "dest";
  private static final String STAGING_TABLE = "stage";

  private State state;
  private WorkUnit workUnit;
  private List<WorkUnit> workUnits;
  private JdbcWriterCommandsFactory factory;
  private JdbcWriterCommands commands;
  private JdbcWriterInitializer initializer;
  private Connection conn;

  /**
   * Fresh fixture per test: a spied initializer wired to a mocked connection and a mocked
   * {@link JdbcWriterCommands} (returned by the mocked factory for that connection).
   */
  @BeforeMethod
  private void setup() throws SQLException {
    this.state = new State();
    this.state.setProp(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, DestinationType.MYSQL.name());
    this.state.setProp(JdbcPublisher.JDBC_PUBLISHER_DATABASE_NAME, DB);
    this.state.setProp(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, DEST_TABLE);
    this.workUnit = WorkUnit.createEmpty();
    this.workUnits = Lists.newArrayList();
    this.workUnits.add(this.workUnit);
    this.factory = mock(JdbcWriterCommandsFactory.class);
    this.commands = mock(JdbcWriterCommands.class);
    this.conn = mock(Connection.class);
    doReturn(this.commands).when(this.factory).newInstance(any(Destination.class), eq(this.conn));
    // branches=1, branchId=0: single-branch initializer.
    this.initializer = new JdbcWriterInitializer(this.state, this.workUnits, this.factory, 1, 0);
    this.initializer = spy(this.initializer);
    // Intercept connection creation so no real database is touched.
    doReturn(this.conn).when(this.initializer).createConnection();
  }

  /**
   * With a "partial" commit policy and job-level publish disabled, the initializer should
   * write directly to the destination table and never create/truncate/drop a staging table.
   */
  public void skipStagingTable() throws SQLException {
    this.state.setProp(ConfigurationKeys.JOB_COMMIT_POLICY_KEY, "partial");
    this.state.setProp(ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL, Boolean.toString(false));

    this.initializer.initialize();
    this.initializer.close();

    // NOTE(review): TestNG's assertEquals signature is (actual, expected) — these
    // arguments appear reversed, which only affects failure messages.
    Assert.assertEquals(DEST_TABLE, this.workUnit.getProp(ConfigurationKeys.WRITER_STAGING_TABLE));
    verify(this.commands, never()).createTableStructure(anyString(), anyString(), anyString());
    verify(this.commands, never()).truncate(anyString(), anyString());
    verify(this.commands, never()).drop(anyString(), anyString());
  }

  /**
   * Same skip-staging setup, but with "replace final table" enabled: the destination table is
   * truncated exactly once during initialize() and not again on close().
   */
  public void skipStagingTableTruncateDestTable() throws SQLException {
    this.state.setProp(ConfigurationKeys.JOB_COMMIT_POLICY_KEY, "partial");
    this.state.setProp(ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL, Boolean.toString(false));
    this.state.setProp(JdbcPublisher.JDBC_PUBLISHER_REPLACE_FINAL_TABLE, Boolean.toString(true));

    this.initializer.initialize();

    Assert.assertEquals(DEST_TABLE, this.workUnit.getProp(ConfigurationKeys.WRITER_STAGING_TABLE));
    verify(this.commands, never()).createTableStructure(anyString(), anyString(), anyString());
    InOrder inOrder = inOrder(this.commands);
    inOrder.verify(this.commands, times(1)).truncate(DB, DEST_TABLE);

    this.initializer.close();
    // No further truncation after close.
    inOrder.verify(this.commands, never()).truncate(anyString(), anyString());
    verify(this.commands, never()).drop(anyString(), anyString());
  }

  /**
   * A user-provided (and empty) staging table should be used as-is: no table creation,
   * truncation, or dropping by the initializer.
   */
  public void userCreatedStagingTable() throws SQLException {
    this.state.setProp(ConfigurationKeys.WRITER_STAGING_TABLE, STAGING_TABLE);
    when(this.commands.isEmpty(DB, STAGING_TABLE)).thenReturn(Boolean.TRUE);

    this.initializer.initialize();

    Assert.assertEquals(STAGING_TABLE, this.workUnit.getProp(ConfigurationKeys.WRITER_STAGING_TABLE));
    verify(this.commands, never()).createTableStructure(anyString(), anyString(), anyString());
    verify(this.commands, never()).truncate(anyString(), anyString());
    verify(this.commands, never()).drop(anyString(), anyString());
  }

  /**
   * A user-provided staging table with truncation enabled: truncated once during initialize()
   * and once more during close(), but never created or dropped.
   */
  public void userCreatedStagingTableTruncate() throws SQLException {
    this.state.setProp(ConfigurationKeys.WRITER_STAGING_TABLE, STAGING_TABLE);
    this.state.setProp(ConfigurationKeys.WRITER_TRUNCATE_STAGING_TABLE, Boolean.toString(true));
    when(this.commands.isEmpty(DB, STAGING_TABLE)).thenReturn(Boolean.TRUE);

    this.initializer.initialize();

    Assert.assertEquals(STAGING_TABLE, this.workUnit.getProp(ConfigurationKeys.WRITER_STAGING_TABLE));
    InOrder inOrder = inOrder(this.commands);
    inOrder.verify(this.commands, times(1)).truncate(DB, STAGING_TABLE);

    this.initializer.close();
    inOrder.verify(this.commands, times(1)).truncate(DB, STAGING_TABLE);
    verify(this.commands, never()).createTableStructure(anyString(), anyString(), anyString());
    verify(this.commands, never()).drop(anyString(), anyString());
  }

  /**
   * No staging table configured: the initializer must create one itself. The metadata mock
   * reports no existing tables (rs.next() == false), so the generated name is accepted.
   * The test pins the call sequence: create, drop, create during initialize(); drop on close();
   * never truncate. (Presumably the intermediate create/drop probes name availability —
   * confirm against JdbcWriterInitializer.)
   */
  public void initializeWithCreatingStagingTable() throws SQLException {
    when(this.commands.isEmpty(DB, STAGING_TABLE)).thenReturn(Boolean.TRUE);
    DatabaseMetaData metadata = mock(DatabaseMetaData.class);
    when(this.conn.getMetaData()).thenReturn(metadata);
    ResultSet rs = mock(ResultSet.class);
    when(metadata.getTables(any(), anyString(), anyString(), any(String[].class))).thenReturn(rs);
    when(rs.next()).thenReturn(Boolean.FALSE);

    this.initializer.initialize();

    // A non-empty staging table name must have been generated and recorded on the work unit.
    Assert.assertTrue(!StringUtils.isEmpty(this.workUnit.getProp(ConfigurationKeys.WRITER_STAGING_TABLE)));
    InOrder inOrder = inOrder(this.commands);
    inOrder.verify(this.commands, times(1)).createTableStructure(anyString(), anyString(), anyString());
    inOrder.verify(this.commands, times(1)).drop(anyString(), anyString());
    inOrder.verify(this.commands, times(1)).createTableStructure(anyString(), anyString(), anyString());

    this.initializer.close();
    inOrder.verify(this.commands, times(1)).drop(anyString(), anyString());
    inOrder.verify(this.commands, never()).truncate(anyString(), anyString());
  }
}
3,465
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/test/java/org/apache/gobblin/writer/JdbcPublisherTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import static org.mockito.Mockito.*;

import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.publisher.JdbcPublisher;
import org.apache.gobblin.writer.commands.JdbcWriterCommands;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;
import org.mockito.InOrder;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

/**
 * Unit tests for {@link JdbcPublisher}: verifies the staging-to-destination copy,
 * optional destination wipe (replace mode), and rollback on copy failure.
 * Call ordering on the mocked connection/commands is part of the contract (InOrder checks).
 */
@Test(groups = {"gobblin.writer"})
public class JdbcPublisherTest {
  private String database = "db";
  private String stagingTable = "stg";
  private String destinationTable = "dest";

  private State state;
  private JdbcWriterCommands commands;
  private JdbcWriterCommandsFactory factory;
  private Connection conn;
  private WorkUnitState workUnitState;
  private Collection<WorkUnitState> workUnitStates;
  private JdbcPublisher publisher;

  /**
   * Fresh fixture per test: a spied publisher whose createConnection() is intercepted to
   * return a mocked connection, with one mocked work-unit state pointing at the staging table.
   */
  @BeforeMethod
  private void setup() {
    state = new State();
    state.setProp(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, destinationTable);
    state.setProp(JdbcPublisher.JDBC_PUBLISHER_DATABASE_NAME, database);
    commands = mock(JdbcWriterCommands.class);
    factory = mock(JdbcWriterCommandsFactory.class);
    conn = mock(Connection.class);
    when(factory.newInstance(state, conn)).thenReturn(commands);

    workUnitStates = new ArrayList<>();
    workUnitState = mock(WorkUnitState.class);
    when(workUnitState.getProp(ConfigurationKeys.WRITER_STAGING_TABLE)).thenReturn(stagingTable);
    workUnitStates.add(workUnitState);

    publisher = new JdbcPublisher(state, factory);
    publisher = spy(publisher);
    doReturn(conn).when(publisher).createConnection();
  }

  @AfterMethod
  private void cleanup() throws IOException {
    publisher.close();
  }

  /**
   * Happy path: autocommit disabled, staging copied into destination, work unit marked
   * COMMITTED, then commit and close — in that order. No destination wipe.
   */
  public void testPublish() throws IOException, SQLException {
    publisher.publish(workUnitStates);

    InOrder inOrder = inOrder(conn, commands, workUnitState);
    inOrder.verify(conn, times(1)).setAutoCommit(false);
    inOrder.verify(commands, times(1)).copyTable(database, stagingTable, destinationTable);
    inOrder.verify(workUnitState, times(1)).setWorkingState(WorkUnitState.WorkingState.COMMITTED);
    inOrder.verify(conn, times(1)).commit();
    inOrder.verify(conn, times(1)).close();
    verify(commands, never()).deleteAll(database, destinationTable);
  }

  /**
   * Replace mode: the destination table is emptied (deleteAll) before the copy; otherwise
   * the same sequence as the happy path.
   */
  public void testPublishReplaceOutput() throws IOException, SQLException {
    state.setProp(JdbcPublisher.JDBC_PUBLISHER_REPLACE_FINAL_TABLE, Boolean.toString(true));

    publisher.publish(workUnitStates);

    InOrder inOrder = inOrder(conn, commands, workUnitState);
    inOrder.verify(conn, times(1)).setAutoCommit(false);
    inOrder.verify(commands, times(1)).deleteAll(database, destinationTable);
    inOrder.verify(commands, times(1)).copyTable(database, stagingTable, destinationTable);
    inOrder.verify(workUnitState, times(1)).setWorkingState(WorkUnitState.WorkingState.COMMITTED);
    inOrder.verify(conn, times(1)).commit();
    inOrder.verify(conn, times(1)).close();
  }

  /**
   * Failure path: copyTable throws, so the publisher must propagate the exception, roll the
   * connection back, close it, and never commit nor mark the work unit's state.
   */
  public void testPublishFailure() throws SQLException, IOException {
    doThrow(RuntimeException.class).when(commands).copyTable(database, stagingTable, destinationTable);
    try {
      publisher.publish(workUnitStates);
      Assert.fail("Test case didn't throw Exception.");
    } catch (RuntimeException e) {
      // Expected from the doThrow stub above; the assertion below is tautological.
      Assert.assertTrue(e instanceof RuntimeException);
    }

    InOrder inOrder = inOrder(conn, commands, workUnitState);
    inOrder.verify(conn, times(1)).setAutoCommit(false);
    inOrder.verify(commands, times(1)).copyTable(database, stagingTable, destinationTable);
    inOrder.verify(conn, times(1)).rollback();
    inOrder.verify(conn, times(1)).close();
    verify(conn, never()).commit();
    verify(commands, never()).deleteAll(database, destinationTable);
    verify(workUnitState, never()).setWorkingState(any(WorkUnitState.WorkingState.class));
  }
}
3,466
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter/initializer/AvroToJdbcEntryConverterInitializer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter.initializer; import java.sql.Connection; import java.sql.SQLException; import java.util.Collection; import java.util.Map; import javax.sql.DataSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.gson.Gson; import org.apache.gobblin.configuration.State; import org.apache.gobblin.converter.jdbc.AvroToJdbcEntryConverter; import org.apache.gobblin.converter.jdbc.JdbcType; import org.apache.gobblin.publisher.JdbcPublisher; import org.apache.gobblin.source.workunit.WorkUnit; import org.apache.gobblin.util.ForkOperatorUtils; import org.apache.gobblin.util.jdbc.DataSourceBuilder; import org.apache.gobblin.writer.commands.JdbcWriterCommands; import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory; /** * Initialize for AvroToJdbcEntryConverter. ConverterInitializer is being invoked at driver which means * it will only invoked once per converter. This is to remove any duplication work among task, and * any initialization that is same per task can be put in here. 
*/ public class AvroToJdbcEntryConverterInitializer implements ConverterInitializer { private static final Logger LOG = LoggerFactory.getLogger(AvroToJdbcEntryConverterInitializer.class); private final State state; private final Collection<WorkUnit> workUnits; private final JdbcWriterCommandsFactory jdbcWriterCommandsFactory; private final int branches; private final int branchId; public AvroToJdbcEntryConverterInitializer(State state, Collection<WorkUnit> workUnits) { this(state, workUnits, new JdbcWriterCommandsFactory(), 1, 0); } public AvroToJdbcEntryConverterInitializer(State state, Collection<WorkUnit> workUnits, JdbcWriterCommandsFactory jdbcWriterCommandsFactory, int branches, int branchId) { this.state = state; this.workUnits = workUnits; this.jdbcWriterCommandsFactory = jdbcWriterCommandsFactory; this.branches = branches; this.branchId = branchId; } /** * AvroToJdbcEntryConverter list of date columns existing in the table. As we don't want each converter * making a connection against database to get the same information. Here, ConverterInitializer will * retrieve it and store it into WorkUnit so that AvroToJdbcEntryConverter will use it later. 
* * {@inheritDoc} * @see org.apache.gobblin.initializer.Initializer#initialize() */ @Override public void initialize() { String table = Preconditions.checkNotNull(this.state.getProp(ForkOperatorUtils .getPropertyNameForBranch(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, this.branches, this.branchId))); String db = Preconditions.checkNotNull(this.state.getProp(ForkOperatorUtils .getPropertyNameForBranch(JdbcPublisher.JDBC_PUBLISHER_DATABASE_NAME, this.branches, this.branchId))); try (Connection conn = createConnection()) { JdbcWriterCommands commands = this.jdbcWriterCommandsFactory.newInstance(this.state, conn); Map<String, JdbcType> dateColumnMapping = commands.retrieveDateColumns(db, table); LOG.info("Date column mapping: " + dateColumnMapping); final String dateFieldsKey = ForkOperatorUtils.getPropertyNameForBranch( AvroToJdbcEntryConverter.CONVERTER_AVRO_JDBC_DATE_FIELDS, this.branches, this.branchId); for (WorkUnit wu : this.workUnits) { wu.setProp(dateFieldsKey, new Gson().toJson(dateColumnMapping)); } } catch (SQLException e) { throw new RuntimeException(e); } } @Override public void close() {} @VisibleForTesting public Connection createConnection() throws SQLException { DataSource dataSource = DataSourceBuilder.builder().url(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_URL)) .driver(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_DRIVER)) .userName(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_USERNAME)) .passWord(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_PASSWORD)) .cryptoKeyLocation(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_ENCRYPTION_KEY_LOC)).maxActiveConnections(1) .state(this.state).build(); return dataSource.getConnection(); } }
3,467
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter/jdbc/JdbcEntryData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter.jdbc; import java.util.Iterator; import java.util.Map; import lombok.ToString; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSortedMap; @ToString public class JdbcEntryData implements Iterable<JdbcEntryDatum> { private final Map<String, JdbcEntryDatum> jdbcEntryData; //Pair of column name and Object public JdbcEntryData(Iterable<JdbcEntryDatum> jdbcEntryDatumEntries) { Preconditions.checkNotNull(jdbcEntryDatumEntries); ImmutableMap.Builder<String, JdbcEntryDatum> builder = ImmutableSortedMap.naturalOrder(); for (JdbcEntryDatum datum : jdbcEntryDatumEntries) { builder.put(datum.getColumnName(), datum); } this.jdbcEntryData = builder.build(); } /** * @param columnName Column name case sensitive, as most of RDBMS does. * @return Returns Object which is JDBC compatible -- can be used for PreparedStatement.setObject */ public Object getVal(String columnName) { JdbcEntryDatum datum = this.jdbcEntryData.get(columnName); return datum == null ? 
null : datum.getVal(); } /** * Provides iterator sorted by column name * {@inheritDoc} * @see java.lang.Iterable#iterator() */ @Override public Iterator<JdbcEntryDatum> iterator() { return this.jdbcEntryData.values().iterator(); } }
3,468
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter/jdbc/JdbcEntryDatum.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter.jdbc; import lombok.EqualsAndHashCode; import lombok.ToString; import com.google.common.base.Preconditions; @ToString @EqualsAndHashCode(of = { "columnName" }) public class JdbcEntryDatum { private final String columnName; private final Object val; public JdbcEntryDatum(String columnName, Object val) { this.columnName = Preconditions.checkNotNull(columnName); this.val = val; } public String getColumnName() { return this.columnName; } public Object getVal() { return this.val; } }
3,469
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter/jdbc/JdbcEntrySchema.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter.jdbc;

import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import lombok.EqualsAndHashCode;
import lombok.ToString;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedMap;

/**
 * Immutable JDBC schema: a collection of {@link JdbcEntryMetaDatum} keyed by column name,
 * kept in natural (alphabetical) column-name order.
 */
@ToString
@EqualsAndHashCode
public class JdbcEntrySchema implements Iterable<JdbcEntryMetaDatum> {
  // Column name -> (column name, JdbcType) metadata, sorted by column name.
  private final Map<String, JdbcEntryMetaDatum> jdbcMetaData;

  /**
   * Builds the schema from the given metadata entries.
   * @param jdbcMetaDatumEntries metadata entries; must not be null and must have unique column names
   */
  public JdbcEntrySchema(Iterable<JdbcEntryMetaDatum> jdbcMetaDatumEntries) {
    Preconditions.checkNotNull(jdbcMetaDatumEntries);
    // Sorted builder so iteration order is deterministic by column name.
    ImmutableMap.Builder<String, JdbcEntryMetaDatum> sortedBuilder = ImmutableSortedMap.naturalOrder();
    for (JdbcEntryMetaDatum entry : jdbcMetaDatumEntries) {
      sortedBuilder.put(entry.getColumnName(), entry);
    }
    this.jdbcMetaData = sortedBuilder.build();
  }

  /**
   * @param columnName Column name, case sensitive as in most RDBMSs.
   * @return the {@link JdbcType} for the column, or null if the column name is unknown.
   */
  public JdbcType getJdbcType(String columnName) {
    JdbcEntryMetaDatum datum = this.jdbcMetaData.get(columnName);
    if (datum == null) {
      return null;
    }
    return datum.getJdbcType();
  }

  /** @return the set of column names in this schema. */
  public Set<String> getColumnNames() {
    return this.jdbcMetaData.keySet();
  }

  /**
   * Provides an iterator sorted by column name.
   * {@inheritDoc}
   * @see java.lang.Iterable#iterator()
   */
  @Override
  public Iterator<JdbcEntryMetaDatum> iterator() {
    return this.jdbcMetaData.values().iterator();
  }
}
3,470
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter/jdbc/JdbcEntryMetaDatum.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter.jdbc; import lombok.EqualsAndHashCode; import lombok.ToString; import com.google.common.base.Preconditions; @ToString @EqualsAndHashCode(of = { "columnName" }) public class JdbcEntryMetaDatum { private final String columnName; private final JdbcType jdbcType; public JdbcEntryMetaDatum(String columnName, JdbcType jdbcType) { this.columnName = Preconditions.checkNotNull(columnName); this.jdbcType = Preconditions.checkNotNull(jdbcType); } public String getColumnName() { return this.columnName; } public JdbcType getJdbcType() { return this.jdbcType; } }
3,471
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter/jdbc/AvroToJdbcEntryConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.converter.jdbc;

import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.reflect.TypeToken;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.converter.initializer.AvroToJdbcEntryConverterInitializer;
import org.apache.gobblin.converter.initializer.ConverterInitializer;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;

/**
 * Converts Avro Schema into JdbcEntrySchema
 * Converts Avro GenericRecord into JdbcEntryData
 * Converts Avro field name for JDBC counterpart.
 *
 * This converter is written based on Avro 1.7.7 specification https://avro.apache.org/docs/1.7.7/spec.html
 */
public class AvroToJdbcEntryConverter extends Converter<Schema, JdbcEntrySchema, GenericRecord, JdbcEntryData> {
  public static final String CONVERTER_AVRO_JDBC_DATE_FIELDS = "converter.avro.jdbc.date_fields";
  private static final String AVRO_NESTED_COLUMN_DELIMITER = ".";
  private static final String JDBC_FLATTENED_COLUMN_DELIMITER = "_";
  private static final String AVRO_NESTED_COLUMN_DELIMITER_REGEX_COMPATIBLE = "\\.";
  private static final Splitter AVRO_RECORD_LEVEL_SPLITTER =
      Splitter.on(AVRO_NESTED_COLUMN_DELIMITER).omitEmptyStrings();
  private static final Logger LOG = LoggerFactory.getLogger(AvroToJdbcEntryConverter.class);

  // Direct Avro primitive/enum -> JDBC type mapping. Types absent here (e.g. DATE/TIME/TIMESTAMP)
  // are only reachable via the date-fields mapping supplied through CONVERTER_AVRO_JDBC_DATE_FIELDS.
  private static final Map<Type, JdbcType> AVRO_TYPE_JDBC_TYPE_MAPPING =
      ImmutableMap.<Type, JdbcType> builder()
          .put(Type.BOOLEAN, JdbcType.BOOLEAN)
          .put(Type.INT, JdbcType.INTEGER)
          .put(Type.LONG, JdbcType.BIGINT)
          .put(Type.FLOAT, JdbcType.FLOAT)
          .put(Type.DOUBLE, JdbcType.DOUBLE)
          .put(Type.STRING, JdbcType.VARCHAR)
          .put(Type.ENUM, JdbcType.VARCHAR).build();

  private static final Set<Type> AVRO_SUPPORTED_TYPES =
      ImmutableSet.<Type> builder()
          .addAll(AVRO_TYPE_JDBC_TYPE_MAPPING.keySet())
          .add(Type.UNION)
          .add(Type.RECORD)
          .build();

  private static final Set<JdbcType> JDBC_SUPPORTED_TYPES =
      ImmutableSet.<JdbcType> builder()
          .addAll(AVRO_TYPE_JDBC_TYPE_MAPPING.values())
          .add(JdbcType.DATE)
          .add(JdbcType.TIME)
          .add(JdbcType.TIMESTAMP)
          .build();

  // Optional user-supplied Avro name -> JDBC name mapping (from configuration).
  private Optional<Map<String, String>> avroToJdbcColPairs = Optional.absent();
  // Reverse lookup (JDBC name -> Avro name), populated lazily while converting the schema.
  private Map<String, String> jdbcToAvroColPairs = new HashMap<>();

  public AvroToJdbcEntryConverter() {
    super();
  }

  @VisibleForTesting
  public AvroToJdbcEntryConverter(WorkUnitState workUnit) {
    init(workUnit);
  }

  /**
   * Builds the field-name mapping between Avro and JDBC from the
   * {@link ConfigurationKeys#CONVERTER_AVRO_JDBC_ENTRY_FIELDS_PAIRS} JSON property, if present.
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.Converter#init(org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public Converter<Schema, JdbcEntrySchema, GenericRecord, JdbcEntryData> init(WorkUnitState workUnit) {
    String avroToJdbcFieldsPairJsonStr = workUnit.getProp(ConfigurationKeys.CONVERTER_AVRO_JDBC_ENTRY_FIELDS_PAIRS);
    if (!StringUtils.isEmpty(avroToJdbcFieldsPairJsonStr)) {
      if (!this.avroToJdbcColPairs.isPresent()) {
        // Note: the reverse (jdbc -> avro) map is populated lazily in
        // tryConvertAvroColNameToJdbcColName(); a previously dead reverse-map builder was removed here.
        ImmutableMap.Builder<String, String> avroToJdbcBuilder = ImmutableMap.builder();
        JsonObject json = new JsonParser().parse(avroToJdbcFieldsPairJsonStr).getAsJsonObject();
        for (Map.Entry<String, JsonElement> entry : json.entrySet()) {
          if (!entry.getValue().isJsonPrimitive()) {
            throw new IllegalArgumentException("Json value should be a primitive String. "
                + ConfigurationKeys.CONVERTER_AVRO_JDBC_ENTRY_FIELDS_PAIRS + " : " + avroToJdbcFieldsPairJsonStr);
          }
          avroToJdbcBuilder.put(entry.getKey(), entry.getValue().getAsString());
        }
        this.avroToJdbcColPairs = Optional.of((Map<String, String>) avroToJdbcBuilder.build());
      }
    }
    return this;
  }

  /**
   * Converts Avro schema to JdbcEntrySchema.
   *
   * Few precondition to the Avro schema
   * 1. Avro schema should have one entry type record at first depth.
   * 2. Avro schema can recurse by having record inside record.
   * 3. Supported Avro primitive types and conversion
   *    boolean --> java.lang.Boolean
   *    int --> java.lang.Integer
   *    long --> java.lang.Long or java.sql.Date , java.sql.Time , java.sql.Timestamp
   *    float --> java.lang.Float
   *    double --> java.lang.Double
   *    bytes --> byte[]
   *    string --> java.lang.String
   *    null: only allowed if it's within union (see complex types for more details)
   * 4. Supported Avro complex types
   *    Records: Supports nested record type as well.
   *    Enum --> java.lang.String
   *    Unions --> Only allowed if it have one primitive type in it, along with Record type,
   *    or null type with one primitive type where null will be ignored.
   *    Once Union is narrowed down to one primitive type, it will follow conversion of primitive type above.
   * 5. In order to make conversion from Avro long type to java.sql.Date or java.sql.Time or java.sql.Timestamp,
   *    converter will get table metadata from JDBC.
   * 6. As it needs JDBC connection from condition 5, it also assumes that it will use JDBC publisher
   *    where it will get connection information from.
   * 7. Conversion assumes that both schema, Avro and JDBC, uses same column name where name space in Avro is ignored.
   *    For case sensitivity, Avro is case sensitive where it differs in JDBC based on underlying database.
   *    As Avro is case sensitive, column name equality also take case sensitive in to account.
   * {@inheritDoc}
   * @see org.apache.gobblin.converter.Converter#convertSchema(java.lang.Object, org.apache.gobblin.configuration.WorkUnitState)
   */
  @Override
  public JdbcEntrySchema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
    LOG.info("Converting schema " + inputSchema);
    Preconditions.checkArgument(Type.RECORD.equals(inputSchema.getType()),
        "%s is expected for the first level element in Avro schema %s", Type.RECORD, inputSchema);

    Map<String, Type> avroColumnType = flatten(inputSchema);

    // Date/time column mapping is required and comes as a JSON map of column name -> JdbcType.
    String jsonStr = Preconditions.checkNotNull(workUnit.getProp(CONVERTER_AVRO_JDBC_DATE_FIELDS));
    java.lang.reflect.Type typeOfMap = new TypeToken<Map<String, JdbcType>>() {}.getType();
    Map<String, JdbcType> dateColumnMapping = new Gson().fromJson(jsonStr, typeOfMap);
    LOG.info("Date column mapping: " + dateColumnMapping);

    List<JdbcEntryMetaDatum> jdbcEntryMetaData = Lists.newArrayList();
    for (Map.Entry<String, Type> avroEntry : avroColumnType.entrySet()) {
      String colName = tryConvertAvroColNameToJdbcColName(avroEntry.getKey());
      // Date mapping wins over the generic Avro -> JDBC type mapping.
      // (Local renamed from "JdbcType", which shadowed the type name.)
      JdbcType jdbcType = dateColumnMapping.get(colName);
      if (jdbcType == null) {
        jdbcType = AVRO_TYPE_JDBC_TYPE_MAPPING.get(avroEntry.getValue());
      }
      Preconditions.checkNotNull(jdbcType, "Failed to convert " + avroEntry
          + " AVRO_TYPE_JDBC_TYPE_MAPPING: " + AVRO_TYPE_JDBC_TYPE_MAPPING
          + " , dateColumnMapping: " + dateColumnMapping);
      jdbcEntryMetaData.add(new JdbcEntryMetaDatum(colName, jdbcType));
    }
    JdbcEntrySchema converted = new JdbcEntrySchema(jdbcEntryMetaData);
    LOG.info("Converted schema into " + converted);
    return converted;
  }

  /**
   * Convert Avro column name to JDBC column name. If name mapping is defined, follow it. Otherwise,
   * just return avro column name, while replacing nested column delimiter, dot, to underscore.
   * This method also updates, mapping from JDBC column name to Avro column name for reverse look up.
   * @param avroColName
   * @return the JDBC column name
   */
  private String tryConvertAvroColNameToJdbcColName(String avroColName) {
    if (!avroToJdbcColPairs.isPresent()) {
      String converted =
          avroColName.replaceAll(AVRO_NESTED_COLUMN_DELIMITER_REGEX_COMPATIBLE, JDBC_FLATTENED_COLUMN_DELIMITER);
      jdbcToAvroColPairs.put(converted, avroColName);
      return converted;
    }
    String converted = avroToJdbcColPairs.get().get(avroColName);
    converted = converted != null ? converted : avroColName;
    jdbcToAvroColPairs.put(converted, avroColName);
    return converted;
  }

  /**
   * Provides Avro column name based on JDBC column name, using the reverse map built while
   * converting the schema.
   * @param colName JDBC column name
   * @return the original Avro column name
   */
  private String convertJdbcColNameToAvroColName(String colName) {
    return Preconditions.checkNotNull(jdbcToAvroColPairs.get(colName));
  }

  /**
   * Flattens Avro's (possibly recursive) structure and provides field name and type.
   * It assumes that the leaf level field name has unique name.
   * @param schema
   * @return flattened map of dotted field name -> Avro type, in field order
   * @throws SchemaConversionException if there's duplicate name in leaf level of Avro Schema
   */
  private static Map<String, Type> flatten(Schema schema) throws SchemaConversionException {
    Map<String, Type> flattened = new LinkedHashMap<>();
    Schema recordSchema = determineType(schema);
    Preconditions.checkArgument(Type.RECORD.equals(recordSchema.getType()),
        "%s is expected. Schema: %s", Type.RECORD, recordSchema);
    for (Field f : recordSchema.getFields()) {
      produceFlattenedHelper(f, flattened);
    }
    return flattened;
  }

  /** Recursion helper for {@link #flatten(Schema)}: adds one field (or its nested fields) to the map. */
  private static void produceFlattenedHelper(Field field, Map<String, Type> flattened)
      throws SchemaConversionException {
    Schema actualSchema = determineType(field.schema());
    if (Type.RECORD.equals(actualSchema.getType())) {
      Map<String, Type> map = flatten(actualSchema);
      for (Entry<String, Type> entry : map.entrySet()) {
        String key = String.format("%s" + AVRO_NESTED_COLUMN_DELIMITER + "%s", field.name(), entry.getKey());
        Type existing = flattened.put(key, entry.getValue());
        Preconditions.checkArgument(existing == null, "Duplicate name detected in Avro schema. Field: " + key);
      }
      return;
    }

    Type existing = flattened.put(field.name(), actualSchema.getType());
    if (existing != null) {
      //No duplicate name allowed when flattening (not considering name space we don't have any assumption between namespace and actual database field name)
      throw new SchemaConversionException("Duplicate name detected in Avro schema. " + field.name());
    }
  }

  /**
   * Resolves the effective schema: the schema itself for supported non-union types, or the single
   * non-null member of a (nullable) union.
   * @throws SchemaConversionException on unsupported types or unions with more than two members
   */
  private static Schema determineType(Schema schema) throws SchemaConversionException {
    if (!AVRO_SUPPORTED_TYPES.contains(schema.getType())) {
      throw new SchemaConversionException(schema.getType() + " is not supported");
    }

    if (!Type.UNION.equals(schema.getType())) {
      return schema;
    }

    //For UNION, only supported avro type with NULL is allowed.
    List<Schema> schemas = schema.getTypes();
    if (schemas.size() > 2) {
      throw new SchemaConversionException("More than two types are not supported " + schemas);
    }

    for (Schema s : schemas) {
      if (Type.NULL.equals(s.getType())) {
        continue;
      }
      return s;
    }

    throw new SchemaConversionException("Cannot determine type of " + schema);
  }

  /**
   * Converts one Avro record into JdbcEntryData, resolving each JDBC column back to its (possibly
   * nested) Avro field and coercing values per the JDBC type.
   * {@inheritDoc}
   */
  @Override
  public Iterable<JdbcEntryData> convertRecord(JdbcEntrySchema outputSchema, GenericRecord record,
      WorkUnitState workUnit) throws DataConversionException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Converting " + record);
    }
    List<JdbcEntryDatum> jdbcEntryData = Lists.newArrayList();
    for (JdbcEntryMetaDatum entry : outputSchema) {
      final String jdbcColName = entry.getColumnName();
      final JdbcType jdbcType = entry.getJdbcType();
      String avroColName = convertJdbcColNameToAvroColName(jdbcColName);
      final Object val = avroRecordValueGet(record, AVRO_RECORD_LEVEL_SPLITTER.split(avroColName).iterator());

      if (val == null) {
        jdbcEntryData.add(new JdbcEntryDatum(jdbcColName, null));
        continue;
      }

      if (!JDBC_SUPPORTED_TYPES.contains(jdbcType)) {
        throw new DataConversionException("Unsupported JDBC type detected " + jdbcType);
      }

      switch (jdbcType) {
        case VARCHAR:
          jdbcEntryData.add(new JdbcEntryDatum(jdbcColName, val.toString()));
          continue;
        case INTEGER:
        case BOOLEAN:
        case BIGINT:
        case FLOAT:
        case DOUBLE:
          jdbcEntryData.add(new JdbcEntryDatum(jdbcColName, val));
          continue;
        // Avro long epoch-millis values are converted to java.sql temporal types.
        case DATE:
          jdbcEntryData.add(new JdbcEntryDatum(jdbcColName, new Date((long) val)));
          continue;
        case TIME:
          jdbcEntryData.add(new JdbcEntryDatum(jdbcColName, new Time((long) val)));
          continue;
        case TIMESTAMP:
          jdbcEntryData.add(new JdbcEntryDatum(jdbcColName, new Timestamp((long) val)));
          continue;
        default:
          throw new DataConversionException(jdbcType + " is not supported");
      }
    }
    JdbcEntryData converted = new JdbcEntryData(jdbcEntryData);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Converted data into " + converted);
    }
    return new SingleRecordIterable<>(converted);
  }

  /**
   * Walks nested records following the dotted-name path segments.
   * @return the leaf value, or null if any record along the path is null
   */
  private Object avroRecordValueGet(GenericRecord record, Iterator<String> recordNameIterator) {
    String name = recordNameIterator.next();
    Object val = record.get(name);
    if (val == null) { //Either leaf value is null or nested Record (represented as UNION) is null
      return null;
    }
    if (!recordNameIterator.hasNext()) { //Leaf
      return val;
    }
    //Recurse
    return avroRecordValueGet((GenericRecord) val, recordNameIterator);
  }

  @Override
  public ConverterInitializer getInitializer(State state, WorkUnitStream workUnits, int branches, int branchId) {
    JdbcWriterCommandsFactory factory = new JdbcWriterCommandsFactory();
    if (workUnits.isSafeToMaterialize()) {
      return new AvroToJdbcEntryConverterInitializer(state, workUnits.getMaterializedWorkUnitCollection(), factory,
          branches, branchId);
    } else {
      throw new RuntimeException(AvroToJdbcEntryConverter.class.getName() + " does not support work unit streams.");
    }
  }
}
3,472
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/converter/jdbc/JdbcType.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.converter.jdbc; /** * Alternative to JAVA 8 JDBCType as Gobblin needs to be Java 7 compatible. */ public enum JdbcType { BIT, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, REAL, DOUBLE, NUMERIC, DECIMAL, CHAR, VARCHAR, LONGVARCHAR, DATE, TIME, TIMESTAMP, BINARY, VARBINARY, LONGVARBINARY, NULL, OTHER, JAVA_OBJECT, DISTINCT, ARRAY, BLOB, CLOB, BOOLEAN; }
3,473
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/publisher/JdbcPublisher.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.publisher;

import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.sql.DataSource;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.jdbc.DataSourceBuilder;
import org.apache.gobblin.writer.commands.JdbcWriterCommands;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;

/**
 * Publishes data into JDBC RDBMS. Expects all the data has been already in staging table.
 */
public class JdbcPublisher extends DataPublisher {
  public static final String JDBC_PUBLISHER_PREFIX = "jdbc.publisher.";
  public static final String JDBC_PUBLISHER_DATABASE_NAME = JDBC_PUBLISHER_PREFIX + "database_name";
  public static final String JDBC_PUBLISHER_FINAL_TABLE_NAME = JDBC_PUBLISHER_PREFIX + "table_name";
  public static final String JDBC_PUBLISHER_REPLACE_FINAL_TABLE = JDBC_PUBLISHER_PREFIX + "replace_table";
  public static final String JDBC_PUBLISHER_USERNAME = JDBC_PUBLISHER_PREFIX + "username";
  public static final String JDBC_PUBLISHER_PASSWORD = JDBC_PUBLISHER_PREFIX + "password";
  public static final String JDBC_PUBLISHER_ENCRYPTION_KEY_LOC = JDBC_PUBLISHER_PREFIX + "encrypt_key_loc";
  public static final String JDBC_PUBLISHER_URL = JDBC_PUBLISHER_PREFIX + "url";
  public static final String JDBC_PUBLISHER_TIMEOUT = JDBC_PUBLISHER_PREFIX + "timeout";
  public static final String JDBC_PUBLISHER_DRIVER = JDBC_PUBLISHER_PREFIX + "driver";

  private static final Logger LOG = LoggerFactory.getLogger(JdbcPublisher.class);
  private final JdbcWriterCommandsFactory jdbcWriterCommandsFactory;

  /**
   * Expects all data is in staging table ready to be published.
   * To validate this, it checks COMMIT_ON_FULL_SUCCESS and PUBLISH_DATA_AT_JOB_LEVEL
   * @param state
   * @param jdbcWriterCommandsFactory factory for the database-specific writer commands
   */
  @VisibleForTesting
  public JdbcPublisher(State state, JdbcWriterCommandsFactory jdbcWriterCommandsFactory) {
    super(state);
    this.jdbcWriterCommandsFactory = jdbcWriterCommandsFactory;
    validate(getState());
  }

  public JdbcPublisher(State state) {
    this(state, new JdbcWriterCommandsFactory());
    // Note: the delegated constructor already calls validate(); a redundant second call was removed.
  }

  /**
   * @param state
   * @throws IllegalArgumentException If job commit policy is not COMMIT_ON_FULL_SUCCESS or is not on PUBLISH_DATA_AT_JOB_LEVEL
   */
  private void validate(State state) {
    // Use the passed-in state consistently (previously the commit-policy check read this.getState()).
    JobCommitPolicy jobCommitPolicy = JobCommitPolicy.getCommitPolicy(state.getProperties());
    if (JobCommitPolicy.COMMIT_ON_FULL_SUCCESS != jobCommitPolicy) {
      throw new IllegalArgumentException(this.getClass().getSimpleName()
          + " won't publish as already commited by task. Job commit policy " + jobCommitPolicy);
    }

    if (!state.getPropAsBoolean(ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL,
        ConfigurationKeys.DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL)) {
      throw new IllegalArgumentException(this.getClass().getSimpleName() + " won't publish as "
          + ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL + " is set as false");
    }
  }

  /** Creates a JDBC connection from the publisher's configured URL/driver/credentials. */
  @VisibleForTesting
  public Connection createConnection() {
    DataSource dataSource = DataSourceBuilder.builder().url(this.state.getProp(JDBC_PUBLISHER_URL))
        .driver(this.state.getProp(JDBC_PUBLISHER_DRIVER)).userName(this.state.getProp(JDBC_PUBLISHER_USERNAME))
        .passWord(this.state.getProp(JDBC_PUBLISHER_PASSWORD))
        .cryptoKeyLocation(this.state.getProp(JDBC_PUBLISHER_ENCRYPTION_KEY_LOC)).maxActiveConnections(1)
        .state(this.state).build();

    try {
      return dataSource.getConnection();
    } catch (SQLException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public void close() throws IOException {}

  @Override
  public void initialize() throws IOException {}

  /**
   * 1. Truncate destination table if requested
   * 2. Move data from staging to destination
   * 3. Update Workunit state
   *
   * TODO: Research on running this in parallel. While testing publishing it in parallel, it turns out delete all from the table locks the table
   * so that copying table threads wait until transaction lock times out and throwing exception(MySQL). Is there a way to avoid this?
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.publisher.DataPublisher#publishData(java.util.Collection)
   */
  @Override
  public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
    LOG.info("Start publishing data");
    int branches = this.state.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1);
    Set<String> emptiedDestTables = Sets.newHashSet();

    final Connection conn = createConnection();
    final JdbcWriterCommands commands = this.jdbcWriterCommandsFactory.newInstance(this.state, conn);
    try {
      // Single transaction across all branches/tables: commit once at the end, roll back on any failure.
      conn.setAutoCommit(false);
      for (int i = 0; i < branches; i++) {
        final String destinationTable = this.state
            .getProp(ForkOperatorUtils.getPropertyNameForBranch(JDBC_PUBLISHER_FINAL_TABLE_NAME, branches, i));
        final String databaseName =
            this.state.getProp(ForkOperatorUtils.getPropertyNameForBranch(JDBC_PUBLISHER_DATABASE_NAME, branches, i));
        Preconditions.checkNotNull(destinationTable);

        // Empty each destination table at most once, even if several branches share it.
        if (this.state.getPropAsBoolean(
            ForkOperatorUtils.getPropertyNameForBranch(JDBC_PUBLISHER_REPLACE_FINAL_TABLE, branches, i), false)
            && !emptiedDestTables.contains(destinationTable)) {
          LOG.info("Deleting table " + destinationTable);
          commands.deleteAll(databaseName, destinationTable);
          emptiedDestTables.add(destinationTable);
        }

        Map<String, List<WorkUnitState>> stagingTables = getStagingTables(states, branches, i);
        for (Map.Entry<String, List<WorkUnitState>> entry : stagingTables.entrySet()) {
          String stagingTable = entry.getKey();
          LOG.info("Copying data from staging table " + stagingTable + " into destination table " + destinationTable);
          commands.copyTable(databaseName, stagingTable, destinationTable);
          for (WorkUnitState workUnitState : entry.getValue()) {
            workUnitState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
          }
        }
      }

      LOG.info("Commit publish data");
      conn.commit();
    } catch (Exception e) {
      try {
        LOG.error("Failed publishing. Rolling back.");
        conn.rollback();
      } catch (SQLException se) {
        LOG.error("Failed rolling back.", se);
      }
      throw new RuntimeException("Failed publishing", e);
    } finally {
      try {
        conn.close();
      } catch (SQLException e) {
        throw new RuntimeException(e);
      }
    }
  }

  /** Groups work-unit states by their staging table for the given branch. */
  private static Map<String, List<WorkUnitState>> getStagingTables(Collection<? extends WorkUnitState> states,
      int branches, int i) {
    Map<String, List<WorkUnitState>> stagingTables = Maps.newHashMap();
    for (WorkUnitState workUnitState : states) {
      String stagingTableKey =
          ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_STAGING_TABLE, branches, i);
      String stagingTable = Preconditions.checkNotNull(workUnitState.getProp(stagingTableKey));
      List<WorkUnitState> existing = stagingTables.get(stagingTable);
      if (existing == null) {
        existing = Lists.newArrayList();
        stagingTables.put(stagingTable, existing);
      }
      existing.add(workUnitState);
    }
    return stagingTables;
  }

  @Override
  public void publishMetadata(Collection<? extends WorkUnitState> states) throws IOException {}
}
3,474
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract/jdbc/TeradataSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.jdbc; import java.io.IOException; import lombok.extern.slf4j.Slf4j; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.extract.QueryBasedSource; import org.apache.gobblin.source.extractor.exception.ExtractPrepareException; import org.apache.gobblin.source.jdbc.TeradataExtractor; import com.google.gson.JsonArray; import com.google.gson.JsonElement; /** * An implementation of Teradata source to get work units * * @author ypopov */ @Slf4j public class TeradataSource extends QueryBasedSource<JsonArray, JsonElement> { public Extractor<JsonArray, JsonElement> getExtractor(WorkUnitState state) throws IOException { Extractor<JsonArray, JsonElement> extractor = null; try { extractor = new TeradataExtractor(state).build(); } catch (ExtractPrepareException e) { log.error("Failed to prepare extractor: error - {}", e.getMessage()); throw new IOException(e); } return extractor; } }
3,475
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract/jdbc/MysqlSource.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.extractor.extract.jdbc;

import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import java.io.IOException;
import java.net.URI;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.DatasetConstants;
import org.apache.gobblin.dataset.DatasetDescriptor;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.exception.ExtractPrepareException;
import org.apache.gobblin.source.extractor.extract.QueryBasedSource;
import org.apache.gobblin.source.jdbc.MysqlExtractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * An implementation of mysql source to get work units
 *
 * @author nveeramr
 */
public class MysqlSource extends QueryBasedSource<JsonArray, JsonElement> {
  private static final Logger LOG = LoggerFactory.getLogger(MysqlSource.class);

  /** Builds a {@link MysqlExtractor} for the work unit, wrapping preparation failures in IOException. */
  @Override
  public Extractor<JsonArray, JsonElement> getExtractor(WorkUnitState state) throws IOException {
    try {
      return new MysqlExtractor(state).build();
    } catch (ExtractPrepareException e) {
      LOG.error("Failed to prepare extractor: error - " + e.getMessage());
      throw new IOException(e);
    }
  }

  /** Attaches a MySQL dataset descriptor (server URL + db.table + JDBC URL) as lineage source info. */
  protected void addLineageSourceInfo(SourceState sourceState, SourceEntity entity, WorkUnit workUnit) {
    String host = sourceState.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    String port = sourceState.getProp(ConfigurationKeys.SOURCE_CONN_PORT);
    String database = sourceState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SCHEMA);
    String serverUrl = "mysql://" + host.trim() + ":" + port;
    String connectionUrl = "jdbc:" + serverUrl + "/" + database.trim();

    String datasetName = database + "." + entity.getSourceEntityName();
    DatasetDescriptor source =
        new DatasetDescriptor(DatasetConstants.PLATFORM_MYSQL, URI.create(serverUrl), datasetName);
    source.addMetadata(DatasetConstants.CONNECTION_URL, connectionUrl);
    if (lineageInfo.isPresent()) {
      lineageInfo.get().setSource(source, workUnit);
    }
  }
}
3,476
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract/jdbc/OracleSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.jdbc; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.exception.ExtractPrepareException; import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.extract.QueryBasedSource; import org.apache.gobblin.source.jdbc.OracleExtractor; /** * An implementation of oracle source to get work units * * @author bjvanov */ public class OracleSource extends QueryBasedSource<JsonArray, JsonElement> { private static final Logger LOG = LoggerFactory.getLogger(OracleSource.class); @Override public Extractor<JsonArray, JsonElement> getExtractor(WorkUnitState state) throws IOException { Extractor<JsonArray, JsonElement> extractor = null; try { extractor = new OracleExtractor(state).build(); } catch (ExtractPrepareException e) { LOG.error("Failed to prepare extractor: error - " + e.getMessage()); throw new IOException(e); } return extractor; } }
3,477
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract/jdbc/PostgresqlSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.jdbc; import java.io.IOException; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.exception.ExtractPrepareException; import org.apache.gobblin.source.extractor.extract.QueryBasedSource; import org.apache.gobblin.source.jdbc.PostgresqlExtractor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.gson.JsonArray; import com.google.gson.JsonElement; /** * An implementation of postgresql source to get work units * * @author tilakpatidar */ public class PostgresqlSource extends QueryBasedSource<JsonArray, JsonElement> { private static final Logger LOG = LoggerFactory.getLogger(PostgresqlSource.class); @Override public Extractor<JsonArray, JsonElement> getExtractor(WorkUnitState state) throws IOException { Extractor<JsonArray, JsonElement> extractor; try { extractor = new PostgresqlExtractor(state).build(); } catch (ExtractPrepareException e) { LOG.error("Failed to prepare extractor: error - " + e.getMessage()); throw new IOException(e); } return extractor; } }
3,478
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/extractor/extract/jdbc/SqlServerSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.extractor.extract.jdbc; import org.apache.gobblin.source.extractor.Extractor; import org.apache.gobblin.source.extractor.exception.ExtractPrepareException; import java.io.IOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.extract.QueryBasedSource; import org.apache.gobblin.source.jdbc.SqlServerExtractor; /** * An implementation of sqlserver source to get work units * * @author nveeramr */ public class SqlServerSource extends QueryBasedSource<JsonArray, JsonElement> { private static final Logger LOG = LoggerFactory.getLogger(SqlServerSource.class); @Override public Extractor<JsonArray, JsonElement> getExtractor(WorkUnitState state) throws IOException { Extractor<JsonArray, JsonElement> extractor = null; try { extractor = new SqlServerExtractor(state).build(); } catch (ExtractPrepareException e) { LOG.error("Failed to prepare extractor: error - " + e.getMessage()); throw new IOException(e); } return extractor; } }
3,479
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/PostgresqlExtractor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; import org.apache.commons.lang3.StringUtils; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.DataRecordException; import org.apache.gobblin.source.extractor.exception.HighWatermarkException; import org.apache.gobblin.source.extractor.exception.RecordCountException; import org.apache.gobblin.source.extractor.exception.SchemaException; import org.apache.gobblin.source.extractor.extract.Command; import org.apache.gobblin.source.extractor.utils.Utils; import org.apache.gobblin.source.extractor.watermark.Predicate; import org.apache.gobblin.source.extractor.watermark.WatermarkType; import org.apache.gobblin.source.workunit.WorkUnit; import com.google.common.collect.ImmutableMap; import com.google.gson.JsonElement; import lombok.extern.slf4j.Slf4j; @Slf4j public class PostgresqlExtractor extends JdbcExtractor { private static final String CONNECTION_DATABASE = "source.conn.database"; private static final 
String POSTGRES_TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss"; private static final String POSTGRES_DATE_FORMAT = "yyyy-MM-dd"; private static final String POSTGRES_HOUR_FORMAT = "HH"; private static final long SAMPLERECORDCOUNT = -1; public PostgresqlExtractor(WorkUnitState workUnitState) { super(workUnitState); } @Override public String getHourPredicateCondition(String column, long value, String valueFormat, String operator) { log.debug("Getting hour predicate for Postgres"); String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, POSTGRES_HOUR_FORMAT); return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'"; } @Override public String getDatePredicateCondition(String column, long value, String valueFormat, String operator) { log.debug("Getting date predicate for Postgres"); String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, POSTGRES_DATE_FORMAT); return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'"; } @Override public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator) { log.debug("Getting timestamp predicate for Postgres"); String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, POSTGRES_TIMESTAMP_FORMAT); return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'"; } @Override public List<Command> getSchemaMetadata(String schema, String entity) throws SchemaException { log.debug("Build query to get schema"); List<Command> commands = new ArrayList<>(); List<String> queryParams = Arrays.asList(entity, schema); String metadataSql = "select col.column_name, col.data_type, " + "case when CHARACTER_OCTET_LENGTH is null then 0 else 0 end as length, " + "case when NUMERIC_PRECISION is null then 0 else NUMERIC_PRECISION end as precesion, " + "case when NUMERIC_SCALE is null then 0 else NUMERIC_SCALE end as scale, " + "case when 
is_nullable='NO' then 'false' else 'true' end as nullable, '' as format, " + "'' as comment " + "from information_schema.COLUMNS col " + "WHERE upper(col.table_name)=upper(?) AND upper(col.table_schema)=upper(?) " + "order by col.ORDINAL_POSITION"; commands.add(getCommand(metadataSql, JdbcCommand.JdbcCommandType.QUERY)); commands.add(getCommand(queryParams, JdbcCommand.JdbcCommandType.QUERYPARAMS)); return commands; } @Override public List<Command> getHighWatermarkMetadata(String schema, String entity, String watermarkColumn, List<Predicate> predicateList) throws HighWatermarkException { log.debug("Build query to get high watermark"); List<Command> commands = new ArrayList<>(); String columnProjection = "max(" + Utils.getCoalesceColumnNames(watermarkColumn) + ")"; String watermarkFilter = this.concatPredicates(predicateList); String query = this.getExtractSql(); if (StringUtils.isBlank(watermarkFilter)) { watermarkFilter = "1=1"; } query = query.replace(this.getOutputColumnProjection(), columnProjection) .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter); commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY)); return commands; } @Override public List<Command> getCountMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws RecordCountException { log.debug("Build query to get source record count"); List<Command> commands = new ArrayList<>(); String columnProjection = "COUNT(1)"; String watermarkFilter = this.concatPredicates(predicateList); String query = this.getExtractSql(); if (StringUtils.isBlank(watermarkFilter)) { watermarkFilter = "1=1"; } query = query.replace(this.getOutputColumnProjection(), columnProjection) .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter); String sampleFilter = this.constructSampleClause(); query = query + sampleFilter; if (!StringUtils.isEmpty(sampleFilter)) { query = "SELECT COUNT(1) FROM (" 
+ query.replace(" COUNT(1) ", " 1 ") + ")temp"; } commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY)); return commands; } @Override public List<Command> getDataMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws DataRecordException { log.debug("Build query to extract data"); List<Command> commands = new ArrayList<>(); int fetchsize = this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE, ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE); String watermarkFilter = this.concatPredicates(predicateList); String query = this.getExtractSql(); if (StringUtils.isBlank(watermarkFilter)) { watermarkFilter = "1=1"; } query = query.replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter); String sampleFilter = this.constructSampleClause(); query = query + sampleFilter; commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY)); commands.add(getCommand(fetchsize, JdbcCommand.JdbcCommandType.FETCHSIZE)); return commands; } @Override public String getConnectionUrl() { String host = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME); String port = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PORT); String database = this.workUnitState.getProp(CONNECTION_DATABASE); return "jdbc:postgresql://" + host.trim() + ":" + port + "/" + database.trim(); } /** {@inheritdoc} */ @Override protected boolean convertBitToBoolean() { return false; } @Override public Map<String, String> getDataTypeMap() { Map<String, String> dataTypeMap = ImmutableMap.<String, String>builder().put("tinyint", "int").put("smallint", "int").put("mediumint", "int") .put("integer", "int").put("int", "int").put("bigint", "long").put("float", "float").put("double", "double") .put("double precision", "double").put("decimal", "double").put("numeric", "double").put("date", "date").put("timestamp", "timestamp") .put("timestamp without 
time zone", "timestamp").put("timestamp with time zone", "timestamp") .put("datetime", "timestamp").put("time", "time").put("char", "string").put("varchar", "string") .put("varbinary", "string").put("text", "string").put("tinytext", "string").put("mediumtext", "string") .put("character varying", "string").put("longtext", "string").put("blob", "string").put("tinyblob", "string").put("mediumblob", "string") .put("longblob", "string").put("enum", "string").build(); return dataTypeMap; } @Override public String getWatermarkSourceFormat(WatermarkType watermarkType) { String columnFormat = null; switch (watermarkType) { case TIMESTAMP: columnFormat = "yyyy-MM-dd HH:mm:ss"; break; case DATE: columnFormat = "yyyy-MM-dd"; break; default: log.error("Watermark type " + watermarkType.toString() + " not recognized"); } return columnFormat; } @Override public long extractSampleRecordCountFromQuery(String query) { if (StringUtils.isBlank(query)) { return SAMPLERECORDCOUNT; } long recordcount = SAMPLERECORDCOUNT; String limit = null; String inputQuery = query.toLowerCase(); int limitIndex = inputQuery.indexOf(" limit "); if (limitIndex > 0) { limit = query.substring(limitIndex + 7).trim(); } if (StringUtils.isNotBlank(limit)) { try { recordcount = Long.parseLong(limit); } catch (Exception e) { log.error("Ignoring incorrct limit value in input query:" + limit); } } return recordcount; } @Override public String removeSampleClauseFromQuery(String query) { if (StringUtils.isBlank(query)) { return null; } String limitString = ""; String inputQuery = query.toLowerCase(); int limitIndex = inputQuery.indexOf(" limit"); if (limitIndex > 0) { limitString = query.substring(limitIndex); } if (inputQuery.contains(" where ")) { String newQuery = query.replace(limitString, " AND 1=1"); if (newQuery.toLowerCase().contains(" where and 1=1")) { return query.replace(limitString, " 1=1"); } return newQuery; } return query.replace(limitString, " where 1=1"); } @Override public String 
constructSampleClause() { long sampleRowCount = this.getSampleRecordCount(); if (sampleRowCount >= 0) { return " limit " + sampleRowCount; } return ""; } @Override public String getLeftDelimitedIdentifier() { return this.enableDelimitedIdentifier ? "`" : ""; } @Override public String getRightDelimitedIdentifier() { return this.enableDelimitedIdentifier ? "`" : ""; } @Override public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws IOException { return null; } }
3,480
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/JdbcCommandFormatException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; /** * Exception if jdbc command is failed to execute * * @author nveeramr */ public class JdbcCommandFormatException extends Exception { private static final long serialVersionUID = 1L; public JdbcCommandFormatException(String message) { super(message); } public JdbcCommandFormatException(String message, Exception e) { super(message, e); } }
3,481
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/JdbcExtractor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; import java.io.IOException; import java.sql.Blob; import java.sql.Clob; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.sql.Timestamp; import java.sql.Types; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlOrderBy; import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.sql.parser.SqlParser; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Joiner; import com.google.gson.Gson; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import 
org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.password.PasswordManager; import org.apache.gobblin.source.extractor.DataRecordException; import org.apache.gobblin.source.extractor.exception.HighWatermarkException; import org.apache.gobblin.source.extractor.exception.RecordCountException; import org.apache.gobblin.source.extractor.exception.SchemaException; import org.apache.gobblin.source.extractor.extract.Command; import org.apache.gobblin.source.extractor.extract.CommandOutput; import org.apache.gobblin.source.extractor.extract.QueryBasedExtractor; import org.apache.gobblin.source.extractor.extract.SourceSpecificLayer; import org.apache.gobblin.source.jdbc.JdbcCommand.JdbcCommandType; import org.apache.gobblin.source.extractor.resultset.RecordSetList; import org.apache.gobblin.source.extractor.schema.ColumnAttributes; import org.apache.gobblin.source.extractor.schema.ColumnNameCase; import org.apache.gobblin.source.extractor.schema.Schema; import org.apache.gobblin.source.extractor.utils.Utils; import org.apache.gobblin.source.extractor.watermark.Predicate; import org.apache.gobblin.source.extractor.watermark.WatermarkType; import org.apache.gobblin.source.workunit.WorkUnit; /** * Extract data using JDBC protocol * * @author nveeramr */ public abstract class JdbcExtractor extends QueryBasedExtractor<JsonArray, JsonElement> implements SourceSpecificLayer<JsonArray, JsonElement>, JdbcSpecificLayer { private static final Gson gson = new Gson(); private List<String> headerRecord; private boolean firstPull = true; private CommandOutput<?, ?> dataResponse = null; protected String extractSql; protected long sampleRecordCount; protected JdbcProvider jdbcSource; protected Connection dataConnection; protected int timeOut; private List<ColumnAttributes> columnAliasMap = new ArrayList<>(); private Map<String, Schema> metadataColumnMap = new HashMap<>(); private List<String> 
metadataColumnList = new ArrayList<>(); private String inputColumnProjection; private String outputColumnProjection; private long totalRecordCount = 0; private boolean nextRecord = true; private int unknownColumnCounter = 1; protected boolean enableDelimitedIdentifier = false; private Logger log = LoggerFactory.getLogger(JdbcExtractor.class); /** * Metadata column mapping to lookup columns specified in input query * * @return metadata(schema) column mapping */ public Map<String, Schema> getMetadataColumnMap() { return this.metadataColumnMap; } /** * @param metadataColumnMap metadata column mapping */ public void setMetadataColumnMap(Map<String, Schema> metadataColumnMap) { this.metadataColumnMap = metadataColumnMap; } /** * Metadata column list * * @return metadata(schema) column list */ public List<String> getMetadataColumnList() { return this.metadataColumnList; } /** * @param metadataColumnList metadata column list */ public void setMetadataColumnList(List<String> metadataColumnList) { this.metadataColumnList = metadataColumnList; } /** * Sample Records specified in input query * * @return sample record count */ public long getSampleRecordCount() { return this.sampleRecordCount; } /** * @param sampleRecordCount sample record count */ public void setSampleRecordCount(long sampleRecordCount) { this.sampleRecordCount = sampleRecordCount; } /** * query to extract data from data source * * @return query */ public String getExtractSql() { return this.extractSql; } /** * @param extractSql extract query */ public void setExtractSql(String extractSql) { this.extractSql = extractSql; } /** * output column projection with aliases specified in input sql * * @return column projection */ public String getOutputColumnProjection() { return this.outputColumnProjection; } /** * @param outputColumnProjection output column projection */ public void setOutputColumnProjection(String outputColumnProjection) { this.outputColumnProjection = outputColumnProjection; } /** * input column 
projection with source columns specified in input sql * * @return column projection */ public String getInputColumnProjection() { return this.inputColumnProjection; } /** * @param inputColumnProjection input column projection */ public void setInputColumnProjection(String inputColumnProjection) { this.inputColumnProjection = inputColumnProjection; } /** * source column and alias mapping * * @return map of column name and alias name */ public List<ColumnAttributes> getColumnAliasMap() { return this.columnAliasMap; } /** * add column and alias mapping * * @param columnAliasMap column alias mapping */ public void addToColumnAliasMap(ColumnAttributes columnAliasMap) { this.columnAliasMap.add(columnAliasMap); } /** * check whether is first pull or not * * @return true, for the first run and it will be set to false after the * first run */ public boolean isFirstPull() { return this.firstPull; } /** * @param firstPull */ public void setFirstPull(boolean firstPull) { this.firstPull = firstPull; } /** * Header record to convert csv to json * * @return header record with list of columns */ protected List<String> getHeaderRecord() { return this.headerRecord; } /** * @param headerRecord list of column names */ protected void setHeaderRecord(List<String> headerRecord) { this.headerRecord = headerRecord; } /** * @return connection timeout */ public int getTimeOut() { return this.timeOut; } /** * @return true, if records available. 
Otherwise, false */ public boolean hasNextRecord() { return this.nextRecord; } /** * @param nextRecord next Record */ public void setNextRecord(boolean nextRecord) { this.nextRecord = nextRecord; } /** * @param timeOut connection timeout */ @Override public void setTimeOut(int timeOut) { this.timeOut = timeOut; } /** * @return private static final Gson factory */ public Gson getGson() { return this.gson; } public JdbcExtractor(WorkUnitState workUnitState) { super(workUnitState); } @Override public void extractMetadata(String schema, String entity, WorkUnit workUnit) throws SchemaException, IOException { this.log.info("Extract metadata using JDBC"); String inputQuery = workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_QUERY); if (workUnitState.getPropAsBoolean(ConfigurationKeys.SOURCE_QUERYBASED_IS_METADATA_COLUMN_CHECK_ENABLED, Boolean.valueOf(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_IS_METADATA_COLUMN_CHECK_ENABLED)) && hasJoinOperation(inputQuery)) { throw new RuntimeException("Query across multiple tables not supported"); } String watermarkColumn = workUnitState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY); this.enableDelimitedIdentifier = workUnitState.getPropAsBoolean( ConfigurationKeys.ENABLE_DELIMITED_IDENTIFIER, ConfigurationKeys.DEFAULT_ENABLE_DELIMITED_IDENTIFIER); JsonObject defaultWatermark = this.getDefaultWatermark(); String derivedWatermarkColumnName = defaultWatermark.get("columnName").getAsString(); this.setSampleRecordCount(this.extractSampleRecordCountFromQuery(inputQuery)); inputQuery = this.removeSampleClauseFromQuery(inputQuery); JsonArray targetSchema = new JsonArray(); List<String> headerColumns = new ArrayList<>(); try { List<Command> cmds = this.getSchemaMetadata(schema, entity); CommandOutput<?, ?> response = this.executePreparedSql(cmds); JsonArray array = this.getSchema(response); this.buildMetadataColumnMap(array); this.parseInputQuery(inputQuery); List<String> sourceColumns = this.getMetadataColumnList(); for 
(ColumnAttributes colMap : this.columnAliasMap) { String alias = colMap.getAliasName(); String columnName = colMap.getColumnName(); String sourceColumnName = colMap.getSourceColumnName(); if (this.isMetadataColumn(columnName, sourceColumns)) { String targetColumnName = this.getTargetColumnName(columnName, alias); Schema obj = this.getUpdatedSchemaObject(columnName, alias, targetColumnName); String jsonStr = gson.toJson(obj); JsonObject jsonObject = gson.fromJson(jsonStr, JsonObject.class).getAsJsonObject(); targetSchema.add(jsonObject); headerColumns.add(targetColumnName); sourceColumnName = getLeftDelimitedIdentifier() + sourceColumnName + getRightDelimitedIdentifier(); this.columnList.add(sourceColumnName); } } if (this.hasMultipleWatermarkColumns(watermarkColumn)) { derivedWatermarkColumnName = getLeftDelimitedIdentifier() + derivedWatermarkColumnName + getRightDelimitedIdentifier(); this.columnList.add(derivedWatermarkColumnName); headerColumns.add(derivedWatermarkColumnName); targetSchema.add(defaultWatermark); this.workUnitState.setProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY, derivedWatermarkColumnName); } String outputColProjection = Joiner.on(",").useForNull("null").join(this.columnList); outputColProjection = outputColProjection.replace(derivedWatermarkColumnName, Utils.getCoalesceColumnNames(watermarkColumn) + " AS " + derivedWatermarkColumnName); this.setOutputColumnProjection(outputColProjection); String extractQuery = this.getExtractQuery(schema, entity, inputQuery); this.setHeaderRecord(headerColumns); this.setOutputSchema(targetSchema); this.setExtractSql(extractQuery); // this.workUnit.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, // this.escapeCharsInColumnName(this.workUnit.getProp(ConfigurationKeys.SOURCE_ENTITY), // ConfigurationKeys.ESCAPE_CHARS_IN_COLUMN_NAME, "_")); this.log.info("Schema:" + targetSchema); this.log.info("Extract query: " + this.getExtractSql()); } catch (RuntimeException | IOException | SchemaException e) { throw new 
SchemaException("Failed to get metadata using JDBC; error - " + e.getMessage(), e); } } /** * Build/Format input query in the required format * * @param schema * @param entity * @param inputQuery * @return formatted extract query */ private String getExtractQuery(String schema, String entity, String inputQuery) { String inputColProjection = this.getInputColumnProjection(); String outputColProjection = this.getOutputColumnProjection(); String query = inputQuery; if (query == null) { // if input query is null, build the query from metadata query = "SELECT " + outputColProjection + " FROM " + schema + "." + entity; } else { // replace input column projection with output column projection if (StringUtils.isNotBlank(inputColProjection)) { query = query.replace(inputColProjection, outputColProjection); } } query = addOptionalWatermarkPredicate(query); return query; } /** * @param query * @return query with watermark predicate symbol */ protected String addOptionalWatermarkPredicate(String query) { String watermarkPredicateSymbol = ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL; if (!query.contains(watermarkPredicateSymbol)) { query = SqlQueryUtils.addPredicate(query, watermarkPredicateSymbol); } return query; } /** * Update schema of source column Update column name with target column * name/alias Update watermark, nullable and primary key flags * * @param sourceColumnName * @param targetColumnName * @return schema object of a column */ private Schema getUpdatedSchemaObject(String sourceColumnName, String alias, String targetColumnName) { // Check for source column and alias Schema obj = this.getMetadataColumnMap().get(sourceColumnName.toLowerCase()); if (obj == null && alias != null) { obj = this.getMetadataColumnMap().get(alias.toLowerCase()); } if (obj == null) { obj = getCustomColumnSchema(targetColumnName); } else { String watermarkColumn = this.workUnitState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY); String primarykeyColumn = 
this.workUnitState.getProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY); boolean isMultiColumnWatermark = this.hasMultipleWatermarkColumns(watermarkColumn); obj.setColumnName(targetColumnName); boolean isWatermarkColumn = this.isWatermarkColumn(watermarkColumn, sourceColumnName); if (isWatermarkColumn) { this.updateDeltaFieldConfig(sourceColumnName, targetColumnName); } else if (alias != null) { // Check for alias isWatermarkColumn = this.isWatermarkColumn(watermarkColumn, alias); this.updateDeltaFieldConfig(alias, targetColumnName); } // If there is only one watermark column, then consider it as a // watermark. Otherwise add a default watermark column in the end if (!isMultiColumnWatermark) { obj.setWaterMark(isWatermarkColumn); } // override all columns to nullable except primary key and watermark // columns if ((isWatermarkColumn && !isMultiColumnWatermark) || this.getPrimarykeyIndex(primarykeyColumn, sourceColumnName) > 0) { obj.setNullable(false); } else { obj.setNullable(true); } // set primary key index for all the primary key fields int primarykeyIndex = this.getPrimarykeyIndex(primarykeyColumn, sourceColumnName); if (primarykeyIndex > 0 && (!sourceColumnName.equalsIgnoreCase(targetColumnName))) { this.updatePrimaryKeyConfig(sourceColumnName, targetColumnName); } obj.setPrimaryKey(primarykeyIndex); } return obj; } /** * Get target column name if column is not found in metadata, then name it * as unknown column If alias is not found, target column is nothing but * source column * * @param sourceColumnName * @param alias * @return targetColumnName */ private String getTargetColumnName(String sourceColumnName, String alias) { String targetColumnName = alias; Schema obj = this.getMetadataColumnMap().get(sourceColumnName.toLowerCase()); if (obj == null) { targetColumnName = (targetColumnName == null ? 
"unknown" + this.unknownColumnCounter : targetColumnName); this.unknownColumnCounter++; } else { targetColumnName = (StringUtils.isNotBlank(targetColumnName) ? targetColumnName : sourceColumnName); } targetColumnName = this.toCase(targetColumnName); return Utils.escapeSpecialCharacters(targetColumnName, ConfigurationKeys.ESCAPE_CHARS_IN_COLUMN_NAME, "_"); } /** * Build metadata column map with column name and column schema object. * Build metadata column list with list columns in metadata * * @param array Schema of all columns */ private void buildMetadataColumnMap(JsonArray array) { if (array != null) { for (JsonElement columnElement : array) { Schema schemaObj = gson.fromJson(columnElement, Schema.class); String columnName = schemaObj.getColumnName(); this.metadataColumnMap.put(columnName.toLowerCase(), schemaObj); this.metadataColumnList.add(columnName.toLowerCase()); } } } /** * Update water mark column property if there is an alias defined in query * * @param srcColumnName source column name * @param tgtColumnName target column name */ private void updateDeltaFieldConfig(String srcColumnName, String tgtColumnName) { if (this.workUnitState.contains(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY)) { String watermarkCol = this.workUnitState.getProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY); this.workUnitState.setProp(ConfigurationKeys.EXTRACT_DELTA_FIELDS_KEY, watermarkCol.replaceAll(srcColumnName, tgtColumnName)); } } /** * Update primary key column property if there is an alias defined in query * * @param srcColumnName source column name * @param tgtColumnName target column name */ private void updatePrimaryKeyConfig(String srcColumnName, String tgtColumnName) { if (this.workUnitState.contains(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY)) { String primarykey = this.workUnitState.getProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY); this.workUnitState.setProp(ConfigurationKeys.EXTRACT_PRIMARY_KEY_FIELDS_KEY, primarykey.replaceAll(srcColumnName, 
tgtColumnName)); } } /** * If input query is null or '*' in the select list, consider all columns. * * @return true, to select all colums. else, false. */ private boolean isSelectAllColumns() { String columnProjection = this.getInputColumnProjection(); if (columnProjection == null || columnProjection.trim().equals("*") || columnProjection.contains(".*")) { return true; } return false; } /** * Parse query provided in pull file Set input column projection - column * projection in the input query Set columnAlias map - column and its alias * mentioned in input query * * @param query input query */ private void parseInputQuery(String query) { List<String> projectedColumns = new ArrayList<>(); if (StringUtils.isNotBlank(query)) { String queryLowerCase = query.toLowerCase(); int startIndex = queryLowerCase.indexOf("select ") + 7; int endIndex = queryLowerCase.indexOf(" from "); if (startIndex >= 0 && endIndex >= 0) { String columnProjection = query.substring(startIndex, endIndex); this.setInputColumnProjection(columnProjection); // parse the select list StringBuffer sb = new StringBuffer(); int bracketCount = 0; for (int i = 0; i < columnProjection.length(); i++) { char c = columnProjection.charAt(i); if (c == '(') { bracketCount++; } if (c == ')') { bracketCount--; } if (bracketCount != 0) { sb.append(c); } else { if (c != ',') { sb.append(c); } else { projectedColumns.add(sb.toString()); sb = new StringBuffer(); } } } projectedColumns.add(sb.toString()); } } if (this.isSelectAllColumns()) { List<String> columnList = this.getMetadataColumnList(); for (String columnName : columnList) { ColumnAttributes col = new ColumnAttributes(); col.setColumnName(columnName); col.setAliasName(columnName); col.setSourceColumnName(columnName); this.addToColumnAliasMap(col); } } else { for (String projectedColumn : projectedColumns) { String column = projectedColumn.trim(); String alias = null; String sourceColumn = column; int spaceOccurences = StringUtils.countMatches(column.trim(), " 
"); if (spaceOccurences > 0) { // separate column and alias if they are separated by "as" // or space int lastSpaceIndex = column.toLowerCase().lastIndexOf(" as "); sourceColumn = column.substring(0, lastSpaceIndex); alias = column.substring(lastSpaceIndex + 4); } // extract column name if projection has table name in it String columnName = sourceColumn; if (sourceColumn.contains(".")) { columnName = sourceColumn.substring(sourceColumn.indexOf(".") + 1); } ColumnAttributes col = new ColumnAttributes(); col.setColumnName(columnName); col.setAliasName(alias); col.setSourceColumnName(sourceColumn); this.addToColumnAliasMap(col); } } } /** * Execute query using JDBC simple Statement Set fetch size * * @param cmds commands - query, fetch size * @return JDBC ResultSet * @throws Exception */ private CommandOutput<?, ?> executeSql(List<Command> cmds) { String query = null; int fetchSize = 0; for (Command cmd : cmds) { if (cmd instanceof JdbcCommand) { JdbcCommandType type = (JdbcCommandType) cmd.getCommandType(); switch (type) { case QUERY: query = cmd.getParams().get(0); break; case FETCHSIZE: fetchSize = Integer.parseInt(cmd.getParams().get(0)); break; default: this.log.error("Command " + type.toString() + " not recognized"); break; } } } this.log.info("Executing query:" + query); ResultSet resultSet = null; try { this.jdbcSource = createJdbcSource(); if (this.dataConnection == null) { this.dataConnection = this.jdbcSource.getConnection(); } Statement statement = this.dataConnection.createStatement(); if (fetchSize != 0 && this.getExpectedRecordCount() > 2000) { statement.setFetchSize(fetchSize); } final boolean status = statement.execute(query); if (status == false) { this.log.error("Failed to execute sql:" + query); } resultSet = statement.getResultSet(); } catch (Exception e) { this.log.error("Failed to execute sql:" + query + " ;error-" + e.getMessage(), e); } CommandOutput<JdbcCommand, ResultSet> output = new JdbcCommandOutput(); output.put((JdbcCommand) 
cmds.get(0), resultSet); return output; } /** * Execute query using JDBC PreparedStatement to pass query parameters Set * fetch size * * @param cmds commands - query, fetch size, query parameters * @return JDBC ResultSet * @throws Exception */ private CommandOutput<?, ?> executePreparedSql(List<Command> cmds) { String query = null; List<String> queryParameters = null; int fetchSize = 0; for (Command cmd : cmds) { if (cmd instanceof JdbcCommand) { JdbcCommandType type = (JdbcCommandType) cmd.getCommandType(); switch (type) { case QUERY: query = cmd.getParams().get(0); break; case QUERYPARAMS: queryParameters = cmd.getParams(); break; case FETCHSIZE: fetchSize = Integer.parseInt(cmd.getParams().get(0)); break; default: this.log.error("Command " + type.toString() + " not recognized"); break; } } } this.log.info("Executing query:" + query); ResultSet resultSet = null; try { this.jdbcSource = createJdbcSource(); if (this.dataConnection == null) { this.dataConnection = this.jdbcSource.getConnection(); } PreparedStatement statement = this.dataConnection.prepareStatement(query, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); int parameterPosition = 1; if (queryParameters != null && queryParameters.size() > 0) { for (String parameter : queryParameters) { statement.setString(parameterPosition, parameter); parameterPosition++; } } if (fetchSize != 0) { statement.setFetchSize(fetchSize); } final boolean status = statement.execute(); if (status == false) { this.log.error("Failed to execute sql:" + query); } resultSet = statement.getResultSet(); } catch (Exception e) { this.log.error("Failed to execute sql:" + query + " ;error-" + e.getMessage(), e); } CommandOutput<JdbcCommand, ResultSet> output = new JdbcCommandOutput(); output.put((JdbcCommand) cmds.get(0), resultSet); return output; } /** * Create JDBC source to get connection * * @return JDBCSource */ protected JdbcProvider createJdbcSource() { String driver = 
this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_DRIVER); String userName = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME); String password = PasswordManager.getInstance(this.workUnitState) .readPassword(this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD)); String connectionUrl = this.getConnectionUrl(); String proxyHost = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL); int proxyPort = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT) != null ? this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT) : -1; if (this.jdbcSource == null || this.jdbcSource.isClosed()) { this.jdbcSource = new JdbcProvider(driver, connectionUrl, userName, password, 1, this.getTimeOut(), "DEFAULT", proxyHost, proxyPort); return this.jdbcSource; } else { return this.jdbcSource; } } @Override public long getMaxWatermark(String schema, String entity, String watermarkColumn, List<Predicate> predicateList, String watermarkSourceFormat) throws HighWatermarkException { this.log.info("Get high watermark using JDBC"); long calculatedHighWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE; try { List<Command> cmds = this.getHighWatermarkMetadata(schema, entity, watermarkColumn, predicateList); CommandOutput<?, ?> response = this.executeSql(cmds); calculatedHighWatermark = this.getHighWatermark(response, watermarkColumn, watermarkSourceFormat); return calculatedHighWatermark; } catch (Exception e) { throw new HighWatermarkException("Failed to get high watermark using JDBC; error - " + e.getMessage(), e); } } @Override public long getSourceCount(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws RecordCountException { this.log.info("Get source record count using JDBC"); long count = 0; try { List<Command> cmds = this.getCountMetadata(schema, entity, workUnit, predicateList); CommandOutput<?, ?> response = this.executeSql(cmds); count = 
this.getCount(response); this.log.info("Source record count:" + count); return count; } catch (Exception e) { throw new RecordCountException("Failed to get source record count using JDBC; error - " + e.getMessage(), e); } } @Override public Iterator<JsonElement> getRecordSet(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws DataRecordException, IOException { Iterator<JsonElement> rs = null; List<Command> cmds; try { if (isFirstPull()) { this.log.info("Get data recordset using JDBC"); cmds = this.getDataMetadata(schema, entity, workUnit, predicateList); this.dataResponse = this.executePreparedSql(cmds); this.setFirstPull(false); } rs = this.getData(this.dataResponse); return rs; } catch (Exception e) { throw new DataRecordException("Failed to get record set using JDBC; error - " + e.getMessage(), e); } } @Override public JsonArray getSchema(CommandOutput<?, ?> response) throws SchemaException, IOException { this.log.debug("Extract schema from resultset"); ResultSet resultset = null; Iterator<ResultSet> itr = (Iterator<ResultSet>) response.getResults().values().iterator(); if (itr.hasNext()) { resultset = itr.next(); } else { throw new SchemaException("Failed to get schema from database - Resultset has no records"); } JsonArray fieldJsonArray = new JsonArray(); try { while (resultset.next()) { Schema schema = new Schema(); String columnName = resultset.getString(1); schema.setColumnName(columnName); String dataType = resultset.getString(2); String elementDataType = "string"; List<String> mapSymbols = null; JsonObject newDataType = this.convertDataType(columnName, dataType, elementDataType, mapSymbols); schema.setDataType(newDataType); schema.setLength(resultset.getLong(3)); schema.setPrecision(resultset.getInt(4)); schema.setScale(resultset.getInt(5)); schema.setNullable(resultset.getBoolean(6)); schema.setFormat(resultset.getString(7)); schema.setComment(resultset.getString(8)); schema.setDefaultValue(null); 
schema.setUnique(false); String jsonStr = gson.toJson(schema); JsonObject obj = gson.fromJson(jsonStr, JsonObject.class).getAsJsonObject(); fieldJsonArray.add(obj); } } catch (Exception e) { throw new SchemaException("Failed to get schema from database; error - " + e.getMessage(), e); } return fieldJsonArray; } @Override public long getHighWatermark(CommandOutput<?, ?> response, String watermarkColumn, String watermarkColumnFormat) throws HighWatermarkException { this.log.debug("Extract high watermark from resultset"); ResultSet resultset = null; Iterator<ResultSet> itr = (Iterator<ResultSet>) response.getResults().values().iterator(); if (itr.hasNext()) { resultset = itr.next(); } else { throw new HighWatermarkException("Failed to get high watermark from database - Resultset has no records"); } Long HighWatermark; try { String watermark; if (resultset.next()) { watermark = resultset.getString(1); } else { watermark = null; } if (watermark == null) { return ConfigurationKeys.DEFAULT_WATERMARK_VALUE; } if (watermarkColumnFormat != null) { SimpleDateFormat inFormat = new SimpleDateFormat(watermarkColumnFormat); Date date = null; try { date = inFormat.parse(watermark); } catch (ParseException e) { this.log.error("ParseException: " + e.getMessage(), e); } SimpleDateFormat outFormat = new SimpleDateFormat("yyyyMMddHHmmss"); HighWatermark = Long.parseLong(outFormat.format(date)); } else { HighWatermark = Long.parseLong(watermark); } } catch (Exception e) { throw new HighWatermarkException("Failed to get high watermark from database; error - " + e.getMessage(), e); } return HighWatermark; } @Override public long getCount(CommandOutput<?, ?> response) throws RecordCountException { this.log.debug("Extract source record count from resultset"); ResultSet resultset = null; long count = 0; Iterator<ResultSet> itr = (Iterator<ResultSet>) response.getResults().values().iterator(); if (itr.hasNext()) { resultset = itr.next(); try { if (resultset.next()) { count = 
resultset.getLong(1); } } catch (Exception e) { throw new RecordCountException("Failed to get source record count from database; error - " + e.getMessage(), e); } } else { throw new RuntimeException("Failed to get source record count from database - Resultset has no records"); } return count; } @Override public Iterator<JsonElement> getData(CommandOutput<?, ?> response) throws DataRecordException, IOException { this.log.debug("Extract data records from resultset"); RecordSetList<JsonElement> recordSet = this.getNewRecordSetList(); if (response == null || !this.hasNextRecord()) { return recordSet.iterator(); } ResultSet resultset = null; Iterator<ResultSet> itr = (Iterator<ResultSet>) response.getResults().values().iterator(); if (itr.hasNext()) { resultset = itr.next(); } else { throw new DataRecordException("Failed to get source record count from database - Resultset has no records"); } try { final ResultSetMetaData resultsetMetadata = resultset.getMetaData(); int batchSize = this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_FETCH_SIZE, 0); batchSize = (batchSize == 0 ? 
ConfigurationKeys.DEFAULT_SOURCE_FETCH_SIZE : batchSize); String sourceConnProps = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PROPERTIES); boolean convertZeroDateTime = sourceConnProps != null && sourceConnProps.contains("zeroDateTimeBehavior"); int recordCount = 0; while (resultset.next()) { final int numColumns = resultsetMetadata.getColumnCount(); JsonObject jsonObject = new JsonObject(); for (int i = 1; i < numColumns + 1; i++) { final String columnName = this.getHeaderRecord().get(i - 1); jsonObject.addProperty(columnName, parseColumnAsString(resultset, resultsetMetadata, i, convertZeroDateTime)); } recordSet.add(jsonObject); recordCount++; this.totalRecordCount++; // Insert records in record set until it reaches the batch size if (recordCount >= batchSize) { this.log.info("Total number of records processed so far: " + this.totalRecordCount); return recordSet.iterator(); } } this.setNextRecord(false); this.log.info("Total number of records processed so far: " + this.totalRecordCount); return recordSet.iterator(); } catch (Exception e) { throw new DataRecordException("Failed to get records from database; error - " + e.getMessage(), e); } } /* * For Blob data, need to get the bytes and use base64 encoding to encode the byte[] * When reading from the String, need to use base64 decoder * String tmp = ... 
( get the String value ) * byte[] foo = Base64.decodeBase64(tmp); */ private String readBlobAsString(Blob logBlob) throws SQLException { if (logBlob == null) { return StringUtils.EMPTY; } byte[] ba = logBlob.getBytes(1L, (int) (logBlob.length())); if (ba == null) { return StringUtils.EMPTY; } String baString = Base64.encodeBase64String(ba); return baString; } /* * For Clob data, we need to use the substring function to extract the string */ private String readClobAsString(Clob logClob) throws SQLException { if (logClob == null) { return StringUtils.EMPTY; } long length = logClob.length(); return logClob.getSubString(1, (int) length); } /** * HACK: there is a bug in the MysqlExtractor where tinyint columns are always treated as ints. * There are MySQL jdbc driver setting (tinyInt1isBit=true and transformedBitIsBoolean=false) that * can cause tinyint(1) columns to be treated as BIT/BOOLEAN columns. The default behavior is to * treat tinyint(1) as BIT. * * Currently, {@link MysqlExtractor#getDataTypeMap()} uses the information_schema to check types. * That does not do the above conversion. {@link #parseColumnAsString(ResultSet, ResultSetMetaData, int, boolean)} * which does the above type mapping. * * On the other hand, SqlServerExtractor treats BIT columns as Booleans. So we can be in a bind * where sometimes BIT has to be converted to an int (for backwards compatibility in MySQL) and * sometimes to a Boolean (for SqlServer). * * This function adds configurable behavior depending on the Extractor type. 
**/ protected boolean convertBitToBoolean() { return true; } private String parseColumnAsString(final ResultSet resultset, final ResultSetMetaData resultsetMetadata, int i, boolean convertZeroDateTime) throws SQLException { if (isBlob(resultsetMetadata.getColumnType(i))) { return readBlobAsString(resultset.getBlob(i)); } if (isClob(resultsetMetadata.getColumnType(i))) { return readClobAsString(resultset.getClob(i)); } if ((resultsetMetadata.getColumnType(i) == Types.BIT || resultsetMetadata.getColumnType(i) == Types.BOOLEAN) && convertBitToBoolean()) { return Boolean.toString(resultset.getBoolean(i)); } // Workaround for when `zeroDateTimeBehavior` is set // returns null or a rounded timestamp instead of "0000-00-00 00:00:00" for zero timestamps if (convertZeroDateTime && isTimestamp(resultsetMetadata.getColumnType(i))) { Timestamp ts = resultset.getTimestamp(i); if (ts == null) { return null; } else { return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(ts); } } return resultset.getString(i); } private static boolean isBlob(int columnType) { return columnType == Types.LONGVARBINARY || columnType == Types.BINARY; } private static boolean isClob(int columnType) { return columnType == Types.CLOB; } private static boolean isTimestamp(int columnType) { return columnType == Types.TIMESTAMP || columnType == Types.TIMESTAMP_WITH_TIMEZONE; } protected static Command getCommand(String query, JdbcCommandType commandType) { return new JdbcCommand().build(Arrays.asList(query), commandType); } protected static Command getCommand(int fetchSize, JdbcCommandType commandType) { return new JdbcCommand().build(Arrays.asList(Integer.toString(fetchSize)), commandType); } protected static Command getCommand(List<String> params, JdbcCommandType commandType) { return new JdbcCommand().build(params, commandType); } /** * Concatenate all predicates with "and" clause * * @param predicateList list of predicate(filter) conditions * @return predicate */ protected String 
concatPredicates(List<Predicate> predicateList) { List<String> conditions = new ArrayList<>(); for (Predicate predicate : predicateList) { conditions.add(predicate.getCondition()); } return Joiner.on(" and ").skipNulls().join(conditions); } /** * Schema of default watermark column-required if there are multiple watermarks * * @return column schema */ private JsonObject getDefaultWatermark() { Schema schema = new Schema(); String dataType; String columnName = "derivedwatermarkcolumn"; schema.setColumnName(columnName); WatermarkType wmType = WatermarkType.valueOf( this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE, "TIMESTAMP").toUpperCase()); switch (wmType) { case TIMESTAMP: dataType = "timestamp"; break; case DATE: dataType = "date"; break; default: dataType = "int"; break; } String elementDataType = "string"; List<String> mapSymbols = null; JsonObject newDataType = this.convertDataType(columnName, dataType, elementDataType, mapSymbols); schema.setDataType(newDataType); schema.setWaterMark(true); schema.setPrimaryKey(0); schema.setLength(0); schema.setPrecision(0); schema.setScale(0); schema.setNullable(false); schema.setFormat(null); schema.setComment("Default watermark column"); schema.setDefaultValue(null); schema.setUnique(false); String jsonStr = gson.toJson(schema); JsonObject obj = gson.fromJson(jsonStr, JsonObject.class).getAsJsonObject(); return obj; } /** * Schema of a custom column - required if column not found in metadata * * @return column schema */ private Schema getCustomColumnSchema(String columnName) { Schema schema = new Schema(); String dataType = "string"; schema.setColumnName(columnName); String elementDataType = "string"; List<String> mapSymbols = null; JsonObject newDataType = this.convertDataType(columnName, dataType, elementDataType, mapSymbols); schema.setDataType(newDataType); schema.setWaterMark(false); schema.setPrimaryKey(0); schema.setLength(0); schema.setPrecision(0); schema.setScale(0); 
schema.setNullable(true); schema.setFormat(null); schema.setComment("Custom column"); schema.setDefaultValue(null); schema.setUnique(false); return schema; } /** * Check if the SELECT query has join operation */ public static boolean hasJoinOperation(String selectQuery) { if (selectQuery == null || selectQuery.length() == 0) { return false; } SqlParser sqlParser = SqlParser.create(selectQuery); try { SqlNode all = sqlParser.parseQuery(); SqlSelect query; if (all instanceof SqlSelect) { query = (SqlSelect) all; } else if (all instanceof SqlOrderBy) { query = (SqlSelect) ((SqlOrderBy) all).query; } else { throw new UnsupportedOperationException("The select query is type of " + all.getClass() + " which is not supported here"); } return query.getFrom().getKind() == SqlKind.JOIN; } catch (SqlParseException e) { return false; } } /** * New record set for iterator * * @return RecordSetList */ private static RecordSetList<JsonElement> getNewRecordSetList() { return new RecordSetList<>(); } /** * Change the column name case to upper, lower or nochange; Default nochange * * @return column name with the required case */ private String toCase(String targetColumnName) { String columnName = targetColumnName; ColumnNameCase caseType = ColumnNameCase.valueOf(this.workUnitState .getProp(ConfigurationKeys.SOURCE_COLUMN_NAME_CASE, ConfigurationKeys.DEFAULT_COLUMN_NAME_CASE).toUpperCase()); switch (caseType) { case TOUPPER: columnName = targetColumnName.toUpperCase(); break; case TOLOWER: columnName = targetColumnName.toLowerCase(); break; default: columnName = targetColumnName; break; } return columnName; } /** * Default DelimitedIdentifier is 'double quotes', * but that would make the column name case sensitive in some of the systems, e.g. Oracle. 
* Queries may fail if * (1) enableDelimitedIdentifier is true, and * (2) Queried system is case sensitive when using double quotes as delimited identifier, and * (3) Intended column name does not match the column name in the schema including case. * * @return leftDelimitedIdentifier */ public String getLeftDelimitedIdentifier() { return this.enableDelimitedIdentifier ? "\"" : ""; } public String getRightDelimitedIdentifier() { return this.enableDelimitedIdentifier ? "\"" : ""; } @Override public void closeConnection() throws Exception { if (this.dataConnection != null) { try { this.dataConnection.close(); } catch (SQLException e) { this.log.error("Failed to close connection ;error-" + e.getMessage(), e); } } this.jdbcSource.close(); } }
3,482
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/TeradataExtractor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; import java.io.IOException; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map; import lombok.extern.slf4j.Slf4j; import static com.google.common.base.Strings.isNullOrEmpty; import static com.google.common.base.Preconditions.checkArgument; import com.google.common.collect.ImmutableMap; import com.google.gson.Gson; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import org.apache.gobblin.configuration.ConfigurationKeys; import org.apache.gobblin.configuration.WorkUnitState; import org.apache.gobblin.source.extractor.DataRecordException; import org.apache.gobblin.source.extractor.exception.HighWatermarkException; import org.apache.gobblin.source.extractor.exception.RecordCountException; import org.apache.gobblin.source.extractor.exception.SchemaException; import org.apache.gobblin.source.extractor.extract.Command; import org.apache.gobblin.source.extractor.extract.CommandOutput; import org.apache.gobblin.source.extractor.schema.Schema; import org.apache.gobblin.source.extractor.utils.Utils; import 
org.apache.gobblin.source.extractor.watermark.Predicate; import org.apache.gobblin.source.extractor.watermark.WatermarkType; import org.apache.gobblin.source.workunit.WorkUnit; /** * Teradata extractor using JDBC protocol * * @author ypopov */ @Slf4j public class TeradataExtractor extends JdbcExtractor { private static final String TERADATA_TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss"; private static final String TERADATA_DATE_FORMAT = "yyyy-MM-dd"; private static final String TERADATA_HOUR_FORMAT = "HH"; private static final long SAMPLE_RECORD_COUNT = -1; private static final String ELEMENT_DATA_TYPE = "string"; private static final String TERADATA_SAMPLE_CLAUSE = " sample "; private static final Gson gson = new Gson(); public TeradataExtractor(WorkUnitState workUnitState) { super(workUnitState); } @Override public List<Command> getSchemaMetadata(String schema, String entity) throws SchemaException { log.debug("Build query to get schema"); List<Command> commands = new ArrayList<Command>(); String inputQuery = this.workUnit.getProp(ConfigurationKeys.SOURCE_QUERYBASED_QUERY); String metadataSql, predicate = "1=0"; if(isNullOrEmpty(inputQuery)) { metadataSql = "select * from " + schema + "." 
+ entity; } else { metadataSql = this.removeSampleClauseFromQuery(inputQuery); } metadataSql = SqlQueryUtils.addPredicate(metadataSql, predicate); commands.add(JdbcExtractor.getCommand(metadataSql, JdbcCommand.JdbcCommandType.QUERY)); return commands; } @Override public List<Command> getHighWatermarkMetadata(String schema, String entity, String watermarkColumn, List<Predicate> predicateList) throws HighWatermarkException { log.debug("Build query to get high watermark"); List<Command> commands = new ArrayList<Command>(); String columnProjection = "max(" + Utils.getCoalesceColumnNames(watermarkColumn) + ")"; String watermarkFilter = this.concatPredicates(predicateList); String query = this.getExtractSql(); if (isNullOrEmpty(watermarkFilter)) { watermarkFilter = "1=1"; } query = query.replace(this.getOutputColumnProjection(), columnProjection) .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter); commands.add(JdbcExtractor.getCommand(query, JdbcCommand.JdbcCommandType.QUERY)); return commands; } @Override public List<Command> getCountMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws RecordCountException { log.debug("Build query to get source record count"); List<Command> commands = new ArrayList<Command>(); String columnProjection = "CAST(COUNT(1) AS BIGINT)"; String watermarkFilter = this.concatPredicates(predicateList); String query = this.getExtractSql(); if (isNullOrEmpty(watermarkFilter)) { watermarkFilter = "1=1"; } query = query.replace(this.getOutputColumnProjection(), columnProjection) .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter); String sampleFilter = this.constructSampleClause(); query = query + sampleFilter; if (!isNullOrEmpty(sampleFilter)) { query = "SELECT " + columnProjection + " FROM (" + query.replace(columnProjection, "1 as t") + ") temp"; } commands.add(JdbcExtractor.getCommand(query, 
JdbcCommand.JdbcCommandType.QUERY)); return commands; } @Override public List<Command> getDataMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws DataRecordException { log.debug("Build query to extract data"); List<Command> commands = new ArrayList<Command>(); int fetchSize = this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE, ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE); String watermarkFilter = this.concatPredicates(predicateList); String query = this.getExtractSql(); if (isNullOrEmpty(watermarkFilter)) { watermarkFilter = "1=1"; } query = query.replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter); String sampleFilter = this.constructSampleClause(); query = query + sampleFilter; commands.add(JdbcExtractor.getCommand(query, JdbcCommand.JdbcCommandType.QUERY)); commands.add(JdbcExtractor.getCommand(fetchSize, JdbcCommand.JdbcCommandType.FETCHSIZE)); return commands; } @Override public Map<String, String> getDataTypeMap() { Map<String, String> dataTypeMap = ImmutableMap.<String, String>builder() .put("byteint", "int") .put("smallint", "int") .put("integer", "int") .put("bigint", "long") .put("float", "float") .put("decimal", "double") .put("char", "string") .put("varchar", "string") .put("byte", "bytes") .put("varbyte", "bytes") .put("date", "date") .put("time", "time") .put("timestamp", "timestamp") .put("clob", "string") .put("blob", "string") .put("structured udt", "array") .put("double precision", "float") .put("numeric", "double") .put("real", "float") .put("character", "string") .put("char varying", "string") .put("character varying", "string") .put("long varchar", "string") .put("interval", "string") .build(); return dataTypeMap; } @Override public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList) throws IOException { return 
null; } @Override public String getConnectionUrl() { String urlPrefix = "jdbc:teradata://"; String host = this.workUnit.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME); checkArgument(!isNullOrEmpty(host), "Connectionn host cannot be null or empty at %s", ConfigurationKeys.SOURCE_CONN_HOST_NAME); String port = this.workUnit.getProp(ConfigurationKeys.SOURCE_CONN_PORT,"1025"); String database = this.workUnit.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SCHEMA); String defaultUrl = urlPrefix + host.trim() + "/TYPE=FASTEXPORT,DATABASE=" + database.trim() + ",DBS_PORT=" + port.trim() ; // use custom url from source.conn.host if Teradata jdbc url available return host.contains(urlPrefix) ? host.trim() : defaultUrl; } @Override public long extractSampleRecordCountFromQuery(String query) { if (isNullOrEmpty(query)) { return SAMPLE_RECORD_COUNT; } long recordcount = SAMPLE_RECORD_COUNT; String limit = null; String inputQuery = query.toLowerCase(); int limitIndex = inputQuery.indexOf(TERADATA_SAMPLE_CLAUSE); if (limitIndex > 0) { limit = query.substring(limitIndex + TERADATA_SAMPLE_CLAUSE.length()).trim(); } if (!isNullOrEmpty(limit)) { try { recordcount = Long.parseLong(limit); } catch (Exception e) { log.error("Ignoring incorrect limit value in input query: {}", limit); } } return recordcount; } @Override public String removeSampleClauseFromQuery(String query) { if (isNullOrEmpty(query)) { return null; } String limitString = ""; String inputQuery = query.toLowerCase(); int limitIndex = inputQuery.indexOf(TERADATA_SAMPLE_CLAUSE); if (limitIndex > 0) { limitString = query.substring(limitIndex); } return query.replace(limitString, ""); } @Override public String constructSampleClause() { long sampleRowCount = this.getSampleRecordCount(); if (sampleRowCount >= 0) { return TERADATA_SAMPLE_CLAUSE + sampleRowCount; } return ""; } @Override public String getWatermarkSourceFormat(WatermarkType watermarkType) { String columnFormat = null; switch (watermarkType) { case TIMESTAMP: 
columnFormat = TERADATA_TIMESTAMP_FORMAT; break; case DATE: columnFormat = TERADATA_DATE_FORMAT; break; case HOUR: columnFormat = TERADATA_HOUR_FORMAT; break; case SIMPLE: break; default: log.error("Watermark type {} not recognized", watermarkType.toString()); } return columnFormat; } @Override public String getHourPredicateCondition(String column, long value, String valueFormat, String operator) { log.debug("Getting hour predicate for Teradata"); String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, TERADATA_HOUR_FORMAT); return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'"; } @Override public String getDatePredicateCondition(String column, long value, String valueFormat, String operator) { log.debug("Getting date predicate for Teradata"); String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, TERADATA_DATE_FORMAT); return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'"; } @Override public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator) { log.debug("Getting timestamp predicate for Teradata"); String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, TERADATA_TIMESTAMP_FORMAT); return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'"; } @Override public JsonArray getSchema(CommandOutput<?, ?> response) throws SchemaException, IOException { log.debug("Extract schema from resultset"); ResultSet resultset = null; Iterator<ResultSet> itr = (Iterator<ResultSet>) response.getResults().values().iterator(); if (itr.hasNext()) { resultset = itr.next(); } else { throw new SchemaException("Failed to get schema from Teradata - empty schema resultset"); } JsonArray fieldJsonArray = new JsonArray(); try { Schema schema = new Schema(); ResultSetMetaData rsmd = resultset.getMetaData(); String columnName, columnTypeName; for (int i = 1; i <= 
rsmd.getColumnCount(); i++) { columnName = rsmd.getColumnName(i); columnTypeName = rsmd.getColumnTypeName(i); schema.setColumnName(columnName); List<String> mapSymbols = null; JsonObject newDataType = this.convertDataType(columnName, columnTypeName, ELEMENT_DATA_TYPE, mapSymbols); schema.setDataType(newDataType); schema.setLength(rsmd.getColumnDisplaySize(i)); schema.setPrecision(rsmd.getPrecision(i)); schema.setScale(rsmd.getScale(i)); schema.setNullable(rsmd.isNullable(i) == ResultSetMetaData.columnNullable); schema.setComment(rsmd.getColumnLabel(i)); String jsonStr = gson.toJson(schema); JsonObject obj = gson.fromJson(jsonStr, JsonObject.class).getAsJsonObject(); fieldJsonArray.add(obj); } } catch (Exception e) { throw new SchemaException("Failed to get schema from Teradaa; error - " + e.getMessage(), e); } return fieldJsonArray; } }
3,483
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/JdbcSpecificLayer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; /** * Interface for JDBC sources * * @author nveeramr */ public interface JdbcSpecificLayer { /** * Url for JDBC connection * * @return url */ public String getConnectionUrl(); /** * Sample record count specified in input query * * @param query * @return record count */ public long extractSampleRecordCountFromQuery(String query); /** * Remove sample clause in input query * * @param query * @return query */ public String removeSampleClauseFromQuery(String query); /** * Remove sample clause * * @return sample clause */ public String constructSampleClause(); }
3,484
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/MysqlExtractor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.source.jdbc;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;

import com.google.common.collect.ImmutableMap;
import com.google.gson.JsonElement;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import org.apache.gobblin.source.workunit.WorkUnit;

import lombok.extern.slf4j.Slf4j;

/**
 * MySql extractor using JDBC protocol.
 *
 * <p>Builds the metadata, watermark, count, and data-extraction queries for a
 * MySQL source, and implements the MySQL-specific pieces of
 * {@link JdbcSpecificLayer}: {@code LIMIT}-based sampling, backtick-delimited
 * identifiers, and the {@code jdbc:mysql://host:port/db} connection URL.
 *
 * @author nveeramr
 */
@Slf4j
public class MysqlExtractor extends JdbcExtractor {
  // Formats used to render watermark values into MySQL predicate literals.
  private static final String MYSQL_TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss";
  private static final String MYSQL_DATE_FORMAT = "yyyy-MM-dd";
  private static final String MYSQL_HOUR_FORMAT = "HH";
  // Sentinel meaning "no sample clause found / sampling disabled".
  private static final long SAMPLERECORDCOUNT = -1;

  public MysqlExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
  }

  /**
   * Builds an hour-granularity watermark predicate, e.g. {@code col >= '07'}.
   */
  @Override
  public String getHourPredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting hour predicate for Mysql");
    String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, MYSQL_HOUR_FORMAT);
    return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'";
  }

  /**
   * Builds a date-granularity watermark predicate, e.g. {@code col >= '2020-01-01'}.
   */
  @Override
  public String getDatePredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting date predicate for Mysql");
    String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, MYSQL_DATE_FORMAT);
    return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'";
  }

  /**
   * Builds a timestamp-granularity watermark predicate with second precision.
   */
  @Override
  public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting timestamp predicate for Mysql");
    String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, MYSQL_TIMESTAMP_FORMAT);
    return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'";
  }

  /**
   * Builds the commands that fetch column metadata from
   * {@code information_schema.COLUMNS} for the given table.
   *
   * <p>When {@code SOURCE_QUERYBASED_PROMOTE_UNSIGNED_INT_TO_BIGINT} is set,
   * unsigned {@code int} columns are reported as {@code bigint} (an unsigned
   * 32-bit value does not fit a signed {@code int}); otherwise the LIKE
   * parameter is 'dummy' so the promotion branch never matches.
   */
  @Override
  public List<Command> getSchemaMetadata(String schema, String entity) throws SchemaException {
    log.debug("Build query to get schema");
    List<Command> commands = new ArrayList<>();

    boolean promoteUnsignedInt = this.workUnitState.getPropAsBoolean(
        ConfigurationKeys.SOURCE_QUERYBASED_PROMOTE_UNSIGNED_INT_TO_BIGINT,
        ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_PROMOTE_UNSIGNED_INT_TO_BIGINT);
    String promoteUnsignedIntQueryParam = promoteUnsignedInt ? "% unsigned" : "dummy";
    // Positional parameters: column_type LIKE (?), table_name = ?, table_schema = ?
    List<String> queryParams = Arrays.asList(promoteUnsignedIntQueryParam, entity, schema);

    String metadataSql = "select "
        + " col.column_name, "
        + " case when col.column_type like (?) and col.data_type = 'int' then 'bigint' else col.data_type end"
        + " as data_type,"
        + " case when CHARACTER_OCTET_LENGTH is null then 0 else 0 end as length, "
        + " case when NUMERIC_PRECISION is null then 0 else NUMERIC_PRECISION end as precesion, "
        + " case when NUMERIC_SCALE is null then 0 else NUMERIC_SCALE end as scale, "
        + " case when is_nullable='NO' then 'false' else 'true' end as nullable, "
        + " '' as format, "
        + " case when col.column_comment is null then '' else col.column_comment end as comment "
        + " from information_schema.COLUMNS col "
        + " WHERE upper(col.table_name)=upper(?) AND upper(col.table_schema)=upper(?) "
        + " order by col.ORDINAL_POSITION ";

    commands.add(getCommand(metadataSql, JdbcCommand.JdbcCommandType.QUERY));
    commands.add(getCommand(queryParams, JdbcCommand.JdbcCommandType.QUERYPARAMS));
    return commands;
  }

  /**
   * Builds the command that selects the max watermark value, by swapping the
   * extract query's projection for {@code max(...)} and substituting the
   * watermark predicate placeholder (1=1 when no predicates exist).
   */
  @Override
  public List<Command> getHighWatermarkMetadata(String schema, String entity, String watermarkColumn,
      List<Predicate> predicateList) throws HighWatermarkException {
    log.debug("Build query to get high watermark");
    List<Command> commands = new ArrayList<>();

    String columnProjection = "max(" + Utils.getCoalesceColumnNames(watermarkColumn) + ")";
    String watermarkFilter = this.concatPredicates(predicateList);
    String query = this.getExtractSql();
    if (StringUtils.isBlank(watermarkFilter)) {
      watermarkFilter = "1=1";
    }

    query = query.replace(this.getOutputColumnProjection(), columnProjection)
        .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    return commands;
  }

  /**
   * Builds the source record-count query. When a sample (LIMIT) clause is in
   * play, the count is taken over a subquery so the LIMIT caps the counted
   * rows instead of capping the single COUNT row.
   */
  @Override
  public List<Command> getCountMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws RecordCountException {
    log.debug("Build query to get source record count");
    List<Command> commands = new ArrayList<>();

    String columnProjection = "COUNT(1)";
    String watermarkFilter = this.concatPredicates(predicateList);
    String query = this.getExtractSql();
    if (StringUtils.isBlank(watermarkFilter)) {
      watermarkFilter = "1=1";
    }

    query = query.replace(this.getOutputColumnProjection(), columnProjection)
        .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    String sampleFilter = this.constructSampleClause();
    query = query + sampleFilter;

    if (!StringUtils.isEmpty(sampleFilter)) {
      // Wrap so LIMIT applies to the rows being counted, not the count result.
      query = "SELECT COUNT(1) FROM (" + query.replace(" COUNT(1) ", " 1 ") + ")temp";
    }
    commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    return commands;
  }

  /**
   * Builds the data-extraction command. The fetch size is pinned to
   * {@link Integer#MIN_VALUE}, which MySQL Connector/J interprets as
   * "stream results row by row" instead of buffering the whole result set
   * in memory.
   */
  @Override
  public List<Command> getDataMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws DataRecordException {
    log.debug("Build query to extract data");
    List<Command> commands = new ArrayList<>();
    int fetchsize = Integer.MIN_VALUE;

    String watermarkFilter = this.concatPredicates(predicateList);
    String query = this.getExtractSql();
    if (StringUtils.isBlank(watermarkFilter)) {
      watermarkFilter = "1=1";
    }

    query = query.replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    String sampleFilter = this.constructSampleClause();
    query = query + sampleFilter;

    commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    commands.add(getCommand(fetchsize, JdbcCommand.JdbcCommandType.FETCHSIZE));
    return commands;
  }

  /**
   * Builds the MySQL JDBC URL {@code jdbc:mysql://host:port/db[?props]}.
   */
  @Override
  public String getConnectionUrl() {
    String host = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    String port = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PORT);
    String database = this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SCHEMA);
    String url = "jdbc:mysql://" + host.trim() + ":" + port + "/" + database.trim();

    String connProps = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PROPERTIES, "");
    if (Boolean.valueOf(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_IS_COMPRESSION_ENABLED))) {
      // NOTE(review): when connProps is empty this appends "" and the
      // useCompression flag is silently dropped; looks like the ternary's
      // branches are inverted relative to the intent — confirm before relying
      // on compression being enabled.
      connProps = connProps + (connProps.isEmpty() ? "" : "&" + "useCompression=true");
    }
    if (!connProps.isEmpty()) {
      url = url + "?" + connProps;
    }
    return url;
  }

  /**
   * {@inheritDoc}
   *
   * <p>MySQL BIT columns are left as-is rather than converted to boolean.
   */
  @Override
  protected boolean convertBitToBoolean() {
    return false;
  }

  /** Maps MySQL column type names to the extractor's canonical type names. */
  @Override
  public Map<String, String> getDataTypeMap() {
    Map<String, String> dataTypeMap = ImmutableMap.<String, String> builder().put("tinyint", "int")
        .put("smallint", "int").put("mediumint", "int").put("int", "int").put("bigint", "long").put("float", "float")
        .put("double", "double").put("decimal", "double").put("numeric", "double").put("date", "date")
        .put("timestamp", "timestamp").put("datetime", "timestamp").put("time", "time").put("char", "string")
        .put("varchar", "string").put("varbinary", "string").put("text", "string").put("tinytext", "string")
        .put("mediumtext", "string").put("longtext", "string").put("blob", "string").put("tinyblob", "string")
        .put("mediumblob", "string").put("longblob", "string").put("enum", "string").build();
    return dataTypeMap;
  }

  /**
   * Returns the source-side date/time format for the given watermark type,
   * or null for unsupported types (after logging an error).
   */
  @Override
  public String getWatermarkSourceFormat(WatermarkType watermarkType) {
    String columnFormat = null;
    switch (watermarkType) {
      case TIMESTAMP:
        columnFormat = "yyyy-MM-dd HH:mm:ss";
        break;
      case DATE:
        columnFormat = "yyyy-MM-dd";
        break;
      default:
        log.error("Watermark type " + watermarkType.toString() + " not recognized");
    }
    return columnFormat;
  }

  /**
   * Parses the record count out of a trailing {@code LIMIT n} clause.
   * Returns {@link #SAMPLERECORDCOUNT} (-1) when the query is blank, has no
   * LIMIT, or the value after LIMIT is not a plain number (then the bad value
   * is logged and ignored).
   */
  @Override
  public long extractSampleRecordCountFromQuery(String query) {
    if (StringUtils.isBlank(query)) {
      return SAMPLERECORDCOUNT;
    }

    long recordcount = SAMPLERECORDCOUNT;
    String limit = null;
    String inputQuery = query.toLowerCase();
    int limitIndex = inputQuery.indexOf(" limit ");
    if (limitIndex > 0) {
      // +7 skips over the matched " limit " token itself.
      limit = query.substring(limitIndex + 7).trim();
    }

    if (StringUtils.isNotBlank(limit)) {
      try {
        recordcount = Long.parseLong(limit);
      } catch (Exception e) {
        log.error("Ignoring incorrct limit value in input query:" + limit);
      }
    }
    return recordcount;
  }

  /**
   * Removes everything from the LIMIT keyword to the end of the query.
   *
   * <p>NOTE(review): this searches for {@code " limit"} (no trailing space)
   * while {@link #extractSampleRecordCountFromQuery} searches for
   * {@code " limit "} — the two can disagree on queries where "limit" appears
   * as a prefix of another word; confirm this asymmetry is intentional.
   */
  @Override
  public String removeSampleClauseFromQuery(String query) {
    if (StringUtils.isBlank(query)) {
      return null;
    }
    String limitString = "";
    String inputQuery = query.toLowerCase();
    int limitIndex = inputQuery.indexOf(" limit");
    if (limitIndex > 0) {
      limitString = query.substring(limitIndex);
    }
    return query.replace(limitString, "");
  }

  /**
   * Builds a {@code " limit N"} clause from the configured sample record
   * count, or an empty string when sampling is disabled (negative count).
   */
  @Override
  public String constructSampleClause() {
    long sampleRowCount = this.getSampleRecordCount();
    if (sampleRowCount >= 0) {
      return " limit " + sampleRowCount;
    }
    return "";
  }

  /** MySQL delimits identifiers with backticks. */
  @Override
  public String getLeftDelimitedIdentifier() {
    return this.enableDelimitedIdentifier ? "`" : "";
  }

  /** MySQL delimits identifiers with backticks. */
  @Override
  public String getRightDelimitedIdentifier() {
    return this.enableDelimitedIdentifier ? "`" : "";
  }

  /**
   * Not supported for JDBC-based extraction; records are pulled via the
   * command pipeline instead, so this always returns null.
   */
  @Override
  public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit,
      List<Predicate> predicateList) throws IOException {
    return null;
  }
}
3,485
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/JdbcCommand.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.source.jdbc;

import org.apache.gobblin.source.extractor.extract.CommandType;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import com.google.common.base.Joiner;

import org.apache.gobblin.source.extractor.extract.Command;

/**
 * JDBC command with command type and parameters to execute a command.
 *
 * <p>A command is built via {@link #build(Collection, CommandType)}, which
 * appends the given parameters and records the command type. Instances are
 * mutable and not thread-safe.
 *
 * @author nveeramr
 */
public class JdbcCommand implements Command {
  /**
   * JDBC command types. QUERY carries SQL text, QUERYPARAMS carries the
   * positional parameters for it, FETCHSIZE carries a JDBC fetch-size hint.
   */
  public enum JdbcCommandType implements CommandType {
    QUERY, QUERYPARAMS, FETCHSIZE, DELETE, UPDATE, SELECT
  }

  private final List<String> params;
  private JdbcCommandType cmd;

  public JdbcCommand() {
    this.params = new ArrayList<>();
  }

  /**
   * Returns the live parameter list (callers share and may mutate it).
   */
  @Override
  public List<String> getParams() {
    return this.params;
  }

  /**
   * Returns the command type, or null if {@link #build} has not been called.
   */
  @Override
  public CommandType getCommandType() {
    return this.cmd;
  }

  /**
   * Renders as {@code TYPE:param1:param2:...}; null parameters are skipped.
   */
  @Override
  public String toString() {
    Joiner joiner = Joiner.on(":").skipNulls();
    // String.valueOf guards against the NPE the previous code threw when
    // toString() was invoked before build() had set the command type.
    return String.valueOf(this.cmd) + ":" + joiner.join(this.params);
  }

  /**
   * Appends the given parameters and sets the command type.
   *
   * @param params parameters to append to any already present
   * @param cmd command type; must be a {@link JdbcCommandType}
   * @return this command, for chaining
   * @throws ClassCastException if {@code cmd} is not a {@link JdbcCommandType}
   */
  @Override
  public Command build(Collection<String> params, CommandType cmd) {
    this.params.addAll(params);
    this.cmd = (JdbcCommandType) cmd;
    return this;
  }
}
3,486
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/OracleExtractor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.gobblin.source.jdbc;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import java.util.regex.Matcher;

import lombok.extern.slf4j.Slf4j;

import org.apache.commons.lang3.StringUtils;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.gson.JsonElement;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;
import org.apache.gobblin.source.workunit.WorkUnit;

/**
 * Oracle extractor using JDBC protocol.
 *
 * <p>Builds the metadata, watermark, count, and data-extraction queries for an
 * Oracle source, implementing the Oracle-specific pieces of
 * {@link JdbcSpecificLayer}: {@code ROWNUM}-based sampling, the
 * {@code jdbc:oracle:thin:@host:port:sid} URL, and
 * {@code to_timestamp}-based watermark predicates.
 *
 * @author bjvanov, jinhyukchang, Lorand Bendig
 */
@Slf4j
public class OracleExtractor extends JdbcExtractor {

  // Formats used to render watermark values into Oracle predicate literals.
  private static final String TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss";
  private static final String HOUR_FORMAT = "HH";
  // Sentinel meaning "no sample clause found / sampling disabled".
  private static final long SAMPLERECORDCOUNT = -1;

  // Matches an Oracle sample condition "rownum <= N" anywhere in the query;
  // group(1) is the whole condition, group(2) the numeric limit.
  private static final Pattern SAMPLE_CLAUSE_PATTERN =
      Pattern.compile(".*(rownum\\s*<\\s*=\\s*(\\d+)).*", Pattern.CASE_INSENSITIVE);
  // Always-true predicate used as a placeholder filter.
  private static final String EMPTY_CONDITION = "1=1";

  // Prepared statement that reads column metadata from the Oracle data
  // dictionary (all_tab_columns joined with all_col_comments).
  private static final String METADATA_SCHEMA_PSTMT_FORMAT =
      "SELECT " + "column_name, " + "LOWER(data_type), " + "NVL(data_length, 0) as length, "
          + "NVL(data_precision, 0) as precesion, " + "NVL(data_scale, 0) as scale, "
          + "CASE NVL(NULLABLE, 'Y') WHEN 'Y' THEN 1 ELSE 0 END as nullable, " + "' ' as format, "
          + "NVL(comments, ' ') as \"COMMENT\", " + "column_id " + "FROM " + "all_tab_columns "
          + "JOIN all_col_comments USING (owner, table_name, column_name) " + "WHERE UPPER(owner) = (?) "
          + "AND UPPER(table_name) = (?) " + "ORDER BY " + "column_id, column_name";

  // Maps Oracle column type names to the extractor's canonical type names.
  private static Map<String, String> dataTypeMap = ImmutableMap.<String, String> builder()
      .put("char", "string")
      .put("varchar2", "string")
      .put("varchar", "string")
      .put("nchar", "string")
      .put("nvarchar2", "string")
      .put("nclob", "string")
      .put("clob", "string")
      .put("long", "string")
      .put("raw", "string")
      .put("long raw", "string")
      .put("rowid", "string")
      .put("urowid", "string")
      .put("xmltype", "string")
      .put("smallint", "int")
      .put("int", "int")
      .put("integer", "int")
      .put("bigint", "long")
      .put("binary_float", "float")
      .put("binary_double", "double")
      .put("float", "double")
      .put("number", "double")
      .put("numeric", "double")
      .put("dec", "double")
      .put("decimal", "double")
      .put("real", "double")
      .put("double precision", "double")
      .put("date", "date")
      .put("interval year", "date")
      .put("interval day", "timestamp")
      .put("datetime", "timestamp")
      .put("timestamp", "timestamp")
      .put("timestamp(0)", "timestamp")
      .put("timestamp(1)", "timestamp")
      .put("timestamp(2)", "timestamp")
      .put("timestamp(3)", "timestamp")
      .put("timestamp(4)", "timestamp")
      .put("timestamp(5)", "timestamp")
      .put("timestamp(6)", "timestamp")
      .put("timestamp(7)", "timestamp")
      .put("timestamp(8)", "timestamp")
      .put("timestamp(9)", "timestamp")
      .put("timestamp with time zone", "timestamp")
      .put("timezone with local timezone", "timestamp")
      .build();

  public OracleExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
  }

  /**
   * Builds the commands that fetch column metadata from the Oracle data
   * dictionary for the given owner/table.
   */
  @Override
  public List<Command> getSchemaMetadata(String schema, String entity) throws SchemaException {
    log.debug("Build query to get schema");
    Preconditions.checkNotNull(schema);
    Preconditions.checkNotNull(entity);
    List<Command> commands = new ArrayList<>();
    commands.add(getCommand(METADATA_SCHEMA_PSTMT_FORMAT, JdbcCommand.JdbcCommandType.QUERY));
    // Positional parameters: UPPER(owner) = ?, UPPER(table_name) = ?
    commands.add(getCommand(Arrays.asList(schema, entity), JdbcCommand.JdbcCommandType.QUERYPARAMS));
    return commands;
  }

  /**
   * Builds the command that selects the max watermark value, by swapping the
   * extract query's projection for {@code max(...)} and substituting the
   * watermark predicate placeholder (1=1 when no predicates exist).
   */
  @Override
  public List<Command> getHighWatermarkMetadata(String schema, String entity, String watermarkColumn,
      List<Predicate> predicateList) throws HighWatermarkException {
    log.debug("Build query to get high watermark");
    List<Command> commands = new ArrayList<>();
    String columnProjection = "max(" + Utils.getCoalesceColumnNames(watermarkColumn) + ")";
    String watermarkFilter = StringUtils.defaultIfBlank(this.concatPredicates(predicateList), EMPTY_CONDITION);
    String query = this.getExtractSql();
    query = query.replace(this.getOutputColumnProjection(), columnProjection)
        .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    return commands;
  }

  /**
   * Builds the source record-count query, injecting the ROWNUM sample
   * condition (if any) and casting the count to Oracle NUMBER.
   */
  @Override
  public List<Command> getCountMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws RecordCountException {
    log.debug("Build query to get source record count");
    List<Command> commands = new ArrayList<>();
    String columnProjection = "count(1)";
    String watermarkFilter = StringUtils.defaultIfBlank(this.concatPredicates(predicateList), EMPTY_CONDITION);
    String query = this.getExtractSql();
    query = query.replace(this.getOutputColumnProjection(), columnProjection)
        .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    query = addSampleQueryPart(query);
    query = castCountQuery(query);
    commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    return commands;
  }

  /**
   * Builds the data-extraction command plus a fetch-size hint taken from
   * {@code SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE} (with default).
   */
  @Override
  public List<Command> getDataMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws DataRecordException {
    log.debug("Build query to extract data");
    List<Command> commands = new ArrayList<>();
    int fetchSize = this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE,
        ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE);
    log.info("Setting jdbc resultset fetch size as " + fetchSize);
    String watermarkFilter = StringUtils.defaultIfBlank(this.concatPredicates(predicateList), EMPTY_CONDITION);
    String query = this.getExtractSql();
    query = query.replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    query = addSampleQueryPart(query);
    commands.add(getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    commands.add(getCommand(fetchSize, JdbcCommand.JdbcCommandType.FETCHSIZE));
    return commands;
  }

  /** Returns the shared Oracle-to-canonical type-name map. */
  @Override
  public Map<String, String> getDataTypeMap() {
    return dataTypeMap;
  }

  /**
   * Parses the record count out of a {@code rownum <= N} condition in the
   * query. Returns {@link #SAMPLERECORDCOUNT} (-1) when the query is blank,
   * has no such condition, or the value is not a plain number (then the bad
   * value is logged and ignored).
   *
   * <p>NOTE(review): the pattern's leading ".*" is greedy, so if the query
   * contains multiple rownum conditions the LAST one wins — confirm that is
   * acceptable.
   */
  @Override
  public long extractSampleRecordCountFromQuery(String query) {
    if (StringUtils.isBlank(query)) {
      return SAMPLERECORDCOUNT;
    }
    long recordcount = SAMPLERECORDCOUNT;
    Matcher matcher = SAMPLE_CLAUSE_PATTERN.matcher(query);
    if (matcher.matches()) {
      String limit = matcher.group(2);
      try {
        recordcount = Long.parseLong(limit);
      } catch (Exception e) {
        log.error("Ignoring incorrct limit value in input query:" + limit);
      }
    }
    return recordcount;
  }

  /**
   * Neutralizes the {@code rownum <= N} condition by replacing it with the
   * always-true {@code 1=1}, preserving the rest of the WHERE clause.
   * Returns null for a blank query.
   */
  @Override
  public String removeSampleClauseFromQuery(String query) {
    if (StringUtils.isBlank(query)) {
      return null;
    }
    Matcher matcher = SAMPLE_CLAUSE_PATTERN.matcher(query);
    if (matcher.matches()) {
      query = query.replace(matcher.group(1), EMPTY_CONDITION);
    }
    return query;
  }

  /**
   * Builds a {@code " rownum <= N"} condition from the configured sample
   * record count, or an empty string when sampling is disabled (negative).
   */
  @Override
  public String constructSampleClause() {
    long sampleRowCount = this.getSampleRecordCount();
    if (sampleRowCount >= 0) {
      return " rownum <= " + sampleRowCount;
    }
    return "";
  }

  /**
   * Not supported for JDBC-based extraction; records are pulled via the
   * command pipeline instead, so this always returns null.
   */
  @Override
  public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit,
      List<Predicate> predicateList) throws IOException {
    return null;
  }

  /**
   * Builds the thin-driver URL {@code jdbc:oracle:thin:@host[:port]:sid}.
   */
  @Override
  public String getConnectionUrl() {
    String host = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    String port = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PORT);
    String sid = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_SID).trim();
    String url = "jdbc:oracle:thin:@" + host.trim() + (StringUtils.isEmpty(port) ? "" : ":" + port) + ":" + sid;
    return url;
  }

  /**
   * Returns the source-side date/time format for the given watermark type
   * (TIMESTAMP and DATE share the same format here), null for SIMPLE and
   * unsupported types (logged).
   */
  @Override
  public String getWatermarkSourceFormat(WatermarkType watermarkType) {
    String columnFormat = null;
    switch (watermarkType) {
      case TIMESTAMP:
        columnFormat = "YYYY-MM-dd HH:mm:ss";
        break;
      case DATE:
        columnFormat = "YYYY-MM-dd HH:mm:ss";
        break;
      case SIMPLE:
        break;
      default:
        log.error("Watermark type " + watermarkType.toString() + " not recognized");
    }
    return columnFormat;
  }

  /**
   * Builds an hour-granularity watermark predicate, e.g. {@code col >= '07'}.
   */
  @Override
  public String getHourPredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting hour predicate for Oracle");
    String formattedValue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, HOUR_FORMAT);
    return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedValue + "'";
  }

  /**
   * Builds a date-granularity watermark predicate; delegates to the timestamp
   * form because Oracle DATE carries a time component.
   */
  @Override
  public String getDatePredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting date predicate for Oracle");
    return getTimestampPredicateCondition(column, value, valueFormat, operator);
  }

  /**
   * Oracle timestamp can go up to 9 digit precision. Existing behavior of
   * Gobblin on extractor is to support up to second and Oracle extractor will
   * keep the same behavior.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.source.extractor.extract.ProtocolSpecificLayer#getTimestampPredicateCondition(java.lang.String, long, java.lang.String, java.lang.String)
   */
  @Override
  public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting timestamp predicate for Oracle");
    String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, TIMESTAMP_FORMAT);
    return "cast(" + Utils.getCoalesceColumnNames(column) + " as timestamp(0)) " // Support up to second.
        + operator + " " + "to_timestamp('" + formattedvalue + "', 'YYYY-MM-DD HH24:MI:SS')";
  }

  /**
   * Injects the ROWNUM sample condition right after the query's first
   * (lowercase) "where" token.
   *
   * <p>NOTE(review): replaceFirst matches the literal, lowercase "where"
   * anywhere in the text — an uppercase WHERE, or "where" occurring inside an
   * identifier or string literal, would be mishandled; confirm callers always
   * produce a lowercase where-clause.
   */
  private String addSampleQueryPart(String query) {
    String sampleClause = constructSampleClause();
    if (sampleClause.isEmpty()) {
      return query;
    }
    String where = "where";
    query = query.replaceFirst(where, String.format("where %s and ", sampleClause));
    return query;
  }

  /**
   * Wraps or rewrites the count query so the result is cast to Oracle NUMBER:
   * when sampling, the count is taken over a subquery so ROWNUM caps the
   * counted rows; otherwise the count expression is cast in place.
   */
  private String castCountQuery(String query) {
    if (this.getSampleRecordCount() >= 0) {
      return "select cast(count(1) as number) from (" + query.replace(" count(1) ", " * ") + ")temp";
    } else {
      return query.replace("count(1)", "cast(count(1) as number)");
    }
  }
}
3,487
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/SqlServerExtractor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.jdbc;

import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.exception.HighWatermarkException;
import org.apache.gobblin.source.extractor.utils.Utils;
import org.apache.gobblin.source.extractor.watermark.Predicate;
import org.apache.gobblin.source.extractor.watermark.WatermarkType;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;

import com.google.common.collect.ImmutableMap;
import com.google.gson.JsonElement;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.exception.RecordCountException;
import org.apache.gobblin.source.extractor.exception.SchemaException;
import org.apache.gobblin.source.extractor.extract.Command;
import org.apache.gobblin.source.workunit.WorkUnit;

import lombok.extern.slf4j.Slf4j;

/**
 * SqlServer extractor using JDBC protocol.
 *
 * <p>Builds the SQL Server-specific query text ({@code TOP} sampling, {@code [...]}
 * delimited identifiers, INFORMATION_SCHEMA metadata queries) and hands the resulting
 * {@link Command}s back to the generic {@link JdbcExtractor} machinery for execution.
 *
 * @author nveeramr
 */
@Slf4j
public class SqlServerExtractor extends JdbcExtractor {
  // Formats used to render watermark values into SQL predicate literals.
  private static final String TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss";
  private static final String DATE_FORMAT = "yyyy-MM-dd";
  private static final String HOUR_FORMAT = "HH";
  // Sentinel meaning "no sample record count found in the query".
  private static final long SAMPLERECORDCOUNT = -1;

  public SqlServerExtractor(WorkUnitState workUnitState) {
    super(workUnitState);
  }

  /** Left identifier quote for SQL Server ({@code [}), empty when quoting is disabled. */
  @Override
  public String getLeftDelimitedIdentifier() {
    return this.enableDelimitedIdentifier ? "[" : "";
  }

  /** Right identifier quote for SQL Server ({@code ]}), empty when quoting is disabled. */
  @Override
  public String getRightDelimitedIdentifier() {
    return this.enableDelimitedIdentifier ? "]" : "";
  }

  /**
   * Builds the INFORMATION_SCHEMA.COLUMNS query that describes {@code schema.entity}.
   *
   * @param schema source schema (matched case-insensitively against table_schema)
   * @param entity source table (matched case-insensitively against table_name)
   * @return a QUERY command plus a QUERYPARAMS command carrying {@code (entity, schema)}
   *         as the two positional bind parameters, in that order
   */
  @Override
  public List<Command> getSchemaMetadata(String schema, String entity) throws SchemaException {
    log.debug("Build query to get schema");
    List<Command> commands = new ArrayList<>();
    // Bind-parameter order must match the two '?' placeholders: table_name first, then table_schema.
    List<String> queryParams = Arrays.asList(entity, schema);
    // NOTE(review): the "length" CASE yields 0 in BOTH branches, so character length is always
    // reported as 0 — likely intended "else CHARACTER_OCTET_LENGTH"; confirm with downstream
    // consumers before changing. Also note the alias "precesion" (sic) — renaming it could break
    // any consumer that reads columns by name rather than position; verify first.
    String metadataSql = "select " + " col.column_name, " + " col.data_type, "
        + " case when CHARACTER_OCTET_LENGTH is null then 0 else 0 end as length, "
        + " case when NUMERIC_PRECISION is null then 0 else NUMERIC_PRECISION end as precesion, "
        + " case when NUMERIC_SCALE is null then 0 else NUMERIC_SCALE end as scale, "
        + " case when is_nullable='NO' then 'false' else 'true' end as nullable, "
        + " '' as format, " + " '' as comment " + " from information_schema.COLUMNS col "
        + " WHERE upper(col.table_name)=upper(?) AND upper(col.table_schema)=upper(?) "
        + " order by col.ORDINAL_POSITION ";
    commands.add(JdbcExtractor.getCommand(metadataSql, JdbcCommand.JdbcCommandType.QUERY));
    commands.add(JdbcExtractor.getCommand(queryParams, JdbcCommand.JdbcCommandType.QUERYPARAMS));
    return commands;
  }

  /**
   * Builds the query that fetches max(watermark) from the extract query, substituting the
   * column projection with a MAX() aggregate and the watermark placeholder with the
   * concatenated predicates (or the always-true "1=1" when there are none).
   */
  @Override
  public List<Command> getHighWatermarkMetadata(String schema, String entity, String watermarkColumn,
      List<Predicate> predicateList) throws HighWatermarkException {
    log.debug("Build query to get high watermark");
    List<Command> commands = new ArrayList<>();
    String columnProjection = "max(" + Utils.getCoalesceColumnNames(watermarkColumn) + ")";
    String watermarkFilter = this.concatPredicates(predicateList);
    String query = this.getExtractSql();
    if (StringUtils.isBlank(watermarkFilter)) {
      // No predicates: keep the WHERE clause syntactically valid.
      watermarkFilter = "1=1";
    }
    query = query.replace(this.getOutputColumnProjection(), columnProjection)
        .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    commands.add(JdbcExtractor.getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    return commands;
  }

  /**
   * Builds the COUNT(1) query used to estimate the source record count for this work unit.
   * When a TOP sample clause is configured, the count is taken over a sampled subquery so
   * it reflects the sample size rather than the full table.
   */
  @Override
  public List<Command> getCountMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws RecordCountException {
    log.debug("Build query to get source record count");
    List<Command> commands = new ArrayList<>();
    String columnProjection = "COUNT(1)";
    String watermarkFilter = this.concatPredicates(predicateList);
    String query = this.getExtractSql();
    if (StringUtils.isBlank(watermarkFilter)) {
      watermarkFilter = "1=1";
    }
    query = query.replace(this.getOutputColumnProjection(), columnProjection)
        .replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    String sampleFilter = this.constructSampleClause();
    if (!StringUtils.isEmpty(sampleFilter)) {
      // Rewrite "SELECT COUNT(1) ..." into "SELECT COUNT(1) FROM (SELECT top N 1 as col ...)temp"
      // so the sample limit is applied before counting. Relies on the exact " COUNT(1) " spacing.
      String col = sampleFilter + " 1 as col ";
      query = "SELECT COUNT(1) FROM (" + query.replace(" COUNT(1) ", col) + ")temp";
    }
    commands.add(JdbcExtractor.getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    return commands;
  }

  /**
   * Builds the data-extraction query for this work unit: applies watermark predicates,
   * prepends the TOP sample clause to the column projection when sampling is enabled, and
   * attaches the configured JDBC result-set fetch size.
   */
  @Override
  public List<Command> getDataMetadata(String schema, String entity, WorkUnit workUnit, List<Predicate> predicateList)
      throws DataRecordException {
    log.debug("Build query to extract data");
    List<Command> commands = new ArrayList<>();
    int fetchSize = this.workUnitState.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE,
        ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_JDBC_RESULTSET_FETCH_SIZE);
    log.info("Setting jdbc resultset fetch size as " + fetchSize);
    String watermarkFilter = this.concatPredicates(predicateList);
    String query = this.getExtractSql();
    if (StringUtils.isBlank(watermarkFilter)) {
      watermarkFilter = "1=1";
    }
    query = query.replace(ConfigurationKeys.DEFAULT_SOURCE_QUERYBASED_WATERMARK_PREDICATE_SYMBOL, watermarkFilter);
    String sampleFilter = this.constructSampleClause();
    if (!StringUtils.isEmpty(sampleFilter)) {
      // SQL Server sampling is "SELECT top N <cols>", so the clause goes before the projection.
      String columnProjection = this.getOutputColumnProjection();
      String newColumnProjection = sampleFilter + " " + columnProjection;
      query = query.replace(columnProjection, newColumnProjection);
    }
    commands.add(JdbcExtractor.getCommand(query, JdbcCommand.JdbcCommandType.QUERY));
    commands.add(JdbcExtractor.getCommand(fetchSize, JdbcCommand.JdbcCommandType.FETCHSIZE));
    return commands;
  }

  /**
   * Maps SQL Server column types (lower-cased names) to Gobblin's internal type names.
   * NOTE(review): "float" -> "float" and "real" -> "double" looks inverted relative to
   * SQL Server's storage sizes (float is 8-byte, real is 4-byte) — confirm before changing,
   * since widening is lossless and consumers may depend on the current mapping.
   */
  @Override
  public Map<String, String> getDataTypeMap() {
    Map<String, String> dataTypeMap = ImmutableMap.<String, String> builder().put("smallint", "int")
        .put("tinyint", "int").put("int", "int").put("bigint", "long").put("decimal", "double").put("numeric", "double")
        .put("float", "float").put("real", "double").put("money", "double").put("smallmoney", "double")
        .put("binary", "string").put("varbinary", "string").put("char", "string").put("varchar", "string")
        .put("nchar", "string").put("nvarchar", "string").put("text", "string").put("ntext", "string")
        .put("image", "string").put("hierarchyid", "string").put("uniqueidentifier", "string").put("date", "date")
        .put("datetime", "timestamp").put("datetime2", "timestamp").put("datetimeoffset", "timestamp")
        .put("smalldatetime", "timestamp").put("time", "time").put("bit", "boolean").build();
    return dataTypeMap;
  }

  /** Not supported for the JDBC path; extraction goes through the Command-based API. */
  @Override
  public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String entity, WorkUnit workUnit,
      List<Predicate> predicateList) throws IOException {
    return null;
  }

  /**
   * Builds the jdbc:sqlserver connection URL from host/port plus optional connection
   * parameters. For backwards compatibility the (overloaded, discouraged)
   * SOURCE_QUERYBASED_SCHEMA property may supply the database name when no explicit
   * connection parameters are configured.
   */
  @Override
  public String getConnectionUrl() {
    String host = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
    String port = this.workUnitState.getProp(ConfigurationKeys.SOURCE_CONN_PORT);
    String parameters = this.workUnitState.getProp(ConfigurationKeys.SQL_SERVER_CONNECTION_PARAMETERS);
    // For backwards compatibility, need to allow using SOURCE_QUERYBASED_SCHEMA to specify db name
    // This is highly discouraged, as this property is overloaded
    String database = this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_SCHEMA);
    if (parameters == null && !StringUtils.isEmpty(database)) {
      parameters = "databaseName=" + database;
    }
    String url = "jdbc:sqlserver://" + host.trim() + ":" + port
        + (StringUtils.isEmpty(parameters) ? "" : (";" + parameters.trim()));
    return url;
  }

  /**
   * Extracts the numeric value of a " top " clause from the given query, or
   * SAMPLERECORDCOUNT (-1) when the query is blank or has no parsable top-value.
   */
  @Override
  public long extractSampleRecordCountFromQuery(String query) {
    if (StringUtils.isBlank(query)) {
      return SAMPLERECORDCOUNT;
    }
    long recordcount = SAMPLERECORDCOUNT;
    String inputQuery = query.toLowerCase();
    int limitStartIndex = inputQuery.indexOf(" top ");
    int limitEndIndex = getLimitEndIndex(inputQuery, limitStartIndex);
    if (limitStartIndex > 0) {
      // +5 skips past " top " (5 characters) to the start of the value.
      String limitValue = query.substring(limitStartIndex + 5, limitEndIndex);
      try {
        recordcount = Long.parseLong(limitValue);
      } catch (Exception e) {
        // NOTE(review): "incorrct" (sic) — message typo left intact in this doc pass.
        log.error("Ignoring incorrct limit value in input query:" + limitValue);
      }
    }
    return recordcount;
  }

  /**
   * Returns the query with its " top N" sample clause removed (the original query when
   * no clause is found), or null for a blank input.
   */
  @Override
  public String removeSampleClauseFromQuery(String query) {
    if (StringUtils.isBlank(query)) {
      return null;
    }
    String outputQuery = query;
    String inputQuery = query.toLowerCase();
    int limitStartIndex = inputQuery.indexOf(" top ");
    int limitEndIndex = getLimitEndIndex(inputQuery, limitStartIndex);
    if (limitStartIndex > 0) {
      outputQuery = query.substring(0, limitStartIndex) + " " + query.substring(limitEndIndex);
    }
    return outputQuery;
  }

  /**
   * Returns the index one past the end of the " top N" value, or -1 when limitStartIndex
   * is not positive. Scans from just after " top " (offset +5), skipping leading spaces,
   * then consuming characters until the first non-digit after the value has started.
   * Note: the first non-space character is treated as the start of the value even if it
   * is not a digit; the caller's Long.parseLong catches that case.
   */
  private static int getLimitEndIndex(String inputQuery, int limitStartIndex) {
    int limitEndIndex = -1;
    if (limitStartIndex > 0) {
      limitEndIndex = limitStartIndex + 5;
      String remainingQuery = inputQuery.substring(limitEndIndex);
      boolean numFound = false;
      int pos = 0;
      while (pos < remainingQuery.length()) {
        char ch = remainingQuery.charAt(pos);
        if (ch == ' ' && !numFound) {
          // Skip spaces between "top" and the value.
          pos++;
          continue;
        } else if (numFound && (!Character.isDigit(ch))) {
          // First non-digit after the value ends the scan.
          break;
        } else {
          numFound = true;
        }
        pos++;
      }
      limitEndIndex = limitEndIndex + pos;
    }
    return limitEndIndex;
  }

  /** SQL Server sample clause: " top N" when a sample count is configured, else empty. */
  @Override
  public String constructSampleClause() {
    long sampleRowCount = this.getSampleRecordCount();
    if (sampleRowCount >= 0) {
      return " top " + sampleRowCount;
    }
    return "";
  }

  /**
   * Format pattern for rendering a watermark of the given type as a SQL literal;
   * null (after an error log) for unrecognized types.
   */
  @Override
  public String getWatermarkSourceFormat(WatermarkType watermarkType) {
    String columnFormat = null;
    switch (watermarkType) {
      case TIMESTAMP:
        columnFormat = "yyyy-MM-dd HH:mm:ss";
        break;
      case DATE:
        columnFormat = "yyyy-MM-dd";
        break;
      default:
        log.error("Watermark type " + watermarkType.toString() + " not recognized");
    }
    return columnFormat;
  }

  /** Builds an hour-granularity predicate, e.g. {@code col >= '13'}. */
  @Override
  public String getHourPredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting hour predicate for Sqlserver");
    String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, HOUR_FORMAT);
    return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'";
  }

  /** Builds a date-granularity predicate, e.g. {@code col >= '2024-01-01'}. */
  @Override
  public String getDatePredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting date predicate for Sqlserver");
    String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, DATE_FORMAT);
    return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'";
  }

  /** Builds a timestamp-granularity predicate, e.g. {@code col >= '2024-01-01 00:00:00'}. */
  @Override
  public String getTimestampPredicateCondition(String column, long value, String valueFormat, String operator) {
    log.debug("Getting timestamp predicate for Sqlserver");
    String formattedvalue = Utils.toDateTimeFormat(Long.toString(value), valueFormat, TIMESTAMP_FORMAT);
    return Utils.getCoalesceColumnNames(column) + " " + operator + " '" + formattedvalue + "'";
  }
}
3,488
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/SqlQueryUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.source.jdbc; import static com.google.common.base.Preconditions.checkArgument; import com.google.common.base.Strings; /** * A helper class for handling SQL queries as strings. This is a temp solution until something more * robust is implemented. * * @see https://github.com/linkedin/gobblin/issues/236 */ public class SqlQueryUtils { /** * Add a new predicate(filter condition) to the query. The method will add a "where" clause if * none exists. Otherwise, it will add the condition as a conjunction (and). * * <b>Note that this method is rather limited. It works only if there are no other clauses after * "where"</b> * * @param query the query string to modify * @param predicateCond the predicate to add to the query * @return query the new query string * @throws IllegalArgumentException if the predicate cannot be added because of additional clauses */ public static String addPredicate(String query, String predicateCond) { if (Strings.isNullOrEmpty(predicateCond)) { return query; } String normalizedQuery = query.toLowerCase().trim(); checkArgument(normalizedQuery.contains(" from "), "query does not contain 'from': " + query); checkArgument(! 
normalizedQuery.contains(" by "), "query contains 'order by' or 'group by': " + query); checkArgument(! normalizedQuery.contains(" having "), "query contains 'having': " + query); checkArgument(! normalizedQuery.contains(" limit "), "query contains 'limit': " + query); String keyword = " where "; if (normalizedQuery.contains(keyword)) { keyword = " and "; } query = query + keyword + "(" + predicateCond + ")"; return query; } /** * Cast a string representation of a boolean value to a boolean primitive. * Used especially for Oracle representation of booleans as varchar2(1) * Returns true for values such as [t|true|yes|1] and false for [f|false|no]. * If a boolean value cannot be trivially parsed, false is returned. * * @param fieldValue the value of the boolean string field */ public static boolean castToBoolean(String fieldValue) { String lowerField = fieldValue.toLowerCase(); switch(lowerField) { case "y": return true; case "n": return false; case "true": return true; case "false": return false; case "t": return true; case "f": return false; case "yes": return true; case "no": return false; case "0": return false; case "1": return true; } return false; } }
3,489
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/JdbcCommandOutput.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.jdbc;

import java.sql.ResultSet;
import java.util.HashMap;
import java.util.Map;

import org.apache.gobblin.source.extractor.extract.CommandOutput;


/**
 * Holds the output of executed JDBC commands, mapping each {@link JdbcCommand}
 * to the {@link ResultSet} it produced.
 *
 * @author nveeramr
 */
public class JdbcCommandOutput implements CommandOutput<JdbcCommand, ResultSet> {

  // Command -> result-set mapping; starts empty and may be replaced wholesale
  // via storeResults or populated incrementally via put.
  private Map<JdbcCommand, ResultSet> results = new HashMap<>();

  /** Replaces the current mapping with the given one. */
  @Override
  public void storeResults(Map<JdbcCommand, ResultSet> results) {
    this.results = results;
  }

  /** Returns the live (mutable) command-to-result mapping. */
  @Override
  public Map<JdbcCommand, ResultSet> getResults() {
    return this.results;
  }

  /** Records the result set produced by a single command. */
  @Override
  public void put(JdbcCommand key, ResultSet value) {
    this.results.put(key, value);
  }
}
3,490
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/source/jdbc/JdbcProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.source.jdbc;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.gobblin.tunnel.Tunnel;

import com.zaxxer.hikari.HikariDataSource;

/**
 * Create JDBC data source backed by a Hikari connection pool, optionally routing
 * traffic through a local {@link Tunnel} when a proxy host/port is supplied.
 *
 * @author nveeramr
 */
public class JdbcProvider extends HikariDataSource {
  // Monotonic counter used only to give each pool instance a unique name.
  private static final AtomicInteger POOL_NUM = new AtomicInteger(0);
  // Non-null only when a proxy was configured; closed together with the pool.
  private Tunnel tunnel;

  // If extract type is not provided then consider it as a default type
  public JdbcProvider(String driver, String connectionUrl, String user, String password, int numconn, int timeout) {
    this.connect(driver, connectionUrl, user, password, numconn, timeout, "DEFAULT", null, -1);
  }

  public JdbcProvider(String driver, String connectionUrl, String user, String password, int numconn, int timeout,
      String type) {
    this.connect(driver, connectionUrl, user, password, numconn, timeout, type, null, -1);
  }

  public JdbcProvider(String driver, String connectionUrl, String user, String password, int numconn, int timeout,
      String type, String proxyHost, int proxyPort) {
    this.connect(driver, connectionUrl, user, password, numconn, timeout, type, proxyHost, proxyPort);
  }

  /**
   * Configures this data source. When both proxyHost and proxyPort are set, the
   * remote host/port are parsed out of the connection URL, a local tunnel is opened
   * through the proxy, and the URL is rewritten to point at 127.0.0.1:tunnelPort.
   *
   * @param numconn maximum pool size
   * @param timeout connection timeout in milliseconds
   * @param type extract type tag (currently unused by this method)
   * @throws IllegalStateException if the tunnel to the remote host cannot be opened
   */
  public void connect(String driver, String connectionUrl, String user, String password, int numconn, int timeout,
      String type, String proxyHost, int proxyPort) {
    if (proxyHost != null && proxyPort > 0) {
      String remoteHost = "";
      int remotePort = 0;
      // TODO make connection Url parsing much more robust -- some connections URLs can have colons and slashes in the
      // weirdest places
      // Parsing assumes the shape scheme://host:port/..., e.g. jdbc:mysql://host:3306/db.
      int hostStart = connectionUrl.indexOf("://") + 3;
      int portStart = connectionUrl.indexOf(":", hostStart);
      remoteHost = connectionUrl.substring(hostStart, portStart);
      remotePort = Integer.decode(connectionUrl.substring(portStart + 1, connectionUrl.indexOf("/", portStart)));

      try {
        this.tunnel = Tunnel.build(remoteHost, remotePort, proxyHost, proxyPort);
        int tunnelPort = this.tunnel.getPort();
        //mangle connectionUrl, replace hostname with localhost -- hopefully the hostname is not needed!!!
        String newConnectionUrl =
            connectionUrl.replaceFirst(remoteHost, "127.0.0.1").replaceFirst(":" + remotePort, ":" + tunnelPort);
        connectionUrl = newConnectionUrl;
      } catch (IOException ioe) {
        throw new IllegalStateException("Failed to open tunnel to remote host " + remoteHost + ":" + remotePort
            + " via proxy " + proxyHost + ":" + proxyPort, ioe);
      }
    }

    this.setPoolName("HikariPool-" + POOL_NUM.incrementAndGet() + "-" + getClass().getSimpleName());
    this.setDriverClassName(driver);
    this.setUsername(user);
    this.setPassword(password);
    this.setJdbcUrl(connectionUrl);
    // TODO: revisit following verification of successful connection pool migration:
    //   whereas `o.a.commons.dbcp.BasicDataSource` defaults min idle conns to 0, hikari defaults to 10.
    //   perhaps non-zero would have desirable runtime perf, but anything >0 currently fails unit tests (even 1!);
    //   (so experimenting with a higher number would first require adjusting tests)
    this.setMinimumIdle(0);
    this.setMaximumPoolSize(numconn);
    this.setConnectionTimeout(timeout);
  }

  /**
   * Closes the pool first, then the tunnel (if any), so in-flight connections are
   * drained before their transport disappears.
   */
  @Override
  public synchronized void close() {
    super.close();
    if (this.tunnel != null) {
      this.tunnel.close();
    }
  }
}
3,491
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/JdbcWriterBuilder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcEntrySchema;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;
import org.apache.gobblin.writer.initializer.JdbcWriterInitializer;
import org.apache.gobblin.writer.initializer.WriterInitializer;

import java.io.IOException;


/**
 * Builder for {@link JdbcWriter} instances. Only materialized work-unit collections
 * are supported; true work-unit streams are rejected.
 */
public class JdbcWriterBuilder extends DataWriterBuilder<JdbcEntrySchema, JdbcEntryData> {

  /** Creates the JDBC writer configured by this builder. */
  @Override
  public DataWriter<JdbcEntryData> build() throws IOException {
    return new JdbcWriter(this);
  }

  /**
   * Creates the initializer that prepares staging tables for the given work units.
   *
   * @throws RuntimeException if the work units are a stream that cannot be materialized
   */
  public WriterInitializer getInitializer(State state, WorkUnitStream workUnits, int branches, int branchId) {
    JdbcWriterCommandsFactory factory = new JdbcWriterCommandsFactory();
    // Guard clause: streams that cannot be materialized have no collection to hand over.
    if (!workUnits.isSafeToMaterialize()) {
      throw new RuntimeException(JdbcWriterBuilder.class.getName() + " does not support work unit streams.");
    }
    return new JdbcWriterInitializer(state, workUnits.getMaterializedWorkUnitCollection(), factory, branches,
        branchId);
  }
}
3,492
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/JdbcWriter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer;

import org.apache.gobblin.publisher.JdbcPublisher;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.jdbc.DataSourceBuilder;
import org.apache.gobblin.writer.commands.JdbcWriterCommands;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;

import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;

import javax.sql.DataSource;

import org.slf4j.LoggerFactory;
import org.slf4j.Logger;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

/**
 * Uses JDBC to persist data in task level.
 * For interaction with JDBC underlying RDBMS, it uses JdbcWriterCommands.
 *
 * JdbcWriter will open single transaction for its simplicity on failure handling.
 *
 * Scalability issue may come with long transaction; it can be overcome by increasing partition
 * which will make transaction short.
 * (Data with 200M record was tested with single transaction and it had no problem in MySQL 5.6.)
 *
 * Having one transaction per writer has its tradeoffs:
 *   Pro: Simple on failure handling as you can just simply execute rollback on failure. Basically,
 *        it will revert back to previous state so that the job can retry the task.
 *   Con: It can lead up to long lived transaction and it can face scalability issue. (Not enough
 *        disk space for transaction log, number of record limit on one transaction (200M for
 *        Postgre sql), etc)
 *
 * During the design meeting, we've discussed that long transaction could be a problem. One
 * suggestion came out during the meeting was commit periodically. This will address long
 * transaction problem, but we also discussed it would be hard on failure handling.
 * Currently, Gobblin does task level retry on failure and there were three options we've
 * discussed. (There was no silver bullet solution from the meeting.) Note that these are all
 * with committing periodically.
 *   Revert to previous state: For writer, this will be delete the record it wrote.
 *     For JdbcWriter, it could use its own staging table or could share staging table with other
 *     writer. As staging table can be passed by user where we don't have control of, not able to
 *     add partition information, it is hard to revert back to previous state for all cases.
 *   Ignore duplicate: The idea is to use Upsert to perform insert or update.
 *     As it needs to check the current existence in the dataset, it is expected to show
 *     performance degradation. Also, possibility of duplicate entry was also discussed.
 *   Water mark: In order to use water mark in task level, writer needs to send same order when
 *     retried which is not guaranteed.
 */
public class JdbcWriter implements DataWriter<JdbcEntryData> {
  private static final Logger LOG = LoggerFactory.getLogger(JdbcWriter.class);
  public static final String ENABLE_AUTO_COMMIT = "jdbcWriter.enableAutoCommit";

  // Connection held for the writer's whole lifetime; one transaction unless auto-commit enabled.
  private final Connection conn;
  private final State state;
  private final JdbcWriterCommands commands;
  private final String databaseName;
  // Staging table this writer inserts into; publisher moves data to the final table.
  private final String tableName;

  // Set on any write/commit failure; close() rolls back when true.
  private boolean failed;
  private long recordWrittenCount;

  /**
   * Builds a writer from the builder's destination properties: resolves the branch-specific
   * database and staging-table names, opens the JDBC connection (auto-commit off by default),
   * and prepares the RDBMS-specific writer commands.
   *
   * @throws RuntimeException wrapping any SQLException from connection setup
   */
  public JdbcWriter(JdbcWriterBuilder builder) {
    this.state = builder.destination.getProperties();
    this.state.setProp(ConfigurationKeys.FORK_BRANCH_ID_KEY, Integer.toString(builder.branch));

    String databaseTableKey = ForkOperatorUtils.getPropertyNameForBranch(JdbcPublisher.JDBC_PUBLISHER_DATABASE_NAME,
        builder.branches, builder.branch);
    this.databaseName = Preconditions.checkNotNull(this.state.getProp(databaseTableKey),
        "Staging table is missing with key " + databaseTableKey);

    String stagingTableKey = ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_STAGING_TABLE,
        builder.branches, builder.branch);
    this.tableName = Preconditions.checkNotNull(this.state.getProp(stagingTableKey),
        "Staging table is missing with key " + stagingTableKey);

    try {
      this.conn = createConnection();
      // Auto-commit defaults to false: the whole task is one transaction (see class javadoc).
      this.conn.setAutoCommit(this.state.getPropAsBoolean(ENABLE_AUTO_COMMIT, false));
      this.commands = new JdbcWriterCommandsFactory().newInstance(this.state, this.conn);
      this.commands.setConnectionParameters(this.state.getProperties(), this.conn);
    } catch (SQLException e) {
      throw new RuntimeException(e);
    }
  }

  /** Test-only constructor that injects all collaborators directly. */
  @VisibleForTesting
  public JdbcWriter(JdbcWriterCommands commands, State state, String databaseName, String table, Connection conn) {
    this.commands = commands;
    this.state = state;
    this.databaseName = databaseName;
    this.tableName = table;
    this.conn = conn;
  }

  /**
   * Opens a JDBC connection using the publisher's connection settings (URL, driver,
   * credentials). Sharing the publisher's settings is intentional: JdbcWriter is
   * coupled to JdbcPublisher. Pool is capped at a single active connection.
   */
  private Connection createConnection() throws SQLException {
    DataSource dataSource = DataSourceBuilder.builder().url(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_URL))
        .driver(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_DRIVER))
        .userName(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_USERNAME))
        .passWord(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_PASSWORD))
        .cryptoKeyLocation(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_ENCRYPTION_KEY_LOC)).maxActiveConnections(1)
        .state(this.state).build();

    return dataSource.getConnection();
  }

  /**
   * Invokes JdbcWriterCommands.insert. Any failure marks this writer failed so that
   * close() rolls the transaction back.
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.DataWriter#write(java.lang.Object)
   */
  @Override
  public void write(JdbcEntryData record) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Writing " + record);
    }
    try {
      this.commands.insert(this.databaseName, this.tableName, record);
      this.recordWrittenCount++;
    } catch (Exception e) {
      this.failed = true;
      throw new RuntimeException(e);
    }
  }

  /**
   * Flushes JdbcWriterCommands and commits the transaction. A failure here also marks
   * the writer failed, triggering rollback in close().
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.DataWriter#commit()
   */
  @Override
  public void commit() throws IOException {
    try {
      LOG.info("Flushing pending insert.");
      this.commands.flush();
      LOG.info("Commiting transaction.");
      this.conn.commit();
    } catch (Exception e) {
      this.failed = true;
      throw new RuntimeException(e);
    }
  }

  /**
   * Staging table is needed by publisher and won't be cleaned here.
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.DataWriter#cleanup()
   */
  @Override
  public void cleanup() throws IOException {}

  /**
   * If there's a failure, it will execute roll back; the connection is closed in a
   * finally block so it is released even when the rollback itself throws.
   * {@inheritDoc}
   * @see java.io.Closeable#close()
   */
  @Override
  public void close() throws IOException {
    try {
      try {
        if (this.failed && this.conn != null) {
          this.conn.rollback();
        }
      } finally {
        if (this.conn != null) {
          this.conn.close();
        }
      }
    } catch (SQLException e) {
      throw new RuntimeException(e);
    }
  }

  /** Number of records successfully handed to insert() so far. */
  @Override
  public long recordsWritten() {
    return this.recordWrittenCount;
  }

  /**
   * This is not supported for JDBC writer.
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.DataWriter#bytesWritten()
   */
  @Override
  public long bytesWritten() throws IOException {
    return -1L;
  }
}
3,493
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/initializer/JdbcWriterInitializer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer.initializer;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.Locale;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import javax.sql.DataSource;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

import lombok.ToString;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.publisher.JdbcPublisher;
import org.apache.gobblin.source.extractor.JobCommitPolicy;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.ForkOperatorUtils;
import org.apache.gobblin.util.jdbc.DataSourceBuilder;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.Destination.DestinationType;
import org.apache.gobblin.writer.commands.JdbcWriterCommands;
import org.apache.gobblin.writer.commands.JdbcWriterCommandsFactory;

/**
 * Initializer for the JDBC writer. Prepares staging tables (or the final table, when staging is
 * skipped) before any task runs, and cleans them up on {@link #close()}.
 *
 * <p>Instances are stateful: staging tables created by this initializer are remembered so that
 * only those (plus user-supplied staging tables, which are truncated rather than dropped) are
 * cleaned up.
 */
@ToString
public class JdbcWriterInitializer implements WriterInitializer {
  private static final Logger LOG = LoggerFactory.getLogger(JdbcWriterInitializer.class);
  private static final String STAGING_TABLE_FORMAT = "stage_%d";
  private static final int NAMING_STAGING_TABLE_TRIAL = 10;
  private static final Random RANDOM = new Random();

  private final int branches;
  private final int branchId;
  private final State state;
  private final Collection<WorkUnit> workUnits;
  private final JdbcWriterCommandsFactory jdbcWriterCommandsFactory;
  private final String database;
  // Staging table supplied by the user (truncated on close, never dropped).
  private String userCreatedStagingTable;
  // Staging tables created by this initializer (dropped on close).
  private final Set<String> createdStagingTables;

  public JdbcWriterInitializer(State state, Collection<WorkUnit> workUnits) {
    this(state, workUnits, new JdbcWriterCommandsFactory(), 1, 0);
  }

  public JdbcWriterInitializer(State state, Collection<WorkUnit> workUnits,
      JdbcWriterCommandsFactory jdbcWriterCommandsFactory, int branches, int branchId) {
    validateInput(state);

    this.state = state;
    this.workUnits = Lists.newArrayList(workUnits);
    this.branches = branches;
    this.branchId = branchId;
    this.jdbcWriterCommandsFactory = jdbcWriterCommandsFactory;
    this.database = getProp(this.state, JdbcPublisher.JDBC_PUBLISHER_DATABASE_NAME, this.branches, this.branchId);
    this.createdStagingTables = Sets.newHashSet();

    //AbstractJobLauncher assumes that the staging is in HDFS and trying to clean it.
    //As WriterInitializer will clean staging table, we don't need AbstractJobLauncher to clean.
    state.setProp(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, Boolean.toString(true));
  }

  /**
   * Drop staging tables created by this instance and truncate the staging table passed by the
   * user, if any.
   * {@inheritDoc}
   * @see org.apache.gobblin.Initializer#close()
   */
  @Override
  public void close() {
    LOG.info("Closing " + this.getClass().getSimpleName());
    try (Connection conn = createConnection()) {
      JdbcWriterCommands commands = createJdbcWriterCommands(conn);
      for (String stagingTable : this.createdStagingTables) {
        // Log the individual table being dropped (previously logged the whole set).
        LOG.info("Dropping staging table " + stagingTable);
        commands.drop(database, stagingTable);
      }
      if (this.userCreatedStagingTable != null) {
        LOG.info("Truncating staging table " + this.userCreatedStagingTable);
        commands.truncate(database, this.userCreatedStagingTable);
      }
    } catch (SQLException e) {
      throw new RuntimeException("Failed to close", e);
    }
  }

  /**
   * Creating JDBC connection using publisher's connection information. It is OK to use publisher's
   * information as JdbcWriter is coupled with JdbcPublisher.
   *
   * @return JDBC Connection
   * @throws SQLException if a connection cannot be established
   */
  @VisibleForTesting
  public Connection createConnection() throws SQLException {
    DataSource dataSource = DataSourceBuilder.builder().url(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_URL))
        .driver(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_DRIVER))
        .userName(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_USERNAME))
        .passWord(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_PASSWORD))
        .cryptoKeyLocation(this.state.getProp(JdbcPublisher.JDBC_PUBLISHER_ENCRYPTION_KEY_LOC)).maxActiveConnections(1)
        .state(this.state).build();

    return dataSource.getConnection();
  }

  /**
   * Creates a uniquely named staging table modeled on the destination table's structure.
   * Retries up to {@link #NAMING_STAGING_TABLE_TRIAL} times on naming collisions or creation
   * failures.
   *
   * @return the name of the created staging table
   * @throws SQLException on metadata lookup failure
   * @throws RuntimeException if no staging table could be created within the retry budget
   */
  private String createStagingTable(Connection conn, JdbcWriterCommands commands) throws SQLException {
    String destTableKey = ForkOperatorUtils.getPropertyNameForBranch(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME,
        this.branches, this.branchId);
    String destinationTable = this.state.getProp(destTableKey);
    if (StringUtils.isEmpty(destinationTable)) {
      throw new IllegalArgumentException(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME + " is required for "
          + this.getClass().getSimpleName() + " for branch " + this.branchId);
    }

    String stagingTable = null;
    for (int i = 0; i < NAMING_STAGING_TABLE_TRIAL; i++) {
      String tmp = String.format(STAGING_TABLE_FORMAT, System.nanoTime());
      LOG.info("Check if staging table " + tmp + " exists.");
      // try-with-resources closes the metadata ResultSet (previously leaked).
      try (ResultSet res = conn.getMetaData().getTables(null, database, tmp, new String[] { "TABLE" })) {
        if (!res.next()) {
          LOG.info("Staging table " + tmp + " does not exist. Creating.");
          try {
            commands.createTableStructure(database, destinationTable, tmp);
            // Confirm the table can be dropped later by dropping and recreating it now.
            LOG.info("Test if staging table can be dropped. Test by dropping and Creating staging table.");
            commands.drop(database, tmp);
            commands.createTableStructure(database, destinationTable, tmp);
            stagingTable = tmp;
            break;
          } catch (SQLException e) {
            LOG.warn("Failed to create table. Retrying up to " + NAMING_STAGING_TABLE_TRIAL + " times", e);
          }
        } else {
          LOG.info("Staging table " + tmp + " exists.");
        }
      }
      // Random backoff so concurrent initializers are unlikely to collide again.
      try {
        TimeUnit.MILLISECONDS.sleep(RANDOM.nextInt(1000));
      } catch (InterruptedException e) {
        LOG.info("Sleep has been interrupted.", e);
      }
    }

    if (!StringUtils.isEmpty(stagingTable)) {
      return stagingTable;
    }
    throw new RuntimeException("Failed to create staging table");
  }

  private static String getProp(State state, String key, int branches, int branchId) {
    String forkedKey = ForkOperatorUtils.getPropertyNameForBranch(key, branches, branchId);
    return state.getProp(forkedKey);
  }

  private static boolean getPropAsBoolean(State state, String key, int branches, int branchId) {
    return Boolean.parseBoolean(getProp(state, key, branches, branchId));
  }

  /**
   * Initializes AvroFileJdbcSource for Writer that needs to be happen in single threaded environment.
   * On each branch:
   * 1. Check if user chose to skip the staging table
   * 1.1. If user chose to skip the staging table, and user decided to replace final table, truncate final table.
   * 2. (User didn't choose to skip the staging table.) Check if user passed the staging table.
   * 2.1. Truncate staging table, if requested.
   * 2.2. Confirm if staging table is empty.
   * 3. Create staging table (At this point user hasn't passed the staging table, and not skipping staging table).
   * 3.1. Create staging table with unique name.
   * 3.2. Try to drop and recreate the table to confirm if we can drop it later.
   * 4. Update Workunit state with staging table information.
   */
  @Override
  public void initialize() {
    try (Connection conn = createConnection()) {
      JdbcWriterCommands commands = createJdbcWriterCommands(conn);

      //1. Check if user chose to skip the staging table
      JobCommitPolicy jobCommitPolicy = JobCommitPolicy.getCommitPolicy(this.state);
      boolean isSkipStaging = !JobCommitPolicy.COMMIT_ON_FULL_SUCCESS.equals(jobCommitPolicy);
      if (isSkipStaging) {
        LOG.info("Writer will write directly to destination table as JobCommitPolicy is " + jobCommitPolicy);
      }

      final String publishTable =
          getProp(this.state, JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, this.branches, this.branchId);
      final String stagingTableKey =
          ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_STAGING_TABLE, this.branches, this.branchId);
      String stagingTable = this.state.getProp(stagingTableKey);

      int i = -1;
      for (WorkUnit wu : this.workUnits) {
        i++;

        if (isSkipStaging) {
          // Fixed typo in log message ("staing" -> "staging").
          LOG.info("User chose to skip staging table on branch " + this.branchId + " workunit " + i);
          wu.setProp(stagingTableKey, publishTable);

          if (i == 0) {
            //1.1. If user chose to skip the staging table, and user decided to replace final table, truncate final table.
            if (getPropAsBoolean(this.state, JdbcPublisher.JDBC_PUBLISHER_REPLACE_FINAL_TABLE, this.branches,
                this.branchId)) {
              LOG.info("User chose to replace final table " + publishTable + " on branch " + this.branchId
                  + " workunit " + i);
              commands.truncate(database, publishTable);
            }
          }
          continue;
        }

        //2. (User didn't choose to skip the staging table.) Check if user passed the staging table.
        if (!StringUtils.isEmpty(stagingTable)) {
          LOG.info("Staging table for branch " + this.branchId + " from user: " + stagingTable);
          wu.setProp(stagingTableKey, stagingTable);

          if (i == 0) {
            //2.1. Truncate staging table, if requested.
            if (this.state.getPropAsBoolean(ForkOperatorUtils.getPropertyNameForBranch(
                ConfigurationKeys.WRITER_TRUNCATE_STAGING_TABLE, this.branches, this.branchId), false)) {
              LOG.info("Truncating staging table " + stagingTable + " as requested.");
              commands.truncate(database, stagingTable);
            }

            //2.2. Confirm if staging table is empty.
            if (!commands.isEmpty(database, stagingTable)) {
              LOG.error("Staging table " + stagingTable + " is not empty. Failing.");
              throw new IllegalArgumentException("Staging table " + stagingTable + " should be empty.");
            }
            this.userCreatedStagingTable = stagingTable;
          }
          continue;
        }

        //3. Create staging table (At this point user hasn't passed the staging table, and not skipping staging table).
        LOG.info("Staging table has not been passed from user for branch " + this.branchId + " workunit " + i
            + " . Creating.");
        String createdStagingTable = createStagingTable(conn, commands);
        wu.setProp(stagingTableKey, createdStagingTable);
        this.createdStagingTables.add(createdStagingTable);
        LOG.info("Staging table " + createdStagingTable + " has been created for branchId " + this.branchId
            + " workunit " + i);
      }
    } catch (SQLException e) {
      throw new RuntimeException("Failed with SQL", e);
    }
  }

  private JdbcWriterCommands createJdbcWriterCommands(Connection conn) {
    String destKey = ForkOperatorUtils.getPropertyNameForBranch(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY,
        this.branches, this.branchId);
    String destType = Preconditions.checkNotNull(this.state.getProp(destKey),
        destKey + " is required for underlying JDBC product name");
    // Locale.ROOT avoids locale-dependent case mapping (e.g. Turkish dotless-i) breaking valueOf.
    Destination dest = Destination.of(DestinationType.valueOf(destType.toUpperCase(Locale.ROOT)), this.state);
    return this.jdbcWriterCommandsFactory.newInstance(dest, conn);
  }

  /**
   * 1. User should not define same destination table across different branches.
   * 2. User should not define same staging table across different branches.
   * 3. If commit policy is not full, Gobblin will try to write into final table even there's a failure. This will let
   *    Gobblin to write in task level. However, publish data at job level is true, it contradicts with the behavior
   *    of Gobblin writing in task level. Thus, validate publish data at job level is false if commit policy is not full.
   */
  private static void validateInput(State state) {
    int branches = state.getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1);
    Set<String> publishTables = Sets.newHashSet();
    for (int branchId = 0; branchId < branches; branchId++) {
      String publishTable =
          Preconditions.checkNotNull(getProp(state, JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, branches, branchId),
              JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME + " should not be null.");
      // Set.add returns false on duplicates, which replaces the contains-then-add pattern.
      if (!publishTables.add(publishTable)) {
        throw new IllegalArgumentException(
            "Duplicate " + JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME + " is not allowed across branches");
      }
    }

    Set<String> stagingTables = Sets.newHashSet();
    for (int branchId = 0; branchId < branches; branchId++) {
      String stagingTable = getProp(state, ConfigurationKeys.WRITER_STAGING_TABLE, branches, branchId);
      // Empty/null staging tables are permitted to repeat; only concrete names must be unique.
      if (!StringUtils.isEmpty(stagingTable) && stagingTables.contains(stagingTable)) {
        throw new IllegalArgumentException(
            "Duplicate " + ConfigurationKeys.WRITER_STAGING_TABLE + " is not allowed across branches");
      }
      stagingTables.add(stagingTable);
    }

    JobCommitPolicy policy = JobCommitPolicy.getCommitPolicy(state);
    boolean isPublishJobLevel = state.getPropAsBoolean(ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL,
        ConfigurationKeys.DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL);
    // XOR: COMMIT_ON_FULL_SUCCESS and job-level publish must both be true or both be false.
    if (JobCommitPolicy.COMMIT_ON_FULL_SUCCESS.equals(policy) ^ isPublishJobLevel) {
      throw new IllegalArgumentException("Job commit policy should be only " + JobCommitPolicy.COMMIT_ON_FULL_SUCCESS
          + " when " + ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL + " is true." + " Or Job commit policy should not be "
          + JobCommitPolicy.COMMIT_ON_FULL_SUCCESS + " and " + ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL
          + " is false.");
    }
  }
}
3,494
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/TeradataWriterCommands.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer.commands;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Map;
import java.util.Properties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.ImmutableMap;

import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcType;
import org.apache.gobblin.source.extractor.JobCommitPolicy;

/**
 * The implementation of JdbcWriterCommands for Teradata.
 * It is assumed that the final output table is of type MULTISET without primary index.
 * If primary index need to be defined, it is advised to adapt the staging table creation
 * ({@link #CREATE_TABLE_SQL_FORMAT}) to avoid unnecessary redistribution of data among the AMPs
 * during the insertion to the final table.
 *
 * @author Lorand Bendig
 */
public class TeradataWriterCommands implements JdbcWriterCommands {
  private static final Logger LOG = LoggerFactory.getLogger(TeradataWriterCommands.class);

  private static final String CREATE_TABLE_SQL_FORMAT =
      "CREATE MULTISET TABLE %s.%s AS (SELECT * FROM %s.%s) WITH NO DATA NO PRIMARY INDEX";
  private static final String SELECT_SQL_FORMAT = "SELECT COUNT(*) FROM %s.%s";
  private static final String TRUNCATE_TABLE_FORMAT = "DELETE FROM %s.%s ALL";
  private static final String DROP_TABLE_SQL_FORMAT = "DROP TABLE %s.%s";
  private static final String DBC_COLUMNS_SELECT_SQL_PSTMT =
      "SELECT columnName, columnType FROM dbc.columns WHERE databasename = ? AND tablename = ?";
  private static final String COPY_INSERT_STATEMENT_FORMAT = "INSERT INTO %s.%s SELECT * FROM %s.%s";
  private static final String DELETE_STATEMENT_FORMAT = "DELETE FROM %s.%s";

  private final JdbcBufferedInserter jdbcBufferedWriter;
  private final Connection conn;

  /**
   * @throws IllegalArgumentException if {@code overwriteRecords} is requested — replacing
   *         existing records is not supported for Teradata.
   */
  public TeradataWriterCommands(State state, Connection conn, boolean overwriteRecords) {
    if (overwriteRecords) {
      throw new IllegalArgumentException("Replace existing records is not supported in TeradataWriterCommands");
    }
    this.conn = conn;
    this.jdbcBufferedWriter = new TeradataBufferedInserter(state, conn);
  }

  @Override
  public void setConnectionParameters(Properties properties, Connection conn) throws SQLException {
    // If staging tables are skipped i.e task level and partial commits are allowed to the target table,
    // then transaction handling will be managed by the JDBC driver to avoid deadlocks in the database.
    boolean jobCommitPolicyIsFull =
        JobCommitPolicy.COMMIT_ON_FULL_SUCCESS.equals(JobCommitPolicy.getCommitPolicy(properties));
    boolean publishDataAtJobLevel = Boolean.parseBoolean(properties.getProperty(
        ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL,
        String.valueOf(ConfigurationKeys.DEFAULT_PUBLISH_DATA_AT_JOB_LEVEL)));
    if (jobCommitPolicyIsFull || publishDataAtJobLevel) {
      this.conn.setAutoCommit(false);
    } else {
      LOG.info("Writing without staging tables, transactions are handled by the driver");
    }
  }

  @Override
  public void insert(String databaseName, String table, JdbcEntryData jdbcEntryData) throws SQLException {
    this.jdbcBufferedWriter.insert(databaseName, table, jdbcEntryData);
  }

  @Override
  public void flush() throws SQLException {
    this.jdbcBufferedWriter.flush();
  }

  @Override
  public void createTableStructure(String databaseName, String fromStructure, String targetTableName)
      throws SQLException {
    String sql = String.format(CREATE_TABLE_SQL_FORMAT, databaseName, targetTableName, databaseName, fromStructure);
    execute(sql);
  }

  @Override
  public boolean isEmpty(String database, String table) throws SQLException {
    String sql = String.format(SELECT_SQL_FORMAT, database, table);
    try (PreparedStatement pstmt = this.conn.prepareStatement(sql);
        ResultSet resultSet = pstmt.executeQuery()) {
      // next() instead of first(): the statement is TYPE_FORWARD_ONLY by default, and
      // ResultSet.first() throws SQLException on a forward-only result set.
      if (!resultSet.next()) {
        throw new RuntimeException("Should have received at least one row from SQL " + pstmt);
      }
      return 0 == resultSet.getInt(1);
    }
  }

  @Override
  public void truncate(String database, String table) throws SQLException {
    String sql = String.format(TRUNCATE_TABLE_FORMAT, database, table);
    execute(sql);
  }

  @Override
  public void deleteAll(String database, String table) throws SQLException {
    String deleteSql = String.format(DELETE_STATEMENT_FORMAT, database, table);
    execute(deleteSql);
  }

  @Override
  public void drop(String database, String table) throws SQLException {
    LOG.info("Dropping table " + table);
    String sql = String.format(DROP_TABLE_SQL_FORMAT, database, table);
    execute(sql);
  }

  /**
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.commands.JdbcWriterCommands#retrieveDateColumns(java.sql.Connection, java.lang.String)
   */
  @Override
  public Map<String, JdbcType> retrieveDateColumns(String database, String table) throws SQLException {
    // Teradata dbc.columns type codes: AT = TIME, DA = DATE, TS = TIMESTAMP.
    Map<String, JdbcType> targetDataTypes = ImmutableMap.<String, JdbcType> builder()
        .put("AT", JdbcType.TIME)
        .put("DA", JdbcType.DATE)
        .put("TS", JdbcType.TIMESTAMP)
        .build();

    ImmutableMap.Builder<String, JdbcType> dateColumnsBuilder = ImmutableMap.builder();
    // Scrollable statement is required because the cursor uses first() below.
    try (PreparedStatement pstmt = this.conn.prepareStatement(DBC_COLUMNS_SELECT_SQL_PSTMT,
        ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)) {
      pstmt.setString(1, database);
      pstmt.setString(2, table);
      LOG.info("Retrieving column type information from SQL: " + pstmt);
      try (ResultSet rs = pstmt.executeQuery()) {
        if (!rs.first()) {
          throw new IllegalArgumentException("No result from information_schema.columns");
        }
        do {
          String type = rs.getString("columnType").toUpperCase();
          JdbcType convertedType = targetDataTypes.get(type);
          if (convertedType != null) {
            dateColumnsBuilder.put(rs.getString("columnName"), convertedType);
          }
        } while (rs.next());
      }
    }
    return dateColumnsBuilder.build();
  }

  @Override
  public void copyTable(String databaseName, String from, String to) throws SQLException {
    String sql = String.format(COPY_INSERT_STATEMENT_FORMAT, databaseName, to, databaseName, from);
    execute(sql);
  }

  private void execute(String sql) throws SQLException {
    LOG.info("Executing SQL " + sql);
    try (PreparedStatement pstmt = this.conn.prepareStatement(sql)) {
      pstmt.execute();
    }
  }

  @Override
  public String toString() {
    return String.format("TeradataWriterCommands [bufferedWriter=%s]", this.jdbcBufferedWriter);
  }
}
3,495
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/JdbcBufferedInserter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer.commands;

import static org.apache.gobblin.configuration.ConfigurationKeys.WRITER_PREFIX;

import java.sql.SQLException;

import org.apache.gobblin.converter.jdbc.JdbcEntryData;

/**
 * Buffered insertion into a JDBC RDBMS: entries are accumulated and written out in batches,
 * bounded by batch size, buffer size, and parameter-count limits.
 */
public interface JdbcBufferedInserter {

  // Number of rows per batch insert.
  String WRITER_JDBC_INSERT_BATCH_SIZE = WRITER_PREFIX + ".jdbc.batch_size";
  int DEFAULT_WRITER_JDBC_INSERT_BATCH_SIZE = 30;

  // Size of the in-memory insert buffer, in bytes.
  String WRITER_JDBC_INSERT_BUFFER_SIZE = WRITER_PREFIX + ".jdbc.insert_buffer_size";
  int DEFAULT_WRITER_JDBC_INSERT_BUFFER_SIZE = 1024 * 1024; //1 MBytes
  int MAX_WRITER_JDBC_INSERT_BUFFER_SIZE = 10 * 1024 * 1024; //10 MBytes

  // Maximum number of bound parameters in one statement.
  String WRITER_JDBC_MAX_PARAM_SIZE = WRITER_PREFIX + ".jdbc.insert_max_param_size";
  int DEFAULT_WRITER_JDBC_MAX_PARAM_SIZE = 100000; //MySQL limit

  // Retry policy for failed inserts.
  String WRITER_JDBC_INSERT_RETRY_TIMEOUT = WRITER_PREFIX + ".jdbc.insert_retry_timeout";
  int DEFAULT_WRITER_JDBC_INSERT_RETRY_TIMEOUT = 30; // in seconds
  String WRITER_JDBC_INSERT_RETRY_MAX_ATTEMPT = WRITER_PREFIX + ".jdbc.insert_retry_max_attempt";
  int DEFAULT_WRITER_JDBC_INSERT_RETRY_MAX_ATTEMPT = 5;

  /**
   * Inserts an entry. Depending on the current batch size, buffer size, and param size, the entry
   * is either buffered or immediately written to the underlying JDBC RDBMS.
   *
   * <p>The number of input columns is expected to be equal or smaller than the number of columns
   * in the JDBC table. This prevents unintended outcomes from schema evolution such as an
   * additional column. As the underlying RDBMS can declare constraints on its schema, the writer
   * allows the table to have more columns than the input provides.
   *
   * @param databaseName target database
   * @param table target table
   * @param jdbcEntryData the row to insert
   * @throws SQLException on insertion failure
   */
  void insert(String databaseName, String table, JdbcEntryData jdbcEntryData) throws SQLException;

  /**
   * Flushes all buffered entries into the JDBC RDBMS.
   *
   * @throws SQLException on write failure
   */
  void flush() throws SQLException;
}
3,496
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/MySqlWriterCommands.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer.commands;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Map;
import java.util.Properties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.ImmutableMap;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcType;

/**
 * MySQL-specific implementation of {@link JdbcWriterCommands}. Row insertion is delegated to a
 * {@link MySqlBufferedInserter}; table-level operations (create/truncate/drop/copy) are issued as
 * direct SQL statements.
 */
public class MySqlWriterCommands implements JdbcWriterCommands {
  private static final Logger LOG = LoggerFactory.getLogger(MySqlWriterCommands.class);

  private static final String CREATE_TABLE_SQL_FORMAT = "CREATE TABLE %s.%s LIKE %s.%s";
  private static final String SELECT_SQL_FORMAT = "SELECT COUNT(*) FROM %s.%s";
  private static final String TRUNCATE_TABLE_FORMAT = "TRUNCATE TABLE %s.%s";
  private static final String DROP_TABLE_SQL_FORMAT = "DROP TABLE %s.%s";
  private static final String INFORMATION_SCHEMA_SELECT_SQL_PSTMT =
      "SELECT column_name, column_type FROM information_schema.columns WHERE table_schema = ? AND table_name = ?";
  private static final String COPY_INSERT_STATEMENT_FORMAT = "INSERT INTO %s.%s SELECT * FROM %s.%s";
  private static final String COPY_REPLACE_STATEMENT_FORMAT = "REPLACE INTO %s.%s SELECT * FROM %s.%s";
  private static final String DELETE_STATEMENT_FORMAT = "DELETE FROM %s.%s";

  private final JdbcBufferedInserter jdbcBufferedWriter;
  private final Connection conn;
  // When true, copyTable uses REPLACE INTO so existing rows with matching keys are overwritten.
  private final boolean overwriteRecords;

  public MySqlWriterCommands(State state, Connection conn, boolean overwriteRecords) {
    this.conn = conn;
    this.jdbcBufferedWriter = new MySqlBufferedInserter(state, conn, overwriteRecords);
    this.overwriteRecords = overwriteRecords;
  }

  /** MySQL needs no extra connection parameters; intentionally a no-op. */
  @Override
  public void setConnectionParameters(Properties properties, Connection conn) throws SQLException {
  }

  @Override
  public void insert(String databaseName, String table, JdbcEntryData jdbcEntryData) throws SQLException {
    this.jdbcBufferedWriter.insert(databaseName, table, jdbcEntryData);
  }

  @Override
  public void flush() throws SQLException {
    this.jdbcBufferedWriter.flush();
  }

  @Override
  public void createTableStructure(String databaseName, String fromStructure, String targetTableName)
      throws SQLException {
    execute(String.format(CREATE_TABLE_SQL_FORMAT, databaseName, targetTableName, databaseName, fromStructure));
  }

  @Override
  public boolean isEmpty(String database, String table) throws SQLException {
    String countQuery = String.format(SELECT_SQL_FORMAT, database, table);
    try (PreparedStatement pstmt = this.conn.prepareStatement(countQuery);
        ResultSet rs = pstmt.executeQuery()) {
      if (!rs.next()) {
        throw new RuntimeException("Should have received at least one row from SQL " + pstmt);
      }
      return 0 == rs.getInt(1);
    }
  }

  @Override
  public void truncate(String database, String table) throws SQLException {
    execute(String.format(TRUNCATE_TABLE_FORMAT, database, table));
  }

  @Override
  public void deleteAll(String database, String table) throws SQLException {
    execute(String.format(DELETE_STATEMENT_FORMAT, database, table));
  }

  @Override
  public void drop(String database, String table) throws SQLException {
    LOG.info("Dropping table " + table);
    execute(String.format(DROP_TABLE_SQL_FORMAT, database, table));
  }

  /**
   * https://dev.mysql.com/doc/connector-j/en/connector-j-reference-type-conversions.html
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.commands.JdbcWriterCommands#retrieveDateColumns(java.sql.Connection, java.lang.String)
   */
  @Override
  public Map<String, JdbcType> retrieveDateColumns(String database, String table) throws SQLException {
    // MySQL column types that map onto JDBC temporal types.
    Map<String, JdbcType> temporalTypes = ImmutableMap.<String, JdbcType> builder()
        .put("DATE", JdbcType.DATE)
        .put("DATETIME", JdbcType.TIMESTAMP)
        .put("TIME", JdbcType.TIME)
        .put("TIMESTAMP", JdbcType.TIMESTAMP)
        .build();

    ImmutableMap.Builder<String, JdbcType> result = ImmutableMap.builder();
    try (PreparedStatement pstmt = this.conn.prepareStatement(INFORMATION_SCHEMA_SELECT_SQL_PSTMT)) {
      pstmt.setString(1, database);
      pstmt.setString(2, table);
      LOG.info("Retrieving column type information from SQL: " + pstmt);
      try (ResultSet rs = pstmt.executeQuery()) {
        boolean hasRow = rs.next();
        if (!hasRow) {
          throw new IllegalArgumentException("No result from information_schema.columns");
        }
        while (hasRow) {
          JdbcType mapped = temporalTypes.get(rs.getString("column_type").toUpperCase());
          if (mapped != null) {
            result.put(rs.getString("column_name"), mapped);
          }
          hasRow = rs.next();
        }
      }
    }
    return result.build();
  }

  @Override
  public void copyTable(String databaseName, String from, String to) throws SQLException {
    // Chooses between INSERT and REPLACE logic based on the job configurations
    String template = this.overwriteRecords ? COPY_REPLACE_STATEMENT_FORMAT : COPY_INSERT_STATEMENT_FORMAT;
    execute(String.format(template, databaseName, to, databaseName, from));
  }

  private void execute(String sql) throws SQLException {
    LOG.info("Executing SQL " + sql);
    try (PreparedStatement pstmt = this.conn.prepareStatement(sql)) {
      pstmt.execute();
    }
  }

  @Override
  public String toString() {
    return String.format("MySqlWriterCommands [bufferedWriter=%s]", this.jdbcBufferedWriter);
  }
}
3,497
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/PostgresWriterCommands.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gobblin.writer.commands;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Map;
import java.util.Properties;

import com.google.common.collect.ImmutableMap;

import lombok.extern.slf4j.Slf4j;

import org.apache.gobblin.configuration.State;
import org.apache.gobblin.converter.jdbc.JdbcEntryData;
import org.apache.gobblin.converter.jdbc.JdbcType;

/**
 * The implementation of JdbcWriterCommands for Postgres. Row insertion is delegated to a
 * {@link PostgresBufferedInserter}; table-level operations are issued as direct SQL.
 */
@Slf4j
public class PostgresWriterCommands implements JdbcWriterCommands {
  private static final String CREATE_TABLE_SQL_FORMAT = "CREATE TABLE %s.%s (LIKE %s.%s)";
  private static final String SELECT_SQL_FORMAT = "SELECT COUNT(*) FROM %s.%s";
  private static final String TRUNCATE_TABLE_FORMAT = "TRUNCATE TABLE %s.%s";
  private static final String DROP_TABLE_SQL_FORMAT = "DROP TABLE %s.%s";
  private static final String INFORMATION_SCHEMA_SELECT_SQL_PSTMT =
      "SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = ? AND table_name = ?";
  private static final String COPY_INSERT_STATEMENT_FORMAT = "INSERT INTO %s.%s SELECT * FROM %s.%s";
  private static final String DELETE_STATEMENT_FORMAT = "DELETE FROM %s.%s";

  private final JdbcBufferedInserter jdbcBufferedWriter;
  private final Connection conn;

  /**
   * @throws IllegalArgumentException if {@code overwriteRecords} is requested — replacing
   *         existing records is not supported for Postgres.
   */
  public PostgresWriterCommands(State state, Connection conn, boolean overwriteRecords) {
    if (overwriteRecords) {
      throw new IllegalArgumentException("Replace existing records is not supported in PostgresWriterCommands");
    }
    this.conn = conn;
    this.jdbcBufferedWriter = new PostgresBufferedInserter(state, conn);
  }

  /** Postgres needs no extra connection parameters; intentionally a no-op. */
  @Override
  public void setConnectionParameters(Properties properties, Connection conn) throws SQLException {
  }

  @Override
  public void insert(String databaseName, String table, JdbcEntryData jdbcEntryData) throws SQLException {
    this.jdbcBufferedWriter.insert(databaseName, table, jdbcEntryData);
  }

  @Override
  public void flush() throws SQLException {
    this.jdbcBufferedWriter.flush();
  }

  @Override
  public void createTableStructure(String databaseName, String fromStructure, String targetTableName)
      throws SQLException {
    String sql = String.format(CREATE_TABLE_SQL_FORMAT, databaseName, targetTableName, databaseName, fromStructure);
    execute(sql);
  }

  @Override
  public boolean isEmpty(String database, String table) throws SQLException {
    String sql = String.format(SELECT_SQL_FORMAT, database, table);
    try (PreparedStatement pstmt = this.conn.prepareStatement(sql);
        ResultSet resultSet = pstmt.executeQuery()) {
      // next() instead of first(): this statement is TYPE_FORWARD_ONLY, and the PostgreSQL
      // driver throws SQLException when first() is called on a non-scrollable ResultSet.
      if (!resultSet.next()) {
        throw new RuntimeException("Should have received at least one row from SQL " + pstmt);
      }
      return 0 == resultSet.getInt(1);
    }
  }

  @Override
  public void truncate(String database, String table) throws SQLException {
    String sql = String.format(TRUNCATE_TABLE_FORMAT, database, table);
    execute(sql);
  }

  @Override
  public void deleteAll(String database, String table) throws SQLException {
    String deleteSql = String.format(DELETE_STATEMENT_FORMAT, database, table);
    execute(deleteSql);
  }

  @Override
  public void drop(String database, String table) throws SQLException {
    log.info("Dropping table " + table);
    String sql = String.format(DROP_TABLE_SQL_FORMAT, database, table);
    execute(sql);
  }

  /**
   * https://documentation.progress.com/output/DataDirect/DataDirectCloud/index.html#page/queries/postgresql-data-types.html
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.commands.JdbcWriterCommands#retrieveDateColumns(java.sql.Connection, java.lang.String)
   */
  @Override
  public Map<String, JdbcType> retrieveDateColumns(String database, String table) throws SQLException {
    Map<String, JdbcType> targetDataTypes = ImmutableMap.<String, JdbcType>builder()
        .put("DATE", JdbcType.DATE)
        .put("TIME WITH TIME ZONE", JdbcType.TIME)
        .put("TIME WITHOUT TIME ZONE", JdbcType.TIME)
        .put("TIMESTAMP WITH TIME ZONE", JdbcType.TIMESTAMP)
        .put("TIMESTAMP WITHOUT TIME ZONE", JdbcType.TIMESTAMP)
        .build();

    ImmutableMap.Builder<String, JdbcType> dateColumnsBuilder = ImmutableMap.builder();
    // Scrollable statement is required here because the cursor uses first() below.
    try (PreparedStatement pstmt = this.conn.prepareStatement(INFORMATION_SCHEMA_SELECT_SQL_PSTMT,
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)) {
      pstmt.setString(1, database);
      pstmt.setString(2, table);
      log.info("Retrieving column type information from SQL: " + pstmt);
      try (ResultSet rs = pstmt.executeQuery()) {
        if (!rs.first()) {
          throw new IllegalArgumentException("No result from information_schema.columns");
        }
        do {
          String type = rs.getString("data_type").toUpperCase();
          JdbcType convertedType = targetDataTypes.get(type);
          if (convertedType != null) {
            dateColumnsBuilder.put(rs.getString("column_name"), convertedType);
          }
        } while (rs.next());
      }
    }
    return dateColumnsBuilder.build();
  }

  @Override
  public void copyTable(String databaseName, String from, String to) throws SQLException {
    String sql = String.format(COPY_INSERT_STATEMENT_FORMAT, databaseName, to, databaseName, from);
    execute(sql);
  }

  private void execute(String sql) throws SQLException {
    log.info("Executing SQL " + sql);
    try (PreparedStatement pstmt = this.conn.prepareStatement(sql)) {
      pstmt.execute();
    }
  }

  @Override
  public String toString() {
    return String.format("PostgresWriterCommands [bufferedWriter=%s]", this.jdbcBufferedWriter);
  }
}
3,498
0
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer
Create_ds/gobblin/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/TeradataBufferedInserter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.writer.commands; import java.sql.BatchUpdateException; import java.sql.Connection; import java.sql.ParameterMetaData; import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Maps; import org.apache.gobblin.configuration.State; import org.apache.gobblin.converter.jdbc.JdbcEntryData; import org.apache.gobblin.converter.jdbc.JdbcEntryDatum; import lombok.ToString; /** * The implementation of JdbcBufferedInserter for Teradata. * Writing is done by executing {@link JdbcBufferedInserter#WRITER_JDBC_INSERT_BATCH_SIZE} sized batch inserts. 
* * @author Lorand Bendig * */ @ToString public class TeradataBufferedInserter extends BaseJdbcBufferedInserter { private static final Logger LOG = LoggerFactory.getLogger(TeradataBufferedInserter.class); private Map<Integer, Integer> columnPosSqlTypes; public TeradataBufferedInserter(State state, Connection conn) { super(state, conn); } @Override protected boolean insertBatch(PreparedStatement pstmt) throws SQLException { for (JdbcEntryData pendingEntry : TeradataBufferedInserter.this.pendingInserts) { int i = 1; for (JdbcEntryDatum datum : pendingEntry) { Object value = datum.getVal(); if (value != null) { pstmt.setObject(i, value); } else { // Column type is needed for null value insertion pstmt.setNull(i, columnPosSqlTypes.get(i)); } i++; } pstmt.addBatch(); pstmt.clearParameters(); } if (LOG.isDebugEnabled()) { LOG.debug("Executing SQL " + pstmt); } int[] execStatus = pstmt.executeBatch(); // Check status explicitly if driver continues batch insertion upon failure for (int status : execStatus) { if (status == Statement.EXECUTE_FAILED) { throw new BatchUpdateException("Batch insert failed.", execStatus); } } return true; } @Override protected String createPrepareStatementStr(int batchSize) { final String VALUE_FORMAT = "(%s)"; StringBuilder sb = new StringBuilder(this.insertStmtPrefix); String values = String.format(VALUE_FORMAT, JOINER_ON_COMMA.useForNull("?").join(new String[this.columnNames.size()])); sb.append(values); return sb.append(';').toString(); } @Override protected void initializeBatch(String databaseName, String table) throws SQLException { super.initializeBatch(databaseName, table); this.columnPosSqlTypes = getColumnPosSqlTypes(); } /** * Creates a mapping between column positions and their data types * @return A map containing the position of the columns along with their data type as value */ private Map<Integer, Integer> getColumnPosSqlTypes() { try { final Map<Integer, Integer> columnPosSqlTypes = Maps.newHashMap(); ParameterMetaData pMetaData 
= this.insertPstmtForFixedBatch.getParameterMetaData(); for (int i = 1; i <= pMetaData.getParameterCount(); i++) { columnPosSqlTypes.put(i, pMetaData.getParameterType(i)); } return columnPosSqlTypes; } catch (SQLException e) { throw new RuntimeException("Cannot retrieve columns types for batch insert", e); } } }
3,499