index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/DelegatingHttpClientConnectionManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.http.HttpClientConnection;
import org.apache.http.conn.ConnectionRequest;
import org.apache.http.conn.HttpClientConnectionManager;
import org.apache.http.conn.routing.HttpRoute;
import org.apache.http.protocol.HttpContext;
/**
* Helper class to decorate HttpClientConnectionManager instances.
*/
public class DelegatingHttpClientConnectionManager implements HttpClientConnectionManager {

  /** Underlying connection manager that every call is forwarded to. */
  protected final HttpClientConnectionManager fallbackConnManager;

  /**
   * Wraps the given connection manager; all {@link HttpClientConnectionManager}
   * operations on this instance are forwarded to it unchanged.
   *
   * @param delegate the connection manager to forward calls to
   */
  public DelegatingHttpClientConnectionManager(HttpClientConnectionManager delegate) {
    this.fallbackConnManager = delegate;
  }

  @Override
  public void releaseConnection(HttpClientConnection connection, Object newState, long validDuration,
      TimeUnit timeUnit) {
    this.fallbackConnManager.releaseConnection(connection, newState, validDuration, timeUnit);
  }

  @Override
  public void connect(HttpClientConnection connection, HttpRoute route, int connectTimeout, HttpContext context)
      throws IOException {
    this.fallbackConnManager.connect(connection, route, connectTimeout, context);
  }

  @Override
  public void upgrade(HttpClientConnection connection, HttpRoute route, HttpContext context) throws IOException {
    this.fallbackConnManager.upgrade(connection, route, context);
  }

  @Override
  public void routeComplete(HttpClientConnection connection, HttpRoute route, HttpContext context) throws IOException {
    this.fallbackConnManager.routeComplete(connection, route, context);
  }

  @Override
  public void closeIdleConnections(long idleTime, TimeUnit timeUnit) {
    this.fallbackConnManager.closeIdleConnections(idleTime, timeUnit);
  }

  @Override
  public void closeExpiredConnections() {
    this.fallbackConnManager.closeExpiredConnections();
  }

  @Override
  public void shutdown() {
    this.fallbackConnManager.shutdown();
  }

  @Override
  public ConnectionRequest requestConnection(HttpRoute route, Object state) {
    return this.fallbackConnManager.requestConnection(route, state);
  }
}
| 3,100 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/HttpWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import org.apache.gobblin.writer.DataWriter;
import java.io.IOException;
/**
 * Builder that creates {@link HttpWriter} instances for plain-text payloads.
 */
public class HttpWriterBuilder extends AbstractHttpWriterBuilder<Void, String, HttpWriterBuilder> {

  /**
   * Validates the accumulated configuration and constructs the writer.
   *
   * @return a new {@link HttpWriter} for {@link String} records
   * @throws IOException if the writer cannot be created
   */
  @Override
  public DataWriter<String> build() throws IOException {
    validate();
    return new HttpWriter<>(this);
  }
} | 3,101 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/SalesforceRestWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import lombok.Getter;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.gobblin.converter.http.RestEntry;
import org.apache.gobblin.exception.NonTransientException;
import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.apache.oltu.oauth2.client.OAuthClient;
import org.apache.oltu.oauth2.client.URLConnectionClient;
import org.apache.oltu.oauth2.client.request.OAuthClientRequest;
import org.apache.oltu.oauth2.client.response.OAuthJSONAccessTokenResponse;
import org.apache.oltu.oauth2.common.OAuth;
import org.apache.oltu.oauth2.common.exception.OAuthProblemException;
import org.apache.oltu.oauth2.common.exception.OAuthSystemException;
import org.apache.oltu.oauth2.common.message.types.GrantType;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
/**
 * Writes to Salesforce via RESTful API, supporting INSERT_ONLY_NOT_EXIST, and UPSERT.
 *
 * <p>Records are either sent one at a time, or accumulated into composite batch
 * requests when the configured batch size is greater than one.
 */
@Getter
public class SalesforceRestWriter extends RestJsonWriter {
  /** Write disposition for each record. (Nested enums are implicitly static.) */
  public enum Operation {
    INSERT_ONLY_NOT_EXIST,
    UPSERT
  }

  static final String DUPLICATE_VALUE_ERR_CODE = "DUPLICATE_VALUE";

  // OAuth2 access token; acquired in onConnect and reset when a 401/403 is received.
  protected String accessToken;
  protected final URI oauthEndPoint;
  protected final String clientId;
  protected final String clientSecret;
  protected final String userId;
  protected final String password;
  protected final String securityToken;
  protected final Operation operation;
  // Number of records per request; values > 1 enable the composite batch API.
  protected final int batchSize;
  // Resource path of the composite batch endpoint; required when batchSize > 1.
  protected final Optional<String> batchResourcePath;
  // Subrequests accumulated for the next batch request; absent when empty.
  protected Optional<JsonArray> batchRecords = Optional.absent();
  protected long numRecordsWritten = 0L;

  /**
   * Creates the writer and immediately authenticates against the OAuth2 endpoint.
   *
   * @param builder supplies endpoint, credentials, operation and batching settings
   * @throws RuntimeException if the initial authentication fails
   */
  public SalesforceRestWriter(SalesForceRestWriterBuilder builder) {
    this(builder, null); // shared field initialization; token is acquired below
    if (batchSize > 1) {
      getLog().info("Batch api will be used with batch size " + batchSize);
    }
    try {
      onConnect(oauthEndPoint); // NOTE(review): overridable method invoked from constructor
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Testing constructor that skips authentication and uses the given access token.
   * Also serves as the shared initializer for the public constructor.
   */
  @VisibleForTesting
  SalesforceRestWriter(SalesForceRestWriterBuilder builder, String accessToken) {
    super(builder);
    this.oauthEndPoint = builder.getSvcEndpoint().get(); //Set oauth end point
    this.clientId = builder.getClientId();
    this.clientSecret = builder.getClientSecret();
    this.userId = builder.getUserId();
    this.password = builder.getPassword();
    this.securityToken = builder.getSecurityToken();
    this.operation = builder.getOperation();
    this.batchSize = builder.getBatchSize();
    this.batchResourcePath = builder.getBatchResourcePath();
    Preconditions.checkArgument(batchSize == 1 || batchResourcePath.isPresent(), "Batch resource path is missing");
    this.accessToken = accessToken;
  }

  /**
   * Retrieve access token, if needed, retrieve instance url, and set server host URL
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.http.HttpWriter#onConnect(org.apache.http.HttpHost)
   */
  @Override
  public void onConnect(URI serverHost) throws IOException {
    if (!StringUtils.isEmpty(accessToken)) {
      return; //No need to be called if accessToken is active.
    }
    try {
      getLog().info("Getting Oauth2 access token.");
      OAuthClientRequest request = OAuthClientRequest.tokenLocation(serverHost.toString())
          .setGrantType(GrantType.PASSWORD)
          .setClientId(clientId)
          .setClientSecret(clientSecret)
          .setUsername(userId)
          .setPassword(password + securityToken).buildQueryMessage();
      OAuthClient client = new OAuthClient(new URLConnectionClient());
      OAuthJSONAccessTokenResponse response = client.accessToken(request, OAuth.HttpMethod.POST);
      accessToken = response.getAccessToken();
      setCurServerHost(new URI(response.getParam("instance_url")));
    } catch (OAuthProblemException e) {
      throw new NonTransientException("Error while authenticating with Oauth2", e);
    } catch (OAuthSystemException e) {
      throw new RuntimeException("Failed getting access token", e);
    } catch (URISyntaxException e) {
      throw new RuntimeException("Failed due to invalid instance url", e);
    }
  }

  /**
   * For single request, creates HttpUriRequest and decides post/patch operation based on input parameter.
   *
   * For batch request, add the record into JsonArray as a subrequest and only creates HttpUriRequest with POST method if it filled the batch size.
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.http.RestJsonWriter#onNewRecord(org.apache.gobblin.converter.rest.RestEntry)
   */
  @Override
  public Optional<HttpUriRequest> onNewRecord(RestEntry<JsonObject> record) {
    Preconditions.checkArgument(!StringUtils.isEmpty(accessToken), "Access token has not been acquired.");
    Preconditions.checkNotNull(record, "Record should not be null");
    RequestBuilder builder = null;
    JsonObject payload = null;
    if (batchSize > 1) {
      if (!batchRecords.isPresent()) {
        batchRecords = Optional.of(new JsonArray());
      }
      batchRecords.get().add(newSubrequest(record));
      if (batchRecords.get().size() < batchSize) { //No need to send. Return absent.
        return Optional.absent();
      }
      payload = newPayloadForBatch();
      builder = RequestBuilder.post().setUri(combineUrl(getCurServerHost(), batchResourcePath));
    } else {
      switch (operation) {
        case INSERT_ONLY_NOT_EXIST:
          builder = RequestBuilder.post();
          break;
        case UPSERT:
          builder = RequestBuilder.patch();
          break;
        default:
          throw new IllegalArgumentException(operation + " is not supported.");
      }
      builder.setUri(combineUrl(getCurServerHost(), record.getResourcePath()));
      payload = record.getRestEntryVal();
    }
    return Optional.of(newRequest(builder, payload));
  }

  /**
   * Create batch subrequest. For more detail @link https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/requests_composite_batch.htm
   *
   * @param record record to wrap; its resource path must be present
   * @return a JSON subrequest with url, richInput, and method properties
   */
  private JsonObject newSubrequest(RestEntry<JsonObject> record) {
    Preconditions.checkArgument(record.getResourcePath().isPresent(), "Resource path is not defined");
    JsonObject subReq = new JsonObject();
    subReq.addProperty("url", record.getResourcePath().get());
    subReq.add("richInput", record.getRestEntryVal());
    switch (operation) {
      case INSERT_ONLY_NOT_EXIST:
        subReq.addProperty("method", "POST");
        break;
      case UPSERT:
        subReq.addProperty("method", "PATCH");
        break;
      default:
        throw new IllegalArgumentException(operation + " is not supported.");
    }
    return subReq;
  }

  /**
   * @return JsonObject contains batch records
   */
  private JsonObject newPayloadForBatch() {
    JsonObject payload = new JsonObject();
    payload.add("batchRequests", batchRecords.get());
    return payload;
  }

  /**
   * Finalizes the given request builder with JSON content type, the OAuth header,
   * and the serialized payload as the request entity.
   */
  private HttpUriRequest newRequest(RequestBuilder builder, JsonElement payload) {
    try {
      builder.addHeader(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.getMimeType())
             .addHeader(HttpHeaders.AUTHORIZATION, "OAuth " + accessToken)
             .setEntity(new StringEntity(payload.toString(), ContentType.APPLICATION_JSON));
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    if (getLog().isDebugEnabled()) {
      getLog().debug("Request builder: " + ToStringBuilder.reflectionToString(builder, ToStringStyle.SHORT_PREFIX_STYLE));
    }
    return builder.build();
  }

  /**
   * Re-sends a previously failed request if a retry is pending, otherwise flushes any
   * partially-filled batch of subrequests.
   */
  @Override
  public void flush() {
    try {
      if (isRetry()) {
        //flushing failed and it should be retried.
        super.writeImpl(null);
        return;
      }
      if (batchRecords.isPresent() && batchRecords.get().size() > 0) {
        // Fixed typo in log message ("Flusing" -> "Flushing").
        getLog().info("Flushing remaining subrequest of batch. # of subrequests: " + batchRecords.get().size());
        curRequest = Optional.of(newRequest(RequestBuilder.post().setUri(combineUrl(getCurServerHost(), batchResourcePath)),
            newPayloadForBatch()));
        super.writeImpl(null);
      }
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Make it fail (throw exception) if status code is greater or equal to 400 except,
   * the status code is 400 and error code is duplicate value, regard it as success(do not throw exception).
   *
   * If status code is 401 or 403, re-acquire access token before make it fail -- retry will take care of rest.
   *
   * {@inheritDoc}
   * @see org.apache.gobblin.writer.http.HttpWriter#processResponse(org.apache.http.HttpResponse)
   */
  @Override
  public void processResponse(CloseableHttpResponse response) throws IOException, UnexpectedResponseException {
    if (getLog().isDebugEnabled()) {
      getLog().debug("Received response " + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE));
    }
    int statusCode = response.getStatusLine().getStatusCode();
    if (statusCode == 401 || statusCode == 403) {
      getLog().info("Reacquiring access token.");
      accessToken = null; // force onConnect to fetch a fresh token
      onConnect(oauthEndPoint);
      throw new RuntimeException("Access denied. Access token has been reacquired and retry may solve the problem. "
          + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE));
    }
    if (batchSize > 1) {
      processBatchRequestResponse(response);
      numRecordsWritten += batchRecords.get().size();
      batchRecords = Optional.absent();
    } else {
      processSingleRequestResponse(response);
      numRecordsWritten++;
    }
  }

  /**
   * Validates a single-record response; a 400 caused solely by a duplicate-value
   * error under INSERT_ONLY_NOT_EXIST is treated as success.
   */
  private void processSingleRequestResponse(CloseableHttpResponse response) throws IOException,
      UnexpectedResponseException {
    int statusCode = response.getStatusLine().getStatusCode();
    if (statusCode < 400) {
      return;
    }
    String entityStr = EntityUtils.toString(response.getEntity());
    if (statusCode == 400
        && Operation.INSERT_ONLY_NOT_EXIST.equals(operation)
        && entityStr != null) { //Ignore if it's duplicate entry error code
      JsonArray jsonArray = new JsonParser().parse(entityStr).getAsJsonArray();
      JsonObject jsonObject = jsonArray.get(0).getAsJsonObject();
      if (isDuplicate(jsonObject, statusCode)) {
        return;
      }
    }
    throw new RuntimeException("Failed due to " + entityStr + " (Detail: "
        + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
  }

  /**
   * Check results from batch response, if any of the results is failure throw exception.
   * @param response
   * @throws IOException
   * @throws UnexpectedResponseException
   */
  private void processBatchRequestResponse(CloseableHttpResponse response) throws IOException,
      UnexpectedResponseException {
    String entityStr = EntityUtils.toString(response.getEntity());
    int statusCode = response.getStatusLine().getStatusCode();
    if (statusCode >= 400) {
      throw new RuntimeException("Failed due to " + entityStr + " (Detail: "
          + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
    }
    JsonObject jsonBody = new JsonParser().parse(entityStr).getAsJsonObject();
    if (!jsonBody.get("hasErrors").getAsBoolean()) {
      return;
    }
    JsonArray results = jsonBody.get("results").getAsJsonArray();
    for (JsonElement jsonElem : results) {
      JsonObject json = jsonElem.getAsJsonObject();
      int subStatusCode = json.get("statusCode").getAsInt();
      if (subStatusCode < 400) {
        continue;
      } else if (subStatusCode == 400
          && Operation.INSERT_ONLY_NOT_EXIST.equals(operation)) {
        JsonElement resultJsonElem = json.get("result");
        Preconditions.checkNotNull(resultJsonElem, "Error response should contain result property");
        JsonObject resultJsonObject = resultJsonElem.getAsJsonArray().get(0).getAsJsonObject();
        if (isDuplicate(resultJsonObject, subStatusCode)) {
          continue;
        }
      }
      throw new RuntimeException("Failed due to " + jsonBody + " (Detail: "
          + ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
    }
  }

  /**
   * @return true when the error response represents a duplicate-value failure under
   *     INSERT_ONLY_NOT_EXIST, which the writer deliberately ignores
   */
  private boolean isDuplicate(JsonObject responseJsonObject, int statusCode) {
    if (statusCode != 400 || !Operation.INSERT_ONLY_NOT_EXIST.equals(operation)) {
      return false;
    }
    // Guard against responses without an "errorCode" property; the previous code
    // threw a NullPointerException here instead of a descriptive failure.
    JsonElement errorCode = responseJsonObject.get("errorCode");
    return errorCode != null && DUPLICATE_VALUE_ERR_CODE.equals(errorCode.getAsString());
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public long recordsWritten() {
    return this.numRecordsWritten;
  }
} | 3,102 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/HttpWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import com.google.common.base.Optional;
/**
 * Writes via RESTful API that accepts plain text as a body.
 */
public class HttpWriter<D> extends AbstractHttpWriter<D> {

  @SuppressWarnings("rawtypes")
  public HttpWriter(AbstractHttpWriterBuilder builder) {
    super(builder);
  }

  /** {@inheritDoc} */
  @Override
  public URI chooseServerHost() {
    return getCurServerHost();
  }

  /** No connection setup is needed for plain HTTP. */
  @Override
  public void onConnect(URI serverHost) throws IOException {}

  /**
   * Builds a POST request whose body is {@code record.toString()} sent as text/plain.
   *
   * @param record the record to send
   * @return the request to execute; always present
   */
  @Override
  public Optional<HttpUriRequest> onNewRecord(D record) {
    try {
      // Bug fix: the previous code called StringEntity(String, String), whose second
      // argument is a *charset name*, with ContentType.TEXT_PLAIN.toString()
      // ("text/plain; charset=..."), which is not a valid charset and caused the
      // entity construction to throw. Use the ContentType overload instead.
      HttpUriRequest uriRequest = RequestBuilder.post()
          .addHeader(HttpHeaders.CONTENT_TYPE, ContentType.TEXT_PLAIN.getMimeType())
          .setUri(getCurServerHost())
          .setEntity(new StringEntity(record.toString(), ContentType.TEXT_PLAIN))
          .build();
      return Optional.of(uriRequest);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Resolves {@code resourcePath} against {@code uri}; returns {@code uri} unchanged
   * when no resource path is given.
   */
  URI combineUrl(URI uri, Optional<String> resourcePath) {
    if (!resourcePath.isPresent()) {
      return uri;
    }
    try {
      // Bug fix: resolve against the uri argument. It was previously ignored in
      // favor of getCurServerHost(), which only worked because existing callers
      // happened to pass the current server host.
      return new URL(uri.toURL(), resourcePath.get()).toURI();
    } catch (MalformedURLException | URISyntaxException e) {
      throw new RuntimeException("Failed combining URL", e);
    }
  }
}
| 3,103 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/writer/http/HttpWriterDecorator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.http;
import java.io.IOException;
import java.net.URI;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ListenableFuture;
/**
 * Common parent for {@link AbstractHttpWriter} decorators. Delegates extension methods to another
 * implementation and simplifies the overriding of only selected methods.
 */
public abstract class HttpWriterDecorator<D> implements HttpWriterDecoration<D> {

  // Decorated instance that receives every call this decorator does not override.
  private final HttpWriterDecoration<D> fallback;

  public HttpWriterDecorator(HttpWriterDecoration<D> fallback) {
    this.fallback = Preconditions.checkNotNull(fallback);
  }

  /** @return the wrapped decoration that all calls are forwarded to */
  protected HttpWriterDecoration<D> getFallback() {
    return this.fallback;
  }

  @Override
  public URI chooseServerHost() {
    return getFallback().chooseServerHost();
  }

  @Override
  public void onConnect(URI serverHost) throws IOException {
    getFallback().onConnect(serverHost);
  }

  @Override
  public Optional<HttpUriRequest> onNewRecord(D record) {
    return getFallback().onNewRecord(record);
  }

  @Override
  public ListenableFuture<CloseableHttpResponse> sendRequest(HttpUriRequest request) throws IOException {
    return getFallback().sendRequest(request);
  }

  @Override
  public CloseableHttpResponse waitForResponse(ListenableFuture<CloseableHttpResponse> responseFuture) {
    return getFallback().waitForResponse(responseFuture);
  }

  @Override
  public void processResponse(CloseableHttpResponse response) throws IOException, UnexpectedResponseException {
    getFallback().processResponse(response);
  }
}
| 3,104 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/recordaccess/RecordAccessorProviderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
import java.util.ServiceLoader;
/**
 * This object can build RecordAccessors for a given object type. Developers who wish to
 * provide custom RecordAccessors can do so by implementing the RecordAccessProvider interface
 * and ensuring that their JAR has a META-INF/services/ entry pointing the ServiceLoader to
 * the custom implementation.
 */
public class RecordAccessorProviderFactory {
  // final: the loader is created once and reused; iteration is guarded by the
  // synchronized accessor below because ServiceLoader is not thread-safe.
  private static final ServiceLoader<RecordAccessorProvider> recordAccessorProviders =
      ServiceLoader.load(RecordAccessorProvider.class);

  /**
   * Get a RecordAccessor for a given object. Throws IllegalArgumentException if none
   * can be built.
   *
   * <p>Synchronized (canonical modifier order fixed from {@code synchronized static})
   * because concurrent iteration of a shared {@link ServiceLoader} is unsafe.
   *
   * @param obj the record object to build an accessor for
   * @return the first accessor any registered provider produces
   * @throws IllegalArgumentException if no provider can handle {@code obj}
   */
  public static synchronized RecordAccessor getRecordAccessorForObject(Object obj) {
    for (RecordAccessorProvider provider : recordAccessorProviders) {
      RecordAccessor accessor = provider.recordAccessorForObject(obj);
      if (accessor != null) {
        return accessor;
      }
    }
    throw new IllegalArgumentException("Can't build accessor for object " + obj.toString() + "!");
  }
}
| 3,105 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/recordaccess/CoreRecordAccessProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
import org.apache.avro.generic.GenericRecord;
/**
 * RecordAccessProvider that can instantiate any RecordAccessor in the gobblin-core module.
 */
public class CoreRecordAccessProvider implements RecordAccessorProvider {

  /**
   * @return an accessor when {@code obj} is an Avro {@link GenericRecord}; otherwise
   *     null so that the next registered provider may be tried
   */
  @Override
  public RecordAccessor recordAccessorForObject(Object obj) {
    if (!GenericRecord.class.isAssignableFrom(obj.getClass())) {
      return null;
    }
    return new AvroGenericRecordAccessor((GenericRecord) obj);
  }
}
| 3,106 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/recordaccess/AvroGenericRecordAccessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.util.Utf8;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import org.apache.gobblin.util.AvroUtils;
/**
* Implementation of a RecordAccessor that can process Avro GenericRecords.
*
* NOTE: This class assumes field names never contain a '.'; it assumes they are always
* nested.
*/
public class AvroGenericRecordAccessor implements RecordAccessor {
// Avro record whose fields this accessor reads and writes.
private final GenericRecord record;
/**
 * Creates an accessor backed by the given Avro record. The record is held by
 * reference, so the set() methods mutate it in place.
 */
public AvroGenericRecordAccessor(GenericRecord record) {
this.record = record;
}
/**
 * Looks up every field matched by {@code fieldName} and returns those values that
 * convert to strings; entries whose value converts to null are omitted.
 */
@Override
public Map<String, String> getMultiAsString(String fieldName) {
  Map<String, String> result = new HashMap<>();
  for (Map.Entry<String, Object> match : getMultiGeneric(fieldName).entrySet()) {
    String asString = convertToString(match.getKey(), match.getValue());
    if (asString != null) {
      result.put(match.getKey(), asString);
    }
  }
  return result;
}
/** Returns the value of {@code fieldName} converted to a String, or null if absent. */
@Override
public String getAsString(String fieldName) {
  return convertToString(fieldName, getAsObject(fieldName));
}
/**
 * Converts a field value to a String: String and Utf8 pass through (Utf8 via
 * toString()), null stays null, and any other type raises an IncorrectTypeException
 * through the cast helper.
 */
private String convertToString(String fieldName, Object obj) {
  if (obj == null) {
    return null;
  }
  if (obj instanceof String || obj instanceof Utf8) {
    return obj.toString();
  }
  return castOrThrowTypeException(fieldName, obj, String.class);
}
/**
 * Looks up every field matched by {@code fieldName} and returns those values that
 * convert to Integers; entries whose value converts to null are omitted.
 */
@Override
public Map<String, Integer> getMultiAsInt(String fieldName) {
  Map<String, Integer> result = new HashMap<>();
  for (Map.Entry<String, Object> match : getMultiGeneric(fieldName).entrySet()) {
    Integer asInt = convertToInt(match.getKey(), match.getValue());
    if (asInt != null) {
      result.put(match.getKey(), asInt);
    }
  }
  return result;
}
/** Returns the value of {@code fieldName} as an Integer, or null if absent. */
@Override
public Integer getAsInt(String fieldName) {
  Object raw = getAsObject(fieldName);
  return convertToInt(fieldName, raw);
}
// Integers need no widening; any non-null, non-Integer value is a type error.
private Integer convertToInt(String fieldName, Object obj) {
return castOrThrowTypeException(fieldName, obj, Integer.class);
}
/**
 * Looks up every field matched by {@code fieldName} and returns those values that
 * convert to Longs (Integers are widened); entries converting to null are omitted.
 */
@Override
public Map<String, Long> getMultiAsLong(String fieldName) {
  Map<String, Long> result = new HashMap<>();
  for (Map.Entry<String, Object> match : getMultiGeneric(fieldName).entrySet()) {
    Long asLong = convertToLong(match.getKey(), match.getValue());
    if (asLong != null) {
      result.put(match.getKey(), asLong);
    }
  }
  return result;
}
/** Returns the value of {@code fieldName} as a Long (widening Integers), or null if absent. */
@Override
public Long getAsLong(String fieldName) {
  Object raw = getAsObject(fieldName);
  return convertToLong(fieldName, raw);
}
/**
 * Converts a field value to a Long, widening Avro int values; any other non-null,
 * non-Long value is a type error.
 */
private Long convertToLong(String fieldName, Object obj) {
  if (!(obj instanceof Integer)) {
    return castOrThrowTypeException(fieldName, obj, Long.class);
  }
  return ((Integer) obj).longValue();
}
/**
 * Casts {@code o} to {@code clazz}, passing null through unchanged, and rethrows
 * any ClassCastException as an IncorrectTypeException naming the offending field.
 */
private <T> T castOrThrowTypeException(String fieldName, Object o, Class<? extends T> clazz) {
  if (o == null) {
    return null;
  }
  try {
    return clazz.cast(o);
  } catch (ClassCastException e) {
    throw new IncorrectTypeException("Incorrect type for field " + fieldName, e);
  }
}
/** Fetches the raw field value via AvroUtils, unwrapping the Optional to a nullable. */
private Object getAsObject(String fieldName) {
  return AvroUtils.getFieldValue(record, fieldName).orNull();
}
/**
 * Returns all values matched by {@code fieldName}, each converted in place from its
 * Avro representation to a plain Java object.
 */
@Override
public Map<String, Object> getMultiGeneric(String fieldName) {
  Map<String, Object> vals = AvroUtils.getMultiFieldValue(record, fieldName);
  for (Map.Entry<String, Object> entry : vals.entrySet()) {
    entry.setValue(convertAvroToJava(entry.getKey(), entry.getValue()));
  }
  return vals;
}
/** Returns the single value of {@code fieldName}, converted to a plain Java object. */
@Override
public Object getGeneric(String fieldName) {
  return convertAvroToJava(fieldName, getAsObject(fieldName));
}
/*
 * Normalizes an Avro value into a plain Java object: String/Long/Integer/null pass
 * through unchanged, Utf8 becomes String, and GenericArray becomes a List via
 * convertToList. Any other type is unsupported.
 */
private Object convertAvroToJava(String fieldName, Object val) {
// Plain Java types (and null) need no conversion.
if (val == null || val instanceof String || val instanceof Long || val instanceof Integer) {
return val;
}
if (val instanceof Utf8) {
return convertToString(fieldName, val);
}
if (val instanceof GenericArray) {
return convertToList(fieldName, (GenericArray) val);
}
throw new IllegalArgumentException("Don't know how to parse object of type " + val.getClass().getCanonicalName());
}
/** Sets {@code fieldName} to a String value, delegating to the generic setter. */
@Override
public void set(String fieldName, String value) {
set(fieldName, (Object) value);
}
/** Sets {@code fieldName} to an Integer value, delegating to the generic setter. */
@Override
public void set(String fieldName, Integer value) {
set(fieldName, (Object) value);
}
/** Sets {@code fieldName} to a Long value, delegating to the generic setter. */
@Override
public void set(String fieldName, Long value) {
set(fieldName, (Object) value);
}
/** Sets the given field to a list of strings, wrapping it in an Avro string array. */
@Override
public void setStringArray(String fieldName, List<String> value) {
  // Build the array-of-string schema first, then delegate to the generic setter.
  Schema arraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
  set(fieldName, new GenericData.Array<>(arraySchema, value));
}
/**
 * Sets the given field to null. Note the generic setter validates the new value
 * against the field's schema, so this succeeds only if the schema permits null.
 */
@Override
public void setToNull(String fieldName) {
  set(fieldName, (Object) null);
}
/*
 * Recurse down record types to set the right value.
 *
 * fieldName may be a dot-separated path. Intermediate levels may be records
 * (field access), lists (numeric index) or maps (key lookup), but the final
 * level must belong to a GenericRecord. After inserting, the new value is
 * validated against the field's schema; on failure the old value is restored
 * and an IncorrectTypeException is thrown.
 */
private void set(String fieldName, Object value) {
  try {
    String subField;
    Iterator<String> levels = Splitter.on(".").split(fieldName).iterator();
    GenericRecord toInsert = record;
    subField = levels.next();
    Object subRecord = toInsert;
    // Walk every level except the last, resolving records, lists and maps.
    while (levels.hasNext()) {
      if (subRecord instanceof GenericRecord) {
        subRecord = ((GenericRecord) subRecord).get(subField);
      } else if (subRecord instanceof List) {
        subRecord = ((List) subRecord).get(Integer.parseInt(subField));
      } else if (subRecord instanceof Map) {
        subRecord = ((Map) subRecord).get(subField);
      }
      if (subRecord == null) {
        throw new FieldDoesNotExistException("Field " + subField + " not found when trying to set " + fieldName);
      }
      subField = levels.next();
    }
    if (!(subRecord instanceof GenericRecord)) {
      throw new IllegalArgumentException("Field " + fieldName + " does not refer to a record type.");
    }
    toInsert = (GenericRecord) subRecord;
    Object oldValue = toInsert.get(subField);
    toInsert.put(subField, value);
    Schema.Field changedField = toInsert.getSchema().getField(subField);
    GenericData genericData = GenericData.get();
    boolean valid = genericData
        .validate(changedField.schema(), genericData.getField(toInsert, changedField.name(), changedField.pos()));
    if (!valid) {
      // Roll back so a failed set leaves the record unchanged.
      toInsert.put(subField, oldValue);
      // BUGFIX: value may legitimately be null (see setToNull); the previous
      // unconditional value.getClass() call threw a NullPointerException here
      // instead of the intended IncorrectTypeException.
      String valueType = (value == null) ? "null" : value.getClass().getCanonicalName();
      throw new IncorrectTypeException(
          "Incorrect type - can't insert a " + valueType + " into an Avro record of type "
              + changedField.schema().getType().toString());
    }
  } catch (AvroRuntimeException e) {
    throw new FieldDoesNotExistException("Field not found setting name " + fieldName, e);
  }
}
/** Converts an Avro array into a List, labeling conversion errors with the element's index path. */
@SuppressWarnings("unchecked")
private List convertToList(String fieldName, GenericArray arr) {
  List converted = new ArrayList(arr.size());
  int index = 0;
  // GenericArray extends List, so for-each visits elements in index order.
  for (Object element : arr) {
    converted.add(convertAvroToJava(fieldName + "." + index, element));
    index++;
  }
  return converted;
}
}
| 3,107 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/http/HttpClientConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import org.apache.http.client.HttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.State;
/**
 * An adapter from Gobblin configuration to {@link HttpClientBuilder}. It can also be used to
 * create {@link HttpClient} instances.
 */
public interface HttpClientConfigurator {
  /**
   * Sets a prefix to use when extracting the configuration from {@link State}. The default is
   * empty.
   * @param propertiesPrefix the prefix prepended to recognized property names when reading
   *        from a {@link State}
   * @return this configurator, to allow call chaining
   */
  HttpClientConfigurator setStatePropertiesPrefix(String propertiesPrefix);
  /**
   * Extracts the HttpClient configuration from a typesafe config. Supported configuration options
   * may vary from implementation to implementation.
   * @return this configurator, to allow call chaining
   */
  HttpClientConfigurator configure(Config httpClientConfig);
  /**
   * Same as {@link #configure(Config)} but for legacy cases using State.
   * @return this configurator, to allow call chaining
   */
  HttpClientConfigurator configure(State httpClientConfig);
  /** The underlying client builder whose state reflects all configure() calls so far. */
  HttpClientBuilder getBuilder();
  /**
   * Typically this will use {@link HttpClientBuilder#build()} based on the configuration but
   * implementations may also return decorated instances. */
  HttpClient createClient();
}
| 3,108 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/http/HttpClientConfiguratorLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import org.apache.http.client.HttpClient;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ClassAliasResolver;
/**
 * Creates an instance of HttpClientConfigurator using dependency injection from configuration.
 */
public class HttpClientConfiguratorLoader {
  /** Classname or alias for an {@link HttpClientConfigurator} instance to use for configuring and
   * instantiating of {@link HttpClient} instances. */
  public static final String HTTP_CLIENT_CONFIGURATOR_TYPE_KEY = "httpClientConfigurator.type";
  public static final String HTTP_CLIENT_CONFIGURATOR_TYPE_FULL_KEY =
      "gobblin." + HTTP_CLIENT_CONFIGURATOR_TYPE_KEY;
  public static final Class<? extends HttpClientConfigurator> DEFAULT_CONFIGURATOR_CLASS =
      DefaultHttpClientConfigurator.class;
  private static final ClassAliasResolver<HttpClientConfigurator> TYPE_RESOLVER =
      new ClassAliasResolver<>(HttpClientConfigurator.class);
  private final HttpClientConfigurator _configurator;

  /**
   * Loads a HttpClientConfigurator using the value of the {@link #HTTP_CLIENT_CONFIGURATOR_TYPE_FULL_KEY}
   * property in the state.
   */
  public HttpClientConfiguratorLoader(State state) {
    this(Optional.fromNullable(state.getProp(HTTP_CLIENT_CONFIGURATOR_TYPE_FULL_KEY)));
  }

  /** Loads a HttpClientConfigurator using the value of {@link #HTTP_CLIENT_CONFIGURATOR_TYPE_KEY}
   * in the local typesafe config. */
  public HttpClientConfiguratorLoader(Config config) {
    this(Optional.fromNullable(
        config.hasPath(HTTP_CLIENT_CONFIGURATOR_TYPE_KEY)
            ? config.getString(HTTP_CLIENT_CONFIGURATOR_TYPE_KEY) : null));
  }

  /** Loads a HttpClientConfigurator with the specified class or alias. If not specified,
   * {@link #DEFAULT_CONFIGURATOR_CLASS} is used. */
  public HttpClientConfiguratorLoader(Optional<String> configuratorType) {
    try {
      _configurator = getConfiguratorClass(configuratorType).newInstance();
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
      throw new RuntimeException("Unable to find HttpClientConfigurator:" + e, e);
    }
  }

  /** Resolves the configurator class from an alias/classname, defaulting when absent. */
  private static Class<? extends HttpClientConfigurator>
      getConfiguratorClass(Optional<String> configuratorType) throws ClassNotFoundException {
    if (!configuratorType.isPresent()) {
      return DEFAULT_CONFIGURATOR_CLASS;
    }
    return TYPE_RESOLVER.resolveClass(configuratorType.get());
  }

  /** The configurator instance created at construction time. */
  public HttpClientConfigurator getConfigurator() {
    return _configurator;
  }
}
| 3,109 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/org/apache/gobblin/http/DefaultHttpClientConfigurator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.http.HttpHost;
import org.apache.http.client.HttpClient;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.State;
/**
 * Default implementation that uses the following properties to configure an {@link HttpClient}.
 *
 * <ul>
 * <li>{@link #PROXY_HOSTPORT_KEY}
 * <li>{@link #PROXY_URL_KEY}
 * <li>{@link #PROXY_PORT_KEY}
 * </ul>
 */
@Alias(value="default")
public class DefaultHttpClientConfigurator implements HttpClientConfigurator {
  // IMPORTANT: don't change the values for PROXY_URL_KEY and PROXY_PORT_KEY as they are meant to
  // be backwards compatible with SOURCE_CONN_USE_PROXY_URL and SOURCE_CONN_USE_PROXY_PORT when
  // the statePropertiesPrefix is "source.conn."
  /** The hostname of the HTTP proxy to use */
  public static final String PROXY_URL_KEY = "use.proxy.url";
  /** The port of the HTTP proxy to use */
  public static final String PROXY_PORT_KEY = "use.proxy.port";
  /** Similar to {@link #PROXY_URL_KEY} and {@link #PROXY_PORT_KEY} but allows you to set it on
   * one property as {@code host:port}. This property takes precedence over those properties. */
  public static final String PROXY_HOSTPORT_KEY = "proxyHostport";
  /** Port to use if the HTTP Proxy is enabled but no port is specified */
  public static final int DEFAULT_HTTP_PROXY_PORT = 8080;
  // Matches "host" or "host:port": group(1) is the host, group(3) the optional port digits.
  private static final Pattern HOSTPORT_PATTERN = Pattern.compile("([^:]+)(:([0-9]+))?");
  protected final HttpClientBuilder _builder = HttpClientBuilder.create();
  // Optional prefix prepended to property names when reading from a State (e.g. "source.conn.").
  protected String _statePropertiesPrefix = null;
  /** {@inheritDoc} */
  @Override
  public DefaultHttpClientConfigurator configure(Config httpClientConfig) {
    Optional<HttpHost> proxy = getProxyAddr(httpClientConfig);
    if (proxy.isPresent()) {
      getBuilder().setProxy(proxy.get());
    }
    return this;
  }
  /** {@inheritDoc} */
  @Override
  public DefaultHttpClientConfigurator configure(State state) {
    // Translate the (possibly prefixed) State properties into an un-prefixed Config first.
    Config cfg = stateToConfig(state);
    return configure(cfg);
  }
  /** Copies the recognized proxy properties from the State into a Config, stripping the
   * configured prefix from the property names. Unset properties are omitted. */
  protected Config stateToConfig(State state) {
    String proxyUrlKey = getPrefixedPropertyName(PROXY_URL_KEY);
    String proxyPortKey = getPrefixedPropertyName(PROXY_PORT_KEY);
    String proxyHostportKey = getPrefixedPropertyName(PROXY_HOSTPORT_KEY);
    Config cfg = ConfigFactory.empty();
    if (state.contains(proxyUrlKey)) {
      cfg = cfg.withValue(PROXY_URL_KEY, ConfigValueFactory.fromAnyRef(state.getProp(proxyUrlKey)));
    }
    if (state.contains(proxyPortKey)) {
      cfg = cfg.withValue(PROXY_PORT_KEY, ConfigValueFactory.fromAnyRef(state.getPropAsInt(proxyPortKey)));
    }
    if (state.contains(proxyHostportKey)) {
      cfg = cfg.withValue(PROXY_HOSTPORT_KEY, ConfigValueFactory.fromAnyRef(state.getProp(proxyHostportKey)));
    }
    return cfg;
  }
  /** {@inheritDoc} */
  @Override
  public CloseableHttpClient createClient() {
    return _builder.build();
  }
  /**
   * Determines the proxy address from the config. {@link #PROXY_HOSTPORT_KEY} takes
   * precedence over {@link #PROXY_URL_KEY}/{@link #PROXY_PORT_KEY}; the port defaults to
   * {@link #DEFAULT_HTTP_PROXY_PORT} when not specified. Returns absent when no proxy
   * host is configured.
   */
  @VisibleForTesting
  public static Optional<HttpHost> getProxyAddr(Config httpClientConfig) {
    String proxyHost = null;
    int proxyPort = DEFAULT_HTTP_PROXY_PORT;
    if (httpClientConfig.hasPath(PROXY_URL_KEY) &&
        !httpClientConfig.getString(PROXY_URL_KEY).isEmpty()) {
      proxyHost = httpClientConfig.getString(PROXY_URL_KEY);
    }
    if (httpClientConfig.hasPath(PROXY_PORT_KEY)) {
      proxyPort = httpClientConfig.getInt(PROXY_PORT_KEY);
    }
    // Checked last so hostport overrides the individual url/port properties.
    if (httpClientConfig.hasPath(PROXY_HOSTPORT_KEY)) {
      String hostport = httpClientConfig.getString(PROXY_HOSTPORT_KEY);
      Matcher hostportMatcher = HOSTPORT_PATTERN.matcher(hostport);
      if (!hostportMatcher.matches()) {
        throw new IllegalArgumentException("Invalid HTTP proxy hostport: " + hostport);
      }
      proxyHost = hostportMatcher.group(1);
      if (!Strings.isNullOrEmpty(hostportMatcher.group(3))) {
        proxyPort = Integer.parseInt(hostportMatcher.group(3));
      }
    }
    return null != proxyHost ? Optional.of(new HttpHost(proxyHost, proxyPort))
        : Optional.<HttpHost>absent();
  }
  /** {@inheritDoc} */
  @Override
  public DefaultHttpClientConfigurator setStatePropertiesPrefix(String propertiesPrefix) {
    _statePropertiesPrefix = propertiesPrefix;
    return this;
  }
  /** Prepends the configured prefix (if any) to a property name. */
  String getPrefixedPropertyName(String propertyName) {
    return null != _statePropertiesPrefix ? _statePropertiesPrefix + propertyName : propertyName;
  }
  /** {@inheritDoc} */
  @Override
  public HttpClientBuilder getBuilder() {
    return _builder;
  }
}
| 3,110 |
0 | Create_ds/gobblin/gobblin-core/src/main/java/gobblin | Create_ds/gobblin/gobblin-core/src/main/java/gobblin/state/ConstructState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.state;
import java.util.Properties;
import gobblin.configuration.State;
/***
 * Shim layer for org.apache.gobblin.state.ConstructState, kept so code referring to the
 * legacy {@code gobblin.state} package continues to compile.
 */
public class ConstructState extends org.apache.gobblin.state.ConstructState {
  /** Creates an empty ConstructState. */
  public ConstructState() {
  }
  /** Creates a ConstructState initialized from the given properties. */
  public ConstructState(Properties properties) {
    super(properties);
  }
  /** Creates a ConstructState initialized from another State. */
  public ConstructState(State otherState) {
    super(otherState);
  }
}
| 3,111 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub/writer/EventhubDataWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.writer.Batch;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import static org.mockito.Mockito.*;
public class EventhubDataWriterTest {
  private CloseableHttpClient mockHttpClient;

  public EventhubDataWriterTest() throws IOException {
    // Mock an HTTP client whose every POST returns a 201 (Created) response
    // with no body, so the writers under test never touch the network.
    CloseableHttpResponse mockHttpResponse = mock(CloseableHttpResponse.class);
    mockHttpClient = mock(CloseableHttpClient.class);
    StatusLine status = mock(StatusLine.class);
    Mockito.when(mockHttpClient.execute(Mockito.any(HttpPost.class))).thenReturn(mockHttpResponse);
    Mockito.when(status.getStatusCode()).thenReturn(201);
    Mockito.when(mockHttpResponse.getEntity()).thenReturn(null);
    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(status);
  }

  @Test
  public void testSingleBatch() {
    // Spy the writer so refreshSignature() (which needs real credentials) is a no-op.
    Properties props = new Properties();
    EventhubDataWriter eventhubDataWriter = Mockito.spy(new EventhubDataWriter(props, mockHttpClient));
    Mockito.doNothing().when(eventhubDataWriter).refreshSignature();
    List<String> records = new LinkedList<>();
    for (int i = 0; i < 50; ++i) {
      // Use the literal directly; new String("...") only creates a redundant copy.
      records.add("abcdefgh");
    }
    Batch<String> batch = mock(Batch.class);
    WriteCallback callback = mock(WriteCallback.class);
    Mockito.when(batch.getRecords()).thenReturn(records);
    Future<WriteResponse> future = eventhubDataWriter.write(batch, callback);
    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    Assert.assertTrue(future.isDone(), "Future should be done");
  }

  @Test
  public void testSingleRecord() throws IOException {
    Properties props = new Properties();
    EventhubDataWriter eventhubDataWriter = Mockito.spy(new EventhubDataWriter(props, mockHttpClient));
    Mockito.doNothing().when(eventhubDataWriter).refreshSignature();
    String record = "abcdefgh";
    WriteResponse<Integer> writeResponse = eventhubDataWriter.write(record);
    int returnCode = writeResponse.getRawResponse();
    // The mocked HTTP client always answers 201.
    Assert.assertEquals(returnCode, 201);
  }
}
| 3,112 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub/writer/EventhubBatchTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import java.io.IOException;
import org.apache.gobblin.writer.BytesBoundedBatch;
import org.apache.gobblin.writer.LargeMessagePolicy;
import org.apache.gobblin.writer.RecordTooLargeException;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.writer.WriteCallback;
public class EventhubBatchTest {
  @Test
  public void testBatchWithLargeRecord()
      throws IOException, RecordTooLargeException {
    // Memory limit is 8 bytes; the record (plus any per-record overhead)
    // exceeds it, so every append is dropped under LargeMessagePolicy.DROP.
    BytesBoundedBatch batch = new BytesBoundedBatch(8, 3000);
    String record = "abcdefgh";
    // Record is larger than the memory size limit, the first append should fail
    Assert.assertNull(batch.tryAppend(record, WriteCallback.EMPTY, LargeMessagePolicy.DROP));
    // Retrying the same oversized record should still fail
    Assert.assertNull(batch.tryAppend(record, WriteCallback.EMPTY, LargeMessagePolicy.DROP));
  }

  @Test
  public void testBatch()
      throws IOException, RecordTooLargeException {
    // Memory limit is 200 bytes; with per-record overhead exactly 8 copies of
    // the record fit before the batch is full.
    BytesBoundedBatch batch = new BytesBoundedBatch(200, 3000);
    String record = "abcdefgh";
    LargeMessagePolicy policy = LargeMessagePolicy.DROP;
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    // Batch has room for the 8th record
    Assert.assertTrue(batch.hasRoom(record, policy));
    Assert.assertNotNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
    // Batch has no room for a 9th record
    Assert.assertFalse(batch.hasRoom(record, policy));
    Assert.assertNull(batch.tryAppend(record, WriteCallback.EMPTY, policy));
  }
}
| 3,113 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub/writer/BatchedEventhubDataWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import static org.mockito.Mockito.*;
public class BatchedEventhubDataWriterTest {
  private CloseableHttpClient mockHttpClient;

  public BatchedEventhubDataWriterTest() throws IOException {
    // Mock an HTTP client whose every POST returns a 201 (Created) response
    // with no body, so no network access happens during the test.
    CloseableHttpResponse mockHttpResponse = mock(CloseableHttpResponse.class);
    mockHttpClient = mock(CloseableHttpClient.class);
    StatusLine status = mock(StatusLine.class);
    Mockito.when(mockHttpClient.execute(Mockito.any(HttpPost.class))).thenReturn(mockHttpResponse);
    Mockito.when(status.getStatusCode()).thenReturn(201);
    Mockito.when(mockHttpResponse.getEntity()).thenReturn(null);
    Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(status);
  }

  @Test
  public void testBufferedRecords()
      throws IOException, InterruptedException {
    // Spy the writer so refreshSignature() (which needs real credentials) is a no-op.
    Properties props = new Properties();
    EventhubDataWriter eventhubDataWriter = Mockito.spy(new EventhubDataWriter(props, mockHttpClient));
    EventhubBatchAccumulator accumulator = new EventhubBatchAccumulator(props);
    BatchedEventhubDataWriter dataWriter = new BatchedEventhubDataWriter(accumulator, eventhubDataWriter);
    Mockito.doNothing().when(eventhubDataWriter).refreshSignature();
    // mock record and callback
    WriteCallback callback = mock(WriteCallback.class);
    // Parameterize the futures list (the original used a raw Future in iteration).
    List<Future<WriteResponse>> futures = new LinkedList<>();
    int totalTimes = 500;
    try {
      for (int i = 0; i < totalTimes; ++i) {
        String record = "abcdefgh";
        futures.add(dataWriter.write(record, callback));
      }
      dataWriter.flush();
    } finally {
      dataWriter.close();
    }
    // verify all the callbacks are invoked
    verify(callback, times(totalTimes)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    // verify all the futures are completed
    for (Future<WriteResponse> future : futures) {
      Assert.assertTrue(future.isDone(), "Future should be done");
    }
  }
}
| 3,114 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/test/java/org/apache/gobblin/eventhub/writer/EventhubAccumulatorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.gobblin.writer.SequentialBasedBatchAccumulator;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.writer.WriteCallback;
public class EventhubAccumulatorTest {
  // Latches coordinating the test thread with the spawned helper threads.
  private CountDownLatch latchEmpty = new CountDownLatch(1);
  private CountDownLatch latchCapacity = new CountDownLatch(1);

  /** Builds a record of {@code length} 'a' characters. StringBuilder (not
   * StringBuffer) is used: the builder is thread-local, so no synchronization
   * is needed. */
  private static String buildRecord(int length) {
    StringBuilder buffer = new StringBuilder(length);
    for (int i = 0; i < length; ++i) {
      buffer.append('a');
    }
    return buffer.toString();
  }

  @Test
  public void testAccumulatorEmpty() throws IOException, InterruptedException {
    SequentialBasedBatchAccumulator accumulator = new EventhubBatchAccumulator(64, 1000, 5);
    // Spawn a new thread to add new batches
    (new Thread(new AddBatchThread(accumulator))).start();
    // Below three get operations will be blocked until the spawned thread fills the empty queue
    accumulator.getNextAvailableBatch();
    accumulator.getNextAvailableBatch();
    accumulator.getNextAvailableBatch();
    this.latchEmpty.await();
    // The spawned thread should unblock current thread because it removes some front batches
    Assert.assertTrue(accumulator.getNumOfBatches() >= 2);
    Assert.assertTrue(accumulator.getNumOfBatches() <= 5);
  }

  @Test
  public void testAccumulatorCapacity() throws IOException, InterruptedException {
    SequentialBasedBatchAccumulator accumulator = new EventhubBatchAccumulator(64, 1000, 5);
    String record = buildRecord(40);
    accumulator.append(record, WriteCallback.EMPTY);
    accumulator.append(record, WriteCallback.EMPTY);
    accumulator.append(record, WriteCallback.EMPTY);
    accumulator.append(record, WriteCallback.EMPTY);
    accumulator.append(record, WriteCallback.EMPTY);
    // Spawn a new thread to remove available batches
    (new Thread(new RemoveBatchThread(accumulator))).start();
    // The following two appends will be blocked until the spawned thread drains batches
    accumulator.append(record, WriteCallback.EMPTY);
    accumulator.append(record, WriteCallback.EMPTY);
    this.latchCapacity.await();
    // The spawned thread should unblock current thread because it removes some front batches
    Assert.assertEquals(accumulator.getNumOfBatches(), 4);
  }

  @Test
  public void testCloseBeforeAwait() throws IOException, InterruptedException {
    SequentialBasedBatchAccumulator accumulator = new EventhubBatchAccumulator(64, 1000, 5);
    (new Thread(new CloseAccumulatorThread(accumulator))).start();
    Thread.sleep(1000);
    // The accumulator is already closed, so no batch should be available.
    Assert.assertNull(accumulator.getNextAvailableBatch());
  }

  @Test
  public void testCloseAfterAwait() throws IOException, InterruptedException {
    SequentialBasedBatchAccumulator accumulator = new EventhubBatchAccumulator(64, 1000, 5);
    (new Thread(new CloseAccumulatorThread(accumulator))).start();
    // this thread should be blocked and waked up by the spawned thread's close()
    Assert.assertNull(accumulator.getNextAvailableBatch());
  }

  @Test
  public void testClose() throws IOException, InterruptedException {
    SequentialBasedBatchAccumulator accumulator = new EventhubBatchAccumulator(64, 3000, 5);
    String base = buildRecord(40);
    String record1 = base + "1";
    String record2 = base + "2";
    String record3 = base + "3";
    String record4 = base + "4";
    String record5 = base + "5";
    accumulator.append(record1, WriteCallback.EMPTY);
    accumulator.append(record2, WriteCallback.EMPTY);
    accumulator.append(record3, WriteCallback.EMPTY);
    accumulator.append(record4, WriteCallback.EMPTY);
    accumulator.append(record5, WriteCallback.EMPTY);
    (new Thread(new CloseAccumulatorThread(accumulator))).start();
    Thread.sleep(1000);
    // After close, the already-appended batches are still drained in FIFO order.
    Assert.assertEquals(accumulator.getNextAvailableBatch().getRecords().get(0), record1);
    Assert.assertEquals(accumulator.getNextAvailableBatch().getRecords().get(0), record2);
    Assert.assertEquals(accumulator.getNextAvailableBatch().getRecords().get(0), record3);
    Assert.assertEquals(accumulator.getNextAvailableBatch().getRecords().get(0), record4);
    Assert.assertEquals(accumulator.getNextAvailableBatch().getRecords().get(0), record5);
  }

  @Test
  public void testExpiredBatch() throws IOException, InterruptedException {
    SequentialBasedBatchAccumulator accumulator = new EventhubBatchAccumulator(64, 3000, 5);
    String record = "1";
    accumulator.append(record, WriteCallback.EMPTY);
    // The batch is neither full nor expired yet.
    Assert.assertNull(accumulator.getNextAvailableBatch());
    Thread.sleep(3000);
    // After the 3000ms TTL the partially-filled batch becomes available.
    Assert.assertNotNull(accumulator.getNextAvailableBatch());
  }

  /** Sleeps briefly, then closes the accumulator to wake up blocked consumers. */
  public class CloseAccumulatorThread implements Runnable {
    SequentialBasedBatchAccumulator accumulator;

    public CloseAccumulatorThread(SequentialBasedBatchAccumulator accumulator) {
      this.accumulator = accumulator;
    }

    public void run() {
      try {
        Thread.sleep(500);
      } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it.
        Thread.currentThread().interrupt();
      }
      this.accumulator.close();
    }
  }

  /** Drains three batches after a delay, then releases the capacity latch. */
  public class RemoveBatchThread implements Runnable {
    SequentialBasedBatchAccumulator accumulator;

    public RemoveBatchThread(SequentialBasedBatchAccumulator accumulator) {
      this.accumulator = accumulator;
    }

    public void run() {
      try {
        Thread.sleep(1000);
        this.accumulator.getNextAvailableBatch();
        this.accumulator.getNextAvailableBatch();
        this.accumulator.getNextAvailableBatch();
        latchCapacity.countDown();
      } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it.
        Thread.currentThread().interrupt();
      }
    }
  }

  /** Appends five records after a delay, then releases the empty latch. */
  public class AddBatchThread implements Runnable {
    SequentialBasedBatchAccumulator accumulator;

    public AddBatchThread(SequentialBasedBatchAccumulator accumulator) {
      this.accumulator = accumulator;
    }

    public void run() {
      try {
        Thread.sleep(1000);
        String record = buildRecord(40);
        accumulator.append(record, WriteCallback.EMPTY);
        accumulator.append(record, WriteCallback.EMPTY);
        accumulator.append(record, WriteCallback.EMPTY);
        accumulator.append(record, WriteCallback.EMPTY);
        accumulator.append(record, WriteCallback.EMPTY);
        latchEmpty.countDown();
      } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it.
        Thread.currentThread().interrupt();
      }
    }
  }
}
| 3,115 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/EventhubMetricNames.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub;
/**
* Contains names for all metrics generated by eventhub component
*/
public class EventhubMetricNames {

  /** Metric names emitted by {@link org.apache.gobblin.eventhub.writer.EventhubDataWriter}. */
  public static class EventhubDataWriterMetrics {

    /**
     * A {@link com.codahale.metrics.Meter} measuring the number of records attempted
     * to be written by a {@link org.apache.gobblin.eventhub.writer.EventhubDataWriter}. This includes retries.
     */
    public static final String RECORDS_ATTEMPTED_METER = "eventhub.writer.records.attempted";

    /**
     * A {@link com.codahale.metrics.Meter} measuring the number of records successfully written
     * by a {@link org.apache.gobblin.eventhub.writer.EventhubDataWriter}.
     */
    public static final String RECORDS_SUCCESS_METER = "eventhub.writer.records.success";

    /**
     * A {@link com.codahale.metrics.Meter} measuring the number of records that a
     * {@link org.apache.gobblin.eventhub.writer.EventhubDataWriter} failed to write.
     */
    public static final String RECORDS_FAILED_METER = "eventhub.writer.records.failed";

    /**
     * A {@link com.codahale.metrics.Meter} measuring the number of bytes written by a
     * {@link org.apache.gobblin.eventhub.writer.EventhubDataWriter}.
     */
    public static final String BYTES_WRITTEN_METER = "eventhub.writer.bytes.written";

    /**
     * A {@link com.codahale.metrics.Timer} measuring the time taken for each write operation.
     */
    public static final String WRITE_TIMER = "eventhub.writer.write.time";
  }
}
| 3,116 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import java.io.IOException;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.eventhub.EventhubMetricNames;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.StatusLine;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Meter;
import com.google.common.util.concurrent.Futures;
import com.microsoft.azure.servicebus.SharedAccessSignatureTokenProvider;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.writer.Batch;
import org.apache.gobblin.writer.BatchAsyncDataWriter;
import org.apache.gobblin.writer.SyncDataWriter;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import org.apache.gobblin.writer.WriteResponseFuture;
import org.apache.gobblin.writer.WriteResponseMapper;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Future;
import com.codahale.metrics.Timer;
/**
* Data Writer for Eventhub.
* This Data Writer use HttpClient internally and publish data to Eventhub via Post REST API
* Synchronous model is used here that after each data is sent through httpClient, a response is consumed
* immediately. Also this class supports sending multiple records in a batch manner.
*
* The String input needs to be Unicode based because it will convert to JSON format when using REST API
*
* For batch sending, please refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events for sending batch records
* For unicode based json string, please refer to http://rfc7159.net/
*/
@Slf4j
public class EventhubDataWriter implements SyncDataWriter<String>, BatchAsyncDataWriter<String> {
  private static final Logger LOG = LoggerFactory.getLogger(EventhubDataWriter.class);

  private HttpClient httpclient;
  private final String namespaceName;
  private final String eventHubName;
  private final String sasKeyName;
  private final String sasKey;
  private final String targetURI;

  // Instrumentation (see EventhubMetricNames for the metric-name constants).
  private final Meter bytesWritten;
  private final Meter recordsAttempted;
  private final Meter recordsSuccess;
  private final Meter recordsFailed;
  private final Timer writeTimer;

  // System.nanoTime() of the last signature generation; 0 means "no signature generated yet".
  private long postStartTimestamp = 0;
  // Lifetime of a generated SAS signature, in minutes.
  private long sigExpireInMinute = 1;
  private String signature = "";
  private MetricContext metricContext;
  private static final ObjectMapper mapper = new ObjectMapper();

  private static final WriteResponseMapper<Integer> WRITE_RESPONSE_WRAPPER =
      new WriteResponseMapper<Integer>() {
        @Override
        public WriteResponse wrap(final Integer returnCode) {
          return new WriteResponse<Integer>() {
            @Override
            public Integer getRawResponse() {
              return returnCode;
            }

            @Override
            public String getStringResponse() {
              return returnCode.toString();
            }

            @Override
            public long bytesWritten() {
              // Don't know how many bytes were written
              return -1;
            }
          };
        }
      };

  /**
   * Creates a writer from eventhub job properties (namespace, hub name, SAS key name/value).
   * The SAS key value is decrypted through the {@link PasswordManager}.
   */
  public EventhubDataWriter(Properties properties) {
    PasswordManager manager = PasswordManager.getInstance(properties);
    namespaceName = properties.getProperty(BatchedEventhubDataWriter.EVH_NAMESPACE);
    eventHubName = properties.getProperty(BatchedEventhubDataWriter.EVH_HUBNAME);
    sasKeyName = properties.getProperty(BatchedEventhubDataWriter.EVH_SAS_KEYNAME);
    String encodedSasKey = properties.getProperty(BatchedEventhubDataWriter.EVH_SAS_KEYVALUE);
    sasKey = manager.readPassword(encodedSasKey);
    targetURI = "https://" + namespaceName + ".servicebus.windows.net/" + eventHubName + "/messages";
    httpclient = HttpClients.createDefault();
    metricContext = Instrumented.getMetricContext(new State(properties), EventhubDataWriter.class);
    recordsAttempted = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.RECORDS_ATTEMPTED_METER);
    recordsSuccess = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.RECORDS_SUCCESS_METER);
    recordsFailed = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.RECORDS_FAILED_METER);
    bytesWritten = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.BYTES_WRITTEN_METER);
    writeTimer = this.metricContext.timer(EventhubMetricNames.EventhubDataWriterMetrics.WRITE_TIMER);
  }

  /** Same as {@link #EventhubDataWriter(Properties)} but with a caller-supplied {@link HttpClient} (used in tests). */
  public EventhubDataWriter(Properties properties, HttpClient httpclient) {
    this(properties);
    this.httpclient = httpclient;
  }

  /**
   * Writes a whole batch to eventhub synchronously and reports the outcome through
   * {@code callback}. The returned future is always already complete: it wraps the HTTP
   * status code on success, or 0 when the dispatch failed (failure is signalled via
   * {@code callback.onFailure}, not via the future).
   */
  public Future<WriteResponse> write(Batch<String> batch, WriteCallback callback) {
    Timer.Context context = writeTimer.time();
    int returnCode = 0;
    LOG.info("Dispatching batch " + batch.getId());
    recordsAttempted.mark(batch.getRecords().size());
    try {
      String encoded = encodeBatch(batch);
      returnCode = request(encoded);
      WriteResponse<Integer> response = WRITE_RESPONSE_WRAPPER.wrap(returnCode);
      callback.onSuccess(response);
      // NOTE(review): length() counts chars, which equals the byte count only for ASCII payloads.
      bytesWritten.mark(encoded.length());
      recordsSuccess.mark(batch.getRecords().size());
    } catch (Exception e) {
      LOG.error("Dispatching batch " + batch.getId() + " failed :" + e.toString());
      callback.onFailure(e);
      recordsFailed.mark(batch.getRecords().size());
    } finally {
      // Close the timer context even if an Error propagates, so the timer stays accurate.
      context.close();
    }
    Future<Integer> future = Futures.immediateFuture(returnCode);
    return new WriteResponseFuture<>(future, WRITE_RESPONSE_WRAPPER);
  }

  /**
   * Writes a single record to eventhub synchronously.
   *
   * @param record a JSON record to wrap and send
   * @return the wrapped HTTP status code
   * @throws IOException if the POST does not return 201 Created
   */
  public WriteResponse write(String record) throws IOException {
    recordsAttempted.mark();
    String encoded = encodeRecord(record);
    int returnCode = request(encoded);
    recordsSuccess.mark();
    // NOTE(review): length() counts chars, which equals the byte count only for ASCII payloads.
    bytesWritten.mark(encoded.length());
    return WRITE_RESPONSE_WRAPPER.wrap(returnCode);
  }

  /**
   * Regenerates the shared-access signature if none exists yet or the previous one has
   * passed its {@code sigExpireInMinute} lifetime. After the lifetime elapses the old
   * signature becomes invalid on the server side.
   */
  public void refreshSignature() {
    if (postStartTimestamp == 0 || (System.nanoTime() - postStartTimestamp) > Duration.ofMinutes(sigExpireInMinute).toNanos()) {
      // generate signature
      try {
        signature = SharedAccessSignatureTokenProvider
            .generateSharedAccessSignature(sasKeyName, sasKey, namespaceName, Duration.ofMinutes(sigExpireInMinute));
        postStartTimestamp = System.nanoTime();
        // SECURITY: do not log the signature itself -- it is a bearer credential.
        LOG.info("SAS signature refreshed; valid for " + sigExpireInMinute + " minute(s)");
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }

  /**
   * Sends an encoded JSON array to eventhub via POST.
   *
   * @return the HTTP status code (201 Created on success)
   * @throws IOException if the response status is anything other than 201
   */
  private int request(String encoded) throws IOException {
    refreshSignature();
    HttpPost httpPost = new HttpPost(targetURI);
    httpPost.setHeader("Content-type", "application/vnd.microsoft.servicebus.json");
    httpPost.setHeader("Authorization", signature);
    // Fixed: the host value previously carried a stray trailing space.
    httpPost.setHeader("Host", namespaceName + ".servicebus.windows.net");
    // Explicit UTF-8: the no-charset StringEntity constructor defaults to ISO-8859-1,
    // which would corrupt non-ASCII characters in the Unicode JSON payload.
    StringEntity entity = new StringEntity(encoded, "UTF-8");
    httpPost.setEntity(entity);
    HttpResponse response = httpclient.execute(httpPost);
    StatusLine status = response.getStatusLine();
    HttpEntity entity2 = response.getEntity();
    // Fully consume the response body so the connection can be reused.
    EntityUtils.consume(entity2);
    int returnCode = status.getStatusCode();
    if (returnCode != HttpStatus.SC_CREATED) {
      LOG.error(new IOException(status.getReasonPhrase()).toString());
      throw new IOException(status.getReasonPhrase());
    }
    return returnCode;
  }

  /**
   * Wraps each record of the batch in a {"Body": ...} object and encodes the whole
   * list as one JSON array, per the eventhub send-batch REST contract.
   * Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events
   */
  private String encodeBatch(Batch<String> batch) throws IOException {
    List<String> records = batch.getRecords();
    ArrayList<EventhubRequest> arrayList = new ArrayList<>();
    for (String record : records) {
      arrayList.add(new EventhubRequest(record));
    }
    return mapper.writeValueAsString(arrayList);
  }

  /**
   * Wraps a single record in a {"Body": ...} object inside a one-element JSON array,
   * the shape the eventhub REST API expects even for single events.
   */
  private String encodeRecord(String record) throws IOException {
    ArrayList<EventhubRequest> arrayList = new ArrayList<>();
    arrayList.add(new EventhubRequest(record));
    return mapper.writeValueAsString(arrayList);
  }

  /** Closes the underlying {@link HttpClient} if it is closeable. */
  public void close() throws IOException {
    if (httpclient instanceof CloseableHttpClient) {
      ((CloseableHttpClient) httpclient).close();
    }
  }

  public void cleanup() {
    // do nothing
  }

  public void flush() {
    // do nothing
  }
}
| 3,117 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import java.io.IOException;
import java.util.Properties;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.AsyncDataWriter;
import org.apache.gobblin.writer.AsyncWriterManager;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
* Builder that hands back a {@link EventhubDataWriter}
*/
public class EventhubDataWriterBuilder extends DataWriterBuilder {

  /**
   * Assembles the eventhub async writer: a raw {@link EventhubDataWriter} wrapped in a
   * {@link BatchedEventhubDataWriter}, which buffers pending records through an
   * {@link EventhubBatchAccumulator}.
   */
  public AsyncDataWriter getAsyncDataWriter(Properties properties) {
    EventhubDataWriter rawWriter = new EventhubDataWriter(properties);
    EventhubBatchAccumulator buffer = new EventhubBatchAccumulator(properties);
    return new BatchedEventhubDataWriter(buffer, rawWriter);
  }

  /**
   * Builds an {@link AsyncWriterManager} around the buffered eventhub writer, with
   * commit timeout, commit-step wait time and failure allowance read from the task config.
   */
  @Override
  public DataWriter build()
      throws IOException {
    State state = this.destination.getProperties();
    Properties taskProps = state.getProperties();
    Config config = ConfigUtils.propertiesToConfig(taskProps);

    long commitTimeoutMillis = ConfigUtils.getLong(config,
        BatchedEventhubDataWriter.COMMIT_TIMEOUT_MILLIS_CONFIG,
        BatchedEventhubDataWriter.COMMIT_TIMEOUT_MILLIS_DEFAULT);
    long commitStepWaitTimeMillis = ConfigUtils.getLong(config,
        BatchedEventhubDataWriter.COMMIT_STEP_WAIT_TIME_CONFIG,
        BatchedEventhubDataWriter.COMMIT_STEP_WAIT_TIME_DEFAULT);
    // Config value is a percentage; the manager expects a ratio in [0, 1].
    double failureAllowance = ConfigUtils.getDouble(config,
        BatchedEventhubDataWriter.FAILURE_ALLOWANCE_PCT_CONFIG,
        BatchedEventhubDataWriter.FAILURE_ALLOWANCE_PCT_DEFAULT) / 100.0;

    return AsyncWriterManager.builder()
        .config(config)
        .commitTimeoutMillis(commitTimeoutMillis)
        .commitStepWaitTimeInMillis(commitStepWaitTimeMillis)
        .failureAllowanceRatio(failureAllowance)
        .retriesEnabled(false)
        .asyncDataWriter(getAsyncDataWriter(taskProps))
        .maxOutstandingWrites(10000)
        .build();
  }
}
| 3,118 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/BatchedEventhubDataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import org.apache.gobblin.writer.BufferedAsyncDataWriter;
/**
* A batch writer for eventhub, composed by {@link EventhubBatchAccumulator} and {@link EventhubDataWriter}
* {@link EventhubBatchAccumulator} provides a buffer to store pending records
* {@link EventhubDataWriter} is the actual writer ships data to eventhub
*/
public class BatchedEventhubDataWriter extends BufferedAsyncDataWriter<String> {

  /** Config key for the commit timeout, in milliseconds. */
  public static final String COMMIT_TIMEOUT_MILLIS_CONFIG = "writer.eventhub.commitTimeoutMillis";
  public static final long COMMIT_TIMEOUT_MILLIS_DEFAULT = 60000; // 1 minute

  /** Config key for the wait between commit-check steps, in milliseconds. */
  public static final String COMMIT_STEP_WAIT_TIME_CONFIG = "writer.eventhub.commitStepWaitTimeMillis";
  public static final long COMMIT_STEP_WAIT_TIME_DEFAULT = 500; // 500ms

  /** Config key for the tolerated write-failure percentage. */
  public static final String FAILURE_ALLOWANCE_PCT_CONFIG = "writer.eventhub.failureAllowancePercentage";
  public static final double FAILURE_ALLOWANCE_PCT_DEFAULT = 20.0;

  // Eventhub connection property keys.
  public static final String EVH_NAMESPACE = "eventhub.namespace";
  public static final String EVH_HUBNAME = "eventhub.hubname";
  public static final String EVH_SAS_KEYNAME = "eventhub.sas.keyname";
  public static final String EVH_SAS_KEYVALUE = "eventhub.sas.keyvalue";

  /**
   * @param accumulator buffer holding pending records until a batch is ready
   * @param dataWriter the writer that actually ships each batch to eventhub
   */
  public BatchedEventhubDataWriter (EventhubBatchAccumulator accumulator, EventhubDataWriter dataWriter) {
    super (accumulator, dataWriter);
  }
}
| 3,119 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import org.codehaus.jackson.annotate.JsonProperty;
/**
* The EventhubRequest is a wrapper of given json string
* This is required because when using eventhub REST api, the json object sent out
* needs to be wrapped in an outer Body object. Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events
*/
public class EventhubRequest {

  /** The raw JSON record, serialized under the "Body" key required by the eventhub batch API. */
  @JsonProperty
  public String body;

  /**
   * @param body the JSON record to wrap
   */
  public EventhubRequest (String body) {
    this.body = body;
  }
}
| 3,120 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub | Create_ds/gobblin/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubBatchAccumulator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.eventhub.writer;
import org.apache.gobblin.writer.SequentialBasedBatchAccumulator;
import java.util.Properties;
/**
* Simply a ttl based batch accumulator for eventhub with string type
*/
public class EventhubBatchAccumulator extends SequentialBasedBatchAccumulator<String> {

  /** Builds an accumulator whose limits are read from the supplied job properties. */
  public EventhubBatchAccumulator (Properties properties) {
    super(properties);
  }

  /**
   * Builds an accumulator with explicit limits.
   *
   * @param batchSizeLimit maximum size of a single batch
   * @param expireInMilliSecond TTL after which a partial batch is flushed
   * @param capacity maximum number of batches held at once
   */
  public EventhubBatchAccumulator (long batchSizeLimit, long expireInMilliSecond, long capacity) {
    super (batchSizeLimit, expireInMilliSecond, capacity);
  }
}
| 3,121 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/test/java/org/apache/gobblin/metrics/graphite/GraphiteEventReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.graphite;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.metrics.event.MultiPartEvent;
import org.apache.gobblin.metrics.event.TaskEvent;
import org.apache.gobblin.metrics.test.TimestampedValue;
import java.io.IOException;
import java.util.Map;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
/**
* Test for GraphiteEventReporter using a mock backend ({@link TestGraphiteSender})
*
* @author Lorand Bendig
*
*/
@Test(groups = { "gobblin.metrics" })
public class GraphiteEventReporterTest {

  // Constants made final: they are never reassigned and should not be mutable statics.
  private static final int DEFAULT_PORT = 0;
  private static final String DEFAULT_HOST = "localhost";
  private static final String NAMESPACE = "gobblin.metrics.test";

  private TestGraphiteSender graphiteSender = new TestGraphiteSender();
  private GraphitePusher graphitePusher;

  @BeforeClass
  public void setUp() throws IOException {
    GraphiteConnectionType connectionType = Mockito.mock(GraphiteConnectionType.class);
    Mockito.when(connectionType.createConnection(DEFAULT_HOST, DEFAULT_PORT)).thenReturn(graphiteSender);
    this.graphitePusher = new GraphitePusher(DEFAULT_HOST, DEFAULT_PORT, connectionType);
  }

  private GraphiteEventReporter.BuilderImpl getBuilder(MetricContext metricContext) {
    return GraphiteEventReporter.Factory.forContext(metricContext).withGraphitePusher(graphiteSender == null
        ? null : graphitePusher);
  }

  /**
   * Gives the asynchronous event pipeline a moment to deliver events.
   * Restores the interrupt flag if interrupted. Extracted to remove the four
   * identical sleep/try/catch blocks the tests previously repeated inline.
   */
  private static void pauseForDelivery() {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }
  }

  @Test
  public void testSimpleEvent() throws IOException {
    try (
        MetricContext metricContext =
            MetricContext.builder(this.getClass().getCanonicalName() + ".testGraphiteReporter1").build();
        GraphiteEventReporter graphiteEventReporter = getBuilder(metricContext).withEmitValueAsKey(false).build();) {

      Map<String, String> metadata = Maps.newHashMap();
      metadata.put(JobEvent.METADATA_JOB_ID, "job1");
      metadata.put(TaskEvent.METADATA_TASK_ID, "task1");

      metricContext.submitEvent(GobblinTrackingEvent.newBuilder()
          .setName(JobEvent.TASKS_SUBMITTED)
          .setNamespace(NAMESPACE)
          .setMetadata(metadata).build());

      pauseForDelivery();
      graphiteEventReporter.report();
      pauseForDelivery();

      TimestampedValue retrievedEvent = graphiteSender.getMetric("gobblin.metrics.job1.task1.events.TasksSubmitted");
      Assert.assertEquals(retrievedEvent.getValue(), "0");
      Assert.assertTrue(retrievedEvent.getTimestamp() <= (System.currentTimeMillis() / 1000l));
    }
  }

  @Test
  public void testMultiPartEvent() throws IOException {
    try (
        MetricContext metricContext =
            MetricContext.builder(this.getClass().getCanonicalName() + ".testGraphiteReporter2").build();
        GraphiteEventReporter graphiteEventReporter = getBuilder(metricContext).withEmitValueAsKey(true).build();) {

      Map<String, String> metadata = Maps.newHashMap();
      metadata.put(JobEvent.METADATA_JOB_ID, "job2");
      metadata.put(TaskEvent.METADATA_TASK_ID, "task2");
      metadata.put(EventSubmitter.EVENT_TYPE, "JobStateEvent");
      metadata.put(JobEvent.METADATA_JOB_START_TIME, "1457736710521");
      metadata.put(JobEvent.METADATA_JOB_END_TIME, "1457736710734");
      metadata.put(JobEvent.METADATA_JOB_LAUNCHED_TASKS, "3");
      metadata.put(JobEvent.METADATA_JOB_COMPLETED_TASKS, "2");
      metadata.put(JobEvent.METADATA_JOB_STATE, "FAILED");

      metricContext.submitEvent(GobblinTrackingEvent.newBuilder()
          .setName(MultiPartEvent.JOBSTATE_EVENT.getEventName())
          .setNamespace(NAMESPACE)
          .setMetadata(metadata).build());

      pauseForDelivery();
      graphiteEventReporter.report();
      pauseForDelivery();

      String prefix = "gobblin.metrics.job2.task2.events.JobStateEvent";
      Assert.assertEquals(graphiteSender.getMetric(prefix + ".jobBeginTime").getValue(), "1457736710521");
      Assert.assertEquals(graphiteSender.getMetric(prefix + ".jobEndTime").getValue(), "1457736710734");
      Assert.assertEquals(graphiteSender.getMetric(prefix + ".jobLaunchedTasks").getValue(), "3");
      Assert.assertEquals(graphiteSender.getMetric(prefix + ".jobCompletedTasks").getValue(), "2");
      Assert.assertNotNull(graphiteSender.getMetric(prefix + ".jobState.FAILED"));
    }
  }
}
| 3,122 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/test/java/org/apache/gobblin/metrics/graphite/TestGraphiteSender.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.graphite;
import org.apache.gobblin.metrics.test.TimestampedValue;
import java.io.IOException;
import java.util.Map;
import com.codahale.metrics.graphite.GraphiteSender;
import com.google.common.collect.Maps;
/**
* A test implementation of {@link com.codahale.metrics.graphite.GraphiteSender}.
*
* @author Yinan Li
*/
public class TestGraphiteSender implements GraphiteSender {

  // In-memory store of the last datapoint sent per metric name.
  private final Map<String, TimestampedValue> metrics = Maps.newHashMap();

  /** No-op: this in-memory sender needs no connection. */
  @Override
  public void connect() throws IllegalStateException, IOException {
    // Nothing to do
  }

  /** Records the datapoint in memory, keyed by metric name (latest value wins). */
  @Override
  public void send(String name, String value, long timestamp) throws IOException {
    this.metrics.put(name, new TimestampedValue(timestamp, value));
  }

  /** No-op: values are stored immediately on {@link #send}. */
  @Override
  public void flush() throws IOException {
    // Nothing to do
  }

  @Override
  public boolean isConnected() {
    return true;
  }

  @Override
  public int getFailures() {
    return 0;
  }

  /** Discards all recorded datapoints. */
  @Override
  public void close() throws IOException {
    this.metrics.clear();
  }

  /**
   * Looks up the last datapoint recorded for a metric.
   *
   * @param name metric name
   * @return the recorded {@link org.apache.gobblin.metrics.test.TimestampedValue}, or null if none was sent
   */
  public TimestampedValue getMetric(String name) {
    return this.metrics.get(name);
  }
}
| 3,123 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/test/java/org/apache/gobblin/metrics/graphite/GraphiteReporterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.graphite;
import org.apache.gobblin.metrics.ContextAwareGauge;
import org.apache.gobblin.metrics.Measurements;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import java.io.IOException;
import java.util.Properties;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import static org.apache.gobblin.metrics.test.TestConstants.METRIC_PREFIX;
import static org.apache.gobblin.metrics.test.TestConstants.GAUGE;
import static org.apache.gobblin.metrics.test.TestConstants.COUNTER;
import static org.apache.gobblin.metrics.test.TestConstants.METER;
import static org.apache.gobblin.metrics.test.TestConstants.HISTOGRAM;
import static org.apache.gobblin.metrics.test.TestConstants.TIMER;
import static org.apache.gobblin.metrics.test.TestConstants.CONTEXT_NAME;
/**
 * Test for GraphiteReporter using a mock backend ({@link TestGraphiteSender})
 *
 * @author Lorand Bendig
 *
 */
@Test(groups = { "gobblin.metrics" })
public class GraphiteReporterTest {

  // The port is irrelevant: the connection type is mocked below to hand back the
  // in-memory sender, so no real TCP/UDP connection is ever opened.
  private static final int DEFAULT_PORT = 0;
  private static final String DEFAULT_HOST = "localhost";

  // In-memory Graphite backend that records every pushed (name, value, timestamp) triple.
  private final TestGraphiteSender graphiteSender = new TestGraphiteSender();
  private GraphitePusher graphitePusher;

  @BeforeClass
  public void setUp() throws IOException {
    // Stub the connection factory so the pusher writes to the in-memory sender.
    GraphiteConnectionType connectionType = Mockito.mock(GraphiteConnectionType.class);
    Mockito.when(connectionType.createConnection(DEFAULT_HOST, DEFAULT_PORT)).thenReturn(graphiteSender);
    this.graphitePusher = new GraphitePusher(DEFAULT_HOST, DEFAULT_PORT, connectionType);
  }

  /**
   * Registers one metric of each kind (gauge, counter, meter, histogram, timer) in an
   * untagged context, triggers a report, and verifies the values (and one timestamp)
   * that reach the mock Graphite backend.
   */
  @Test
  public void testWithoutTags() throws IOException {
    try (
        MetricContext metricContext =
            MetricContext.builder(this.getClass().getCanonicalName() + ".testGraphiteReporter").build();

        GraphiteReporter graphiteReporter =
            GraphiteReporter.Factory.newBuilder()
                .withGraphitePusher(graphitePusher)
                .withMetricContextName(CONTEXT_NAME)
                .build(new Properties());) {

      ContextAwareGauge<Long> contextAwareGauge =
          metricContext.newContextAwareGauge("com.linkedin.example.gauge", new Gauge<Long>() {
            @Override
            public Long getValue() {
              return 1000L;
            }
          });

      metricContext.register(MetricRegistry.name(METRIC_PREFIX, GAUGE), contextAwareGauge);

      Counter counter = metricContext.counter(MetricRegistry.name(METRIC_PREFIX, COUNTER));
      Meter meter = metricContext.meter(MetricRegistry.name(METRIC_PREFIX, METER));
      Histogram histogram = metricContext.histogram(MetricRegistry.name(METRIC_PREFIX, HISTOGRAM));
      Timer timer = metricContext.timer(MetricRegistry.name(METRIC_PREFIX, TIMER));

      counter.inc(3L);
      meter.mark(1L);
      meter.mark(2L);
      meter.mark(3L);
      histogram.update(1);
      histogram.update(1);
      histogram.update(2);
      timer.update(1, TimeUnit.SECONDS);
      timer.update(2, TimeUnit.SECONDS);
      timer.update(3, TimeUnit.SECONDS);

      graphiteReporter.report(metricContext.getGauges(), metricContext.getCounters(), metricContext.getHistograms(),
          metricContext.getMeters(), metricContext.getTimers(), metricContext.getTagMap());

      Assert.assertEquals(getMetricValue(COUNTER, Measurements.COUNT), Long.toString(3L));

      Assert.assertEquals(getMetricValue(GAUGE, null), Long.toString(1000L));
      // Graphite timestamps are epoch seconds; the reported one cannot be in the future.
      Assert.assertTrue(getMetricTimestamp(GAUGE, null) <= System.currentTimeMillis() / 1000L);

      // Histogram updates were {1, 1, 2}.
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_75TH), Double.toString(2d));
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_98TH), Double.toString(2d));
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_99TH), Double.toString(2d));
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.PERCENTILE_999TH), Double.toString(2d));
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.COUNT), Long.toString(3L));
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.MIN), Long.toString(1L));
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.MAX), Long.toString(2L));
      Assert.assertEquals(getMetricValue(HISTOGRAM, Measurements.MEDIAN), Double.toString(1d));
      Assert.assertTrue(Double.parseDouble(getMetricValue(HISTOGRAM, Measurements.MEAN)) > 1d);
      Assert.assertTrue(Double.parseDouble(getMetricValue(HISTOGRAM, Measurements.STDDEV)) < 0.5d);

      // Meter marks were {1, 2, 3}; rate windows have not elapsed yet, hence 0.
      Assert.assertEquals(getMetricValue(METER, Measurements.RATE_1MIN), Double.toString(0d));
      Assert.assertEquals(getMetricValue(METER, Measurements.RATE_5MIN), Double.toString(0d));
      Assert.assertEquals(getMetricValue(METER, Measurements.COUNT), Long.toString(6L));
      Assert.assertTrue(Double.parseDouble(getMetricValue(METER, Measurements.MEAN_RATE)) > 0d);

      // Timer updates were {1s, 2s, 3s}; durations are reported in milliseconds.
      Assert.assertEquals(getMetricValue(TIMER, Measurements.RATE_1MIN), Double.toString(0d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.RATE_5MIN), Double.toString(0d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_75TH), Double.toString(3000d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_98TH), Double.toString(3000d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_99TH), Double.toString(3000d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.PERCENTILE_999TH), Double.toString(3000d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.COUNT), Long.toString(3L));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.MIN), Double.toString(1000d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.MAX), Double.toString(3000d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.MEAN), Double.toString(2000d));
      Assert.assertEquals(getMetricValue(TIMER, Measurements.MEDIAN), Double.toString(2000d));
      Assert.assertTrue(Double.parseDouble(getMetricValue(TIMER, Measurements.MEAN_RATE)) > 0d);
      Assert.assertTrue(Double.parseDouble(getMetricValue(TIMER, Measurements.STDDEV)) > 0d);
    }
  }

  /**
   * Verifies that context tags (taskId, forkBranchName) are prepended to the metric key.
   */
  @Test
  public void testWithTags() throws IOException {
    try (
        MetricContext metricContext =
            MetricContext.builder(this.getClass().getCanonicalName() + ".testGraphiteReporter")
                .addTag(new Tag<String>("taskId", "task_testjob_123"))
                .addTag(new Tag<String>("forkBranchName", "fork_1")).build();

        GraphiteReporter graphiteReporter =
            GraphiteReporter.Factory.newBuilder()
                .withGraphitePusher(graphitePusher)
                .withMetricContextName(CONTEXT_NAME)
                .build(new Properties());) {

      Counter counter = metricContext.counter(MetricRegistry.name(METRIC_PREFIX, COUNTER));
      counter.inc(5L);

      graphiteReporter.report(new TreeMap<String, Gauge>(), metricContext.getCounters(),
          new TreeMap<String, Histogram>(), new TreeMap<String, Meter>(), new TreeMap<String, Timer>(),
          metricContext.getTagMap());

      // Tag values appear between the context name and the metric prefix.
      Assert.assertEquals(getMetricValue("task_testjob_123.fork_1." + METRIC_PREFIX, COUNTER, Measurements.COUNT),
          Long.toString(5L));
    }
  }

  /** Looks up the value reported for {@code metric} under the default metric prefix. */
  private String getMetricValue(String metric, Measurements key) {
    return getMetricValue(METRIC_PREFIX, metric, key);
  }

  /**
   * Builds the fully-qualified Graphite key (context name, prefix, metric and, unless
   * {@code key} is null, the measurement suffix) and returns the recorded value.
   */
  private String getMetricValue(String metricPrefix, String metric, Measurements key) {
    String metricKey =
        (key == null) ? MetricRegistry.name(CONTEXT_NAME, metricPrefix, metric) : MetricRegistry.name(CONTEXT_NAME,
            metricPrefix, metric, key.getName());
    return graphiteSender.getMetric(metricKey).getValue();
  }

  /** Same key construction as {@link #getMetricValue}, but returns the recorded timestamp. */
  private long getMetricTimestamp(String metric, Measurements key) {
    String metricKey =
        (key == null) ? MetricRegistry.name(CONTEXT_NAME, METRIC_PREFIX, metric) : MetricRegistry.name(CONTEXT_NAME,
            METRIC_PREFIX, metric, key.getName());
    return graphiteSender.getMetric(metricKey).getTimestamp();
  }
}
| 3,124 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics/graphite/GraphiteConnectionType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.graphite;
import java.net.InetSocketAddress;
import com.codahale.metrics.graphite.Graphite;
import com.codahale.metrics.graphite.GraphiteSender;
import com.codahale.metrics.graphite.GraphiteUDP;
/**
 * Connection types used by {@link GraphiteReporter} and {@link GraphiteEventReporter}
 * to create the underlying Dropwizard {@link GraphiteSender}.
 *
 * @author Lorand Bendig
 *
 */
public enum GraphiteConnectionType {
  // Reliable transport: metrics are written over a TCP socket.
  TCP {
    @Override
    public GraphiteSender createConnection(String hostname, int port) {
      return new Graphite(new InetSocketAddress(hostname, port));
    }
  }, UDP {
    // Fire-and-forget transport: metrics are sent as UDP datagrams.
    @Override
    public GraphiteSender createConnection(String hostname, int port) {
      return new GraphiteUDP(new InetSocketAddress(hostname, port));
    }
  };

  /**
   * Creates an unconnected {@link GraphiteSender} for the given endpoint using this
   * transport; the caller is responsible for connecting and closing it.
   *
   * @param hostname Graphite host
   * @param port Graphite port
   * @return a new sender for this connection type
   */
  public abstract GraphiteSender createConnection(String hostname, int port);
}
| 3,125 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics/graphite/GraphitePusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.graphite;
import java.io.Closeable;
import java.io.IOException;
import com.codahale.metrics.graphite.GraphiteSender;
import com.google.common.io.Closer;
/**
 * Establishes a connection through the Graphite protocol and pushes timestamped
 * name - value pairs.
 *
 * <p>The underlying {@link GraphiteSender} is registered with a {@link Closer} so that
 * {@link #close()} releases it.
 *
 * @author Lorand Bendig
 *
 */
public class GraphitePusher implements Closeable {

  // Never reassigned after construction; final makes that explicit.
  private final GraphiteSender graphiteSender;
  private final Closer closer;

  /**
   * Creates the sender for the given transport and connects it eagerly.
   *
   * @param hostname Graphite host
   * @param port Graphite port
   * @param connectionType TCP or UDP transport used to create the sender
   * @throws IOException if the initial connection attempt fails
   */
  public GraphitePusher(String hostname, int port, GraphiteConnectionType connectionType) throws IOException {
    this.closer = Closer.create();
    this.graphiteSender = this.closer.register(connectionType.createConnection(hostname, port));
    if (this.graphiteSender != null && !this.graphiteSender.isConnected()) {
      this.graphiteSender.connect();
    }
  }

  /**
   * Pushes a single metric through the Graphite protocol to the underlying backend.
   * The value may be buffered; call {@link #flush()} to force delivery.
   *
   * @param name metric name
   * @param value metric value
   * @param timestamp associated timestamp (epoch seconds)
   * @throws IOException on send failure
   */
  public void push(String name, String value, long timestamp) throws IOException {
    this.graphiteSender.send(name, value, timestamp);
  }

  /** Flushes any buffered metrics to the backend. */
  public void flush() throws IOException {
    this.graphiteSender.flush();
  }

  /** Closes the underlying sender. */
  @Override
  public void close() throws IOException {
    this.closer.close();
  }
}
| 3,126 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics/graphite/GraphiteEventReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.graphite;
import java.io.IOException;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.MultiPartEvent;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.JobEvent;
import org.apache.gobblin.metrics.event.TaskEvent;
import org.apache.gobblin.metrics.reporter.EventReporter;
import static org.apache.gobblin.metrics.event.TimingEvent.METADATA_DURATION;
/**
 *
 * {@link org.apache.gobblin.metrics.reporter.EventReporter} that emits {@link org.apache.gobblin.metrics.GobblinTrackingEvent} events
 * as timestamped name - value pairs through the Graphite protocol
 *
 * @author Lorand Bendig
 *
 */
public class GraphiteEventReporter extends EventReporter {

  private final GraphitePusher graphitePusher;
  // If true, non-numeric event values are appended to the metric key instead of being
  // sent as values (see emitAsKey).
  private final boolean emitValueAsKey;

  // Placeholder pushed when an event carries no value: Graphite requires a value for every key.
  private static final String EMPTY_VALUE = "0";
  private static final Logger LOGGER = LoggerFactory.getLogger(GraphiteEventReporter.class);

  private final String prefix;

  /**
   * Uses the builder-supplied {@link GraphitePusher} when present; otherwise creates one
   * from the builder's host/port/connection-type and registers it for cleanup on close.
   */
  public GraphiteEventReporter(Builder<?> builder) throws IOException {
    super(builder);

    if (builder.graphitePusher.isPresent()) {
      this.graphitePusher = builder.graphitePusher.get();
    } else {
      this.graphitePusher =
          this.closer.register(new GraphitePusher(builder.hostname, builder.port, builder.connectionType));
    }
    this.emitValueAsKey = builder.emitValueAsKey;
    this.prefix = builder.prefix;
  }

  /**
   * Drains the queue, pushing each event, then flushes. On failure a second flush is
   * attempted so that events sent before the error are not lost.
   */
  @Override
  public void reportEventQueue(Queue<GobblinTrackingEvent> queue) {
    GobblinTrackingEvent nextEvent;

    try {
      while (null != (nextEvent = queue.poll())) {
        pushEvent(nextEvent);
      }
      this.graphitePusher.flush();
    } catch (IOException e) {
      LOGGER.error("Error sending event to Graphite", e);
      try {
        this.graphitePusher.flush();
      } catch (IOException e1) {
        // Log the flush failure itself, not the original send failure (already logged above).
        LOGGER.error("Unable to flush previous events to Graphite", e1);
      }
    }
  }

  /**
   * Extracts the event and its metadata from {@link GobblinTrackingEvent} and creates
   * timestamped name value pairs
   *
   * @param event {@link GobblinTrackingEvent} to be reported
   * @throws IOException
   */
  private void pushEvent(GobblinTrackingEvent event) throws IOException {
    Map<String, String> metadata = event.getMetadata();
    String name = getMetricName(metadata, event.getName());
    // Graphite expects epoch seconds; event timestamps are epoch milliseconds.
    long timestamp = event.getTimestamp() / 1000L;
    MultiPartEvent multipartEvent = MultiPartEvent.getEvent(metadata.get(EventSubmitter.EVENT_TYPE));
    if (multipartEvent == null) {
      // Simple event: emit the name alone with a placeholder value.
      graphitePusher.push(JOINER.join(prefix, name), EMPTY_VALUE, timestamp);
    }
    else {
      // Multi-part event: emit one key per metadata field.
      for (String field : multipartEvent.getMetadataFields()) {
        String value = metadata.get(field);
        if (value == null) {
          graphitePusher.push(JOINER.join(prefix, name, field), EMPTY_VALUE, timestamp);
        } else {
          if (emitAsKey(field)) {
            // metric value is emitted as part of the keys
            graphitePusher.push(JOINER.join(prefix, name, field, value), EMPTY_VALUE, timestamp);
          } else {
            graphitePusher.push(JOINER.join(prefix, name, field), convertValue(field, value), timestamp);
          }
        }
      }
    }
  }

  /**
   * Converts duration fields (milliseconds) through the reporter's configured duration
   * unit; all other fields pass through unchanged.
   */
  private String convertValue(String field, String value) {
    return METADATA_DURATION.equals(field) ?
        Double.toString(convertDuration(TimeUnit.MILLISECONDS.toNanos(Long.parseLong(value)))) : value;
  }

  /**
   * Non-numeric event values may be emitted as part of the key by applying them to the end of the key if
   * {@link ConfigurationKeys#METRICS_REPORTING_GRAPHITE_EVENTS_VALUE_AS_KEY} is set. Thus such events can be still
   * reported even when the backend doesn't accept text values through Graphite
   *
   * @param field name of the metric's metadata fields
   * @return true if event value is emitted in the key
   */
  private boolean emitAsKey(String field) {
    return emitValueAsKey
        && (field.equals(TaskEvent.METADATA_TASK_WORKING_STATE) || field.equals(JobEvent.METADATA_JOB_STATE));
  }

  /**
   * Returns a new {@link GraphiteEventReporter.Builder} for {@link GraphiteEventReporter}.
   * Will automatically add all Context tags to the reporter.
   *
   * @param context the {@link org.apache.gobblin.metrics.MetricContext} to report
   * @return GraphiteEventReporter builder
   * @deprecated this method is bugged. Use {@link GraphiteEventReporter.Factory#forContext} instead.
   */
  @Deprecated
  public static Builder<? extends Builder> forContext(MetricContext context) {
    return new BuilderImpl(context);
  }

  public static class BuilderImpl extends Builder<BuilderImpl> {

    private BuilderImpl(MetricContext context) {
      super(context);
    }

    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  public static class Factory {
    /**
     * Returns a new {@link GraphiteEventReporter.Builder} for {@link GraphiteEventReporter}.
     * Will automatically add all Context tags to the reporter.
     *
     * @param context the {@link org.apache.gobblin.metrics.MetricContext} to report
     * @return GraphiteEventReporter builder
     */
    public static BuilderImpl forContext(MetricContext context) {
      return new BuilderImpl(context);
    }
  }

  /**
   * Builder for {@link GraphiteEventReporter}.
   * Defaults to no filter, reporting rates in seconds and times in milliseconds using TCP connection
   */
  public static abstract class Builder<T extends EventReporter.Builder<T>> extends EventReporter.Builder<T> {
    protected String hostname;
    protected int port;
    protected GraphiteConnectionType connectionType;
    protected Optional<GraphitePusher> graphitePusher;
    protected boolean emitValueAsKey;
    protected String prefix;

    protected Builder(MetricContext context) {
      super(context);
      this.graphitePusher = Optional.absent();
      this.connectionType = GraphiteConnectionType.TCP;
    }

    /**
     * Set {@link org.apache.gobblin.metrics.graphite.GraphitePusher} to use.
     */
    public T withGraphitePusher(GraphitePusher pusher) {
      this.graphitePusher = Optional.of(pusher);
      return self();
    }

    /**
     * Set connection parameters for the {@link org.apache.gobblin.metrics.graphite.GraphitePusher} creation
     */
    public T withConnection(String hostname, int port) {
      this.hostname = hostname;
      this.port = port;
      return self();
    }

    /**
     * Set the prefix prepended to every emitted metric key.
     */
    public T withPrefix(String prefix) {
      this.prefix = prefix;
      return self();
    }

    /**
     * Set {@link org.apache.gobblin.metrics.graphite.GraphiteConnectionType} to use.
     */
    public T withConnectionType(GraphiteConnectionType connectionType) {
      this.connectionType = connectionType;
      return self();
    }

    /**
     * Set flag that forces the reporter to emit non-numeric event values as part of the key
     */
    public T withEmitValueAsKey(boolean emitValueAsKey) {
      this.emitValueAsKey = emitValueAsKey;
      return self();
    }

    /**
     * Builds and returns {@link GraphiteEventReporter}.
     *
     * @return GraphiteEventReporter
     */
    public GraphiteEventReporter build() throws IOException {
      return new GraphiteEventReporter(this);
    }
  }
}
| 3,127 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-metrics-graphite/src/main/java/org/apache/gobblin/metrics/graphite/GraphiteReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.graphite;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.SortedMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Counting;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Metered;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.Measurements;
import org.apache.gobblin.metrics.reporter.ConfiguredScheduledReporter;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.metrics.Measurements.*;
/**
 * Graphite reporter for metrics. Converts each Dropwizard metric type into a set of
 * timestamped name - value pairs and pushes them through a {@link GraphitePusher}.
 *
 * @author Lorand Bendig
 *
 */
public class GraphiteReporter extends ConfiguredScheduledReporter {

  private final GraphitePusher graphitePusher;

  private static final Logger LOGGER = LoggerFactory.getLogger(GraphiteReporter.class);

  // Uses the builder-supplied pusher when present; otherwise creates one from the
  // host/port/connection-type settings and registers it with the closer for cleanup.
  public GraphiteReporter(Builder<?> builder, Config config) throws IOException {
    super(builder, config);
    if (builder.graphitePusher.isPresent()) {
      this.graphitePusher = builder.graphitePusher.get();
    } else {
      this.graphitePusher = this.closer.register(new GraphitePusher(builder.hostname, builder.port, builder.connectionType));
    }
  }

  /**
   * A static factory class for obtaining new {@link org.apache.gobblin.metrics.graphite.GraphiteReporter.Builder}s
   *
   * @see org.apache.gobblin.metrics.graphite.GraphiteReporter.Builder
   */
  public static class Factory {
    public static BuilderImpl newBuilder() {
      return new BuilderImpl();
    }
  }

  public static class BuilderImpl extends Builder<BuilderImpl> {
    @Override
    protected BuilderImpl self() {
      return this;
    }
  }

  /**
   * Builder for {@link GraphiteReporter}. Defaults to no filter, reporting rates in seconds and times in
   * milliseconds using TCP sending type
   */
  public static abstract class Builder<T extends ConfiguredScheduledReporter.Builder<T>> extends
      ConfiguredScheduledReporter.Builder<T> {

    protected MetricFilter filter;
    protected String hostname;
    protected int port;
    protected GraphiteConnectionType connectionType;
    protected Optional<GraphitePusher> graphitePusher;

    protected Builder() {
      super();
      this.name = "GraphiteReporter";
      this.graphitePusher = Optional.absent();
      this.filter = MetricFilter.ALL;
      this.connectionType = GraphiteConnectionType.TCP;
    }

    /**
     * Set {@link org.apache.gobblin.metrics.graphite.GraphitePusher} to use.
     */
    public T withGraphitePusher(GraphitePusher pusher) {
      this.graphitePusher = Optional.of(pusher);
      return self();
    }

    /**
     * Set connection parameters for the {@link org.apache.gobblin.metrics.graphite.GraphitePusher} creation
     */
    public T withConnection(String hostname, int port) {
      this.hostname = hostname;
      this.port = port;
      return self();
    }

    /**
     * Set {@link org.apache.gobblin.metrics.graphite.GraphiteConnectionType} to use.
     */
    public T withConnectionType(GraphiteConnectionType connectionType) {
      this.connectionType = connectionType;
      return self();
    }

    /**
     * Only report metrics which match the given filter.
     *
     * @param filter a {@link MetricFilter}
     * @return {@code this}
     */
    public T filter(MetricFilter filter) {
      this.filter = filter;
      return self();
    }

    /**
     * Builds and returns {@link GraphiteReporter}.
     *
     * @param props metrics properties
     * @return GraphiteReporter
     */
    public GraphiteReporter build(Properties props) throws IOException {
      return new GraphiteReporter(this, ConfigUtils.propertiesToConfig(props,
          Optional.of(ConfigurationKeys.METRICS_CONFIGURATIONS_PREFIX)));
    }
  }

  /**
   * Pushes every registered metric with a single shared timestamp (epoch seconds) and
   * a key prefix derived from the context tags, then flushes.
   */
  @Override
  protected void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
      SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers,
      Map<String, Object> tags) {

    String prefix = getMetricNamePrefix(tags);
    long timestamp = System.currentTimeMillis() / 1000l;

    try {
      for (Map.Entry<String, Gauge> gauge : gauges.entrySet()) {
        reportGauge(prefix, gauge.getKey(), gauge.getValue(), timestamp);
      }

      for (Map.Entry<String, Counter> counter : counters.entrySet()) {
        reportCounter(prefix, counter.getKey(), counter.getValue(), timestamp);
      }

      for (Map.Entry<String, Histogram> histogram : histograms.entrySet()) {
        reportHistogram(prefix, histogram.getKey(), histogram.getValue(), timestamp);
      }

      for (Map.Entry<String, Meter> meter : meters.entrySet()) {
        reportMetered(prefix, meter.getKey(), meter.getValue(), timestamp);
      }

      for (Map.Entry<String, Timer> timer : timers.entrySet()) {
        reportTimer(prefix, timer.getKey(), timer.getValue(), timestamp);
      }

      this.graphitePusher.flush();
    } catch (IOException ioe) {
      LOGGER.error("Error sending metrics to Graphite", ioe);
      // NOTE(review): on failure the pusher is closed rather than flushed (compare
      // GraphiteEventReporter, which retries a flush); later report cycles would then
      // push through a closed sender — confirm this is intentional.
      try {
        this.graphitePusher.close();
      } catch (IOException innerIoe) {
        LOGGER.error("Error closing the Graphite sender", innerIoe);
      }
    }
  }

  // Gauge values are pushed verbatim via toString (may be non-numeric).
  private void reportGauge(String prefix, String name, Gauge gauge, long timestamp) throws IOException {
    String metricName = getKey(prefix, name);
    pushMetric(metricName, gauge.getValue().toString(), timestamp);
  }

  // Emits "<prefix>.<name>.count"; also reused for the count of histograms/meters/timers.
  private void reportCounter(String prefix, String name, Counting counter, long timestamp) throws IOException {
    String metricName = getKey(prefix, name, COUNT.getName());
    pushMetric(metricName, counter.getCount(), false, timestamp);
  }

  private void reportHistogram(String prefix, String name, Histogram histogram, long timestamp) throws IOException {
    reportCounter(prefix, name, histogram, timestamp);
    reportSnapshot(prefix, name, histogram.getSnapshot(), timestamp, false);
  }

  // Timer snapshot values are durations, so they go through duration conversion.
  private void reportTimer(String prefix, String name, Timer timer, long timestamp) throws IOException {
    reportSnapshot(prefix, name, timer.getSnapshot(), timestamp, true);
    reportMetered(prefix, name, timer, timestamp);
  }

  // Emits min/max/mean/stddev/median and the standard percentile set for a snapshot.
  private void reportSnapshot(String prefix, String name, Snapshot snapshot, long timestamp, boolean convertDuration)
      throws IOException {
    String baseMetricName = getKey(prefix, name);
    pushMetric(getKey(baseMetricName, MIN), snapshot.getMin(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, MAX), snapshot.getMax(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, MEAN), snapshot.getMean(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, STDDEV), snapshot.getStdDev(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, MEDIAN), snapshot.getMedian(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, PERCENTILE_75TH), snapshot.get75thPercentile(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, PERCENTILE_95TH), snapshot.get95thPercentile(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, PERCENTILE_98TH), snapshot.get98thPercentile(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, PERCENTILE_99TH), snapshot.get99thPercentile(), convertDuration, timestamp);
    pushMetric(getKey(baseMetricName, PERCENTILE_999TH), snapshot.get999thPercentile(), convertDuration, timestamp);
  }

  // Emits count plus 1/5/15-minute and mean rates, converted via the configured rate unit.
  private void reportMetered(String prefix, String name, Metered metered, long timestamp) throws IOException {
    reportCounter(prefix, name, metered, timestamp);
    String baseMetricName = getKey(prefix, name);
    pushMetricRate(getKey(baseMetricName, RATE_1MIN), metered.getOneMinuteRate(), timestamp);
    pushMetricRate(getKey(baseMetricName, RATE_5MIN), metered.getFiveMinuteRate(), timestamp);
    pushMetricRate(getKey(baseMetricName, RATE_15MIN), metered.getFifteenMinuteRate(), timestamp);
    pushMetricRate(getKey(baseMetricName, MEAN_RATE), metered.getMeanRate(), timestamp);
  }

  private void pushMetric(String metricName, Number value, boolean toDuration, long timestamp) throws IOException {
    String metricValue = toDuration ? getValue(convertDuration(value.doubleValue())) : getValue(value);
    pushMetric(metricName, metricValue, timestamp);
  }

  private void pushMetricRate(String metricName, double value, long timestamp)
      throws IOException {
    pushMetric(metricName, getValue(convertRate(value)), timestamp);
  }

  private void pushMetric(String name, String value, long timestamp) throws IOException {
    this.graphitePusher.push(name, value, timestamp);
  }

  private String getValue(Number value) {
    return value.toString();
  }

  private String getKey(String baseName, Measurements measurements) {
    return getKey(baseName, measurements.getName());
  }

  // Joins key segments with the reporter's configured separator (JOINER).
  private String getKey(String... keys) {
    return JOINER.join(keys);
  }
}
| 3,128 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/metrics/reporter/KafkaKeyValueProducerPusherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.ConfigFactory;
import kafka.consumer.ConsumerIterator;
import kafka.message.MessageAndMetadata;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.metrics.kafka.KafkaKeyValueProducerPusher;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.io.IOException;
/**
 * Test {@link KafkaKeyValueProducerPusher}: pushes two key/value messages through an
 * embedded Kafka broker and verifies they arrive in order with keys intact.
 */
@Test( groups = {"disabledOnCI"} )
public class KafkaKeyValueProducerPusherTest {
  public static final String TOPIC = KafkaKeyValueProducerPusherTest.class.getSimpleName();

  // Embedded Kafka broker used as the test backend.
  private KafkaTestBase kafkaTestHelper;

  @BeforeClass
  public void setup() throws Exception {
    kafkaTestHelper = new KafkaTestBase();
    kafkaTestHelper.startServers();
    kafkaTestHelper.provisionTopic(TOPIC);
  }

  @Test
  public void test() throws IOException {
    // Test that the scoped config overrides the generic config: the constructor gets an
    // unusable bootstrap address, so delivery only works if the scoped override wins.
    Pusher pusher = new KafkaKeyValueProducerPusher<byte[], byte[]>("127.0.0.1:dummy", TOPIC,
        Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
            ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort()))));

    String msg1 = "msg1";
    String msg2 = "msg2";

    pusher.pushMessages(Lists.newArrayList(Pair.of("key1", msg1.getBytes()), Pair.of("key2", msg2.getBytes())));
    try {
      // Give the async producer time to deliver before consuming.
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);

    // Both messages must arrive in push order with their keys preserved.
    assert (iterator.hasNext());
    MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
    Assert.assertEquals(new String(messageAndMetadata.key()), "key1");
    Assert.assertEquals(new String(messageAndMetadata.message()), msg1);
    assert (iterator.hasNext());
    messageAndMetadata = iterator.next();
    Assert.assertEquals(new String(messageAndMetadata.key()), "key2");
    Assert.assertEquals(new String(messageAndMetadata.message()), msg2);

    pusher.close();
  }

  @AfterClass
  public void after() {
    try {
      this.kafkaTestHelper.close();
    } catch (Exception e) {
      // Best-effort shutdown; a failure here must not fail the test run.
      System.err.println("Failed to close Kafka server.");
    }
  }
}
| 3,129 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/metrics/reporter/KafkaProducerPusherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.ConfigFactory;
import kafka.consumer.ConsumerIterator;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.metrics.kafka.KafkaProducerPusher;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.io.IOException;
/**
 * Test {@link org.apache.gobblin.metrics.kafka.KafkaProducerPusher}: pushes two
 * value-only messages through an embedded Kafka broker and verifies arrival order.
 */
@Test( groups = {"disabledOnCI"} )
public class KafkaProducerPusherTest {
  public static final String TOPIC = KafkaProducerPusherTest.class.getSimpleName();

  // Embedded Kafka broker used as the test backend.
  private KafkaTestBase kafkaTestHelper;

  @BeforeClass
  public void setup() throws Exception {
    kafkaTestHelper = new KafkaTestBase();
    kafkaTestHelper.startServers();
    kafkaTestHelper.provisionTopic(TOPIC);
  }

  @Test
  public void test() throws IOException {
    // Test that the scoped config overrides the generic config: the constructor gets an
    // unusable bootstrap address, so delivery only works if the scoped override wins.
    Pusher pusher = new KafkaProducerPusher("127.0.0.1:dummy", TOPIC, Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
        ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort()))));

    String msg1 = "msg1";
    String msg2 = "msg2";

    pusher.pushMessages(Lists.newArrayList(msg1.getBytes(), msg2.getBytes()));
    try {
      // Give the async producer time to deliver before consuming.
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);

    // Messages must arrive in push order.
    assert (iterator.hasNext());
    Assert.assertEquals(new String(iterator.next().message()), msg1);
    assert (iterator.hasNext());
    Assert.assertEquals(new String(iterator.next().message()), msg2);

    pusher.close();
  }

  @AfterClass
  public void after() {
    try {
      this.kafkaTestHelper.close();
    } catch (Exception e) {
      // Best-effort shutdown; a failure here must not fail the test run.
      System.err.println("Failed to close Kafka server.");
    }
  }
}
| 3,130 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka/KafkaClusterTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.ZKStringSerializer$;
import kafka.zk.EmbeddedZookeeper;
import org.I0Itec.zkclient.ZkClient;
import org.apache.gobblin.test.TestUtils;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
/**
 * Test helper that spins up an embedded Zookeeper plus a multi-broker Kafka cluster
 * ({@code clusterCount} brokers, each on a free port chosen at startup).
 */
public class KafkaClusterTestBase extends KafkaTestBase {
  int clusterCount;
  EmbeddedZookeeper _zkServer;
  String _zkConnectString;
  ZkClient _zkClient;
  List<KafkaServer> kafkaBrokerList = new ArrayList<KafkaServer>();
  List<Integer> kafkaBrokerPortList = new ArrayList<Integer>();

  public KafkaClusterTestBase(int clusterCount) throws InterruptedException, RuntimeException {
    super();
    this.clusterCount = clusterCount;
  }

  /** Starts Zookeeper first (brokers register there), then {@code clusterCount} brokers. */
  public void startCluster() {
    // Start Zookeeper.
    _zkServer = new EmbeddedZookeeper();
    _zkConnectString = "127.0.0.1:" + _zkServer.port();
    _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
    // Start Kafka Cluster.
    for (int i = 0; i < clusterCount; i++) {
      KafkaServer _kafkaServer = createKafkaServer(i, _zkConnectString);
      kafkaBrokerList.add(_kafkaServer);
    }
  }

  /** Shuts down every broker; failures on one broker do not prevent stopping the rest. */
  public void stopCluster() {
    for (KafkaServer server : kafkaBrokerList) {
      try {
        server.shutdown();
      } catch (RuntimeException e) {
        // Simply Ignore.
      }
    }
  }

  public int getZookeeperPort() {
    return _zkServer.port();
  }

  public List<KafkaServer> getBrokerList() {
    return kafkaBrokerList;
  }

  public List<Integer> getKafkaBrokerPortList() {
    return kafkaBrokerPortList;
  }

  public int getClusterCount() {
    return kafkaBrokerList.size();
  }

  /**
   * Creates and starts one broker with the given id, registering its port in
   * {@link #kafkaBrokerPortList}.
   */
  private KafkaServer createKafkaServer(int brokerId, String _zkConnectString) {
    int _brokerId = brokerId;
    int _kafkaServerPort = TestUtils.findFreePort();
    Properties props = kafka.utils.TestUtils.createBrokerConfig(
        _brokerId,
        _zkConnectString,
        kafka.utils.TestUtils.createBrokerConfig$default$3(),
        kafka.utils.TestUtils.createBrokerConfig$default$4(),
        _kafkaServerPort,
        kafka.utils.TestUtils.createBrokerConfig$default$6(),
        kafka.utils.TestUtils.createBrokerConfig$default$7(),
        kafka.utils.TestUtils.createBrokerConfig$default$8(),
        kafka.utils.TestUtils.createBrokerConfig$default$9(),
        kafka.utils.TestUtils.createBrokerConfig$default$10(),
        kafka.utils.TestUtils.createBrokerConfig$default$11(),
        kafka.utils.TestUtils.createBrokerConfig$default$12(),
        kafka.utils.TestUtils.createBrokerConfig$default$13(),
        kafka.utils.TestUtils.createBrokerConfig$default$14(),
        kafka.utils.TestUtils.createBrokerConfig$default$15(),
        kafka.utils.TestUtils.createBrokerConfig$default$16(),
        kafka.utils.TestUtils.createBrokerConfig$default$17(),
        kafka.utils.TestUtils.createBrokerConfig$default$18()
    );
    KafkaConfig config = new KafkaConfig(props);
    Time mock = new MockTime();
    KafkaServer _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
    kafkaBrokerPortList.add(_kafkaServerPort);
    return _kafkaServer;
  }

  /**
   * Returns a comma-separated "localhost:port" list for all started brokers.
   * Returns the empty string when no brokers have been started (the previous
   * implementation threw StringIndexOutOfBoundsException in that case).
   */
  public String getBootServersList() {
    StringBuilder bootServers = new StringBuilder();
    for (Integer port : kafkaBrokerPortList) {
      if (bootServers.length() > 0) {
        bootServers.append(',');
      }
      bootServers.append("localhost:").append(port);
    }
    return bootServers.toString();
  }
}
| 3,131 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka/KafkaTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka;
import com.google.common.collect.ImmutableMap;
import kafka.admin.AdminUtils;
import kafka.admin.RackAwareMode;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import kafka.zk.EmbeddedZookeeper;
import lombok.extern.slf4j.Slf4j;
import org.I0Itec.zkclient.ZkClient;
import org.apache.gobblin.test.TestUtils;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import java.io.Closeable;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A private class for starting a suite of servers for Kafka
* Calls to start and shutdown are reference counted, so that the suite is started and shutdown in pairs.
* A suite of servers (Zk, Kafka etc) will be started just once per process
*/
@Slf4j
class KafkaServerSuite {
  static KafkaServerSuite _instance;
  private final int _kafkaServerPort;
  private final AtomicInteger _numStarted;
  private int _brokerId = 0;
  private EmbeddedZookeeper _zkServer;
  private ZkClient _zkClient;
  private KafkaServer _kafkaServer;
  private String _zkConnectString;

  private KafkaServerSuite() {
    _kafkaServerPort = TestUtils.findFreePort();
    _zkConnectString = "UNINITIALIZED_HOST_PORT";
    _numStarted = new AtomicInteger(0);
  }

  /**
   * Returns the process-wide singleton. Synchronized: the previous unsynchronized
   * check-then-act could create two instances when tests initialize concurrently.
   */
  static synchronized KafkaServerSuite getInstance() {
    if (null == _instance) {
      _instance = new KafkaServerSuite();
    }
    return _instance;
  }

  public ZkClient getZkClient() {
    return _zkClient;
  }

  public KafkaServer getKafkaServer() {
    return _kafkaServer;
  }

  public int getKafkaServerPort() {
    return _kafkaServerPort;
  }

  public String getZkConnectString() {
    return _zkConnectString;
  }

  /**
   * Reference-counted start: the servers are actually launched only by the first caller;
   * later callers just bump the count (paired with {@link #shutdown()}).
   */
  void start()
      throws RuntimeException {
    if (_numStarted.incrementAndGet() == 1) {
      _zkServer = new EmbeddedZookeeper();
      _zkConnectString = "127.0.0.1:" + _zkServer.port();
      // Log after _zkConnectString is assigned so the real address is printed,
      // not the UNINITIALIZED_HOST_PORT placeholder.
      log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
      _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
      Properties props = kafka.utils.TestUtils.createBrokerConfig(
          _brokerId,
          _zkConnectString,
          kafka.utils.TestUtils.createBrokerConfig$default$3(),
          kafka.utils.TestUtils.createBrokerConfig$default$4(),
          _kafkaServerPort,
          kafka.utils.TestUtils.createBrokerConfig$default$6(),
          kafka.utils.TestUtils.createBrokerConfig$default$7(),
          kafka.utils.TestUtils.createBrokerConfig$default$8(),
          kafka.utils.TestUtils.createBrokerConfig$default$9(),
          kafka.utils.TestUtils.createBrokerConfig$default$10(),
          kafka.utils.TestUtils.createBrokerConfig$default$11(),
          kafka.utils.TestUtils.createBrokerConfig$default$12(),
          kafka.utils.TestUtils.createBrokerConfig$default$13(),
          kafka.utils.TestUtils.createBrokerConfig$default$14(),
          kafka.utils.TestUtils.createBrokerConfig$default$15(),
          kafka.utils.TestUtils.createBrokerConfig$default$16(),
          kafka.utils.TestUtils.createBrokerConfig$default$17(),
          kafka.utils.TestUtils.createBrokerConfig$default$18()
      );
      KafkaConfig config = new KafkaConfig(props);
      Time mock = new MockTime();
      _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
    } else {
      log.info("Kafka server suite already started... continuing");
    }
  }

  /** Reference-counted shutdown: servers stop only when the last user releases them. */
  void shutdown() {
    if (_numStarted.decrementAndGet() == 0) {
      log.info("Shutting down Kafka server suite");
      _kafkaServer.shutdown();
      _zkClient.close();
      _zkServer.shutdown();
    } else {
      log.info("Kafka server suite still in use ... not shutting down yet");
    }
  }
}
/**
 * Per-topic consumer helper: creates a high-level (old API) Kafka consumer connected to the
 * given Zookeeper, subscribes one stream to the topic, and exposes its blocking iterator.
 * One instance per topic; shut down via {@link #shutdown()}.
 */
class KafkaConsumerSuite {
  private final ConsumerConnector _consumer;
  private final KafkaStream<byte[], byte[]> _stream;
  private final ConsumerIterator<byte[], byte[]> _iterator;
  private final String _topic;

  KafkaConsumerSuite(String zkConnectString, String topic) {
    _topic = topic;
    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnectString);
    // Unique group id per instantiation so repeated runs in one JVM start from fresh offsets.
    consumeProps.put("group.id", _topic + "-" + System.nanoTime());
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    // NOTE(review): "_consumer.timeout.ms" is not a recognized consumer property key --
    // presumably "consumer.timeout.ms" was intended. As written, the setting is ignored and
    // the iterator blocks indefinitely when no message arrives; "fixing" the key would make
    // it throw ConsumerTimeoutException after 10s instead. TODO confirm intended behavior
    // before changing.
    consumeProps.put("_consumer.timeout.ms", "10000");
    _consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
    // Request exactly one stream for the topic and keep its iterator for the tests to drain.
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
        _consumer.createMessageStreams(ImmutableMap.of(this._topic, 1));
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this._topic);
    _stream = streams.get(0);
    _iterator = _stream.iterator();
  }

  /** Disconnects the underlying consumer; the iterator is unusable afterwards. */
  void shutdown() {
    _consumer.shutdown();
  }

  /** Blocking iterator over messages received on this suite's topic. */
  public ConsumerIterator<byte[], byte[]> getIterator() {
    return _iterator;
  }
}
/**
* A Helper class for testing against Kafka
* A suite of servers (Zk, Kafka etc) will be started just once per process
* Consumer and iterator will be created per instantiation and is one instance per topic.
*/
public class KafkaTestBase implements Closeable {
  private final KafkaServerSuite _kafkaServerSuite;
  // Topic name -> its dedicated consumer suite; populated by provisionTopic.
  private final Map<String, KafkaConsumerSuite> _topicConsumerMap;

  public KafkaTestBase() throws InterruptedException, RuntimeException {
    this._kafkaServerSuite = KafkaServerSuite.getInstance();
    this._topicConsumerMap = new HashMap<>();
  }

  /** Starts (or joins) the shared, reference-counted server suite. */
  public synchronized void startServers() {
    _kafkaServerSuite.start();
  }

  /** Releases this instance's reference on the shared server suite. */
  public void stopServers() {
    _kafkaServerSuite.shutdown();
  }

  public void start() {
    startServers();
  }

  /** Shuts down every consumer this instance created and deletes its topics. */
  public void stopClients() throws IOException {
    for (Map.Entry<String, KafkaConsumerSuite> consumerSuiteEntry : _topicConsumerMap.entrySet()) {
      consumerSuiteEntry.getValue().shutdown();
      AdminUtils.deleteTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false),
          consumerSuiteEntry.getKey());
    }
  }

  @Override
  public void close() throws IOException {
    // try/finally so the reference-counted server suite is always released,
    // even when a client shutdown or topic deletion throws.
    try {
      stopClients();
    } finally {
      stopServers();
    }
  }

  /**
   * Creates the topic (1 partition, replication 1), waits for metadata to propagate,
   * and attaches a dedicated consumer suite. Idempotent per topic.
   */
  public void provisionTopic(String topic) {
    if (!_topicConsumerMap.containsKey(topic)) {
      AdminUtils.createTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false),
          topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
      List<KafkaServer> servers = new ArrayList<>();
      servers.add(_kafkaServerSuite.getKafkaServer());
      kafka.utils.TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
      KafkaConsumerSuite consumerSuite = new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic);
      _topicConsumerMap.put(topic, consumerSuite);
    }
  }

  /**
   * Returns the consumer iterator for a previously provisioned topic.
   * @throws IllegalStateException if {@link #provisionTopic(String)} was never called for it
   */
  public ConsumerIterator<byte[], byte[]> getIteratorForTopic(String topic) {
    if (_topicConsumerMap.containsKey(topic)) {
      return _topicConsumerMap.get(topic).getIterator();
    } else {
      throw new IllegalStateException("Could not find provisioned topic " + topic + ": call provisionTopic before");
    }
  }

  public int getKafkaServerPort() {
    return _kafkaServerSuite.getKafkaServerPort();
  }

  public String getZkConnectString() {
    return this._kafkaServerSuite.getZkConnectString();
  }
}
| 3,132 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka/writer/Kafka1TopicProvisionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import lombok.extern.slf4j.Slf4j;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkConnection;
import org.apache.commons.lang3.StringUtils;
import org.apache.gobblin.kafka.KafkaClusterTestBase;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.json.JSONObject;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
/**
 * Tests that Kafka1DataWriter provisions topics with the requested partition and
 * replication counts against a multi-broker embedded cluster (all tests currently
 * disabled via {@code enabled = false}).
 */
@Slf4j
public class Kafka1TopicProvisionTest {
  private final KafkaClusterTestBase _kafkaTestHelper;
  // Number of embedded brokers to start for the suite.
  private int testClusterCount = 5;

  public Kafka1TopicProvisionTest()
      throws InterruptedException, RuntimeException {
    _kafkaTestHelper = new KafkaClusterTestBase(testClusterCount);
  }

  @BeforeSuite(alwaysRun = true)
  public void beforeSuite() {
    log.info("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    _kafkaTestHelper.startCluster();
  }

  @AfterSuite(alwaysRun = true)
  public void afterSuite()
      throws IOException {
    _kafkaTestHelper.stopCluster();
  }

  /**
   * Sanity-checks the embedded cluster: the broker ports registered under
   * Zookeeper's /brokers/ids must match the ports the test helper reports.
   */
  @Test(enabled = false)
  public void testCluster()
      throws IOException, InterruptedException, KeeperException {
    int clusterCount = _kafkaTestHelper.getClusterCount();
    Assert.assertEquals(clusterCount, testClusterCount);
    int zkPort = _kafkaTestHelper.getZookeeperPort();
    String kafkaBrokerPortList = _kafkaTestHelper.getKafkaBrokerPortList().toString();
    System.out.println("kafkaBrokerPortList : " + kafkaBrokerPortList);
    // NOTE(review): this ZooKeeper handle is never closed -- leaks a session per run.
    ZooKeeper zk = new ZooKeeper("localhost:" + zkPort, 11000, new ByPassWatcher());
    List<Integer> brokerPortList = new ArrayList<Integer>();
    List<String> ids = zk.getChildren("/brokers/ids", false);
    for (String id : ids) {
      // Each broker registers a JSON blob under /brokers/ids/<id>; extract its port.
      String brokerInfo = new String(zk.getData("/brokers/ids/" + id, false, null));
      JSONObject obj = new JSONObject(brokerInfo);
      int brokerPort = obj.getInt("port");
      System.out.println(brokerPort);
      brokerPortList.add(brokerPort);
    }
    Assert.assertTrue(_kafkaTestHelper.getKafkaBrokerPortList().equals(brokerPortList));
  }

  /**
   * Verifies that constructing a Kafka1DataWriter provisions its topic with the
   * configured partition count, as observed through ZkUtils.
   */
  @Test(enabled = false)
  public void testTopicPartitionCreationCount()
      throws IOException, InterruptedException, ExecutionException {
    String topic = "topicPartition4";
    int clusterCount = _kafkaTestHelper.getClusterCount();
    int partionCount = clusterCount / 2;
    int zkPort = _kafkaTestHelper.getZookeeperPort();
    Properties props = new Properties();
    // Setting Topic Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, String.valueOf(clusterCount));
    props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, String.valueOf(partionCount));
    props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, "localhost:" + zkPort);
    // Setting Producer Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers", _kafkaTestHelper.getBootServersList());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    // Constructing the writer triggers topic provisioning as a side effect.
    Kafka1DataWriter<String, String> kafka1DataWriter = new Kafka1DataWriter<>(props);
    String zookeeperConnect = "localhost:" + _kafkaTestHelper.getZookeeperPort();
    int sessionTimeoutMs = 10 * 1000;
    int connectionTimeoutMs = 8 * 1000;
    // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
    // createTopic() will only seem to work (it will return without error). The topic will exist in
    // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
    // topic.
    // NOTE(review): zkClient is never closed -- leaks a ZK session; TODO add cleanup.
    ZkClient zkClient = new ZkClient(
        zookeeperConnect,
        sessionTimeoutMs,
        connectionTimeoutMs,
        ZKStringSerializer$.MODULE$);
    boolean isSecureKafkaCluster = false;
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), isSecureKafkaCluster);
    Integer partitionCount = (Integer) zkUtils.getTopicPartitionCount(topic).get();
    Assert.assertEquals(partitionCount.intValue(), partionCount);
  }

  /**
   * Same provisioning check but against a live cluster described by system properties
   * (live.cluster.count, live.zookeeper, live.broker, live.newtopic, ...). Becomes a
   * no-op when live.cluster.count is not supplied.
   */
  @Test(enabled = false)
  public void testLiveTopicPartitionCreationCount()
      throws IOException, InterruptedException, ExecutionException {
    String liveClusterCount = System.getProperty("live.cluster.count");
    String liveZookeeper = System.getProperty("live.zookeeper");
    String liveBroker = System.getProperty("live.broker");
    String topic = System.getProperty("live.newtopic");
    String topicReplicationCount = System.getProperty("live.newtopic.replicationCount");
    String topicPartitionCount = System.getProperty("live.newtopic.partitionCount");
    if (StringUtils.isEmpty(liveClusterCount)) {
      // No live cluster configured: vacuously pass.
      Assert.assertTrue(true);
      return;
    }
    if (StringUtils.isEmpty(topicPartitionCount)) {
      // Derive defaults from the cluster size when counts are not given explicitly.
      int clusterCount = Integer.parseInt(liveClusterCount);
      clusterCount--;
      int partionCount = clusterCount / 2;
      topicReplicationCount = String.valueOf(clusterCount);
      topicPartitionCount = String.valueOf(partionCount);
    }
    Properties props = new Properties();
    // Setting Topic Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.REPLICATION_COUNT, topicReplicationCount);
    props.setProperty(KafkaWriterConfigurationKeys.PARTITION_COUNT, topicPartitionCount);
    props.setProperty(KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER, liveZookeeper);
    // Setting Producer Properties
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers", liveBroker);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Kafka1DataWriter<String, String> kafka1DataWriter = new Kafka1DataWriter<>(props);
    int sessionTimeoutMs = 10 * 1000;
    int connectionTimeoutMs = 8 * 1000;
    // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
    // createTopic() will only seem to work (it will return without error). The topic will exist in
    // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
    // topic.
    // NOTE(review): zkClient/zkUtils appear unused below and are never closed; adminClient
    // is also never closed -- TODO review resource cleanup.
    ZkClient zkClient = new ZkClient(
        liveZookeeper,
        sessionTimeoutMs,
        connectionTimeoutMs,
        ZKStringSerializer$.MODULE$);
    boolean isSecureKafkaCluster = false;
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(liveZookeeper), isSecureKafkaCluster);
    Properties config = new Properties();
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, _kafkaTestHelper.getBootServersList());
    AdminClient adminClient = AdminClient.create(config);
    DescribeTopicsResult describer = adminClient.describeTopics(Collections.singletonList(topic));
    // Note: AdminUtils.fetchTopicMetadataFromZk is deprecated after 0.10.0. Please consider using AdminClient
    // to fetch topic config, or using ZKUtils.
    Assert.assertEquals(describer.values().get(topic).get().partitions().size(), Integer.parseInt(topicPartitionCount));
  }
}
| 3,133 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka/writer/Kafka1DataWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.avro.generic.GenericRecord;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import kafka.message.MessageAndMetadata;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.kafka.schemareg.ConfigDrivenMd5SchemaRegistry;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.kafka.schemareg.SchemaRegistryException;
import org.apache.gobblin.kafka.serialize.LiAvroDeserializer;
import org.apache.gobblin.kafka.serialize.LiAvroSerializer;
import org.apache.gobblin.kafka.serialize.SerializationException;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import static org.mockito.Mockito.*;
@Slf4j
@Test( groups = {"disabledOnCI"} )
public class Kafka1DataWriterTest {
  private final KafkaTestBase _kafkaTestHelper;

  public Kafka1DataWriterTest()
      throws InterruptedException, RuntimeException {
    _kafkaTestHelper = new KafkaTestBase();
  }

  @BeforeSuite(alwaysRun = true)
  public void beforeSuite() {
    log.warn("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    _kafkaTestHelper.startServers();
  }

  @AfterSuite(alwaysRun = true)
  public void afterSuite()
      throws IOException {
    try {
      _kafkaTestHelper.stopClients();
    } finally {
      _kafkaTestHelper.stopServers();
    }
  }

  /** Round-trips a plain String through the writer using the Kafka StringSerializer. */
  @Test
  public void testStringSerialization()
      throws IOException, InterruptedException, ExecutionException {
    String topic = "testStringSerialization1";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers", "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Kafka1DataWriter<String, String> kafka1DataWriter = new Kafka1DataWriter<>(props);
    String messageString = "foobar";
    WriteCallback callback = mock(WriteCallback.class);
    Future<WriteResponse> future;
    try {
      future = kafka1DataWriter.write(messageString, callback);
      kafka1DataWriter.flush();
      verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
      verify(callback, never()).onFailure(isA(Exception.class));
      Assert.assertTrue(future.isDone(), "Future should be done");
      System.out.println(future.get().getStringResponse());
      byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
      String messageReceived = new String(message);
      Assert.assertEquals(messageReceived, messageString);
    } finally {
      kafka1DataWriter.close();
    }
  }

  /** Round-trips raw bytes through the writer using the ByteArraySerializer. */
  @Test
  public void testBinarySerialization()
      throws IOException {
    String topic = "testBinarySerialization1";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers", "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    Kafka1DataWriter<String, byte[]> kafka1DataWriter = new Kafka1DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);
    byte[] messageBytes = TestUtils.generateRandomBytes();
    try {
      kafka1DataWriter.write(messageBytes, callback);
    } finally {
      kafka1DataWriter.close();
    }
    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
    Assert.assertEquals(message, messageBytes);
  }

  /** Round-trips an Avro record via LiAvroSerializer/Deserializer and a mock schema registry. */
  @Test
  public void testAvroSerialization()
      throws IOException, SchemaRegistryException {
    String topic = "testAvroSerialization1";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
        LiAvroSerializer.class.getName());
    // set up mock schema registry
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
            + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
        ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());
    Kafka1DataWriter<String, GenericRecord> kafka1DataWriter = new Kafka1DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);
    GenericRecord record = TestUtils.generateRandomAvroRecord();
    try {
      kafka1DataWriter.write(record, callback);
    } finally {
      kafka1DataWriter.close();
    }
    log.info("Kafka events written");
    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
    log.info("Kafka events read, start to check result... ");
    ConfigDrivenMd5SchemaRegistry schemaReg = new ConfigDrivenMd5SchemaRegistry(topic, record.getSchema());
    LiAvroDeserializer deser = new LiAvroDeserializer(schemaReg);
    GenericRecord receivedRecord = deser.deserialize(topic, message);
    Assert.assertEquals(record.toString(), receivedRecord.toString());
  }

  /** Verifies keyed writes: the configured key field is emitted as the Kafka message key. */
  @Test
  public void testKeyedAvroSerialization()
      throws IOException, SchemaRegistryException, SerializationException {
    // Use a dedicated topic: this test previously reused "testAvroSerialization1", which
    // shares one consumer iterator with testAvroSerialization and makes the tests
    // order-dependent (each could consume the other's message).
    String topic = "testKeyedAvroSerialization1";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
        LiAvroSerializer.class.getName());
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
    String keyField = "field1";
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);
    // set up mock schema registry
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
            + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
        ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());
    Kafka1DataWriter<String, GenericRecord> kafka1DataWriter = new Kafka1DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);
    GenericRecord record = TestUtils.generateRandomAvroRecord();
    try {
      kafka1DataWriter.write(record, callback);
    } finally {
      kafka1DataWriter.close();
    }
    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
    byte[] key = value.key();
    byte[] message = value.message();
    ConfigDrivenMd5SchemaRegistry schemaReg = new ConfigDrivenMd5SchemaRegistry(topic, record.getSchema());
    LiAvroDeserializer deser = new LiAvroDeserializer(schemaReg);
    GenericRecord receivedRecord = deser.deserialize(topic, message);
    Assert.assertEquals(record.toString(), receivedRecord.toString());
    Assert.assertEquals(new String(key), record.get(keyField));
  }

  /** Verifies value-field extraction: both key and value come from the configured field. */
  @Test
  public void testValueSerialization()
      throws IOException, InterruptedException, SchemaRegistryException {
    // Dedicated topic for the same reason as testKeyedAvroSerialization: avoid sharing
    // a consumer iterator with the other Avro tests.
    String topic = "testValueSerialization1";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
        "org.apache.kafka.common.serialization.StringSerializer");
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
    String keyField = "field1";
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_VALUEFIELD_CONFIG, keyField);
    // set up mock schema registry
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
            + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
        ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());
    Kafka1DataWriter<String, GenericRecord> kafka1DataWriter = new Kafka1DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);
    GenericRecord record = TestUtils.generateRandomAvroRecord();
    try {
      kafka1DataWriter.write(record, callback);
    } finally {
      kafka1DataWriter.close();
    }
    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
    byte[] key = value.key();
    byte[] message = value.message();
    Assert.assertEquals(new String(message), record.get(keyField));
    Assert.assertEquals(new String(key), record.get(keyField));
  }
}
| 3,134 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka/writer/ByPassWatcher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
/**
 * A ZooKeeper {@link Watcher} that silently discards every event. Useful in tests where the
 * ZkClient API requires a watcher instance but the caller has no interest in watch notifications.
 */
public class ByPassWatcher implements Watcher {
  @Override
  public void process(WatchedEvent event) {
    // Intentionally a no-op: all ZooKeeper events are ignored.
  }
}
| 3,135 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/test/java/org/apache/gobblin/kafka/client/Kafka1ConsumerClientTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.TimestampType;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Set;
public class Kafka1ConsumerClientTest {

  /**
   * Feeds three records into a {@link MockConsumer} and consumes them through
   * {@link Kafka1ConsumerClient}, checking that records, timestamps and timestamp types are
   * passed through to the wrapping {@code Kafka1ConsumerRecord}s unchanged.
   */
  @Test
  public void testConsume() throws Exception {
    Config testConfig = ConfigFactory.parseMap(ImmutableMap.of(ConfigurationKeys.KAFKA_BROKERS, "test"));
    MockConsumer<String, String> consumer = new MockConsumer<String, String>(OffsetResetStrategy.NONE);
    consumer.assign(Arrays.asList(new TopicPartition("test_topic", 0)));
    // MockConsumer requires explicit beginning offsets for assigned partitions before polling.
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test_topic", 0), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    // Records at offsets 0..2; record0 uses CREATE_TIME while the others use LOG_APPEND_TIME,
    // so the timestamp-type assertions below distinguish the two cases.
    ConsumerRecord<String, String> record0 = new ConsumerRecord<>("test_topic", 0, 0L, 10L, TimestampType.CREATE_TIME, 0L, 3, 6, "key", "value0");
    ConsumerRecord<String, String> record1 = new ConsumerRecord<>("test_topic", 0, 1L, 11L, TimestampType.LOG_APPEND_TIME, 1L, 3, 6, "key", "value1");
    ConsumerRecord<String, String> record2 = new ConsumerRecord<>("test_topic", 0, 2L, 12L, TimestampType.LOG_APPEND_TIME, 2L, 3, 6, "key", "value2");
    consumer.addRecord(record0);
    consumer.addRecord(record1);
    consumer.addRecord(record2);
    try (Kafka1ConsumerClient<String, String> kafka1Client = new Kafka1ConsumerClient<>(testConfig, consumer);) {
      // Consume from 0 offset
      Set<KafkaConsumerRecord> consumedRecords =
          Sets.newHashSet(kafka1Client.consume(new KafkaPartition.Builder().withId(0).withTopicName("test_topic")
              .build(), 0l, 100l));
      Set<Kafka1ConsumerClient.Kafka1ConsumerRecord<String, String>> expected =
          ImmutableSet.of(new Kafka1ConsumerClient.Kafka1ConsumerRecord<>(record0),
              new Kafka1ConsumerClient.Kafka1ConsumerRecord<>(record1), new Kafka1ConsumerClient.Kafka1ConsumerRecord<>(record2));
      Assert.assertEquals(consumedRecords, expected);
      // Guava's ImmutableSet iterates in insertion order, so the first element wraps record0.
      Kafka1ConsumerClient.Kafka1ConsumerRecord expected0 = expected.iterator().next();
      Assert.assertEquals(record0.timestamp(), expected0.getTimestamp());
      Assert.assertEquals(record0.timestampType() == TimestampType.LOG_APPEND_TIME, expected0.isTimestampLogAppend());
      Assert.assertEquals(record0.timestampType(), expected0.getTimestampType());
    }
  }
}
| 3,136 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/metrics/kafka/KafkaKeyValueProducerPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
/**
* Establishes a connection to a Kafka cluster and push keyed messages to a specified topic.
*
* @param <K> key type
* @param <V> value type
*/
@Slf4j
public class KafkaKeyValueProducerPusher<K, V> implements Pusher<Pair<K, V>> {
  // Destination topic for every pushed message.
  private final String topic;
  private final KafkaProducer<K, V> producer;
  // The producer is registered with this closer so that close() releases it.
  private final Closer closer;

  /**
   * Creates a pusher targeting {@code topic} on the given comma-separated {@code brokers}.
   *
   * @param brokers     Kafka bootstrap servers
   * @param topic       topic to push messages to
   * @param kafkaConfig optional extra producer configuration; entries here override the
   *                    defaults set below (serializers, acks, retries, max-in-flight)
   */
  public KafkaKeyValueProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    // NOTE(review): the default key serializer is StringSerializer even though K is generic;
    // callers with non-String keys must override it via kafkaConfig - confirm with call sites.
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);
    //To guarantee ordered delivery, the maximum in flight requests must be set to 1.
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
    // add the kafka scoped config. if any of the above are specified then they are overridden
    if (kafkaConfig.isPresent()) {
      props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
    }
    this.producer = createProducer(props);
  }

  public KafkaKeyValueProducerPusher(String brokers, String topic) {
    this(brokers, topic, Optional.absent());
  }

  /**
   * Push all keyed messages to the Kafka topic.
   *
   * Sends are asynchronous and best-effort: failures are only logged via the send callback,
   * they are not propagated to the caller.
   *
   * @param messages List of keyed messages to push to Kakfa.
   */
  public void pushMessages(List<Pair<K, V>> messages) {
    for (Pair<K, V> message : messages) {
      this.producer.send(new ProducerRecord<>(topic, message.getKey(), message.getValue()), (recordMetadata, e) -> {
        if (e != null) {
          log.error("Failed to send message to topic {} due to exception: ", topic, e);
        }
      });
    }
  }

  @Override
  public void close()
      throws IOException {
    //Call flush() before invoking close() to ensure any buffered messages are immediately sent. This is required
    //since close() only guarantees delivery of in-flight messages.
    log.info("Flushing records from producer buffer");
    this.producer.flush();
    this.closer.close();
  }

  /**
   * Create the Kafka producer.
   *
   * Exposed as a protected hook so tests/subclasses can substitute a different producer;
   * the returned producer is registered with the closer for cleanup.
   */
  protected KafkaProducer<K, V> createProducer(Properties props) {
    return this.closer.register(new KafkaProducer<K, V>(props));
  }
}
| 3,137 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/metrics/kafka/KafkaProducerPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
/**
* Establish a connection to a Kafka cluster and push byte messages to a specified topic.
*/
@Slf4j
public class KafkaProducerPusher implements Pusher<byte[]> {
  // Destination topic for every pushed message.
  private final String topic;
  private final KafkaProducer<String, byte[]> producer;
  // The producer is registered with this closer so that close() releases it.
  private final Closer closer;

  /**
   * Creates a pusher targeting {@code topic} on the given comma-separated {@code brokers}.
   *
   * @param brokers     Kafka bootstrap servers
   * @param topic       topic to push messages to
   * @param kafkaConfig optional extra producer configuration; entries here override the
   *                    defaults set below (serializers, acks, retries)
   */
  public KafkaProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);
    // NOTE(review): unlike KafkaKeyValueProducerPusher, this class does not cap in-flight
    // requests at 1, so with retries > 0 ordered delivery is not guaranteed - confirm whether
    // ordering matters to consumers of this pusher.
    // add the kafka scoped config. if any of the above are specified then they are overridden
    if (kafkaConfig.isPresent()) {
      props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
    }
    this.producer = createProducer(props);
  }

  public KafkaProducerPusher(String brokers, String topic) {
    this(brokers, topic, Optional.absent());
  }

  /**
   * Push all byte array messages to the Kafka topic.
   *
   * Sends are asynchronous and best-effort: failures are only logged via the send callback,
   * they are not propagated to the caller.
   *
   * @param messages List of byte array messages to push to Kakfa.
   */
  public void pushMessages(List<byte[]> messages) {
    for (byte[] message : messages) {
      producer.send(new ProducerRecord<>(topic, message), (recordMetadata, e) -> {
        if (e != null) {
          log.error("Failed to send message to topic {} due to exception: ", topic, e);
        }
      });
    }
  }

  @Override
  public void close()
      throws IOException {
    //Call flush() before invoking close() to ensure any buffered messages are immediately sent. This is required
    //since close() only guarantees delivery of in-flight messages.
    log.info("Flushing records from producer buffer");
    this.producer.flush();
    this.closer.close();
  }

  /**
   * Create the Kafka producer.
   *
   * Exposed as a protected hook so tests/subclasses can substitute a different producer;
   * the returned producer is registered with the closer for cleanup.
   */
  protected KafkaProducer<String, byte[]> createProducer(Properties props) {
    return this.closer.register(new KafkaProducer<String, byte[]>(props));
  }
}
| 3,138 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serializer;
/**
* LinkedIn's implementation of Avro-schema based serialization for Kafka
* TODO: Implement this for IndexedRecord not just GenericRecord
*/
public class LiAvroSerializer extends LiAvroSerializerBase implements Serializer<GenericRecord> {

  /**
   * Serializes {@code data} via the base LiAvro implementation, translating Gobblin's checked
   * serialization failure into Kafka's unchecked {@link SerializationException}.
   */
  @Override
  public byte[] serialize(String topic, GenericRecord data) {
    final byte[] payload;
    try {
      payload = super.serialize(topic, data);
    } catch (org.apache.gobblin.kafka.serialize.SerializationException cause) {
      // Kafka's Serializer contract does not allow checked exceptions, so wrap and rethrow.
      throw new SerializationException(cause);
    }
    return payload;
  }
}
| 3,139 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;
/**
* The LinkedIn Avro Deserializer (works with records serialized by the {@link LiAvroSerializer})
*/
@Slf4j
public class LiAvroDeserializer extends LiAvroDeserializerBase implements Deserializer<GenericRecord> {
public LiAvroDeserializer(KafkaSchemaRegistry<MD5Digest, Schema> schemaRegistry) {
super(schemaRegistry);
}
/**
* @param topic topic associated with the data
* @param data serialized bytes
* @return deserialized object
*/
@Override
public GenericRecord deserialize(String topic, byte[] data) {
try {
return super.deserialize(topic, data);
} catch (org.apache.gobblin.kafka.serialize.SerializationException e) {
throw new SerializationException("Error during Deserialization", e);
}
}
}
| 3,140 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka/writer/Kafka1JsonObjectWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.kafka.serialize.GsonSerializerBase;
import org.apache.gobblin.writer.AsyncDataWriter;
import org.apache.kafka.common.serialization.Serializer;
import java.util.Properties;
/**
* A {@link org.apache.gobblin.writer.DataWriterBuilder} that builds a {@link org.apache.gobblin.writer.DataWriter} to
* write {@link JsonObject} to kafka
*/
public class Kafka1JsonObjectWriterBuilder extends AbstractKafkaDataWriterBuilder<JsonArray, JsonObject> {
  // Producer config key under which Kafka looks up the value serializer class.
  private static final String VALUE_SERIALIZER_KEY =
      KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + KafkaWriterConfigurationKeys.VALUE_SERIALIZER_CONFIG;

  /**
   * Builds a {@link Kafka1DataWriter}, forcing the producer's value serializer to the
   * Gson-based JSON serializer below (overriding any value already present in {@code props}).
   *
   * @param props writer properties forwarded to the Kafka producer
   * @throws ConfigurationException if the writer configuration is invalid
   */
  @Override
  protected AsyncDataWriter<JsonObject> getAsyncDataWriter(Properties props)
      throws ConfigurationException {
    props.setProperty(VALUE_SERIALIZER_KEY, KafkaGsonObjectSerializer.class.getName());
    return new Kafka1DataWriter<>(props);
  }

  /**
   * A specific {@link Serializer} that serializes {@link JsonObject} to byte array
   */
  public final static class KafkaGsonObjectSerializer extends GsonSerializerBase<JsonObject> implements Serializer<JsonObject> {
  }
}
| 3,141 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka/writer/Kafka1DataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import com.google.common.base.Throwables;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import kafka.admin.AdminUtils;
import kafka.admin.RackAwareMode;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import lombok.extern.slf4j.Slf4j;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkConnection;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.*;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.clients.producer.*;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.Future;
/**
* Implementation of KafkaWriter that wraps a {@link KafkaProducer}.
* This provides at-least once semantics.
* Applications should expect data to be possibly written to Kafka even if the overall Gobblin job fails.
*/
@Slf4j
public class Kafka1DataWriter<K, V> implements KafkaDataWriter<K, V> {

  /** Adapts the Kafka producer's {@link RecordMetadata} ack into a Gobblin {@link WriteResponse}. */
  public static final WriteResponseMapper<RecordMetadata> WRITE_RESPONSE_WRAPPER =
      new WriteResponseMapper<RecordMetadata>() {
        @Override
        public WriteResponse wrap(final RecordMetadata recordMetadata) {
          return new WriteResponse<RecordMetadata>() {
            @Override
            public RecordMetadata getRawResponse() {
              return recordMetadata;
            }

            @Override
            public String getStringResponse() {
              return recordMetadata.toString();
            }

            @Override
            public long bytesWritten() {
              // The producer ack does not report payload size.
              return -1;
            }
          };
        }
      };

  private final Producer<K, V> producer;
  private final String topic;
  private final KafkaWriterCommonConfig commonConfig;

  /**
   * Instantiates the Kafka producer configured by {@code props} and casts it to {@link Producer}.
   *
   * @throws RuntimeException (propagated {@link ClassCastException}) if the configured producer
   *         class does not implement {@link Producer}
   */
  public static Producer getKafkaProducer(Properties props) {
    Object producerObject = KafkaWriterHelper.getKafkaProducer(props);
    try {
      return (Producer) producerObject;
    } catch (ClassCastException e) {
      log.error("Failed to instantiate Kafka producer " + producerObject.getClass().getName()
          + " as instance of Producer.class", e);
      throw Throwables.propagate(e);
    }
  }

  public Kafka1DataWriter(Properties props)
      throws ConfigurationException {
    this(getKafkaProducer(props), ConfigFactory.parseProperties(props));
  }

  /**
   * @param producer producer used for all sends; owned (and closed) by this writer
   * @param config   writer configuration; must contain the target topic
   * @throws ConfigurationException if the key/value writer configuration is invalid
   */
  public Kafka1DataWriter(Producer producer, Config config)
      throws ConfigurationException {
    this.topic = config.getString(KafkaWriterConfigurationKeys.KAFKA_TOPIC);
    provisionTopic(topic, config);
    this.producer = producer;
    this.commonConfig = new KafkaWriterCommonConfig(config);
  }

  @Override
  public void close()
      throws IOException {
    log.debug("Close called");
    this.producer.close();
  }

  /**
   * Extracts the key/value pair from {@code record} per the writer config and sends it to Kafka.
   */
  @Override
  public Future<WriteResponse> write(final V record, final WriteCallback callback) {
    try {
      Pair<K, V> keyValuePair = KafkaWriterHelper.getKeyValuePair(record, this.commonConfig);
      return write(keyValuePair, callback);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create a Kafka write request", e);
    }
  }

  /**
   * Asynchronously sends the given key/value pair, routing the producer ack (or failure) to
   * {@code callback}.
   */
  public Future<WriteResponse> write(Pair<K, V> keyValuePair, final WriteCallback callback) {
    try {
      return new WriteResponseFuture<>(this.producer
          .send(new ProducerRecord<>(topic, keyValuePair.getKey(), keyValuePair.getValue()),
              (metadata, exception) -> {
                if (exception != null) {
                  callback.onFailure(exception);
                } else {
                  callback.onSuccess(WRITE_RESPONSE_WRAPPER.wrap(metadata));
                }
              }), WRITE_RESPONSE_WRAPPER);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create a Kafka write request", e);
    }
  }

  @Override
  public void flush()
      throws IOException {
    this.producer.flush();
  }

  /**
   * Creates (or, when configured, deletes and re-creates) the topic via ZooKeeper. Skipped
   * entirely when no ZooKeeper connect string is configured.
   */
  private void provisionTopic(String topicName, Config config) {
    String zooKeeperPropKey = KafkaWriterConfigurationKeys.CLUSTER_ZOOKEEPER;
    if (!config.hasPath(zooKeeperPropKey)) {
      log.debug("Topic " + topicName + " is configured without the partition and replication");
      return;
    }
    String zookeeperConnect = config.getString(zooKeeperPropKey);
    int sessionTimeoutMs = ConfigUtils.getInt(config,
        KafkaWriterConfigurationKeys.ZOOKEEPER_SESSION_TIMEOUT,
        KafkaWriterConfigurationKeys.ZOOKEEPER_SESSION_TIMEOUT_DEFAULT);
    int connectionTimeoutMs = ConfigUtils.getInt(config,
        KafkaWriterConfigurationKeys.ZOOKEEPER_CONNECTION_TIMEOUT,
        KafkaWriterConfigurationKeys.ZOOKEEPER_CONNECTION_TIMEOUT_DEFAULT);
    // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
    // createTopic() will only seem to work (it will return without error). The topic will exist in
    // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
    // topic.
    ZkClient zkClient =
        new ZkClient(zookeeperConnect, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
    // Security for Kafka was added in Kafka 0.9.0.0
    ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), false);
    try {
      int partitions = ConfigUtils.getInt(config,
          KafkaWriterConfigurationKeys.PARTITION_COUNT,
          KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT);
      // Bug fix: replication previously fell back to PARTITION_COUNT_DEFAULT (copy-paste).
      int replication = ConfigUtils.getInt(config,
          KafkaWriterConfigurationKeys.REPLICATION_COUNT,
          KafkaWriterConfigurationKeys.REPLICATION_COUNT_DEFAULT);
      Properties topicConfig = new Properties();
      if (AdminUtils.topicExists(zkUtils, topicName)) {
        log.debug("Topic {} already exists with replication: {} and partitions: {}", topicName, replication,
            partitions);
        boolean deleteTopicIfExists = ConfigUtils.getBoolean(config,
            KafkaWriterConfigurationKeys.DELETE_TOPIC_IF_EXISTS,
            KafkaWriterConfigurationKeys.DEFAULT_DELETE_TOPIC_IF_EXISTS);
        if (!deleteTopicIfExists) {
          return;
        }
        log.debug("Deleting topic {}", topicName);
        AdminUtils.deleteTopic(zkUtils, topicName);
      }
      AdminUtils.createTopic(zkUtils, topicName, partitions, replication, topicConfig, RackAwareMode.Disabled$.MODULE$);
      log.info("Created topic {} with replication: {} and partitions : {}", topicName, replication, partitions);
    } finally {
      // Bug fix: release the ZooKeeper connection; previously zkUtils/zkClient leaked on every call.
      zkUtils.close();
    }
  }
}
| 3,142 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka/writer/KafkaDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.writer.AsyncDataWriter;
/**
* Builder that hands back a {@link Kafka1DataWriter}
*/
public class KafkaDataWriterBuilder<S, D> extends AbstractKafkaDataWriterBuilder<S, D> {

  /**
   * @param props writer properties forwarded to the Kafka producer
   * @return a new {@link Kafka1DataWriter} configured from {@code props}
   * @throws ConfigurationException if the writer configuration is invalid
   */
  @Override
  protected AsyncDataWriter<D> getAsyncDataWriter(Properties props)
      throws ConfigurationException {
    return new Kafka1DataWriter<>(props);
  }
}
| 3,143 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-1/src/main/java/org/apache/gobblin/kafka/client/Kafka1ConsumerClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.EqualsAndHashCode;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaOffsetRetrievalFailureException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.record.TimestampType;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.util.*;
import java.util.Map.Entry;
import java.util.stream.Collectors;
/**
 * A {@link GobblinKafkaConsumerClient} that uses kafka 1.1 consumer client. Use {@link Factory#create(Config)} to create
 * new Kafka1.1ConsumerClients. The {@link Config} used to create clients must have required key {@value #GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY}
 *
 * @param <K> Message key type
 * @param <V> Message value type
 */
@Slf4j
public class Kafka1ConsumerClient<K, V> extends AbstractBaseKafkaConsumerClient {
  // Native Kafka consumer property names this client populates on the Properties handed to KafkaConsumer.
  private static final String CLIENT_BOOTSTRAP_SERVERS_KEY = "bootstrap.servers";
  private static final String CLIENT_ENABLE_AUTO_COMMIT_KEY = "enable.auto.commit";
  private static final String CLIENT_SESSION_TIMEOUT_KEY = "session.timeout.ms";
  private static final String CLIENT_KEY_DESERIALIZER_CLASS_KEY = "key.deserializer";
  private static final String CLIENT_VALUE_DESERIALIZER_CLASS_KEY = "value.deserializer";
  private static final String CLIENT_GROUP_ID = "group.id";
  // Auto-commit is off by default: offsets are committed explicitly via commitOffsetsAsync/commitOffsetsSync.
  private static final String DEFAULT_ENABLE_AUTO_COMMIT = Boolean.toString(false);
  public static final String DEFAULT_KEY_DESERIALIZER =
      "org.apache.kafka.common.serialization.StringDeserializer";
  private static final String DEFAULT_GROUP_ID = "kafka1";
  // Gobblin-side config keys (CONFIG_PREFIX + native Kafka key) for the key/value deserializer classes.
  public static final String GOBBLIN_CONFIG_KEY_DESERIALIZER_CLASS_KEY = CONFIG_PREFIX
      + CLIENT_KEY_DESERIALIZER_CLASS_KEY;
  public static final String GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY = CONFIG_PREFIX
      + CLIENT_VALUE_DESERIALIZER_CLASS_KEY;
  // Defaults applied as a fallback under the base config when the user config omits these keys.
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(CLIENT_ENABLE_AUTO_COMMIT_KEY, DEFAULT_ENABLE_AUTO_COMMIT)
          .put(CLIENT_KEY_DESERIALIZER_CLASS_KEY, DEFAULT_KEY_DESERIALIZER)
          .put(CLIENT_GROUP_ID, DEFAULT_GROUP_ID)
          .build());
  // The underlying native Kafka consumer; every operation below delegates to it.
  private final Consumer<K, V> consumer;
  /**
   * Builds a consumer from config: brokers and socket timeout come from the base class, the rest is
   * assembled from the "source.kafka" namespace plus consumer-specific and shared-Kafka overrides.
   *
   * @throws IllegalArgumentException if {@value #GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY} is missing
   */
  private Kafka1ConsumerClient(Config config) {
    super(config);
    Preconditions.checkArgument(config.hasPath(GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY),
        "Missing required property " + GOBBLIN_CONFIG_VALUE_DESERIALIZER_CLASS_KEY);
    Properties props = new Properties();
    props.put(CLIENT_BOOTSTRAP_SERVERS_KEY, Joiner.on(",").join(super.brokers));
    props.put(CLIENT_SESSION_TIMEOUT_KEY, super.socketTimeoutMillis);
    // grab all the config under "source.kafka" and add the defaults as fallback.
    Config baseConfig = ConfigUtils.getConfigOrEmpty(config, CONFIG_NAMESPACE).withFallback(FALLBACK);
    // get the "source.kafka.consumerConfig" config for extra config to pass along to Kafka with a fallback to the
    // shared config that start with "gobblin.kafka.sharedConfig"
    Config specificConfig = ConfigUtils.getConfigOrEmpty(baseConfig, CONSUMER_CONFIG).withFallback(
        ConfigUtils.getConfigOrEmpty(config, ConfigurationKeys.SHARED_KAFKA_CONFIG_PREFIX));
    // The specific config overrides settings in the base config
    Config scopedConfig = specificConfig.withFallback(baseConfig.withoutPath(CONSUMER_CONFIG));
    props.putAll(ConfigUtils.configToProperties(scopedConfig));
    this.consumer = new KafkaConsumer<>(props);
  }
  /**
   * Wraps an externally-created consumer; used by tests and callers that manage consumer construction themselves.
   */
  public Kafka1ConsumerClient(Config config, Consumer<K, V> consumer) {
    super(config);
    this.consumer = consumer;
  }
  /**
   * Lists every topic visible to the consumer, converting each Kafka {@link PartitionInfo} into a
   * Gobblin {@link KafkaPartition}.
   */
  @Override
  public List<KafkaTopic> getTopics() {
    return FluentIterable.from(this.consumer.listTopics().entrySet())
        .transform(new Function<Entry<String, List<PartitionInfo>>, KafkaTopic>() {
          @Override
          public KafkaTopic apply(Entry<String, List<PartitionInfo>> filteredTopicEntry) {
            return new KafkaTopic(filteredTopicEntry.getKey(), Lists.transform(filteredTopicEntry.getValue(),
                PARTITION_INFO_TO_KAFKA_PARTITION));
          }
        }).toList();
  }
  /**
   * Returns the earliest available offset of the given partition by assigning it, seeking to the
   * beginning, and reading the resulting position. NOTE: this re-assigns the consumer to only this partition.
   */
  @Override
  public long getEarliestOffset(KafkaPartition partition) {
    TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
    List<TopicPartition> topicPartitionList = Collections.singletonList(topicPartition);
    this.consumer.assign(topicPartitionList);
    this.consumer.seekToBeginning(topicPartitionList);
    return this.consumer.position(topicPartition);
  }
  /**
   * Returns the latest (end) offset of the given partition by assigning it, seeking to the end, and
   * reading the resulting position. NOTE: this re-assigns the consumer to only this partition.
   */
  @Override
  public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
    List<TopicPartition> topicPartitionList = Collections.singletonList(topicPartition);
    this.consumer.assign(topicPartitionList);
    this.consumer.seekToEnd(topicPartitionList);
    return this.consumer.position(topicPartition);
  }
  /**
   * Positions the consumer at {@code nextOffset} of the given partition and polls once.
   *
   * @return an iterator over the fetched records, or {@code null} when {@code nextOffset > maxOffset}
   *     (callers must null-check; no empty iterator is returned in that case)
   */
  @Override
  public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
    if (nextOffset > maxOffset) {
      return null;
    }
    this.consumer.assign(Lists.newArrayList(new TopicPartition(partition.getTopicName(), partition.getId())));
    this.consumer.seek(new TopicPartition(partition.getTopicName(), partition.getId()), nextOffset);
    return consume();
  }
  /**
   * Polls once (bounded by the configured fetch timeout) and lazily wraps each native
   * {@link ConsumerRecord} into a {@link Kafka1ConsumerRecord}.
   *
   * @throws RuntimeException wrapping any exception raised while polling
   */
  @Override
  public Iterator<KafkaConsumerRecord> consume() {
    try {
      ConsumerRecords<K, V> consumerRecords = consumer.poll(super.fetchTimeoutMillis);
      // Transformation is lazy; wrapping failures surface when the iterator is advanced.
      return Iterators.transform(consumerRecords.iterator(), input -> {
        try {
          return new Kafka1ConsumerRecord(input);
        } catch (Throwable t) {
          throw Throwables.propagate(t);
        }
      });
    } catch (Exception e) {
      log.error("Exception on polling records", e);
      throw new RuntimeException(e);
    }
  }
  /**
   * Subscribe to a kafka topic
   * TODO Add multi topic support
   * @param topic
   */
  @Override
  public void subscribe(String topic) {
    // No-op rebalance listener: rebalances happen silently.
    this.consumer.subscribe(Lists.newArrayList(topic), new NoOpConsumerRebalanceListener());
  }
  /**
   * Subscribe to a kafka topic with a {#GobblinConsumerRebalanceListener}
   * TODO Add multi topic support
   * @param topic
   */
  @Override
  public void subscribe(String topic, GobblinConsumerRebalanceListener listener) {
    // Adapts Kafka's ConsumerRebalanceListener callbacks to the Gobblin listener, converting
    // each TopicPartition into a Gobblin KafkaPartition.
    this.consumer.subscribe(Lists.newArrayList(topic), new ConsumerRebalanceListener() {
      @Override
      public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        listener.onPartitionsRevoked(partitions.stream().map(a -> new KafkaPartition.Builder().withTopicName(a.topic()).withId(a.partition()).build()).collect(Collectors.toList()));
      }
      @Override
      public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        listener.onPartitionsAssigned(partitions.stream().map(a -> new KafkaPartition.Builder().withTopicName(a.topic()).withId(a.partition()).build()).collect(Collectors.toList()));
      }
    });
  }
  /**
   * Snapshot of the native consumer's metrics, converted to Coda Hale {@link Metric}s keyed by
   * canonical metric name.
   */
  @Override
  public Map<String, Metric> getMetrics() {
    // Unchecked cast: Consumer#metrics() declares Map<MetricName, ? extends Metric>; the Kafka client
    // returns KafkaMetric values at runtime.
    Map<MetricName, KafkaMetric> kafkaMetrics = (Map<MetricName, KafkaMetric>) this.consumer.metrics();
    Map<String, Metric> codaHaleMetricMap = new HashMap<>();
    kafkaMetrics
        .forEach((key, value) -> codaHaleMetricMap.put(canonicalMetricName(value), kafkaToCodaHaleMetric(value)));
    return codaHaleMetricMap;
  }
  /**
   * Commit offsets to Kafka asynchronously. Failures are logged by the completion callback, not rethrown.
   */
  @Override
  public void commitOffsetsAsync(Map<KafkaPartition, Long> partitionOffsets) {
    Map<TopicPartition, OffsetAndMetadata> offsets = partitionOffsets.entrySet().stream().collect(Collectors.toMap(e -> new TopicPartition(e.getKey().getTopicName(),e.getKey().getId()), e -> new OffsetAndMetadata(e.getValue())));
    consumer.commitAsync(offsets, new OffsetCommitCallback() {
      @Override
      public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if(exception != null) {
          log.error("Exception while committing offsets " + offsets, exception);
          return;
        }
      }
    });
  }
  /**
   * Commit offsets to Kafka synchronously
   */
  @Override
  public void commitOffsetsSync(Map<KafkaPartition, Long> partitionOffsets) {
    Map<TopicPartition, OffsetAndMetadata> offsets = partitionOffsets.entrySet().stream().collect(Collectors.toMap(e -> new TopicPartition(e.getKey().getTopicName(),e.getKey().getId()), e -> new OffsetAndMetadata(e.getValue())));
    consumer.commitSync(offsets);
  }
  /**
   * returns the last committed offset for a KafkaPartition
   * @param partition
   * @return last committed offset or -1 for invalid KafkaPartition
   */
  @Override
  public long committed(KafkaPartition partition) {
    OffsetAndMetadata offsetAndMetadata = consumer.committed(new TopicPartition(partition.getTopicName(), partition.getId()));
    // consumer.committed returns null when there is no committed offset for the partition.
    return offsetAndMetadata != null ? offsetAndMetadata.offset() : -1l;
  }
  /**
   * Convert a {@link KafkaMetric} instance to a {@link Metric}.
   * @param kafkaMetric the native Kafka metric to expose
   * @return a {@link Gauge} whose value is read live from the Kafka metric on each access
   */
  private Metric kafkaToCodaHaleMetric(final KafkaMetric kafkaMetric) {
    if (log.isDebugEnabled()) {
      log.debug("Processing a metric change for {}", kafkaMetric.metricName().toString());
    }
    Gauge<Object> gauge = kafkaMetric::metricValue;
    return gauge;
  }
  // Builds the canonical name from the metric's group, tag values, and name (delegates to the base class helper).
  private String canonicalMetricName(KafkaMetric kafkaMetric) {
    MetricName name = kafkaMetric.metricName();
    return canonicalMetricName(name.group(), name.tags().values(), name.name());
  }
  @Override
  public void close() throws IOException {
    this.consumer.close();
  }
  // Converts a native PartitionInfo (topic/partition/leader) into a Gobblin KafkaPartition.
  private static final Function<PartitionInfo, KafkaPartition> PARTITION_INFO_TO_KAFKA_PARTITION =
      new Function<PartitionInfo, KafkaPartition>() {
        @Override
        public KafkaPartition apply(@Nonnull PartitionInfo partitionInfo) {
          return new KafkaPartition.Builder().withId(partitionInfo.partition()).withTopicName(partitionInfo.topic())
              .withLeaderId(partitionInfo.leader().id())
              .withLeaderHostAndPort(partitionInfo.leader().host(), partitionInfo.leader().port()).build();
        }
      };
  /**
   * A factory class to instantiate {@link Kafka1ConsumerClient}
   */
  public static class Factory implements GobblinKafkaConsumerClientFactory {
    @SuppressWarnings("rawtypes")
    @Override
    public GobblinKafkaConsumerClient create(Config config) {
      return new Kafka1ConsumerClient(config);
    }
  }
  /**
   * A record returned by {@link Kafka1ConsumerClient}
   *
   * @param <K> Message key type
   * @param <V> Message value type
   */
  @EqualsAndHashCode(callSuper = true)
  @ToString
  public static class Kafka1ConsumerRecord<K, V> extends BaseKafkaConsumerRecord implements
      DecodeableKafkaRecord<K, V> {
    private final ConsumerRecord<K, V> consumerRecord;
    public Kafka1ConsumerRecord(ConsumerRecord<K, V> consumerRecord) {
      // NOTE(review): comment inherited from the 0.9 client — here serializedValueSize() is always
      // available and is passed through as the record size.
      super(consumerRecord.offset(), consumerRecord.serializedValueSize() , consumerRecord.topic(), consumerRecord.partition());
      this.consumerRecord = consumerRecord;
    }
    /**
     * @return the timestamp type of the underlying ConsumerRecord (only for Kafka 1+ records)
     */
    public TimestampType getTimestampType() {
      return this.consumerRecord.timestampType();
    }
    /**
     * @return true if the timestamp in the ConsumerRecord is the timestamp when the record is written to Kafka.
     */
    @Override
    public boolean isTimestampLogAppend() {
      return this.consumerRecord.timestampType() == TimestampType.LOG_APPEND_TIME;
    }
    /**
     * @return the timestamp of the underlying ConsumerRecord. NOTE: check TimestampType
     */
    @Override
    public long getTimestamp() {
      return this.consumerRecord.timestamp();
    }
    @Override
    public K getKey() {
      return this.consumerRecord.key();
    }
    @Override
    public V getValue() {
      return this.consumerRecord.value();
    }
  }
}
// File: gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/converter/parquet/JsonSchema.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.parquet;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.source.extractor.schema.Schema;
import static org.apache.gobblin.converter.parquet.JsonSchema.InputType.ENUM;
import static org.apache.gobblin.converter.parquet.JsonSchema.InputType.RECORD;
/**
 * Represents a source schema declared in the configuration with {@link ConfigurationKeys#SOURCE_SCHEMA}.
 * The source schema is represented by a {@link JsonArray} of column definitions; each column is a
 * {@link JsonObject} carrying at least a column name and a data-type descriptor.
 *
 * @author tilakpatidar
 */
public class JsonSchema extends Schema {
  public static final String RECORD_FIELDS_KEY = "values";
  public static final String TYPE_KEY = "type";
  public static final String ENUM_SYMBOLS_KEY = "symbols";
  public static final String COLUMN_NAME_KEY = "columnName";
  public static final String DATA_TYPE_KEY = "dataType";
  public static final String COMMENT_KEY = "comment";
  public static final String DEFAULT_VALUE_KEY = "defaultValue";
  public static final String IS_NULLABLE_KEY = "isNullable";
  public static final String DEFAULT_RECORD_COLUMN_NAME = "temp";
  public static final String DEFAULT_VALUE_FOR_OPTIONAL_PROPERTY = "";
  public static final String ARRAY_KEY = "item";
  public static final String ARRAY_ITEMS_KEY = "items";
  public static final String MAP_ITEMS_KEY = "values";
  public static final String MAP_KEY = "map";
  public static final String MAP_KEY_COLUMN_NAME = "key";
  public static final String MAP_VALUE_COLUMN_NAME = "value";

  // The declared data type of this schema node, parsed from the "type" entry of the dataType object.
  private final InputType type;

  /** The set of data types a source schema element may declare. */
  public enum InputType {
    STRING, INT, LONG, FLOAT, DOUBLE, BOOLEAN, ARRAY, ENUM, RECORD, MAP, DATE, TIMESTAMP
  }

  /**
   * Wraps a top-level source schema (a {@link JsonArray} of columns) as a single RECORD-typed schema
   * whose fields are the array's elements.
   */
  public JsonSchema(JsonArray jsonArray) {
    JsonObject recordType = new JsonObject();
    recordType.addProperty(TYPE_KEY, RECORD.toString());
    recordType.add(RECORD_FIELDS_KEY, jsonArray);
    JsonObject wrapper = new JsonObject();
    wrapper.addProperty(COLUMN_NAME_KEY, DEFAULT_RECORD_COLUMN_NAME);
    wrapper.add(DATA_TYPE_KEY, recordType);
    setJsonSchemaProperties(wrapper);
    this.type = RECORD;
  }

  /**
   * Builds a schema node from a single column descriptor; the {@link InputType} is parsed from the
   * "type" entry of its dataType object.
   */
  public JsonSchema(JsonObject jsonobject) {
    setJsonSchemaProperties(jsonobject);
    String declaredType = getDataType().get(TYPE_KEY).getAsString();
    this.type = InputType.valueOf(declaredType.toUpperCase());
  }

  /**
   * Get source.schema within a {@link InputType#RECORD} type.
   * The source.schema is represented by a {@link JsonArray}
   *
   * @return the record's field definitions, or an empty array for non-RECORD types
   */
  public JsonArray getDataTypeValues() {
    return this.type == RECORD ? getDataType().get(RECORD_FIELDS_KEY).getAsJsonArray() : new JsonArray();
  }

  /**
   * Get symbols for a {@link InputType#ENUM} type.
   *
   * @return the enum's symbols, or an empty array for non-ENUM types
   */
  public JsonArray getSymbols() {
    return this.type == ENUM ? getDataType().get(ENUM_SYMBOLS_KEY).getAsJsonArray() : new JsonArray();
  }

  /** @return the {@link InputType} declared by this schema node */
  public InputType getInputType() {
    return this.type;
  }

  /**
   * Builds a minimal {@link JsonSchema} for the given {@link InputType}, using the default column name.
   *
   * @param type the data type the schema should declare
   * @param isNullable whether the resulting schema is nullable
   */
  public static JsonSchema buildBaseSchema(InputType type, boolean isNullable) {
    JsonObject dataType = new JsonObject();
    dataType.addProperty(TYPE_KEY, type.toString());
    JsonObject schemaObject = new JsonObject();
    schemaObject.addProperty(COLUMN_NAME_KEY, DEFAULT_RECORD_COLUMN_NAME);
    schemaObject.add(DATA_TYPE_KEY, dataType);
    schemaObject.add(IS_NULLABLE_KEY, new JsonPrimitive(isNullable));
    return new JsonSchema(schemaObject);
  }

  /**
   * {@link InputType} of the elements composed within a complex type (e.g. array items or map values).
   *
   * @param itemKey the dataType entry naming the element type, e.g. {@link #ARRAY_ITEMS_KEY}
   */
  public InputType getElementTypeUsingKey(String itemKey) {
    return InputType.valueOf(this.getDataType().get(itemKey).getAsString().toUpperCase());
  }

  /**
   * Populates the inherited {@link Schema} fields (column name, data type, nullability, comment,
   * default value) from a column descriptor.
   */
  private void setJsonSchemaProperties(JsonObject jsonObject) {
    setColumnName(jsonObject.get(COLUMN_NAME_KEY).getAsString());
    setDataType(jsonObject.get(DATA_TYPE_KEY).getAsJsonObject());
    // Absent isNullable is treated as non-nullable.
    boolean nullable = jsonObject.has(IS_NULLABLE_KEY) && jsonObject.get(IS_NULLABLE_KEY).getAsBoolean();
    setNullable(nullable);
    setComment(getOptionalProperty(jsonObject, COMMENT_KEY));
    setDefaultValue(getOptionalProperty(jsonObject, DEFAULT_VALUE_KEY));
  }

  /**
   * Reads an optional string property, falling back to {@link #DEFAULT_VALUE_FOR_OPTIONAL_PROPERTY}
   * (the empty string) when the key is absent.
   */
  private String getOptionalProperty(JsonObject jsonObject, String key) {
    if (jsonObject.has(key)) {
      return jsonObject.get(key).getAsString();
    }
    return DEFAULT_VALUE_FOR_OPTIONAL_PROPERTY;
  }
}
// File: gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer/ParquetWriterShim.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.parquet.writer;
import java.io.Closeable;
import java.io.IOException;
/**
 * An interface to shield gobblin-parquet-common integration from different parquet version specific interfaces.
 *
 * <p>Implementations adapt a concrete, version-specific Parquet writer to this minimal
 * write/close contract; closing is inherited from {@link java.io.Closeable}.</p>
 *
 * @param <D> the type of record this writer accepts
 */
public interface ParquetWriterShim<D> extends Closeable {
  /**
   * Writes a single record to the underlying Parquet output.
   *
   * @param record the record to write
   * @throws IOException if the underlying writer fails
   */
  void write(D record)
      throws IOException;
}
// File: gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer/ParquetRecordFormat.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.parquet.writer;
/**
 * The in-memory record formats the Parquet writer can consume.
 *
 * <p>See {@code ParquetWriterConfiguration} for the configuration keys used to select a format.</p>
 */
public enum ParquetRecordFormat {
  /** Parquet's built-in example {@code Group} record model. */
  GROUP,
  /** Avro {@code GenericRecord} payloads. */
  AVRO,
  /** Protocol Buffers {@code Message} payloads. */
  PROTOBUF
}
// File: gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer/ParquetWriterConfiguration.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.parquet.writer;
import org.apache.hadoop.fs.Path;
import com.typesafe.config.Config;
import lombok.Getter;
import lombok.ToString;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ForkOperatorUtils;
import static org.apache.gobblin.configuration.ConfigurationKeys.LOCAL_FS_URI;
import static org.apache.gobblin.configuration.ConfigurationKeys.WRITER_CODEC_TYPE;
import static org.apache.gobblin.configuration.ConfigurationKeys.WRITER_FILE_SYSTEM_URI;
import static org.apache.gobblin.configuration.ConfigurationKeys.WRITER_PREFIX;
/**
 * Holds configuration for the {@link ParquetHdfsDataWriter}.
 *
 * <p>Values are read from the job configuration; writer property names are resolved per fork branch via
 * {@link ForkOperatorUtils#getPropertyNameForBranch(String, int, int)}.</p>
 */
@Getter @ToString
public class ParquetWriterConfiguration {
  // Writer-scoped configuration keys for Parquet-specific tuning.
  public static final String WRITER_PARQUET_PAGE_SIZE = WRITER_PREFIX + ".parquet.pageSize";
  public static final String WRITER_PARQUET_DICTIONARY_PAGE_SIZE = WRITER_PREFIX + ".parquet.dictionaryPageSize";
  public static final String WRITER_PARQUET_DICTIONARY = WRITER_PREFIX + ".parquet.dictionary";
  public static final String WRITER_PARQUET_VALIDATE = WRITER_PREFIX + ".parquet.validate";
  public static final String WRITER_PARQUET_VERSION = WRITER_PREFIX + ".parquet.version";
  public static final String DEFAULT_PARQUET_WRITER = "v1";
  public static final String WRITER_PARQUET_FORMAT = WRITER_PREFIX + ".parquet.format";
  public static final String DEFAULT_PARQUET_FORMAT = "group";
  public static final int DEFAULT_BLOCK_SIZE = 128 * 1024 * 1024;
  public static final int DEFAULT_PAGE_SIZE = 1 * 1024 * 1024;
  public static final String DEFAULT_COMPRESSION_CODEC_NAME = "UNCOMPRESSED";
  // NOTE(review): the ALLOWED_* arrays are declared but not enforced in this class — presumably
  // validated by the version-specific writer; confirm before relying on them.
  public static final String[] ALLOWED_COMPRESSION_CODECS = {"SNAPPY", "LZO", "UNCOMPRESSED", "GZIP"};
  public static final boolean DEFAULT_IS_DICTIONARY_ENABLED = true;
  public static final boolean DEFAULT_IS_VALIDATING_ENABLED = false;
  public static final String DEFAULT_WRITER_VERSION = "v1";
  public static final String[] ALLOWED_WRITER_VERSIONS = {"v1", "v2"};
  // Resolved configuration values; exposed through Lombok-generated getters.
  private final int pageSize;
  private final int dictPageSize;
  private final boolean dictionaryEnabled;
  private final boolean validate;
  private final String writerVersion;
  private final ParquetRecordFormat recordFormat;
  private final int numBranches;
  private final int branchId;
  private final String codecName;
  // Staging file path resolved against the writer filesystem URI.
  private final Path absoluteStagingFile;
  private final int blockSize;
  /** Convenience constructor that converts the job {@link State} into a {@link Config}. */
  public ParquetWriterConfiguration(State state, int numBranches, int branchId, Path stagingFile, int blockSize) {
    this(ConfigUtils.propertiesToConfig(state.getProperties()), numBranches, branchId, stagingFile, blockSize);
  }
  // Resolves a writer property name for this instance's fork branch.
  private String getProperty(String key) {
    return ForkOperatorUtils.getPropertyNameForBranch(key, numBranches, branchId);
  }
  /** Parses the configured record format (defaults to {@value #DEFAULT_PARQUET_FORMAT}). */
  public static ParquetRecordFormat getRecordFormatFromConfig(Config config) {
    String writeSupport = ConfigUtils.getString(config, WRITER_PARQUET_FORMAT, DEFAULT_PARQUET_FORMAT);
    ParquetRecordFormat recordFormat = ParquetRecordFormat.valueOf(writeSupport.toUpperCase());
    return recordFormat;
  }
  /**
   * Resolves all Parquet writer settings from config for the given branch, staging file, and block size.
   */
  ParquetWriterConfiguration(Config config, int numBranches, int branchId, Path stagingFile, int blockSize) {
    this.numBranches = numBranches;
    this.branchId = branchId;
    this.pageSize = ConfigUtils.getInt(config, getProperty(WRITER_PARQUET_PAGE_SIZE), DEFAULT_PAGE_SIZE);
    // NOTE(review): the dictionary page size falls back to DEFAULT_BLOCK_SIZE (128 MB), not
    // DEFAULT_PAGE_SIZE — looks suspicious; confirm this default is intentional.
    this.dictPageSize = ConfigUtils.getInt(config, getProperty(WRITER_PARQUET_DICTIONARY_PAGE_SIZE), DEFAULT_BLOCK_SIZE);
    this.dictionaryEnabled =
        ConfigUtils.getBoolean(config, getProperty(WRITER_PARQUET_DICTIONARY), DEFAULT_IS_DICTIONARY_ENABLED);
    this.validate = ConfigUtils.getBoolean(config, getProperty(WRITER_PARQUET_VALIDATE), DEFAULT_IS_VALIDATING_ENABLED);
    // Resolve the staging file against the writer filesystem URI (defaults to the local FS).
    String rootURI = ConfigUtils.getString(config, WRITER_FILE_SYSTEM_URI, LOCAL_FS_URI);
    this.absoluteStagingFile = new Path(rootURI, stagingFile);
    this.codecName = ConfigUtils.getString(config,getProperty(WRITER_CODEC_TYPE), DEFAULT_COMPRESSION_CODEC_NAME);
    this.recordFormat = getRecordFormatFromConfig(config);
    this.writerVersion = ConfigUtils.getString(config, getProperty(WRITER_PARQUET_VERSION), DEFAULT_WRITER_VERSION);
    this.blockSize = blockSize;
  }
}
// File: gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer/AbstractParquetDataWriterBuilder.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.parquet.writer;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.FsDataWriterBuilder;
import org.apache.gobblin.writer.WriterOutputFormat;
/**
 * Base {@link FsDataWriterBuilder} for Parquet writers. Parquet-library-version-specific subclasses
 * supply the actual writer through {@link #getVersionSpecificWriter(ParquetWriterConfiguration)}.
 *
 * @param <S> schema type
 * @param <D> record type
 */
@Slf4j
public abstract class AbstractParquetDataWriterBuilder<S,D> extends FsDataWriterBuilder<S, D> {
  /**
   * Builds a {@link ParquetHdfsDataWriter} for the configured destination.
   *
   * @throws RuntimeException if the destination type is not HDFS
   * @throws IOException if the writer cannot be created
   */
  @Override
  public DataWriter<D> build()
      throws IOException {
    // Validate the builder is fully configured for Parquet output before constructing the writer.
    Preconditions.checkNotNull(this.destination);
    Preconditions.checkArgument(!Strings.isNullOrEmpty(this.writerId));
    Preconditions.checkNotNull(this.schema);
    Preconditions.checkArgument(this.format == WriterOutputFormat.PARQUET);
    switch (this.destination.getType()) {
      case HDFS:
        return new ParquetHdfsDataWriter<D>(this, this.destination.getProperties());
      default:
        throw new RuntimeException("Unknown destination type: " + this.destination.getType());
    }
  }
  /**
   * Creates the writer for the Parquet library version this builder targets.
   *
   * @param writerConfiguration the fully-resolved writer configuration
   * @throws IOException if the underlying writer cannot be created
   */
  protected abstract ParquetWriterShim getVersionSpecificWriter(ParquetWriterConfiguration writerConfiguration)
      throws IOException;
  /**
   * Build a {@link ParquetWriterShim <D>} for given file path with a block size.
   * @param blockSize the Parquet row-group (block) size in bytes
   * @param stagingFile the staging file the writer should write to
   * @return a version-specific writer configured from the destination properties
   * @throws IOException if the underlying writer cannot be created
   */
  public ParquetWriterShim<D> getWriter(int blockSize, Path stagingFile)
      throws IOException {
    State state = this.destination.getProperties();
    ParquetWriterConfiguration writerConfiguration =
        new ParquetWriterConfiguration(state, this.getBranches(), this.getBranch(), stagingFile, blockSize);
    log.info("Parquet writer configured with {}", writerConfiguration);
    return getVersionSpecificWriter(writerConfiguration);
  }
}
// File: gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer/ParquetHdfsDataWriter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.parquet.writer;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.writer.FsDataWriter;
/**
 * An extension to {@link FsDataWriter} that writes in Parquet formats.
 *
 * <p>
 * This implementation allows users to specify different formats and codecs
 * through {@link ParquetWriterConfiguration} to write data.
 * </p>
 *
 * @author tilakpatidar
 *
 * @param <D> the type of record accepted by the underlying {@link ParquetWriterShim}
 */
public class ParquetHdfsDataWriter<D> extends FsDataWriter<D> {
  // Version-specific Parquet writer doing the actual writing.
  // Fix: was a raw ParquetWriterShim; parameterized with <D> to restore type safety.
  private final ParquetWriterShim<D> writer;
  // Count of records successfully handed to the underlying writer.
  protected final AtomicLong count = new AtomicLong(0);

  /**
   * Creates a writer that stages records through {@code builder}'s version-specific Parquet writer.
   *
   * @param builder the builder supplying the {@link ParquetWriterShim}; its record type must match {@code D}
   *     (was a raw type — widened to {@code AbstractParquetDataWriterBuilder<?, D>}, which raw-typed
   *     callers still satisfy)
   * @param state the destination properties
   * @throws IOException if the underlying Parquet writer cannot be created
   */
  public ParquetHdfsDataWriter(AbstractParquetDataWriterBuilder<?, D> builder, State state)
      throws IOException {
    super(builder, state);
    this.writer = builder.getWriter((int) this.blockSize, this.stagingFile);
  }

  @Override
  public void write(D record)
      throws IOException {
    this.writer.write(record);
    // Only count the record once the underlying writer accepted it without throwing.
    this.count.incrementAndGet();
  }

  @Override
  public long recordsWritten() {
    return this.count.get();
  }

  @Override
  public void close()
      throws IOException {
    // Close the Parquet writer first so its output is finalized before FsDataWriter's close runs,
    // even if the shim's close throws.
    try {
      this.writer.close();
    } finally {
      super.close();
    }
  }
}
// File: gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer/test/TestConstantsBase.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.parquet.writer.test;
import java.util.Arrays;
import org.apache.avro.generic.GenericRecord;
import com.google.protobuf.Message;
import org.apache.gobblin.parquet.writer.ParquetRecordFormat;
import org.apache.gobblin.test.TestRecord;
import org.apache.gobblin.test.proto.TestRecordProtos;
/**
* Holder for TestConstantsBase
* @param <ParquetGroup> : the class that implements ParquetGroup, generic to allow package-specific overrides
*/
public abstract class TestConstantsBase<ParquetGroup> {
public static TestRecord[] getTestValues() {
return Arrays.copyOf(TEST_VALUES, TEST_VALUES.length);
}
public static String[] getPayloadValues() {
return Arrays.copyOf(PAYLOAD_VALUES, PAYLOAD_VALUES.length);
}
public static int[] getSequenceValues() {
return Arrays.copyOf(SEQUENCE_VALUES, SEQUENCE_VALUES.length);
}
public static int[] getPartitionValues() {
return Arrays.copyOf(PARTITION_VALUES, PARTITION_VALUES.length);
}
public final String getParquetTestFilename(String format) { return "test-"+format+".parquet"; };
public static final String TEST_FS_URI = "file:///";
public static final String TEST_ROOT_DIR = System.getProperty("java.io.tmpdir") + "/" + System.currentTimeMillis();
public static final String TEST_STAGING_DIR = TEST_ROOT_DIR + "/staging";
public static final String TEST_OUTPUT_DIR = TEST_ROOT_DIR + "/output";
public static final String TEST_WRITER_ID = "writer-1";
public static final String TEST_EXTRACT_NAMESPACE = "com.linkedin.writer.test";
public static final String TEST_EXTRACT_ID = String.valueOf(System.currentTimeMillis());
public static final String TEST_EXTRACT_TABLE = "TestTable";
public static final String TEST_EXTRACT_PULL_TYPE = "FULL";
private static final TestRecord[] TEST_VALUES = new TestRecord[2];
public static final String PAYLOAD_FIELD_NAME = "payload";
public static final String SEQUENCE_FIELD_NAME = "sequence";
public static final String PARTITION_FIELD_NAME = "partition";
private static final String[] PAYLOAD_VALUES = {"value1", "value2"};
private static final int[] SEQUENCE_VALUES = {1, 2};
private static final int[] PARTITION_VALUES = {0, 1};
static {
for (int i=0; i < 2; ++i) {
TestRecord record = new TestRecord(getPartitionValues()[i],
getSequenceValues()[i],
getPayloadValues()[i]);
TEST_VALUES[i] = record;
}
}
public Object getRecord(int index, ParquetRecordFormat format) {
switch (format) {
case GROUP: {
return convertToParquetGroup(getTestValues()[index]);
}
case PROTOBUF: {
return getProtobufMessage(getTestValues()[index]);
}
case AVRO: {
return getAvroMessage(getTestValues()[index]);
}
default: {
throw new RuntimeException("Not understanding format " + format);
}
}
}
public abstract ParquetGroup convertToParquetGroup(TestRecord record);
private Message getProtobufMessage(TestRecord testValue) {
return TestRecordProtos.TestRecord.newBuilder()
.setPayload(testValue.getPayload())
.setPartition(testValue.getPartition())
.setSequence(testValue.getSequence())
.build();
}
/** Builds the Avro representation of the given test record. */
private GenericRecord getAvroMessage(TestRecord record) {
  org.apache.gobblin.test.avro.TestRecord avroRecord = new org.apache.gobblin.test.avro.TestRecord();
  avroRecord.setPartition(record.getPartition());
  avroRecord.setSequence(record.getSequence());
  avroRecord.setPayload(record.getPayload());
  return avroRecord;
}
}
| 3,151 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer | Create_ds/gobblin/gobblin-modules/gobblin-parquet-common/src/main/java/org/apache/gobblin/parquet/writer/test/ParquetHdfsDataWriterTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.parquet.writer.test;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import junit.framework.Assert;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.parquet.writer.ParquetRecordFormat;
import org.apache.gobblin.parquet.writer.ParquetWriterConfiguration;
import org.apache.gobblin.test.TestRecord;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.WriterOutputFormat;
/**
* Base class for building version-specific tests for Parquet
*/
@Slf4j
public abstract class ParquetHdfsDataWriterTestBase {

  private final TestConstantsBase testConstants;
  private String filePath;
  private DataWriter writer;

  public ParquetHdfsDataWriterTestBase(TestConstantsBase testConstants) {
    this.testConstants = testConstants;
  }

  /** @return the version-specific writer builder under test */
  protected abstract DataWriterBuilder getDataWriterBuilder();

  /** @return the version-specific schema object for the given record format */
  protected abstract Object getSchema(ParquetRecordFormat format);

  /** Reads all records back from the given Parquet output file for verification. */
  protected abstract List<TestRecord> readParquetRecordsFromFile(File outputFile, ParquetRecordFormat format)
      throws IOException;

  /**
   * Creates the staging and output directories if necessary and computes the
   * writer-relative file path used by the tests.
   */
  public void setUp()
      throws Exception {
    // TEST_STAGING_DIR / TEST_OUTPUT_DIR are static constants; reference them via the class
    // instead of through the instance (the original used this.testConstants.TEST_STAGING_DIR).
    File stagingDir = new File(TestConstantsBase.TEST_STAGING_DIR);
    File outputDir = new File(TestConstantsBase.TEST_OUTPUT_DIR);
    if (!stagingDir.exists()) {
      boolean mkdirs = stagingDir.mkdirs();
      assert mkdirs;
    }
    if (!outputDir.exists()) {
      boolean mkdirs = outputDir.mkdirs();
      assert mkdirs;
    }
    this.filePath = getFilePath();
  }

  /** Builds the writer-relative output path: {@code namespace/table/extractId_pullType}. */
  private String getFilePath() {
    return TestConstantsBase.TEST_EXTRACT_NAMESPACE.replaceAll("\\.", "/") + "/"
        + TestConstantsBase.TEST_EXTRACT_TABLE + "/" + TestConstantsBase.TEST_EXTRACT_ID + "_"
        + TestConstantsBase.TEST_EXTRACT_PULL_TYPE;
  }

  /** Builds the writer configuration {@link State} for the given Parquet record format. */
  private State createStateWithConfig(ParquetRecordFormat format) {
    State properties = new State();
    properties.setProp(ConfigurationKeys.WRITER_BUFFER_SIZE, ConfigurationKeys.DEFAULT_BUFFER_SIZE);
    properties.setProp(ConfigurationKeys.WRITER_FILE_SYSTEM_URI, TestConstantsBase.TEST_FS_URI);
    properties.setProp(ConfigurationKeys.WRITER_STAGING_DIR, TestConstantsBase.TEST_STAGING_DIR);
    properties.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, TestConstantsBase.TEST_OUTPUT_DIR);
    properties.setProp(ConfigurationKeys.WRITER_FILE_PATH, this.filePath);
    properties.setProp(ConfigurationKeys.WRITER_FILE_NAME, this.testConstants.getParquetTestFilename(format.name()));
    properties.setProp(ParquetWriterConfiguration.WRITER_PARQUET_DICTIONARY, true);
    properties.setProp(ParquetWriterConfiguration.WRITER_PARQUET_DICTIONARY_PAGE_SIZE, 1024);
    properties.setProp(ParquetWriterConfiguration.WRITER_PARQUET_PAGE_SIZE, 1024);
    properties.setProp(ParquetWriterConfiguration.WRITER_PARQUET_VALIDATE, true);
    properties.setProp(ParquetWriterConfiguration.WRITER_PARQUET_FORMAT, format.toString());
    properties.setProp(ConfigurationKeys.WRITER_CODEC_TYPE, "gzip");
    return properties;
  }

  /**
   * For each supported format: writes two fixture records, commits the writer, reads the
   * records back from the output file and verifies every field round-tripped.
   */
  public void testWrite()
      throws Exception {
    for (ParquetRecordFormat format : ParquetRecordFormat.values()) {
      State formatSpecificProperties = createStateWithConfig(format);
      this.writer = getDataWriterBuilder()
          .writeTo(Destination.of(Destination.DestinationType.HDFS, formatSpecificProperties))
          .withWriterId(TestConstantsBase.TEST_WRITER_ID)
          .writeInFormat(WriterOutputFormat.PARQUET)
          .withSchema(getSchema(format))
          .build();
      for (int i = 0; i < 2; ++i) {
        Object record = this.testConstants.getRecord(i, format);
        this.writer.write(record);
        Assert.assertEquals(i + 1, this.writer.recordsWritten());
      }
      this.writer.close();
      this.writer.commit();
      // Renamed from "filePath" to avoid shadowing the instance field of the same name.
      String outputDirPath = TestConstantsBase.TEST_OUTPUT_DIR + Path.SEPARATOR + this.filePath;
      File outputFile = new File(outputDirPath, this.testConstants.getParquetTestFilename(format.name()));
      List<TestRecord> records = readParquetRecordsFromFile(outputFile, format);
      for (int i = 0; i < 2; ++i) {
        TestRecord resultRecord = records.get(i);
        log.debug("Testing {} record {}", i, resultRecord);
        Assert.assertEquals(TestConstantsBase.getPayloadValues()[i], resultRecord.getPayload());
        Assert.assertEquals(TestConstantsBase.getSequenceValues()[i], resultRecord.getSequence());
        Assert.assertEquals(TestConstantsBase.getPartitionValues()[i], resultRecord.getPartition());
      }
    }
  }

  /** Recursively deletes the test root directory (staging and output) if it exists. */
  public void tearDown()
      throws IOException {
    File testRootDir = new File(TestConstantsBase.TEST_ROOT_DIR);
    if (testRootDir.exists()) {
      FileUtil.fullyDelete(testRootDir);
    }
  }
}
| 3,152 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-grok/src/test/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-grok/src/test/java/org/apache/gobblin/converter/grok/GrokToJsonConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.grok;
import java.io.InputStreamReader;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.DataConversionException;
@Test(groups = {"gobblin.converter"})
public class GrokToJsonConverterTest {
@Test
public void convertOutputWithNullableFields()
throws Exception {
JsonParser parser = new JsonParser();
String inputRecord =
"10.121.123.104 - - [01/Nov/2012:21:01:17 +0100] \"GET /cpc/auth.do?loginsetup=true&targetPage=%2Fcpc%2F HTTP/1.1\" 302 466";
JsonElement jsonElement = parser
.parse(new InputStreamReader(getClass().getResourceAsStream("/converter/grok/schemaWithNullableFields.json")));
JsonArray outputSchema = jsonElement.getAsJsonArray();
GrokToJsonConverter grokToJsonConverter = new GrokToJsonConverter();
WorkUnitState workUnitState = new WorkUnitState();
workUnitState.setProp(GrokToJsonConverter.GROK_PATTERN,
"^%{IPORHOST:clientip} (?:-|%{USER:ident}) (?:-|%{USER:auth}) \\[%{HTTPDATE:timestamp}\\] \\\"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|-)\\\" %{NUMBER:response} (?:-|%{NUMBER:bytes})");
grokToJsonConverter.init(workUnitState);
JsonObject actual = grokToJsonConverter.convertRecord(outputSchema, inputRecord, workUnitState).iterator().next();
JsonObject expected =
parser.parse(new InputStreamReader(getClass().getResourceAsStream("/converter/grok/convertedRecord.json")))
.getAsJsonObject();
Assert.assertEquals(actual, expected);
grokToJsonConverter.close();
}
@Test(expectedExceptions = DataConversionException.class)
public void convertOutputWithNonNullableFieldsShouldThrowDataConversionException()
throws Exception {
JsonParser parser = new JsonParser();
String inputRecord =
"10.121.123.104 - - [01/Nov/2012:21:01:17 +0100] \"GET /cpc/auth.do?loginsetup=true&targetPage=%2Fcpc%2F HTTP/1.1\" 302 466";
JsonElement jsonElement = parser.parse(
new InputStreamReader(getClass().getResourceAsStream("/converter/grok/schemaWithNonNullableFields.json")));
JsonArray outputSchema = jsonElement.getAsJsonArray();
GrokToJsonConverter grokToJsonConverter = new GrokToJsonConverter();
WorkUnitState workUnitState = new WorkUnitState();
workUnitState.setProp(GrokToJsonConverter.GROK_PATTERN,
"^%{IPORHOST:clientip} (?:-|%{USER:ident}) (?:-|%{USER:auth}) \\[%{HTTPDATE:timestamp}\\] \\\"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|-)\\\" %{NUMBER:response} (?:-|%{NUMBER:bytes})");
grokToJsonConverter.init(workUnitState);
JsonObject actual = grokToJsonConverter.convertRecord(outputSchema, inputRecord, workUnitState).iterator().next();
JsonObject expected =
parser.parse(new InputStreamReader(getClass().getResourceAsStream("/converter/grok/convertedRecord.json")))
.getAsJsonObject();
grokToJsonConverter.close();
}
@Test
public void convertWithNullStringSet()
throws Exception {
JsonParser parser = new JsonParser();
String inputRecord =
"79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be mybucket [06/Feb/2014:00:00:38 +0000] 192.0.2.3 79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be 3E57427F3EXAMPLE REST.GET.VERSIONING - \"GET /mybucket?versioning HTTP/1.1\" 200 - 113 - 7 - \"-\" \"S3Console/0.4\" -";
JsonElement jsonElement =
parser.parse(new InputStreamReader(getClass().getResourceAsStream("/converter/grok/s3AccessLogSchema.json")));
JsonArray outputSchema = jsonElement.getAsJsonArray();
GrokToJsonConverter grokToJsonConverter = new GrokToJsonConverter();
WorkUnitState workUnitState = new WorkUnitState();
//Grok expression was taken from https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/aws
workUnitState.setProp(GrokToJsonConverter.GROK_PATTERN,
"%{WORD:owner} %{NOTSPACE:bucket} \\[%{HTTPDATE:timestamp}\\] %{IP:clientip} %{NOTSPACE:requester} %{NOTSPACE:request_id} %{NOTSPACE:operation} %{NOTSPACE:key} (?:\"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})\"|-) (?:%{INT:response:int}|-) (?:-|%{NOTSPACE:error_code}) (?:%{INT:bytes:int}|-) (?:%{INT:object_size:int}|-) (?:%{INT:request_time_ms:int}|-) (?:%{INT:turnaround_time_ms:int}|-) (?:%{QS:referrer}|-) (?:\"?%{QS:agent}\"?|-) (?:-|%{NOTSPACE:version_id})");
workUnitState.setProp(GrokToJsonConverter.NULLSTRING_REGEXES, "[\\s-]");
grokToJsonConverter.init(workUnitState);
JsonObject actual = grokToJsonConverter.convertRecord(outputSchema, inputRecord, workUnitState).iterator().next();
JsonObject expected = parser
.parse(new InputStreamReader(getClass().getResourceAsStream("/converter/grok/convertedS3AccessLogRecord.json")))
.getAsJsonObject();
Assert.assertEquals(actual, expected);
grokToJsonConverter.close();
}
} | 3,153 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-grok/src/main/java/org/apache/gobblin/converter | Create_ds/gobblin/gobblin-modules/gobblin-grok/src/main/java/org/apache/gobblin/converter/grok/GrokToJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter.grok;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.util.List;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import io.thekraken.grok.api.Grok;
import io.thekraken.grok.api.Match;
import io.thekraken.grok.api.exception.GrokException;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.DataConversionException;
import org.apache.gobblin.converter.SchemaConversionException;
import org.apache.gobblin.converter.SingleRecordIterable;
import org.apache.gobblin.util.DatasetFilterUtils;
/**
* GrokToJsonConverter accepts already deserialized text row, String, where you can use.
*
* Converts Text to JSON based on Grok pattern. Schema is represented by the form of JsonArray same interface being used by CsvToJonConverter.
* Each text record is represented by a String.
* The converter only supports Grok patterns where every group is named because it uses the group names as column names.
*
* The following config properties can be set:
* The grok pattern to use for the conversion:
* converter.grokToJsonConverter.grokPattern ="^%{IPORHOST:clientip} (?:-|%{USER:ident}) (?:-|%{USER:auth}) \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|-)\" %{NUMBER:response} (?:-|%{NUMBER:bytes})"
*
* Path to the file which contains the base grok patterns which can be used in the converter's GROK pattern (if not set it will use the default ones):
* converter.grokToJsonConverter.baseGrokPatternsFile=
**
* Specify a comma separated list of regexes which will be applied on the fields and matched one will be converted to json null:
* converter.grokToJsonConverter.nullStringRegexes="[-\s]"
*
* Example of schema:
* [
{
"columnName": "Day",
"comment": "",
"isNullable": "true",
"dataType": {
"type": "string"
}
},
{
"columnName": "Pageviews",
"comment": "",
"isNullable": "true",
"dataType": {
"type": "long"
}
}
]
*/
public class GrokToJsonConverter extends Converter<String, JsonArray, String, JsonObject> {
  private static final Logger LOG = LoggerFactory.getLogger(GrokToJsonConverter.class);
  private static final JsonParser JSON_PARSER = new JsonParser();

  // Keys used in the JsonArray output schema (same layout as CsvToJsonConverter schemas).
  private static final String COLUMN_NAME_KEY = "columnName";
  private static final String DATA_TYPE = "dataType";
  private static final String TYPE_KEY = "type";
  private static final String NULLABLE = "isNullable";

  public static final String GROK_PATTERN = "converter.grokToJsonConverter.grokPattern";
  public static final String BASE_PATTERNS_FILE = "converter.grokToJsonConverter.baseGrokPatternsFile";
  public static final String NULLSTRING_REGEXES = "converter.grokToJsonConverter.nullStringRegexes";
  // NOTE(review): this constant is unused and names "/grok/grok-patterns" while init() loads the
  // classpath resource "/grok/grok-base-patterns" -- confirm which resource path is intended.
  public static final String DEFAULT_GROK_PATTERNS_FILE = "/grok/grok-patterns";

  private List<Pattern> nullStringRegexes;
  private Grok grok;

  /**
   * Compiles the configured Grok pattern, loading the base patterns either from the file named
   * by {@link #BASE_PATTERNS_FILE} or, if unset, from the bundled classpath resource.
   */
  @Override
  public Converter<String, JsonArray, String, JsonObject> init(WorkUnitState workUnit) {
    super.init(workUnit);
    String pattern = workUnit.getProp(GROK_PATTERN);
    String patternsFile = workUnit.getProp(BASE_PATTERNS_FILE);
    this.nullStringRegexes = DatasetFilterUtils.getPatternsFromStrings(workUnit.getPropAsList(NULLSTRING_REGEXES, ""));
    try {
      this.grok = new Grok();
      // try-with-resources: the reader was previously never closed (resource leak).
      try (InputStreamReader grokPatterns = (patternsFile == null)
          ? new InputStreamReader(getClass().getResourceAsStream("/grok/grok-base-patterns"), "UTF8")
          : new InputStreamReader(new FileInputStream(patternsFile), "UTF8")) {
        this.grok.addPatternFromReader(grokPatterns);
      }
      this.grok.compile(pattern);
    } catch (GrokException | IOException e) {
      // IOException covers the FileNotFound/UnsupportedEncoding cases caught before, plus
      // close() failures; the cause is now chained instead of flattened into the message.
      throw new RuntimeException("Error initializing GROK: " + e, e);
    }
    return this;
  }

  /**
   * The schema is passed through verbatim: the input schema string is parsed as a JsonArray.
   */
  @Override
  public JsonArray convertSchema(String inputSchema, WorkUnitState workUnit)
      throws SchemaConversionException {
    Preconditions.checkNotNull(inputSchema, "inputSchema is required.");
    return JSON_PARSER.parse(inputSchema).getAsJsonArray();
  }

  /**
   * Converts Text (String) to JSON based on a Grok regexp expression.
   * By default, fields between Text and JSON are mapped by Grok SEMANTIC which is the identifier
   * you give to the piece of text being matched in your Grok expression.
   *
   * {@inheritDoc}
   * @see Converter#convertRecord(Object, Object, WorkUnitState)
   */
  @Override
  public Iterable<JsonObject> convertRecord(JsonArray outputSchema, String inputRecord, WorkUnitState workUnit)
      throws DataConversionException {
    JsonObject outputRecord = createOutput(outputSchema, inputRecord);
    LOG.debug("Converted into " + outputRecord);
    return new SingleRecordIterable<>(outputRecord);
  }

  /**
   * Matches {@code inputRecord} against the compiled Grok pattern and builds a JsonObject with
   * one property per schema column, coerced to the column's declared primitive type.
   *
   * @throws DataConversionException if a non-nullable column is missing or null in the match
   */
  @VisibleForTesting
  JsonObject createOutput(JsonArray outputSchema, String inputRecord)
      throws DataConversionException {
    JsonObject outputRecord = new JsonObject();
    Match gm = grok.match(inputRecord);
    gm.captures();
    JsonElement capturesJson = JSON_PARSER.parse(gm.toJson());
    for (JsonElement anOutputSchema : outputSchema) {
      JsonObject outputSchemaJsonObject = anOutputSchema.getAsJsonObject();
      String key = outputSchemaJsonObject.get(COLUMN_NAME_KEY).getAsString();
      String type = outputSchemaJsonObject.getAsJsonObject(DATA_TYPE).get(TYPE_KEY).getAsString();
      if (isFieldNull(capturesJson, key)) {
        if (!outputSchemaJsonObject.get(NULLABLE).getAsBoolean()) {
          throw new DataConversionException(
              "Field " + key + " is null or not exists but it is non-nullable by the schema.");
        }
        outputRecord.add(key, JsonNull.INSTANCE);
      } else {
        JsonElement jsonElement = capturesJson.getAsJsonObject().get(key);
        switch (type) {
          case "int":
            outputRecord.addProperty(key, jsonElement.getAsInt());
            break;
          case "long":
            outputRecord.addProperty(key, jsonElement.getAsLong());
            break;
          case "double":
            outputRecord.addProperty(key, jsonElement.getAsDouble());
            break;
          case "float":
            outputRecord.addProperty(key, jsonElement.getAsFloat());
            break;
          case "boolean":
            outputRecord.addProperty(key, jsonElement.getAsBoolean());
            break;
          case "string":
          default:
            // Unknown types fall back to the raw string value.
            outputRecord.addProperty(key, jsonElement.getAsString());
        }
      }
    }
    return outputRecord;
  }

  /**
   * A field counts as null if it is absent from the Grok captures or its string value matches
   * any of the configured null-string regexes.
   */
  private boolean isFieldNull(JsonElement capturesJson, String key) {
    JsonObject jsonObject = capturesJson.getAsJsonObject();
    if (!jsonObject.has(key)) {
      return true;
    }
    for (Pattern pattern : this.nullStringRegexes) {
      if (pattern.matcher(jsonObject.get(key).getAsString()).matches()) {
        return true;
      }
    }
    return false;
  }
}
| 3,154 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/test/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/test/java/org/apache/gobblin/compliance/purger/HivePurgerExtractorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
/** Unit tests for {@code HivePurgerExtractor} using a partial mock with real method calls. */
public class HivePurgerExtractorTest {
  private PurgeableHivePartitionDataset dataset = Mockito.mock(PurgeableHivePartitionDataset.class);
  private HivePurgerExtractor extractor = Mockito.mock(HivePurgerExtractor.class);

  /** Stubs the extractor so its real logic runs, then seeds it with the mock dataset record. */
  @BeforeTest
  public void initialize()
      throws IOException {
    Mockito.doCallRealMethod().when(this.extractor).getSchema();
    Mockito.doCallRealMethod().when(this.extractor).readRecord(null);
    Mockito.doCallRealMethod().when(this.extractor).getExpectedRecordCount();
    Mockito.doCallRealMethod().when(this.extractor).setRecord(this.dataset);
    this.extractor.setRecord(this.dataset);
  }

  @Test
  public void getSchemaTest() {
    Assert.assertNotNull(this.extractor.getSchema());
  }

  /** The extractor hands out exactly one record: first read returns it, second read returns null. */
  @Test
  public void readRecordTest()
      throws IOException {
    Assert.assertNotNull(this.extractor.readRecord(null));
    Assert.assertNull(this.extractor.readRecord(null));
    Assert.assertEquals(1, this.extractor.getExpectedRecordCount());
  }
}
| 3,155 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/test/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/test/java/org/apache/gobblin/compliance/purger/HivePurgerWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import org.mockito.Mockito;
import org.testng.annotations.Test;
/** Unit test for {@code HivePurgerWriter}. */
@Test
public class HivePurgerWriterTest {
  private PurgeableHivePartitionDataset dataset = Mockito.mock(PurgeableHivePartitionDataset.class);
  private HivePurgerWriter writer = Mockito.mock(HivePurgerWriter.class);

  public void purgeTest()
      throws IOException {
    // Run the real write(...) implementation and verify it delegates to the dataset's purge().
    Mockito.doCallRealMethod().when(this.writer).write(this.dataset);
    this.writer.write(this.dataset);
    Mockito.verify(this.dataset).purge();
  }
}
| 3,156 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/test/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/test/java/org/apache/gobblin/compliance/purger/HivePurgerConverterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.WorkUnitState;
import static org.mockito.Mockito.anyList;
/** Unit tests for {@code HivePurgerConverter} using partial mocks with real method calls. */
@Test
public class HivePurgerConverterTest {
  private WorkUnitState state = Mockito.mock(WorkUnitState.class);
  private PurgeableHivePartitionDatasetSchema schema = Mockito.mock(PurgeableHivePartitionDatasetSchema.class);
  private HivePurgerConverter converter = Mockito.mock(HivePurgerConverter.class);
  private PurgeableHivePartitionDataset dataset = Mockito.mock(PurgeableHivePartitionDataset.class);

  /** Lets the real convertSchema/convertRecord implementations run on the mock converter. */
  @BeforeTest
  public void initialize() {
    Mockito.doCallRealMethod().when(this.converter).convertSchema(this.schema, this.state);
    Mockito.doCallRealMethod().when(this.converter).convertRecord(this.schema, this.dataset, this.state);
  }

  public void convertSchemaTest() {
    Assert.assertNotNull(this.converter.convertSchema(this.schema, this.state));
  }

  /** Converting a record must populate the dataset's purge queries. */
  public void convertRecordTest() {
    this.converter.convertRecord(this.schema, this.dataset, this.state);
    Mockito.verify(this.dataset).setPurgeQueries(anyList());
  }
}
| 3,157 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/ComplianceJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.instrumented.Instrumentable;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.util.AzkabanTags;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
/**
* Abstract class for the compliance jobs.
*
* @author adsharma
*/
public abstract class ComplianceJob implements Closeable, Instrumentable {
  // Job configuration; also seeds the metric context and executor sizing below.
  protected Properties properties;
  // Optional latch subclasses can use to wait until per-dataset cleanup has finished.
  protected Optional<CountDownLatch> finishCleanSignal;
  // Listening executor on which per-dataset work is scheduled; scales up to MAX_CONCURRENT_DATASETS.
  protected final ListeningExecutorService service;
  // Finds the datasets this compliance job operates on; expected to be set by subclasses.
  protected DatasetsFinder finder;
  // Collects failures from individual dataset tasks so the job can surface them at the end.
  protected final List<Throwable> throwables;
  // Owns closeable resources (the metric context is registered here); closed when the job closes.
  protected final Closer closer;
  protected final boolean isMetricEnabled;
  protected MetricContext metricContext;
  protected final EventSubmitter eventSubmitter;
  /**
   * Builds the shared job plumbing: a scaling thread pool, a metric context tagged with the
   * Azkaban tags, and an event submitter under the compliance events namespace.
   *
   * @param properties job configuration properties
   */
  public ComplianceJob(Properties properties) {
    this.properties = properties;
    // Scaling pool: core size 0, max bounded by MAX_CONCURRENT_DATASETS, keep-alive of 100
    // (time unit defined by ScalingThreadPoolExecutor -- confirm).
    // NOTE(review): "complaince" is a typo in the thread-name pattern; left as-is since it is
    // a runtime string.
    ExecutorService executor = ScalingThreadPoolExecutor.newScalingThreadPool(0,
        Integer.parseInt(properties.getProperty(ComplianceConfigurationKeys.MAX_CONCURRENT_DATASETS, ComplianceConfigurationKeys.DEFAULT_MAX_CONCURRENT_DATASETS)), 100,
        ExecutorsUtils.newThreadFactory(Optional.<Logger>absent(), Optional.of("complaince-job-pool-%d")));
    this.service = MoreExecutors.listeningDecorator(executor);
    this.closer = Closer.create();
    List<Tag<?>> tags = Lists.newArrayList();
    tags.addAll(Tag.fromMap(AzkabanTags.getAzkabanTags()));
    // Register the metric context with the closer so it is released on close().
    this.metricContext =
        this.closer.register(Instrumented.getMetricContext(new State(properties), ComplianceJob.class, tags));
    this.isMetricEnabled = GobblinMetrics.isEnabled(properties);
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, ComplianceEvents.NAMESPACE).build();
    this.throwables = Lists.newArrayList();
  }
  /** Runs the compliance job; implemented by concrete jobs. */
  public abstract void run()
      throws IOException;
  @Override
  public MetricContext getMetricContext() {
    return this.metricContext;
  }
  @Override
  public boolean isInstrumentationEnabled() {
    return this.isMetricEnabled;
  }
  /** Default with no additional tags */
  @Override
  public List<Tag<?>> generateTags(State state) {
    return Lists.newArrayList();
  }
  @Override
  public void switchMetricContext(List<Tag<?>> tags) {
    // The new context is derived from the current one and registered for cleanup.
    this.metricContext = this.closer
        .register(Instrumented.newContextFromReferenceContext(this.metricContext, tags, Optional.<String>absent()));
    this.regenerateMetrics();
  }
  @Override
  public void switchMetricContext(MetricContext context) {
    this.metricContext = context;
    this.regenerateMetrics();
  }
  /**
   * Generates metrics for the instrumentation of this class.
   */
  protected abstract void regenerateMetrics();
}
| 3,158 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/DatasetDescriptorImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.util.List;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.google.gson.JsonObject;
import com.google.gson.JsonParseException;
import com.google.gson.JsonParser;
import lombok.extern.slf4j.Slf4j;
/**
* This is the default implementation of {@link DatasetDescriptor}
*
* Descriptor is the JsonString corresponding to the value of dataset.descriptor.
* ComplianceFieldPath is the path to the complianceField in the dataset.descriptor.
* ComplianceFieldPath must not contain array element as it is not supported.
*
* @author adsharma
*/
@Slf4j
public class DatasetDescriptorImpl extends DatasetDescriptor {
  // Splits a dot-delimited compliance field path such as "a.b.c" into its segments.
  private static final Splitter DOT_SPLITTER = Splitter.on(".").omitEmptyStrings().trimResults();
  // Value extracted from the descriptor JSON at complianceFieldPath; set once in the constructor.
  private String complianceField;
  public DatasetDescriptorImpl(String descriptor, Optional<String> complianceFieldPath) {
    super(descriptor, complianceFieldPath);
    setComplianceField();
  }
  /**
   * Parses {@code this.descriptor} as JSON and resolves {@code this.complianceFieldPath}
   * (dot-separated, array elements unsupported) to its string value.
   *
   * <p>A parse failure or missing path segment is logged and rethrown unchecked via
   * {@link Throwables#propagate(Throwable)}.</p>
   */
  private void setComplianceField() {
    Preconditions.checkArgument(this.complianceFieldPath.isPresent());
    try {
      JsonObject descriptorObject = new JsonParser().parse(this.descriptor).getAsJsonObject();
      List<String> list = DOT_SPLITTER.splitToList(this.complianceFieldPath.get());
      // Walk down the nested objects for all but the last path segment.
      for (int i = 0; i < list.size() - 1; i++) {
        descriptorObject = descriptorObject.getAsJsonObject(list.get(i));
      }
      this.complianceField = descriptorObject.get(list.get(list.size() - 1)).getAsString();
    } catch (JsonParseException | NullPointerException e) {
      // NOTE(review): a missing segment surfaces as a NullPointerException that is used as
      // control flow here; an explicit null check per segment would be clearer.
      log.warn("Compliance field not found at path " + this.complianceFieldPath.get() + " in the descriptor "
          + this.descriptor);
      Throwables.propagate(e);
    }
  }
  @Override
  public String getComplianceField() {
    return this.complianceField;
  }
}
| 3,159 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/QueryExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
/**
 * Contract for executing queries (e.g. Hive queries over JDBC).
 *
 * @author adsharma
 */
public interface QueryExecutor {
  /**
   * Executes a single query.
   *
   * @param query the query string to execute
   * @throws IOException if the underlying connection fails
   * @throws SQLException if the query fails to execute
   */
  void executeQuery(String query)
      throws IOException, SQLException;
  /**
   * Executes the given queries in order.
   *
   * @param queries the query strings to execute, in order
   * @throws IOException if the underlying connection fails
   * @throws SQLException if any query fails to execute
   */
  void executeQueries(List<String> queries)
      throws IOException, SQLException;
}
| 3,160 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/HivePartitionVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
 * A single version of a {@link HivePartitionDataset}, backed by a Hive {@link Partition}.
 *
 * @author adsharma
 */
public abstract class HivePartitionVersion extends HivePartitionDataset implements DatasetVersion, Comparable<HivePartitionVersion> {
  public HivePartitionVersion(Partition partition) {
    super(partition);
  }
  public HivePartitionVersion(HivePartitionDataset dataset) {
    super(dataset);
  }
  /** Ordering of versions is defined by concrete subclasses. */
  @Override
  public abstract int compareTo(HivePartitionVersion other);
  /** The version identifier is the dataset URN of the underlying partition. */
  @Override
  public String getVersion() {
    return datasetURN();
  }
}
| 3,161 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/ComplianceEvents.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
/**
 * Event names and metadata keys emitted by gobblin-compliance jobs.
 *
 * This is a constants holder; neither this class nor its nested classes are
 * meant to be instantiated, so all constructors are private.
 *
 * @author adsharma
 */
public class ComplianceEvents {
  /** Namespace under which all compliance events are reported. */
  public static final String NAMESPACE = "gobblin.compliance";
  /** Metadata key whose value is the URN of the dataset the event concerns. */
  public static final String DATASET_URN_METADATA_KEY = "datasetUrn";
  // Value for this key will be a stacktrace of any exception caused
  public static final String FAILURE_CONTEXT_METADATA_KEY = "failureContext";
  private ComplianceEvents() {
  }
  /** Events emitted by the retention job. */
  public static class Retention {
    public static final String FAILED_EVENT_NAME = "RetentionFailed";
    private Retention() {
    }
  }
  /** Events emitted by the validation job. */
  public static class Validation {
    public static final String FAILED_EVENT_NAME = "ValidationFailed";
    private Validation() {
    }
  }
  /** Events emitted by the restore job. */
  public static class Restore {
    public static final String FAILED_EVENT_NAME = "RestoreFailed";
    private Restore() {
    }
  }
  /** Events emitted by the purger job. */
  public static class Purger {
    public static final String WORKUNIT_GENERATED = "WorkUnitGenerated";
    public static final String WORKUNIT_COMMITTED = "WorkUnitCommitted";
    public static final String WORKUNIT_FAILED = "WorkUnitFailed";
    public static final String CYCLE_COMPLETED = "PurgeCycleCompleted";
    private Purger() {
    }
  }
}
| 3,162 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/HiveProxyQueryExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.io.Closeable;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hive.jdbc.HiveConnection;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import edu.umd.cs.findbugs.annotations.SuppressWarnings;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.HostUtils;
/**
 * This class is responsible for executing Hive queries using jdbc connection.
 * This class can execute queries as hadoop proxy users by first authenticating hadoop super user.
 *
 * @author adsharma
 */
@Slf4j
@SuppressWarnings
public class HiveProxyQueryExecutor implements QueryExecutor, Closeable {
  // Map key used when no proxy user is configured (single, non-proxied connection).
  private static final String DEFAULT = "default";
  // Splits the semicolon-separated hive-settings string into individual statements.
  private static final Splitter SC_SPLITTER = Splitter.on(";").omitEmptyStrings().trimResults();
  // One JDBC connection and one Statement per user (proxy user name, or DEFAULT).
  private Map<String, HiveConnection> connectionMap = new HashMap<>();
  private Map<String, Statement> statementMap = new HashMap<>();
  private State state;
  // Hive settings executed on every freshly opened connection, before any query.
  private List<String> settings = new ArrayList<>();
  /**
   * Instantiates a new Hive proxy query executor.
   *
   * @param state the state
   * @param proxies the proxies; when empty, a single non-proxied connection is opened
   * @throws IOException the io exception
   */
  public HiveProxyQueryExecutor(State state, List<String> proxies)
      throws IOException {
    try {
      // Defensive copy: later mutations of the caller's state are not observed.
      this.state = new State(state);
      setHiveSettings(state);
      if (proxies.isEmpty()) {
        setConnection();
      } else {
        setProxiedConnection(proxies);
      }
    } catch (InterruptedException | TException | ClassNotFoundException | SQLException e) {
      // All setup failures are surfaced uniformly as IOException.
      throw new IOException(e);
    }
  }
  /**
   * Instantiates a new Hive proxy query executor, deriving the proxy-user list
   * from the state configuration.
   *
   * @param state the state
   * @throws IOException the io exception
   */
  public HiveProxyQueryExecutor(State state)
      throws IOException {
    this(state, getProxiesFromState(state));
  }
  // Builds the proxy-user list from configuration: empty when proxying is disabled;
  // otherwise both the configured proxy user and the super user (both required).
  private static List<String> getProxiesFromState(State state) {
    if (!state.getPropAsBoolean(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SHOULD_PROXY,
        ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DEFAULT_SHOULD_PROXY)) {
      return Collections.emptyList();
    }
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_PROXY_USER),
        "Missing required property " + ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_PROXY_USER);
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER),
        "Missing required property " + ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
    List<String> proxies = new ArrayList<>();
    proxies.add(state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_PROXY_USER));
    proxies.add(state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER));
    return proxies;
  }
  // Logs in as the super user via keytab, then opens one proxied connection per user
  // inside a doAs block so the JDBC handshake carries the super user's credentials.
  // Applies the configured hive settings on each new connection.
  private synchronized void setProxiedConnection(final List<String> proxies)
      throws IOException, InterruptedException, TException {
    Preconditions.checkArgument(this.state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION),
        "Missing required property " + ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
    String superUser = this.state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
    String keytabLocation = this.state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
    String realm = this.state.getProp(ConfigurationKeys.KERBEROS_REALM);
    UserGroupInformation loginUser = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI(HostUtils.getPrincipalUsingHostname(superUser, realm), keytabLocation);
    loginUser.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run()
          throws MetaException, SQLException, ClassNotFoundException {
        for (String proxy : proxies) {
          HiveConnection hiveConnection = getHiveConnection(Optional.fromNullable(proxy));
          Statement statement = hiveConnection.createStatement();
          statementMap.put(proxy, statement);
          connectionMap.put(proxy, hiveConnection);
          for (String setting : settings) {
            statement.execute(setting);
          }
        }
        return null;
      }
    });
  }
  // Opens a single non-proxied connection under the DEFAULT key and applies the
  // configured hive settings on it.
  private synchronized void setConnection()
      throws ClassNotFoundException, SQLException {
    HiveConnection hiveConnection = getHiveConnection(Optional.<String>absent());
    Statement statement = hiveConnection.createStatement();
    this.statementMap.put(DEFAULT, statement);
    this.connectionMap.put(DEFAULT, hiveConnection);
    for (String setting : settings) {
      statement.execute(setting);
    }
  }
  // Builds a HiveConnection from the configured JDBC URL; when a proxy user is
  // given, the hive.server2.proxy.user parameter is appended to the URL.
  private HiveConnection getHiveConnection(Optional<String> proxyUser)
      throws ClassNotFoundException, SQLException {
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.HIVE_JDBC_URL), "Missing required property " + ComplianceConfigurationKeys.HIVE_JDBC_URL);
    String url = this.state.getProp(ComplianceConfigurationKeys.HIVE_JDBC_URL);
    if (proxyUser.isPresent()) {
      url = url + ComplianceConfigurationKeys.HIVE_SERVER2_PROXY_USER + proxyUser.get();
    }
    return (HiveConnection) DriverManager.getConnection(url);
  }
  /** Executes the queries on the sole open connection (fails if several users are connected). */
  @Override
  public void executeQueries(List<String> queries)
      throws SQLException {
    executeQueries(queries, Optional.<String>absent());
  }
  /** Executes the query on the sole open connection (fails if several users are connected). */
  @Override
  public void executeQuery(String query)
      throws SQLException {
    executeQuery(query, Optional.<String>absent());
  }
  /**
   * Execute queries.
   *
   * @param queries the queries, executed in order on the proxy user's connection
   * @param proxy the proxy user; when absent, there must be exactly one connection
   * @throws SQLException the sql exception
   */
  public void executeQueries(List<String> queries, Optional<String> proxy)
      throws SQLException {
    Preconditions.checkArgument(!this.statementMap.isEmpty(), "No hive connection. Unable to execute queries");
    if (!proxy.isPresent()) {
      // Without an explicit user we can only disambiguate when one connection exists.
      Preconditions.checkArgument(this.statementMap.size() == 1, "Multiple Hive connections. Please specify a user");
      proxy = Optional.fromNullable(this.statementMap.keySet().iterator().next());
    }
    Statement statement = this.statementMap.get(proxy.get());
    for (String query : queries) {
      statement.execute(query);
    }
  }
  /**
   * Execute query.
   *
   * @param query the query
   * @param proxy the proxy user; when absent, there must be exactly one connection
   * @throws SQLException the sql exception
   */
  public void executeQuery(String query, Optional<String> proxy)
      throws SQLException {
    executeQueries(Collections.singletonList(query), proxy);
  }
  /**
   * Closes all statements, then all connections.
   * NOTE(review): a failure while closing one resource aborts the remaining
   * closes (the SQLException is rethrown as IOException) — confirm this
   * best-effort-free behavior is intended.
   */
  @Override
  public void close()
      throws IOException {
    try {
      for (Map.Entry<String, Statement> entry : this.statementMap.entrySet()) {
        if (entry.getValue() != null) {
          entry.getValue().close();
        }
      }
      for (Map.Entry<String, HiveConnection> entry : this.connectionMap.entrySet()) {
        if (entry.getValue() != null) {
          entry.getValue().close();
        }
      }
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }
  // Parses the optional semicolon-separated hive-settings property into the
  // list of statements run on each new connection.
  private void setHiveSettings(State state) {
    if (state.contains(ComplianceConfigurationKeys.HIVE_SETTINGS)) {
      String queryString = state.getProp(ComplianceConfigurationKeys.HIVE_SETTINGS);
      this.settings = SC_SPLITTER.splitToList(queryString);
    }
  }
}
| 3,163 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/HivePartitionVersionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import org.apache.gobblin.configuration.State;
/**
 * Abstract class for selection policies corresponding to {@link HivePartitionVersion}
 *
 * Concrete subclasses implement the {@link Policy} selection logic; this base
 * class only holds the job state and the dataset the policy applies to.
 *
 * @author adsharma
 */
public abstract class HivePartitionVersionPolicy implements Policy<HivePartitionVersion> {
  // Defensive copy of the job state (see constructor).
  protected State state;
  // The dataset whose versions this policy selects among.
  protected HivePartitionDataset dataset;
  /**
   * @param state job state; copied so later mutations by the caller are not observed
   * @param dataset the dataset whose versions are to be selected
   */
  public HivePartitionVersionPolicy(State state, HivePartitionDataset dataset) {
    this.state = new State(state);
    this.dataset = dataset;
  }
}
| 3,164 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/HivePartitionVersionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.retention.ComplianceRetentionJob;
import org.apache.gobblin.compliance.retention.HivePartitionRetentionVersion;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.util.AutoReturnableObject;
/**
 * A version finder class to find {@link HivePartitionVersion}s.
 *
 * @author adsharma
 */
@Slf4j
public class HivePartitionVersionFinder implements org.apache.gobblin.data.management.version.finder.VersionFinder<HivePartitionVersion> {
  protected final FileSystem fs;
  protected final State state;
  // Patterns used to select version tables (e.g. backup/staging/trash suffixes).
  protected List<String> patterns;
  // Owner of the dataset whose versions are being found; set in findDatasetVersions.
  // NOTE(review): looks unused within this class — confirm whether a subclass reads it.
  private Optional<String> owner = Optional.absent();
  // NOTE(review): this instance-level list is appended to on every findVersions call
  // and returned each time, so repeated findDatasetVersions calls on the same finder
  // re-return (and duplicate) earlier results — confirm single-use is intended.
  private List<HivePartitionVersion> versions = new ArrayList<>();
  // Class-wide lock serializing metastore access across all finder instances.
  private static final Object lock = new Object();
  // Splits "db@table" style names into their components.
  private static final Splitter At_SPLITTER = Splitter.on("@").omitEmptyStrings().trimResults();
  public HivePartitionVersionFinder(FileSystem fs, State state, List<String> patterns) {
    this.fs = fs;
    // Defensive copy: later mutations of the caller's state are not observed.
    this.state = new State(state);
    this.patterns = patterns;
  }
  @Override
  public Class<HivePartitionVersion> versionClass() {
    return HivePartitionVersion.class;
  }
  /**
   * Will find all the versions of the {@link HivePartitionDataset}.
   *
   * For a dataset with table name table1, corresponding versions table will be
   * table1_backup_timestamp or table1_staging_timestamp or table1_trash_timestamp
   *
   * Based on pattern, a type of version will be selected eg. backup or trash or staging
   *
   * If a Hive version's table contain no Partitions, it will be dropped.
   */
  @Override
  public Collection<HivePartitionVersion> findDatasetVersions(Dataset dataset)
      throws IOException {
    List<HivePartitionVersion> versions = new ArrayList<>();
    // Only HivePartitionDatasets have versions; anything else yields an empty result.
    if (!(dataset instanceof HivePartitionDataset)) {
      return versions;
    }
    HivePartitionDataset hivePartitionDataset = (HivePartitionDataset) dataset;
    this.owner = hivePartitionDataset.getOwner();
    Preconditions.checkArgument(!this.patterns.isEmpty(),
        "No patterns to find versions for the dataset " + dataset.datasetURN());
    versions
        .addAll(findVersions(hivePartitionDataset.getName(), hivePartitionDataset.datasetURN()));
    return versions;
  }
  // Scans the whitelisted version tables (HIVE_VERSIONS_WHITELIST) for partitions
  // matching the given partition name; returns the accumulated instance-level list.
  private List<HivePartitionVersion> findVersions(String name, String urn)
      throws IOException {
    State state = new State(this.state);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.HIVE_VERSIONS_WHITELIST),
        "Missing required property " + ComplianceConfigurationKeys.HIVE_VERSIONS_WHITELIST);
    // The versions whitelist is substituted in as the dataset whitelist so the
    // HiveDataset machinery enumerates version tables instead of primary tables.
    state.setProp(ComplianceConfigurationKeys.HIVE_DATASET_WHITELIST,
        this.state.getProp(ComplianceConfigurationKeys.HIVE_VERSIONS_WHITELIST));
    setVersions(name, state);
    log.info("Found " + this.versions.size() + " versions for the dataset " + urn);
    return this.versions;
  }
  // Appends a retention version for each partition whose name matches (case-insensitively).
  private void addPartitionsToVersions(List<HivePartitionVersion> versions, String name,
      List<Partition> partitions)
      throws IOException {
    for (Partition partition : partitions) {
      if (partition.getName().equalsIgnoreCase(name)) {
        versions.add(new HivePartitionRetentionVersion(partition));
      }
    }
  }
  // Runs the metastore scan as the login user, under the class-wide lock, collecting
  // matching partitions from every whitelisted table whose name contains a pattern.
  private void setVersions(final String name, final State state)
      throws IOException {
    try {
      UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
      loginUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run()
            throws IOException {
          synchronized (lock) {
            List<Partition> partitions = null;
            for (String tableName : ComplianceRetentionJob.tableNamesList) {
              for (String pattern : patterns) {
                if (tableName.contains(pattern)) {
                  partitions = getPartitions(tableName);
                  addPartitionsToVersions(versions, name, partitions);
                }
              }
            }
          }
          return null;
        }
      });
    } catch (InterruptedException | IOException e) {
      throw new IOException(e);
    }
  }
  // Fetches partitions for a "db@table" name via the shared metastore client pool;
  // malformed names and metastore failures degrade to an empty list (logged).
  private static List<Partition> getPartitions(String completeTableName) {
    List<String> tableList = At_SPLITTER.splitToList(completeTableName);
    if (tableList.size() != 2) {
      log.warn("Invalid table name " + completeTableName);
      return Collections.EMPTY_LIST;
    }
    try (AutoReturnableObject<IMetaStoreClient> client = ComplianceRetentionJob.pool.getClient()) {
      Table table = client.get().getTable(tableList.get(0), tableList.get(1));
      HiveDataset dataset = new HiveDataset(FileSystem.newInstance(new Configuration()), ComplianceRetentionJob.pool,
          new org.apache.hadoop.hive.ql.metadata.Table(table), new Properties());
      return dataset.getPartitionsFromDataset();
    } catch (IOException | TException e) {
      log.warn("Unable to get Partitions for table " + completeTableName + " " + e.getMessage());
    }
    return Collections.EMPTY_LIST;
  }
}
| 3,165 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/DatasetDescriptor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.gson.JsonParseException;
import com.google.gson.JsonParser;
import lombok.extern.slf4j.Slf4j;
/**
 * Base class for descriptions of a Hive dataset used by gobblin-compliance.
 *
 * Each Hive dataset using gobblin-compliance must carry a dataset.descriptor
 * property (a JSON string) in its tblproperties. The descriptor contains an
 * identifier whose value names the column holding the compliance id; the
 * dot-separated path to that identifier is configured via the
 * dataset.descriptor.identifier job property.
 *
 * Example: dataset.descriptor = {Database : Repos, Owner : GitHub, ComplianceInfo : {IdentifierType : GitHubId}}
 * With IdentifierType as the identifier and GithubId as the compliance field:
 * dataset.descriptor.identifier = ComplianceInfo.IdentifierType
 *
 * @author adsharma
 */
@Slf4j
public abstract class DatasetDescriptor {
  // Raw JSON descriptor string; validated at construction time.
  protected String descriptor;
  // Dot-separated path to the compliance field inside the descriptor, if any.
  protected Optional<String> complianceFieldPath;
  public DatasetDescriptor(String descriptor, Optional<String> complianceFieldPath) {
    checkValidJsonStr(descriptor);
    this.descriptor = descriptor;
    this.complianceFieldPath = complianceFieldPath;
  }
  /** Resolves and returns the compliance field named by the descriptor. */
  public abstract String getComplianceField();
  /**
   * Validates that the given string parses as JSON; on failure the error is
   * logged and the parse exception is rethrown.
   */
  protected void checkValidJsonStr(String jsonStr) {
    JsonParser parser = new JsonParser();
    try {
      parser.parse(jsonStr);
    } catch (JsonParseException e) {
      log.warn("Not a valid JSON String : " + jsonStr);
      Throwables.propagate(e);
    }
  }
}
| 3,166 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/HivePartitionFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.commons.lang.NotImplementedException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.AutoReturnableObject;
import org.apache.gobblin.util.WriterUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * A finder class to find {@link HivePartitionDataset}s.
 *
 * @author adsharma
 */
@Slf4j
public class HivePartitionFinder implements DatasetsFinder<HivePartitionDataset> {
  // Hive datasets matched by the compliance whitelist; populated in the constructor.
  protected List<HiveDataset> hiveDatasets;
  protected State state;
  // Splits "db@table@partition" style names into their components.
  private static final Splitter AT_SPLITTER = Splitter.on("@").omitEmptyStrings().trimResults();
  // Shared metastore client pool, lazily created under the class-wide lock.
  private static Optional<HiveMetastoreClientPool> pool = Optional.<HiveMetastoreClientPool>absent();
  private static final Object lock = new Object();
  public HivePartitionFinder(State state)
      throws IOException {
    // Defensive copy: later mutations of the caller's state are not observed.
    this.state = new State(state);
    this.hiveDatasets = getHiveDatasets(WriterUtils.getWriterFs(this.state), this.state);
  }
  // Translates the compliance whitelist into the hive-dataset whitelist and
  // enumerates matching Hive datasets via HiveDatasetFinder.
  private static List<HiveDataset> getHiveDatasets(FileSystem fs, State state)
      throws IOException {
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.COMPLIANCE_DATASET_WHITELIST),
        "Missing required property " + ComplianceConfigurationKeys.COMPLIANCE_DATASET_WHITELIST);
    Properties prop = new Properties();
    prop.setProperty(ComplianceConfigurationKeys.HIVE_DATASET_WHITELIST,
        state.getProp(ComplianceConfigurationKeys.COMPLIANCE_DATASET_WHITELIST));
    HiveDatasetFinder finder = new HiveDatasetFinder(fs, prop);
    return finder.findDatasets();
  }
  /**
   * Will find all datasets according to whitelist, except the backup, trash and staging tables.
   */
  @Override
  public List<HivePartitionDataset> findDatasets()
      throws IOException {
    List<HivePartitionDataset> list = new ArrayList<>();
    // One HivePartitionDataset per partition of every whitelisted Hive dataset.
    for (HiveDataset hiveDataset : this.hiveDatasets) {
      for (Partition partition : hiveDataset.getPartitionsFromDataset()) {
        list.add(new HivePartitionDataset(partition));
      }
    }
    // The configured (or default) selection policy filters the candidate list.
    String selectionPolicyString = this.state.getProp(ComplianceConfigurationKeys.DATASET_SELECTION_POLICY_CLASS,
        ComplianceConfigurationKeys.DEFAULT_DATASET_SELECTION_POLICY_CLASS);
    Policy<HivePartitionDataset> selectionPolicy =
        GobblinConstructorUtils.invokeConstructor(Policy.class, selectionPolicyString);
    return selectionPolicy.selectedList(list);
  }
  /**
   * Looks up a single partition by its complete "db@table@partition" name via the
   * shared metastore client pool.
   *
   * @param completePartitionName name of the form db@table@partition
   * @param prop job state (currently unused in the lookup itself)
   * @throws IOException if the metastore lookup fails
   */
  public static HivePartitionDataset findDataset(String completePartitionName, State prop)
      throws IOException {
    synchronized (lock) {
      List<String> partitionList = AT_SPLITTER.splitToList(completePartitionName);
      Preconditions.checkArgument(partitionList.size() == 3, "Invalid partition name");
      if (!pool.isPresent()) {
        // NOTE(review): new Properties().getProperty(...) is always null here (fresh,
        // empty Properties), so the pool is created without an explicit metastore URI —
        // confirm whether the job properties were meant to be consulted instead.
        pool = Optional.of(HiveMetastoreClientPool.get(new Properties(),
            Optional.fromNullable(new Properties().getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY))));
      }
      try (AutoReturnableObject<IMetaStoreClient> client = pool.get().getClient()) {
        Table table = new Table(client.get().getTable(partitionList.get(0), partitionList.get(1)));
        Partition partition = new Partition(table,
            client.get().getPartition(partitionList.get(0), partitionList.get(1), partitionList.get(2)));
        return new HivePartitionDataset(partition);
      } catch (TException | HiveException e) {
        throw new IOException(e);
      }
    }
  }
  @Override
  public Path commonDatasetRoot() {
    // Not implemented by this method
    throw new NotImplementedException();
  }
}
| 3,167 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/ComplianceConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
/**
* Class containing keys for the configuration properties needed for gobblin-compliance
*
* @author adsharma
*/
public class ComplianceConfigurationKeys {
public static final String COMPLIANCE_PREFIX = "gobblin.compliance";
public static final String TRASH = "_trash_";
public static final String BACKUP = "_backup_";
public static final String STAGING = "_staging_";
public static final String EXTERNAL = "EXTERNAL";
public static final String TIMESTAMP = COMPLIANCE_PREFIX + ".job.timestamp";
public static final String HIVE_DATASET_WHITELIST = "hive.dataset.whitelist";
public static final String COMPLIANCE_DATASET_WHITELIST = COMPLIANCE_PREFIX + ".dataset.whitelist";
public static final String PARTITION_NAME = COMPLIANCE_PREFIX + ".partition.name";
public static final int TIME_STAMP_LENGTH = 13;
public static final String DBNAME_SEPARATOR = "__";
public static final String SPECIFY_PARTITION_FORMAT = COMPLIANCE_PREFIX + ".specifyPartitionFormat";
public static final boolean DEFAULT_SPECIFY_PARTITION_FORMAT = false;
public static final String NUM_ROWS = "numRows";
public static final String RAW_DATA_SIZE = "rawDataSize";
public static final String TOTAL_SIZE = "totalSize";
public static final int DEFAULT_NUM_ROWS = -1;
public static final int DEFAULT_RAW_DATA_SIZE = -1;
public static final int DEFAULT_TOTAL_SIZE = -1;
public static final String WORKUNIT_RECORDSREAD = COMPLIANCE_PREFIX + ".workunit.recordsRead";
public static final String WORKUNIT_RECORDSWRITTEN = COMPLIANCE_PREFIX + ".workunit.recordsWritten";
public static final String WORKUNIT_BYTESREAD = COMPLIANCE_PREFIX + ".workunit.bytesRead";
public static final String WORKUNIT_BYTESWRITTEN = COMPLIANCE_PREFIX + ".workunit.bytesWritten";
public static final String EXECUTION_COUNT = COMPLIANCE_PREFIX + ".workunit.executionCount";
public static final String TOTAL_EXECUTIONS = COMPLIANCE_PREFIX + ".workunit.totalExecutions";
public static final int DEFAULT_EXECUTION_COUNT = 1;
public static final String MAX_CONCURRENT_DATASETS = COMPLIANCE_PREFIX + ".max.concurrent.datasets";
public static final String DEFAULT_MAX_CONCURRENT_DATASETS = "100";
public static final String HIVE_SERVER2_PROXY_USER = "hive.server2.proxy.user=";
public static final String HIVE_JDBC_URL = COMPLIANCE_PREFIX + ".hive.jdbc.url";
public static final String HIVE_SETTINGS = COMPLIANCE_PREFIX + ".hive.settings";
public static final String REAPER_RETENTION_DAYS = COMPLIANCE_PREFIX + ".reaper.retention.days";
public static final String CLEANER_BACKUP_RETENTION_VERSIONS =
COMPLIANCE_PREFIX + ".cleaner.backup.retention.versions";
public static final String CLEANER_BACKUP_RETENTION_DAYS = COMPLIANCE_PREFIX + ".cleaner.backup.retention.days";
public static final String CLEANER_TRASH_RETENTION_DAYS = COMPLIANCE_PREFIX + ".cleaner.trash.retention.days";
public static final String BACKUP_DB = COMPLIANCE_PREFIX + ".backup.db";
public static final String BACKUP_DIR = COMPLIANCE_PREFIX + ".backup.dir";
public static final String BACKUP_OWNER = COMPLIANCE_PREFIX + ".backup.owner";
public static final String TRASH_DB = COMPLIANCE_PREFIX + ".trash.db";
public static final String TRASH_DIR = COMPLIANCE_PREFIX + ".trash.dir";
public static final String TRASH_OWNER = COMPLIANCE_PREFIX + ".trash.owner";
public static final String HIVE_VERSIONS_WHITELIST = COMPLIANCE_PREFIX + ".hive.versions.whitelist";
public static final String SHOULD_DROP_EMPTY_TABLES = COMPLIANCE_PREFIX + ".drop.empty.tables";
public static final String DEFAULT_SHOULD_DROP_EMPTY_TABLES = "false";
public static final String COMPLIANCE_JOB_SIMULATE = COMPLIANCE_PREFIX + ".simulate";
public static final boolean DEFAULT_COMPLIANCE_JOB_SIMULATE = false;
public static final String GOBBLIN_COMPLIANCE_JOB_CLASS = COMPLIANCE_PREFIX + ".job.class";
public static final String GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS = COMPLIANCE_PREFIX + ".dataset.finder.class";
public static final String GOBBLIN_COMPLIANCE_PROXY_USER = COMPLIANCE_PREFIX + ".proxy.user";
public static final boolean GOBBLIN_COMPLIANCE_DEFAULT_SHOULD_PROXY = false;
public static final String GOBBLIN_COMPLIANCE_SHOULD_PROXY = COMPLIANCE_PREFIX + ".should.proxy";
public static final String GOBBLIN_COMPLIANCE_SUPER_USER = COMPLIANCE_PREFIX + ".super.user";
public static final String RETENTION_VERSION_FINDER_CLASS_KEY = COMPLIANCE_PREFIX + ".retention.version.finder.class";
public static final String RETENTION_SELECTION_POLICY_CLASS_KEY =
COMPLIANCE_PREFIX + ".retention.selection.policy.class";
public static final String DATASET_SELECTION_POLICY_CLASS = COMPLIANCE_PREFIX + ".dataset.selection.policy.class";
public static final String DEFAULT_DATASET_SELECTION_POLICY_CLASS = "org.apache.gobblin.compliance.HivePartitionDatasetPolicy";
public static final String PURGER_COMMIT_POLICY_CLASS = COMPLIANCE_PREFIX + ".purger.commit.policy.class";
public static final String DEFAULT_PURGER_COMMIT_POLICY_CLASS = "org.apache.gobblin.compliance.purger.HivePurgerCommitPolicy";
public static final String RETENTION_VERSION_CLEANER_CLASS_KEY =
COMPLIANCE_PREFIX + ".retention.version.cleaner.class";
public static final String RESTORE_DATASET = COMPLIANCE_PREFIX + ".restore.dataset";
public static final String RESTORE_POLICY_CLASS = COMPLIANCE_PREFIX + ".restore.policy.class";
public static final String DATASET_TO_RESTORE = COMPLIANCE_PREFIX + ".dataset.to.restore";
public static final String PURGE_POLICY_CLASS = COMPLIANCE_PREFIX + ".purge.policy.class";
public static final String VALIDATION_POLICY_CLASS = COMPLIANCE_PREFIX + ".validation.policy.class";
// Name of the column in the compliance id table containing compliance id.
public static final String COMPLIANCEID_KEY = COMPLIANCE_PREFIX + ".purger.complianceId";
// Path to the compliance id in the dataset descriptor json object.
public static final String DATASET_DESCRIPTOR_FIELDPATH = COMPLIANCE_PREFIX + ".dataset.descriptor.fieldPath";
public static final String DATASET_DESCRIPTOR_CLASS = COMPLIANCE_PREFIX + ".dataset.descriptor.class";
public static final String DEFAULT_DATASET_DESCRIPTOR_CLASS = "org.apache.gobblin.compliance.DatasetDescriptorImpl";
// Name of the table containing the compliance ids based on which purging will take place.
public static final String COMPLIANCE_ID_TABLE_KEY = COMPLIANCE_PREFIX + ".purger.complianceIdTable";
/**
* Configuration keys for the dataset descriptor which will be present in tblproperties of a Hive table.
*/
public static final String DATASET_DESCRIPTOR_KEY = "dataset.descriptor";
public static final String HIVE_PURGER_WATERMARK = COMPLIANCE_PREFIX + ".purger.watermark";
public static final String NO_PREVIOUS_WATERMARK = COMPLIANCE_PREFIX + ".purger.noWatermark";
public static final String MAX_WORKUNITS_KEY = COMPLIANCE_PREFIX + ".purger.maxWorkunits";
/**
* Configuration keys for the execution attempts of a work unit.
*/
public static final String EXECUTION_ATTEMPTS = COMPLIANCE_PREFIX + ".workunits.executionAttempts";
public static final String MAX_WORKUNIT_EXECUTION_ATTEMPTS_KEY =
COMPLIANCE_PREFIX + ".workunits.maxExecutionAttempts";
public static final int DEFAULT_EXECUTION_ATTEMPTS = 1;
public static final int DEFAULT_MAX_WORKUNIT_EXECUTION_ATTEMPTS = 3;
public static final int DEFAULT_MAX_WORKUNITS = 5;
}
| 3,168 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/HivePartitionDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Optional;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.hive.HiveSerDeWrapper;
/**
* A class to represent Hive Partition Dataset object used for gobblin-compliance
*
* @author adsharma
*/
public class HivePartitionDataset implements Dataset {
private Partition hivePartition;
public HivePartitionDataset(Partition partition) {
this.hivePartition = partition;
}
public HivePartitionDataset(HivePartitionDataset hivePartitionDataset) {
this.hivePartition = hivePartitionDataset.hivePartition;
}
/**
* Will return complete partition name i.e. dbName@tableName@partitionName
*/
@Override
public String datasetURN() {
return this.hivePartition.getCompleteName();
}
public Path getLocation() {
return this.hivePartition.getDataLocation();
}
public Path getTableLocation() {
return this.hivePartition.getTable().getDataLocation();
}
public String getTableName() {
return this.hivePartition.getTable().getTableName();
}
public String getDbName() {
return this.hivePartition.getTable().getDbName();
}
public String getName() {
return this.hivePartition.getName();
}
public Map<String, String> getSpec() {
return this.hivePartition.getSpec();
}
public Map<String, String> getTableParams() {
return this.hivePartition.getTable().getParameters();
}
public Map<String, String> getParams() {
return this.hivePartition.getParameters();
}
public Properties getTableMetadata() {
return this.hivePartition.getTable().getMetadata();
}
public List<FieldSchema> getCols() {
return this.hivePartition.getTable().getCols();
}
public Optional<String> getFileFormat() {
String serdeLib = this.hivePartition.getTPartition().getSd().getSerdeInfo().getSerializationLib();
for (HiveSerDeWrapper.BuiltInHiveSerDe hiveSerDe : HiveSerDeWrapper.BuiltInHiveSerDe.values()) {
if (hiveSerDe.toString().equalsIgnoreCase(serdeLib)) {
return Optional.fromNullable(hiveSerDe.name());
}
}
return Optional.<String>absent();
}
/**
* @return the owner of the corresponding hive table
*/
public Optional<String> getOwner() {
return Optional.fromNullable(this.hivePartition.getTable().getOwner());
}
}
| 3,169 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/HivePartitionDatasetPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.util.ArrayList;
import java.util.List;
/**
* This policy checks if a Hive Partition is a valid dataset.
* Hive table to which Hive partition belongs must be an external table.
* Hive table name must not contain "_trash_", "_staging_" and "_backup_"
* in it's name since they will considered as {@link HivePartitionVersion} rather
* than {@link HivePartitionDataset}
*/
public class HivePartitionDatasetPolicy implements Policy<HivePartitionDataset> {
@Override
public boolean shouldSelect(HivePartitionDataset dataset) {
if (dataset.getTableName().contains(ComplianceConfigurationKeys.TRASH)) {
return false;
} else if (dataset.getTableName().contains(ComplianceConfigurationKeys.BACKUP)) {
return false;
} else if (dataset.getTableName().contains(ComplianceConfigurationKeys.STAGING)) {
return false;
} else {
return dataset.getTableMetadata().containsKey(ComplianceConfigurationKeys.EXTERNAL);
}
}
@Override
public List<HivePartitionDataset> selectedList(List<HivePartitionDataset> datasets) {
List<HivePartitionDataset> selectedDatasetList = new ArrayList<>();
for (HivePartitionDataset dataset : datasets) {
if (shouldSelect(dataset)) {
selectedDatasetList.add(dataset);
}
}
return selectedDatasetList;
}
}
| 3,170 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/Policy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance;
import java.util.List;
public interface Policy<T> {
boolean shouldSelect(T dataset);
List<T> selectedList(List<T> datasets);
}
| 3,171 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/azkaban/ComplianceAzkabanJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.azkaban;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;
import com.google.common.base.Preconditions;
import azkaban.jobExecutor.AbstractJob;
import azkaban.utils.Props;
import org.apache.gobblin.compliance.ComplianceJob;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.apache.gobblin.compliance.ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_JOB_CLASS;
/**
* Class to run compliance job on Azkaban.
* A property gobblin.compliance.job.class needs to be passed and the appropriate compliance job will run.
*
* @author adsharma
*/
public class ComplianceAzkabanJob extends AbstractJob implements Tool {
private Configuration conf;
private ComplianceJob complianceJob;
public static void main(String[] args)
throws Exception {
ToolRunner.run(new ComplianceAzkabanJob(ComplianceAzkabanJob.class.getName()), args);
}
public ComplianceAzkabanJob(String id)
throws Exception {
super(id, Logger.getLogger(ComplianceAzkabanJob.class));
}
public ComplianceAzkabanJob(String id, Props props)
throws IOException {
super(id, Logger.getLogger(ComplianceAzkabanJob.class));
this.conf = new Configuration();
// new prop
Properties properties = props.toProperties();
Preconditions.checkArgument(properties.containsKey(GOBBLIN_COMPLIANCE_JOB_CLASS),
"Missing required property " + GOBBLIN_COMPLIANCE_JOB_CLASS);
String complianceJobClass = properties.getProperty(GOBBLIN_COMPLIANCE_JOB_CLASS);
this.complianceJob = GobblinConstructorUtils.invokeConstructor(ComplianceJob.class, complianceJobClass, properties);
}
@Override
public void run()
throws Exception {
if (this.complianceJob != null) {
this.complianceJob.run();
}
}
@Override
public int run(String[] args)
throws Exception {
if (args.length < 1) {
System.out.println("Must provide properties file as first argument.");
return 1;
}
Props props = new Props(null, args[0]);
new ComplianceAzkabanJob(ComplianceAzkabanJob.class.getName(), props).run();
return 0;
}
@Override
public void setConf(Configuration configuration) {
this.conf = configuration;
}
@Override
public Configuration getConf() {
return this.conf;
}
}
| 3,172 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/ComplianceRetentionJob.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import edu.umd.cs.findbugs.annotations.SuppressWarnings;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.ComplianceEvents;
import org.apache.gobblin.compliance.ComplianceJob;
import org.apache.gobblin.compliance.HiveProxyQueryExecutor;
import org.apache.gobblin.compliance.purger.HivePurgerQueryTemplate;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.hive.HiveDataset;
import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.apache.gobblin.compliance.ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS;
/**
* A compliance job for compliance retention requirements.
*/
@Slf4j
@SuppressWarnings
public class ComplianceRetentionJob extends ComplianceJob {
public static final List<String> tableNamesList = new ArrayList<>();
public static HiveMetastoreClientPool pool;
private List<HiveDataset> tablesToDrop = new ArrayList();
public ComplianceRetentionJob(Properties properties) {
super(properties);
try {
this.pool = HiveMetastoreClientPool
.get(properties, Optional.fromNullable(properties.getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
initDatasetFinder(properties);
ProxyUtils.cancelTokens(new State(properties));
} catch (InterruptedException | TException | IOException e) {
Throwables.propagate(e);
}
}
public void initDatasetFinder(Properties properties)
throws IOException {
Preconditions.checkArgument(properties.containsKey(GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS),
"Missing required propety " + GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
String finderClass = properties.getProperty(GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
this.finder = GobblinConstructorUtils.invokeConstructor(DatasetsFinder.class, finderClass, new State(properties));
Iterator<HiveDataset> datasetsIterator =
new HiveDatasetFinder(FileSystem.newInstance(new Configuration()), properties).getDatasetsIterator();
while (datasetsIterator.hasNext()) {
// Drop partitions from empty tables if property is set, otherwise skip the table
HiveDataset hiveDataset = datasetsIterator.next();
List<Partition> partitionsFromDataset = hiveDataset.getPartitionsFromDataset();
String completeTableName = hiveDataset.getTable().getCompleteName();
if (!partitionsFromDataset.isEmpty()) {
this.tableNamesList.add(completeTableName);
continue;
}
if (!Boolean.parseBoolean(properties.getProperty(ComplianceConfigurationKeys.SHOULD_DROP_EMPTY_TABLES,
ComplianceConfigurationKeys.DEFAULT_SHOULD_DROP_EMPTY_TABLES))) {
continue;
}
if (completeTableName.contains(ComplianceConfigurationKeys.TRASH) || completeTableName
.contains(ComplianceConfigurationKeys.BACKUP) || completeTableName
.contains(ComplianceConfigurationKeys.STAGING)) {
this.tablesToDrop.add(hiveDataset);
}
}
}
public void run()
throws IOException {
// Dropping empty tables
for (HiveDataset dataset : this.tablesToDrop) {
log.info("Dropping table: " + dataset.getTable().getCompleteName());
executeDropTableQuery(dataset, this.properties);
}
Preconditions.checkNotNull(this.finder, "Dataset finder class is not set");
List<Dataset> datasets = this.finder.findDatasets();
this.finishCleanSignal = Optional.of(new CountDownLatch(datasets.size()));
for (final Dataset dataset : datasets) {
ListenableFuture<Void> future = this.service.submit(new Callable<Void>() {
@Override
public Void call()
throws Exception {
if (dataset instanceof CleanableDataset) {
((CleanableDataset) dataset).clean();
} else {
log.warn(
"Not an instance of " + CleanableDataset.class + " Dataset won't be cleaned " + dataset.datasetURN());
}
return null;
}
});
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onSuccess(@Nullable Void result) {
ComplianceRetentionJob.this.finishCleanSignal.get().countDown();
log.info("Successfully cleaned: " + dataset.datasetURN());
}
@Override
public void onFailure(Throwable t) {
ComplianceRetentionJob.this.finishCleanSignal.get().countDown();
log.warn("Exception caught when cleaning " + dataset.datasetURN() + ".", t);
ComplianceRetentionJob.this.throwables.add(t);
ComplianceRetentionJob.this.eventSubmitter.submit(ComplianceEvents.Retention.FAILED_EVENT_NAME, ImmutableMap
.of(ComplianceEvents.FAILURE_CONTEXT_METADATA_KEY, ExceptionUtils.getFullStackTrace(t),
ComplianceEvents.DATASET_URN_METADATA_KEY, dataset.datasetURN()));
}
});
}
}
@Override
public void close()
throws IOException {
try {
if (this.finishCleanSignal.isPresent()) {
this.finishCleanSignal.get().await();
}
if (!this.throwables.isEmpty()) {
for (Throwable t : this.throwables) {
log.error("Failed clean due to ", t);
}
throw new RuntimeException("Retention failed for one or more datasets");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException("Not all datasets finish retention", e);
} finally {
ExecutorsUtils.shutdownExecutorService(this.service, Optional.of(log));
this.closer.close();
}
}
private static void executeDropTableQuery(HiveDataset hiveDataset, Properties properties)
throws IOException {
String dbName = hiveDataset.getTable().getDbName();
String tableName = hiveDataset.getTable().getTableName();
Optional<String> datasetOwner = Optional.fromNullable(hiveDataset.getTable().getOwner());
try (HiveProxyQueryExecutor hiveProxyQueryExecutor = ProxyUtils
.getQueryExecutor(new State(properties), datasetOwner)) {
hiveProxyQueryExecutor.executeQuery(HivePurgerQueryTemplate.getDropTableQuery(dbName, tableName), datasetOwner);
} catch (SQLException e) {
throw new IOException(e);
}
}
/**
* Generates retention metrics for the instrumentation of this class.
*/
@Override
protected void regenerateMetrics() {
// TODO
}
}
| 3,173 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionRetentionVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Preconditions;
import edu.umd.cs.findbugs.annotations.SuppressWarnings;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionVersion;
/**
* A version class corresponding to the {@link CleanableHivePartitionDataset}
*
* @author adsharma
*/
@SuppressWarnings
public class HivePartitionRetentionVersion extends HivePartitionVersion {
public HivePartitionRetentionVersion(Partition version) {
super(version);
}
@Override
public int compareTo(HivePartitionVersion version) {
long thisTime = Long.parseLong(getTimeStamp(this));
long otherTime = Long.parseLong(getTimeStamp((HivePartitionRetentionVersion) version));
return Long.compare(otherTime, thisTime);
}
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj instanceof HivePartitionVersion) {
return compareTo((HivePartitionVersion) obj) == 0;
}
return false;
}
public long getAgeInMilliSeconds(HivePartitionRetentionVersion version) {
Preconditions.checkArgument(getTimeStamp(version).length() == ComplianceConfigurationKeys.TIME_STAMP_LENGTH,
"Invalid time stamp for dataset : " + version.datasetURN() + " time stamp is :" + getTimeStamp(version));
return System.currentTimeMillis() - Long.parseLong(getTimeStamp(version));
}
public Long getAgeInMilliSeconds() {
return getAgeInMilliSeconds(this);
}
public static String getTimeStamp(HivePartitionRetentionVersion version) {
return version.getTableName().substring(version.getTableName().lastIndexOf("_") + 1);
}
public String getTimeStamp() {
return getTimeStamp(this);
}
}
| 3,174 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionVersionRetentionReaper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionVersion;
import org.apache.gobblin.compliance.HiveProxyQueryExecutor;
import org.apache.gobblin.compliance.purger.HivePurgerQueryTemplate;
import org.apache.gobblin.compliance.utils.PartitionUtils;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.util.HadoopUtils;
import static org.apache.gobblin.compliance.purger.HivePurgerQueryTemplate.getDropPartitionQuery;
import static org.apache.gobblin.compliance.purger.HivePurgerQueryTemplate.getUseDbQuery;
/**
* Class to move/clean backups/staging partitions.
*
* @author adsharma
*/
@Slf4j
public class HivePartitionVersionRetentionReaper extends HivePartitionVersionRetentionRunner {
  // Proxied FileSystem for the version owner; initialized in clean() before any data operation.
  private FileSystem versionOwnerFs;
  // If true, movable versions are only logged, not moved or deleted.
  private boolean simulate;
  // Owner of this version's data, read from the dataset version itself.
  private Optional<String> versionOwner = Optional.absent();
  // User owning the backup db/location, from the required BACKUP_OWNER property.
  private Optional<String> backUpOwner = Optional.absent();

  /**
   * Builds a reaper for a single dataset version.
   *
   * @param dataset the dataset this version belongs to
   * @param version the version to reap (expected to be a {@link HivePartitionRetentionVersion})
   * @param nonDeletableVersionLocations locations that must not be deleted
   * @param state job state; must contain {@link ComplianceConfigurationKeys#BACKUP_OWNER}
   */
  public HivePartitionVersionRetentionReaper(CleanableDataset dataset, DatasetVersion version,
      List<String> nonDeletableVersionLocations, State state) {
    super(dataset, version, nonDeletableVersionLocations, state);
    this.versionOwner = ((HivePartitionVersion) this.datasetVersion).getOwner();
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.BACKUP_OWNER),
        "Missing required property " + ComplianceConfigurationKeys.BACKUP_OWNER);
    this.backUpOwner = Optional.fromNullable(this.state.getProp(ComplianceConfigurationKeys.BACKUP_OWNER));
    this.simulate = this.state.getPropAsBoolean(ComplianceConfigurationKeys.COMPLIANCE_JOB_SIMULATE,
        ComplianceConfigurationKeys.DEFAULT_COMPLIANCE_JOB_SIMULATE);
  }

  /**
   * If simulate is set to true, will simply return.
   * If a version is pointing to a non-existing location, then drop the partition and close the jdbc connection.
   * If a version is pointing to the same location as of the dataset, then drop the partition and close the jdbc connection.
   * If a version is staging, its data will be deleted and metadata is dropped.
   * If a version is backup, its data will be moved to a backup dir, current metadata will be dropped and it will
   * be registered in the backup db.
   */
  @Override
  public void clean()
      throws IOException {
    Path versionLocation = ((HivePartitionRetentionVersion) this.datasetVersion).getLocation();
    Path datasetLocation = ((CleanableHivePartitionDataset) this.cleanableDataset).getLocation();
    String completeName = ((HivePartitionRetentionVersion) this.datasetVersion).datasetURN();
    State state = new State(this.state);
    this.versionOwnerFs = ProxyUtils.getOwnerFs(state, this.versionOwner);
    // Executor is proxied for both owners; try-with-resources closes the jdbc connection.
    try (HiveProxyQueryExecutor queryExecutor = ProxyUtils
        .getQueryExecutor(state, this.versionOwner, this.backUpOwner)) {
      if (!this.versionOwnerFs.exists(versionLocation)) {
        // Data already gone: fall through and only drop the Hive metadata below.
        log.info("Data versionLocation doesn't exist. Metadata will be dropped for the version " + completeName);
      } else if (datasetLocation.toString().equalsIgnoreCase(versionLocation.toString())) {
        // Version shares the live dataset's location: never touch the data, only drop metadata.
        log.info(
            "Dataset location is same as version location. Won't delete the data but metadata will be dropped for the version "
                + completeName);
      } else if (this.simulate) {
        // NOTE(review): simulate is checked only after the two metadata-only branches above,
        // so metadata drops still happen in simulate mode for those cases — confirm intended.
        log.info("Simulate is set to true. Won't move the version " + completeName);
        return;
      } else if (completeName.contains(ComplianceConfigurationKeys.STAGING)) {
        // Staging versions are disposable: recursively delete data, then drop metadata.
        log.info("Deleting data from version " + completeName);
        this.versionOwnerFs.delete(versionLocation, true);
      } else if (completeName.contains(ComplianceConfigurationKeys.BACKUP)) {
        // Backup versions: register in the backup db, move data under the backup dir,
        // then set permissions so both owners can access the moved data.
        executeAlterQueries(queryExecutor);
        Path newVersionLocationParent = getNewVersionLocation().getParent();
        log.info("Creating new dir " + newVersionLocationParent.toString());
        this.versionOwnerFs.mkdirs(newVersionLocationParent);
        log.info("Moving data from " + versionLocation + " to " + getNewVersionLocation());
        fsMove(versionLocation, getNewVersionLocation());
        // rwxrwx--- : full access for owner and group, none for others.
        FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
        HadoopUtils
            .setPermissions(newVersionLocationParent, this.versionOwner, this.backUpOwner, this.versionOwnerFs,
                permission);
      }
      // Every non-simulated path finishes by dropping this version's partition metadata.
      executeDropVersionQueries(queryExecutor);
    }
  }

  // These methods are not implemented by this class
  @Override
  public void preCleanAction() {
  }

  @Override
  public void postCleanAction() {
  }

  /**
   * Registers this version in the backup db: switches to BACKUP_DB, creates the backup table
   * (modeled on the original table) and adds a partition pointing at the new (post-move)
   * version location. SQLExceptions are wrapped in IOException.
   */
  private void executeAlterQueries(HiveProxyQueryExecutor queryExecutor)
      throws IOException {
    HivePartitionRetentionVersion version = (HivePartitionRetentionVersion) this.datasetVersion;
    String partitionSpecString = PartitionUtils.getPartitionSpecString(version.getSpec());
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.BACKUP_DB),
        "Missing required property " + ComplianceConfigurationKeys.BACKUP_DB);
    String backUpDb = this.state.getProp(ComplianceConfigurationKeys.BACKUP_DB);
    String backUpTableName = getVersionTableName(version);
    try {
      queryExecutor.executeQuery(HivePurgerQueryTemplate.getUseDbQuery(backUpDb), this.backUpOwner);
      queryExecutor.executeQuery(HivePurgerQueryTemplate
          .getCreateTableQuery(backUpDb + "." + backUpTableName, version.getDbName(), version.getTableName(),
              getBackUpTableLocation(version)), this.backUpOwner);
      // Optionally carry the partition's file format into the ADD PARTITION statement.
      Optional<String> fileFormat = Optional.absent();
      if (this.state.getPropAsBoolean(ComplianceConfigurationKeys.SPECIFY_PARTITION_FORMAT,
          ComplianceConfigurationKeys.DEFAULT_SPECIFY_PARTITION_FORMAT)) {
        fileFormat = version.getFileFormat();
      }
      queryExecutor.executeQuery(HivePurgerQueryTemplate
          .getAddPartitionQuery(backUpTableName, partitionSpecString, fileFormat,
              Optional.fromNullable(getNewVersionLocation().toString())), this.backUpOwner);
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }

  /**
   * Drops this version's partition from its original table, proxied as the version owner.
   * SQLExceptions are wrapped in IOException to satisfy clean()'s contract.
   */
  private void executeDropVersionQueries(HiveProxyQueryExecutor queryExecutor)
      throws IOException {
    HivePartitionRetentionVersion version = (HivePartitionRetentionVersion) this.datasetVersion;
    String partitionSpec = PartitionUtils.getPartitionSpecString(version.getSpec());
    try {
      queryExecutor.executeQuery(getUseDbQuery(version.getDbName()), this.versionOwner);
      queryExecutor.executeQuery(getDropPartitionQuery(version.getTableName(), partitionSpec), this.versionOwner);
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }

  // The backup table reuses the original version's table name.
  private String getVersionTableName(HivePartitionVersion version) {
    return version.getTableName();
  }

  /**
   * Location for the backup table itself: TRASH_DIR joined with the version's table name.
   */
  private String getBackUpTableLocation(HivePartitionVersion version) {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_DIR),
        "Missing required property " + ComplianceConfigurationKeys.TRASH_DIR);
    return StringUtils
        .join(Arrays.asList(this.state.getProp(ComplianceConfigurationKeys.TRASH_DIR), getVersionTableName(version)),
            '/');
  }

  /**
   * Destination path for this version's data under BACKUP_DIR. When the version location's
   * last segment is already a unix timestamp, that segment is replaced (the scheme-less
   * parent path is used); otherwise the version timestamp is appended to the full path.
   */
  private Path getNewVersionLocation() {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.BACKUP_DIR),
        "Missing required property " + ComplianceConfigurationKeys.BACKUP_DIR);
    HivePartitionRetentionVersion version = (HivePartitionRetentionVersion) this.datasetVersion;
    if (PartitionUtils.isUnixTimeStamp(version.getLocation().getName())) {
      return new Path(StringUtils.join(Arrays.asList(this.state.getProp(ComplianceConfigurationKeys.BACKUP_DIR),
          Path.getPathWithoutSchemeAndAuthority(version.getLocation().getParent()).toString(), version.getTimeStamp()), '/'));
    } else {
      return new Path(StringUtils.join(Arrays.asList(this.state.getProp(ComplianceConfigurationKeys.BACKUP_DIR),
          Path.getPathWithoutSchemeAndAuthority(version.getLocation()).toString(), version.getTimeStamp()), '/'));
    }
  }

  /**
   * Moves version data to the backup location. When the source dir is itself named by a unix
   * timestamp, the whole dir is renamed into the destination's parent; otherwise only the
   * top-level files of the source dir are renamed to the destination path.
   * NOTE(review): the non-timestamp branch renames each file onto the same `to` path; only
   * `to`'s parent is created by the caller — confirm the intended rename-into-directory
   * semantics on the target FileSystem.
   */
  private void fsMove(Path from, Path to)
      throws IOException {
    if (PartitionUtils.isUnixTimeStamp(from.getName())) {
      this.versionOwnerFs.rename(from, to.getParent());
    } else {
      for (FileStatus fileStatus : this.versionOwnerFs.listStatus(from)) {
        if (fileStatus.isFile()) {
          this.versionOwnerFs.rename(fileStatus.getPath(), to);
        }
      }
    }
  }
}
| 3,175 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/CleanableHivePartitionDatasetFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionFinder;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.WriterUtils;
/**
* A dataset finder class to find all the {@link CleanableHivePartitionDataset} based on the whitelist.
*
* @author adsharma
*/
public class CleanableHivePartitionDatasetFinder extends HivePartitionFinder {
protected FileSystem fs;
public CleanableHivePartitionDatasetFinder(State state)
throws IOException {
this(WriterUtils.getWriterFs(new State(state)), state);
}
public CleanableHivePartitionDatasetFinder(FileSystem fs, State state)
throws IOException {
super(state);
this.fs = fs;
}
/**
* Will find all datasets according to whitelist, except the backup and staging tables.
*/
public List<HivePartitionDataset> findDatasets()
throws IOException {
List<HivePartitionDataset> list = new ArrayList<>();
for (HivePartitionDataset hivePartitionDataset : super.findDatasets()) {
CleanableHivePartitionDataset dataset =
new CleanableHivePartitionDataset(hivePartitionDataset, this.fs, this.state);
list.add(dataset);
}
return list;
}
}
| 3,176 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionVersionRetentionRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import java.util.List;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.version.VersionCleaner;
import org.apache.gobblin.data.management.version.DatasetVersion;
/**
* An abstract class for handling retention of {@link HivePartitionRetentionVersion}
*
* @author adsharma
*/
public abstract class HivePartitionVersionRetentionRunner extends VersionCleaner {
  // Version locations subclasses must not delete data from.
  protected List<String> nonDeletableVersionLocations;
  // Defensive copy of the job state, shared with subclasses.
  protected State state;

  /**
   * @param dataset the dataset the version belongs to
   * @param version the version this runner will clean
   * @param nonDeletableVersionLocations locations that must be preserved
   * @param state job state; copied so later mutations by the caller are not observed
   */
  public HivePartitionVersionRetentionRunner(CleanableDataset dataset, DatasetVersion version,
      List<String> nonDeletableVersionLocations, State state) {
    super(version, dataset);
    this.state = new State(state);
    this.nonDeletableVersionLocations = nonDeletableVersionLocations;
  }
}
| 3,177 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionVersionRetentionCleanerPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import com.google.common.base.Preconditions;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionVersion;
import org.apache.gobblin.compliance.HivePartitionVersionPolicy;
import org.apache.gobblin.configuration.State;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
/**
* A retention version policy for the {@link HivePartitionRetentionVersion}.
*
* @author adsharma
*/
@Slf4j
public class HivePartitionVersionRetentionCleanerPolicy extends HivePartitionVersionPolicy {
  /**
   * Maximum number of backups to be retained.
   */
  private int backupRetentionVersions;
  /**
   * Maximum age for a retained backup.
   */
  private int backupRetentionDays;
  /**
   * Maximum age for a retained trash partition.
   */
  private int trashRetentionDays;

  /**
   * @param state must contain CLEANER_BACKUP_RETENTION_VERSIONS, CLEANER_BACKUP_RETENTION_DAYS
   *              and CLEANER_TRASH_RETENTION_DAYS
   * @param dataset the dataset whose versions this policy filters
   */
  public HivePartitionVersionRetentionCleanerPolicy(State state, HivePartitionDataset dataset) {
    super(state, dataset);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.CLEANER_BACKUP_RETENTION_VERSIONS),
        "Missing required property " + ComplianceConfigurationKeys.CLEANER_BACKUP_RETENTION_VERSIONS);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.CLEANER_BACKUP_RETENTION_DAYS),
        "Missing required property " + ComplianceConfigurationKeys.CLEANER_BACKUP_RETENTION_DAYS);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.CLEANER_TRASH_RETENTION_DAYS),
        "Missing required property " + ComplianceConfigurationKeys.CLEANER_TRASH_RETENTION_DAYS);
    this.backupRetentionVersions =
        this.state.getPropAsInt(ComplianceConfigurationKeys.CLEANER_BACKUP_RETENTION_VERSIONS);
    this.backupRetentionDays = this.state.getPropAsInt(ComplianceConfigurationKeys.CLEANER_BACKUP_RETENTION_DAYS);
    this.trashRetentionDays = this.state.getPropAsInt(ComplianceConfigurationKeys.CLEANER_TRASH_RETENTION_DAYS);
  }

  @Override
  public boolean shouldSelect(HivePartitionVersion version) {
    // not implemented by this class
    return false;
  }

  /**
   * Selects versions eligible for cleaning:
   * trash versions are selected once their age reaches trashRetentionDays; backup versions
   * beyond the first backupRetentionVersions (in sorted order) are always selected, and the
   * retained backups are additionally selected once their age reaches backupRetentionDays.
   * Only versions whose table name starts with this dataset's db-name prefix are considered.
   */
  @Override
  public List<HivePartitionVersion> selectedList(List<HivePartitionVersion> versions) {
    if (versions.isEmpty()) {
      return versions;
    }
    List<HivePartitionRetentionVersion> backupVersions = new ArrayList<>();
    List<HivePartitionRetentionVersion> trashVersions = new ArrayList<>();
    List<HivePartitionVersion> selectedVersions = new ArrayList<>();
    for (HivePartitionVersion version : versions) {
      String prefix = this.dataset.getDbName() + ComplianceConfigurationKeys.DBNAME_SEPARATOR;
      if (!version.getTableName().startsWith(prefix)) {
        continue;
      }
      if (version.getTableName().contains(ComplianceConfigurationKeys.BACKUP)) {
        backupVersions.add((HivePartitionRetentionVersion) version);
      }
      if (version.getTableName().contains(ComplianceConfigurationKeys.TRASH)) {
        trashVersions.add((HivePartitionRetentionVersion) version);
      }
    }
    // Trash versions are selected purely by age.
    for (HivePartitionRetentionVersion version : trashVersions) {
      long ageInDays = TimeUnit.MILLISECONDS.toDays(version.getAgeInMilliSeconds());
      if (ageInDays >= this.trashRetentionDays) {
        selectedVersions.add(version);
      }
    }
    if (backupVersions.isEmpty()) {
      return selectedVersions;
    }
    Collections.sort(backupVersions);
    // BUG FIX: the sublist bounds must come from backupVersions, not the unfiltered
    // versions list (which may also hold trash versions), and must never exceed
    // backupVersions.size(). The old code threw IndexOutOfBoundsException whenever
    // versions.size() > backupVersions.size() or backupRetentionVersions > backupVersions.size().
    int retainedCount = Math.min(this.backupRetentionVersions, backupVersions.size());
    // Backups beyond the retained count are always selected for cleaning.
    selectedVersions.addAll(backupVersions.subList(retainedCount, backupVersions.size()));
    // Retained backups are still selected if they exceed the age limit.
    for (HivePartitionRetentionVersion version : backupVersions.subList(0, retainedCount)) {
      long ageInDays = TimeUnit.MILLISECONDS.toDays(version.getAgeInMilliSeconds());
      if (ageInDays >= this.backupRetentionDays) {
        selectedVersions.add(version);
      }
    }
    return selectedVersions;
  }
}
| 3,178 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionVersionRetentionCleaner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HiveProxyQueryExecutor;
import org.apache.gobblin.compliance.purger.HivePurgerQueryTemplate;
import org.apache.gobblin.compliance.utils.PartitionUtils;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.version.DatasetVersion;
import org.apache.gobblin.util.HadoopUtils;
/**
* A version cleaner for the {@link HivePartitionRetentionVersion}.
*
* A version will be considered as clean only if it's metadata no longer exists in the db and the data
* it was pointing to no longer exists.
*
* @author adsharma
*/
@Slf4j
public class HivePartitionVersionRetentionCleaner extends HivePartitionVersionRetentionRunner {
  // Proxied FileSystem for the version owner; initialized in clean().
  private FileSystem fs;
  // If true, data of versions that still have content is left untouched (and metadata kept).
  private boolean simulate;
  // Owner of this version's data, read from the dataset version.
  private Optional<String> versionOwner = Optional.absent();

  /**
   * @param dataset the dataset the version belongs to
   * @param version the version to clean (expected to be a {@link HivePartitionRetentionVersion})
   * @param nonDeletableVersionLocations locations whose data must be preserved
   * @param state job state; COMPLIANCE_JOB_SIMULATE toggles dry-run behavior
   */
  public HivePartitionVersionRetentionCleaner(CleanableDataset dataset, DatasetVersion version,
      List<String> nonDeletableVersionLocations, State state) {
    super(dataset, version, nonDeletableVersionLocations, state);
    this.versionOwner = ((HivePartitionRetentionVersion) this.datasetVersion).getOwner();
    this.simulate = this.state.getPropAsBoolean(ComplianceConfigurationKeys.COMPLIANCE_JOB_SIMULATE,
        ComplianceConfigurationKeys.DEFAULT_COMPLIANCE_JOB_SIMULATE);
  }

  /**
   * If simulate is set to true, this will simply return.
   * If version is pointing to an empty location, drop the partition and close the jdbc connection.
   * If version is pointing to the same location as of the dataset, then drop the partition and close the jdbc connection.
   * If version is pointing to the non deletable version locations, then drop the partition and close the jdbc connection.
   * Otherwise delete the data underneath, drop the partition and close the jdbc connection.
   */
  @Override
  public void clean()
      throws IOException {
    Path versionLocation = ((HivePartitionRetentionVersion) this.datasetVersion).getLocation();
    Path datasetLocation = ((CleanableHivePartitionDataset) this.cleanableDataset).getLocation();
    String completeName = ((HivePartitionRetentionVersion) this.datasetVersion).datasetURN();
    State state = new State(this.state);
    this.fs = ProxyUtils.getOwnerFs(state, this.versionOwner);
    // try-with-resources ensures the proxied jdbc connection is closed.
    try (HiveProxyQueryExecutor queryExecutor = ProxyUtils.getQueryExecutor(state, this.versionOwner)) {
      log.info("Trying to clean version " + completeName);
      if (!this.fs.exists(versionLocation)) {
        // Data already gone: only the Hive metadata needs dropping.
        log.info("Data versionLocation doesn't exist. Metadata will be dropped for the version " + completeName);
      } else if (datasetLocation.toString().equalsIgnoreCase(versionLocation.toString())) {
        // Version shares the live dataset's location: keep the data, drop only metadata.
        log.info(
            "Dataset location is same as version location. Won't delete the data but metadata will be dropped for the version "
                + completeName);
      } else if (this.nonDeletableVersionLocations.contains(versionLocation.toString())) {
        // Location is still referenced by a version that must be kept: keep the data.
        log.info(
            "This version corresponds to the non deletable version. Won't delete the data but metadata will be dropped for the version "
                + completeName);
      } else if (HadoopUtils.hasContent(this.fs, versionLocation)) {
        if (this.simulate) {
          // Simulate mode: leave both data and metadata for versions that still have content.
          log.info("Simulate is set to true. Won't delete the partition " + completeName);
          return;
        }
        log.info("Deleting data from the version " + completeName);
        this.fs.delete(versionLocation, true);
      }
      // Every non-simulated path finishes by dropping the partition metadata.
      executeDropVersionQueries(queryExecutor);
    }
  }

  // These methods are not implemented by this class
  @Override
  public void preCleanAction() {
  }

  @Override
  public void postCleanAction() {
  }

  /**
   * Drops this version's partition from its table, proxied as the version owner.
   * SQLExceptions are wrapped in IOException to match clean()'s contract.
   */
  private void executeDropVersionQueries(HiveProxyQueryExecutor queryExecutor)
      throws IOException {
    String dbName = ((HivePartitionRetentionVersion) this.datasetVersion).getDbName();
    String tableName = ((HivePartitionRetentionVersion) this.datasetVersion).getTableName();
    String partitionSpec =
        PartitionUtils.getPartitionSpecString(((HivePartitionRetentionVersion) this.datasetVersion).getSpec());
    try {
      queryExecutor.executeQuery(HivePurgerQueryTemplate.getUseDbQuery(dbName), this.versionOwner);
      queryExecutor
          .executeQuery(HivePurgerQueryTemplate.getDropPartitionQuery(tableName, partitionSpec), this.versionOwner);
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }
}
| 3,179 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/CleanableHivePartitionDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionVersion;
import org.apache.gobblin.compliance.HivePartitionVersionFinder;
import org.apache.gobblin.compliance.HivePartitionVersionPolicy;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.retention.dataset.CleanableDataset;
import org.apache.gobblin.data.management.retention.version.VersionCleaner;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
* This class is a Cleanable representation of {@link HivePartitionDataset}.
* This class implements the clean method which will be called for each dataset
*
* @author adsharma
*/
@Slf4j
public class CleanableHivePartitionDataset extends HivePartitionDataset implements CleanableDataset, FileSystemDataset {
  private FileSystem fs;
  // Defensive copy of the job state.
  private State state;

  public CleanableHivePartitionDataset(Partition partition, FileSystem fs, State state) {
    super(partition);
    this.fs = fs;
    this.state = new State(state);
  }

  public CleanableHivePartitionDataset(HivePartitionDataset hivePartitionDataset, FileSystem fs, State state) {
    super(hivePartitionDataset);
    this.fs = fs;
    this.state = new State(state);
  }

  @Override
  public Path datasetRoot() {
    return this.getLocation();
  }

  /**
   * This method uses {@link HivePartitionVersionFinder} to list out versions
   * corresponding to this dataset. It will then filter out versions using {@link HivePartitionVersionPolicy}.
   *
   * For each version there will be a corresponding {@link VersionCleaner} which will clean the version.
   */
  @Override
  public void clean()
      throws IOException {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.RETENTION_VERSION_FINDER_CLASS_KEY),
        "Missing required property " + ComplianceConfigurationKeys.RETENTION_VERSION_FINDER_CLASS_KEY);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.RETENTION_SELECTION_POLICY_CLASS_KEY),
        "Missing required property " + ComplianceConfigurationKeys.RETENTION_SELECTION_POLICY_CLASS_KEY);
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.RETENTION_VERSION_CLEANER_CLASS_KEY),
        "Missing required property " + ComplianceConfigurationKeys.RETENTION_VERSION_CLEANER_CLASS_KEY);
    // Version tables are matched by the complete table name plus each version suffix.
    List<String> patterns = new ArrayList<>();
    patterns.add(getCompleteTableName(this) + ComplianceConfigurationKeys.BACKUP);
    patterns.add(getCompleteTableName(this) + ComplianceConfigurationKeys.STAGING);
    patterns.add(getCompleteTableName(this) + ComplianceConfigurationKeys.TRASH);
    // Finder, policy and cleaner classes are configured by name and built reflectively.
    HivePartitionVersionFinder versionFinder = GobblinConstructorUtils
        .invokeConstructor(HivePartitionVersionFinder.class,
            this.state.getProp(ComplianceConfigurationKeys.RETENTION_VERSION_FINDER_CLASS_KEY), this.fs, this.state,
            patterns);
    List<HivePartitionVersion> versions = new ArrayList<>(versionFinder.findDatasetVersions(this));
    HivePartitionVersionPolicy versionPolicy = GobblinConstructorUtils
        .invokeConstructor(HivePartitionVersionPolicy.class,
            this.state.getProp(ComplianceConfigurationKeys.RETENTION_SELECTION_POLICY_CLASS_KEY), this.state, this);
    List<HivePartitionVersion> deletableVersions = new ArrayList<>(versionPolicy.selectedList(versions));
    List<String> nonDeletableVersionLocations = getNonDeletableVersionLocations(versions, deletableVersions);
    for (HivePartitionVersion hivePartitionDatasetVersion : deletableVersions) {
      try {
        VersionCleaner versionCleaner = GobblinConstructorUtils
            .invokeConstructor(HivePartitionVersionRetentionRunner.class,
                this.state.getProp(ComplianceConfigurationKeys.RETENTION_VERSION_CLEANER_CLASS_KEY), this,
                hivePartitionDatasetVersion, nonDeletableVersionLocations, this.state);
        versionCleaner.clean();
      } catch (Exception e) {
        // Best-effort: a failure on one version must not abort cleaning the others.
        // FIX: pass the exception to the logger so the stack trace is preserved
        // (the previous code concatenated only e.getMessage()).
        log.warn("Caught exception trying to clean version " + hivePartitionDatasetVersion.datasetURN(), e);
      }
    }
  }

  /**
   * Returns the locations of all versions NOT selected for deletion, plus this dataset's own
   * location. Cleaners use this list to avoid deleting data still referenced by kept versions.
   */
  private List<String> getNonDeletableVersionLocations(List<HivePartitionVersion> versions,
      List<HivePartitionVersion> deletableVersions) {
    List<String> nonDeletableVersionLocations = new ArrayList<>();
    for (HivePartitionVersion version : versions) {
      if (!deletableVersions.contains(version)) {
        nonDeletableVersionLocations.add(version.getLocation().toString());
      }
    }
    nonDeletableVersionLocations.add(this.getLocation().toString());
    return nonDeletableVersionLocations;
  }

  /**
   * Returns the dataset's db name and table name joined by the configured separator.
   */
  public String getCompleteTableName(HivePartitionDataset dataset) {
    return StringUtils
        .join(Arrays.asList(dataset.getDbName(), dataset.getTableName()), ComplianceConfigurationKeys.DBNAME_SEPARATOR);
  }
}
| 3,180 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionVersionRetentionReaperPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.retention;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionVersion;
import org.apache.gobblin.compliance.HivePartitionVersionPolicy;
import org.apache.gobblin.configuration.State;
public class HivePartitionVersionRetentionReaperPolicy extends HivePartitionVersionPolicy {
  /**
   * Number of days after which a {@link HivePartitionVersion} will be either moved to
   * backup or deleted.
   */
  private int retentionDays;

  /**
   * @param state must contain {@link ComplianceConfigurationKeys#REAPER_RETENTION_DAYS}
   * @param dataset dataset whose versions this policy evaluates
   */
  public HivePartitionVersionRetentionReaperPolicy(State state, HivePartitionDataset dataset) {
    super(state, dataset);
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.REAPER_RETENTION_DAYS),
        "Missing required property " + ComplianceConfigurationKeys.REAPER_RETENTION_DAYS);
    this.retentionDays = state.getPropAsInt(ComplianceConfigurationKeys.REAPER_RETENTION_DAYS);
  }

  /**
   * A version is selected once its age, in whole days, reaches the configured retention limit.
   */
  @Override
  public boolean shouldSelect(HivePartitionVersion version) {
    long ageMillis = ((HivePartitionRetentionVersion) version).getAgeInMilliSeconds();
    return TimeUnit.MILLISECONDS.toDays(ageMillis) >= this.retentionDays;
  }

  /**
   * Sorts the given versions in place, then returns those old enough to be reaped.
   */
  @Override
  public List<HivePartitionVersion> selectedList(List<HivePartitionVersion> versions) {
    if (versions.isEmpty()) {
      return versions;
    }
    Preconditions.checkArgument(versions.get(0) instanceof HivePartitionRetentionVersion);
    Collections.sort(versions);
    List<HivePartitionVersion> oldEnough = new ArrayList<>();
    for (HivePartitionVersion candidate : versions) {
      if (!shouldSelect(candidate)) {
        continue;
      }
      oldEnough.add(candidate);
    }
    return oldEnough;
  }
}
| 3,181 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/utils/PartitionUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.utils;
import java.util.Map;
import com.google.common.base.Preconditions;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
/**
* A utility class for Partition.
*
* @author adsharma
*/
public class PartitionUtils {
  private static final String SINGLE_QUOTE = "'";

  // Utility class: all members are static, so instantiation is prevented.
  private PartitionUtils() {
  }

  /**
   * Add single quotes to the string, if not present.
   * TestString will be converted to 'TestString'
   *
   * @param st non-null string to quote
   */
  public static String getQuotedString(String st) {
    Preconditions.checkNotNull(st);
    StringBuilder quoted = new StringBuilder();
    if (!st.startsWith(SINGLE_QUOTE)) {
      quoted.append(SINGLE_QUOTE);
    }
    quoted.append(st);
    if (!st.endsWith(SINGLE_QUOTE)) {
      quoted.append(SINGLE_QUOTE);
    }
    return quoted.toString();
  }

  /**
   * This method returns the partition spec string of the partition.
   * Example : datepartition='2016-01-01-00', size='12345'
   *
   * Output order follows the supplied map's iteration order.
   */
  public static String getPartitionSpecString(Map<String, String> spec) {
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> entry : spec.entrySet()) {
      // Use length() rather than rebuilding the string each iteration (old code
      // called sb.toString().isEmpty(), which is accidentally quadratic).
      if (sb.length() > 0) {
        sb.append(",");
      }
      sb.append(entry.getKey());
      sb.append("=");
      sb.append(getQuotedString(entry.getValue()));
    }
    return sb.toString();
  }

  /**
   * Check if a given string is a valid unixTimeStamp: the expected length and fully
   * parseable as a long.
   */
  public static boolean isUnixTimeStamp(String timeStamp) {
    if (timeStamp.length() != ComplianceConfigurationKeys.TIME_STAMP_LENGTH) {
      return false;
    }
    try {
      Long.parseLong(timeStamp);
      return true;
    } catch (NumberFormatException e) {
      return false;
    }
  }
}
| 3,182 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/utils/DatasetUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.utils;
import java.util.List;
import com.google.common.base.Optional;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.HivePartitionDataset;
/**
 * Helper methods for locating {@link HivePartitionDataset}s and reading their numeric parameters.
 *
 * @author adsharma
 */
@Slf4j
public class DatasetUtils {
  /**
   * Find the {@link HivePartitionDataset} matching the given complete partition name.
   *
   * @param partitionName Complete partition name ie dbName@tableName@partitionName
   * @param datasets candidate datasets to search through
   * @return the first dataset whose URN matches (case-insensitively), or absent if none does
   */
  public static Optional<HivePartitionDataset> findDataset(String partitionName, List<HivePartitionDataset> datasets) {
    for (HivePartitionDataset candidate : datasets) {
      if (candidate.datasetURN().equalsIgnoreCase(partitionName)) {
        return Optional.of(candidate);
      }
    }
    log.warn("Unable to find dataset corresponding to " + partitionName);
    return Optional.absent();
  }

  /**
   * Read a dataset parameter as a non-negative long, rendered back as a decimal string.
   * Falls back to {@code defaultValue} when the parameter is missing, unparseable, or negative.
   */
  public static String getProperty(HivePartitionDataset dataset, String property, long defaultValue) {
    String rawValue = dataset.getParams().get(property);
    if (rawValue == null) {
      return Long.toString(defaultValue);
    }
    try {
      long parsed = Long.parseLong(rawValue);
      return Long.toString(parsed < 0 ? defaultValue : parsed);
    } catch (NumberFormatException e) {
      return Long.toString(defaultValue);
    }
  }
}
| 3,183 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/utils/ProxyUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.utils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.thrift.DelegationTokenIdentifier;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.thrift.TException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HiveProxyQueryExecutor;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.HostUtils;
import org.apache.gobblin.util.WriterUtils;
/**
 * A utility class for letting hadoop super user to proxy.
 *
 * @author adsharma
 */
@Slf4j
public class ProxyUtils {
  /**
   * Populates {@code state} with the file-system proxy settings (proxy user, super-user
   * principal, Kerberos auth method). No-op when proxying is disabled via
   * {@link ComplianceConfigurationKeys#GOBBLIN_COMPLIANCE_SHOULD_PROXY}.
   */
  public static void setProxySettingsForFs(State state) {
    if (state.getPropAsBoolean(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SHOULD_PROXY,
        ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DEFAULT_SHOULD_PROXY)) {
      String proxyUser = state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_PROXY_USER);
      String superUser = state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
      String realm = state.getProp(ConfigurationKeys.KERBEROS_REALM);
      state.setProp(ConfigurationKeys.SHOULD_FS_PROXY_AS_USER, true);
      state.setProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME, proxyUser);
      state.setProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS,
          HostUtils.getPrincipalUsingHostname(superUser, realm));
      state.setProp(ConfigurationKeys.FS_PROXY_AUTH_METHOD, ConfigurationKeys.KERBEROS_AUTH);
    }
  }

  /**
   * Logs in as the super user from its keytab and cancels every Hive delegation token
   * present in the real user's credentials.
   *
   * @throws IOException on keytab login failure
   * @throws TException on metastore communication failure
   */
  public static void cancelTokens(State state)
      throws IOException, InterruptedException, TException {
    Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION),
        "Missing required property " + ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER),
        "Missing required property " + ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
    Preconditions.checkArgument(state.contains(ConfigurationKeys.KERBEROS_REALM),
        "Missing required property " + ConfigurationKeys.KERBEROS_REALM);
    String superUser = state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
    String keytabLocation = state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
    String realm = state.getProp(ConfigurationKeys.KERBEROS_REALM);
    UserGroupInformation.loginUserFromKeytab(HostUtils.getPrincipalUsingHostname(superUser, realm), keytabLocation);
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    // NOTE(review): getRealUser() may be null when the current user is not proxying — confirm
    // callers always invoke this in a proxied context.
    UserGroupInformation realUser = currentUser.getRealUser();
    Credentials credentials = realUser.getCredentials();
    for (Token<?> token : credentials.getAllTokens()) {
      if (token.getKind().equals(DelegationTokenIdentifier.HIVE_DELEGATION_KIND)) {
        log.info("Cancelling hive token");
        HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(new HiveConf());
        try {
          hiveClient.cancelDelegationToken(token.encodeToUrlString());
        } finally {
          // Fix: the original leaked one metastore connection per cancelled token.
          hiveClient.close();
        }
      }
    }
  }

  /**
   * Returns a {@link FileSystem} proxied as {@code owner} when present; otherwise uses the
   * proxy user already configured in {@code state}.
   */
  public static FileSystem getOwnerFs(State state, Optional<String> owner)
      throws IOException {
    if (owner.isPresent()) {
      state.setProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_PROXY_USER, owner.get());
    }
    ProxyUtils.setProxySettingsForFs(state);
    return WriterUtils.getWriterFs(state);
  }

  /**
   * Builds a {@link HiveProxyQueryExecutor} proxying as each of the present {@code owners}.
   */
  @SafeVarargs
  public static HiveProxyQueryExecutor getQueryExecutor(State state, Optional<String>... owners)
      throws IOException {
    List<String> proxies = new ArrayList<>();
    for (Optional<String> owner : owners) {
      if (owner.isPresent()) {
        proxies.add(owner.get());
      }
    }
    return new HiveProxyQueryExecutor(state, proxies);
  }
}
| 3,184 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.thrift.TException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Iterables;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.ComplianceEvents;
import org.apache.gobblin.compliance.utils.DatasetUtils;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionFinder;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.DatasetsFinder;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.source.Source;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * This class creates {@link WorkUnit}s and assigns exactly one partition to each of them.
 * It iterates over all Hive Tables specified via whitelist and blacklist, lists all partitions, and creates
 * workunits.
 *
 * It revives {@link WorkUnit}s if their execution attempts are not exhausted.
 *
 * @author adsharma
 */
@Slf4j
public class HivePurgerSource implements Source {
  // Finds the HivePartitionDatasets to purge; implementation is configurable via
  // GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS and defaults to HivePartitionFinder.
  protected DatasetsFinder datasetFinder;
  // WorkUnits keyed by complete partition name, so fresh and revived units never duplicate.
  protected Map<String, WorkUnit> workUnitMap = new HashMap<>();
  // A failed work unit is revived until its attempts reach this limit.
  protected int maxWorkUnitExecutionAttempts;
  // Upper bound on work units created in one run; remaining datasets roll over via the watermark.
  protected int maxWorkUnits;
  protected int workUnitsCreatedCount = 0;
  // Complete partition name from which fresh work unit creation starts this run.
  protected String lowWatermark;
  // Job start time in epoch millis, stamped onto every work unit.
  protected String timeStamp;
  // Decides whether a given dataset should be purged in this run.
  protected PurgePolicy policy;
  // Whether hadoop proxying is enabled for this job.
  protected boolean shouldProxy;
  protected MetricContext metricContext;
  protected EventSubmitter eventSubmitter;
  // Number of executions within the current purge cycle, carried between runs via workunit state.
  protected int executionCount;
  // These datasets are lexicographically sorted by their name
  protected List<HivePartitionDataset> datasets = new ArrayList<>();

  /**
   * Sets up timestamps/watermarks/metrics, instantiates the dataset finder and purge policy,
   * and cancels Hive delegation tokens when proxying is enabled.
   */
  @VisibleForTesting
  protected void initialize(SourceState state)
      throws IOException {
    setTimeStamp();
    setLowWatermark(state);
    setExecutionCount(state);
    this.metricContext = Instrumented.getMetricContext(state, this.getClass());
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, ComplianceEvents.NAMESPACE).
        build();
    // Must run after setLowWatermark/setExecutionCount: it inspects both to detect cycle end.
    submitCycleCompletionEvent();
    this.maxWorkUnits = state
        .getPropAsInt(ComplianceConfigurationKeys.MAX_WORKUNITS_KEY, ComplianceConfigurationKeys.DEFAULT_MAX_WORKUNITS);
    this.maxWorkUnitExecutionAttempts = state
        .getPropAsInt(ComplianceConfigurationKeys.MAX_WORKUNIT_EXECUTION_ATTEMPTS_KEY,
            ComplianceConfigurationKeys.DEFAULT_MAX_WORKUNIT_EXECUTION_ATTEMPTS);
    // TODO: Event submitter and metrics will be added later
    String datasetFinderClass = state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS,
        HivePartitionFinder.class.getName());
    this.datasetFinder = GobblinConstructorUtils.invokeConstructor(DatasetsFinder.class, datasetFinderClass, state);
    populateDatasets();
    String policyClass =
        state.getProp(ComplianceConfigurationKeys.PURGE_POLICY_CLASS, HivePurgerPolicy.class.getName());
    this.policy = GobblinConstructorUtils.invokeConstructor(PurgePolicy.class, policyClass, this.lowWatermark);
    this.shouldProxy = state.getPropAsBoolean(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SHOULD_PROXY,
        ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_DEFAULT_SHOULD_PROXY);
    if (!this.shouldProxy) {
      return;
    }
    // cancel tokens
    try {
      ProxyUtils.cancelTokens(new State(state));
    } catch (InterruptedException | TException e) {
      throw new IOException(e);
    }
  }

  /**
   * Entry point for Gobblin: initializes the source and returns all created/revived work units.
   * IOExceptions are rethrown unchecked via Throwables.propagate.
   */
  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    try {
      initialize(state);
      createWorkUnits(state);
    } catch (IOException e) {
      Throwables.propagate(e);
    }
    return new ArrayList<>(this.workUnitMap.values());
  }

  /**
   * Creates a work unit for the named partition, or absent when the partition is no longer
   * among the discovered datasets (e.g. it was dropped since the previous run).
   */
  protected Optional<WorkUnit> createNewWorkUnit(String partitionName, int executionAttempts) {
    Optional<HivePartitionDataset> dataset = DatasetUtils.findDataset(partitionName, this.datasets);
    if (!dataset.isPresent()) {
      return Optional.<WorkUnit>absent();
    }
    return Optional.fromNullable(createNewWorkUnit(dataset.get(), executionAttempts));
  }

  // Convenience overload for a fresh (first-attempt) work unit.
  protected WorkUnit createNewWorkUnit(HivePartitionDataset dataset) {
    return createNewWorkUnit(dataset, ComplianceConfigurationKeys.DEFAULT_EXECUTION_ATTEMPTS);
  }

  /**
   * Builds a work unit carrying the partition name, attempt count, job timestamp, proxy flag,
   * execution count, and the partition's size statistics; also emits a WORKUNIT_GENERATED event.
   */
  protected WorkUnit createNewWorkUnit(HivePartitionDataset dataset, int executionAttempts) {
    WorkUnit workUnit = WorkUnit.createEmpty();
    workUnit.setProp(ComplianceConfigurationKeys.PARTITION_NAME, dataset.datasetURN());
    workUnit.setProp(ComplianceConfigurationKeys.EXECUTION_ATTEMPTS, executionAttempts);
    workUnit.setProp(ComplianceConfigurationKeys.TIMESTAMP, this.timeStamp);
    workUnit.setProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SHOULD_PROXY, this.shouldProxy);
    workUnit.setProp(ComplianceConfigurationKeys.EXECUTION_COUNT, this.executionCount);
    workUnit.setProp(ComplianceConfigurationKeys.NUM_ROWS, DatasetUtils
        .getProperty(dataset, ComplianceConfigurationKeys.NUM_ROWS,
            ComplianceConfigurationKeys.DEFAULT_NUM_ROWS));
    workUnit.setProp(ComplianceConfigurationKeys.RAW_DATA_SIZE, DatasetUtils
        .getProperty(dataset, ComplianceConfigurationKeys.RAW_DATA_SIZE,
            ComplianceConfigurationKeys.DEFAULT_RAW_DATA_SIZE));
    workUnit.setProp(ComplianceConfigurationKeys.TOTAL_SIZE, DatasetUtils
        .getProperty(dataset, ComplianceConfigurationKeys.TOTAL_SIZE,
            ComplianceConfigurationKeys.DEFAULT_TOTAL_SIZE));
    submitWorkUnitGeneratedEvent(dataset.datasetURN(), executionAttempts);
    return workUnit;
  }

  // Emits a tracking event recording that a work unit was generated for this partition/attempt.
  protected void submitWorkUnitGeneratedEvent(String partitionName, int executionAttempts) {
    Map<String, String> metadata = new HashMap<>();
    metadata.put(ComplianceConfigurationKeys.EXECUTION_ATTEMPTS, Integer.toString(executionAttempts));
    metadata.put(ComplianceConfigurationKeys.PARTITION_NAME, partitionName);
    this.eventSubmitter.submit(ComplianceEvents.Purger.WORKUNIT_GENERATED, metadata);
  }

  /**
   * This method creates the list of all work units needed for the current execution.
   * Fresh work units are created for each partition starting from watermark and failed work units from the
   * previous run will be added to the list.
   */
  protected void createWorkUnits(SourceState state)
      throws IOException {
    createWorkunitsFromPreviousState(state);
    if (this.datasets.isEmpty()) {
      return;
    }
    for (HivePartitionDataset dataset : this.datasets) {
      Optional<String> owner = dataset.getOwner();
      if (workUnitsExceeded()) {
        // Cap reached: record the next dataset as the watermark so the following run resumes here.
        log.info("Workunits exceeded");
        setJobWatermark(state, dataset.datasetURN());
        return;
      }
      if (!this.policy.shouldPurge(dataset)) {
        continue;
      }
      WorkUnit workUnit = createNewWorkUnit(dataset);
      log.info("Created new work unit with partition " + workUnit.getProp(ComplianceConfigurationKeys.PARTITION_NAME));
      this.workUnitMap.put(workUnit.getProp(ComplianceConfigurationKeys.PARTITION_NAME), workUnit);
      this.workUnitsCreatedCount++;
    }
    // All datasets processed without hitting the cap: reset the watermark for the next cycle.
    if (!state.contains(ComplianceConfigurationKeys.HIVE_PURGER_WATERMARK)) {
      this.setJobWatermark(state, ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK);
    }
  }

  // True once the per-run work unit cap has been reached.
  protected boolean workUnitsExceeded() {
    return !(this.workUnitsCreatedCount < this.maxWorkUnits);
  }

  /**
   * Find all datasets on the basis on whitelist and blacklist, and then add them in a list in lexicographical order.
   */
  protected void populateDatasets()
      throws IOException {
    this.datasets = this.datasetFinder.findDatasets();
    // sortHiveDatasets sorts the list in place; the returned reference is the same list.
    sortHiveDatasets(datasets);
  }

  /**
   * Sort all HiveDatasets on the basis of complete name ie dbName.tableName
   */
  protected List<HivePartitionDataset> sortHiveDatasets(List<HivePartitionDataset> datasets) {
    Collections.sort(datasets, new Comparator<HivePartitionDataset>() {
      @Override
      public int compare(HivePartitionDataset o1, HivePartitionDataset o2) {
        return o1.datasetURN().compareTo(o2.datasetURN());
      }
    });
    return datasets;
  }

  /**
   * Add failed work units in a workUnitMap with partition name as Key.
   * New work units are created using required configuration from the old work unit.
   */
  protected void createWorkunitsFromPreviousState(SourceState state) {
    // A reset watermark means a fresh cycle: nothing to revive.
    if (this.lowWatermark.equalsIgnoreCase(ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK)) {
      return;
    }
    if (Iterables.isEmpty(state.getPreviousWorkUnitStates())) {
      return;
    }
    for (WorkUnitState workUnitState : state.getPreviousWorkUnitStates()) {
      if (workUnitState.getWorkingState() == WorkUnitState.WorkingState.COMMITTED) {
        continue;
      }
      WorkUnit workUnit = workUnitState.getWorkunit();
      Preconditions.checkArgument(workUnit.contains(ComplianceConfigurationKeys.PARTITION_NAME),
          "Older WorkUnit doesn't contain property partition name.");
      int executionAttempts = workUnit.getPropAsInt(ComplianceConfigurationKeys.EXECUTION_ATTEMPTS,
          ComplianceConfigurationKeys.DEFAULT_EXECUTION_ATTEMPTS);
      if (executionAttempts < this.maxWorkUnitExecutionAttempts) {
        // Revive with an incremented attempt counter.
        Optional<WorkUnit> workUnitOptional =
            createNewWorkUnit(workUnit.getProp(ComplianceConfigurationKeys.PARTITION_NAME), ++executionAttempts);
        if (!workUnitOptional.isPresent()) {
          continue;
        }
        workUnit = workUnitOptional.get();
        log.info("Revived old Work Unit for partiton " + workUnit.getProp(ComplianceConfigurationKeys.PARTITION_NAME)
            + " having execution attempt " + workUnit.getProp(ComplianceConfigurationKeys.EXECUTION_ATTEMPTS));
        workUnitMap.put(workUnit.getProp(ComplianceConfigurationKeys.PARTITION_NAME), workUnit);
      }
    }
  }

  // Records the job start time (epoch millis) used to stamp every work unit of this run.
  protected void setTimeStamp() {
    this.timeStamp = Long.toString(System.currentTimeMillis());
  }

  @Override
  public Extractor getExtractor(WorkUnitState state)
      throws IOException {
    return new HivePurgerExtractor(state);
  }

  @Override
  public void shutdown(SourceState state) {
  }

  /**
   * Sets the local watermark, which is a class variable. Local watermark is a complete partition name which act as the starting point for the creation of fresh work units.
   */
  protected void setLowWatermark(SourceState state) {
    this.lowWatermark = getWatermarkFromPreviousWorkUnits(state, ComplianceConfigurationKeys.HIVE_PURGER_WATERMARK);
    log.info("Setting low watermark for the job: " + this.lowWatermark);
  }

  /**
   * Restores the execution counter from the previous run's workunit state and increments it;
   * falls back to the default count when missing or unparseable.
   */
  protected void setExecutionCount(SourceState state) {
    String executionCount = getWatermarkFromPreviousWorkUnits(state, ComplianceConfigurationKeys.EXECUTION_COUNT);
    if (executionCount.equalsIgnoreCase(ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK)) {
      this.executionCount = ComplianceConfigurationKeys.DEFAULT_EXECUTION_COUNT;
      log.info("No executionCount is found. Setting it to " + this.executionCount);
    } else {
      try {
        this.executionCount = Integer.parseInt(executionCount) + 1;
      } catch (NumberFormatException e) {
        log.warn("Unable to convert executionCount " + executionCount + " to int : " + e.getMessage());
        this.executionCount = ComplianceConfigurationKeys.DEFAULT_EXECUTION_COUNT;
      }
    }
  }

  /**
   * If low watermark is at the reset point, then either cycle is completed or starting for the first time
   * If executionCount is greater than 1, then cycle is completed
   * If cycle is completed, executionCount will be reset and cycle completion event will be submitted
   */
  protected void submitCycleCompletionEvent() {
    if (!this.lowWatermark.equalsIgnoreCase(ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK)) {
      return;
    }
    if (this.executionCount > 1) {
      // Cycle completed
      Map<String, String> metadata = new HashMap<>();
      metadata.put(ComplianceConfigurationKeys.TOTAL_EXECUTIONS, Integer.toString((this.executionCount - 1)));
      this.eventSubmitter.submit(ComplianceEvents.Purger.CYCLE_COMPLETED, metadata);
      this.executionCount = ComplianceConfigurationKeys.DEFAULT_EXECUTION_COUNT;
    }
  }

  protected String getLowWatermark() {
    return this.lowWatermark;
  }

  /**
   * Sets Job Watermark in the SourceState which will be copied to all WorkUnitStates. Job Watermark is a complete partition name.
   * During next run of this job, fresh work units will be created starting from this partition.
   */
  protected void setJobWatermark(SourceState state, String watermark) {
    state.setProp(ComplianceConfigurationKeys.HIVE_PURGER_WATERMARK, watermark);
    log.info("Setting job watermark for the job: " + watermark);
  }

  /**
   * Fetches the value of a watermark given its key from the previous run.
   * NOTE(review): only the first previous WorkUnitState is consulted — presumably all of them
   * carry the same watermark properties; confirm against how the job copies SourceState props.
   */
  protected static String getWatermarkFromPreviousWorkUnits(SourceState state, String watermark) {
    if (state.getPreviousWorkUnitStates().isEmpty()) {
      return ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK;
    }
    return state.getPreviousWorkUnitStates().get(0)
        .getProp(watermark, ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK);
  }
}
| 3,185 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/PurgeableHivePartitionDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Partition;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HiveProxyQueryExecutor;
import org.apache.gobblin.compliance.utils.PartitionUtils;
import org.apache.gobblin.compliance.utils.ProxyUtils;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * A purgeable representation of {@link HivePartitionDataset}
 *
 * @author adsharma
 */
@Getter
@Slf4j
@Setter
public class PurgeableHivePartitionDataset extends HivePartitionDataset implements PurgeableDataset {
  // Fully qualified name of the table holding compliance ids that purge queries join against.
  private String complianceIdTable;
  private String complianceId;
  // When true, purge() only logs and runs no queries.
  private Boolean simulate;
  // Job timestamp; suffixes the staging/backup table names and the staging partition location.
  private String timeStamp;
  private String complianceField;
  // Queries that populate the staging partition with the filtered (purged) data.
  private List<String> purgeQueries;
  // FileSystem proxied as the dataset owner; used to stat the partition location.
  private FileSystem datasetOwnerFs;
  private State state;
  private Optional<String> datasetOwner = Optional.absent();
  // Modification times of the original partition location before/after the purge queries,
  // used by the commit policy to detect concurrent writes.
  private long startTime;
  private long endTime;
  private Boolean specifyPartitionFormat;

  public PurgeableHivePartitionDataset(Partition partition) {
    super(partition);
  }

  public PurgeableHivePartitionDataset(HivePartitionDataset hivePartitionDataset) {
    super(hivePartitionDataset);
  }

  /**
   * This method is responsible for actual purging.
   *  - It first creates a staging table partition with the same schema as of original table partition.
   *  - Staging table partition is then populated by original table left outer joined with compliance id table.
   *
   *  - Alter query will then change the partition location to the staging partition location.
   *  - In flight queries won't get affected due to alter partition query.
   */
  public void purge()
      throws IOException {
    this.datasetOwner = getOwner();
    // Work on a copy so proxy settings added below don't leak into the shared job state.
    State state = new State(this.state);
    this.datasetOwnerFs = ProxyUtils.getOwnerFs(state, this.datasetOwner);
    try (HiveProxyQueryExecutor queryExecutor = ProxyUtils.getQueryExecutor(state, this.datasetOwner)) {
      if (this.simulate) {
        log.info("Simulate is set to true. Wont't run actual queries");
        return;
      }
      String originalPartitionLocation = getOriginalPartitionLocation();
      // Create the staging table and staging partition
      queryExecutor.executeQueries(HivePurgerQueryTemplate.getCreateStagingTableQuery(this), this.datasetOwner);
      this.startTime = getLastModifiedTime(originalPartitionLocation);
      // Execute purge queries, that is insert filtered data into the staging partition
      queryExecutor.executeQueries(this.purgeQueries, this.datasetOwner);
      this.endTime = getLastModifiedTime(originalPartitionLocation);
      // Create a backup table and partition pointing to the original partition location
      queryExecutor.executeQueries(HivePurgerQueryTemplate.getBackupQueries(this), this.datasetOwner);
      String commitPolicyString = this.state.getProp(ComplianceConfigurationKeys.PURGER_COMMIT_POLICY_CLASS,
          ComplianceConfigurationKeys.DEFAULT_PURGER_COMMIT_POLICY_CLASS);
      CommitPolicy<PurgeableHivePartitionDataset> commitPolicy =
          GobblinConstructorUtils.invokeConstructor(CommitPolicy.class, commitPolicyString);
      // Abort if the original partition was modified while the purge queries ran.
      if (!commitPolicy.shouldCommit(this)) {
        log.error("Last modified time before start of execution : " + this.startTime);
        log.error("Last modified time after execution of purge queries : " + this.endTime);
        throw new RuntimeException("Failed to commit. File modified during job run.");
      }
      // Alter the original table partition to start pointing to the cleaned-partition-location/staging-partition-location
      queryExecutor
          .executeQueries(HivePurgerQueryTemplate.getAlterOriginalPartitionLocationQueries(this), this.datasetOwner);
      // Drop the staging table
      queryExecutor.executeQueries(HivePurgerQueryTemplate.getDropStagingTableQuery(this), this.datasetOwner);
    } catch (SQLException e) {
      throw new IOException(e);
    }
  }

  // Modification time of the given path, as seen by the dataset owner's FileSystem.
  private long getLastModifiedTime(String file)
      throws IOException {
    return this.datasetOwnerFs.getFileStatus(new Path(file)).getModificationTime();
  }

  public void setPurgeQueries(List<String> purgeQueries) {
    this.purgeQueries = purgeQueries;
  }

  // stagingDb.stagingTable — staging table lives in the same db as the original (see getStagingDb).
  public String getCompleteStagingTableName() {
    return getStagingDb() + "." + getStagingTableName();
  }

  public String getCompleteBackupTableName() {
    return getDbName() + "." + getBackupTableName();
  }

  // e.g. db_table_staging_<timeStamp>; separator/suffix text comes from ComplianceConfigurationKeys.
  public String getStagingTableName() {
    return getDbName() + ComplianceConfigurationKeys.DBNAME_SEPARATOR + getTableName()
        + ComplianceConfigurationKeys.STAGING + this.timeStamp;
  }

  public String getBackupTableName() {
    return getDbName() + ComplianceConfigurationKeys.DBNAME_SEPARATOR + getTableName()
        + ComplianceConfigurationKeys.BACKUP + this.timeStamp;
  }

  public String getStagingTableLocation() {
    return StringUtils.join(Arrays.asList(getTrashDir(), getStagingTableName()), '/');
  }

  /**
   * Staging partition location: if the current partition directory is itself named with a unix
   * timestamp (i.e. a previous purge already relocated it), the new timestamp directory is
   * created as its sibling; otherwise it is nested under the current location.
   */
  public String getStagingPartitionLocation() {
    Path originalPartitionLocation = getLocation();
    if (PartitionUtils.isUnixTimeStamp(originalPartitionLocation.getName())) {
      return StringUtils.join(Arrays.asList(getLocation().getParent().toString(), this.timeStamp), '/');
    }
    else {
      return StringUtils.join(Arrays.asList(getLocation().toString(), this.timeStamp), '/');
    }
  }

  public String getOriginalPartitionLocation() {
    return getLocation().toString();
  }

  // Staging tables are created in the original dataset's database.
  public String getStagingDb() {
    return getDbName();
  }

  public String getTrashDir() {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_DIR));
    return this.state.getProp(ComplianceConfigurationKeys.TRASH_DIR);
  }

  public String getBackupTableLocation() {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_DIR));
    return StringUtils.join(Arrays.asList(getTrashDir(), getBackupTableName()), '/');
  }
}
| 3,186 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import com.google.common.base.Preconditions;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.HivePartitionDataset;
/**
* A policy class to determine if a dataset should be purged or not.
*/
@Slf4j
public class HivePurgerPolicy implements PurgePolicy<HivePartitionDataset> {
private String watermark;
public HivePurgerPolicy(String watermark) {
Preconditions.checkNotNull(watermark, "Watermark should not be null");
this.watermark = watermark;
}
public boolean shouldPurge(HivePartitionDataset dataset) {
if (!dataset.getTableParams().containsKey(ComplianceConfigurationKeys.DATASET_DESCRIPTOR_KEY)) {
return false;
}
if (this.watermark.equalsIgnoreCase(ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK)) {
return true;
}
return dataset.datasetURN().compareTo(this.watermark) >= 0;
}
}
| 3,187 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.thrift.TException;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.ComplianceEvents;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.utils.DatasetUtils;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.HostUtils;
/**
 * The Publisher moves COMMITTED WorkUnitState to SUCCESSFUL, otherwise FAILED, and emits a
 * purge event per work unit with record/byte counts for the processed partition.
 *
 * @author adsharma
 */
@Slf4j
public class HivePurgerPublisher extends DataPublisher {
  /** Splits fully qualified partition names of the form db@table@partition. */
  private static final Splitter AT_SPLITTER = Splitter.on("@").omitEmptyStrings().trimResults();
  /** Expected number of components in a db@table@partition name. */
  private static final int PARTITION_NAME_PARTS = 3;

  protected MetricContext metricContext;
  protected EventSubmitter eventSubmitter;
  public HiveMetaStoreClient client;

  public HivePurgerPublisher(State state) throws Exception {
    super(state);
    this.metricContext = Instrumented.getMetricContext(state, this.getClass());
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, ComplianceEvents.NAMESPACE).
        build();
    initHiveMetastoreClient();
  }

  /**
   * Creates the {@link HiveMetaStoreClient}. When a super-user keytab is configured, a Kerberos
   * login is performed first and the client is created inside the authenticated context.
   */
  public void initHiveMetastoreClient() throws Exception {
    if (this.state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION)) {
      String superUser = this.state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
      String realm = this.state.getProp(ConfigurationKeys.KERBEROS_REALM);
      String keytabLocation = this.state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
      log.info("Establishing MetastoreClient connection using " + keytabLocation);
      UserGroupInformation.loginUserFromKeytab(HostUtils.getPrincipalUsingHostname(superUser, realm), keytabLocation);
      UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
      loginUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws TException {
          HivePurgerPublisher.this.client = new HiveMetaStoreClient(new HiveConf());
          return null;
        }
      });
    } else {
      HivePurgerPublisher.this.client = new HiveMetaStoreClient(new HiveConf());
    }
  }

  public void initialize() {
  }

  @Override
  public void publishData(Collection<? extends WorkUnitState> states) {
    for (WorkUnitState state : states) {
      if (state.getWorkingState() == WorkUnitState.WorkingState.SUCCESSFUL) {
        state.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
        submitEvent(state, ComplianceEvents.Purger.WORKUNIT_COMMITTED);
      } else {
        state.setWorkingState(WorkUnitState.WorkingState.FAILED);
        submitEvent(state, ComplianceEvents.Purger.WORKUNIT_FAILED);
      }
    }
  }

  /**
   * Submits a purge event carrying records/bytes read and written for the partition this work
   * unit operated on. The event is skipped (with a warning) if the partition name is malformed
   * or the partition can no longer be resolved in the metastore.
   */
  private void submitEvent(WorkUnitState state, String name) {
    WorkUnit workUnit = state.getWorkunit();
    Map<String, String> metadata = new HashMap<>();
    // NOTE(review): assumes NUM_ROWS was set on the state by the purger; the parse further
    // below would NPE otherwise — confirm against HivePurgerWriter/Converter.
    String recordsRead = state.getProp(ComplianceConfigurationKeys.NUM_ROWS);
    metadata.put(ComplianceConfigurationKeys.WORKUNIT_RECORDSREAD, recordsRead);
    metadata.put(ComplianceConfigurationKeys.WORKUNIT_BYTESREAD,
        getDataSize(workUnit.getProp(ComplianceConfigurationKeys.RAW_DATA_SIZE),
            workUnit.getProp(ComplianceConfigurationKeys.TOTAL_SIZE)));
    String partitionNameProp = workUnit.getProp(ComplianceConfigurationKeys.PARTITION_NAME);
    List<String> namesList = AT_SPLITTER.splitToList(partitionNameProp);
    if (namesList.size() != PARTITION_NAME_PARTS) {
      log.warn("Not submitting event. Invalid partition name: " + partitionNameProp);
      return;
    }
    String dbName = namesList.get(0);
    String tableName = namesList.get(1);
    String partitionName = namesList.get(2);
    Partition qlPartition;
    try {
      Table table = new Table(this.client.getTable(dbName, tableName));
      org.apache.hadoop.hive.metastore.api.Partition apiPartition =
          this.client.getPartition(dbName, tableName, partitionName);
      qlPartition = new Partition(table, apiPartition);
    } catch (Exception e) {
      // Log the full stack trace through SLF4J instead of printStackTrace().
      log.warn("Not submitting event. Failed to resolve partition '" + partitionName + "'", e);
      return;
    }
    HivePartitionDataset hivePartitionDataset = new HivePartitionDataset(qlPartition);
    String recordsWritten = DatasetUtils.getProperty(hivePartitionDataset, ComplianceConfigurationKeys.NUM_ROWS,
        ComplianceConfigurationKeys.DEFAULT_NUM_ROWS);
    String recordsPurged = Long.toString((Long.parseLong(recordsRead) - Long.parseLong(recordsWritten)));
    metadata.put(ComplianceConfigurationKeys.WORKUNIT_RECORDSWRITTEN,
        recordsWritten);
    metadata.put(ComplianceConfigurationKeys.WORKUNIT_BYTESWRITTEN, getDataSize(DatasetUtils
        .getProperty(hivePartitionDataset, ComplianceConfigurationKeys.RAW_DATA_SIZE,
            ComplianceConfigurationKeys.DEFAULT_RAW_DATA_SIZE), DatasetUtils
        .getProperty(hivePartitionDataset, ComplianceConfigurationKeys.TOTAL_SIZE,
            ComplianceConfigurationKeys.DEFAULT_TOTAL_SIZE)));
    metadata.put(DatasetMetrics.DATABASE_NAME, hivePartitionDataset.getDbName());
    metadata.put(DatasetMetrics.TABLE_NAME, hivePartitionDataset.getTableName());
    metadata.put(DatasetMetrics.PARTITION_NAME, hivePartitionDataset.getName());
    metadata.put(DatasetMetrics.RECORDS_PURGED, recordsPurged);
    this.eventSubmitter.submit(name, metadata);
  }

  /**
   * Returns the total data size as a string, falling back to the raw data size when the total
   * size is non-positive (e.g. not yet computed by Hive statistics).
   */
  private String getDataSize(String rawDataSize, String totalDataSize) {
    long rawDataSizeVal = Long.parseLong(rawDataSize);
    long totalDataSizeVal = Long.parseLong(totalDataSize);
    long dataSize = totalDataSizeVal;
    if (totalDataSizeVal <= 0) {
      dataSize = rawDataSizeVal;
    }
    return Long.toString(dataSize);
  }

  /** No metadata is published by this publisher. */
  public void publishMetadata(Collection<? extends WorkUnitState> states) {
  }

  @Override
  public void close() {
  }

  /** Metadata keys used by the purge events emitted from this publisher. */
  public static class DatasetMetrics {
    public static final String DATABASE_NAME = "HiveDatabaseName";
    public static final String TABLE_NAME = "HiveTableName";
    public static final String PARTITION_NAME = "HivePartitionName";
    public static final String RECORDS_PURGED = "RecordsPurged";
  }
}
| 3,188 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.gobblin.compliance.ComplianceConfigurationKeys;
import org.apache.gobblin.compliance.DatasetDescriptor;
import org.apache.gobblin.compliance.HivePartitionDataset;
import org.apache.gobblin.compliance.HivePartitionFinder;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
/**
 * This extractor doesn't extract anything, but is used to instantiate and pass
 * {@link PurgeableHivePartitionDataset} to the converter.
 *
 * @author adsharma
 */
public class HivePurgerExtractor implements Extractor<PurgeableHivePartitionDatasetSchema, PurgeableHivePartitionDataset> {
  private PurgeableHivePartitionDataset record;
  // Defensive copy of the work-unit state; never reassigned after construction.
  private final State state;
  // Set after the single record has been handed out.
  private boolean read;

  public HivePurgerExtractor(WorkUnitState state)
      throws IOException {
    this.read = false;
    this.state = new State(state);
  }

  @Override
  public PurgeableHivePartitionDatasetSchema getSchema() {
    return new PurgeableHivePartitionDatasetSchema();
  }

  /**
   * There is only one record {@link PurgeableHivePartitionDataset} to be read per partition, and must return null
   * after that to indicate end of reading.
   */
  @Override
  public PurgeableHivePartitionDataset readRecord(PurgeableHivePartitionDataset record)
      throws IOException {
    if (this.read) {
      return null;
    }
    this.read = true;
    if (this.record == null) {
      // Lazily built unless a record was injected via setRecord (tests).
      this.record = createPurgeableHivePartitionDataset(this.state);
    }
    return this.record;
  }

  @Override
  public long getExpectedRecordCount() {
    return 1;
  }

  /**
   * Watermark is not managed by this extractor.
   */
  @Override
  public long getHighWatermark() {
    return 0;
  }

  @Override
  public void close()
      throws IOException {
  }

  @VisibleForTesting
  public void setRecord(PurgeableHivePartitionDataset record) {
    this.record = record;
  }

  /**
   * Builds the single {@link PurgeableHivePartitionDataset} from the configured partition name
   * and compliance properties, validating that all required properties are present.
   */
  private PurgeableHivePartitionDataset createPurgeableHivePartitionDataset(State state)
      throws IOException {
    HivePartitionDataset hivePartitionDataset =
        HivePartitionFinder.findDataset(state.getProp(ComplianceConfigurationKeys.PARTITION_NAME), state);
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.COMPLIANCEID_KEY),
        "Missing property " + ComplianceConfigurationKeys.COMPLIANCEID_KEY);
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.COMPLIANCE_ID_TABLE_KEY),
        "Missing property " + ComplianceConfigurationKeys.COMPLIANCE_ID_TABLE_KEY);
    Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.TIMESTAMP),
        "Missing table property " + ComplianceConfigurationKeys.TIMESTAMP);
    // getPropAsBoolean returns a primitive; boxing these into Boolean (as before) was needless.
    boolean simulate = state.getPropAsBoolean(ComplianceConfigurationKeys.COMPLIANCE_JOB_SIMULATE,
        ComplianceConfigurationKeys.DEFAULT_COMPLIANCE_JOB_SIMULATE);
    String complianceIdentifier = state.getProp(ComplianceConfigurationKeys.COMPLIANCEID_KEY);
    String complianceIdTable = state.getProp(ComplianceConfigurationKeys.COMPLIANCE_ID_TABLE_KEY);
    String timeStamp = state.getProp(ComplianceConfigurationKeys.TIMESTAMP);
    boolean specifyPartitionFormat = state.getPropAsBoolean(ComplianceConfigurationKeys.SPECIFY_PARTITION_FORMAT,
        ComplianceConfigurationKeys.DEFAULT_SPECIFY_PARTITION_FORMAT);
    State datasetState = new State();
    datasetState.addAll(state.getProperties());
    PurgeableHivePartitionDataset dataset = new PurgeableHivePartitionDataset(hivePartitionDataset);
    dataset.setComplianceId(complianceIdentifier);
    dataset.setComplianceIdTable(complianceIdTable);
    dataset.setComplianceField(getComplianceField(state, hivePartitionDataset));
    dataset.setTimeStamp(timeStamp);
    dataset.setState(datasetState);
    dataset.setSimulate(simulate);
    dataset.setSpecifyPartitionFormat(specifyPartitionFormat);
    return dataset;
  }

  /**
   * Resolves the compliance field from the dataset-descriptor table property using the
   * configured {@link DatasetDescriptor} implementation.
   */
  private String getComplianceField(State state, HivePartitionDataset dataset) {
    Map<String, String> partitionParameters = dataset.getTableParams();
    Preconditions.checkArgument(partitionParameters.containsKey(ComplianceConfigurationKeys.DATASET_DESCRIPTOR_KEY),
        "Missing table property " + ComplianceConfigurationKeys.DATASET_DESCRIPTOR_KEY);
    String datasetDescriptorClass = state.getProp(ComplianceConfigurationKeys.DATASET_DESCRIPTOR_CLASS,
        ComplianceConfigurationKeys.DEFAULT_DATASET_DESCRIPTOR_CLASS);
    DatasetDescriptor descriptor = GobblinConstructorUtils
        .invokeConstructor(DatasetDescriptor.class, datasetDescriptorClass,
            partitionParameters.get(ComplianceConfigurationKeys.DATASET_DESCRIPTOR_KEY),
            Optional.fromNullable(state.getProp(ComplianceConfigurationKeys.DATASET_DESCRIPTOR_FIELDPATH)));
    return descriptor.getComplianceField();
  }
}
| 3,189 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/PurgeableHivePartitionDatasetSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
/**
 * Dummy (empty) schema class corresponding to {@link PurgeableHivePartitionDataset}.
 *
 * <p>Gobblin's extractor/converter/writer APIs are parameterized on a schema type; the purge
 * flow has no real record schema, so this placeholder satisfies those type parameters.
 *
 * @author adsharma
 */
public class PurgeableHivePartitionDatasetSchema {
}
| 3,190 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.Converter;
import org.apache.gobblin.converter.SingleRecordIterable;
/**
 * A {@link Converter} that decorates each incoming {@link PurgeableHivePartitionDataset} with
 * the Hive queries required to purge it; the placeholder schema passes through unchanged.
 *
 * @author adsharma
 */
public class HivePurgerConverter extends Converter<PurgeableHivePartitionDatasetSchema, PurgeableHivePartitionDatasetSchema, PurgeableHivePartitionDataset, PurgeableHivePartitionDataset> {
  /** Identity conversion: there is no real schema to transform. */
  @Override
  public PurgeableHivePartitionDatasetSchema convertSchema(PurgeableHivePartitionDatasetSchema schema,
      WorkUnitState state) {
    return schema;
  }

  /** Attaches the purge queries to the dataset and re-emits it as a one-element iterable. */
  @Override
  public Iterable<PurgeableHivePartitionDataset> convertRecord(PurgeableHivePartitionDatasetSchema schema,
      PurgeableHivePartitionDataset record, WorkUnitState state) {
    record.setPurgeQueries(HivePurgerQueryTemplate.getPurgeQueries(record));
    return new SingleRecordIterable<>(record);
  }
}
| 3,191 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/CommitPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
/**
 * Policy deciding whether the result of a purge on a dataset should be committed.
 *
 * @author adsharma
 * @param <T> the dataset type this policy evaluates
 */
@FunctionalInterface
public interface CommitPolicy<T> {
  /**
   * @param dataset the dataset that was purged
   * @return true if the purge result for this dataset is safe to commit
   */
  boolean shouldCommit(T dataset);
}
| 3,192 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/PurgeableDataset.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import org.apache.gobblin.data.management.dataset.Dataset;
/**
 * A {@link Dataset} whose non-compliant records can be removed via {@link #purge()}.
 */
public interface PurgeableDataset extends Dataset {
  /**
   * Purges this dataset.
   *
   * @throws IOException if the purge cannot be completed
   */
  void purge()
      throws IOException;
}
| 3,193 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.writer.DataWriter;
/**
 * A {@link DataWriter} that triggers the purge of each {@link PurgeableHivePartitionDataset}
 * handed to it: all purge queries are executed and, if the original partition was not modified
 * during this execution, its location is altered.
 *
 * @author adsharma
 */
@Slf4j
@AllArgsConstructor
public class HivePurgerWriter implements DataWriter<PurgeableHivePartitionDataset> {
  /** Each write purges exactly one partition dataset. */
  @Override
  public void write(PurgeableHivePartitionDataset dataset) throws IOException {
    dataset.purge();
  }

  @Override
  public long recordsWritten() {
    return 1;
  }

  // The remaining DataWriter lifecycle hooks are intentionally no-ops: commit semantics
  // live inside the purge queries themselves and no byte accounting is tracked.

  @Override
  public void commit() throws IOException {
  }

  @Override
  public void close() throws IOException {
  }

  @Override
  public void cleanup() throws IOException {
  }

  @Override
  public long bytesWritten() throws IOException {
    return 0;
  }
}
| 3,194 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.io.IOException;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
 * A {@link DataWriterBuilder} that produces {@link HivePurgerWriter} instances.
 *
 * @author adsharma
 */
public class HivePurgerWriterBuilder extends DataWriterBuilder<PurgeableHivePartitionDatasetSchema, PurgeableHivePartitionDataset> {
  /** The purger writer is stateless, so no builder configuration is consumed here. */
  @Override
  public DataWriter<PurgeableHivePartitionDataset> build() throws IOException {
    return new HivePurgerWriter();
  }
}
| 3,195 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerQueryTemplate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import com.google.common.base.Optional;
import org.apache.gobblin.compliance.utils.PartitionUtils;
/**
 * This class creates all queries required by {@link HivePurgerConverter}.
 *
 * @author adsharma
 */
public class HivePurgerQueryTemplate {
  /**
   * Use the staging db for creating staging tables.
   * Alter query doesn't work with dbName.tableName.
   */
  public static String getUseDbQuery(String dbName) {
    return "USE " + dbName;
  }

  /** Disables map-join auto conversion for the compliance-id join. */
  public static String getAutoConvertJoinProperty() {
    return "SET hive.auto.convert.join=false";
  }

  /**
   * Will allow insert query to specify only partition column names instead of partition spec.
   */
  public static String getDynamicPartitionNonStrictModeProperty() {
    return "SET hive.exec.dynamic.partition.mode=nonstrict";
  }

  /**
   * To get around the OOM, we need to set hive.optimize.sort.dynamic.partition to true so that the data is sorted by
   * partition key and at one time only one partition needs to be accessed.
   */
  public static String getOptimizeSortDynamicPartition() {
    return "SET hive.optimize.sort.dynamic.partition=true";
  }

  /**
   * If staging table doesn't exist, it will create a staging table at the given location.
   */
  public static String getCreateTableQuery(String completeNewTableName, String likeTableDbName, String likeTableName,
      String location) {
    return getCreateTableQuery(completeNewTableName, likeTableDbName, likeTableName) + " LOCATION " + PartitionUtils
        .getQuotedString(location);
  }

  public static String getCreateTableQuery(String completeNewTableName, String likeTableDbName, String likeTableName) {
    return "CREATE EXTERNAL TABLE IF NOT EXISTS " + completeNewTableName + " LIKE " + likeTableDbName + "."
        + likeTableName;
  }

  /**
   * This query will create a partition in staging table and insert the datasets whose compliance id is not
   * contained in the compliance id table.
   */
  public static String getInsertQuery(PurgeableHivePartitionDataset dataset) {
    return "INSERT OVERWRITE" + " TABLE " + dataset.getCompleteStagingTableName() + " PARTITION (" + PartitionUtils
        .getPartitionSpecString(dataset.getSpec()) + ")" + " SELECT /*+MAPJOIN(b) */ " + getCommaSeparatedColumnNames(
        dataset.getCols(), "a.") + " FROM " + dataset.getDbName() + "." + dataset.getTableName() + " a LEFT JOIN "
        + dataset.getComplianceIdTable() + " b" + " ON a." + dataset.getComplianceField() + "=b." + dataset
        .getComplianceId() + " WHERE b." + dataset.getComplianceId() + " IS NULL AND " + getWhereClauseForPartition(
        dataset.getSpec(), "a.");
  }

  /**
   * Builds an ADD PARTITION statement; file format and location clauses are appended only
   * when present.
   */
  public static String getAddPartitionQuery(String tableName, String partitionSpec, Optional<String> fileFormat, Optional<String> location) {
    String query = "ALTER TABLE " + tableName + " ADD IF NOT EXISTS" + " PARTITION (" + partitionSpec + ")";
    if (fileFormat.isPresent()) {
      query = query + " FILEFORMAT " + fileFormat.get();
    }
    if (location.isPresent()) {
      query = query + " LOCATION " + PartitionUtils.getQuotedString(location.get());
    }
    return query;
  }

  public static String getAlterTableLocationQuery(String tableName, String partitionSpec, String location) {
    return "ALTER TABLE " + tableName + " PARTITION (" + partitionSpec + ")" + " SET LOCATION " + PartitionUtils
        .getQuotedString(location);
  }

  public static String getDropTableQuery(String dbName, String tableName) {
    return "DROP TABLE IF EXISTS " + dbName + "." + tableName;
  }

  public static String getDropPartitionQuery(String tableName, String partitionSpec) {
    return "ALTER TABLE " + tableName + " DROP IF EXISTS" + " PARTITION (" + partitionSpec + ")";
  }

  public static String getUpdatePartitionMetadataQuery(String dbName, String tableName, String partitionSpec) {
    return "ANALYZE TABLE " + dbName + "." + tableName + " PARTITION (" + partitionSpec + ") COMPUTE STATISTICS";
  }

  /**
   * Will return all the queries needed to populate the staging table partition.
   * This won't include alter table partition location query.
   */
  public static List<String> getPurgeQueries(PurgeableHivePartitionDataset dataset) {
    List<String> queries = new ArrayList<>();
    queries.add(getUseDbQuery(dataset.getStagingDb()));
    queries.add(getInsertQuery(dataset));
    return queries;
  }

  /**
   * Will return all the queries needed to create the staging table and its partition.
   */
  public static List<String> getCreateStagingTableQuery(PurgeableHivePartitionDataset dataset) {
    List<String> queries = new ArrayList<>();
    queries.add(getUseDbQuery(dataset.getStagingDb()));
    queries.add(getAutoConvertJoinProperty());
    queries.add(getCreateTableQuery(dataset.getCompleteStagingTableName(), dataset.getDbName(), dataset.getTableName(),
        dataset.getStagingTableLocation()));
    Optional<String> fileFormat = Optional.absent();
    if (dataset.getSpecifyPartitionFormat()) {
      fileFormat = dataset.getFileFormat();
    }
    queries.add(getAddPartitionQuery(dataset.getCompleteStagingTableName(),
        PartitionUtils.getPartitionSpecString(dataset.getSpec()), fileFormat,
        Optional.fromNullable(dataset.getStagingPartitionLocation())));
    return queries;
  }

  /**
   * Will return all the queries needed to have a backup table partition pointing to the original partition data location
   */
  public static List<String> getBackupQueries(PurgeableHivePartitionDataset dataset) {
    List<String> queries = new ArrayList<>();
    queries.add(getUseDbQuery(dataset.getDbName()));
    queries.add(getCreateTableQuery(dataset.getCompleteBackupTableName(), dataset.getDbName(), dataset.getTableName(),
        dataset.getBackupTableLocation()));
    Optional<String> fileFormat = Optional.absent();
    if (dataset.getSpecifyPartitionFormat()) {
      fileFormat = dataset.getFileFormat();
    }
    queries.add(
        getAddPartitionQuery(dataset.getBackupTableName(), PartitionUtils.getPartitionSpecString(dataset.getSpec()),
            fileFormat, Optional.fromNullable(dataset.getOriginalPartitionLocation())));
    return queries;
  }

  /**
   * Will return all the queries needed to alter the location of the table partition.
   * Alter table partition query doesn't work with syntax dbName.tableName
   */
  public static List<String> getAlterOriginalPartitionLocationQueries(PurgeableHivePartitionDataset dataset) {
    List<String> queries = new ArrayList<>();
    queries.add(getUseDbQuery(dataset.getDbName()));
    String partitionSpecString = PartitionUtils.getPartitionSpecString(dataset.getSpec());
    queries.add(
        getAlterTableLocationQuery(dataset.getTableName(), partitionSpecString, dataset.getStagingPartitionLocation()));
    queries.add(getUpdatePartitionMetadataQuery(dataset.getDbName(), dataset.getTableName(), partitionSpecString));
    return queries;
  }

  public static List<String> getDropStagingTableQuery(PurgeableHivePartitionDataset dataset) {
    List<String> queries = new ArrayList<>();
    queries.add(getUseDbQuery(dataset.getStagingDb()));
    queries.add(
        getDropPartitionQuery(dataset.getStagingTableName(), PartitionUtils.getPartitionSpecString(dataset.getSpec())));
    return queries;
  }

  /**
   * This method builds the where clause for the insertion query.
   * If prefix is a, then it builds a.datepartition='2016-01-01-00' AND a.size='12345' from [datepartition : '2016-01-01-00', size : '12345']
   */
  public static String getWhereClauseForPartition(Map<String, String> spec, String prefix) {
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> entry : spec.entrySet()) {
      // Check length instead of materializing toString() on every iteration (was O(n^2)).
      if (sb.length() > 0) {
        sb.append(" AND ");
      }
      sb.append(prefix).append(entry.getKey());
      sb.append("=");
      sb.append(PartitionUtils.getQuotedString(entry.getValue()));
    }
    return sb.toString();
  }

  /**
   * Joins the column names with ", ", each prefixed (e.g. "a.col1, a.col2").
   */
  public static String getCommaSeparatedColumnNames(List<FieldSchema> cols, String prefix) {
    StringBuilder sb = new StringBuilder();
    for (FieldSchema fs : cols) {
      // Check length instead of materializing toString() on every iteration (was O(n^2)).
      if (sb.length() > 0) {
        sb.append(", ");
      }
      sb.append(prefix);
      sb.append(fs.getName());
    }
    return sb.toString();
  }
}
| 3,196 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/PurgePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
/**
 * Policy deciding whether a dataset must be purged.
 *
 * @author adsharma
 * @param <T> the dataset type this policy evaluates
 */
@FunctionalInterface
public interface PurgePolicy<T> {
  /**
   * @param dataset the candidate dataset
   * @return true if this dataset should be purged
   */
  boolean shouldPurge(T dataset);
}
| 3,197 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/purger/HivePurgerCommitPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.purger;
import com.google.common.base.Preconditions;
/**
* Default commit policy for purger.
*
* @author adsharma
*/
public class HivePurgerCommitPolicy implements CommitPolicy<PurgeableHivePartitionDataset> {
/**
* @return true if the last modified time does not change during the execution of the job.
*/
public boolean shouldCommit(PurgeableHivePartitionDataset dataset) {
Preconditions
.checkNotNull(dataset.getStartTime(), "Start time for purger is not set for dataset " + dataset.datasetURN());
Preconditions
.checkNotNull(dataset.getEndTime(), "End time for purger is not set for dataset " + dataset.datasetURN());
return dataset.getStartTime() == dataset.getEndTime();
}
}
| 3,198 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance | Create_ds/gobblin/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/restore/RestorePolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compliance.restore;
import java.io.IOException;
/**
 * Policy that, given a dataset, resolves the dataset from which it should be restored.
 *
 * @author adsharma
 * @param <T> the dataset type this policy operates on
 */
@FunctionalInterface
public interface RestorePolicy<T> {
  /**
   * @param dataset the dataset to be restored
   * @return the dataset to restore from
   * @throws IOException if the restore source cannot be resolved
   */
  T getDatasetToRestore(T dataset)
      throws IOException;
}
| 3,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.