index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/converter/AsyncHttpJoinConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingDeque;
import org.apache.avro.generic.GenericRecord;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.gobblin.async.AsyncRequest;
import org.apache.gobblin.async.AsyncRequestBuilder;
import org.apache.gobblin.async.BufferedRecord;
import org.apache.gobblin.async.Callback;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.http.HttpClient;
import org.apache.gobblin.http.HttpOperation;
import org.apache.gobblin.http.ResponseHandler;
import org.apache.gobblin.http.ResponseStatus;
import org.apache.gobblin.net.Request;
import org.apache.gobblin.utils.HttpConstants;
import org.apache.gobblin.writer.WriteCallback;
import edu.umd.cs.findbugs.annotations.SuppressWarnings;
/**
* This converter converts an input record (DI) to an output record (DO) which
* contains original input data and http request & response info.
*
* Sequence:
* Convert DI to HttpOperation
* Convert HttpOperation to RQ (by internal AsyncRequestBuilder)
* Execute http request, get response RP (by HttpClient)
* Combine info (DI, RQ, RP, status, etc..) to generate output DO
*/
@Slf4j
public abstract class AsyncHttpJoinConverter<SI, SO, DI, DO, RQ, RP> extends AsyncConverter1to1<SI, SO, DI, DO> {
  public static final String CONF_PREFIX = "gobblin.converter.http.";

  // Defaults applied when the job config omits these keys: JSON content type, GET verb.
  public static final Config DEFAULT_FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(HttpConstants.CONTENT_TYPE, "application/json")
          .put(HttpConstants.VERB, "GET")
          .build());

  // Collaborators created in init() from the work-unit config; null until init() runs.
  protected HttpClient<RQ, RP> httpClient = null;
  protected ResponseHandler<RQ, RP> responseHandler = null;
  protected AsyncRequestBuilder<GenericRecord, RQ> requestBuilder = null;
  // When true, a failed http call completes the record's future with null instead of an exception.
  protected boolean skipFailedRecord;

  /**
   * Initializes the converter: loads the "gobblin.converter.http." scoped config (with
   * DEFAULT_FALLBACK) and builds the http client, response handler and request builder
   * via the subclass factory hooks.
   */
  public AsyncHttpJoinConverter init(WorkUnitState workUnitState) {
    super.init(workUnitState);
    Config config = ConfigBuilder.create().loadProps(workUnitState.getProperties(), CONF_PREFIX).build();
    config = config.withFallback(DEFAULT_FALLBACK);
    skipFailedRecord = workUnitState.getPropAsBoolean(ConfigurationKeys.CONVERTER_SKIP_FAILED_RECORD, false);
    httpClient = createHttpClient(config, workUnitState.getTaskBroker());
    responseHandler = createResponseHandler(config);
    requestBuilder = createRequestBuilder(config);
    return this;
  }

  @Override
  public final SO convertSchema(SI inputSchema, WorkUnitState workUnitState)
      throws SchemaConversionException {
    return convertSchemaImpl(inputSchema, workUnitState);
  }

  // Factory hooks implemented by concrete subclasses for a specific http stack.
  protected abstract HttpClient<RQ, RP> createHttpClient(Config config, SharedResourcesBroker<GobblinScopeTypes> broker);
  protected abstract ResponseHandler<RQ, RP> createResponseHandler(Config config);
  protected abstract AsyncRequestBuilder<GenericRecord, RQ> createRequestBuilder(Config config);
  // Conversion hooks: DI -> HttpOperation, SI -> SO, and (DI, request, status) -> DO.
  protected abstract HttpOperation generateHttpOperation (DI inputRecord, State state);
  protected abstract SO convertSchemaImpl (SI inputSchema, WorkUnitState workUnitState) throws SchemaConversionException;
  protected abstract DO convertRecordImpl (SO outputSchema, DI input, RQ rawRequest, ResponseStatus status) throws DataConversionException;

  /**
   * A helper class which performs the conversion from http response to DO type output, saved as a {@link CompletableFuture}
   *
   * NOTE(review): the type parameters declared here shadow the outer class's parameters of the
   * same names — they are fresh variables, which is why the raw instantiation below compiles.
   */
  private class AsyncHttpJoinConverterContext<SO, DI, DO, RP, RQ> {
    private final CompletableFuture<DO> future;
    private final AsyncHttpJoinConverter<SI, SO, DI, DO, RQ, RP> converter;
    @Getter
    private final Callback<RP> callback;

    public AsyncHttpJoinConverterContext(AsyncHttpJoinConverter converter, SO outputSchema, DI input, Request<RQ> request) {
      this.future = new CompletableFuture();
      this.converter = converter;
      // Bridges the async http client back into the future held by this context.
      this.callback = new Callback<RP>() {
        @Override
        public void onSuccess(RP result) {
          try {
            ResponseStatus status = AsyncHttpJoinConverterContext.this.converter.responseHandler.handleResponse(request, result);
            switch (status.getType()) {
              case OK:
                AsyncHttpJoinConverterContext.this.onSuccess(request.getRawRequest(), status, outputSchema, input);
                break;
              case CLIENT_ERROR:
                // Client errors still complete the future, so the output record carries the error status.
                log.error ("Http converter client error with request {}", request.getRawRequest());
                AsyncHttpJoinConverterContext.this.onSuccess(request.getRawRequest(), status, outputSchema, input);
                break;
              case SERVER_ERROR:
                // Server side error. Retry
                log.error ("Http converter server error with request {}", request.getRawRequest());
                throw new DataConversionException(request.getRawRequest() + " send failed due to server error");
              default:
                throw new DataConversionException(request.getRawRequest() + " Should not reach here");
            }
          } catch (Exception e) {
            // Runs on the http client's callback thread: propagate failures through the
            // future instead of letting them escape here.
            log.error ("Http converter exception {} with request {}", e.toString(), request.getRawRequest());
            AsyncHttpJoinConverterContext.this.future.completeExceptionally(e);
          }
        }

        @SuppressWarnings(value = "NP_NONNULL_PARAM_VIOLATION",
            justification = "CompletableFuture will replace null value with NIL")
        @Override
        public void onFailure(Throwable throwable) {
          String errorMsg = ExceptionUtils.getMessage(throwable);
          log.error ("Http converter on failure with request {} and throwable {}", request.getRawRequest(), errorMsg);
          if (skipFailedRecord) {
            // Best-effort mode: complete with null so the pipeline skips this record.
            AsyncHttpJoinConverterContext.this.future.complete( null);
          } else {
            AsyncHttpJoinConverterContext.this.future.completeExceptionally(throwable);
          }
        }
      };
    }

    // Converts the (request, status) pair into the output record and fulfills the future.
    private void onSuccess(RQ rawRequest, ResponseStatus status, SO outputSchema, DI input) throws DataConversionException {
      log.debug("{} send with status type {}", rawRequest, status.getType());
      DO output = this.converter.convertRecordImpl(outputSchema, input, rawRequest, status);
      AsyncHttpJoinConverterContext.this.future.complete(output);
    }
  }

  /**
   * Convert an input record to a future object where an output record will be filled in sometime later
   * Sequence:
   *   Convert input (DI) to an http request
   *   Send http request asynchronously, and registers an http callback
   *   Create an {@link CompletableFuture} object. When the callback is invoked, this future object is filled in by an output record which is converted from http response.
   *   Return the future object.
   */
  @Override
  public final CompletableFuture<DO> convertRecordAsync(SO outputSchema, DI inputRecord, WorkUnitState workUnitState)
      throws DataConversionException {
    // Convert DI to HttpOperation
    HttpOperation operation = generateHttpOperation(inputRecord, workUnitState);
    BufferedRecord<GenericRecord> bufferedRecord = new BufferedRecord<>(operation, WriteCallback.EMPTY);

    // Convert HttpOperation to RQ (the builder consumes a queue; we hand it a single record)
    Queue<BufferedRecord<GenericRecord>> buffer = new LinkedBlockingDeque<>();
    buffer.add(bufferedRecord);
    AsyncRequest<GenericRecord, RQ> request = this.requestBuilder.buildRequest(buffer);
    RQ rawRequest = request.getRawRequest();

    // Execute query and get response
    AsyncHttpJoinConverterContext context = new AsyncHttpJoinConverterContext(this, outputSchema, inputRecord, request);
    try {
      httpClient.sendAsyncRequest(rawRequest, context.getCallback());
    } catch (IOException e) {
      throw new DataConversionException(e);
    }
    return context.future;
  }

  public void close() throws IOException {
    this.httpClient.close();
  }
}
| 3,700 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/converter/AvroHttpJoinConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.converter;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.http.HttpOperation;
import org.apache.gobblin.http.HttpRequestResponseRecord;
import org.apache.gobblin.http.ResponseStatus;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.utils.HttpUtils;
/**
* A type of {@link HttpJoinConverter} with AVRO as input and output format
*
* Input:
* User provided record
*
* Output:
* User provided record plus http request & response record
*/
@Slf4j
public abstract class AvroHttpJoinConverter<RQ, RP> extends AsyncHttpJoinConverter<Schema, Schema, GenericRecord, GenericRecord, RQ, RP> {
  public static final String HTTP_REQUEST_RESPONSE_FIELD = "HttpRequestResponse";

  /**
   * Builds the output schema: a deep copy of every input field plus one extra
   * {@value #HTTP_REQUEST_RESPONSE_FIELD} field carrying the http request/response record.
   *
   * @throws SchemaConversionException if the input schema is null
   */
  @Override
  public Schema convertSchemaImpl(Schema inputSchema, WorkUnitState workUnitState)
      throws SchemaConversionException {
    if (inputSchema == null) {
      throw new SchemaConversionException("input schema is empty");
    }
    List<Schema.Field> fields = AvroUtils.deepCopySchemaFields(inputSchema);
    Schema.Field requestResponseField = new Schema.Field(HTTP_REQUEST_RESPONSE_FIELD,
        HttpRequestResponseRecord.getClassSchema(),
        "http output schema contains request url and return result", null);
    fields.add(requestResponseField);
    Schema combinedSchema = Schema.createRecord(inputSchema.getName(),
        inputSchema.getDoc() + " (Http request and response are contained)", inputSchema.getNamespace(), false);
    combinedSchema.setFields(fields);
    return combinedSchema;
  }

  /**
   * Extract user defined keys by looking at "gobblin.converter.http.keys"
   * If keys are defined, extract key-value pair from inputRecord and set it to HttpOperation
   * If keys are not defined, generate HttpOperation by HttpUtils.toHttpOperation
   */
  @Override
  protected HttpOperation generateHttpOperation (GenericRecord inputRecord, State state) {
    Map<String, String> keyAndValue = new HashMap<>();
    Optional<Iterable<String>> keys = getKeys(state);
    HttpOperation operation;
    if (keys.isPresent()) {
      for (String key : keys.get()) {
        // Fail with a descriptive message when a configured key has no value in the record,
        // instead of the bare NPE the unconditional toString() used to throw.
        String value = Objects.requireNonNull(inputRecord.get(key),
            "Input record is missing a value for configured key: " + key).toString();
        log.debug("Http join converter: key is {}, value is {}", key, value);
        keyAndValue.put(key, value);
      }
      operation = new HttpOperation();
      operation.setKeys(keyAndValue);
    } else {
      operation = HttpUtils.toHttpOperation(inputRecord);
    }
    return operation;
  }

  /** Reads the optional "gobblin.converter.http.keys" list from job state, empty if absent. */
  private Optional<Iterable<String>> getKeys (State state) {
    if (!state.contains(CONF_PREFIX + "keys")) {
      return Optional.empty();
    }
    Iterable<String> keys = state.getPropAsList(CONF_PREFIX + "keys");
    return Optional.ofNullable(keys);
  }

  /**
   * Copies every input field into the output record, then delegates filling of the
   * {@value #HTTP_REQUEST_RESPONSE_FIELD} field to the subclass.
   *
   * @throws DataConversionException if the output schema is null or the http data cannot be written
   */
  @Override
  public final GenericRecord convertRecordImpl(Schema outputSchema, GenericRecord inputRecord, RQ rawRequest, ResponseStatus status) throws DataConversionException {
    if (outputSchema == null) {
      throw new DataConversionException("output schema is empty");
    }
    GenericRecord outputRecord = new GenericData.Record(outputSchema);
    // NOTE(review): if the schema lacks HTTP_REQUEST_RESPONSE_FIELD this stays null and is
    // passed to fillHttpOutputData as-is — confirm subclasses tolerate that.
    Schema httpOutputSchema = null;
    for (Schema.Field field : outputSchema.getFields()) {
      if (!field.name().equals(HTTP_REQUEST_RESPONSE_FIELD)) {
        log.debug ("Copy {}", field.name());
        Object inputValue = inputRecord.get(field.name());
        outputRecord.put(field.name(), inputValue);
      } else {
        httpOutputSchema = field.schema();
      }
    }
    try {
      fillHttpOutputData (httpOutputSchema, outputRecord, rawRequest, status);
    } catch (IOException e) {
      throw new DataConversionException(e);
    }
    return outputRecord;
  }

  // Subclass hook: write the request/response payload into outputRecord under httpOutputSchema.
  protected abstract void fillHttpOutputData (Schema httpOutputSchema, GenericRecord outputRecord, RQ rawRequest,
      ResponseStatus status) throws IOException;
}
| 3,701 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/utils/HttpConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.utils;
/**
 * Shared string constants for the gobblin http module: configuration keys,
 * {@code HttpOperation} avro field names, status-code bucket labels, and event names.
 */
public final class HttpConstants {
  /** Configuration keys */
  public static final String URL_TEMPLATE = "urlTemplate";
  public static final String VERB = "verb";
  public static final String PROTOCOL_VERSION = "protocolVersion";
  public static final String CONTENT_TYPE = "contentType";

  /** HttpOperation avro record field names */
  public static final String KEYS = "keys";
  public static final String QUERY_PARAMS = "queryParams";
  public static final String HEADERS = "headers";
  public static final String BODY = "body";

  /** URI scheme prefix for D2 addressing */
  public static final String SCHEMA_D2 = "d2://";

  /** Status code */
  public static final String ERROR_CODE_WHITELIST = "errorCodeWhitelist";
  public static final String CODE_3XX = "3xx";
  public static final String CODE_4XX = "4xx";
  public static final String CODE_5XX = "5xx";

  /** Event constants */
  public static final String REQUEST = "request";
  public static final String STATUS_CODE = "statusCode";

  private HttpConstants() {
    // Constants holder; never instantiated.
  }
}
| 3,702 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/utils/HttpUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.utils;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.httpclient.util.URIUtil;
import org.apache.commons.lang3.text.StrSubstitutor;
import org.apache.http.client.utils.URIBuilder;
import com.google.common.base.Splitter;
import com.google.gson.Gson;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.http.HttpOperation;
import org.apache.gobblin.http.ResponseStatus;
import org.apache.gobblin.http.StatusType;
import org.apache.gobblin.util.AvroUtils;
/**
* Utilities to build gobblin http components
*/
@Slf4j
public class HttpUtils {
  private static final Gson GSON = new Gson();
  private static final Splitter LIST_SPLITTER = Splitter.on(",").trimResults().omitEmptyStrings();

  private HttpUtils() {
    // Static utility class; not instantiable.
  }

  /**
   * Convert the given {@link GenericRecord} to {@link HttpOperation}
   */
  public static HttpOperation toHttpOperation(GenericRecord record) {
    if (record instanceof HttpOperation) {
      return (HttpOperation) record;
    }

    HttpOperation.Builder builder = HttpOperation.newBuilder();
    // Copy each optional field only when present on the source record.
    Map<String, String> stringMap = AvroUtils.toStringMap(record.get(HttpConstants.KEYS));
    if (stringMap != null) {
      builder.setKeys(stringMap);
    }
    stringMap = AvroUtils.toStringMap(record.get(HttpConstants.QUERY_PARAMS));
    if (stringMap != null) {
      builder.setQueryParams(stringMap);
    }
    stringMap = AvroUtils.toStringMap(record.get(HttpConstants.HEADERS));
    if (stringMap != null) {
      builder.setHeaders(stringMap);
    }
    Object body = record.get(HttpConstants.BODY);
    if (body != null) {
      builder.setBody(body.toString());
    }
    return builder.build();
  }

  /**
   * Given a url template, interpolate with keys and build the URI after adding query parameters
   *
   * <p>
   * With url template: http://test.com/resource/(urn:${resourceId})/entities/(entity:${entityId}),
   * keys: { "resourceId": 123, "entityId": 456 }, queryParams: { "locale": "en_US" }, the output URI is:
   * http://test.com/resource/(urn:123)/entities/(entity:456)?locale=en_US
   * </p>
   *
   * @param urlTemplate url template
   * @param keys data map to interpolate url template
   * @param queryParams query parameters added to the url
   * @return a uri
   * @throws RuntimeException if the interpolated url is not a valid URI
   */
  public static URI buildURI(String urlTemplate, Map<String, String> keys, Map<String, String> queryParams) {
    // Compute base url
    String url = urlTemplate;
    if (keys != null && keys.size() != 0) {
      url = StrSubstitutor.replace(urlTemplate, keys);
    }

    try {
      URIBuilder uriBuilder = new URIBuilder(url);
      // Append query parameters
      if (queryParams != null && queryParams.size() != 0) {
        for (Map.Entry<String, String> entry : queryParams.entrySet()) {
          uriBuilder.addParameter(entry.getKey(), entry.getValue());
        }
      }
      return uriBuilder.build();
    } catch (URISyntaxException e) {
      throw new RuntimeException("Fail to build uri", e);
    }
  }

  /**
   * Get a {@link List<String>} from a comma separated string
   */
  public static List<String> getStringList(String list) {
    return LIST_SPLITTER.splitToList(list);
  }

  /**
   * Get the error code whitelist from a config; entries are lower-cased so "4XX" matches "4xx".
   */
  public static Set<String> getErrorCodeWhitelist(Config config) {
    String list = config.getString(HttpConstants.ERROR_CODE_WHITELIST).toLowerCase();
    return new HashSet<>(getStringList(list));
  }

  /**
   * Update {@link StatusType} of a {@link ResponseStatus} based on statusCode and error code white list.
   * A code in the whitelist (either the exact code or its "3xx"/"4xx"/"5xx" bucket) downgrades the
   * error to {@link StatusType#CONTINUE}. Codes below 300 leave the status untouched.
   *
   * @param status a status report after handling the response
   * @param statusCode a status code in http domain
   * @param errorCodeWhitelist a whitelist specifying which http error codes are tolerable
   */
  public static void updateStatusType(ResponseStatus status, int statusCode, Set<String> errorCodeWhitelist) {
    // FIX: use short-circuit && (was non-short-circuit &).
    if (statusCode >= 300 && statusCode < 500) {
      List<String> whitelist = new ArrayList<>(2);
      whitelist.add(Integer.toString(statusCode));
      // FIX: 400 is itself a client error; the previous "statusCode > 400" put 400 in the
      // 3xx bucket, so whitelisting "4xx" did not cover a 400 response.
      if (statusCode >= 400) {
        whitelist.add(HttpConstants.CODE_4XX);
      } else {
        whitelist.add(HttpConstants.CODE_3XX);
      }
      if (whitelist.stream().anyMatch(errorCodeWhitelist::contains)) {
        status.setType(StatusType.CONTINUE);
      } else {
        status.setType(StatusType.CLIENT_ERROR);
      }
    } else if (statusCode >= 500) {
      List<String> whitelist = Arrays.asList(Integer.toString(statusCode), HttpConstants.CODE_5XX);
      if (whitelist.stream().anyMatch(errorCodeWhitelist::contains)) {
        status.setType(StatusType.CONTINUE);
      } else {
        status.setType(StatusType.SERVER_ERROR);
      }
    }
  }

  /**
   * Convert a json encoded string to a Map
   *
   * @param jsonString json string
   * @return the Map encoded in the string
   */
  public static Map<String, Object> toMap(String jsonString) {
    // NOTE(review): map.getClass() erases the generic type, so this relies on Gson's raw
    // HashMap deserialization; a TypeToken would be type-safe — confirm before changing.
    Map<String, Object> map = new HashMap<>();
    return GSON.fromJson(jsonString, map.getClass());
  }

  /**
   * Build a throttling-limiter key ("protocol/host[/port]") from the configured url template.
   *
   * @throws IllegalStateException if the url template is not a well-formed URL
   */
  public static String createApacheHttpClientLimiterKey(Config config) {
    try {
      String urlTemplate = config.getString(HttpConstants.URL_TEMPLATE);
      URL url = new URL(urlTemplate);
      String key = url.getProtocol() + "/" + url.getHost();
      if (url.getPort() > 0) {
        key = key + "/" + url.getPort();
      }
      log.info("Get limiter key [" + key + "]");
      return key;
    } catch (MalformedURLException e) {
      throw new IllegalStateException("Cannot get limiter key.", e);
    }
  }

  /**
   * Convert D2 URL template into a string used for throttling limiter
   *
   * Valid:
   *   d2://host/${resource-id}
   *
   * Invalid:
   *   d2://host${resource-id}, because we cannot differentiate the host
   */
  public static String createR2ClientLimiterKey(Config config) {
    String urlTemplate = config.getString(HttpConstants.URL_TEMPLATE);
    try {
      // Escape template placeholders so the string parses as a URI.
      String escaped = URIUtil.encodeQuery(urlTemplate);
      URI uri = new URI(escaped);
      if (uri.getHost() == null) {
        throw new RuntimeException("Cannot get host part from uri" + urlTemplate);
      }
      String key = uri.getScheme() + "/" + uri.getHost();
      if (uri.getPort() > 0) {
        key = key + "/" + uri.getPort();
      }
      log.info("Get limiter key [" + key + "]");
      return key;
    } catch (Exception e) {
      throw new RuntimeException("Cannot create R2 limiter key", e);
    }
  }
}
| 3,703 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/writer/AvroHttpWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.util.Set;
import org.apache.avro.generic.GenericRecord;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.HttpClientBuilder;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.http.ApacheHttpClient;
import org.apache.gobblin.http.ApacheHttpResponseHandler;
import org.apache.gobblin.http.ApacheHttpRequestBuilder;
import org.apache.gobblin.utils.HttpConstants;
import org.apache.gobblin.utils.HttpUtils;
@Slf4j
public class AvroHttpWriterBuilder extends AsyncHttpWriterBuilder<GenericRecord, HttpUriRequest, CloseableHttpResponse> {
  // Default used when the job config does not set a content type.
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(HttpConstants.CONTENT_TYPE, "application/json")
          .build());

  /**
   * Wires this builder from the given config: an Apache http client, a request builder
   * driven by the configured url template / verb / content type, and a response handler
   * honoring the configured error-code whitelist.
   */
  @Override
  public AvroHttpWriterBuilder fromConfig(Config config) {
    Config resolved = config.withFallback(FALLBACK);

    this.client = new ApacheHttpClient(HttpClientBuilder.create(), resolved, broker);

    this.asyncRequestBuilder = new ApacheHttpRequestBuilder(
        resolved.getString(HttpConstants.URL_TEMPLATE),
        resolved.getString(HttpConstants.VERB),
        resolved.getString(HttpConstants.CONTENT_TYPE));

    this.responseHandler = new ApacheHttpResponseHandler(HttpUtils.getErrorCodeWhitelist(resolved));
    return this;
  }
}
| 3,704 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/writer/AsyncHttpWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.async.AsyncRequestBuilder;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.http.HttpClient;
import org.apache.gobblin.http.ResponseHandler;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.utils.HttpConstants;
import java.io.IOException;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* Base builder for async http writers
*
* @param <D> type of record
* @param <RQ> type of request
* @param <RP> type of response
*/
@Slf4j
public abstract class AsyncHttpWriterBuilder<D, RQ, RP> extends FluentDataWriterBuilder<Void, D, AsyncHttpWriterBuilder<D, RQ, RP>> {
  public static final String CONF_PREFIX = "gobblin.writer.http.";
  private static final String MAX_OUTSTANDING_WRITES = "maxOutstandingWrites";
  private static final String MAX_ATTEMPTS = "maxAttempts";

  // Defaults: empty error-code whitelist plus library defaults for write concurrency and retries.
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(HttpConstants.ERROR_CODE_WHITELIST, "")
          .put(MAX_OUTSTANDING_WRITES, AsyncWriterManager.MAX_OUTSTANDING_WRITES_DEFAULT)
          .put(MAX_ATTEMPTS, AsyncHttpWriter.DEFAULT_MAX_ATTEMPTS)
          .build());

  @Getter
  MetricContext metricContext;
  @Getter
  protected WorkUnitState state;
  // The three collaborators below must be populated by the subclass in fromConfig();
  // validate() checks them before build().
  @Getter
  protected HttpClient<RQ, RP> client = null;
  @Getter
  protected AsyncRequestBuilder<D, RQ> asyncRequestBuilder = null;
  @Getter
  protected ResponseHandler<RQ, RP> responseHandler = null;
  @Getter
  protected int queueCapacity = AbstractAsyncDataWriter.DEFAULT_BUFFER_CAPACITY;
  @Getter
  protected SharedResourcesBroker<GobblinScopeTypes> broker = null;
  @Getter
  protected int maxAttempts;
  private int maxOutstandingWrites;

  /**
   * For backward compatibility on how Fork creates writer, invoke fromState when it's called writeTo method.
   * @param destination
   * @return this
   */
  @Override
  public AsyncHttpWriterBuilder<D, RQ, RP> writeTo(Destination destination) {
    super.writeTo(destination);
    return fromState(destination.getProperties());
  }

  /**
   * Loads the "gobblin.writer.http." scoped config from the work-unit state (with FALLBACK
   * defaults) and delegates the client/request/response wiring to the subclass's fromConfig().
   * The state must be a WorkUnitState so a task broker is available.
   */
  AsyncHttpWriterBuilder<D, RQ, RP> fromState(State state) {
    if (!(state instanceof WorkUnitState)) {
      throw new IllegalStateException(String.format("AsyncHttpWriterBuilder requires a %s on construction.", WorkUnitState.class.getSimpleName()));
    }
    this.state = (WorkUnitState) state;
    this.metricContext = Instrumented.getMetricContext(this.state, AsyncHttpWriter.class);
    this.broker = this.state.getTaskBroker();
    Config config = ConfigBuilder.create().loadProps(state.getProperties(), CONF_PREFIX).build();
    config = config.withFallback(FALLBACK);
    this.maxOutstandingWrites = config.getInt(MAX_OUTSTANDING_WRITES);
    this.maxAttempts = config.getInt(MAX_ATTEMPTS);
    return fromConfig(config);
  }

  // Subclass hook that builds the http client, request builder and response handler.
  public abstract AsyncHttpWriterBuilder<D, RQ, RP> fromConfig(Config config);

  // Sanity-check that fromConfig() populated every required collaborator.
  protected void validate() {
    Preconditions.checkNotNull(getState(), "State is required for " + this.getClass().getSimpleName());
    Preconditions.checkNotNull(getClient(), "Client is required for " + this.getClass().getSimpleName());
    Preconditions.checkNotNull(getAsyncRequestBuilder(),
        "AsyncWriteRequestBuilder is required for " + this.getClass().getSimpleName());
    Preconditions
        .checkNotNull(getResponseHandler(), "ResponseHandler is required for " + this.getClass().getSimpleName());
  }

  /**
   * Validates the builder and wraps a new AsyncHttpWriter in an AsyncWriterManager
   * configured with the loaded concurrency/commit settings.
   */
  @Override
  public DataWriter<D> build()
      throws IOException {
    validate();
    return AsyncWriterManager.builder()
        .config(ConfigUtils.propertiesToConfig(getState().getProperties()))
        .asyncDataWriter(new AsyncHttpWriter(this))
        .maxOutstandingWrites(maxOutstandingWrites)
        .retriesEnabled(false) // retries are done in HttpBatchDispatcher
        .commitTimeoutMillis(10000L)
        .failureAllowanceRatio(0).build();
  }
}
| 3,705 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/writer/AsyncHttpWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Queue;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.FailureEventBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.async.AsyncRequest;
import org.apache.gobblin.async.AsyncRequestBuilder;
import org.apache.gobblin.async.BufferedRecord;
import org.apache.gobblin.async.DispatchException;
import org.apache.gobblin.http.HttpClient;
import org.apache.gobblin.http.ResponseHandler;
import org.apache.gobblin.http.ResponseStatus;
/**
* This class is an {@link AsyncHttpWriter} that writes data in a batch, which
* is sent via http request
*
* @param <D> type of record
* @param <RQ> type of request
* @param <RP> type of response
*/
@Slf4j
public class AsyncHttpWriter<D, RQ, RP> extends AbstractAsyncDataWriter<D> {
private static final Logger LOG = LoggerFactory.getLogger(AsyncHttpWriter.class);
private static final String ASYNC_REQUEST = "asyncRequest";
private static final String FATAL_ASYNC_HTTP_WRITE_EVENT = "fatalAsyncHttpWrite";
public static final int DEFAULT_MAX_ATTEMPTS = 3;
private final HttpClient<RQ, RP> httpClient;
private final ResponseHandler<RQ, RP> responseHandler;
private final AsyncRequestBuilder<D, RQ> requestBuilder;
private final int maxAttempts;
private final MetricContext context;
public AsyncHttpWriter(AsyncHttpWriterBuilder builder) {
super(builder.getQueueCapacity());
this.httpClient = builder.getClient();
this.requestBuilder = builder.getAsyncRequestBuilder();
this.responseHandler = builder.getResponseHandler();
this.maxAttempts = builder.getMaxAttempts();
this.context = Instrumented.getMetricContext(builder.getState(), AsyncHttpWriter.class);
}
@Override
protected void dispatch(Queue<BufferedRecord<D>> buffer) throws DispatchException {
AsyncRequest<D, RQ> asyncRequest = requestBuilder.buildRequest(buffer);
if (asyncRequest == null) {
return;
}
RQ rawRequest = asyncRequest.getRawRequest();
RP response;
int attempt = 0;
while (attempt < maxAttempts) {
try {
response = httpClient.sendRequest(rawRequest);
} catch (Exception e) {
// Retry
attempt++;
if (attempt == maxAttempts) {
LOG.error("Fail to send request");
LOG.info(asyncRequest.toString());
DispatchException de = new DispatchException("Write failed on IOException", e);
onFailure(asyncRequest, de);
throw de;
} else {
continue;
}
}
ResponseStatus status = responseHandler.handleResponse(asyncRequest, response);
switch (status.getType()) {
case OK:
// Write succeeds
onSuccess(asyncRequest, status);
return;
case CONTINUE:
LOG.debug("Http write continues");
LOG.debug(asyncRequest.toString());
onSuccess(asyncRequest, status);
return;
case CLIENT_ERROR:
// Client error. Fail!
LOG.error("Http write failed on client error");
LOG.info(asyncRequest.toString());
DispatchException clientExp = new DispatchException("Write failed on client error");
onFailure(asyncRequest, clientExp);
throw clientExp;
case SERVER_ERROR:
// Server side error. Retry
attempt++;
if (attempt == maxAttempts) {
LOG.error("Http write request failed on server error");
LOG.info(asyncRequest.toString());
DispatchException serverExp = new DispatchException("Write failed after " + maxAttempts + " attempts.");
onFailure(asyncRequest, serverExp);
throw serverExp;
}
}
}
}
/**
* Callback on sending the asyncRequest successfully
*/
protected void onSuccess(AsyncRequest<D, RQ> asyncRequest, ResponseStatus status) {
final WriteResponse response = WriteResponse.EMPTY;
for (final AsyncRequest.Thunk thunk: asyncRequest.getThunks()) {
WriteCallback callback = (WriteCallback) thunk.callback;
callback.onSuccess(new WriteResponse() {
@Override
public Object getRawResponse() {
return response.getRawResponse();
}
@Override
public String getStringResponse() {
return response.getStringResponse();
}
@Override
public long bytesWritten() {
return thunk.sizeInBytes;
}
});
}
}
/**
* Callback on failing to send the asyncRequest
*
* @deprecated Use {@link #onFailure(AsyncRequest, DispatchException)}
*/
@Deprecated
protected void onFailure(AsyncRequest<D, RQ> asyncRequest, Throwable throwable) {
for (AsyncRequest.Thunk thunk: asyncRequest.getThunks()) {
thunk.callback.onFailure(throwable);
}
}
protected void onFailure(AsyncRequest<D, RQ> asyncRequest, DispatchException exception) {
if (exception.isFatal()) {
// Report failure event
FailureEventBuilder failureEvent = new FailureEventBuilder(FATAL_ASYNC_HTTP_WRITE_EVENT);
failureEvent.setRootCause(exception);
failureEvent.addMetadata(ASYNC_REQUEST, asyncRequest.toString());
failureEvent.submit(context);
}
for (AsyncRequest.Thunk thunk : asyncRequest.getThunks()) {
thunk.callback.onFailure(exception);
}
}
@Override
public void close()
throws IOException {
try {
super.close();
} finally {
httpClient.close();
}
}
}
| 3,706 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/writer/R2RestWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import com.google.common.collect.ImmutableMap;
import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestResponse;
import com.linkedin.r2.transport.common.Client;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.r2.R2Client;
import org.apache.gobblin.r2.R2ClientFactory;
import org.apache.gobblin.r2.R2RestRequestBuilder;
import org.apache.gobblin.r2.R2RestResponseHandler;
import org.apache.gobblin.utils.HttpConstants;
import org.apache.gobblin.utils.HttpUtils;
import java.util.Set;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.generic.GenericRecord;
/**
 * An {@link AsyncHttpWriterBuilder} for R2 rest writers: wires up an {@link R2Client},
 * an {@link R2RestRequestBuilder} and an {@link R2RestResponseHandler} from config.
 */
@Slf4j
public class R2RestWriterBuilder extends AsyncHttpWriterBuilder<GenericRecord, RestRequest, RestResponse> {
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(HttpConstants.PROTOCOL_VERSION, "2.0.0")
          .build());

  @Override
  public R2RestWriterBuilder fromConfig(Config config) {
    Config resolved = config.withFallback(FALLBACK);
    this.client = createClient(resolved);
    // Request builder is driven entirely by url template, verb and protocol version
    asyncRequestBuilder = new R2RestRequestBuilder(
        resolved.getString(HttpConstants.URL_TEMPLATE),
        resolved.getString(HttpConstants.VERB),
        resolved.getString(HttpConstants.PROTOCOL_VERSION));
    responseHandler =
        new R2RestResponseHandler(HttpUtils.getErrorCodeWhitelist(resolved), metricContext);
    return this;
  }

  protected R2Client createClient(Config config) {
    String urlTemplate = config.getString(HttpConstants.URL_TEMPLATE);
    // D2 schema is selected by url prefix; everything else defaults to plain http
    R2ClientFactory.Schema schema = urlTemplate.startsWith(HttpConstants.SCHEMA_D2)
        ? R2ClientFactory.Schema.D2
        : R2ClientFactory.Schema.HTTP;
    Client rawClient = new R2ClientFactory(schema).createInstance(config);
    return new R2Client(rawClient, config, getBroker());
  }
}
| 3,707 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ResponseStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import lombok.Getter;
import lombok.Setter;
/**
* This class represents a result of handling a response
*/
public class ResponseStatus {
  // Classification of the handled response (OK, CONTINUE, CLIENT_ERROR, SERVER_ERROR)
  StatusType type;

  public ResponseStatus(StatusType type) {
    this.type = type;
  }

  /** Returns the classification of the handled response. */
  public StatusType getType() {
    return this.type;
  }

  /** Updates the classification of the handled response. */
  public void setType(StatusType type) {
    this.type = type;
  }
}
| 3,708 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ResponseHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import org.apache.gobblin.net.Request;
/**
* An interface to handle a response
*
* @param <RQ> type of raw request
* @param <RP> type of response
*/
public interface ResponseHandler<RQ, RP> {
  /**
   * Translates the raw response received for the given request into a
   * {@link ResponseStatus} that callers can use to decide how to proceed
   * (e.g. succeed, retry, or fail).
   *
   * @param request the request the response corresponds to
   * @param response the raw response to interpret
   * @return the classified status of the response
   */
  ResponseStatus handleResponse(Request<RQ> request, RP response);
}
| 3,709 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ApacheHttpRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.io.IOException;
import java.util.Arrays;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.util.EntityUtils;
import org.apache.gobblin.async.AsyncRequest;
/**
* A specific {@link AsyncRequest} related to a {@link HttpUriRequest} and its associated record information
*/
public class ApacheHttpRequest<D> extends AsyncRequest<D, HttpUriRequest> {

  /**
   * Renders the wrapped {@link HttpUriRequest} — uri, headers and, when the request
   * encloses an entity, its body — for logging/diagnostic purposes.
   */
  @Override
  public String toString() {
    HttpUriRequest rawRequest = getRawRequest();
    StringBuilder sb = new StringBuilder();
    sb.append("ApacheHttpRequest Info").append('\n');
    sb.append("type: HttpUriRequest").append('\n');
    sb.append("uri: ").append(rawRequest.getURI().toString()).append('\n');
    sb.append("headers: ");
    Arrays.stream(rawRequest.getAllHeaders())
        .forEach(h -> sb.append('[').append(h.getName()).append(':').append(h.getValue()).append("] "));
    sb.append('\n');
    if (rawRequest instanceof HttpEntityEnclosingRequest) {
      sb.append("body: ");
      try {
        sb.append(EntityUtils.toString(((HttpEntityEnclosingRequest) rawRequest).getEntity()));
      } catch (IOException e) {
        // Best effort: fall back to the failure message rather than aborting toString
        sb.append(e.getMessage());
      }
      sb.append('\n');
    }
    return sb.toString();
  }
}
| 3,710 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/StatusType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
/**
* Different types of response status
*/
public enum StatusType {
  /** Success. */
  OK,
  /** Something bad happened but it is safe to continue; consumers such as AsyncHttpWriter treat this as success. */
  CONTINUE,
  /** Error attributed to the client/request; treated as fatal (no retry) by AsyncHttpWriter. */
  CLIENT_ERROR,
  /** Error on the server side; AsyncHttpWriter retries requests with this status. */
  SERVER_ERROR
}
| 3,711 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ThrottledHttpClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.io.IOException;
import org.apache.commons.lang.exception.ExceptionUtils;
import com.codahale.metrics.Timer;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.async.Callback;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.broker.MetricContextFactory;
import org.apache.gobblin.metrics.broker.MetricContextKey;
import org.apache.gobblin.util.http.HttpLimiterKey;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.broker.SharedLimiterFactory;
/**
* A {@link HttpClient} for throttling calls to the underlying TX operation using the input
* {@link Limiter}.
*/
@Slf4j
public abstract class ThrottledHttpClient<RQ, RP> implements HttpClient<RQ, RP> {
  protected final Limiter limiter;
  protected final SharedResourcesBroker<GobblinScopeTypes> broker;
  @Getter
  private final Timer sendTimer;
  private final MetricContext metricContext;

  public ThrottledHttpClient (SharedResourcesBroker<GobblinScopeTypes> broker, String limiterKey) {
    this.broker = broker;
    try {
      this.limiter = broker.getSharedResource(new SharedLimiterFactory<>(), new HttpLimiterKey(limiterKey));
      this.metricContext = broker.getSharedResource(new MetricContextFactory<>(), new MetricContextKey());
      this.sendTimer = this.metricContext.timer(limiterKey);
    } catch (NotConfiguredException e) {
      log.error ("Limiter cannot be initialized due to exception " + ExceptionUtils.getFullStackTrace(e));
      throw new RuntimeException(e);
    }
  }

  /**
   * Sends the request synchronously after acquiring a throttling permit.
   * The whole call (permit wait + send) is timed by {@code sendTimer}.
   *
   * @throws IOException if no permit can be acquired, throttling is interrupted,
   *         or the underlying send fails
   */
  public final RP sendRequest(RQ request) throws IOException {
    final Timer.Context context = sendTimer.time();
    try {
      acquirePermitOrThrow();
      return sendRequestImpl(request);
    } finally {
      context.stop();
    }
  }

  /**
   * Sends the request asynchronously after acquiring a throttling permit.
   * The timer stops when the send is handed off, not when the callback fires.
   *
   * @throws IOException if no permit can be acquired, throttling is interrupted,
   *         or the underlying send fails
   */
  public final void sendAsyncRequest(RQ request, Callback<RP> callback) throws IOException {
    final Timer.Context context = sendTimer.time();
    try {
      acquirePermitOrThrow();
      sendAsyncRequestImpl(request, callback);
    } finally {
      context.stop();
    }
  }

  /** Blocks until one permit is acquired; converts failures to {@link IOException}. */
  private void acquirePermitOrThrow() throws IOException {
    try {
      if (limiter.acquirePermits(1) == null) {
        throw new IOException("Acquired permits return null");
      }
      log.debug("Acquired permits successfully");
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers can still observe the interruption,
      // and preserve the original exception as the cause
      Thread.currentThread().interrupt();
      throw new IOException("Throttling is interrupted", e);
    }
  }

  public abstract RP sendRequestImpl (RQ request) throws IOException;

  public abstract void sendAsyncRequestImpl (RQ request, Callback<RP> callback) throws IOException;
}
| 3,712 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/HttpClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.io.Closeable;
import java.io.IOException;
import org.apache.gobblin.async.Callback;
/**
* An interface to send a request
*
* @param <RQ> type of request
* @param <RP> type of response
*/
public interface HttpClient<RQ, RP> extends Closeable {
  /**
   * Send request synchronously.
   *
   * @param request the raw request to send
   * @return the raw response
   * @throws IOException if the request cannot be sent
   */
  RP sendRequest(RQ request) throws IOException;

  /**
   * Send request asynchronously, notifying {@code callback} with the result.
   * The default implementation does not support asynchronous sends.
   *
   * @throws UnsupportedOperationException always, unless overridden by the implementation
   */
  default void sendAsyncRequest(RQ request, Callback<RP> callback) throws IOException {
    throw new UnsupportedOperationException();
  }
}
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ApacheHttpResponseHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.util.EntityUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.net.Request;
import org.apache.gobblin.utils.HttpUtils;
/**
* Basic logic to handle a {@link HttpResponse} from a http service
*
* <p>
* A more specific handler understands the content inside the response and is able to customize
* the behavior as needed. For example: parsing the entity from a get response, extracting data
* sent from the service for a post response, executing more detailed status code handling, etc.
* </p>
*/
@Slf4j
public class ApacheHttpResponseHandler<RP extends HttpResponse> implements ResponseHandler<HttpUriRequest, RP> {
  private final Set<String> errorCodeWhitelist;

  public ApacheHttpResponseHandler() {
    this(new HashSet<>());
  }

  public ApacheHttpResponseHandler(Set<String> errorCodeWhitelist) {
    this.errorCodeWhitelist = errorCodeWhitelist;
  }

  /**
   * Classifies the response by status code and, on success, captures its entity bytes
   * and content type. The entity (if any) is always consumed afterwards.
   *
   * <p>Responses with no entity, and entities with no Content-Type header, are tolerated
   * instead of triggering a {@link NullPointerException}.
   */
  @Override
  public ApacheHttpResponseStatus handleResponse(Request<HttpUriRequest> request, RP response) {
    ApacheHttpResponseStatus status = new ApacheHttpResponseStatus(StatusType.OK);
    int statusCode = response.getStatusLine().getStatusCode();
    status.setStatusCode(statusCode);
    HttpUtils.updateStatusType(status, statusCode, errorCodeWhitelist);

    HttpEntity entity = response.getEntity();
    if (status.getType() == StatusType.OK) {
      // Both the entity and its Content-Type header are optional in a valid response
      if (entity != null) {
        status.setContent(getEntityAsByteArray(entity));
        if (entity.getContentType() != null) {
          status.setContentType(entity.getContentType().getValue());
        }
      }
    } else {
      log.info("Receive an unsuccessful response with status code: " + statusCode);
    }

    if (entity != null) {
      // Fully consume the entity so the underlying connection can be released/reused
      consumeEntity(entity);
    }
    return status;
  }

  /** Reads the entity fully into memory; wraps IO failures in a RuntimeException. */
  private byte[] getEntityAsByteArray(HttpEntity entity) {
    try {
      return EntityUtils.toByteArray(entity);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /** Ensures the entity content is consumed; wraps IO failures in a RuntimeException. */
  protected void consumeEntity(HttpEntity entity) {
    try {
      EntityUtils.consume(entity);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
| 3,714 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ApacheHttpRequestBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.net.URI;
import java.util.Map;
import java.util.Queue;
import org.apache.avro.generic.GenericRecord;
import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import org.apache.gobblin.utils.HttpUtils;
import org.apache.gobblin.async.AsyncRequestBuilder;
import org.apache.gobblin.async.BufferedRecord;
/**
* Build {@link HttpUriRequest} that can talk to http services. Now only text/plain and application/json are supported
*
* <p>
* This basic implementation builds a write request from a single record. However, it has the extensibility to build
* a write request from batched records, depending on specific implementation of {@link #buildRequest(Queue)}
* </p>
*/
public class ApacheHttpRequestBuilder implements AsyncRequestBuilder<GenericRecord, HttpUriRequest> {
  private static final Logger LOG = LoggerFactory.getLogger(ApacheHttpRequestBuilder.class);

  private final String urlTemplate;
  private final String verb;
  private final ContentType contentType;

  public ApacheHttpRequestBuilder(String urlTemplate, String verb, String contentType) {
    this.urlTemplate = urlTemplate;
    this.verb = verb;
    this.contentType = createContentType(contentType);
  }

  /** Builds a write request from the next single record in the buffer. */
  @Override
  public ApacheHttpRequest<GenericRecord> buildRequest(Queue<BufferedRecord<GenericRecord>> buffer) {
    return buildWriteRequest(buffer.poll());
  }

  /**
   * Build a write request from a single record.
   *
   * @return the request, or {@code null} when there is no record or no valid URI
   */
  private ApacheHttpRequest<GenericRecord> buildWriteRequest(BufferedRecord<GenericRecord> record) {
    if (record == null) {
      return null;
    }
    ApacheHttpRequest<GenericRecord> request = new ApacheHttpRequest<>();
    HttpOperation httpOperation = HttpUtils.toHttpOperation(record.getRecord());

    // Set uri
    URI uri = HttpUtils.buildURI(urlTemplate, httpOperation.getKeys(), httpOperation.getQueryParams());
    if (uri == null) {
      return null;
    }

    RequestBuilder builder = RequestBuilder.create(verb.toUpperCase());
    builder.setUri(uri);

    // Set headers
    Map<String, String> headers = httpOperation.getHeaders();
    if (headers != null && headers.size() != 0) {
      for (Map.Entry<String, String> header : headers.entrySet()) {
        builder.setHeader(header.getKey(), header.getValue());
      }
    }

    // Add payload
    int bytesWritten = addPayload(builder, httpOperation.getBody());
    if (bytesWritten == -1) {
      throw new RuntimeException("Fail to write payload into request");
    }

    request.setRawRequest(build(builder));
    request.markRecord(record, bytesWritten);
    return request;
  }

  /**
   * Add payload to request. By default, payload is sent as application/json.
   *
   * @return the number of bytes written (0 for an empty payload). Uses the entity's
   *         encoded length rather than {@code payload.length()}, which counts
   *         characters and under-reports multi-byte encoded payloads.
   */
  protected int addPayload(RequestBuilder builder, String payload) {
    if (payload == null || payload.length() == 0) {
      return 0;
    }
    builder.setHeader(HttpHeaders.CONTENT_TYPE, contentType.getMimeType());
    StringEntity entity = new StringEntity(payload, contentType);
    builder.setEntity(entity);
    return (int) entity.getContentLength();
  }

  /**
   * Maps a mime type string to a supported {@link ContentType}.
   *
   * @throws RuntimeException for any mime type other than application/json or text/plain
   */
  public static ContentType createContentType(String contentType) {
    switch (contentType) {
      case "application/json":
        return ContentType.APPLICATION_JSON;
      case "text/plain":
        return ContentType.TEXT_PLAIN;
      default:
        throw new RuntimeException("contentType not supported: " + contentType);
    }
  }

  /**
   * Add this method for argument capture in test
   */
  @VisibleForTesting
  public HttpUriRequest build(RequestBuilder builder) {
    return builder.build();
  }
}
| 3,715 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ApacheHttpResponseStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import lombok.Getter;
import lombok.Setter;
public class ApacheHttpResponseStatus extends ResponseStatus {
  // HTTP status code of the handled response
  private int statusCode;
  // Entity bytes captured from a successful response, if any
  private byte[] content = null;
  // Content-Type header value of the response entity, if any
  private String contentType = null;

  public ApacheHttpResponseStatus(StatusType type) {
    super(type);
  }

  public int getStatusCode() {
    return this.statusCode;
  }

  public void setStatusCode(int statusCode) {
    this.statusCode = statusCode;
  }

  public byte[] getContent() {
    return this.content;
  }

  public void setContent(byte[] content) {
    this.content = content;
  }

  public String getContentType() {
    return this.contentType;
  }

  public void setContentType(String contentType) {
    this.contentType = contentType;
  }
}
| 3,716 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ApacheHttpAsyncClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager;
import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor;
import org.apache.http.nio.conn.NHttpClientConnectionManager;
import org.apache.http.nio.reactor.ConnectingIOReactor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.async.Callback;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.utils.HttpUtils;
/**
* An asynchronous {@link HttpClient} which sends {@link HttpUriRequest} and registers a callback.
* It encapsulates a {@link CloseableHttpClient} instance to send the {@link HttpUriRequest}
*
* {@link CloseableHttpAsyncClient} is used
*/
@Slf4j
public class ApacheHttpAsyncClient extends ThrottledHttpClient<HttpUriRequest, HttpResponse> {
private static final Logger LOG = LoggerFactory.getLogger(ApacheHttpClient.class);
public static final String HTTP_CONN_MANAGER = "connMgrType";
public static final String POOLING_CONN_MANAGER_MAX_TOTAL_CONN = "connMgr.pooling.maxTotalConn";
public static final String POOLING_CONN_MANAGER_MAX_PER_CONN = "connMgr.pooling.maxPerConn";
public static final String REQUEST_TIME_OUT_MS_KEY = "reqTimeout";
public static final String CONNECTION_TIME_OUT_MS_KEY = "connTimeout";
private static final Config FALLBACK =
ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
.put(REQUEST_TIME_OUT_MS_KEY, TimeUnit.SECONDS.toMillis(10L))
.put(CONNECTION_TIME_OUT_MS_KEY, TimeUnit.SECONDS.toMillis(10L))
.put(HTTP_CONN_MANAGER, ApacheHttpClient.ConnManager.POOLING.name())
.put(POOLING_CONN_MANAGER_MAX_TOTAL_CONN, 20)
.put(POOLING_CONN_MANAGER_MAX_PER_CONN, 2)
.build());
private final CloseableHttpAsyncClient client;
/**
 * Creates and starts an async http client configured with request/connection timeouts and a
 * connection manager derived from {@code config} (falling back to {@code FALLBACK} defaults).
 *
 * @throws RuntimeException if the connection manager cannot be created
 */
public ApacheHttpAsyncClient(HttpAsyncClientBuilder builder, Config config, SharedResourcesBroker<GobblinScopeTypes> broker) {
  super (broker, HttpUtils.createApacheHttpClientLimiterKey(config));
  config = config.withFallback(FALLBACK);

  RequestConfig requestConfig = RequestConfig.copy(RequestConfig.DEFAULT)
      .setSocketTimeout(config.getInt(REQUEST_TIME_OUT_MS_KEY))
      .setConnectTimeout(config.getInt(CONNECTION_TIME_OUT_MS_KEY))
      .setConnectionRequestTimeout(config.getInt(CONNECTION_TIME_OUT_MS_KEY))
      .build();
  try {
    builder.disableCookieManagement().useSystemProperties().setDefaultRequestConfig(requestConfig);
    builder.setConnectionManager(getNHttpConnManager(config));
    client = builder.build();
    // The async client must be started before it can execute requests
    client.start();
  } catch (IOException e) {
    // Preserve the root cause instead of discarding it
    throw new RuntimeException("ApacheHttpAsyncClient cannot be initialized", e);
  }
}
private NHttpClientConnectionManager getNHttpConnManager(Config config) throws IOException {
NHttpClientConnectionManager httpConnManager;
String connMgrStr = config.getString(HTTP_CONN_MANAGER);
switch (ApacheHttpClient.ConnManager.valueOf(connMgrStr.toUpperCase())) {
case POOLING:
ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor();
PoolingNHttpClientConnectionManager poolingConnMgr = new PoolingNHttpClientConnectionManager(ioReactor);
poolingConnMgr.setMaxTotal(config.getInt(POOLING_CONN_MANAGER_MAX_TOTAL_CONN));
poolingConnMgr.setDefaultMaxPerRoute(config.getInt(POOLING_CONN_MANAGER_MAX_PER_CONN));
httpConnManager = poolingConnMgr;
break;
default:
throw new IllegalArgumentException(connMgrStr + " is not supported");
}
LOG.info("Using " + httpConnManager.getClass().getSimpleName());
return httpConnManager;
}
/**
* A helper class which contains a latch so that we can achieve blocking calls even using
* http async client APIs. Same can be achieved by invoking {@link Future#get()} returned by
* {@link org.apache.http.nio.client.HttpAsyncClient#execute(HttpUriRequest, FutureCallback)}.
* However this method seems to have a synchronization problem. It seems like {@link Future#get()}
* is not fully blocked before callback is triggered.
*/
@Getter
private static class SyncHttpResponseCallback implements FutureCallback<HttpResponse> {
private HttpUriRequest request = null;
private HttpResponse response = null;
private Exception exception = null;
private final CountDownLatch latch = new CountDownLatch(1);
public SyncHttpResponseCallback(HttpUriRequest request) {
this.request = request;
}
@Override
public void completed(HttpResponse result) {
log.info ("Sync apache version request: {}, statusCode: {}", request, result.getStatusLine().getStatusCode());
response = result;
latch.countDown();
}
@Override
public void failed(Exception ex) {
exception = ex;
latch.countDown();
}
@Override
public void cancelled() {
throw new UnsupportedOperationException("Should not be cancelled");
}
public void await() throws InterruptedException {
latch.await();
}
}
@Override
public HttpResponse sendRequestImpl(HttpUriRequest request) throws IOException {
SyncHttpResponseCallback callback = new SyncHttpResponseCallback(request);
this.client.execute(request, callback);
try {
callback.await();
if (callback.getException() != null) {
throw new IOException(callback.getException());
}
return callback.getResponse();
} catch (InterruptedException e) {
throw new IOException(e);
}
}
@Override
public void sendAsyncRequestImpl(HttpUriRequest request, Callback<HttpResponse> callback) throws IOException {
this.client.execute(request, new FutureCallback<HttpResponse>() {
@Override
public void completed(HttpResponse result) {
callback.onSuccess(result);
}
@Override
public void failed(Exception ex) {
callback.onFailure(ex);
}
@Override
public void cancelled() {
throw new UnsupportedOperationException();
}
});
}
@Override
public void close() throws IOException {
client.close();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.http;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.conn.HttpClientConnectionManager;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.async.Callback;
import org.apache.gobblin.broker.gobblin_scopes.GobblinScopeTypes;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.utils.HttpUtils;
/**
* A synchronous {@link HttpClient} that sends {@link HttpUriRequest} and gets {@link CloseableHttpResponse}.
* It encapsulates a {@link CloseableHttpClient} instance to send the {@link HttpUriRequest}
*
* {@link CloseableHttpClient} is used
*/
@Slf4j
public class ApacheHttpClient extends ThrottledHttpClient<HttpUriRequest, CloseableHttpResponse> {
  public static final String HTTP_CONN_MANAGER = "connMgrType";
  public static final String POOLING_CONN_MANAGER_MAX_TOTAL_CONN = "connMgr.pooling.maxTotalConn";
  public static final String POOLING_CONN_MANAGER_MAX_PER_CONN = "connMgr.pooling.maxPerConn";
  public static final String REQUEST_TIME_OUT_MS_KEY = "reqTimeout";
  public static final String CONNECTION_TIME_OUT_MS_KEY = "connTimeout";

  /** Supported connection manager flavors, selected via {@link #HTTP_CONN_MANAGER}. */
  public enum ConnManager {
    POOLING,
    BASIC
  }

  /** Defaults: 10s request/connect timeouts, BASIC connection manager, pool sizes 20 total / 2 per-route. */
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(REQUEST_TIME_OUT_MS_KEY, TimeUnit.SECONDS.toMillis(10L))
          .put(CONNECTION_TIME_OUT_MS_KEY, TimeUnit.SECONDS.toMillis(10L))
          .put(HTTP_CONN_MANAGER, ConnManager.BASIC.name())
          .put(POOLING_CONN_MANAGER_MAX_TOTAL_CONN, 20)
          .put(POOLING_CONN_MANAGER_MAX_PER_CONN, 2)
          .build());

  private final CloseableHttpClient client;

  /**
   * Build the underlying {@link CloseableHttpClient}.
   *
   * @param builder client builder to configure
   * @param config client configuration; merged with {@link #FALLBACK} defaults
   * @param broker shared resources broker used by the throttling parent
   */
  public ApacheHttpClient(HttpClientBuilder builder, Config config, SharedResourcesBroker<GobblinScopeTypes> broker) {
    super(broker, HttpUtils.createApacheHttpClientLimiterKey(config));
    config = config.withFallback(FALLBACK);

    RequestConfig requestConfig = RequestConfig.copy(RequestConfig.DEFAULT)
        .setSocketTimeout(config.getInt(REQUEST_TIME_OUT_MS_KEY))
        .setConnectTimeout(config.getInt(CONNECTION_TIME_OUT_MS_KEY))
        .setConnectionRequestTimeout(config.getInt(CONNECTION_TIME_OUT_MS_KEY))
        .build();

    builder.disableCookieManagement().useSystemProperties().setDefaultRequestConfig(requestConfig);
    builder.setConnectionManager(getHttpConnManager(config));
    client = builder.build();
  }

  /** Execute the request synchronously and return the raw response. */
  @Override
  public CloseableHttpResponse sendRequestImpl(HttpUriRequest request) throws IOException {
    return client.execute(request);
  }

  @Override
  public void sendAsyncRequestImpl(HttpUriRequest request, Callback<CloseableHttpResponse> callback) throws IOException {
    throw new UnsupportedOperationException("ApacheHttpClient doesn't support asynchronous send");
  }

  /** Create the blocking connection manager described by {@code config}. */
  private HttpClientConnectionManager getHttpConnManager(Config config) {
    HttpClientConnectionManager httpConnManager;
    String connMgrStr = config.getString(HTTP_CONN_MANAGER);
    switch (ConnManager.valueOf(connMgrStr.toUpperCase())) {
      case BASIC:
        httpConnManager = new BasicHttpClientConnectionManager();
        break;
      case POOLING:
        PoolingHttpClientConnectionManager poolingConnMgr = new PoolingHttpClientConnectionManager();
        poolingConnMgr.setMaxTotal(config.getInt(POOLING_CONN_MANAGER_MAX_TOTAL_CONN));
        poolingConnMgr.setDefaultMaxPerRoute(config.getInt(POOLING_CONN_MANAGER_MAX_PER_CONN));
        httpConnManager = poolingConnMgr;
        break;
      default:
        throw new IllegalArgumentException(connMgrStr + " is not supported");
    }
    // Use the lombok-generated log; the previous hand-declared LOG field duplicated @Slf4j
    log.info("Using {}", httpConnManager.getClass().getSimpleName());
    return httpConnManager;
  }

  @Override
  public void close() throws IOException {
    client.close();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import com.linkedin.common.callback.Callbacks;
import com.linkedin.common.util.None;
import com.linkedin.r2.message.rest.RestException;
import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestResponse;
import com.linkedin.r2.transport.common.Client;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.async.Callback;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.http.ThrottledHttpClient;
import org.apache.gobblin.utils.HttpUtils;
@Slf4j
public class R2Client extends ThrottledHttpClient<RestRequest, RestResponse> {
  private final Client client;

  /**
   * @param client underlying R2 {@link Client} used for transport
   * @param config client configuration used to derive the throttling limiter key
   * @param broker shared resources broker used by the throttling parent
   */
  public R2Client(Client client, Config config, SharedResourcesBroker broker) {
    super(broker, HttpUtils.createR2ClientLimiterKey(config));
    this.client = client;
  }

  /**
   * Send {@code request} and block for the response.
   *
   * <p>If the service reported an error as a {@link RestException}, its embedded response is
   * returned so the caller can inspect the status; any other failure surfaces as {@link IOException}.
   */
  @Override
  public RestResponse sendRequestImpl(RestRequest request)
      throws IOException {
    Future<RestResponse> responseFuture = client.restRequest(request);
    RestResponse response;
    try {
      response = responseFuture.get();
    } catch (InterruptedException e) {
      // Restore the interrupt flag before translating to IOException
      Thread.currentThread().interrupt();
      throw new IOException(e);
    } catch (ExecutionException e) {
      // The service may choose to throw an exception as a way to report error
      Throwable t = e.getCause();
      if (t instanceof RestException) {
        response = ((RestException) t).getResponse();
      } else {
        throw new IOException(e);
      }
    }
    return response;
  }

  /** Send {@code request} asynchronously, adapting the R2 callback to Gobblin's {@link Callback}. */
  @Override
  public void sendAsyncRequestImpl(RestRequest request, Callback<RestResponse> callback)
      throws IOException {
    log.debug("Request URI : {} ", request.getURI());
    client.restRequest(request, new com.linkedin.common.callback.Callback<RestResponse>() {
      @Override
      public void onError(Throwable e) {
        callback.onFailure(e);
      }

      @Override
      public void onSuccess(RestResponse result) {
        callback.onSuccess(result);
      }
    });
  }

  @Override
  public void close()
      throws IOException {
    client.shutdown(Callbacks.<None>empty());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import com.linkedin.data.ByteString;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.http.ResponseStatus;
import org.apache.gobblin.http.StatusType;
/**
 * {@link ResponseStatus} flavor for R2 responses, carrying the numeric status code
 * plus the response entity and its content type when available.
 */
@Getter @Setter
public class R2ResponseStatus extends ResponseStatus {
  private int statusCode;
  // Java reference fields default to null; the explicit "= null" initializers were redundant
  private ByteString content;
  private String contentType;

  public R2ResponseStatus(StatusType type) {
    super(type);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.linkedin.d2.balancer.*;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValue;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLParameters;
import org.apache.gobblin.security.ssl.SSLContextFactory;
/**
* Create a corresponding {@link Client} based on different {@link Schema}
*/
public class R2ClientFactory {
  public static final String SSL_ENABLED = "ssl";
  public static final String PROPERTIES = "properties";
  public static final String CLIENT_SERVICES_CONFIG = "clientServicesConfig";
  public static final String ZOOKEEPER_HOSTS = "zkHosts";

  /** SSL is off by default for both plain-http and d2 clients. */
  private static final Config FALLBACK =
      ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
          .put(SSL_ENABLED, false)
          .put("d2.ssl", false)
          .build());

  public enum Schema {
    HTTP,
    D2
  }

  // Immutable after construction; determines which kind of client createInstance builds
  private final Schema schema;

  public R2ClientFactory(Schema schema) {
    this.schema = schema;
  }

  /**
   * Given a {@link Config}, create an instance of {@link Client}
   *
   * <p>
   * A sample configuration for https based client is:
   * <br> ssl=true
   * <br> keyStoreFilePath=/path/to/key/store
   * <br> keyStorePassword=password
   * <br> keyStoreType=PKCS12
   * <br> trustStoreFilePath=/path/to/trust/store
   * <br> trustStorePassword=password
   *
   * <p>
   * Http configurations(see {@link HttpClientFactory}) like http.responseMaxSize, http.idleTimeout, etc, can
   * be set as:
   * <br> properties.http.responseMaxSize=10000
   * <br> properties.http.idleTimeout=3000
   * </p>
   *
   * <p>
   * A sample configuration for a secured d2 client is:
   * <br> d2.zkHosts=zk1.host.com:12000
   * <br> d2.ssl=true
   * <br> d2.keyStoreFilePath=/path/to/key/store
   * <br> d2.keyStorePassword=password
   * <br> d2.keyStoreType=PKCS12
   * <br> d2.trustStoreFilePath=/path/to/trust/store
   * <br> d2.trustStorePassword=password
   *
   * <p>
   * Http configurations(see {@link HttpClientFactory}) like http.responseMaxSize, http.idleTimeout, etc, can
   * be set as:
   * <br> d2.clientServicesConfig.[client_name].http.responseMaxSize=10000
   * <br> d2.clientServicesConfig.[client_name].http.idleTimeout=3000
   * </p>
   *
   * @param srcConfig configuration
   * @return an instance of {@link Client}
   */
  public Client createInstance(Config srcConfig) {
    Config config = srcConfig.withFallback(FALLBACK);
    switch (schema) {
      case HTTP:
        return createHttpClient(config);
      case D2:
        String confPrefix = schema.name().toLowerCase();
        if (config.hasPath(confPrefix)) {
          Config d2Config = config.getConfig(confPrefix);
          return createD2Client(d2Config);
        } else {
          throw new ConfigException.Missing(confPrefix);
        }
      default:
        throw new RuntimeException("Schema not supported: " + schema.name());
    }
  }

  /** Build a plain-http R2 client, optionally configured with an SSL context from {@code config}. */
  private Client createHttpClient(Config config) {
    boolean isSSLEnabled = config.getBoolean(SSL_ENABLED);
    SSLContext sslContext = null;
    SSLParameters sslParameters = null;
    if (isSSLEnabled) {
      sslContext = SSLContextFactory.createInstance(config);
      sslParameters = sslContext.getDefaultSSLParameters();
    }

    Map<String, Object> properties = new HashMap<>();
    properties.put(HttpClientFactory.HTTP_SSL_CONTEXT, sslContext);
    properties.put(HttpClientFactory.HTTP_SSL_PARAMS, sslParameters);
    if (config.hasPath(PROPERTIES)) {
      properties.putAll(toMap(config.getConfig(PROPERTIES)));
    }
    return new R2HttpClientProxy(new HttpClientFactory(), properties);
  }

  /** Build a d2 client; requires {@link #ZOOKEEPER_HOSTS} and honors optional SSL and per-service configs. */
  private Client createD2Client(Config config) {
    String zkhosts = config.getString(ZOOKEEPER_HOSTS);
    if (zkhosts == null || zkhosts.isEmpty()) {
      throw new ConfigException.Missing(ZOOKEEPER_HOSTS);
    }
    D2ClientBuilder d2Builder = new D2ClientBuilder().setZkHosts(zkhosts);

    boolean isSSLEnabled = config.getBoolean(SSL_ENABLED);
    if (isSSLEnabled) {
      d2Builder.setIsSSLEnabled(true);
      SSLContext sslContext = SSLContextFactory.createInstance(config);
      d2Builder.setSSLContext(sslContext);
      d2Builder.setSSLParameters(sslContext.getDefaultSSLParameters());
    }

    if (config.hasPath(CLIENT_SERVICES_CONFIG)) {
      Config clientServiceConfig = config.getConfig(CLIENT_SERVICES_CONFIG);
      Map<String, Map<String, Object>> result = new HashMap<>();
      for (String key : clientServiceConfig.root().keySet()) {
        Config value = clientServiceConfig.getConfig(key);
        result.put(key, toMap(value));
      }
      d2Builder.setClientServicesConfig(result);
    }

    return new D2ClientProxy(d2Builder, isSSLEnabled);
  }

  /** Flatten a {@link Config} into a map of unwrapped values. */
  private static Map<String, Object> toMap(Config config) {
    Map<String, Object> map = new HashMap<>();
    for (Map.Entry<String, ConfigValue> entry : config.entrySet()) {
      map.put(entry.getKey(), entry.getValue().unwrapped());
    }
    return map;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.SettableFuture;
import com.linkedin.common.callback.Callback;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.d2.balancer.D2Client;
import com.linkedin.d2.balancer.D2ClientBuilder;
import com.linkedin.d2.balancer.Facilities;
import com.linkedin.r2.message.RequestContext;
import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestResponse;
import com.linkedin.r2.message.stream.StreamRequest;
import com.linkedin.r2.message.stream.StreamResponse;
import com.linkedin.r2.transport.common.TransportClientFactory;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
/**
* The proxy takes care of {@link TransportClientFactory}s shutdown
*/
public class D2ClientProxy implements D2Client {
private final D2Client d2Client;
private final Collection<TransportClientFactory> clientFactories;
D2ClientProxy(D2ClientBuilder builder, boolean isSSLEnabled) {
if (isSSLEnabled) {
Map<String, TransportClientFactory> factoryMap = createTransportClientFactories();
builder.setClientFactories(factoryMap);
clientFactories = factoryMap.values();
} else {
clientFactories = new ArrayList<>();
}
d2Client = buildClient(builder);
}
@Override
public Facilities getFacilities() {
return d2Client.getFacilities();
}
@Override
public void start(Callback<None> callback) {
d2Client.start(callback);
}
@Override
public Future<RestResponse> restRequest(RestRequest request) {
return d2Client.restRequest(request);
}
@Override
public Future<RestResponse> restRequest(RestRequest request, RequestContext requestContext) {
return d2Client.restRequest(request, requestContext);
}
@Override
public void restRequest(RestRequest request, Callback<RestResponse> callback) {
d2Client.restRequest(request, callback);
}
@Override
public void restRequest(RestRequest request, RequestContext requestContext, Callback<RestResponse> callback) {
d2Client.restRequest(request, requestContext, callback);
}
@Override
public void streamRequest(StreamRequest request, Callback<StreamResponse> callback) {
d2Client.streamRequest(request, callback);
}
@Override
public void streamRequest(StreamRequest request, RequestContext requestContext, Callback<StreamResponse> callback) {
d2Client.streamRequest(request, requestContext, callback);
}
@Override
public Map<String, Object> getMetadata(URI uri) {
return d2Client.getMetadata(uri);
}
@Override
public void shutdown(Callback<None> callback) {
d2Client.shutdown(callback);
for (TransportClientFactory clientFactory : clientFactories) {
clientFactory.shutdown(new FutureCallback<>());
}
}
private D2Client buildClient(D2ClientBuilder builder) {
D2Client d2 = builder.build();
final SettableFuture<None> d2ClientFuture = SettableFuture.create();
d2.start(new Callback<None>() {
@Override
public void onError(Throwable e) {
d2ClientFuture.setException(e);
}
@Override
public void onSuccess(None none) {
d2ClientFuture.set(none);
}
});
try {
// Synchronously wait for d2 to start
d2ClientFuture.get();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
return d2;
}
private static Map<String, TransportClientFactory> createTransportClientFactories() {
return ImmutableMap.<String, TransportClientFactory>builder()
.put("http", new HttpClientFactory())
//It won't route to SSL port without this.
.put("https", new HttpClientFactory())
.build();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import java.util.Map;
import com.linkedin.common.callback.Callback;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.r2.transport.common.TransportClientFactory;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
/**
* The proxy takes care of {@link TransportClientFactory} shutdown
*/
public class R2HttpClientProxy extends TransportClientAdapter {
  // Retained so the factory can be shut down together with the client it produced
  private final TransportClientFactory factory;

  /**
   * Wrap a client created by {@code factory} with the given transport {@code properties},
   * keeping a handle to the factory for coordinated shutdown.
   */
  public R2HttpClientProxy(TransportClientFactory factory, Map<String, Object> properties) {
    super(factory.getClient(properties));
    this.factory = factory;
  }

  /**
   * Shut down the adapted client, then the owning factory — the finally block ensures the
   * factory is released even if the client shutdown throws.
   */
  @Override
  public void shutdown(Callback<None> callback) {
    try {
      super.shutdown(callback);
    } finally {
      factory.shutdown(new FutureCallback<>());
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
import java.util.Queue;
import org.apache.avro.generic.GenericRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.linkedin.data.DataMap;
import com.linkedin.data.codec.JacksonDataCodec;
import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestRequestBuilder;
import com.linkedin.restli.common.ResourceMethod;
import com.linkedin.restli.common.RestConstants;
import org.apache.gobblin.http.HttpOperation;
import org.apache.gobblin.utils.HttpUtils;
import org.apache.gobblin.async.AsyncRequestBuilder;
import org.apache.gobblin.async.BufferedRecord;
/**
* Build {@link RestRequest} that can talk to restli services
*
* <p>
* This basic implementation builds a write request from a single record
* </p>
*/
public class R2RestRequestBuilder implements AsyncRequestBuilder<GenericRecord, RestRequest> {
  private static final Logger LOG = LoggerFactory.getLogger(R2RestRequestBuilder.class);
  // Thread-safe codec, cached once for payload serialization
  private static final JacksonDataCodec JACKSON_DATA_CODEC = new JacksonDataCodec();

  private final String urlTemplate;
  private final ResourceMethod method;
  private final String protocolVersion;

  /**
   * @param urlTemplate restli URI template filled from each record's keys/query params
   * @param verb restli resource method name (e.g. "get", "create")
   * @param protocolVersion restli protocol version sent in the request header
   */
  public R2RestRequestBuilder(String urlTemplate, String verb, String protocolVersion) {
    this.urlTemplate = urlTemplate;
    method = ResourceMethod.fromString(verb);
    this.protocolVersion = protocolVersion;
  }

  /** Builds a request from the single record at the head of {@code buffer}. */
  @Override
  public R2Request<GenericRecord> buildRequest(Queue<BufferedRecord<GenericRecord>> buffer) {
    return buildWriteRequest(buffer.poll());
  }

  /**
   * Build a request from a single record
   *
   * @return the populated request, or {@code null} if the record is null or no URI could be built
   */
  private R2Request<GenericRecord> buildWriteRequest(BufferedRecord<GenericRecord> record) {
    if (record == null) {
      return null;
    }
    R2Request<GenericRecord> request = new R2Request<>();
    HttpOperation httpOperation = HttpUtils.toHttpOperation(record.getRecord());

    // Set uri
    URI uri = HttpUtils.buildURI(urlTemplate, httpOperation.getKeys(), httpOperation.getQueryParams());
    if (uri == null) {
      return null;
    }
    RestRequestBuilder builder = new RestRequestBuilder(uri).setMethod(method.getHttpMethod().toString());

    // Set headers
    Map<String, String> headers = httpOperation.getHeaders();
    if (headers != null && !headers.isEmpty()) {
      builder.setHeaders(headers);
    }
    builder.setHeader(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, protocolVersion);
    builder.setHeader(RestConstants.HEADER_RESTLI_REQUEST_METHOD, method.toString());

    // Add payload
    int bytesWritten = addPayload(builder, httpOperation.getBody());
    if (bytesWritten == -1) {
      throw new RuntimeException("Fail to write payload into request");
    }

    request.markRecord(record, bytesWritten);
    request.setRawRequest(build(builder));
    return request;
  }

  /**
   * Add payload to request. By default, payload is sent as application/json
   *
   * @return the number of entity bytes written (0 for an empty payload)
   */
  protected int addPayload(RestRequestBuilder builder, String payload) {
    if (payload == null || payload.isEmpty()) {
      return 0;
    }
    builder.setHeader(RestConstants.HEADER_CONTENT_TYPE, RestConstants.HEADER_VALUE_APPLICATION_JSON);
    try {
      DataMap data = JACKSON_DATA_CODEC.stringToMap(payload);
      byte[] bytes = JACKSON_DATA_CODEC.mapToBytes(data);
      builder.setEntity(bytes);
      return bytes.length;
    } catch (IOException e) {
      throw new RuntimeException("Fail to convert payload: " + payload, e);
    }
  }

  /**
   * Add this method for argument capture in test
   */
  @VisibleForTesting
  public RestRequest build(RestRequestBuilder builder) {
    return builder.build();
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import com.google.common.collect.Maps;
import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestResponse;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.http.ResponseHandler;
import org.apache.gobblin.http.StatusType;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.event.FailureEventBuilder;
import org.apache.gobblin.net.Request;
import org.apache.gobblin.utils.HttpConstants;
import org.apache.gobblin.utils.HttpUtils;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import lombok.extern.slf4j.Slf4j;
/**
* Basic logic to handle a {@link RestResponse} from a restli service
*
* <p>
* A more specific handler understands the content inside the response and is able to customize
* the behavior as needed. For example: parsing the entity from a get response, extracting data
* sent from the service for a post response, executing more detailed status code handling, etc.
* </p>
*/
@Slf4j
public class R2RestResponseHandler implements ResponseHandler<RestRequest, RestResponse> {
  public static final String CONTENT_TYPE_HEADER = "Content-Type";
  // Promoted from instance fields: these are constants and should not be allocated per handler
  private static final String R2_RESPONSE_EVENT_NAMESPACE = "r2.response";
  private static final String R2_FAILED_REQUEST_EVENT = "r2FailedRequest";

  // Status codes that should be treated as non-fatal despite being errors
  private final Set<String> errorCodeWhitelist;
  private final MetricContext metricsContext;

  public R2RestResponseHandler() {
    this(new HashSet<>(), Instrumented.getMetricContext(new State(), R2RestResponseHandler.class));
  }

  public R2RestResponseHandler(Set<String> errorCodeWhitelist, MetricContext metricContext) {
    this.errorCodeWhitelist = errorCodeWhitelist;
    this.metricsContext = metricContext;
  }

  /**
   * Classify {@code response} into an {@link R2ResponseStatus}. Successful responses carry the
   * entity and content type; failures emit a tracking/failure event with the status code and request.
   */
  @Override
  public R2ResponseStatus handleResponse(Request<RestRequest> request, RestResponse response) {
    R2ResponseStatus status = new R2ResponseStatus(StatusType.OK);
    int statusCode = response.getStatus();
    status.setStatusCode(statusCode);
    HttpUtils.updateStatusType(status, statusCode, errorCodeWhitelist);

    if (status.getType() == StatusType.OK) {
      status.setContent(response.getEntity());
      status.setContentType(response.getHeader(CONTENT_TYPE_HEADER));
    } else {
      // Parameterized logging instead of string concatenation
      log.info("Receive an unsuccessful response with status code: {}", statusCode);
      Map<String, String> metadata = Maps.newHashMap();
      metadata.put(HttpConstants.STATUS_CODE, String.valueOf(statusCode));
      metadata.put(HttpConstants.REQUEST, request.toString());
      if (status.getType() != StatusType.CONTINUE) {
        FailureEventBuilder failureEvent = new FailureEventBuilder(R2_FAILED_REQUEST_EVENT);
        failureEvent.addAdditionalMetadata(metadata);
        failureEvent.submit(metricsContext);
      } else {
        // NOTE(review): timestamp is 0L here, presumably filled in downstream — confirm
        GobblinTrackingEvent event =
            new GobblinTrackingEvent(0L, R2_RESPONSE_EVENT_NAMESPACE, R2_FAILED_REQUEST_EVENT, metadata);
        metricsContext.submitEvent(event);
      }
    }

    return status;
  }
}
| 3,725 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/r2/R2Request.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.r2;
import java.nio.charset.Charset;
import com.linkedin.data.ByteString;
import com.linkedin.r2.message.rest.RestRequest;
import org.apache.gobblin.async.AsyncRequest;
/**
 * A specific {@link AsyncRequest} related to a {@link RestRequest} and its associated record information
 */
public class R2Request<D> extends AsyncRequest<D, RestRequest> {

  /**
   * Renders the underlying {@link RestRequest} as a multi-line human-readable summary:
   * type, URI, all headers, and (when present) the entity body decoded with the
   * platform default charset.
   */
  @Override
  public String toString() {
    final RestRequest rawRequest = getRawRequest();
    final String newline = "\n";

    final StringBuilder sb = new StringBuilder()
        .append("R2Request Info").append(newline)
        .append("type: RestRequest").append(newline)
        .append("uri: ").append(rawRequest.getURI().toString()).append(newline)
        .append("headers: ");

    rawRequest.getHeaders().forEach((name, value) ->
        sb.append("[").append(name).append(":").append(value).append("] ")
    );
    sb.append(newline);

    final ByteString body = rawRequest.getEntity();
    if (body != null) {
      sb.append("body: ").append(body.asString(Charset.defaultCharset())).append(newline);
    }
    return sb.toString();
  }
}
| 3,726 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/StandaloneTestKafkaServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.KafkaTestBase;
/**
 * A standalone test Kafka server, useful for debugging.
 */
@Slf4j
public class StandaloneTestKafkaServer {

  /**
   * Starts an embedded Kafka test server and blocks until the JVM is terminated,
   * at which point a shutdown hook stops the server.
   */
  public static void main(String[] args)
      throws InterruptedException {
    final KafkaTestBase kafkaTestBase = new KafkaTestBase();

    // Ensure the broker shuts down cleanly on JVM exit.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      log.info("Shutting down...");
      kafkaTestBase.stopServers();
    }));

    // Start first, then announce: the original printed "Started server" before
    // startServers() had actually been invoked.
    kafkaTestBase.startServers();
    System.out.println("Started server on port: " + kafkaTestBase.getKafkaServerPort());
  }
}
| 3,727 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics/reporter/KafkaAvroEventReporterWithSchemaRegistryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.codec.digest.DigestUtils;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.kafka.KafkaAvroEventReporter;
import org.apache.gobblin.metrics.kafka.KafkaAvroSchemaRegistry;
import org.apache.gobblin.metrics.kafka.KafkaEventReporter;
public class KafkaAvroEventReporterWithSchemaRegistryTest {
private final Map<String, Schema> schemas = Maps.newConcurrentMap();
@Test
public void test() throws Exception {
testHelper(false);
}
@Test
public void testWithSchemaId() throws IOException {
testHelper(true);
}
private String register(Schema schema) {
String id = DigestUtils.sha1Hex(schema.toString().getBytes());
this.schemas.put(id, schema);
return id;
}
private void testHelper(boolean isSchemaIdEnabled) throws IOException {
MetricContext context = MetricContext.builder("context").build();
MockKafkaPusher pusher = new MockKafkaPusher();
KafkaAvroSchemaRegistry registry = Mockito.mock(KafkaAvroSchemaRegistry.class);
KafkaAvroEventReporter.Builder builder = KafkaAvroEventReporter.forContext(context).withKafkaPusher(pusher).withSchemaRegistry(registry);
Schema schema =
new Schema.Parser().parse(getClass().getClassLoader().getResourceAsStream("GobblinTrackingEvent.avsc"));
String schemaId = DigestUtils.sha1Hex(schema.toString().getBytes());
if (!isSchemaIdEnabled) {
Mockito.when(registry.register(Mockito.any(Schema.class))).thenAnswer(new Answer<String>() {
@Override
public String answer(InvocationOnMock invocation)
throws Throwable {
return register((Schema) invocation.getArguments()[0]);
}
});
Mockito.when(registry.register(Mockito.any(Schema.class), Mockito.anyString())).thenAnswer(new Answer<String>() {
@Override
public String answer(InvocationOnMock invocation)
throws Throwable {
return register((Schema) invocation.getArguments()[0]);
}
});
} else {
builder.withSchemaId(schemaId);
}
KafkaEventReporter kafkaReporter = builder.build("localhost:0000", "topic");
GobblinTrackingEvent event = new GobblinTrackingEvent(0l, "namespace", "name", Maps.<String, String>newHashMap());
context.submitEvent(event);
try {
Thread.sleep(100);
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
kafkaReporter.report();
try {
Thread.sleep(100);
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
byte[] nextMessage = pusher.messageIterator().next();
DataInputStream is = new DataInputStream(new ByteArrayInputStream(nextMessage));
Assert.assertEquals(is.readByte(), KafkaAvroSchemaRegistry.MAGIC_BYTE);
byte[] readId = new byte[20];
Assert.assertEquals(is.read(readId), 20);
String readStringId = Hex.encodeHexString(readId);
if (!isSchemaIdEnabled) {
Assert.assertTrue(this.schemas.containsKey(readStringId));
Schema readSchema = this.schemas.get(readStringId);
Assert.assertFalse(readSchema.toString().contains("avro.java.string"));
}
Assert.assertEquals(readStringId, schemaId);
is.close();
}
}
| 3,728 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics/reporter/KafkaPusherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.metrics.kafka.KafkaPusher;
/**
 * Test {@link org.apache.gobblin.metrics.kafka.KafkaPusher}.
 */
public class KafkaPusherTest extends KafkaTestBase {

  public static final String TOPIC = KafkaPusherTest.class.getSimpleName();

  /** Starts the shared embedded Kafka server and provisions {@link #TOPIC}. */
  public KafkaPusherTest()
      throws InterruptedException, RuntimeException {
    super(TOPIC);
  }

  /** Pushes two messages and verifies they arrive on the topic in order. */
  @Test
  public void test() throws IOException {
    KafkaPusher pusher = new KafkaPusher("localhost:" + kafkaPort, TOPIC);

    String msg1 = "msg1";
    String msg2 = "msg2";
    pusher.pushMessages(Lists.newArrayList(msg1.getBytes(), msg2.getBytes()));

    // Give the broker a moment to make the messages available to the consumer.
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    // Use TestNG assertions rather than the JVM `assert` keyword: the latter is a
    // silent no-op unless the tests are run with -ea, which weakens the test.
    Assert.assertTrue(iterator.hasNext());
    Assert.assertEquals(new String(iterator.next().message()), msg1);
    Assert.assertTrue(iterator.hasNext());
    Assert.assertEquals(new String(iterator.next().message()), msg2);

    pusher.close();
  }

  /** Best-effort close of this instance's consumer; failures must not fail the suite. */
  @AfterClass
  public void after() {
    try {
      close();
    } catch (Exception e) {
      System.err.println("Failed to close Kafka server.");
    }
  }

  /** Shuts down the shared embedded Kafka server once the whole suite has run. */
  @AfterSuite
  public void afterSuite() {
    closeServer();
  }
}
| 3,729 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics/reporter/MockKafkaPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import com.google.common.collect.Queues;
import kafka.producer.ProducerConfig;
import org.apache.gobblin.metrics.kafka.KafkaPusher;
import org.apache.gobblin.metrics.kafka.ProducerCloseable;
/**
 * Mock instance of {@link org.apache.gobblin.metrics.kafka.KafkaPusher} used for testing.
 *
 * <p>Captures pushed messages in an in-memory queue instead of producing to Kafka;
 * no real producer is ever created ({@link #createProducer} returns {@code null}).</p>
 */
public class MockKafkaPusher extends KafkaPusher {

  // Thread-safe queue so pushes from a reporter thread can be read by the test thread.
  // Kept package-visible (as in the original) for same-package test access.
  final Queue<byte[]> messages = Queues.newLinkedBlockingQueue();

  public MockKafkaPusher() {
    // The parent requires broker/topic strings; no connection is made because
    // createProducer() is overridden to return null.
    super("dummy", "dummy");
  }

  /** Captures the messages in memory instead of sending them to Kafka. */
  @Override
  public void pushMessages(List<byte[]> messages) {
    this.messages.addAll(messages);
  }

  // The original close() override merely delegated to super.close(); it has been
  // removed as redundant — the inherited behavior is identical.

  /** Never creates a real Kafka producer. */
  @Override
  protected ProducerCloseable<String, byte[]> createProducer(ProducerConfig config) {
    return null;
  }

  /** @return an iterator over all messages pushed so far, in push order */
  public Iterator<byte[]> messageIterator() {
    return this.messages.iterator();
  }
}
| 3,730 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics/reporter/KafkaProducerPusherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import kafka.consumer.ConsumerIterator;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.metrics.kafka.KafkaProducerPusher;
import org.apache.gobblin.metrics.kafka.Pusher;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import static org.apache.gobblin.KafkaCommonUtil.*;
/**
 * Test {@link org.apache.gobblin.metrics.kafka.KafkaProducerPusher}.
 */
public class KafkaProducerPusherTest {
  public static final String TOPIC = KafkaProducerPusherTest.class.getSimpleName();

  private KafkaTestBase kafkaTestHelper;
  // Milliseconds close() is expected to wait, derived from the shared flush timeout.
  private final long flushTimeoutMilli = KAFKA_FLUSH_TIMEOUT_SECONDS * 1000;

  @BeforeClass
  public void setup() throws Exception {
    kafkaTestHelper = new KafkaTestBase();
    kafkaTestHelper.startServers();
    kafkaTestHelper.provisionTopic(TOPIC);
  }

  /** Verifies pushing works and that the scoped bootstrap-servers config overrides the generic broker string. */
  @Test(priority = 0)
  public void testPushMessages() throws IOException {
    // Test that the scoped config overrides the generic config
    Pusher pusher = new KafkaProducerPusher("127.0.0.1:dummy", TOPIC, Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
        ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort()))));

    String msg1 = "msg1";
    String msg2 = "msg2";
    pusher.pushMessages(Lists.newArrayList(msg1.getBytes(), msg2.getBytes()));

    // Give the broker a moment to make the messages consumable.
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);

    // Prefer TestNG assertions over the JVM `assert` keyword (a no-op without -ea).
    Assert.assertTrue(iterator.hasNext());
    Assert.assertEquals(new String(iterator.next().message()), msg1);
    Assert.assertTrue(iterator.hasNext());
    Assert.assertEquals(new String(iterator.next().message()), msg2);

    pusher.close();
  }

  /** Verifies that close() returns around the flush timeout even while pushes continue. */
  @Test(priority = 1)
  public void testCloseTimeOut() throws IOException {
    Pusher pusher = new KafkaProducerPusher("127.0.0.1:dummy", TOPIC, Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
        ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort()))));

    // Keep pushing for twice the flush timeout so close() cannot drain naturally.
    // A Runnable lambda replaces the original `new Thread() {...}` instance, which
    // was only ever used as a Runnable (the Thread itself was never started).
    Runnable keepPushing = () -> {
      final long startRunTime = System.currentTimeMillis();
      final ArrayList<byte[]> payload = Lists.newArrayList("msg".getBytes());
      while (System.currentTimeMillis() - startRunTime < flushTimeoutMilli * 2) {
        pusher.pushMessages(payload);
      }
    };

    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.submit(keepPushing);

    try {
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }

    long startCloseTime = System.currentTimeMillis();
    pusher.close();

    // close() should complete near the timeout despite the ongoing pushes.
    Assert.assertTrue(System.currentTimeMillis() - startCloseTime < flushTimeoutMilli + 3000);

    // Stop the pushing task; the executor was previously leaked.
    executor.shutdownNow();
  }

  /** Best-effort teardown of the embedded Kafka server. */
  @AfterClass
  public void after() {
    try {
      this.kafkaTestHelper.close();
    } catch (Exception e) {
      System.err.println("Failed to close Kafka server.");
    }
  }
}
| 3,731 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/metrics/reporter/KafkaTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.reporter;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.I0Itec.zkclient.ZkClient;
import kafka.admin.AdminUtils;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.TestUtils;
import kafka.utils.TestZKUtils;
import kafka.utils.Time;
import kafka.utils.ZKStringSerializer$;
import kafka.zk.EmbeddedZookeeper;
/**
 * Base for tests requiring a Kafka server.
 *
 * <p>An embedded Zookeeper + Kafka broker is created automatically (at most once per JVM)
 * at "localhost:" + {@link #kafkaPort}. A {@link kafka.consumer.ConsumerIterator} for the
 * topic passed to the constructor is exposed via {@link #iterator}.</p>
 *
 * @author ibuenros
 */
public class KafkaTestBase implements Closeable {

  // Shared embedded-broker state: one broker serves every instance in the JVM.
  private static int brokerId = 0;
  public static int kafkaPort = 0;
  public static String zkConnect = "";
  static EmbeddedZookeeper zkServer = null;
  static ZkClient zkClient = null;
  static KafkaServer kafkaServer = null;
  // Lifecycle flags: the embedded server can be started at most once per JVM,
  // and cannot be restarted after it has been closed.
  static boolean serverStarted = false;
  static boolean serverClosed = false;

  /**
   * Starts the embedded Zookeeper and Kafka broker on a freshly chosen free port.
   * Idempotent while the server is running.
   *
   * @throws RuntimeException if the server was already started and subsequently closed
   */
  public static void startServer() throws RuntimeException {
    if (serverStarted && serverClosed) {
      throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
    }
    if (!serverStarted) {
      serverStarted = true;
      zkConnect = TestZKUtils.zookeeperConnect();
      zkServer = new EmbeddedZookeeper(zkConnect);
      zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
      kafkaPort = TestUtils.choosePort();
      Properties props = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
      KafkaConfig config = new KafkaConfig(props);
      Time mock = new MockTime();
      kafkaServer = TestUtils.createServer(config, mock);
    }
  }

  /** Shuts down the broker, the Zookeeper client and the Zookeeper server; no-op if never started or already closed. */
  public static void closeServer() {
    if (serverStarted && !serverClosed) {
      serverClosed = true;
      kafkaServer.shutdown();
      zkClient.close();
      zkServer.shutdown();
    }
  }

  // Per-instance consumer state for one topic.
  protected String topic;
  protected ConsumerConnector consumer;
  protected KafkaStream<byte[], byte[]> stream;
  protected ConsumerIterator<byte[], byte[]> iterator;

  /**
   * Starts the shared server if needed, creates {@code topic} with 1 partition and
   * replication factor 1, waits up to 5s for metadata propagation, then opens a
   * consumer and exposes a message iterator over the topic.
   */
  public KafkaTestBase(String topic) throws InterruptedException, RuntimeException {
    startServer();
    this.topic = topic;
    AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());
    List<KafkaServer> servers = new ArrayList<>();
    servers.add(kafkaServer);
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnect);
    consumeProps.put("group.id", "testConsumer");
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    // Consumer blocks at most 10s waiting for a message before timing out.
    consumeProps.put("consumer.timeout.ms", "10000");
    consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(this.topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
    stream = streams.get(0);
    iterator = stream.iterator();
  }

  /** Shuts down this instance's consumer only; the shared server is closed via {@link #closeServer()}. */
  @Override
  public void close() throws IOException {
    consumer.shutdown();
  }
}
| 3,732 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaDeserializerExtractorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serializer;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.serializers.KafkaAvroDeserializer;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import io.confluent.kafka.serializers.KafkaJsonDeserializer;
import io.confluent.kafka.serializers.KafkaJsonSerializer;
import kafka.message.Message;
import kafka.message.MessageAndOffset;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord;
import org.apache.gobblin.kafka.client.Kafka08ConsumerClient.Kafka08ConsumerRecord;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaDeserializerExtractor.Deserializers;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.PropertiesUtils;
import static org.mockito.Mockito.*;
@Test(groups = { "gobblin.source.extractor.extract.kafka" })
public class KafkaDeserializerExtractorTest {
private static final String TEST_TOPIC_NAME = "testTopic";
private static final String TEST_URL = "testUrl";
private static final String TEST_RECORD_NAME = "testRecord";
private static final String TEST_NAMESPACE = "testNamespace";
private static final String TEST_FIELD_NAME = "testField";
private static final String TEST_FIELD_NAME2 = "testField2";
@Test
public void testDeserializeRecord() throws IOException {
  // Work unit covering offsets [0, 10); the range itself is irrelevant to decoding.
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L,10L);
  String testString = "Hello World";
  ByteBuffer testStringByteBuffer = ByteBuffer.wrap(testString.getBytes(StandardCharsets.UTF_8));
  Deserializer<Object> mockKafkaDecoder = mock(Deserializer.class);
  KafkaSchemaRegistry<?, ?> mockKafkaSchemaRegistry = mock(KafkaSchemaRegistry.class);
  // The extractor should hand the raw message bytes to the configured Deserializer
  // and return its result unchanged.
  when(mockKafkaDecoder.deserialize(TEST_TOPIC_NAME, testStringByteBuffer.array())).thenReturn(testString);
  KafkaDeserializerExtractor kafkaDecoderExtractor = new KafkaDeserializerExtractor(mockWorkUnitState,
      Optional.fromNullable(Deserializers.BYTE_ARRAY),
      mockKafkaDecoder, mockKafkaSchemaRegistry);
  ByteArrayBasedKafkaRecord mockMessageAndOffset = getMockMessageAndOffset(testStringByteBuffer);
  Assert.assertEquals(kafkaDecoderExtractor.decodeRecord(mockMessageAndOffset), testString);
}
/** Asserts that the extractor resolved the expected deserializer and schema-registry classes. */
private static void assertDeserializerResolved(KafkaDeserializerExtractor extractor,
    KafkaDeserializerExtractor.Deserializers expected) throws ReflectiveOperationException {
  Assert.assertEquals(extractor.getKafkaDeserializer().getClass(), expected.getDeserializerClass());
  Assert.assertEquals(extractor.getKafkaSchemaRegistry().getClass(), expected.getSchemaRegistryClass());
}

@Test
public void testBuiltInStringDeserializer() throws ReflectiveOperationException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L, 10L);
  mockWorkUnitState.setProp(KafkaDeserializerExtractor.KAFKA_DESERIALIZER_TYPE,
      KafkaDeserializerExtractor.Deserializers.STRING.name());
  assertDeserializerResolved(new KafkaDeserializerExtractor(mockWorkUnitState),
      KafkaDeserializerExtractor.Deserializers.STRING);
}

@Test
public void testBuiltInGsonDeserializer() throws ReflectiveOperationException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L, 10L);
  mockWorkUnitState.setProp(KafkaDeserializerExtractor.KAFKA_DESERIALIZER_TYPE,
      KafkaDeserializerExtractor.Deserializers.GSON.name());
  assertDeserializerResolved(new KafkaDeserializerExtractor(mockWorkUnitState),
      KafkaDeserializerExtractor.Deserializers.GSON);
}

@Test
public void testBuiltInConfluentAvroDeserializer() throws ReflectiveOperationException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L, 10L);
  mockWorkUnitState.setProp(KafkaDeserializerExtractor.KAFKA_DESERIALIZER_TYPE,
      KafkaDeserializerExtractor.Deserializers.CONFLUENT_AVRO.name());
  // The Confluent Avro path consults getSchema(); supply a minimal single-field record schema.
  KafkaDeserializerExtractor kafkaDecoderExtractor = new KafkaDeserializerExtractor(mockWorkUnitState) {
    @Override
    public Object getSchema() {
      return SchemaBuilder.record(TEST_RECORD_NAME)
          .namespace(TEST_NAMESPACE).fields()
          .name(TEST_FIELD_NAME).type().stringType().noDefault()
          .endRecord();
    }
  };
  assertDeserializerResolved(kafkaDecoderExtractor, KafkaDeserializerExtractor.Deserializers.CONFLUENT_AVRO);
}
@Test
public void testCustomDeserializer() throws ReflectiveOperationException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L, 10L);
  // Configure a fully-qualified Deserializer class name instead of a built-in
  // Deserializers enum constant.
  mockWorkUnitState
      .setProp(KafkaDeserializerExtractor.KAFKA_DESERIALIZER_TYPE, KafkaJsonDeserializer.class.getName());
  // A custom deserializer requires the schema-registry class to be configured explicitly.
  mockWorkUnitState
      .setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS, SimpleKafkaSchemaRegistry.class.getName());
  KafkaDeserializerExtractor kafkaDecoderExtractor = new KafkaDeserializerExtractor(mockWorkUnitState);
  Assert.assertEquals(kafkaDecoderExtractor.getKafkaDeserializer().getClass(), KafkaJsonDeserializer.class);
  Assert.assertEquals(kafkaDecoderExtractor.getKafkaSchemaRegistry().getClass(), SimpleKafkaSchemaRegistry.class);
}
@Test
public void testConfluentAvroDeserializer() throws IOException, RestClientException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L,10L);
  mockWorkUnitState.setProp("schema.registry.url", TEST_URL);
  // Single-field record schema used for both serialization and schema lookup.
  Schema schema = SchemaBuilder.record(TEST_RECORD_NAME)
      .namespace(TEST_NAMESPACE).fields()
      .name(TEST_FIELD_NAME).type().stringType().noDefault()
      .endRecord();
  GenericRecord testGenericRecord = new GenericRecordBuilder(schema).set(TEST_FIELD_NAME, "testValue").build();
  // The Confluent client is mocked so encoder and decoder resolve the same schema by id.
  SchemaRegistryClient mockSchemaRegistryClient = mock(SchemaRegistryClient.class);
  when(mockSchemaRegistryClient.getByID(any(Integer.class))).thenReturn(schema);
  Serializer<Object> kafkaEncoder = new KafkaAvroSerializer(mockSchemaRegistryClient);
  Deserializer<Object> kafkaDecoder = new KafkaAvroDeserializer(mockSchemaRegistryClient);
  // Round-trip: serialize with the Confluent encoder, then decode through the extractor.
  ByteBuffer testGenericRecordByteBuffer =
      ByteBuffer.wrap(kafkaEncoder.serialize(TEST_TOPIC_NAME, testGenericRecord));
  KafkaSchemaRegistry<Integer, Schema> mockKafkaSchemaRegistry = mock(KafkaSchemaRegistry.class);
  KafkaDeserializerExtractor kafkaDecoderExtractor =
      new KafkaDeserializerExtractor(mockWorkUnitState,
          Optional.fromNullable(Deserializers.CONFLUENT_AVRO), kafkaDecoder, mockKafkaSchemaRegistry);
  ByteArrayBasedKafkaRecord mockMessageAndOffset = getMockMessageAndOffset(testGenericRecordByteBuffer);
  Assert.assertEquals(kafkaDecoderExtractor.decodeRecord(mockMessageAndOffset), testGenericRecord);
}
@Test
public void testConfluentAvroDeserializerForSchemaEvolution() throws IOException, RestClientException, SchemaRegistryException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L, 10L);
  mockWorkUnitState.setProp("schema.registry.url", TEST_URL);
  // Writer schema: one required string field.
  Schema schemaV1 = SchemaBuilder.record(TEST_RECORD_NAME)
      .namespace(TEST_NAMESPACE).fields()
      .name(TEST_FIELD_NAME).type().stringType().noDefault()
      .endRecord();
  // Reader schema: adds an optional second field, simulating schema evolution.
  Schema schemaV2 = SchemaBuilder.record(TEST_RECORD_NAME)
      .namespace(TEST_NAMESPACE).fields()
      .name(TEST_FIELD_NAME).type().stringType().noDefault()
      .optionalString(TEST_FIELD_NAME2).endRecord();
  GenericRecord testGenericRecord = new GenericRecordBuilder(schemaV1).set(TEST_FIELD_NAME, "testValue").build();
  SchemaRegistryClient mockSchemaRegistryClient = mock(SchemaRegistryClient.class);
  // Records are written with the old (V1) schema...
  when(mockSchemaRegistryClient.getByID(any(Integer.class))).thenReturn(schemaV1);
  Serializer<Object> kafkaEncoder = new KafkaAvroSerializer(mockSchemaRegistryClient);
  Deserializer<Object> kafkaDecoder = new KafkaAvroDeserializer(mockSchemaRegistryClient);
  ByteBuffer testGenericRecordByteBuffer =
      ByteBuffer.wrap(kafkaEncoder.serialize(TEST_TOPIC_NAME, testGenericRecord));
  KafkaSchemaRegistry<Integer, Schema> mockKafkaSchemaRegistry = mock(KafkaSchemaRegistry.class);
  // ...while the topic's latest schema is the evolved V2.
  when(mockKafkaSchemaRegistry.getLatestSchemaByTopic(TEST_TOPIC_NAME)).thenReturn(schemaV2);
  KafkaDeserializerExtractor kafkaDecoderExtractor = new KafkaDeserializerExtractor(mockWorkUnitState,
      Optional.fromNullable(Deserializers.CONFLUENT_AVRO), kafkaDecoder, mockKafkaSchemaRegistry);
  // NOTE(review): kafkaDecoderExtractor is a real object, not a Mockito mock, so this
  // when(...) cannot stub getSchema() itself; Mockito re-stubs the last mock invocation
  // made inside getSchema() (registry.getLatestSchemaByTopic), which is already stubbed
  // identically above — confirm this line is intentional rather than a mocking mistake.
  when(kafkaDecoderExtractor.getSchema()).thenReturn(schemaV2);
  ByteArrayBasedKafkaRecord mockMessageAndOffset = getMockMessageAndOffset(testGenericRecordByteBuffer);
  // Decoding a V1-written record against the V2 reader schema fills the new field with null.
  GenericRecord received = (GenericRecord) kafkaDecoderExtractor.decodeRecord(mockMessageAndOffset);
  Assert.assertEquals(received.toString(), "{\"testField\": \"testValue\", \"testField2\": null}");
}
@Test
public void testConfluentJsonDeserializer() throws IOException {
  WorkUnitState workUnitState = getMockWorkUnitState(0L, 10L);
  // Tell the Confluent JSON (de)serializer which POJO type to bind the payload to.
  workUnitState.setProp("json.value.type", KafkaRecord.class.getName());

  KafkaRecord expectedRecord = new KafkaRecord("Hello World");

  // Serializer and deserializer are configured from the same work-unit properties.
  Serializer<KafkaRecord> jsonSerializer = new KafkaJsonSerializer<>();
  jsonSerializer.configure(PropertiesUtils.propsToStringKeyMap(workUnitState.getProperties()), false);
  Deserializer<KafkaRecord> jsonDeserializer = new KafkaJsonDeserializer<>();
  jsonDeserializer.configure(PropertiesUtils.propsToStringKeyMap(workUnitState.getProperties()), false);

  ByteBuffer payload = ByteBuffer.wrap(jsonSerializer.serialize(TEST_TOPIC_NAME, expectedRecord));

  KafkaSchemaRegistry<?, ?> schemaRegistry = mock(KafkaSchemaRegistry.class);
  KafkaDeserializerExtractor extractor = new KafkaDeserializerExtractor(workUnitState,
      Optional.fromNullable(Deserializers.CONFLUENT_JSON), jsonDeserializer, schemaRegistry);

  // JSON round-trip must produce an equal record (KafkaRecord carries Lombok equals).
  Assert.assertEquals(extractor.decodeRecord(getMockMessageAndOffset(payload)), expectedRecord);
}
/**
 * Builds a {@link WorkUnitState} for a single-partition Kafka work unit whose
 * multi-watermark interval spans [lowWaterMark, highWaterMark].
 *
 * @param lowWaterMark  low watermark of the single partition
 * @param highWaterMark expected high watermark of the single partition
 * @return state pre-populated with topic, partition, broker and schema-registry props
 */
private WorkUnitState getMockWorkUnitState(Long lowWaterMark, Long highWaterMark) {
  WorkUnit mockWorkUnit = WorkUnit.createEmpty();
  // Plain lists instead of the double-brace-initialization anti-pattern: the
  // anonymous ArrayList subclass creates a needless class per call site and
  // captures a reference to the enclosing test instance.
  ArrayList<Long> lowWatermarks = new ArrayList<>();
  lowWatermarks.add(lowWaterMark);
  ArrayList<Long> highWatermarks = new ArrayList<>();
  highWatermarks.add(highWaterMark);
  mockWorkUnit.setWatermarkInterval(new WatermarkInterval(new MultiLongWatermark(lowWatermarks),
      new MultiLongWatermark(highWatermarks)));
  WorkUnitState mockWorkUnitState = new WorkUnitState(mockWorkUnit, new State());
  mockWorkUnitState.setProp(KafkaSource.TOPIC_NAME, TEST_TOPIC_NAME);
  mockWorkUnitState.setProp(KafkaSource.PARTITION_ID, "1");
  mockWorkUnitState.setProp(ConfigurationKeys.KAFKA_BROKERS, "localhost:8080");
  mockWorkUnitState.setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, TEST_URL);
  return mockWorkUnitState;
}
/**
 * When low and high watermark are equal there is nothing to consume, so asking
 * the extractor for a schema must not hit the schema registry.
 */
@Test
public void testConfluentShouldNotQuerySchemaRegistryWhenTheGapIsZero()
    throws IOException, RestClientException, SchemaRegistryException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L, 0L);
  mockWorkUnitState.setProp("schema.registry.url", TEST_URL);
  SchemaRegistryClient mockSchemaRegistryClient = mock(SchemaRegistryClient.class);
  Deserializer<Object> kafkaDecoder = new KafkaAvroDeserializer(mockSchemaRegistryClient);
  KafkaSchemaRegistry<Integer, Schema> mockKafkaSchemaRegistry = mock(KafkaSchemaRegistry.class);
  KafkaDeserializerExtractor kafkaDecoderExtractor = new KafkaDeserializerExtractor(mockWorkUnitState,
      Optional.fromNullable(Deserializers.CONFLUENT_AVRO), kafkaDecoder, mockKafkaSchemaRegistry);
  // Exercise getSchema() FIRST, then verify. The original test verified before
  // calling getSchema(), so a registry lookup inside getSchema() would have
  // gone completely unchecked.
  kafkaDecoderExtractor.getSchema();
  verify(mockKafkaSchemaRegistry, never()).getLatestSchemaByTopic(any());
}
/** Wraps the given payload in a Kafka 0.8 consumer record backed by Mockito mocks. */
private ByteArrayBasedKafkaRecord getMockMessageAndOffset(ByteBuffer payload) {
  Message message = mock(Message.class);
  when(message.payload()).thenReturn(payload);
  MessageAndOffset messageAndOffset = mock(MessageAndOffset.class);
  when(messageAndOffset.message()).thenReturn(message);
  return new Kafka08ConsumerRecord(messageAndOffset, "test", 0);
}
// Minimal JSON-serializable POJO used as the payload type in the Confluent JSON
// (de)serializer tests; Lombok supplies constructors, accessors and value equality.
@AllArgsConstructor
@NoArgsConstructor
@EqualsAndHashCode
@Getter
@Setter
private static class KafkaRecord {
// the single serialized field
private String value;
}
}
| 3,733 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaWrapperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
/** Tests for {@code KafkaWrapper} configuration validation. */
@Slf4j
public class KafkaWrapperTest {

  /**
   * KafkaWrapper must reject a configuration whose fetch timeout exceeds the
   * socket timeout: fetch requests would otherwise outlive their sockets.
   */
  @Test
  public void testTimeoutConfig() {
    Properties props = new Properties();
    props.setProperty(ConfigurationKeys.KAFKA_BROKERS, "localhost:9092");
    // fetch timeout (10s) deliberately larger than socket timeout (1s)
    props.setProperty("source.kafka.fetchTimeoutMillis", "10000");
    props.setProperty("source.kafka.socketTimeoutMillis", "1000");
    State state = new State(props);
    try {
      // Result intentionally discarded (the original bound it to an unused local);
      // only the construction attempt matters.
      KafkaWrapper.create(state);
      Assert.fail("KafkaWrapper should fail to initialize if fetchTimeout is greater than socketTimeout");
    } catch (IllegalArgumentException e) {
      log.info("Found exception as expected");
      log.debug("Exception trace", e);
    } catch (Exception e) {
      Assert.fail("Should only throw IllegalArgumentException", e);
    }
  }
}
| 3,734 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/SimpleKafkaSchemaRegistryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
@Test(groups = { "gobblin.source.extractor.extract.kafka" })
public class SimpleKafkaSchemaRegistryTest {

  /**
   * SimpleKafkaSchemaRegistry has no backing store: the "latest schema" it
   * reports for a topic is simply the topic name itself.
   */
  @Test
  public void testGetLatestSchemaByTopic() throws SchemaRegistryException {
    String topicName = "testTopicName";
    SimpleKafkaSchemaRegistry registry = new SimpleKafkaSchemaRegistry(new Properties());
    Assert.assertEquals(topicName, registry.getLatestSchemaByTopic(topicName));
  }
}
| 3,735 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/ConfluentKafkaSchemaRegistryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.testng.Assert;
import org.testng.annotations.Test;
import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
// Tests ConfluentKafkaSchemaRegistry against Confluent's in-memory MockSchemaRegistryClient.
@Test(groups = { "gobblin.source.extractor.extract.kafka" })
public class ConfluentKafkaSchemaRegistryTest {
private static final String TEST_TOPIC_NAME = "testTopic";
private static final String TEST_URL = "testUrl";
private static final String TEST_RECORD_NAME = "testRecord";
private static final String TEST_NAMESPACE = "testNamespace";
private static final String TEST_FIELD_NAME = "testField";
// Registering a schema must return an id that resolves back to the same schema.
@Test
public void testRegisterAndGetByKey() throws SchemaRegistryException {
Properties properties = new Properties();
properties.setProperty(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, TEST_URL);
SchemaRegistryClient schemaRegistryClient = new MockSchemaRegistryClient();
KafkaSchemaRegistry<Integer, Schema> kafkaSchemaRegistry =
new ConfluentKafkaSchemaRegistry(properties, schemaRegistryClient);
Schema schema =
SchemaBuilder.record(TEST_RECORD_NAME).namespace(TEST_NAMESPACE).fields().name(TEST_FIELD_NAME).type()
.stringType().noDefault().endRecord();
Integer id = kafkaSchemaRegistry.register(schema);
Assert.assertEquals(schema, kafkaSchemaRegistry.getSchemaByKey(id));
}
// Latest-schema lookup using the registry's default subject-name suffix.
@Test
public void testRegisterAndGetLatest() throws SchemaRegistryException {
Properties properties = new Properties();
properties.setProperty(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, TEST_URL);
doTestRegisterAndGetLatest(properties);
}
// Same lookup but with an explicitly configured "-key" subject suffix.
@Test
public void testRegisterAndGetLatestCustomSuffix() throws SchemaRegistryException {
Properties properties = new Properties();
properties.setProperty(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL, TEST_URL);
properties.setProperty(ConfluentKafkaSchemaRegistry.CONFLUENT_SCHEMA_NAME_SUFFIX, "-key");
doTestRegisterAndGetLatest(properties);
}
// Registers two schemas under one topic and checks that only the second one is
// reported as the latest.
private void doTestRegisterAndGetLatest(Properties properties) throws SchemaRegistryException {
SchemaRegistryClient schemaRegistryClient = new MockSchemaRegistryClient();
KafkaSchemaRegistry<Integer, Schema> kafkaSchemaRegistry =
new ConfluentKafkaSchemaRegistry(properties, schemaRegistryClient);
Schema schema1 =
SchemaBuilder.record(TEST_RECORD_NAME + "1").namespace(TEST_NAMESPACE).fields().name(TEST_FIELD_NAME).type()
.stringType().noDefault().endRecord();
Schema schema2 =
SchemaBuilder.record(TEST_RECORD_NAME + "2").namespace(TEST_NAMESPACE).fields().name(TEST_FIELD_NAME).type()
.stringType().noDefault().endRecord();
kafkaSchemaRegistry.register(schema1, TEST_TOPIC_NAME);
kafkaSchemaRegistry.register(schema2, TEST_TOPIC_NAME);
Assert.assertNotEquals(schema1, kafkaSchemaRegistry.getLatestSchemaByTopic(TEST_TOPIC_NAME));
Assert.assertEquals(schema2, kafkaSchemaRegistry.getLatestSchemaByTopic(TEST_TOPIC_NAME));
}
}
| 3,736 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaGsonDeserializerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import org.apache.kafka.common.serialization.Deserializer;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
@Test(groups = { "gobblin.source.extractor.extract.kafka" })
public class KafkaGsonDeserializerTest {

  /** Serializing a JsonObject with Gson and deserializing its bytes must round-trip. */
  @Test
  public void testDeserialize() {
    JsonObject expected = new JsonObject();
    expected.addProperty("testKey", "testValue");
    byte[] payload = new Gson().toJson(expected).getBytes(KafkaGsonDeserializer.CHARSET);
    Deserializer<JsonElement> deserializer = new KafkaGsonDeserializer();
    Assert.assertEquals(deserializer.deserialize("testTopic", payload), expected);
  }
}
| 3,737 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka/KafkaTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import org.I0Itec.zkclient.ZkClient;
import com.google.common.collect.ImmutableMap;
import kafka.admin.AdminUtils;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.MockTime;
import kafka.utils.Time;
import kafka.utils.ZKStringSerializer$;
import kafka.zk.EmbeddedZookeeper;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.test.TestUtils;
/**
* A private class for starting a suite of servers for Kafka
* Calls to start and shutdown are reference counted, so that the suite is started and shutdown in pairs.
* A suite of servers (Zk, Kafka etc) will be started just once per process
*/
@Slf4j
class KafkaServerSuite {
  static KafkaServerSuite _instance;

  /**
   * Returns the process-wide singleton, creating it on first use.
   * Synchronized: the original unsynchronized check-then-act could create two
   * suites (each binding its own ports) when tests initialize concurrently; the
   * redundant if/else branches are also collapsed.
   */
  static synchronized KafkaServerSuite getInstance() {
    if (null == _instance) {
      _instance = new KafkaServerSuite();
    }
    return _instance;
  }

  private int _brokerId = 0;
  private EmbeddedZookeeper _zkServer;
  private ZkClient _zkClient;
  private KafkaServer _kafkaServer;
  private final int _kafkaServerPort;      // free port picked at construction time
  private final String _zkConnectString;   // "localhost:<free port>"
  private final AtomicInteger _numStarted; // reference count of start()/shutdown() pairs

  public ZkClient getZkClient() {
    return _zkClient;
  }

  public KafkaServer getKafkaServer() {
    return _kafkaServer;
  }

  public int getKafkaServerPort() {
    return _kafkaServerPort;
  }

  public String getZkConnectString() {
    return _zkConnectString;
  }

  private KafkaServerSuite() {
    _kafkaServerPort = TestUtils.findFreePort();
    _zkConnectString = "localhost:" + TestUtils.findFreePort();
    _numStarted = new AtomicInteger(0);
  }

  /**
   * Starts ZooKeeper and one Kafka broker on the first call; later calls only
   * bump the reference count.
   */
  void start() throws RuntimeException {
    if (_numStarted.incrementAndGet() == 1) {
      log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
      _zkServer = new EmbeddedZookeeper(_zkConnectString);
      _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
      Properties props = kafka.utils.TestUtils.createBrokerConfig(_brokerId, _kafkaServerPort, true);
      props.setProperty("zookeeper.connect", _zkConnectString);
      KafkaConfig config = new KafkaConfig(props);
      Time mock = new MockTime();
      _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
    } else {
      log.info("Kafka server suite already started... continuing");
    }
  }

  /**
   * Decrements the reference count and, when the last user is done, tears
   * everything down: broker first, then ZK client, then ZK server.
   */
  void shutdown() {
    if (_numStarted.decrementAndGet() == 0) {
      log.info("Shutting down Kafka server suite");
      _kafkaServer.shutdown();
      _zkClient.close();
      _zkServer.shutdown();
    } else {
      log.info("Kafka server suite still in use ... not shutting down yet");
    }
  }
}
// Owns one high-level Kafka consumer, its single stream and the stream's iterator,
// all bound to a single topic. Created per topic by KafkaTestBase.provisionTopic.
class KafkaConsumerSuite {
private final ConsumerConnector _consumer;
private final KafkaStream<byte[], byte[]> _stream;
private final ConsumerIterator<byte[], byte[]> _iterator;
private final String _topic;
KafkaConsumerSuite(String zkConnectString, String topic)
{
_topic = topic;
Properties consumeProps = new Properties();
consumeProps.put("zookeeper.connect", zkConnectString);
// Unique group id per instantiation so repeated test runs start from a clean offset.
consumeProps.put("group.id", _topic+"-"+System.nanoTime());
consumeProps.put("zookeeper.session.timeout.ms", "10000");
consumeProps.put("zookeeper.sync.time.ms", "10000");
consumeProps.put("auto.commit.interval.ms", "10000");
// NOTE(review): key "_consumer.timeout.ms" looks like a typo for Kafka's
// "consumer.timeout.ms"; as written it is an unknown property, so the iterator
// can block indefinitely. Confirm before changing -- tests may rely on the
// blocking behavior.
consumeProps.put("_consumer.timeout.ms", "10000");
_consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
// Ask for exactly one stream for the topic and keep its iterator for assertions.
Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
_consumer.createMessageStreams(ImmutableMap.of(this._topic, 1));
List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this._topic);
_stream = streams.get(0);
_iterator = _stream.iterator();
}
void shutdown()
{
_consumer.shutdown();
}
public ConsumerIterator<byte[],byte[]> getIterator() {
return _iterator;
}
}
/**
* A Helper class for testing against Kafka
* A suite of servers (Zk, Kafka etc) will be started just once per process
* Consumer and iterator will be created per instantiation and is one instance per topic.
*/
public class KafkaTestBase implements Closeable {
  private final KafkaServerSuite _kafkaServerSuite;
  // One consumer suite per provisioned topic.
  private final Map<String, KafkaConsumerSuite> _topicConsumerMap;

  public KafkaTestBase() throws InterruptedException, RuntimeException {
    this._kafkaServerSuite = KafkaServerSuite.getInstance();
    this._topicConsumerMap = new HashMap<>();
  }

  /** Starts (or reference-counts) the shared ZK + Kafka server suite. */
  public synchronized void startServers() {
    _kafkaServerSuite.start();
  }

  /** Releases this instance's reference on the shared server suite. */
  public void stopServers() {
    _kafkaServerSuite.shutdown();
  }

  public void start() {
    startServers();
  }

  /** Shuts down every per-topic consumer and deletes its topic from the broker. */
  public void stopClients() throws IOException {
    for (Map.Entry<String, KafkaConsumerSuite> consumerSuiteEntry : _topicConsumerMap.entrySet()) {
      consumerSuiteEntry.getValue().shutdown();
      AdminUtils.deleteTopic(_kafkaServerSuite.getZkClient(), consumerSuiteEntry.getKey());
    }
  }

  @Override
  public void close() throws IOException {
    stopClients();
    stopServers();
  }

  /**
   * Creates the topic (1 partition, replication factor 1) and attaches a consumer
   * to it. Idempotent: an already-provisioned topic is left untouched.
   * (Rewritten from the original empty-if/else into a guard clause.)
   */
  public void provisionTopic(String topic) {
    if (_topicConsumerMap.containsKey(topic)) {
      return; // already provisioned: nothing to do
    }
    AdminUtils.createTopic(_kafkaServerSuite.getZkClient(), topic, 1, 1, new Properties());
    List<KafkaServer> servers = new ArrayList<>();
    servers.add(_kafkaServerSuite.getKafkaServer());
    // Block until the broker has the topic metadata, so consumers don't race topic creation.
    kafka.utils.TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
    _topicConsumerMap.put(topic, new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic));
  }

  /**
   * @throws IllegalStateException if {@link #provisionTopic} was not called for this topic
   */
  public ConsumerIterator<byte[], byte[]> getIteratorForTopic(String topic) {
    KafkaConsumerSuite consumerSuite = _topicConsumerMap.get(topic);
    if (consumerSuite == null) {
      // Fixed: the original message was missing the space before the topic name.
      throw new IllegalStateException("Could not find provisioned topic " + topic + ": call provisionTopic before");
    }
    return consumerSuite.getIterator();
  }

  public int getKafkaServerPort() {
    return _kafkaServerSuite.getKafkaServerPort();
  }

  public String getZkConnectString() {
    return _kafkaServerSuite.getZkConnectString();
  }
}
| 3,738 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka/FlakyKafkaProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.test.ErrorManager;
/**
* A Flaky Kafka Producer that wraps a real KafkaProducer.
* Can be configured to throw errors selectively instead of writing to Kafka
*/
public class FlakyKafkaProducer<K, V> extends KafkaProducer<K, V> {

  // Placeholder future returned on an injected failure; resolves to null metadata.
  private final Future<RecordMetadata> nullFuture = new Future<RecordMetadata>() {
    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
      return false;
    }

    @Override
    public boolean isCancelled() {
      return false;
    }

    @Override
    public boolean isDone() {
      return false;
    }

    @Override
    public RecordMetadata get() throws InterruptedException, ExecutionException {
      return null;
    }

    @Override
    public RecordMetadata get(long timeout, TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
      return null;
    }
  };

  // Decides, per record value, whether to inject a failure instead of producing.
  private final ErrorManager<V> errorManager;

  public FlakyKafkaProducer(Properties properties) {
    super(properties);
    Config config = ConfigFactory.parseProperties(properties);
    errorManager = new ErrorManager<>(config);
  }

  @Override
  public Future<RecordMetadata> send(ProducerRecord<K, V> record) {
    return send(record, null);
  }

  /**
   * Sends the record unless the error manager injects a failure, in which case
   * the callback (if any) is completed with an exception and a null-yielding
   * future is returned instead of producing.
   */
  @Override
  public Future<RecordMetadata> send(ProducerRecord<K, V> record, final Callback callback) {
    // Consult the error manager exactly once. The original computed 'error' and
    // then called nextError() a second time in the if-condition, advancing
    // stateful policies (e.g. "fail every Nth record") twice per send and
    // making the injected-failure schedule wrong.
    boolean error = errorManager.nextError(record.value());
    if (error) {
      final Exception e = new Exception();
      // send(record) delegates here with a null callback; guard against NPE.
      if (callback != null) {
        callback.onCompletion(null, e);
      }
      return nullFuture;
    }
    return super.send(record, callback);
  }
}
| 3,739 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka/writer/Kafka08DataWriterUnitTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import lombok.extern.slf4j.Slf4j;
/**
* Tests that don't need Kafka server to be running
* */
@Slf4j
public class Kafka08DataWriterUnitTest {

  /** The writer must construct successfully with only topic + bootstrap servers set. */
  @Test
  public void testMinimalConfig() {
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, "FakeTopic");
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "localhost:9092");
    try {
      // Only the construction attempt matters here.
      Kafka08DataWriter kafkaWriter = new Kafka08DataWriter<>(props);
    } catch (Exception e) {
      Assert.fail("Should not throw exception", e);
    }
  }
}
| 3,740 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka/writer/Kafka08DataWriterIntegrationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.lang.management.ManagementFactory;
import java.util.Properties;
import org.apache.commons.io.FileUtils;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.io.Closer;
import org.testng.Assert;
import kafka.consumer.ConsumerIterator;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.runtime.JobLauncher;
import org.apache.gobblin.runtime.JobLauncherFactory;
/**
* Tests that set up a complete standalone Gobblin pipeline along with a Kafka suite
*/
@Slf4j
public class Kafka08DataWriterIntegrationTest {
private static final String JOB_PROPS_DIR="gobblin-modules/gobblin-kafka-08/resource/job-props/";
private static final String TEST_LAUNCHER_PROPERTIES_FILE = JOB_PROPS_DIR + "testKafkaIngest.properties";
private static final String TEST_INGEST_PULL_FILE = JOB_PROPS_DIR + "testKafkaIngest.pull";
private Properties gobblinProps;
private Properties jobProps;
private KafkaTestBase kafkaTestHelper;
private static final String TOPIC = Kafka08DataWriterIntegrationTest.class.getName();
@BeforeClass
public void setup() throws Exception {
kafkaTestHelper = new KafkaTestBase();
this.gobblinProps = new Properties();
gobblinProps.load(new FileReader(TEST_LAUNCHER_PROPERTIES_FILE));
this.jobProps = new Properties();
jobProps.load(new FileReader(TEST_INGEST_PULL_FILE));
replaceProperties(gobblinProps, "{$topic}", TOPIC);
replaceProperties(gobblinProps, "{$kafkaPort}", ""+ kafkaTestHelper.getKafkaServerPort());
replaceProperties(jobProps, "{$topic}", TOPIC);
replaceProperties(jobProps, "{$kafkaPort}", ""+kafkaTestHelper.getKafkaServerPort());
kafkaTestHelper.startServers();
}
private void replaceProperties(Properties props, String searchString, String replacementString) {
for (String key: props.stringPropertyNames())
{
String value = props.getProperty(key);
if (value.contains(searchString))
{
String replacedValue = value.replace(searchString, replacementString);
props.setProperty(key, replacedValue);
}
}
}
@Test
public void testErrors() throws Exception {
log.warn("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
int numRecordsPerExtract = 5;
int numParallel = 2;
int errorEvery = 2000;
int totalRecords = numRecordsPerExtract * numParallel;
int totalSuccessful = totalRecords / errorEvery + totalRecords%errorEvery;
{
Closer closer = Closer.create();
try {
kafkaTestHelper.provisionTopic(TOPIC);
jobProps.setProperty("source.numRecordsPerExtract",""+numRecordsPerExtract);
jobProps.setProperty("source.numParallelism",""+numParallel);
jobProps.setProperty("writer.kafka.producerConfig.flaky.errorType","regex");
// all records from partition 0 will be dropped.
jobProps.setProperty("writer.kafka.producerConfig.flaky.regexPattern",":index:0.*");
jobProps.setProperty("job.commit.policy","partial");
jobProps.setProperty("publish.at.job.level","false");
totalSuccessful = 5; // number of records in partition 1
JobLauncher jobLauncher = closer.register(JobLauncherFactory.newJobLauncher(gobblinProps, jobProps));
jobLauncher.launchJob(null);
}
catch (Exception e) {
log.error("Failed to run job with exception ", e);
Assert.fail("Should not throw exception on running the job");
}
finally
{
closer.close();
}
// test records written
testRecordsWritten(totalSuccessful, TOPIC);
}
boolean trySecond = true;
if (trySecond) {
Closer closer = Closer.create();
try {
jobProps.setProperty("source.numRecordsPerExtract", "" + numRecordsPerExtract);
jobProps.setProperty("source.numParallelism", "" + numParallel);
jobProps.setProperty("writer.kafka.producerConfig.flaky.errorType", "nth");
jobProps.setProperty("writer.kafka.producerConfig.flaky.errorEvery", "" + errorEvery);
JobLauncher jobLauncher = closer.register(JobLauncherFactory.newJobLauncher(gobblinProps, jobProps));
jobLauncher.launchJob(null);
totalSuccessful = totalRecords / errorEvery + totalRecords%errorEvery;
} catch (Exception e) {
log.error("Failed to run job with exception ", e);
Assert.fail("Should not throw exception on running the job");
} finally {
closer.close();
}
}
// test records written
testRecordsWritten(totalSuccessful, TOPIC);
}
/**
 * Consumes {@code totalSuccessful} records back from the given topic to verify they were written.
 *
 * <p>If fewer records were actually delivered, {@code iterator.next()} blocks and the test
 * surfaces as a timeout rather than a false pass.
 *
 * @param totalSuccessful number of records expected on the topic
 * @param topic           Kafka topic to consume from
 */
private void testRecordsWritten(int totalSuccessful, String topic)
    throws UnsupportedEncodingException {
  final ConsumerIterator<byte[], byte[]> iterator = kafkaTestHelper.getIteratorForTopic(topic);
  for (int i = 0; i < totalSuccessful; ++i) {
    // StandardCharsets.UTF_8 (fully qualified to leave the import block untouched) never throws,
    // unlike the string charset-name overload used before.
    String message = new String(iterator.next().message(), java.nio.charset.StandardCharsets.UTF_8);
    // Parameterized logging skips the formatting work entirely when debug logging is off.
    log.debug("{} of {}: Message consumed: {}", i + 1, totalSuccessful, message);
  }
}
/**
 * Shuts down the Kafka test harness after all tests in this class have run.
 *
 * @throws IOException if stopping the clients or servers fails
 */
@AfterClass
public void stopServers() throws IOException {
  try {
    // Stop consumers/producers first so no client is left talking to a dead broker.
    kafkaTestHelper.stopClients();
  } finally {
    // Always bring the embedded broker down, even if client shutdown threw.
    kafkaTestHelper.stopServers();
  }
}
/**
 * Deletes the test output directory both before and after this test class runs, so each run
 * starts from (and leaves behind) a clean state.
 */
@AfterClass
@BeforeClass
public void cleanup() throws Exception {
  // commons-io deleteDirectory tolerates a missing directory, so no existence check is needed.
  FileUtils.deleteDirectory(new File("gobblin-kafka/testOutput"));
}
}
| 3,741 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/kafka/writer/Kafka08DataWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Properties;
import org.apache.avro.generic.GenericRecord;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.KafkaTestBase;
import org.apache.gobblin.kafka.schemareg.ConfigDrivenMd5SchemaRegistry;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryConfigurationKeys;
import org.apache.gobblin.kafka.schemareg.SchemaRegistryException;
import org.apache.gobblin.kafka.serialize.LiAvroDeserializer;
import org.apache.gobblin.test.TestUtils;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import static org.mockito.Mockito.*;
/**
 * Integration tests for {@code Kafka08DataWriter}, covering string, raw-byte and Avro record
 * serialization against an embedded Kafka 0.8 broker.
 *
 * <p>Each test provisions its own topic, writes a single message, verifies the write callback
 * fired exactly once with a success and never with a failure, then consumes the message back to
 * check round-trip fidelity.
 */
@Slf4j
public class Kafka08DataWriterTest {

  private final KafkaTestBase _kafkaTestHelper;

  public Kafka08DataWriterTest()
      throws InterruptedException, RuntimeException {
    _kafkaTestHelper = new KafkaTestBase();
  }

  @BeforeSuite
  public void beforeSuite() {
    log.warn("Process id = " + ManagementFactory.getRuntimeMXBean().getName());
    _kafkaTestHelper.startServers();
  }

  @AfterSuite
  public void afterSuite()
      throws IOException {
    try {
      _kafkaTestHelper.stopClients();
    } finally {
      // Always stop the embedded broker, even if client shutdown failed.
      _kafkaTestHelper.stopServers();
    }
  }

  /**
   * Builds the writer configuration shared by all tests: target topic, broker address of the
   * embedded cluster, and the value serializer class under test.
   */
  private Properties writerProperties(String topic, String valueSerializer) {
    Properties props = new Properties();
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
        "localhost:" + _kafkaTestHelper.getKafkaServerPort());
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer", valueSerializer);
    return props;
  }

  @Test
  public void testStringSerialization()
      throws IOException, InterruptedException {
    String topic = "testStringSerialization08";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = writerProperties(topic, "org.apache.kafka.common.serialization.StringSerializer");
    Kafka08DataWriter<String> kafka08DataWriter = new Kafka08DataWriter<>(props);
    String messageString = "foobar";
    WriteCallback callback = mock(WriteCallback.class);

    try {
      kafka08DataWriter.write(messageString, callback);
    } finally {
      kafka08DataWriter.close();
    }

    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
    // Kafka's StringSerializer encodes UTF-8; decode explicitly rather than relying on the
    // platform default charset.
    String messageReceived = new String(message, java.nio.charset.StandardCharsets.UTF_8);
    Assert.assertEquals(messageReceived, messageString);
  }

  @Test
  public void testBinarySerialization()
      throws IOException, InterruptedException {
    String topic = "testBinarySerialization08";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = writerProperties(topic, "org.apache.kafka.common.serialization.ByteArraySerializer");
    Kafka08DataWriter<byte[]> kafka08DataWriter = new Kafka08DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);
    byte[] messageBytes = TestUtils.generateRandomBytes();

    try {
      kafka08DataWriter.write(messageBytes, callback);
    } finally {
      kafka08DataWriter.close();
    }

    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
    Assert.assertEquals(message, messageBytes);
  }

  @Test
  public void testAvroSerialization()
      throws IOException, InterruptedException, SchemaRegistryException {
    String topic = "testAvroSerialization08";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = writerProperties(topic, "org.apache.gobblin.kafka.serialize.LiAvroSerializer");
    // Set up a config-driven schema registry so the LiAvro serializer can resolve schema ids
    // without an external registry service.
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
            + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
        ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());
    Kafka08DataWriter<GenericRecord> kafka08DataWriter = new Kafka08DataWriter<>(props);
    WriteCallback callback = mock(WriteCallback.class);
    GenericRecord record = TestUtils.generateRandomAvroRecord();

    try {
      kafka08DataWriter.write(record, callback);
    } finally {
      kafka08DataWriter.close();
    }

    verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
    verify(callback, never()).onFailure(isA(Exception.class));
    byte[] message = _kafkaTestHelper.getIteratorForTopic(topic).next().message();
    ConfigDrivenMd5SchemaRegistry schemaReg = new ConfigDrivenMd5SchemaRegistry(topic, record.getSchema());
    LiAvroDeserializer deser = new LiAvroDeserializer(schemaReg);
    GenericRecord receivedRecord = deser.deserialize(topic, message);
    // TestNG's assertEquals takes (actual, expected): the received record is the actual value.
    Assert.assertEquals(receivedRecord.toString(), record.toString());
  }
}
| 3,742 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/test/java/org/apache/gobblin/service/SimpleKafkaSpecExecutorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import com.google.common.io.Closer;
import org.apache.commons.lang3.tuple.Pair;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.kafka.writer.KafkaWriterConfigurationKeys;
import org.apache.gobblin.metrics.reporter.KafkaTestBase;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.writer.WriteResponse;
import org.apache.gobblin.runtime.api.SpecExecutor;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class SimpleKafkaSpecExecutorTest extends KafkaTestBase {
public static final String TOPIC = SimpleKafkaSpecExecutorTest.class.getSimpleName();
private Closer _closer;
private Properties _properties;
private SimpleKafkaSpecProducer _seip;
private SimpleKafkaSpecConsumer _seic;
private String _kafkaBrokers;
public SimpleKafkaSpecExecutorTest()
throws InterruptedException, RuntimeException {
super(TOPIC);
_kafkaBrokers = "localhost:" + kafkaPort;
log.info("Going to use Kakfa broker: " + _kafkaBrokers);
}
@Test
public void testAddSpec() throws Exception {
_closer = Closer.create();
_properties = new Properties();
// Properties for Producer
_properties.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, TOPIC);
_properties.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers", _kafkaBrokers);
_properties.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX+"value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
// Properties for Consumer
_properties.setProperty(ConfigurationKeys.KAFKA_BROKERS, _kafkaBrokers);
_properties.setProperty(SimpleKafkaSpecExecutor.SPEC_KAFKA_TOPICS_KEY, TOPIC);
// SEI Producer
_seip = _closer.register(new SimpleKafkaSpecProducer(ConfigUtils.propertiesToConfig(_properties)));
String addedSpecUriString = "/foo/bar/addedSpec";
Spec spec = initJobSpec(addedSpecUriString);
WriteResponse writeResponse = (WriteResponse) _seip.addSpec(spec).get();
log.info("WriteResponse: " + writeResponse);
try {
Thread.sleep(1000);
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
_seic = _closer.register(new SimpleKafkaSpecConsumer(ConfigUtils.propertiesToConfig(_properties)));
List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
Assert.assertTrue(consumedEvent.size() == 1, "Consumption did not match production");
Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(0);
Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.ADD), "Verb did not match");
Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(addedSpecUriString), "Expected URI did not match");
Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
}
@Test (dependsOnMethods = "testAddSpec")
public void testUpdateSpec() throws Exception {
String updatedSpecUriString = "/foo/bar/updatedSpec";
Spec spec = initJobSpec(updatedSpecUriString);
WriteResponse writeResponse = (WriteResponse) _seip.updateSpec(spec).get();
log.info("WriteResponse: " + writeResponse);
try {
Thread.sleep(1000);
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
Assert.assertTrue(consumedEvent.size() == 1, "Consumption did not match production");
Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(0);
Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.UPDATE), "Verb did not match");
Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(updatedSpecUriString), "Expected URI did not match");
Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
}
@Test (dependsOnMethods = "testUpdateSpec")
public void testDeleteSpec() throws Exception {
String deletedSpecUriString = "/foo/bar/deletedSpec";
WriteResponse writeResponse = (WriteResponse) _seip.deleteSpec(new URI(deletedSpecUriString)).get();
log.info("WriteResponse: " + writeResponse);
try {
Thread.sleep(1000);
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = _seic.changedSpecs().get();
Assert.assertTrue(consumedEvent.size() == 1, "Consumption did not match production");
Map.Entry<SpecExecutor.Verb, Spec> consumedSpecAction = consumedEvent.get(0);
Assert.assertTrue(consumedSpecAction.getKey().equals(SpecExecutor.Verb.DELETE), "Verb did not match");
Assert.assertTrue(consumedSpecAction.getValue().getUri().toString().equals(deletedSpecUriString), "Expected URI did not match");
Assert.assertTrue(consumedSpecAction.getValue() instanceof JobSpec, "Expected JobSpec");
}
@Test (dependsOnMethods = "testDeleteSpec")
public void testResetConsumption() throws Exception {
SimpleKafkaSpecConsumer seic = _closer
.register(new SimpleKafkaSpecConsumer(ConfigUtils.propertiesToConfig(_properties)));
List<Pair<SpecExecutor.Verb, Spec>> consumedEvent = seic.changedSpecs().get();
Assert.assertTrue(consumedEvent.size() == 3, "Consumption was reset, we should see all events");
}
private JobSpec initJobSpec(String specUri) {
Properties properties = new Properties();
return JobSpec.builder(specUri)
.withConfig(ConfigUtils.propertiesToConfig(properties))
.withVersion("1")
.withDescription("Spec Description")
.build();
}
@AfterClass
public void after() {
try {
_closer.close();
} catch(Exception e) {
log.error("Failed to close SEIC and SEIP.", e);
}
try {
close();
} catch(Exception e) {
log.error("Failed to close Kafka server.", e);
}
}
@AfterSuite
public void afterSuite() {
closeServer();
}
} | 3,743 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics/kafka/KafkaKeyValueProducerPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingDeque;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Establishes a connection to a Kafka cluster and pushes keyed messages to a specified topic.
 *
 * @param <K> key type
 * @param <V> value type
 */
@Slf4j
public class KafkaKeyValueProducerPusher<K, V> implements Pusher<Pair<K, V>> {
  private static final long DEFAULT_MAX_NUM_FUTURES_TO_BUFFER = 1000L;
  // Low watermark for the size of the futures queue, to trigger flushing of messages.
  private static final String MAX_NUM_FUTURES_TO_BUFFER_KEY = "numFuturesToBuffer";

  private final String topic;
  private final KafkaProducer<K, V> producer;
  private final Closer closer;

  // Queue to keep track of the futures returned by the Kafka asynchronous send() call. The futures queue is used
  // to mimic the functionality of flush() call (available in Kafka 09 and later). Currently, there are no
  // capacity limits on the size of the futures queue. In general, if queue capacity is enforced, a safe lower bound for queue
  // capacity is MAX_NUM_FUTURES_TO_BUFFER + (numThreads * maxNumMessagesPerInterval), where numThreads equals the number of
  // threads sharing the producer instance and maxNumMessagesPerInterval is the estimated maximum number of messages
  // emitted by a thread per reporting interval.
  private final Queue<Future<RecordMetadata>> futures = new LinkedBlockingDeque<>();

  // Initialized from the shared constant (instead of a duplicated literal) so the default stays
  // in one place; may be overridden via the optional kafkaConfig.
  private long numFuturesToBuffer = DEFAULT_MAX_NUM_FUTURES_TO_BUFFER;

  public KafkaKeyValueProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);
    // To guarantee ordered delivery, the maximum in flight requests must be set to 1.
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
    props.put(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, true);

    // Add the kafka scoped config. If any of the above are specified then they are overridden.
    if (kafkaConfig.isPresent()) {
      props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
      this.numFuturesToBuffer = ConfigUtils.getLong(kafkaConfig.get(), MAX_NUM_FUTURES_TO_BUFFER_KEY, DEFAULT_MAX_NUM_FUTURES_TO_BUFFER);
    }

    this.producer = createProducer(props);
  }

  public KafkaKeyValueProducerPusher(String brokers, String topic) {
    this(brokers, topic, Optional.absent());
  }

  /**
   * Push all keyed messages to the Kafka topic.
   *
   * @param messages List of keyed messages to push to Kafka.
   */
  public void pushMessages(List<Pair<K, V>> messages) {
    for (Pair<K, V> message : messages) {
      this.futures.offer(this.producer.send(new ProducerRecord<>(topic, message.getKey(), message.getValue()), (recordMetadata, e) -> {
        if (e != null) {
          log.error("Failed to send message to topic {} due to exception: ", topic, e);
        }
      }));
    }
    // Once the low watermark of numFuturesToBuffer is hit, start flushing messages from the futures
    // buffer. In order to avoid blocking on newest messages added to futures queue, we only invoke future.get() on
    // the oldest messages in the futures buffer. The number of messages to flush is same as the number of messages added
    // in the current call. Note this does not completely avoid calling future.get() on the newer messages e.g. when
    // multiple threads enter the if{} block concurrently, and invoke flush().
    if (this.futures.size() >= this.numFuturesToBuffer) {
      flush(messages.size());
    }
  }

  /**
   * Flush any records that may be present in the producer buffer upto a maximum of <code>numRecordsToFlush</code>.
   * This method is needed since Kafka 0.8 producer does not have a flush() API. In the absence of the flush()
   * implementation, records which are present in the buffer but not in-flight may not be delivered at all when close()
   * is called, leading to data loss.
   *
   * @param numRecordsToFlush maximum number of tracked sends to wait on
   */
  private void flush(long numRecordsToFlush) {
    log.debug("Flushing records from producer buffer");
    long numRecordsFlushed = 0L;
    // Check the count before polling: the previous poll-then-count order removed one extra future
    // from the queue without waiting on it, so a later close() could return before that record
    // was acknowledged.
    while (numRecordsFlushed < numRecordsToFlush) {
      Future<RecordMetadata> future = this.futures.poll();
      if (future == null) {
        break;
      }
      numRecordsFlushed++;
      try {
        future.get();
      } catch (Exception e) {
        log.error("Exception encountered when flushing record", e);
      }
    }
    log.debug("Flushed {} records from producer buffer", numRecordsFlushed);
  }

  @Override
  public void close()
      throws IOException {
    log.info("Flushing records before close");
    // close() only guarantees delivery of in-flight messages, so wait on every tracked send first.
    flush(Long.MAX_VALUE);
    this.closer.close();
  }

  /**
   * Create the Kafka producer.
   */
  protected KafkaProducer<K, V> createProducer(Properties props) {
    return this.closer.register(new KafkaProducer<K, V>(props));
  }
}
| 3,744 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics/kafka/KafkaPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import javax.annotation.Nullable;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
/**
 * Establishes a connection to a Kafka cluster and pushes byte messages to a specified topic.
 */
public class KafkaPusher implements Pusher<byte[]> {
  private final String topic;
  private final ProducerCloseable<String, byte[]> producer;
  private final Closer closer;

  public KafkaPusher(String brokers, String topic) {
    this.closer = Closer.create();
    this.topic = topic;

    Properties props = new Properties();
    props.put("metadata.broker.list", brokers);
    props.put("serializer.class", "kafka.serializer.DefaultEncoder");
    props.put("request.required.acks", "1");

    this.producer = createProducer(new ProducerConfig(props));
  }

  /**
   * Push all byte array messages to the Kafka topic.
   *
   * @param messages List of byte array messages to push to Kafka.
   */
  public void pushMessages(List<byte[]> messages) {
    // Build the keyed batch eagerly; equivalent to the lazy Lists.transform view because
    // producer.send consumes every element exactly once.
    List<KeyedMessage<String, byte[]>> keyedMessages = Lists.newArrayListWithCapacity(messages.size());
    for (byte[] message : messages) {
      keyedMessages.add(new KeyedMessage<String, byte[]>(topic, message));
    }
    this.producer.send(keyedMessages);
  }

  @Override
  public void close()
      throws IOException {
    this.closer.close();
  }

  /**
   * Actually creates the Kafka producer.
   */
  protected ProducerCloseable<String, byte[]> createProducer(ProducerConfig config) {
    return this.closer.register(new ProducerCloseable<String, byte[]>(config));
  }
}
| 3,745 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics/kafka/KafkaProducerPusher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.gobblin.KafkaCommonUtil;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import com.google.common.base.Optional;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Establishes a connection to a Kafka cluster and pushes byte messages to a specified topic.
 */
@Slf4j
public class KafkaProducerPusher implements Pusher<byte[]> {
  private static final long DEFAULT_MAX_NUM_FUTURES_TO_BUFFER = 1000L;
  // Low watermark for the size of the futures queue, to trigger flushing of messages.
  private static final String MAX_NUM_FUTURES_TO_BUFFER_KEY = "numFuturesToBuffer";

  private final String topic;
  private final KafkaProducer<String, byte[]> producer;
  private final Closer closer;

  // Queue to keep track of the futures returned by the Kafka asynchronous send() call. The futures queue is used
  // to mimic the functionality of flush() call (available in Kafka 09 and later). Currently, there are no
  // capacity limits on the size of the futures queue. In general, if queue capacity is enforced, a safe lower bound for queue
  // capacity is MAX_NUM_FUTURES_TO_BUFFER + (numThreads * maxNumMessagesPerInterval), where numThreads equals the number of
  // threads sharing the producer instance and maxNumMessagesPerInterval is the estimated maximum number of messages
  // emitted by a thread per reporting interval.
  private final Queue<Future<RecordMetadata>> futures = new LinkedBlockingDeque<>();

  // Initialized from the shared constant (instead of a duplicated literal) so the default stays
  // in one place; may be overridden via the optional kafkaConfig.
  private long numFuturesToBuffer = DEFAULT_MAX_NUM_FUTURES_TO_BUFFER;

  public KafkaProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);

    // Add the kafka scoped config. If any of the above are specified then they are overridden.
    if (kafkaConfig.isPresent()) {
      props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
      this.numFuturesToBuffer = ConfigUtils.getLong(kafkaConfig.get(), MAX_NUM_FUTURES_TO_BUFFER_KEY, DEFAULT_MAX_NUM_FUTURES_TO_BUFFER);
    }

    this.producer = createProducer(props);
  }

  public KafkaProducerPusher(String brokers, String topic) {
    this(brokers, topic, Optional.absent());
  }

  /**
   * Push all byte array messages to the Kafka topic.
   *
   * @param messages List of byte array messages to push to Kafka.
   */
  public void pushMessages(List<byte[]> messages) {
    for (byte[] message : messages) {
      this.futures.offer(producer.send(new ProducerRecord<>(topic, message), (recordMetadata, e) -> {
        if (e != null) {
          log.error("Failed to send message to topic {} due to exception: ", topic, e);
        }
      }));
    }
    // Once the low watermark of numFuturesToBuffer is hit, start flushing messages from the futures
    // buffer. In order to avoid blocking on newest messages added to futures queue, we only invoke future.get() on
    // the oldest messages in the futures buffer. The number of messages to flush is same as the number of messages added
    // in the current call. Note this does not completely avoid calling future.get() on the newer messages e.g. when
    // multiple threads enter the if{} block concurrently, and invoke flush().
    if (this.futures.size() >= this.numFuturesToBuffer) {
      flush(messages.size());
    }
  }

  /**
   * Flush any records that may be present in the producer buffer upto a maximum of <code>numRecordsToFlush</code>.
   * This method is needed since Kafka 0.8 producer does not have a flush() API. In the absence of the flush()
   * implementation, records which are present in the buffer but not in-flight may not be delivered at all when close()
   * is called, leading to data loss.
   *
   * @param numRecordsToFlush maximum number of tracked sends to wait on
   */
  private void flush(long numRecordsToFlush) {
    log.debug("Flushing records from producer buffer");
    long numRecordsFlushed = 0L;
    // Check the count before polling: the previous poll-then-count order removed one extra future
    // from the queue without waiting on it, so a later close() could return before that record
    // was acknowledged.
    while (numRecordsFlushed < numRecordsToFlush) {
      Future<RecordMetadata> future = this.futures.poll();
      if (future == null) {
        break;
      }
      numRecordsFlushed++;
      try {
        future.get();
      } catch (Exception e) {
        log.error("Exception encountered when flushing record", e);
      }
    }
    log.debug("Flushed {} records from producer buffer", numRecordsFlushed);
  }

  @Override
  public void close()
      throws IOException {
    log.info("Flushing records before close");
    // Call flush() before invoking close() to ensure any buffered messages are immediately sent. This is required
    // since close() only guarantees delivery of in-flight messages. Set a timeout to prevent GOBBLIN-1432 issue.
    // This issue shouldn't exist in later versions, as the native flush has a timeout setting offset.flush.timeout.ms.
    try {
      KafkaCommonUtil.runWithTimeout(
          () -> flush(Long.MAX_VALUE), KafkaCommonUtil.KAFKA_FLUSH_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      log.warn("Flush records before close was interrupted! Reached {} seconds timeout!",
          KafkaCommonUtil.KAFKA_FLUSH_TIMEOUT_SECONDS);
    } catch (Exception e) {
      log.error("Exception encountered when flushing record before close", e);
    }
    this.closer.close();
  }

  /**
   * Create the Kafka producer.
   */
  protected KafkaProducer<String, byte[]> createProducer(Properties props) {
    return this.closer.register(new KafkaProducer<String, byte[]>(props));
  }
}
| 3,746 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/metrics/kafka/ProducerCloseable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metrics.kafka;
import java.io.Closeable;
import kafka.javaapi.producer.Producer;
import kafka.producer.ProducerConfig;
/**
 * A {@link Closeable} view of the Kafka 0.8 {@code kafka.javaapi.producer.Producer}, so the
 * producer can be registered with resource managers such as Guava's {@code Closer}.
 *
 * <p>No behavior is added here; the {@link Closeable#close()} contract is satisfied by the
 * {@code close()} method inherited from {@code Producer}.
 *
 * @param <K> message key type
 * @param <V> message value type
 */
public class ProducerCloseable<K,V> extends Producer<K,V> implements Closeable {
/** Creates a producer from the given Kafka producer configuration. */
public ProducerCloseable(ProducerConfig config) {
super(config);
}
}
| 3,747 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaGsonDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.serialization.Deserializer;
import com.google.common.annotations.VisibleForTesting;
import com.google.gson.Gson;
import com.google.gson.JsonElement;
import org.apache.gobblin.kafka.serialize.GsonDeserializerBase;
/**
 * Implementation of {@link Deserializer} that deserializes Kafka data into a {@link JsonElement} using the
 * {@link StandardCharsets#UTF_8} encoding.
 *
 * <p>All deserialization behavior is inherited from {@link GsonDeserializerBase}; this class only binds
 * that behavior to the Kafka {@link Deserializer} interface.</p>
 */
public class KafkaGsonDeserializer extends GsonDeserializerBase<JsonElement> implements Deserializer<JsonElement> {

  // Charset used to decode message bytes; package-visible so tests can reference it.
  // (A previously-declared private static Gson instance was removed: it was never referenced in this
  // class and, being private, was invisible to the base class and to tests.)
  @VisibleForTesting
  static final Charset CHARSET = StandardCharsets.UTF_8;
}
| 3,748 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.Closeable;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.regex.Pattern;
import org.apache.commons.lang3.NotImplementedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.net.HostAndPort;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.kafka.client.GobblinKafkaConsumerClient;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.DatasetFilterUtils;
import kafka.api.PartitionFetchInfo;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchRequest;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;
/**
 * Wrapper class that contains two alternative Kafka APIs: an old low-level Scala-based API, and a new API.
 * The new API has not been implemented since it's not ready to be open sourced.
 *
 * @deprecated - Use {@link GobblinKafkaConsumerClient}
 *
 * @author Ziyang Liu
 */
@Deprecated
public class KafkaWrapper implements Closeable {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaWrapper.class);

  private static final String USE_NEW_KAFKA_API = "use.new.kafka.api";
  private static final boolean DEFAULT_USE_NEW_KAFKA_API = false;

  private final List<String> brokers;
  private final KafkaAPI kafkaAPI;
  private final boolean useNewKafkaAPI;

  /**
   * Builder for {@link KafkaWrapper}. Requires at least one broker in {@code address:port} form.
   */
  private static class Builder {
    private boolean useNewKafkaAPI = DEFAULT_USE_NEW_KAFKA_API;
    private List<String> brokers = Lists.newArrayList();
    private Config config = ConfigFactory.empty();

    private Builder withNewKafkaAPI() {
      this.useNewKafkaAPI = true;
      return this;
    }

    private Builder withBrokers(List<String> brokers) {
      for (String broker : brokers) {
        Preconditions.checkArgument(broker.matches(".+:\\d+"),
            String.format("Invalid broker: %s. Must be in the format of address:port.", broker));
      }
      this.brokers = Lists.newArrayList(brokers);
      return this;
    }

    private Builder withConfig(Config config) {
      this.config = config;
      return this;
    }

    private KafkaWrapper build() {
      Preconditions.checkArgument(!this.brokers.isEmpty(), "Need to specify at least one Kafka broker.");
      return new KafkaWrapper(this);
    }
  }

  private KafkaWrapper(Builder builder) {
    this.useNewKafkaAPI = builder.useNewKafkaAPI;
    this.brokers = builder.brokers;
    this.kafkaAPI = getKafkaAPI(builder.config);
  }

  /**
   * Create a KafkaWrapper based on the given type of Kafka API and list of Kafka brokers.
   *
   * @param state A {@link State} object that should contain a list of comma separated Kafka brokers
   * in property "kafka.brokers". It may optionally specify whether to use the new Kafka API by setting
   * use.new.kafka.api=true.
   */
  public static KafkaWrapper create(State state) {
    Preconditions.checkNotNull(state.getProp(ConfigurationKeys.KAFKA_BROKERS),
        "Need to specify at least one Kafka broker.");
    KafkaWrapper.Builder builder = new KafkaWrapper.Builder();
    if (state.getPropAsBoolean(USE_NEW_KAFKA_API, DEFAULT_USE_NEW_KAFKA_API)) {
      builder = builder.withNewKafkaAPI();
    }
    Config config = ConfigUtils.propertiesToConfig(state.getProperties());
    return builder.withBrokers(state.getPropAsList(ConfigurationKeys.KAFKA_BROKERS))
        .withConfig(config)
        .build();
  }

  public List<String> getBrokers() {
    return this.brokers;
  }

  public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
    return this.kafkaAPI.getFilteredTopics(blacklist, whitelist);
  }

  public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    return this.kafkaAPI.getEarliestOffset(partition);
  }

  public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    return this.kafkaAPI.getLatestOffset(partition);
  }

  public Iterator<MessageAndOffset> fetchNextMessageBuffer(KafkaPartition partition, long nextOffset, long maxOffset) {
    return this.kafkaAPI.fetchNextMessageBuffer(partition, nextOffset, maxOffset);
  }

  private KafkaAPI getKafkaAPI(Config config) {
    if (this.useNewKafkaAPI) {
      return new KafkaNewAPI(config);
    }
    return new KafkaOldAPI(config);
  }

  @Override
  public void close() throws IOException {
    this.kafkaAPI.close();
  }

  /**
   * @deprecated - Use {@link GobblinKafkaConsumerClient}
   */
  @Deprecated
  private abstract class KafkaAPI implements Closeable {
    protected KafkaAPI(Config config) {
    }

    protected abstract List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist);

    protected abstract long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException;

    protected abstract long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException;

    protected abstract Iterator<MessageAndOffset> fetchNextMessageBuffer(KafkaPartition partition, long nextOffset,
        long maxOffset);
  }

  /**
   * Wrapper for the old low-level Scala-based Kafka API.
   *
   * @deprecated - Use {@link GobblinKafkaConsumerClient}
   */
  @Deprecated
  private class KafkaOldAPI extends KafkaAPI {
    public static final String CONFIG_PREFIX = "source.kafka.";
    public static final String CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE = CONFIG_PREFIX + "socketTimeoutMillis";
    public static final int CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE_DEFAULT = 30000; // 30 seconds
    public static final String CONFIG_KAFKA_BUFFER_SIZE_BYTES = CONFIG_PREFIX + "bufferSizeBytes";
    public static final int CONFIG_KAFKA_BUFFER_SIZE_BYTES_DEFAULT = 1024*1024; // 1MB
    public static final String CONFIG_KAFKA_CLIENT_NAME = CONFIG_PREFIX + "clientName";
    public static final String CONFIG_KAFKA_CLIENT_NAME_DEFAULT = "gobblin-kafka";
    public static final String CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID = CONFIG_PREFIX + "fetchCorrelationId";
    private static final int CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID_DEFAULT = -1;
    public static final String CONFIG_KAFKA_FETCH_TIMEOUT_VALUE = CONFIG_PREFIX + "fetchTimeoutMillis";
    public static final int CONFIG_KAFKA_FETCH_TIMEOUT_VALUE_DEFAULT = 1000; // 1 second
    public static final String CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES = CONFIG_PREFIX + "fetchMinBytes";
    private static final int CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES_DEFAULT = 1024;
    public static final String CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES = CONFIG_PREFIX + "fetchTopicNumTries";
    private static final int CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES_DEFAULT = 3;
    public static final String CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES = CONFIG_PREFIX + "fetchOffsetNumTries";
    private static final int CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES_DEFAULT = 3;

    private final int socketTimeoutMillis;
    private final int bufferSize;
    private final String clientName;
    private final int fetchCorrelationId;
    private final int fetchTimeoutMillis;
    private final int fetchMinBytes;
    private final int fetchTopicRetries;
    private final int fetchOffsetRetries;

    // Cache of long-lived consumers, one per broker, shared by offset and fetch calls.
    private final ConcurrentMap<String, SimpleConsumer> activeConsumers = Maps.newConcurrentMap();

    protected KafkaOldAPI(Config config) {
      super(config);
      socketTimeoutMillis = ConfigUtils.getInt(config, CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE,
          CONFIG_KAFKA_SOCKET_TIMEOUT_VALUE_DEFAULT);
      bufferSize = ConfigUtils.getInt(config, CONFIG_KAFKA_BUFFER_SIZE_BYTES, CONFIG_KAFKA_BUFFER_SIZE_BYTES_DEFAULT);
      clientName = ConfigUtils.getString(config, CONFIG_KAFKA_CLIENT_NAME, CONFIG_KAFKA_CLIENT_NAME_DEFAULT);
      fetchCorrelationId = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID,
          CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID_DEFAULT);
      fetchTimeoutMillis = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_TIMEOUT_VALUE,
          CONFIG_KAFKA_FETCH_TIMEOUT_VALUE_DEFAULT);
      fetchMinBytes = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES,
          CONFIG_KAFKA_FETCH_REQUEST_MIN_BYTES_DEFAULT);
      fetchTopicRetries = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES,
          CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES_DEFAULT);
      fetchOffsetRetries = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES,
          CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES_DEFAULT);
      Preconditions.checkArgument((this.fetchTimeoutMillis < this.socketTimeoutMillis),
          "Kafka Source configuration error: FetchTimeout " + this.fetchTimeoutMillis +
              " must be smaller than SocketTimeout " + this.socketTimeoutMillis);
    }

    @Override
    public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
      List<TopicMetadata> topicMetadataList = getFilteredMetadataList(blacklist, whitelist);

      List<KafkaTopic> filteredTopics = Lists.newArrayList();
      for (TopicMetadata topicMetadata : topicMetadataList) {
        List<KafkaPartition> partitions = getPartitionsForTopic(topicMetadata);
        filteredTopics.add(new KafkaTopic(topicMetadata.topic(), partitions));
      }
      return filteredTopics;
    }

    /**
     * Converts a topic's metadata into {@link KafkaPartition}s. A topic with any null partition
     * metadata or null leader yields an empty list (the whole topic is skipped).
     */
    private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
      List<KafkaPartition> partitions = Lists.newArrayList();

      for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (null == partitionMetadata) {
          LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
          return Collections.emptyList();
        }
        if (null == partitionMetadata.leader()) {
          LOG.error(
              "Ignoring topic with null partition leader " + topicMetadata.topic() + " metatada=" + partitionMetadata);
          return Collections.emptyList();
        }
        partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
            .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
            .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
      }
      return partitions;
    }

    private List<TopicMetadata> getFilteredMetadataList(List<Pattern> blacklist, List<Pattern> whitelist) {
      List<TopicMetadata> filteredTopicMetadataList = Lists.newArrayList();

      //Try all brokers one by one, until successfully retrieved topic metadata (topicMetadataList is non-null)
      for (String broker : KafkaWrapper.this.getBrokers()) {
        filteredTopicMetadataList = fetchTopicMetadataFromBroker(broker, blacklist, whitelist);
        if (filteredTopicMetadataList != null) {
          return filteredTopicMetadataList;
        }
      }
      throw new RuntimeException(
          "Fetching topic metadata from all brokers failed. See log warning for more information.");
    }

    private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, List<Pattern> blacklist,
        List<Pattern> whitelist) {
      List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker);
      if (topicMetadataList == null) {
        return null;
      }

      List<TopicMetadata> filteredTopicMetadataList = Lists.newArrayList();
      for (TopicMetadata topicMetadata : topicMetadataList) {
        if (DatasetFilterUtils.survived(topicMetadata.topic(), blacklist, whitelist)) {
          filteredTopicMetadataList.add(topicMetadata);
        }
      }
      return filteredTopicMetadataList;
    }

    /**
     * Fetches metadata for the given topics (all topics if none specified) from one broker, with
     * retries. Returns null if all retries fail.
     */
    private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
      LOG.info(String.format("Fetching topic metadata from broker %s", broker));
      // Use a dedicated, short-lived consumer here rather than the shared cached one: this method
      // closes the consumer in the finally block, and closing a consumer obtained from
      // activeConsumers would leave a dead connection in the cache for later offset/fetch calls.
      SimpleConsumer consumer = null;
      try {
        consumer = createSimpleConsumer(broker);

        for (int i = 0; i < this.fetchTopicRetries; i++) {
          try {
            return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
          } catch (Exception e) {
            LOG.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
            try {
              Thread.sleep((long) ((i + Math.random()) * 1000));
            } catch (InterruptedException e2) {
              LOG.warn("Caught InterruptedException: " + e2);
            }
          }
        }
      } finally {
        if (consumer != null) {
          consumer.close();
        }
      }
      return null;
    }

    /**
     * Returns the cached {@link SimpleConsumer} for the given broker, creating and caching one if
     * absent. Thread-safe: if two threads race to create a consumer for the same broker, the loser's
     * consumer is closed and the winner's cached instance is returned, so no connection is leaked and
     * all callers share the same cached instance.
     */
    private SimpleConsumer getSimpleConsumer(String broker) {
      SimpleConsumer consumer = this.activeConsumers.get(broker);
      if (consumer != null) {
        return consumer;
      }
      SimpleConsumer newConsumer = this.createSimpleConsumer(broker);
      SimpleConsumer existingConsumer = this.activeConsumers.putIfAbsent(broker, newConsumer);
      if (existingConsumer != null) {
        // Another thread cached a consumer first; discard ours and use the cached one.
        newConsumer.close();
        return existingConsumer;
      }
      return newConsumer;
    }

    private SimpleConsumer getSimpleConsumer(HostAndPort hostAndPort) {
      return this.getSimpleConsumer(hostAndPort.toString());
    }

    private SimpleConsumer createSimpleConsumer(String broker) {
      List<String> hostPort = Splitter.on(':').trimResults().omitEmptyStrings().splitToList(broker);
      return createSimpleConsumer(hostPort.get(0), Integer.parseInt(hostPort.get(1)));
    }

    private SimpleConsumer createSimpleConsumer(String host, int port) {
      return new SimpleConsumer(host, port, this.socketTimeoutMillis, this.bufferSize, this.clientName);
    }

    @Override
    protected long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
      Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
          Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
              new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
      return getOffset(partition, offsetRequestInfo);
    }

    @Override
    protected long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
      Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
          Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
              new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
      return getOffset(partition, offsetRequestInfo);
    }

    /**
     * Retrieves an offset from the partition leader with retries; throws
     * {@link KafkaOffsetRetrievalFailureException} when all retries are exhausted.
     */
    private long getOffset(KafkaPartition partition,
        Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
        throws KafkaOffsetRetrievalFailureException {
      SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
      for (int i = 0; i < this.fetchOffsetRetries; i++) {
        try {
          OffsetResponse offsetResponse = consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo,
              kafka.api.OffsetRequest.CurrentVersion(), this.clientName));
          if (offsetResponse.hasError()) {
            throw new RuntimeException(
                "offsetReponse has error: " + offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
          }
          return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
        } catch (Exception e) {
          LOG.warn(
              String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1, e));
          if (i < this.fetchOffsetRetries - 1) {
            try {
              Thread.sleep((long) ((i + Math.random()) * 1000));
            } catch (InterruptedException e2) {
              LOG.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
            }
          }
        }
      }
      throw new KafkaOffsetRetrievalFailureException(
          String.format("Fetching offset for partition %s has failed.", partition));
    }

    @Override
    protected Iterator<MessageAndOffset> fetchNextMessageBuffer(KafkaPartition partition, long nextOffset,
        long maxOffset) {
      if (nextOffset > maxOffset) {
        return null;
      }

      FetchRequest fetchRequest = createFetchRequest(partition, nextOffset);

      try {
        FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
        return getIteratorFromFetchResponse(fetchResponse, partition);
      } catch (Exception e) {
        LOG.warn(
            String.format("Fetch message buffer for partition %s has failed: %s. Will refresh topic metadata and retry",
                partition, e));
        return refreshTopicMetadataAndRetryFetch(partition, fetchRequest);
      }
    }

    private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest,
        KafkaPartition partition) {
      SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
      FetchResponse fetchResponse = consumer.fetch(fetchRequest);
      if (fetchResponse.hasError()) {
        throw new RuntimeException(
            String.format("error code %d", fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
      }
      return fetchResponse;
    }

    private Iterator<MessageAndOffset> getIteratorFromFetchResponse(FetchResponse fetchResponse,
        KafkaPartition partition) {
      try {
        ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
        return messageBuffer.iterator();
      } catch (Exception e) {
        LOG.warn(String.format("Failed to retrieve next message buffer for partition %s: %s."
            + "The remainder of this partition will be skipped.", partition, e));
        return null;
      }
    }

    /**
     * Re-resolves the partition leader and retries the fetch once; returns null (partition skipped)
     * if the retry fails too.
     */
    private Iterator<MessageAndOffset> refreshTopicMetadataAndRetryFetch(KafkaPartition partition,
        FetchRequest fetchRequest) {
      try {
        refreshTopicMetadata(partition);
        FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
        return getIteratorFromFetchResponse(fetchResponse, partition);
      } catch (Exception e) {
        LOG.warn(String.format("Fetch message buffer for partition %s has failed: %s. This partition will be skipped.",
            partition, e));
        return null;
      }
    }

    private void refreshTopicMetadata(KafkaPartition partition) {
      for (String broker : KafkaWrapper.this.getBrokers()) {
        List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
        if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
          TopicMetadata topicMetadata = topicMetadataList.get(0);
          for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
            if (partitionMetadata.partitionId() == partition.getId()) {
              partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
                  partitionMetadata.leader().port());
              break;
            }
          }
          break;
        }
      }
    }

    private FetchRequest createFetchRequest(KafkaPartition partition, long nextOffset) {
      TopicAndPartition topicAndPartition = new TopicAndPartition(partition.getTopicName(), partition.getId());
      PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(nextOffset, this.bufferSize);
      Map<TopicAndPartition, PartitionFetchInfo> fetchInfo =
          Collections.singletonMap(topicAndPartition, partitionFetchInfo);
      return new FetchRequest(this.fetchCorrelationId, this.clientName,
          this.fetchTimeoutMillis, this.fetchMinBytes, fetchInfo);
    }

    @Override
    public void close() throws IOException {
      int numOfConsumersNotClosed = 0;

      for (SimpleConsumer consumer : this.activeConsumers.values()) {
        if (consumer != null) {
          try {
            consumer.close();
          } catch (Exception e) {
            LOG.warn(String.format("Failed to close Kafka Consumer %s:%d", consumer.host(), consumer.port()));
            numOfConsumersNotClosed++;
          }
        }
      }
      this.activeConsumers.clear();
      if (numOfConsumersNotClosed > 0) {
        throw new IOException(numOfConsumersNotClosed + " consumer(s) failed to close.");
      }
    }
  }

  /**
   * Wrapper for the new Kafka API.
   * @deprecated - Use {@link GobblinKafkaConsumerClient}
   */
  @Deprecated
  private class KafkaNewAPI extends KafkaAPI {

    protected KafkaNewAPI(Config config) {
      super(config);
    }

    @Override
    public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
      throw new NotImplementedException("kafka new API has not been implemented");
    }

    @Override
    protected long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
      throw new NotImplementedException("kafka new API has not been implemented");
    }

    @Override
    protected long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
      throw new NotImplementedException("kafka new API has not been implemented");
    }

    @Override
    public void close() throws IOException {
      throw new NotImplementedException("kafka new API has not been implemented");
    }

    @Override
    protected Iterator<MessageAndOffset> fetchNextMessageBuffer(KafkaPartition partition, long nextOffset,
        long maxOffset) {
      throw new NotImplementedException("kafka new API has not been implemented");
    }
  }
}
| 3,749 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaDeserializerExtractor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import io.confluent.kafka.serializers.KafkaAvroDeserializer;
import io.confluent.kafka.serializers.KafkaJsonDeserializer;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.kafka.client.ByteArrayBasedKafkaRecord;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.PropertiesUtils;
/**
 * <p>
 * Extension of {@link KafkaExtractor} that wraps Kafka's {@link Deserializer} API. Kafka's {@link Deserializer} provides
 * a generic way of converting Kafka {@link kafka.message.Message}s to {@link Object}. Typically, a {@link Deserializer}
 * will be used along with a {@link org.apache.kafka.common.serialization.Serializer} which is responsible for converting
 * an {@link Object} to a Kafka {@link kafka.message.Message}. These APIs are useful for reading and writing to Kafka,
 * since Kafka is primarily a byte oriented system.
 * </p>
 *
 * <p>
 * This class wraps the {@link Deserializer} API allowing any existing classes that implement the {@link Deserializer}
 * API to integrate with seamlessly with Gobblin. The deserializer can be specified in the following ways:
 *
 * <ul>
 *   <li>{@link #KAFKA_DESERIALIZER_TYPE} can be used to specify a pre-defined enum from {@link Deserializers} or
 *   it can be used to specify the fully-qualified name of a {@link Class} that defines the {@link Deserializer}
 *   interface. If this property is set to a class name, then {@link KafkaSchemaRegistry} must also be specified
 *   using the {@link KafkaSchemaRegistry#KAFKA_SCHEMA_REGISTRY_CLASS} config key</li>
 * </ul>
 * </p>
 */
@Getter(AccessLevel.PACKAGE)
@Alias(value = "DESERIALIZER")
public class KafkaDeserializerExtractor extends KafkaExtractor<Object, Object> {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaDeserializerExtractor.class);

  // Required config: either a Deserializers enum name or a fully-qualified Deserializer class name.
  public static final String KAFKA_DESERIALIZER_TYPE = "kafka.deserializer.type";

  // Config key expected by Confluent's client libraries; populated in getProps() from Gobblin's
  // KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL so users need only set the latter.
  private static final String CONFLUENT_SCHEMA_REGISTRY_URL = "schema.registry.url";

  // Deserializer used to convert raw message bytes into records.
  private final Deserializer<?> kafkaDeserializer;
  // Registry used to look up the latest schema for this extractor's topic (see getSchema()).
  private final KafkaSchemaRegistry<?, ?> kafkaSchemaRegistry;
  // Non-null only for CONFLUENT_AVRO; records are converted to this (latest registered) schema
  // in decodeRecord() to support schema evolution.
  private final Schema latestSchema;

  /**
   * Creates an extractor whose deserializer type is read from the {@link #KAFKA_DESERIALIZER_TYPE}
   * property of the given {@link WorkUnitState} (the property is required).
   *
   * @throws ReflectiveOperationException if the deserializer or schema registry cannot be instantiated
   */
  public KafkaDeserializerExtractor(WorkUnitState state) throws ReflectiveOperationException {
    this(state, getDeserializerType(state.getProperties()));
  }

  private KafkaDeserializerExtractor(WorkUnitState state, Optional<Deserializers> deserializerType)
      throws ReflectiveOperationException {
    this(state, deserializerType,
        getDeserializer(getProps(state), deserializerType),
        getKafkaSchemaRegistry(getProps(state)));
  }

  @VisibleForTesting
  KafkaDeserializerExtractor(WorkUnitState state, Optional<Deserializers> deserializerType,
      Deserializer<?> kafkaDeserializer, KafkaSchemaRegistry<?, ?> kafkaSchemaRegistry) {
    super(state);
    this.kafkaDeserializer = kafkaDeserializer;
    this.kafkaSchemaRegistry = kafkaSchemaRegistry;
    // Only the Confluent Avro deserializer needs record conversion to the latest schema.
    this.latestSchema =
        (deserializerType.equals(Optional.of(Deserializers.CONFLUENT_AVRO))) ? (Schema) getSchema() : null;
  }

  /**
   * Deserializes one Kafka message's payload. When {@link #latestSchema} is set (CONFLUENT_AVRO),
   * the deserialized record is additionally converted to that schema.
   */
  @Override
  protected Object decodeRecord(ByteArrayBasedKafkaRecord messageAndOffset) throws IOException {
    Object deserialized = kafkaDeserializer.deserialize(this.topicName, messageAndOffset.getMessageBytes());
    // For Confluent's Schema Registry the read schema is the latest registered schema to support schema evolution
    return (this.latestSchema == null) ? deserialized
        : AvroUtils.convertRecordSchema((GenericRecord) deserialized, this.latestSchema);
  }

  /**
   * Returns the latest schema registered for this extractor's topic, or {@code null} when the work
   * unit is empty (no gap between low and high watermarks, so no records will be pulled).
   * Any {@link SchemaRegistryException} is rethrown wrapped in a {@link RuntimeException}.
   */
  @Override
  public Object getSchema() {
    try {
      LOG.info("Getting schema for {}. Gap: {} HighWaterMark: {}", this.topicName, this.lowWatermark.getGap(this.highWatermark));
      //If HighWatermark equals LowWatermark that might mean the workunit is an empty workunit
      if (this.lowWatermark.getGap(this.highWatermark) == 0) {
        LOG.info("Not getting schema for {} as the gap between high and low watermark is 0", this.topicName);
        return null;
      }
      return this.kafkaSchemaRegistry.getLatestSchemaByTopic(this.topicName);
    } catch (SchemaRegistryException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Resolves {@link #KAFKA_DESERIALIZER_TYPE} to one of the pre-defined {@link Deserializers};
   * absent if the property holds a class name instead of an enum name.
   */
  private static Optional<Deserializers> getDeserializerType(Properties props) {
    Preconditions.checkArgument(props.containsKey(KAFKA_DESERIALIZER_TYPE),
        "Missing required property " + KAFKA_DESERIALIZER_TYPE);
    // NOTE(review): toUpperCase() uses the default locale; Locale.ROOT would make enum matching
    // locale-independent (e.g. under a Turkish default locale) — confirm before changing.
    return Enums.getIfPresent(Deserializers.class, props.getProperty(KAFKA_DESERIALIZER_TYPE).toUpperCase());
  }

  /**
   * Constructs a {@link Deserializer}, using the value of {@link #KAFKA_DESERIALIZER_TYPE}.
   * Falls back to instantiating the property value as a class name when it is not a
   * {@link Deserializers} enum constant. The deserializer is configured (as a value deserializer)
   * with the full property map before being returned.
   */
  private static Deserializer<?> getDeserializer(Properties props, Optional<Deserializers> deserializerType) throws ReflectiveOperationException {
    Deserializer<?> deserializer;
    if (deserializerType.isPresent()) {
      deserializer = ConstructorUtils.invokeConstructor(deserializerType.get().getDeserializerClass());
    } else {
      deserializer = Deserializer.class
          .cast(ConstructorUtils.invokeConstructor(Class.forName(props.getProperty(KAFKA_DESERIALIZER_TYPE))));
    }
    deserializer.configure(PropertiesUtils.propsToStringKeyMap(props), false);
    return deserializer;
  }

  /**
   * Constructs a {@link KafkaSchemaRegistry} using the value of {@link #KAFKA_DESERIALIZER_TYPE}, if not set it
   * defaults to {@link SimpleKafkaSchemaRegistry}.
   */
  private static KafkaSchemaRegistry<?, ?> getKafkaSchemaRegistry(Properties props)
      throws ReflectiveOperationException {
    // NOTE(review): assumes KAFKA_DESERIALIZER_TYPE is present (the public constructor path enforces
    // this via getDeserializerType); a direct call without it would NPE on getProperty().toUpperCase().
    Optional<Deserializers> deserializerType =
        Enums.getIfPresent(Deserializers.class, props.getProperty(KAFKA_DESERIALIZER_TYPE).toUpperCase());
    if (deserializerType.isPresent()) {
      return ConstructorUtils.invokeConstructor(deserializerType.get().getSchemaRegistryClass(), props);
    }
    if (props.containsKey(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS)) {
      return KafkaSchemaRegistry.get(props);
    }
    return new SimpleKafkaSchemaRegistry(props);
  }

  /**
   * Gets {@link Properties} from a {@link WorkUnitState} and sets the config <code>schema.registry.url</code> to value
   * of {@link KafkaSchemaRegistry#KAFKA_SCHEMA_REGISTRY_URL} if set. This way users don't need to specify both
   * properties as <code>schema.registry.url</code> is required by the {@link ConfluentKafkaSchemaRegistry}.
   */
  private static Properties getProps(WorkUnitState workUnitState) {
    Properties properties = workUnitState.getProperties();
    if (properties.containsKey(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL)) {
      properties.setProperty(CONFLUENT_SCHEMA_REGISTRY_URL,
          properties.getProperty(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_URL));
    }
    return properties;
  }

  /**
   * Pre-defined {@link Deserializer}s that can be referenced by the enum name. Each constant pairs a
   * deserializer class with the {@link KafkaSchemaRegistry} implementation it requires.
   */
  @AllArgsConstructor
  @Getter
  public enum Deserializers {
    /**
     * Confluent's Avro {@link Deserializer}
     *
     * @see KafkaAvroDeserializer
     */
    CONFLUENT_AVRO(KafkaAvroDeserializer.class, ConfluentKafkaSchemaRegistry.class),

    /**
     * Confluent's JSON {@link Deserializer}
     *
     * @see KafkaJsonDeserializer
     */
    CONFLUENT_JSON(KafkaJsonDeserializer.class, SimpleKafkaSchemaRegistry.class),

    /**
     * A custom {@link Deserializer} for converting <code>byte[]</code> to {@link com.google.gson.JsonElement}s
     *
     * @see KafkaGsonDeserializer
     */
    GSON(KafkaGsonDeserializer.class, SimpleKafkaSchemaRegistry.class),

    /**
     * A standard Kafka {@link Deserializer} that does nothing, it simply returns the <code>byte[]</code>
     */
    BYTE_ARRAY(ByteArrayDeserializer.class, SimpleKafkaSchemaRegistry.class),

    /**
     * A standard Kafka {@link Deserializer} for converting <code>byte[]</code> to {@link String}s
     */
    STRING(StringDeserializer.class, SimpleKafkaSchemaRegistry.class);

    private final Class<? extends Deserializer> deserializerClass;
    private final Class<? extends KafkaSchemaRegistry> schemaRegistryClass;
  }
}
| 3,750 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/source/extractor/extract/kafka/KafkaDeserializerSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.source.extractor.extract.kafka;
import java.io.IOException;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.Extractor;
/**
* Extension of {@link KafkaSource} that returns a {@link KafkaDeserializerExtractor}.
*/
public class KafkaDeserializerSource extends KafkaSource<Object, Object> {

  /**
   * Builds a {@link KafkaDeserializerExtractor} for the given work unit.
   *
   * @param state work unit state used to configure the extractor
   * @return a new {@link KafkaDeserializerExtractor}
   * @throws RuntimeException if the extractor's deserializer cannot be constructed reflectively
   */
  @Override
  public Extractor<Object, Object> getExtractor(WorkUnitState state) throws IOException {
    try {
      return new KafkaDeserializerExtractor(state);
    } catch (ReflectiveOperationException reflectiveFailure) {
      // Reflection failures are configuration/programming errors; surface them as unchecked.
      throw new RuntimeException(reflectiveFailure);
    }
  }
}
| 3,751 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroSerializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serializer;
/**
* LinkedIn's implementation of Avro-schema based serialization for Kafka
* TODO: Implement this for IndexedRecord not just GenericRecord
*
*/
public class LiAvroSerializer extends LiAvroSerializerBase implements Serializer<GenericRecord> {
@Override
public byte[] serialize(String topic, GenericRecord data) {
try {
return super.serialize(topic, data);
} catch (org.apache.gobblin.kafka.serialize.SerializationException e) {
throw new SerializationException(e);
}
}
}
| 3,752 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka/serialize/LiAvroDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.serialize;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry;
/**
* The LinkedIn Avro Deserializer (works with records serialized by the {@link LiAvroSerializer})
*/
@Slf4j
public class LiAvroDeserializer extends LiAvroDeserializerBase implements Deserializer<GenericRecord> {
public LiAvroDeserializer(KafkaSchemaRegistry<MD5Digest, Schema> schemaRegistry)
{
super(schemaRegistry);
}
/**
*
* @param topic topic associated with the data
* @param data serialized bytes
* @return deserialized object
*/
@Override
public GenericRecord deserialize(String topic, byte[] data) {
try {
return super.deserialize(topic, data);
}
catch (org.apache.gobblin.kafka.serialize.SerializationException e) {
throw new SerializationException("Error during Deserialization", e);
}
}
}
| 3,753 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka/writer/KafkaDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.writer.AsyncDataWriter;
/**
* Builder that hands back a {@link Kafka08DataWriter}
*/
public class KafkaDataWriterBuilder<S, D> extends AbstractKafkaDataWriterBuilder<S, D> {

  /**
   * Creates the async writer backing this builder.
   *
   * @param props configuration for the underlying Kafka 0.8 producer
   * @return a new {@link Kafka08DataWriter}
   * @throws ConfigurationException if the writer configuration is invalid
   */
  @Override
  protected AsyncDataWriter<D> getAsyncDataWriter(Properties props) throws ConfigurationException {
    return new Kafka08DataWriter<>(props);
  }
}
| 3,754 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka/writer/Kafka08DataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.writer;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import com.google.common.base.Throwables;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationException;
import org.apache.gobblin.writer.WriteCallback;
import org.apache.gobblin.writer.WriteResponse;
import org.apache.gobblin.writer.WriteResponseFuture;
import org.apache.gobblin.writer.WriteResponseMapper;
/**
* Implementation of a Kafka writer that wraps a 0.8 {@link KafkaProducer}.
* This does not provide transactional / exactly-once semantics.
* Applications should expect data to be possibly written to Kafka even if the overall Gobblin job fails.
*
*/
@Slf4j
@Slf4j
public class Kafka08DataWriter<K, V> implements KafkaDataWriter<K, V> {

  /** Maps a Kafka {@link RecordMetadata} ack into Gobblin's {@link WriteResponse} abstraction. */
  public static final WriteResponseMapper<RecordMetadata> WRITE_RESPONSE_WRAPPER =
      new WriteResponseMapper<RecordMetadata>() {
        @Override
        public WriteResponse wrap(final RecordMetadata recordMetadata) {
          return new WriteResponse<RecordMetadata>() {
            @Override
            public RecordMetadata getRawResponse() {
              return recordMetadata;
            }

            @Override
            public String getStringResponse() {
              return recordMetadata.toString();
            }

            @Override
            public long bytesWritten() {
              // Don't know how many bytes were written
              return -1;
            }
          };
        }
      };

  private final Producer<K, V> producer;
  private final String topic;
  private final KafkaWriterCommonConfig commonConfig;

  /**
   * Instantiates the producer configured by {@code props} via {@link KafkaWriterHelper} and
   * casts it to {@link Producer}.
   *
   * @param props producer configuration
   * @return the producer instance
   * @throws ClassCastException if the configured class is not a {@link Producer}
   */
  public static Producer getKafkaProducer(Properties props) {
    Object producerObject = KafkaWriterHelper.getKafkaProducer(props);
    try {
      return (Producer) producerObject;
    } catch (ClassCastException e) {
      log.error("Failed to instantiate Kafka producer " + producerObject.getClass().getName()
          + " as instance of Producer.class", e);
      // ClassCastException is already unchecked; rethrow directly instead of the
      // deprecated Throwables.propagate(e), which does exactly this for RuntimeExceptions.
      throw e;
    }
  }

  public Kafka08DataWriter(Properties props) throws ConfigurationException {
    this(getKafkaProducer(props), ConfigFactory.parseProperties(props));
  }

  /**
   * @param producer producer to write through (owned by this writer; closed in {@link #close()})
   * @param config must contain {@link KafkaWriterConfigurationKeys#KAFKA_TOPIC}
   * @throws ConfigurationException if the common writer configuration is invalid
   */
  public Kafka08DataWriter(Producer producer, Config config) throws ConfigurationException {
    this.topic = config.getString(KafkaWriterConfigurationKeys.KAFKA_TOPIC);
    this.producer = producer;
    this.commonConfig = new KafkaWriterCommonConfig(config);
  }

  @Override
  public void close() throws IOException {
    log.debug("Close called");
    this.producer.close();
  }

  /**
   * Extracts the key/value pair from {@code record} and writes it asynchronously.
   *
   * @param record record to write
   * @param callback invoked on ack or failure
   * @return a future completing with the broker's {@link RecordMetadata}-based response
   */
  @Override
  public Future<WriteResponse> write(final V record, final WriteCallback callback) {
    try {
      Pair<K, V> kvPair = KafkaWriterHelper.getKeyValuePair(record, commonConfig);
      return write(kvPair, callback);
    } catch (Exception e) {
      throw new RuntimeException("Failed to generate write request", e);
    }
  }

  public Future<WriteResponse> write(Pair<K, V> keyValuePair, final WriteCallback callback) {
    try {
      return new WriteResponseFuture<>(this.producer
          .send(new ProducerRecord<>(topic, keyValuePair.getKey(), keyValuePair.getValue()), new Callback() {
            @Override
            public void onCompletion(final RecordMetadata metadata, Exception exception) {
              if (exception != null) {
                callback.onFailure(exception);
              } else {
                callback.onSuccess(WRITE_RESPONSE_WRAPPER.wrap(metadata));
              }
            }
          }), WRITE_RESPONSE_WRAPPER);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create a Kafka write request", e);
    }
  }

  @Override
  public void flush() throws IOException {
    // Do nothing, 0.8 kafka producer doesn't support flush.
  }
}
| 3,755 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka/client/Kafka08ConsumerClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.client;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import com.google.common.base.Function;
import com.google.common.base.Splitter;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.net.HostAndPort;
import com.typesafe.config.Config;
import kafka.api.PartitionFetchInfo;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchRequest;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaOffsetRetrievalFailureException;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaTopic;
import org.apache.gobblin.util.ConfigUtils;
/**
* A {@link GobblinKafkaConsumerClient} that uses kafka 08 scala consumer client. All the code has been moved from the
* legacy org.apache.gobblin.source.extractor.extract.kafka.KafkaWrapper's KafkaOldApi
*/
@Slf4j
public class Kafka08ConsumerClient extends AbstractBaseKafkaConsumerClient {
public static final String CONFIG_PREFIX = AbstractBaseKafkaConsumerClient.CONFIG_PREFIX;
public static final String CONFIG_KAFKA_BUFFER_SIZE_BYTES = CONFIG_PREFIX + "bufferSizeBytes";
public static final int CONFIG_KAFKA_BUFFER_SIZE_BYTES_DEFAULT = 1024 * 1024; // 1MB
public static final String CONFIG_KAFKA_CLIENT_NAME = CONFIG_PREFIX + "clientName";
public static final String CONFIG_KAFKA_CLIENT_NAME_DEFAULT = "gobblin-kafka";
public static final String CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID = CONFIG_PREFIX + "fetchCorrelationId";
private static final int CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID_DEFAULT = -1;
public static final String CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES = CONFIG_PREFIX + "fetchTopicNumTries";
private static final int CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES_DEFAULT = 3;
public static final String CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES = CONFIG_PREFIX + "fetchOffsetNumTries";
private static final int CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES_DEFAULT = 3;
private final int bufferSize;
private final String clientName;
private final int fetchCorrelationId;
private final int fetchTopicRetries;
private final int fetchOffsetRetries;
private final ConcurrentMap<String, SimpleConsumer> activeConsumers = Maps.newConcurrentMap();
private Kafka08ConsumerClient(Config config) {
super(config);
bufferSize = ConfigUtils.getInt(config, CONFIG_KAFKA_BUFFER_SIZE_BYTES, CONFIG_KAFKA_BUFFER_SIZE_BYTES_DEFAULT);
clientName = ConfigUtils.getString(config, CONFIG_KAFKA_CLIENT_NAME, CONFIG_KAFKA_CLIENT_NAME_DEFAULT);
fetchCorrelationId =
ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID,
CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID_DEFAULT);
fetchTopicRetries =
ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES, CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES_DEFAULT);
fetchOffsetRetries =
ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES, CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES_DEFAULT);
}
@Override
public List<KafkaTopic> getTopics() {
List<TopicMetadata> topicMetadataList = getFilteredMetadataList();
List<KafkaTopic> filteredTopics = Lists.newArrayList();
for (TopicMetadata topicMetadata : topicMetadataList) {
List<KafkaPartition> partitions = getPartitionsForTopic(topicMetadata);
filteredTopics.add(new KafkaTopic(topicMetadata.topic(), partitions));
}
return filteredTopics;
}
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
List<KafkaPartition> partitions = Lists.newArrayList();
for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
if (null == partitionMetadata) {
log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
return Collections.emptyList();
}
if (null == partitionMetadata.leader()) {
log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metatada="
+ partitionMetadata);
return Collections.emptyList();
}
partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
.withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
.withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
}
return partitions;
}
private List<TopicMetadata> getFilteredMetadataList() {
//Try all brokers one by one, until successfully retrieved topic metadata (topicMetadataList is non-null)
for (String broker : this.brokers) {
List<TopicMetadata> filteredTopicMetadataList = fetchTopicMetadataFromBroker(broker);
if (filteredTopicMetadataList != null) {
return filteredTopicMetadataList;
}
}
throw new RuntimeException("Fetching topic metadata from all brokers failed. See log warning for more information.");
}
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
log.info(String.format("Fetching topic metadata from broker %s", broker));
SimpleConsumer consumer = null;
try {
consumer = getSimpleConsumer(broker);
for (int i = 0; i < this.fetchTopicRetries; i++) {
try {
return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
} catch (Exception e) {
log.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
try {
Thread.sleep((long) ((i + Math.random()) * 1000));
} catch (InterruptedException e2) {
log.warn("Caught InterruptedException: " + e2);
}
}
}
} finally {
if (consumer != null) {
consumer.close();
}
}
return null;
}
private SimpleConsumer getSimpleConsumer(String broker) {
if (this.activeConsumers.containsKey(broker)) {
return this.activeConsumers.get(broker);
}
SimpleConsumer consumer = this.createSimpleConsumer(broker);
this.activeConsumers.putIfAbsent(broker, consumer);
return consumer;
}
private SimpleConsumer getSimpleConsumer(HostAndPort hostAndPort) {
return this.getSimpleConsumer(hostAndPort.toString());
}
private SimpleConsumer createSimpleConsumer(String broker) {
List<String> hostPort = Splitter.on(':').trimResults().omitEmptyStrings().splitToList(broker);
return createSimpleConsumer(hostPort.get(0), Integer.parseInt(hostPort.get(1)));
}
private SimpleConsumer createSimpleConsumer(String host, int port) {
return new SimpleConsumer(host, port, this.socketTimeoutMillis, this.bufferSize, this.clientName);
}
@Override
public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
return getOffset(partition, offsetRequestInfo);
}
@Override
public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
return getOffset(partition, offsetRequestInfo);
}
private long getOffset(KafkaPartition partition, Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
throws KafkaOffsetRetrievalFailureException {
SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
for (int i = 0; i < this.fetchOffsetRetries; i++) {
try {
OffsetResponse offsetResponse =
consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo, kafka.api.OffsetRequest.CurrentVersion(),
this.clientName));
if (offsetResponse.hasError()) {
throw new RuntimeException("offsetReponse has error: "
+ offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
}
return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
} catch (Exception e) {
log.warn(String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1,
e));
if (i < this.fetchOffsetRetries - 1) {
try {
Thread.sleep((long) ((i + Math.random()) * 1000));
} catch (InterruptedException e2) {
log.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
}
}
}
}
throw new KafkaOffsetRetrievalFailureException(String.format("Fetching offset for partition %s has failed.",
partition));
}
@Override
public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
if (nextOffset > maxOffset) {
return null;
}
FetchRequest fetchRequest = createFetchRequest(partition, nextOffset);
try {
FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
return getIteratorFromFetchResponse(fetchResponse, partition);
} catch (Exception e) {
log.warn(String.format(
"Fetch message buffer for partition %s has failed: %s. Will refresh topic metadata and retry", partition, e));
return refreshTopicMetadataAndRetryFetch(partition, fetchRequest);
}
}
@Override
public Iterator<KafkaConsumerRecord> consume() {
throw new UnsupportedOperationException("consume() not supported by " + this.getClass().getSimpleName() + " Please use Kafka09ConsumerClient or above");
}
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest, KafkaPartition partition) {
SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
FetchResponse fetchResponse = consumer.fetch(fetchRequest);
if (fetchResponse.hasError()) {
throw new RuntimeException(String.format("error code %d",
fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
}
return fetchResponse;
}
private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
try {
ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
return Iterators.transform(messageBuffer.iterator(),
new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
@Override
public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
return new Kafka08ConsumerRecord(input, partition.getTopicName(), partition.getId());
}
});
} catch (Exception e) {
log.warn(String.format("Failed to retrieve next message buffer for partition %s: %s."
+ "The remainder of this partition will be skipped.", partition, e));
return null;
}
}
private Iterator<KafkaConsumerRecord> refreshTopicMetadataAndRetryFetch(KafkaPartition partition,
FetchRequest fetchRequest) {
try {
refreshTopicMetadata(partition);
FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
return getIteratorFromFetchResponse(fetchResponse, partition);
} catch (Exception e) {
log.warn(String.format("Fetch message buffer for partition %s has failed: %s. This partition will be skipped.",
partition, e));
return null;
}
}
private void refreshTopicMetadata(KafkaPartition partition) {
for (String broker : this.brokers) {
List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
TopicMetadata topicMetadata = topicMetadataList.get(0);
for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
if (partitionMetadata.partitionId() == partition.getId()) {
partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(), partitionMetadata
.leader().port());
break;
}
}
break;
}
}
}
private FetchRequest createFetchRequest(KafkaPartition partition, long nextOffset) {
TopicAndPartition topicAndPartition = new TopicAndPartition(partition.getTopicName(), partition.getId());
PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(nextOffset, this.bufferSize);
Map<TopicAndPartition, PartitionFetchInfo> fetchInfo =
Collections.singletonMap(topicAndPartition, partitionFetchInfo);
return new FetchRequest(this.fetchCorrelationId, this.clientName, this.fetchTimeoutMillis, this.fetchMinBytes,
fetchInfo);
}
@Override
public void close() throws IOException {
int numOfConsumersNotClosed = 0;
for (SimpleConsumer consumer : this.activeConsumers.values()) {
if (consumer != null) {
try {
consumer.close();
} catch (Exception e) {
log.warn(String.format("Failed to close Kafka Consumer %s:%d", consumer.host(), consumer.port()));
numOfConsumersNotClosed++;
}
}
}
this.activeConsumers.clear();
if (numOfConsumersNotClosed > 0) {
throw new IOException(numOfConsumersNotClosed + " consumer(s) failed to close.");
}
}
public static class Factory implements GobblinKafkaConsumerClientFactory {
@Override
public GobblinKafkaConsumerClient create(Config config) {
return new Kafka08ConsumerClient(config);
}
}
public static class Kafka08ConsumerRecord extends BaseKafkaConsumerRecord implements ByteArrayBasedKafkaRecord {
private final MessageAndOffset messageAndOffset;
public Kafka08ConsumerRecord(MessageAndOffset messageAndOffset, String topic, int partition) {
super(messageAndOffset.offset(), messageAndOffset.message().size(), topic, partition);
this.messageAndOffset = messageAndOffset;
}
@Override
public byte[] getMessageBytes() {
return getBytes(this.messageAndOffset.message().payload());
}
@Override
public byte[] getKeyBytes() {
return getBytes(this.messageAndOffset.message().key());
}
private static byte[] getBytes(ByteBuffer buf) {
byte[] bytes = null;
if (buf != null) {
int size = buf.remaining();
bytes = new byte[size];
buf.get(bytes, buf.position(), size);
}
return bytes;
}
}
}
| 3,756 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka/tool/KafkaCheckpoint.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.tool;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashMap;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.type.MapType;
import org.codehaus.jackson.map.type.TypeFactory;
/**
* A class to store kafka checkpoints.
* Knows how to serialize and deserialize itself.
*/
/**
 * A class to store kafka checkpoints (partition id -> offset).
 * Knows how to serialize and deserialize itself as a JSON map.
 */
public class KafkaCheckpoint {

  private static final ObjectMapper MAPPER = new ObjectMapper();

  // Mutable partition -> offset map backing this checkpoint.
  private final HashMap<Integer, Long> offsets;

  /** @return a checkpoint with no recorded offsets */
  public static KafkaCheckpoint emptyCheckpoint() {
    return new KafkaCheckpoint(new HashMap<Integer, Long>());
  }

  public KafkaCheckpoint(HashMap<Integer, Long> partitionOffsetMap) {
    this.offsets = partitionOffsetMap;
  }

  /** Records (or overwrites) the offset for a partition. */
  public void update(int partition, long offset) {
    this.offsets.put(partition, offset);
  }

  /** Reads a checkpoint previously written by one of the {@code serialize} methods. */
  public static KafkaCheckpoint deserialize(InputStream inputStream) throws IOException {
    TypeFactory typeFactory = MAPPER.getTypeFactory();
    MapType mapType = typeFactory.constructMapType(HashMap.class, Integer.class, Long.class);
    HashMap<Integer, Long> partitionOffsets = MAPPER.readValue(inputStream, mapType);
    return new KafkaCheckpoint(partitionOffsets);
  }

  /** Writes the checkpoint's partition-offset map as JSON to the given stream. */
  public static void serialize(KafkaCheckpoint checkpoint, OutputStream outputStream) throws IOException {
    MAPPER.writeValue(outputStream, checkpoint.offsets);
  }

  /** Writes the checkpoint's partition-offset map as JSON to the given file. */
  public static void serialize(KafkaCheckpoint checkpoint, File outputFile) throws IOException {
    MAPPER.writeValue(outputFile, checkpoint.offsets);
  }

  /** @return whether no offsets have been recorded */
  public boolean isEmpty() {
    return this.offsets.isEmpty();
  }
}
| 3,757 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/kafka/tool/SimpleKafkaConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.kafka.tool;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.common.serialization.Deserializer;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistry;
import org.apache.gobblin.kafka.schemareg.KafkaSchemaRegistryFactory;
import org.apache.gobblin.kafka.serialize.LiAvroDeserializer;
import org.apache.gobblin.kafka.serialize.MD5Digest;
/**
 * A simple Kafka consumer for debugging purposes: prints every Avro record on the
 * configured topic to stdout and, when a checkpoint file is supplied, persists the
 * consumed partition/offset pairs after each message.
 */
@Slf4j
public class SimpleKafkaConsumer {
  private final ConsumerConnector consumer;
  private final KafkaStream<byte[], byte[]> stream;
  private final ConsumerIterator<byte[], byte[]> iterator;
  private final String topic;
  private final KafkaSchemaRegistry<MD5Digest, Schema> schemaRegistry;
  private final Deserializer deserializer;

  /**
   * @param props must provide "topic" and "zookeeper.connect"; also used to build the schema registry
   * @param checkpoint not read by the constructor; consumption always starts from "smallest".
   *                   NOTE(review): presumably this was meant to seed starting offsets — confirm.
   */
  public SimpleKafkaConsumer(Properties props, KafkaCheckpoint checkpoint)
  {
    Config config = ConfigFactory.parseProperties(props);
    topic = config.getString("topic");
    String zkConnect = config.getString("zookeeper.connect");
    schemaRegistry = KafkaSchemaRegistryFactory.getSchemaRegistry(props);
    deserializer = new LiAvroDeserializer(schemaRegistry);
    /** TODO: Make Confluent schema registry integration configurable
     * HashMap<String, String> avroSerDeConfig = new HashMap<>();
     * avroSerDeConfig.put("schema.registry.url", "http://localhost:8081");
     * deserializer = new io.confluent.kafka.serializers.KafkaAvroDeserializer();
     * deserializer.configure(avroSerDeConfig, false);
     *
     **/
    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnect);
    // Unique group id per run so repeated debugging sessions never share committed offsets.
    consumeProps.put("group.id", "gobblin-tool-" + System.nanoTime());
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    consumeProps.put("auto.offset.reset", "smallest");
    consumeProps.put("auto.commit.enable", "false");
    //consumeProps.put("consumer.timeout.ms", "10000");
    consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(ImmutableMap.of(topic, 1));
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
    stream = streams.get(0);
    iterator = stream.iterator();
  }

  /** Shuts down the underlying consumer connector. */
  public void close()
  {
    consumer.shutdown();
  }

  /** Alias for {@link #close()}; kept for the shutdown-hook caller below. */
  public void shutdown()
  {
    close();
  }

  /**
   * Entry point. args[0] = properties file; optional args[1] = checkpoint file to
   * resume-track consumption progress.
   */
  public static void main(String[] args)
      throws IOException {
    Preconditions.checkArgument(args.length>=1, "Usage: java " + SimpleKafkaConsumer.class.getName() + " <properties_file> <checkpoint_file>");
    String fileName = args[0];
    Properties props = new Properties();
    // Fixed: the original leaked this stream (never closed, even on success).
    try (FileInputStream propsStream = new FileInputStream(new File(fileName))) {
      props.load(propsStream);
    }
    KafkaCheckpoint checkpoint = KafkaCheckpoint.emptyCheckpoint();
    File checkpointFile = null;
    if (args.length > 1)
    {
      try {
        checkpointFile = new File(args[1]);
        if (checkpointFile.exists()) {
          // try-with-resources replaces the original manual null-checked finally/close.
          try (FileInputStream fis = new FileInputStream(checkpointFile)) {
            checkpoint = KafkaCheckpoint.deserialize(fis);
          }
        } else {
          log.info("Checkpoint doesn't exist, we will start with an empty one and store it here.");
        }
      }
      catch (IOException e)
      {
        log.warn("Could not deserialize the previous checkpoint. Starting with empty", e);
        if (!checkpoint.isEmpty())
        {
          checkpoint = KafkaCheckpoint.emptyCheckpoint();
        }
      }
    }
    final SimpleKafkaConsumer consumer = new SimpleKafkaConsumer(props, checkpoint);
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run()
      {
        log.info("Shutting down...");
        consumer.shutdown();
      }
    });
    consumer.printLoop(checkpoint, checkpointFile);
  }

  /**
   * Consumes and prints records until the stream is exhausted; when a checkpoint file
   * was given, persists the checkpoint after every message (success or failure).
   */
  private void printLoop(KafkaCheckpoint checkpoint, File checkpointFile)
      throws IOException {
    boolean storeCheckpoints = (checkpointFile != null);
    if (storeCheckpoints)
    {
      boolean newFileCreated = checkpointFile.createNewFile();
      if (newFileCreated) {
        log.info("Created new checkpoint file: " + checkpointFile.getAbsolutePath());
      }
    }
    while (true)
    {
      MessageAndMetadata<byte[], byte[]> messagePlusMeta;
      try {
        if (!iterator.hasNext()) {
          return;
        }
        messagePlusMeta = iterator.next();
        if (messagePlusMeta!=null) {
          byte[] payload = messagePlusMeta.message();
          System.out.println("Got a message of size " + payload.length + " bytes");
          GenericRecord record = (GenericRecord) deserializer.deserialize(topic, payload);
          System.out.println(record.toString());
          checkpoint.update(messagePlusMeta.partition(), messagePlusMeta.offset());
        }
      }
      catch (RuntimeException e)
      {
        // Keep consuming on per-message failures; this is a best-effort debugging tool.
        log.warn("Error detected", e);
      }
      finally
      {
        if (storeCheckpoints) {
          // Fixed: the original compared references against a freshly created
          // emptyCheckpoint() instance (always unequal); test emptiness instead.
          if (!checkpoint.isEmpty()) {
            System.out.println("Storing checkpoint to file: " + checkpointFile.getAbsolutePath());
            KafkaCheckpoint.serialize(checkpoint, checkpointFile);
          }
        }
      }
    }
  }
}
| 3,758 |
0 | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-modules/gobblin-kafka-08/src/main/java/org/apache/gobblin/service/AvroJobSpecDeserializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.gobblin.metrics.reporter.util.FixedSchemaVersionWriter;
import org.apache.gobblin.metrics.reporter.util.SchemaVersionWriter;
import org.apache.gobblin.runtime.job_spec.AvroJobSpec;
import lombok.extern.slf4j.Slf4j;
@Slf4j
/**
* A deserializer that converts a byte array into an {@link AvroJobSpec}
*/
public class AvroJobSpecDeserializer implements Deserializer<AvroJobSpec> {
private BinaryDecoder _decoder;
private SpecificDatumReader<AvroJobSpec> _reader;
private SchemaVersionWriter<?> _versionWriter;
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
InputStream dummyInputStream = new ByteArrayInputStream(new byte[0]);
_decoder = DecoderFactory.get().binaryDecoder(dummyInputStream, null);
_reader = new SpecificDatumReader<AvroJobSpec>(AvroJobSpec.SCHEMA$);
_versionWriter = new FixedSchemaVersionWriter();
}
@Override
public AvroJobSpec deserialize(String topic, byte[] data) {
try (InputStream is = new ByteArrayInputStream(data)) {
_versionWriter.readSchemaVersioningInformation(new DataInputStream(is));
Decoder decoder = DecoderFactory.get().binaryDecoder(is, _decoder);
return _reader.read(null, decoder);
} catch (IOException e) {
throw new RuntimeException("Could not decode message");
}
}
@Override
public void close() {
}
} | 3,759 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/GobblinServiceManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.File;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FileUtils;
import org.apache.curator.test.TestingServer;
import org.apache.gobblin.service.modules.orchestration.AbstractUserQuotaManager;
import org.apache.gobblin.service.modules.orchestration.ServiceAzkabanConfigKeys;
import org.apache.hadoop.fs.Path;
import org.eclipse.jetty.http.HttpStatus;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.lib.RepositoryCache;
import org.eclipse.jgit.transport.RefSpec;
import org.eclipse.jgit.util.FS;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.containers.MySQLContainer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.linkedin.data.template.StringMap;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.RestLiResponseException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.MysqlJobStatusStateStoreFactory;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.monitoring.GitConfigMonitor;
import org.apache.gobblin.service.modules.core.GobblinServiceManager;
import org.apache.gobblin.service.modules.flow.MockedSpecCompiler;
import org.apache.gobblin.service.monitoring.FsJobStatusRetriever;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.ConfigUtils;
public class GobblinServiceManagerTest {
private static final Logger logger = LoggerFactory.getLogger(GobblinServiceManagerTest.class);
private static Gson gson = new GsonBuilder().setPrettyPrinting().create();
private static final String SERVICE_WORK_DIR = "/tmp/serviceWorkDir/";
private static final String SPEC_STORE_PARENT_DIR = "/tmp/serviceCore/";
private static final String SPEC_DESCRIPTION = "Test ServiceCore";
private static final String TOPOLOGY_SPEC_STORE_DIR = "/tmp/serviceCore/topologyTestSpecStore";
private static final String FLOW_SPEC_STORE_DIR = "/tmp/serviceCore/flowTestSpecStore";
private static final String GIT_CLONE_DIR = "/tmp/serviceCore/clone";
private static final String GIT_REMOTE_REPO_DIR = "/tmp/serviceCore/remote";
private static final String GIT_LOCAL_REPO_DIR = "/tmp/serviceCore/local";
private static final String JOB_STATUS_STATE_STORE_DIR = "/tmp/serviceCore/fsJobStatusRetriever";
private static final String GROUP_OWNERSHIP_CONFIG_DIR = Files.createTempDir().getAbsolutePath();
private static final String TEST_GROUP_NAME = "testGroup";
private static final String TEST_FLOW_NAME = "testFlow";
private static final String TEST_FLOW_NAME2 = "testFlow2";
private static final String TEST_FLOW_NAME3 = "testFlow3";
private static final String TEST_FLOW_NAME4 = "testFlow4";
private static final String TEST_FLOW_NAME5 = "testFlow5";
private static final String TEST_FLOW_NAME6 = "testFlow6";
private static final String TEST_FLOW_NAME7 = "testFlow7";
private static final FlowId TEST_FLOW_ID = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
private static final FlowId TEST_FLOW_ID2 = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME2);
private static final FlowId TEST_FLOW_ID3 = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME3);
private static final FlowId TEST_FLOW_ID4 = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME4);
private static final FlowId TEST_FLOW_ID5 = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME5);
private static final FlowId TEST_FLOW_ID6 = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME6);
private static final FlowId TEST_FLOW_ID7 = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME7);
private static final FlowId UNCOMPILABLE_FLOW_ID = new FlowId().setFlowGroup(TEST_GROUP_NAME)
.setFlowName(MockedSpecCompiler.UNCOMPILABLE_FLOW);
private static final String TEST_SCHEDULE = "0 1/0 * ? * *";
private static final String TEST_TEMPLATE_URI = "FS:///templates/test.template";
private static final String TEST_DUMMY_GROUP_NAME = "dummyGroup";
private static final String TEST_DUMMY_FLOW_NAME = "dummyFlow";
private static final String TEST_GOBBLIN_EXECUTOR_NAME = "testGobblinExecutor";
private static final String TEST_SOURCE_NAME = "testSource";
private static final String TEST_SINK_NAME = "testSink";
private final URI TEST_URI = FlowSpec.Utils.createFlowSpecUri(TEST_FLOW_ID);
private GobblinServiceManager gobblinServiceManager;
private FlowConfigV2Client flowConfigClient;
private MySQLContainer mysql;
private Git gitForPush;
private TestingServer testingServer;
Properties serviceCoreProperties = new Properties();
Map<String, String> flowProperties = Maps.newHashMap();
Map<String, String> transportClientProperties = Maps.newHashMap();
public GobblinServiceManagerTest() throws Exception {
}
@BeforeClass
public void setup() throws Exception {
  // Start from a clean slate so state from a previous run cannot leak into this suite.
  cleanUpDir(SERVICE_WORK_DIR);
  cleanUpDir(SPEC_STORE_PARENT_DIR);
  // MySQL container backs both the service database and the MySQL-based spec store.
  mysql = new MySQLContainer("mysql:" + TestServiceDatabaseConfig.MysqlVersion);
  mysql.start();
  serviceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_URL_KEY, mysql.getJdbcUrl());
  serviceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_USERNAME, mysql.getUsername());
  serviceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_PASSWORD, mysql.getPassword());
  // Separate embedded DB for the job-status state store; in-process ZK for Helix/Kafka deps.
  ITestMetastoreDatabase testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
  testingServer = new TestingServer(true);
  // Base properties every flow created by the tests shares (source/sink route the mock topology).
  flowProperties.put("param1", "value1");
  flowProperties.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, TEST_SOURCE_NAME);
  flowProperties.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, TEST_SINK_NAME);
  serviceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_USER_KEY, "testUser");
  serviceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, "testPassword");
  serviceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_URL_KEY, testMetastoreDatabase.getJdbcUrl());
  serviceCoreProperties.put("zookeeper.connect", testingServer.getConnectString());
  serviceCoreProperties.put(ConfigurationKeys.STATE_STORE_FACTORY_CLASS_KEY, MysqlJobStatusStateStoreFactory.class.getName());
  serviceCoreProperties.put(ConfigurationKeys.TOPOLOGYSPEC_STORE_DIR_KEY, TOPOLOGY_SPEC_STORE_DIR);
  serviceCoreProperties.put(FlowCatalog.FLOWSPEC_STORE_DIR_KEY, FLOW_SPEC_STORE_DIR);
  serviceCoreProperties.put(FlowCatalog.FLOWSPEC_STORE_CLASS_KEY, "org.apache.gobblin.runtime.spec_store.MysqlSpecStore");
  serviceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_TABLE_KEY, "flow_spec_store");
  serviceCoreProperties.put(FlowCatalog.FLOWSPEC_SERDE_CLASS_KEY, "org.apache.gobblin.runtime.spec_serde.GsonFlowSpecSerDe");
  // Single in-memory executor topology capable of TEST_SOURCE_NAME -> TEST_SINK_NAME.
  serviceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY, TEST_GOBBLIN_EXECUTOR_NAME);
  serviceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".description",
      "StandaloneTestExecutor");
  serviceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".version",
      FlowSpec.Builder.DEFAULT_VERSION);
  serviceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".uri",
      "gobblinExecutor");
  serviceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".specExecutorInstance",
      "org.apache.gobblin.service.InMemorySpecExecutor");
  serviceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".specExecInstance.capabilities",
      TEST_SOURCE_NAME + ":" + TEST_SINK_NAME);
  // Git config monitor polls the "remote" bare repo created below every 5 seconds (see testGitCreate).
  serviceCoreProperties.put(ServiceConfigKeys.GOBBLIN_SERVICE_GIT_CONFIG_MONITOR_ENABLED_KEY, true);
  serviceCoreProperties.put(GitConfigMonitor.GIT_CONFIG_MONITOR_PREFIX + "." + ConfigurationKeys.GIT_MONITOR_REPO_URI, GIT_REMOTE_REPO_DIR);
  serviceCoreProperties.put(GitConfigMonitor.GIT_CONFIG_MONITOR_PREFIX + "." + ConfigurationKeys.GIT_MONITOR_REPO_DIR, GIT_LOCAL_REPO_DIR);
  serviceCoreProperties.put(GitConfigMonitor.GIT_CONFIG_MONITOR_PREFIX + "." + ConfigurationKeys.GIT_MONITOR_POLLING_INTERVAL, 5);
  serviceCoreProperties.put(FsJobStatusRetriever.CONF_PREFIX + "." + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, JOB_STATUS_STATE_STORE_DIR);
  serviceCoreProperties.put(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_STATUS_MONITOR_ENABLED_KEY, false);
  // MockedSpecCompiler lets tests exercise the "uncompilable flow" paths deterministically.
  serviceCoreProperties.put(ServiceConfigKeys.GOBBLIN_SERVICE_FLOWCOMPILER_CLASS_KEY, MockedSpecCompiler.class.getCanonicalName());
  // Quota of one concurrent flow for testUser, exercised by testRunQuotaExceeds.
  serviceCoreProperties.put(AbstractUserQuotaManager.PER_USER_QUOTA, "testUser:1");
  transportClientProperties.put(HttpClientFactory.HTTP_REQUEST_TIMEOUT, "10000");
  // Create a bare repository
  RepositoryCache.FileKey fileKey = RepositoryCache.FileKey.exact(new File(GIT_REMOTE_REPO_DIR), FS.DETECTED);
  fileKey.open(false).create(true);
  this.gitForPush = Git.cloneRepository().setURI(GIT_REMOTE_REPO_DIR).setDirectory(new File(GIT_CLONE_DIR)).call();
  // push an empty commit as a base for detecting changes
  this.gitForPush.commit().setMessage("First commit").call();
  this.gitForPush.push().setRemote("origin").setRefSpecs(new RefSpec("master")).call();
  // Boot the service under test and build a Rest.li client against its listening port.
  this.gobblinServiceManager = GobblinServiceManager.create("CoreService", "1",
      ConfigUtils.propertiesToConfig(serviceCoreProperties), new Path(SERVICE_WORK_DIR));
  this.gobblinServiceManager.start();
  this.flowConfigClient = new FlowConfigV2Client(String.format("http://127.0.0.1:%s/",
      this.gobblinServiceManager.getRestLiServerListeningURI().getPort()), transportClientProperties);
}
/** Recursively removes {@code dir} if it exists; no-op otherwise. */
private void cleanUpDir(String dir) throws Exception {
  File target = new File(dir);
  if (!target.exists()) {
    return;
  }
  FileUtils.deleteDirectory(target);
}
@AfterClass
public void cleanUp() throws Exception {
  // Each teardown step is best-effort so one failure does not mask the others.
  // Shutdown Service
  try {
    this.gobblinServiceManager.stop();
  } catch (Exception e) {
    logger.warn("Could not cleanly stop Gobblin Service Manager", e);
  }
  try {
    cleanUpDir(SERVICE_WORK_DIR);
  } catch (Exception e) {
    // Fixed: the exception was previously dropped, hiding why cleanup failed.
    logger.warn("Could not completely cleanup Work Dir", e);
  }
  try {
    cleanUpDir(SPEC_STORE_PARENT_DIR);
  } catch (Exception e) {
    logger.warn("Could not completely cleanup Spec Store Parent Dir", e);
  }
  try {
    cleanUpDir(GROUP_OWNERSHIP_CONFIG_DIR);
  } catch (Exception e) {
    logger.warn("Could not completely cleanup Group Ownership Parent Dir", e);
  }
  try {
    this.testingServer.close();
  } catch (Exception e) {
    // Fixed: previously went to System.err without the cause; use the logger like the rest.
    logger.warn("Failed to close ZK testing server.", e);
  }
  mysql.stop();
}
/**
 * To test an existing flow in a spec store does not get deleted just because it is not compilable during service restarts
 */
@Test
public void testRestart() throws Exception {
  // A scheduled flow that MockedSpecCompiler will refuse to compile.
  FlowConfig uncompilableFlowConfig = new FlowConfig().setId(UNCOMPILABLE_FLOW_ID).setTemplateUris(TEST_TEMPLATE_URI)
      .setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(true))
      .setProperties(new StringMap(flowProperties));
  FlowSpec uncompilableSpec = FlowConfigResourceLocalHandler.createFlowSpecForConfig(uncompilableFlowConfig);
  // A schedule-less (run-once) flow that should execute and then be removed.
  FlowConfig runOnceFlowConfig = new FlowConfig().setId(TEST_FLOW_ID)
      .setTemplateUris(TEST_TEMPLATE_URI).setProperties(new StringMap(flowProperties));
  FlowSpec runOnceSpec = FlowConfigResourceLocalHandler.createFlowSpecForConfig(runOnceFlowConfig);
  // add the non compilable flow directly to the spec store skipping flow catalog which would not allow this
  this.gobblinServiceManager.getFlowCatalog().getSpecStore().addSpec(uncompilableSpec);
  this.gobblinServiceManager.getFlowCatalog().getSpecStore().addSpec(runOnceSpec);
  List<Spec> specs = (List<Spec>) this.gobblinServiceManager.getFlowCatalog().getSpecs();
  Assert.assertEquals(specs.size(), 2);
  // getSpecs() ordering is not guaranteed, so accept either order of the two specs.
  if (specs.get(0).getUri().equals(uncompilableSpec.getUri())) {
    Assert.assertEquals(specs.get(1).getUri(), runOnceSpec.getUri());
  } else if (specs.get(0).getUri().equals(runOnceSpec.getUri())) {
    Assert.assertEquals(specs.get(1).getUri(), uncompilableSpec.getUri());
  } else {
    Assert.fail();
  }
  // restart the service
  serviceReboot();
  // runOnce job should get deleted from the spec store after running but uncompilable flow should stay
  AssertWithBackoff.create().maxSleepMs(200L).timeoutMs(20000L).backoffFactor(1)
      .assertTrue(input -> this.gobblinServiceManager.getFlowCatalog().getSpecs().size() == 1,
          "Waiting for the runOnce job to finish");
  specs = (List<Spec>) this.gobblinServiceManager.getFlowCatalog().getSpecs();
  Assert.assertEquals(specs.get(0).getUri(), uncompilableSpec.getUri());
  Assert.assertTrue(uncompilableSpec.getConfig().getBoolean(ConfigurationKeys.FLOW_RUN_IMMEDIATELY));
  // clean it
  this.gobblinServiceManager.getFlowCatalog().remove(uncompilableSpec.getUri());
  specs = (List<Spec>) this.gobblinServiceManager.getFlowCatalog().getSpecs();
  Assert.assertEquals(specs.size(), 0);
}
/** An uncompilable flow must be rejected with 400 and never persisted or scheduled. */
@Test (dependsOnMethods = "testRestart")
public void testUncompilableJob() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(MockedSpecCompiler.UNCOMPILABLE_FLOW);
  URI uri = FlowSpec.Utils.createFlowSpecUri(flowId);
  FlowConfig flowConfig = new FlowConfig().setId(flowId)
      .setTemplateUris(TEST_TEMPLATE_URI).setProperties(new StringMap(flowProperties));
  RestLiResponseException exception = null;
  try {
    this.flowConfigClient.createFlowConfig(flowConfig);
  } catch (RestLiResponseException e) {
    exception = e;
  }
  // Fixed: assert the rejection actually happened before dereferencing; previously an
  // unexpected success surfaced as an NPE instead of a clear assertion failure
  // (testCreateAgain already follows this pattern).
  Assert.assertNotNull(exception, "Expected createFlowConfig of an uncompilable flow to fail");
  Assert.assertEquals(exception.getStatus(), HttpStatus.BAD_REQUEST_400);
  // uncompilable job should not be persisted
  Assert.assertEquals(this.gobblinServiceManager.getFlowCatalog().getSpecs().size(), 0);
  Assert.assertFalse(this.gobblinServiceManager.getScheduler().getScheduledFlowSpecs().containsKey(uri.toString()));
}
/** A flow created without a schedule runs once and is then removed from catalog and scheduler. */
@Test (dependsOnMethods = "testUncompilableJob")
public void testRunOnceJob() throws Exception {
  FlowConfig runOnceConfig = new FlowConfig().setId(TEST_FLOW_ID)
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(runOnceConfig);
  // runOnce job is deleted soon after it is orchestrated
  AssertWithBackoff.create().maxSleepMs(200L).timeoutMs(2000L).backoffFactor(1)
      .assertTrue(input -> this.gobblinServiceManager.getFlowCatalog().getSpecs().isEmpty(),
          "Waiting for job to get orchestrated...");
  AssertWithBackoff.create().maxSleepMs(100L).timeoutMs(1000L).backoffFactor(1)
      .assertTrue(input -> !this.gobblinServiceManager.getScheduler().getScheduledFlowSpecs().containsKey(TEST_URI.toString()),
          "Waiting for job to get orchestrated...");
}
/**
 * With PER_USER_QUOTA at "testUser:1", a second flow proxying as testUser must be
 * rejected with 503 while the first is still counted against the quota.
 */
@Test (dependsOnMethods = "testRunOnceJob")
public void testRunQuotaExceeds() throws Exception {
  // Fixed: copy the shared fixture instead of aliasing it; the original mutated the
  // flowProperties field, leaking the proxy-user key into every subsequent test.
  Map<String, String> props = Maps.newHashMap(flowProperties);
  props.put(ServiceAzkabanConfigKeys.AZKABAN_PROJECT_USER_TO_PROXY_KEY, "testUser");
  FlowConfig flowConfig = new FlowConfig().setId(TEST_FLOW_ID)
      .setTemplateUris(TEST_TEMPLATE_URI).setProperties(new StringMap(props));
  this.flowConfigClient.createFlowConfig(flowConfig);
  FlowConfig flowConfig2 = new FlowConfig().setId(TEST_FLOW_ID2)
      .setTemplateUris(TEST_TEMPLATE_URI).setProperties(new StringMap(props));
  try {
    this.flowConfigClient.createFlowConfig(flowConfig2);
    // Fixed: previously a successful create (quota not enforced) passed silently.
    Assert.fail("Expected quota for testUser to be exceeded");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.SERVICE_UNAVAILABLE_503);
  }
}
/** An explain-only request must be dry-run: nothing persisted, nothing scheduled. */
@Test (dependsOnMethods = "testRunQuotaExceeds")
public void testExplainJob() throws Exception {
  FlowId explainFlowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
  FlowConfig explainConfig = new FlowConfig().setId(explainFlowId)
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setProperties(new StringMap(flowProperties))
      .setExplain(true);
  this.flowConfigClient.createFlowConfig(explainConfig);
  // explain job should not be persisted
  Assert.assertEquals(this.gobblinServiceManager.getFlowCatalog().getSpecs().size(), 0);
  Assert.assertFalse(this.gobblinServiceManager.getScheduler().getScheduledFlowSpecs().containsKey(TEST_URI.toString()));
}
/** A scheduled flow is persisted in the catalog and registered with the scheduler. */
@Test (dependsOnMethods = "testExplainJob")
public void testCreate() throws Exception {
  Schedule cronSchedule = new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(true);
  FlowConfig scheduledConfig = new FlowConfig().setId(TEST_FLOW_ID)
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setSchedule(cronSchedule)
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(scheduledConfig);
  Assert.assertEquals(this.gobblinServiceManager.getFlowCatalog().getSpecs().size(), 1);
  Assert.assertTrue(this.gobblinServiceManager.getScheduler().getScheduledFlowSpecs().containsKey(TEST_URI.toString()));
}
/** Re-creating an already-existing flow must be rejected with 409 CONFLICT. */
@Test (dependsOnMethods = "testCreate")
public void testCreateAgain() throws Exception {
  FlowConfig duplicateConfig = new FlowConfig().setId(TEST_FLOW_ID)
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE))
      .setProperties(new StringMap(flowProperties));
  try {
    this.flowConfigClient.createFlowConfig(duplicateConfig);
    Assert.fail("Expected a 409 CONFLICT for a duplicate flow config");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.CONFLICT_409);
  }
}
/** Fetching the flow created in testCreate returns all the fields it was created with. */
@Test (dependsOnMethods = "testCreateAgain")
public void testGet() throws Exception {
  FlowConfig fetched = this.flowConfigClient.getFlowConfig(TEST_FLOW_ID);
  FlowId fetchedId = fetched.getId();
  Assert.assertEquals(fetchedId.getFlowGroup(), TEST_GROUP_NAME);
  Assert.assertEquals(fetchedId.getFlowName(), TEST_FLOW_NAME);
  Assert.assertEquals(fetched.getSchedule().getCronSchedule(), TEST_SCHEDULE);
  Assert.assertEquals(fetched.getTemplateUris(), TEST_TEMPLATE_URI);
  Assert.assertTrue(fetched.getSchedule().isRunImmediately());
  // Add this assert back when getFlowSpec() is changed to return the raw flow spec
  //Assert.assertEquals(flowConfig.getProperties().size(), 1);
  Assert.assertEquals(fetched.getProperties().get("param1"), "value1");
}
/** Adds a second flow, verifies the listing returns both, then removes the extra one. */
@Test (dependsOnMethods = "testCreateAgain")
public void testGetAll() throws Exception {
  FlowConfig secondFlow = new FlowConfig().setId(TEST_FLOW_ID2)
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(secondFlow);
  Collection<FlowConfig> allConfigs = this.flowConfigClient.getAllFlowConfigs();
  Assert.assertEquals(allConfigs.size(), 2);
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID2);
}
/** Filtered listing by group, group+name, and schedule. Disabled: not implemented for FsSpecStore. */
@Test (dependsOnMethods = "testCreateAgain", enabled = false)
public void testGetFilteredFlows() throws Exception {
  // Not implemented for FsSpecStore
  // Group filter matches both flows.
  Collection<FlowConfig> byGroup = this.flowConfigClient.getFlowConfigs(TEST_GROUP_NAME, null, null, null, null, null,
      null, null, null, null);
  Assert.assertEquals(byGroup.size(), 2);
  // Group + name uniquely identifies one flow.
  Collection<FlowConfig> byGroupAndName = this.flowConfigClient.getFlowConfigs(TEST_GROUP_NAME, TEST_FLOW_NAME2, null, null, null, null,
      null, null, null, null);
  Assert.assertEquals(byGroupAndName.size(), 1);
  // Schedule-only filter matches both flows (they share TEST_SCHEDULE).
  Collection<FlowConfig> bySchedule = this.flowConfigClient.getFlowConfigs(null, null, null, null, null, null,
      TEST_SCHEDULE, null, null, null);
  Assert.assertEquals(bySchedule.size(), 2);
}
/** Updates the flow's properties in place and verifies the round-tripped config. */
@Test (dependsOnMethods = "testGet")
public void testUpdate() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
  // Local map renamed so it no longer shadows the flowProperties field.
  Map<String, String> updatedProperties = Maps.newHashMap();
  updatedProperties.put("param1", "value1b");
  updatedProperties.put("param2", "value2b");
  updatedProperties.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, TEST_SOURCE_NAME);
  updatedProperties.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, TEST_SINK_NAME);
  FlowConfig updatedConfig = new FlowConfig().setId(TEST_FLOW_ID)
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE))
      .setProperties(new StringMap(updatedProperties));
  this.flowConfigClient.updateFlowConfig(updatedConfig);
  FlowConfig retrievedFlowConfig = this.flowConfigClient.getFlowConfig(flowId);
  Assert.assertTrue(this.gobblinServiceManager.getScheduler().getScheduledFlowSpecs().containsKey(TEST_URI.toString()));
  Assert.assertEquals(retrievedFlowConfig.getId().getFlowGroup(), TEST_GROUP_NAME);
  Assert.assertEquals(retrievedFlowConfig.getId().getFlowName(), TEST_FLOW_NAME);
  Assert.assertEquals(retrievedFlowConfig.getSchedule().getCronSchedule(), TEST_SCHEDULE);
  Assert.assertEquals(retrievedFlowConfig.getTemplateUris(), TEST_TEMPLATE_URI);
  // Add this asssert when getFlowSpec() is changed to return the raw flow spec
  //Assert.assertEquals(flowConfig.getProperties().size(), 2);
  Assert.assertEquals(retrievedFlowConfig.getProperties().get("param1"), "value1b");
  Assert.assertEquals(retrievedFlowConfig.getProperties().get("param2"), "value2b");
}
/** Deletes the flow and verifies subsequent lookups 404 and the scheduler forgets it. */
@Test (dependsOnMethods = "testUpdate")
public void testDelete() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
  URI flowUri = FlowSpec.Utils.createFlowSpecUri(flowId);
  // make sure flow config exists
  FlowConfig existing = this.flowConfigClient.getFlowConfig(flowId);
  Assert.assertEquals(existing.getId().getFlowGroup(), TEST_GROUP_NAME);
  Assert.assertEquals(existing.getId().getFlowName(), TEST_FLOW_NAME);
  this.flowConfigClient.deleteFlowConfig(flowId);
  try {
    this.flowConfigClient.getFlowConfig(flowId);
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
    Assert.assertFalse(this.gobblinServiceManager.getScheduler().getScheduledFlowSpecs().containsKey(flowUri.toString()));
    return;
  }
  Assert.fail("Get should have gotten a 404 error");
}
/**
 * Verifies the GitConfigMonitor picks up a flow config pushed to the monitored remote
 * repo and treats it as a run-once flow (no schedule => executed then removed).
 */
@Test (dependsOnMethods = "testDelete")
public void testGitCreate() throws Exception {
  // push a new config file
  File testFlowFile = new File(GIT_CLONE_DIR + "/gobblin-config/testGroup/testFlow.pull");
  testFlowFile.getParentFile().mkdirs();
  Files.write("{\"id\":{\"flowName\":\"testFlow\",\"flowGroup\":\"testGroup\"},\"param1\":\"value20\"}", testFlowFile, Charsets.UTF_8);
  // Catalog must be empty before the push so any spec seen later came from the monitor.
  Collection<Spec> specs = this.gobblinServiceManager.getFlowCatalog().getSpecs();
  Assert.assertEquals(specs.size(), 0);
  // add, commit, push
  this.gitForPush.add().addFilepattern("gobblin-config/testGroup/testFlow.pull").call();
  this.gitForPush.commit().setMessage("second commit").call();
  this.gitForPush.push().setRemote("origin").setRefSpecs(new RefSpec("master")).call();
  // polling is every 5 seconds, so wait twice as long and check
  TimeUnit.SECONDS.sleep(10);
  // spec generated using git monitor do not have schedule, so their life cycle should be similar to runOnce jobs
  Assert.assertEquals(this.gobblinServiceManager.getFlowCatalog().getSpecs().size(), 0);
  AssertWithBackoff.create().maxSleepMs(200L).timeoutMs(2000L).backoffFactor(1)
      .assertTrue(input -> !this.gobblinServiceManager.getScheduler().getScheduledFlowSpecs().containsKey(TEST_URI.toString()),
          "Waiting for job to get orchestrated...");
}
/** Fetching a flow that was never created must surface a 404. */
@Test
public void testBadGet() throws Exception {
  FlowId missingId = new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME).setFlowName(TEST_DUMMY_FLOW_NAME);
  try {
    this.flowConfigClient.getFlowConfig(missingId);
    Assert.fail("Get should have raised a 404 error");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
  }
}
/** Deleting a flow that was never created must surface a 404. */
@Test
public void testBadDelete() throws Exception {
  FlowId missingId = new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME).setFlowName(TEST_DUMMY_FLOW_NAME);
  try {
    this.flowConfigClient.deleteFlowConfig(missingId);
    Assert.fail("Get should have raised a 404 error");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
  }
}
/** Updating a flow that was never created must surface a 404. */
@Test
public void testBadUpdate() throws Exception {
  Map<String, String> flowProperties = Maps.newHashMap();
  flowProperties.put("param1", "value1b");
  flowProperties.put("param2", "value2b");
  FlowConfig flowConfig = new FlowConfig()
      .setId(new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME).setFlowName(TEST_DUMMY_FLOW_NAME))
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE))
      .setProperties(new StringMap(flowProperties));
  try {
    this.flowConfigClient.updateFlowConfig(flowConfig);
    // Fixed: the original silently passed when the update unexpectedly succeeded;
    // testBadGet/testBadDelete both fail in that case, so this now does too.
    Assert.fail("Update should have raised a 404 error");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
  }
}
/**
 * Simulates a full service restart: stops the running {@code GobblinServiceManager},
 * rebuilds it from the same core properties and work dir, restarts it, and
 * re-creates the Rest.li client against the (possibly new) listening port.
 */
private void serviceReboot() throws Exception {
this.gobblinServiceManager.stop();
// Recreate from scratch rather than reusing the stopped instance.
this.gobblinServiceManager = GobblinServiceManager.create("CoreService", "1",
ConfigUtils.propertiesToConfig(serviceCoreProperties), new Path(SERVICE_WORK_DIR));
this.gobblinServiceManager.start();
// The restarted server may bind a different port, so rebuild the client too.
this.flowConfigClient = new FlowConfigV2Client(String.format("http://127.0.0.1:%s/",
this.gobblinServiceManager.getRestLiServerListeningURI().getPort()), transportClientProperties);
}
/**
 * Creates four flow configs and exercises the paginated getAll API with various
 * (offset, limit) combinations, verifying the deterministic ordering
 * (descending modified_time, then ascending flow name): testFlow, testFlow2,
 * testFlow3, testFlow4. Cleans up all four configs at the end.
 */
@Test (dependsOnMethods = "testGitCreate")
public void testGetAllPaginated() throws Exception {
  // Order of the flows by descending modified_time, and ascending flow.name should be: testFlow, testFlow2, testFlow3, testFlow4
  FlowConfig flowConfig1 = new FlowConfig().setId(TEST_FLOW_ID)
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(flowConfig1);
  FlowConfig flowConfig2 = new FlowConfig().setId(TEST_FLOW_ID2)
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(flowConfig2);
  FlowConfig flowConfig3 = new FlowConfig().setId(TEST_FLOW_ID3)
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(flowConfig3);
  FlowConfig flowConfig4 = new FlowConfig().setId(TEST_FLOW_ID4)
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(flowConfig4);
  // Check that there are a total of 4 flowConfigs by using the default getAll call
  Collection<FlowConfig> flowConfigs = this.flowConfigClient.getAllFlowConfigs();
  Assert.assertEquals(flowConfigs.size(), 4);
  // Check that there are a total of 4 flowConfigs using the paginated getAll call
  flowConfigs = this.flowConfigClient.getAllFlowConfigs(0, 20);
  Assert.assertEquals(flowConfigs.size(), 4);
  // Start at index 0 and return 1 element
  flowConfigs = this.flowConfigClient.getAllFlowConfigs(0, 1);
  Assert.assertEquals(flowConfigs.size(), 1);
  Assert.assertEquals(flowConfigs.iterator().next().getId().getFlowName(), "testFlow");
  // Start at index 1 and return 1 element
  flowConfigs = this.flowConfigClient.getAllFlowConfigs(1, 1);
  Assert.assertEquals(flowConfigs.size(), 1);
  Assert.assertEquals(flowConfigs.iterator().next().getId().getFlowName(), "testFlow2");
  // Start at index 2 and return 1 element
  flowConfigs = this.flowConfigClient.getAllFlowConfigs(2, 1);
  Assert.assertEquals(flowConfigs.size(), 1);
  Assert.assertEquals(flowConfigs.iterator().next().getId().getFlowName(), "testFlow3");
  // Start at index 3 and return 1 element (comment previously said index 2)
  flowConfigs = this.flowConfigClient.getAllFlowConfigs(3, 1);
  Assert.assertEquals(flowConfigs.size(), 1);
  Assert.assertEquals(flowConfigs.iterator().next().getId().getFlowName(), "testFlow4");
  // Start at index 1 and ask for up to 20 elements; only 4 exist in total, so
  // with an offset of 1 we expect the remaining 3, in order.
  flowConfigs = this.flowConfigClient.getAllFlowConfigs(1, 20);
  Assert.assertEquals(flowConfigs.size(), 3);
  List<String> actualFlowNames = new ArrayList<>();
  for (FlowConfig fc : flowConfigs) {
    actualFlowNames.add(fc.getId().getFlowName());
  }
  List<String> expectedFlowNames = new ArrayList<>();
  expectedFlowNames.add("testFlow2");
  expectedFlowNames.add("testFlow3");
  expectedFlowNames.add("testFlow4");
  Assert.assertEquals(actualFlowNames, expectedFlowNames);
  // Clean up the flowConfigs added in for the pagination tests
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID);
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID2);
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID3);
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID4);
}
/**
 * Creates three flow configs (one with owningGroup "Filter.this", two with
 * "Keep.this") and verifies that the filtered getFlowConfigs API paginates
 * correctly over only the two matching configs. Cleans up all three at the end.
 */
@Test (dependsOnMethods = "testGitCreate")
public void testGetFilteredFlowsPaginated() throws Exception {
  FlowConfig flowConfig2 = new FlowConfig().setId(TEST_FLOW_ID5).setOwningGroup("Filter.this")
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(flowConfig2);
  FlowConfig flowConfig3 = new FlowConfig().setId(TEST_FLOW_ID6).setOwningGroup("Keep.this")
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(flowConfig3);
  FlowConfig flowConfig4 = new FlowConfig().setId(TEST_FLOW_ID7).setOwningGroup("Keep.this")
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
      .setProperties(new StringMap(flowProperties));
  this.flowConfigClient.createFlowConfig(flowConfig4);
  // Filter by owningGroup "Keep.this"; start at index 0 and return 1 element
  Collection<FlowConfig> flowConfigs = this.flowConfigClient.getFlowConfigs(null, null, null, null, null, null,
      TEST_SCHEDULE, null, "Keep.this", null, 0, 1);
  Assert.assertEquals(flowConfigs.size(), 1);
  Assert.assertEquals(flowConfigs.iterator().next().getId().getFlowName(), "testFlow6");
  // Filter by owningGroup "Keep.this"; start at index 1 and return 1 element
  flowConfigs = this.flowConfigClient.getFlowConfigs(null, null, null, null, null, null,
      TEST_SCHEDULE, null, "Keep.this", null, 1, 1);
  Assert.assertEquals(flowConfigs.size(), 1);
  Assert.assertEquals(flowConfigs.iterator().next().getId().getFlowName(), "testFlow7");
  // Filter by owningGroup "Keep.this"; start at index 0 with a limit of 20.
  // Only 2 matching items exist, so both are returned, in order.
  flowConfigs = this.flowConfigClient.getFlowConfigs(null, null, null, null, null, null,
      TEST_SCHEDULE, null, "Keep.this", null, 0, 20);
  Assert.assertEquals(flowConfigs.size(), 2);
  List<String> actualFlowNames = new ArrayList<>();
  for (FlowConfig fc : flowConfigs) {
    actualFlowNames.add(fc.getId().getFlowName());
  }
  List<String> expectedFlowNames = new ArrayList<>();
  expectedFlowNames.add("testFlow6");
  expectedFlowNames.add("testFlow7");
  Assert.assertEquals(actualFlowNames, expectedFlowNames);
  // Clean up the flowConfigs added in for the pagination tests
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID5);
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID6);
  this.flowConfigClient.deleteFlowConfig(TEST_FLOW_ID7);
}
} | 3,760 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/TestServiceDatabaseConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
/**
 * Shared constants for Gobblin service database tests.
 */
public class TestServiceDatabaseConfig {
  /** MySQL server version used by the test/embedded database. */
  public static final String MysqlVersion = "8.0.20";

  // Constants holder: prevent instantiation.
  private TestServiceDatabaseConfig() {
  }
}
| 3,761 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/monitoring/GaaSObservabilityProducerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.testng.annotations.Test;
import org.testng.Assert;
import com.google.common.collect.Maps;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.GaaSObservabilityEventExperimental;
import org.apache.gobblin.metrics.JobStatus;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.metrics.reporter.util.AvroBinarySerializer;
import org.apache.gobblin.metrics.reporter.util.AvroSerializer;
import org.apache.gobblin.metrics.reporter.util.NoopSchemaVersionWriter;
import org.apache.gobblin.runtime.DatasetTaskSummary;
import org.apache.gobblin.runtime.troubleshooter.InMemoryMultiContextIssueRepository;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.IssueSeverity;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterUtils;
import org.apache.gobblin.runtime.util.GsonUtils;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.orchestration.AzkabanProjectConfig;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
/**
 * Tests for {@code GaaSObservabilityEventProducer}: verifies that job-status
 * {@code State} objects are converted into {@code GaaSObservabilityEventExperimental}
 * events with the expected field mapping, and that the produced events are
 * serializable against the Avro schema.
 */
public class GaaSObservabilityProducerTest {
// Issue repository seeded per test; the producer attaches issues keyed by the job's context id.
private MultiContextIssueRepository issueRepository = new InMemoryMultiContextIssueRepository();
/**
 * Emits a job status carrying every metadata field the producer understands and checks
 * that each field is mapped onto the resulting event, then round-trips the event through
 * an Avro binary serializer to prove it is schema-valid.
 */
@Test
public void testCreateGaaSObservabilityEventWithFullMetadata() throws Exception {
String flowGroup = "testFlowGroup1";
String flowName = "testFlowName1";
String jobName = String.format("%s_%s_%s", flowGroup, flowName, "testJobName1");
String flowExecutionId = "1";
// Seed one issue for this exact job so the event is expected to carry exactly one issue.
this.issueRepository.put(
TroubleshooterUtils.getContextIdForJob(flowGroup, flowName, flowExecutionId, jobName),
createTestIssue("issueSummary", "issueCode", IssueSeverity.INFO)
);
// Two dataset summaries: one committed, one not; both must appear in datasetsWritten.
List<DatasetTaskSummary> summaries = new ArrayList<>();
DatasetTaskSummary dataset1 = new DatasetTaskSummary("/testFolder", 100, 1000, true);
DatasetTaskSummary dataset2 = new DatasetTaskSummary("/testFolder2", 1000, 10000, false);
summaries.add(dataset1);
summaries.add(dataset2);
State state = new State();
state.setProp(ServiceConfigKeys.GOBBLIN_SERVICE_INSTANCE_NAME, "testCluster");
MockGaaSObservabilityEventProducer producer = new MockGaaSObservabilityEventProducer(state, this.issueRepository);
// Build the GTE-style metadata map that mimics a COMPLETE job-status event.
Map<String, String> gteEventMetadata = Maps.newHashMap();
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, flowGroup);
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, flowName);
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, flowExecutionId);
gteEventMetadata.put(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, jobName);
gteEventMetadata.put(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, flowName);
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_EDGE_FIELD, "flowEdge");
gteEventMetadata.put(TimingEvent.FlowEventConstants.SPEC_EXECUTOR_FIELD, "specExecutor");
gteEventMetadata.put(AzkabanProjectConfig.USER_TO_PROXY, "azkabanUser");
gteEventMetadata.put(TimingEvent.METADATA_MESSAGE, "hostName");
gteEventMetadata.put(TimingEvent.JOB_START_TIME, "20");
gteEventMetadata.put(TimingEvent.JOB_END_TIME, "100");
gteEventMetadata.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.COMPLETE.name());
gteEventMetadata.put(TimingEvent.JOB_ORCHESTRATED_TIME, "1");
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_MODIFICATION_TIME_FIELD, "20");
gteEventMetadata.put(TimingEvent.DATASET_TASK_SUMMARIES, GsonUtils.GSON_WITH_DATE_HANDLING.toJson(summaries));
gteEventMetadata.put(JobExecutionPlan.JOB_PROPS_KEY, "{\"flow\":{\"executionId\":1681242538558},\"user\":{\"to\":{\"proxy\":\"newUser\"}}}");
Properties jobStatusProps = new Properties();
jobStatusProps.putAll(gteEventMetadata);
producer.emitObservabilityEvent(new State(jobStatusProps));
// Exactly one event should have been captured by the mock producer.
List<GaaSObservabilityEventExperimental> emittedEvents = producer.getTestEmittedEvents();
Assert.assertEquals(emittedEvents.size(), 1);
Iterator<GaaSObservabilityEventExperimental> iterator = emittedEvents.iterator();
GaaSObservabilityEventExperimental event = iterator.next();
// Field-by-field verification of the metadata -> event mapping.
Assert.assertEquals(event.getFlowGroup(), flowGroup);
Assert.assertEquals(event.getFlowName(), flowName);
Assert.assertEquals(event.getJobName(), jobName);
Assert.assertEquals(event.getFlowExecutionId(), Long.valueOf(flowExecutionId));
Assert.assertEquals(event.getJobStatus(), JobStatus.SUCCEEDED);
Assert.assertEquals(event.getExecutorUrl(), "hostName");
Assert.assertEquals(event.getIssues().size(), 1);
Assert.assertEquals(event.getFlowGraphEdgeId(), "flowEdge");
Assert.assertEquals(event.getExecutorId(), "specExecutor");
Assert.assertEquals(event.getExecutionUserUrn(), "azkabanUser");
Assert.assertEquals(event.getJobOrchestratedTime(), Long.valueOf(1));
Assert.assertEquals(event.getLastFlowModificationTime(), Long.valueOf(20));
Assert.assertEquals(event.getJobStartTime(), Long.valueOf(20));
Assert.assertEquals(event.getJobEndTime(), Long.valueOf(100));
Assert.assertEquals(event.getDatasetsWritten().size(), 2);
Assert.assertEquals(event.getDatasetsWritten().get(0).getDatasetUrn(), dataset1.getDatasetUrn());
Assert.assertEquals(event.getDatasetsWritten().get(0).getEntitiesWritten(), Long.valueOf(dataset1.getRecordsWritten()));
Assert.assertEquals(event.getDatasetsWritten().get(0).getBytesWritten(), Long.valueOf(dataset1.getBytesWritten()));
Assert.assertEquals(event.getDatasetsWritten().get(0).getSuccessfullyCommitted(), Boolean.valueOf(dataset1.isSuccessfullyCommitted()));
Assert.assertEquals(event.getDatasetsWritten().get(1).getDatasetUrn(), dataset2.getDatasetUrn());
Assert.assertEquals(event.getDatasetsWritten().get(1).getEntitiesWritten(), Long.valueOf(dataset2.getRecordsWritten()));
Assert.assertEquals(event.getDatasetsWritten().get(1).getBytesWritten(), Long.valueOf(dataset2.getBytesWritten()));
Assert.assertEquals(event.getDatasetsWritten().get(1).getSuccessfullyCommitted(), Boolean.valueOf(dataset2.isSuccessfullyCommitted()));
Assert.assertEquals(event.getJobProperties(), "{\"flow\":{\"executionId\":1681242538558},\"user\":{\"to\":{\"proxy\":\"newUser\"}}}");
Assert.assertEquals(event.getGaasId(), "testCluster");
// Round-trip through the Avro serializer: throws if the event violates the schema.
AvroSerializer<GaaSObservabilityEventExperimental> serializer = new AvroBinarySerializer<>(
GaaSObservabilityEventExperimental.SCHEMA$, new NoopSchemaVersionWriter()
);
serializer.serializeRecord(event);
}
/**
 * Emits a CANCELLED job status with only a minimal subset of metadata and checks that
 * optional fields (orchestrated/start times, user urn, executor URL) come back null
 * while required fields are still populated and the event stays serializable.
 */
@Test
public void testCreateGaaSObservabilityEventWithPartialMetadata() throws Exception {
String flowGroup = "testFlowGroup2";
String flowName = "testFlowName2";
String jobName = String.format("%s_%s_%s", flowGroup, flowName, "testJobName1");
String flowExecutionId = "1";
this.issueRepository.put(
TroubleshooterUtils.getContextIdForJob(flowGroup, flowName, flowExecutionId, jobName),
createTestIssue("issueSummary", "issueCode", IssueSeverity.INFO)
);
MockGaaSObservabilityEventProducer producer = new MockGaaSObservabilityEventProducer(new State(), this.issueRepository);
// Only required identification fields plus the CANCELLED status are supplied.
Map<String, String> gteEventMetadata = Maps.newHashMap();
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, flowGroup);
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, flowName);
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, "1");
gteEventMetadata.put(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, jobName);
gteEventMetadata.put(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, flowName);
gteEventMetadata.put(TimingEvent.FlowEventConstants.FLOW_EDGE_FIELD, "flowEdge");
gteEventMetadata.put(TimingEvent.FlowEventConstants.SPEC_EXECUTOR_FIELD, "specExecutor");
gteEventMetadata.put(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.CANCELLED.name());
Properties jobStatusProps = new Properties();
jobStatusProps.putAll(gteEventMetadata);
producer.emitObservabilityEvent(new State(jobStatusProps));
List<GaaSObservabilityEventExperimental> emittedEvents = producer.getTestEmittedEvents();
Assert.assertEquals(emittedEvents.size(), 1);
Iterator<GaaSObservabilityEventExperimental> iterator = emittedEvents.iterator();
GaaSObservabilityEventExperimental event = iterator.next();
Assert.assertEquals(event.getFlowGroup(), flowGroup);
Assert.assertEquals(event.getFlowName(), flowName);
Assert.assertEquals(event.getJobName(), jobName);
Assert.assertEquals(event.getFlowExecutionId(), Long.valueOf(flowExecutionId));
Assert.assertEquals(event.getJobStatus(), JobStatus.CANCELLED);
Assert.assertEquals(event.getIssues().size(), 1);
Assert.assertEquals(event.getFlowGraphEdgeId(), "flowEdge");
Assert.assertEquals(event.getExecutorId(), "specExecutor");
// Optional fields that were not supplied should be null, not defaults.
Assert.assertEquals(event.getJobOrchestratedTime(), null);
Assert.assertEquals(event.getJobStartTime(), null);
Assert.assertEquals(event.getExecutionUserUrn(), null);
Assert.assertEquals(event.getExecutorUrl(), null);
AvroSerializer<GaaSObservabilityEventExperimental> serializer = new AvroBinarySerializer<>(
GaaSObservabilityEventExperimental.SCHEMA$, new NoopSchemaVersionWriter()
);
serializer.serializeRecord(event);
}
// Builds a minimal Issue stamped with the current time for seeding the repository.
private Issue createTestIssue(String summary, String code, IssueSeverity severity) {
return Issue.builder().summary(summary).code(code).time(ZonedDateTime.now()).severity(severity).build();
}
}
| 3,762 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/monitoring/MysqlJobStatusRetrieverTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.io.IOException;
import java.util.Iterator;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Strings;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metastore.MysqlJobStatusStateStore;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.ServiceConfigKeys;
import static org.mockito.Mockito.mock;
/**
 * MySQL-backed implementation of {@link JobStatusRetrieverTest}: runs the shared
 * retriever test suite against a {@code MysqlJobStatusRetriever} wired to a test
 * metastore database, plus MySQL-specific column-length edge cases.
 *
 * NOTE(review): the re-declared {@code @Test} methods exist to impose an explicit
 * dependsOnMethods ordering over the inherited suite — presumably because the tests
 * share state in the backing store; confirm before reordering.
 */
public class MysqlJobStatusRetrieverTest extends JobStatusRetrieverTest {
// Direct handle to the backing state store, used for cleanup between runs.
private MysqlJobStatusStateStore<State> dbJobStateStore;
private static final String TEST_USER = "testUser";
private static final String TEST_PASSWORD = "testPassword";
/**
 * Builds a MysqlJobStatusRetriever against a fresh test metastore DB and clears
 * any pre-existing state for the test flow.
 */
@BeforeClass
@Override
public void setUp() throws Exception {
ITestMetastoreDatabase testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
String jdbcUrl = testMetastoreDatabase.getJdbcUrl();
ConfigBuilder configBuilder = ConfigBuilder.create();
configBuilder.addPrimitive(MysqlJobStatusRetriever.MYSQL_JOB_STATUS_RETRIEVER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_URL_KEY, jdbcUrl);
configBuilder.addPrimitive(MysqlJobStatusRetriever.MYSQL_JOB_STATUS_RETRIEVER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_USER_KEY, TEST_USER);
configBuilder.addPrimitive(MysqlJobStatusRetriever.MYSQL_JOB_STATUS_RETRIEVER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, TEST_PASSWORD);
configBuilder.addPrimitive(ServiceConfigKeys.GOBBLIN_SERVICE_DAG_MANAGER_ENABLED_KEY, "true");
this.jobStatusRetriever =
new MysqlJobStatusRetriever(configBuilder.build(), mock(MultiContextIssueRepository.class));
this.dbJobStateStore = ((MysqlJobStatusRetriever) this.jobStatusRetriever).getStateStore();
cleanUpDir();
}
// Delegates to the shared suite; re-declared only to fix execution order.
@Test
public void testGetJobStatusesForFlowExecution() throws IOException {
super.testGetJobStatusesForFlowExecution();
}
@Test (dependsOnMethods = "testGetJobStatusesForFlowExecution")
public void testJobTiming() throws Exception {
super.testJobTiming();
}
@Test (dependsOnMethods = "testJobTiming")
public void testOutOfOrderJobTimingEvents() throws IOException {
super.testOutOfOrderJobTimingEvents();
}
@Test (dependsOnMethods = "testJobTiming")
public void testGetJobStatusesForFlowExecution1() {
super.testGetJobStatusesForFlowExecution1();
}
@Test (dependsOnMethods = "testGetJobStatusesForFlowExecution1")
public void testGetLatestExecutionIdsForFlow() throws Exception {
super.testGetLatestExecutionIdsForFlow();
}
/**
 * Verifies that, with the dag manager enabled, the flow-level status is driven by
 * the NA_KEY (flow-level) job-status entries: after each job-level update the flow
 * status stays at the previous flow-level value until a matching NA_KEY entry arrives.
 */
@Test (dependsOnMethods = "testGetLatestExecutionIdsForFlow")
public void testGetFlowStatusFromJobStatuses() throws Exception {
long flowExecutionId = 1237L;
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
Assert.assertEquals(ExecutionStatus.COMPILED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.COMPILED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.ORCHESTRATED.name());
Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.RUNNING.name());
Assert.assertEquals(ExecutionStatus.RUNNING,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.RUNNING,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPLETE.name());
Assert.assertEquals(ExecutionStatus.COMPLETE,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
}
/**
 * A job status whose identifiers are exactly at the configured max column lengths
 * must be stored and retrieved successfully.
 */
@Test
public void testMaxColumnName() throws Exception {
Properties properties = new Properties();
long flowExecutionId = 12340L;
String flowGroup = Strings.repeat("A", ServiceConfigKeys.MAX_FLOW_GROUP_LENGTH);
String flowName = Strings.repeat("B", ServiceConfigKeys.MAX_FLOW_NAME_LENGTH);
properties.setProperty(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, flowGroup);
properties.setProperty(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, flowName);
properties.setProperty(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, String.valueOf(flowExecutionId));
properties.setProperty(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, Strings.repeat("C", ServiceConfigKeys.MAX_JOB_NAME_LENGTH));
properties.setProperty(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.ORCHESTRATED.name());
properties.setProperty(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, Strings.repeat("D", ServiceConfigKeys.MAX_JOB_GROUP_LENGTH));
State jobStatus = new State(properties);
KafkaJobStatusMonitor.addJobStatusToStateStore(jobStatus, this.jobStatusRetriever.getStateStore(), new NoopGaaSObservabilityEventProducer());
Iterator<JobStatus>
jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(flowName, flowGroup, flowExecutionId);
Assert.assertTrue(jobStatusIterator.hasNext());
Assert.assertEquals(jobStatusIterator.next().getFlowGroup(), flowGroup);
}
/**
 * A flow group one character over the max length must be rejected by MySQL
 * with a "Data too long" error when persisting the job status.
 */
@Test
public void testInvalidColumnName() {
Properties properties = new Properties();
long flowExecutionId = 12340L;
String flowGroup = Strings.repeat("A", ServiceConfigKeys.MAX_FLOW_GROUP_LENGTH + 1);
String flowName = Strings.repeat("B", ServiceConfigKeys.MAX_FLOW_NAME_LENGTH);
properties.setProperty(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, flowGroup);
properties.setProperty(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, flowName);
properties.setProperty(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, String.valueOf(flowExecutionId));
properties.setProperty(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, Strings.repeat("C", ServiceConfigKeys.MAX_JOB_NAME_LENGTH));
properties.setProperty(JobStatusRetriever.EVENT_NAME_FIELD, ExecutionStatus.ORCHESTRATED.name());
properties.setProperty(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, Strings.repeat("D", ServiceConfigKeys.MAX_JOB_GROUP_LENGTH));
State jobStatus = new State(properties);
try {
KafkaJobStatusMonitor.addJobStatusToStateStore(jobStatus, this.jobStatusRetriever.getStateStore(), new NoopGaaSObservabilityEventProducer());
} catch (IOException e) {
// Expect the MySQL "Data too long" error nested two causes deep.
Assert.assertTrue(e.getCause().getCause().getMessage().contains("Data too long"));
return;
}
Assert.fail();
}
// Clears the job-status store for the shared test flow between dependent tests.
@Override
void cleanUpDir() throws Exception {
this.dbJobStateStore.delete(KafkaJobStatusMonitor.jobStatusStoreName(FLOW_GROUP, FLOW_NAME));
}
}
| 3,763 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/monitoring/MockGaaSObservabilityEventProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.GaaSObservabilityEventExperimental;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
/**
* An extension of GaaSObservabilityEventProducer which creates the events and stores them in a list
* Tests can use a getter to fetch a read-only version of the events that were emitted
*/
public class MockGaaSObservabilityEventProducer extends GaaSObservabilityEventProducer {
private List<GaaSObservabilityEventExperimental> emittedEvents = new ArrayList<>();
public MockGaaSObservabilityEventProducer(State state, MultiContextIssueRepository issueRepository) {
super(state, issueRepository, false);
}
@Override
protected void sendUnderlyingEvent(GaaSObservabilityEventExperimental event) {
emittedEvents.add(event);
}
/**
* Returns the events that the mock producer has written
* This should only be used as a read-only object for emitted GaaSObservabilityEvents
* @return list of events that would have been emitted
*/
public List<GaaSObservabilityEventExperimental> getTestEmittedEvents() {
return Collections.unmodifiableList(this.emittedEvents);
}
} | 3,764 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/monitoring/JobStatusRetrieverTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.test.matchers.service.monitoring.FlowStatusMatch;
import org.apache.gobblin.test.matchers.service.monitoring.JobStatusMatch;
import static org.hamcrest.MatcherAssert.assertThat;
/**
 * Abstract contract test for {@link JobStatusRetriever} implementations. A concrete subclass
 * creates {@link #jobStatusRetriever} (and its backing state store) in {@code setUp()} and clears
 * the store in {@link #cleanUpDir()}.
 *
 * NOTE(review): the tests share one store and are ordered via TestNG {@code dependsOnMethods},
 * so they are not independent of each other; run the whole class, not single methods.
 */
public abstract class JobStatusRetrieverTest {
  protected static final String FLOW_GROUP = "myFlowGroup";
  protected static final String FLOW_NAME = "myFlowName";
  // alternate flow groups/names exercised by testGetFlowStatusesForFlowGroupExecutions
  protected static final String FLOW_GROUP_ALT_A = "myFlowGroup-alt-A";
  protected static final String FLOW_GROUP_ALT_B = "myFlowGroup-alt-B";
  protected static final String FLOW_NAME_ALT_1 = "myFlowName-alt-1";
  protected static final String FLOW_NAME_ALT_2 = "myFlowName-alt-2";
  protected static final String FLOW_NAME_ALT_3 = "myFlowName-alt-3";
  // job group of the most recently written record: MY_JOB_GROUP for job-level records,
  // JobStatusRetriever.NA_KEY for flow-level records (set by addFlowIdJobStatusToStateStore)
  protected String jobGroup;
  private static final String MY_JOB_GROUP = "myJobGroup";
  protected static final String MY_JOB_NAME_1 = "myJobName1";
  private static final String MY_JOB_NAME_2 = "myJobName2";
  private static final long JOB_EXECUTION_ID = 1111L;
  private static final String MESSAGE = "https://myServer:8143/1234/1111";
  // fixed timestamps used to verify orchestrated/start/end time propagation through the retriever
  protected static final long JOB_ORCHESTRATED_TIME = 3;
  protected static final long JOB_START_TIME = 5;
  protected static final long JOB_END_TIME = 15;

  JobStatusRetriever jobStatusRetriever;

  /** Creates {@link #jobStatusRetriever} backed by a concrete state store. */
  abstract void setUp() throws Exception;

  /** Convenience overload: default flow group/name, zero timestamps, empty properties. */
  protected void addJobStatusToStateStore(long flowExecutionId, String jobName, String status) throws IOException {
    addFlowIdJobStatusToStateStore(FLOW_GROUP, FLOW_NAME, flowExecutionId, jobName, status, 0, 0, new Properties());
  }

  /** Convenience overload: explicit flow id, zero timestamps, empty properties. */
  protected void addFlowIdJobStatusToStateStore(String flowGroup, String flowName, long flowExecutionId, String jobName, String status) throws IOException {
    addFlowIdJobStatusToStateStore(flowGroup, flowName, flowExecutionId, jobName, status, 0, 0, new Properties());
  }

  /** Convenience overload: default flow group/name, explicit timestamps, empty properties. */
  protected void addJobStatusToStateStore(long flowExecutionId, String jobName, String status, long startTime, long endTime) throws IOException {
    addFlowIdJobStatusToStateStore(FLOW_GROUP, FLOW_NAME, flowExecutionId, jobName, status, startTime, endTime, new Properties());
  }

  /** Convenience overload: default flow group/name, explicit timestamps and properties. */
  protected void addJobStatusToStateStore(long flowExecutionId, String jobName, String status, long startTime, long endTime, Properties properties) throws IOException {
    addFlowIdJobStatusToStateStore(FLOW_GROUP, FLOW_NAME, flowExecutionId, jobName, status, startTime, endTime, properties);
  }

  /**
   * Writes a single job-status record to the state store, mimicking what
   * {@link KafkaJobStatusMonitor} does when it consumes a job event.
   * A job name of {@link JobStatusRetriever#NA_KEY} denotes a flow-level (not job-level) status.
   * Depending on {@code status}, either the start, end, or orchestrated timestamp property is set.
   */
  protected void addFlowIdJobStatusToStateStore(String flowGroup, String flowName, long flowExecutionId, String jobName, String status, long startTime, long endTime, Properties properties) throws IOException {
    properties.setProperty(TimingEvent.FlowEventConstants.FLOW_GROUP_FIELD, flowGroup);
    properties.setProperty(TimingEvent.FlowEventConstants.FLOW_NAME_FIELD, flowName);
    properties.setProperty(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, String.valueOf(flowExecutionId));
    properties.setProperty(TimingEvent.FlowEventConstants.JOB_NAME_FIELD, jobName);
    if (!jobName.equals(JobStatusRetriever.NA_KEY)) {
      // job-level record: attach a job group, execution id and message
      jobGroup = MY_JOB_GROUP;
      properties.setProperty(TimingEvent.FlowEventConstants.JOB_EXECUTION_ID_FIELD, String.valueOf(JOB_EXECUTION_ID));
      properties.setProperty(TimingEvent.METADATA_MESSAGE, MESSAGE);
    } else {
      // flow-level record: group is also NA
      jobGroup = JobStatusRetriever.NA_KEY;
    }
    properties.setProperty(JobStatusRetriever.EVENT_NAME_FIELD, status);
    properties.setProperty(TimingEvent.FlowEventConstants.JOB_GROUP_FIELD, jobGroup);
    if (status.equals(ExecutionStatus.RUNNING.name())) {
      properties.setProperty(TimingEvent.JOB_START_TIME, String.valueOf(startTime));
    } else if (status.equals(ExecutionStatus.COMPLETE.name())) {
      properties.setProperty(TimingEvent.JOB_END_TIME, String.valueOf(endTime));
    } else if (status.equals(ExecutionStatus.ORCHESTRATED.name())) {
      // NOTE(review): uses endTime for the orchestrated timestamp; callers in this class pass
      // identical start/end values for this case, so the two are interchangeable here
      properties.setProperty(TimingEvent.JOB_ORCHESTRATED_TIME, String.valueOf(endTime));
    }
    State jobStatus = new State(properties);
    KafkaJobStatusMonitor.addJobStatusToStateStore(jobStatus, this.jobStatusRetriever.getStateStore(), new NoopGaaSObservabilityEventProducer());
  }

  /** Builds retry-tracking properties: generation number, attempt count, and retry flag. */
  static Properties createAttemptsProperties(int currGen, int currAttempts, boolean shouldRetry) {
    Properties properties = new Properties();
    properties.setProperty(TimingEvent.FlowEventConstants.CURRENT_GENERATION_FIELD, String.valueOf(currGen));
    properties.setProperty(TimingEvent.FlowEventConstants.CURRENT_ATTEMPTS_FIELD, String.valueOf(currAttempts));
    properties.setProperty(TimingEvent.FlowEventConstants.SHOULD_RETRY_FIELD, String.valueOf(shouldRetry));
    return properties;
  }

  /**
   * Verifies status aggregation for a job that fails and is retried, including events arriving
   * out of order: a FAILED job with retries left surfaces as PENDING_RETRY; a later-generation
   * PENDING_RESUME supersedes an earlier-generation COMPLETE until a COMPLETE of the same
   * generation arrives.
   */
  @Test (dependsOnMethods = "testGetLatestExecutionIdsForFlow")
  public void testOutOfOrderJobTimingEventsForRetryingJob() throws IOException {
    long flowExecutionId = 1240L;
    // generation 1, attempt 0: job runs, then fails -> should surface as PENDING_RETRY
    Properties properties = createAttemptsProperties(1, 0, false);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_START_TIME, JOB_START_TIME, properties);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME, properties);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.FAILED.name(), 0, 0, properties);
    Iterator<JobStatus>
        jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId);
    JobStatus jobStatus = jobStatusIterator.next();
    // skip the flow-level (NA_KEY) record if the retriever returns it first
    if (jobStatus.getJobName().equals(JobStatusRetriever.NA_KEY)) {
      jobStatus = jobStatusIterator.next();
    }
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.PENDING_RETRY.name());
    Assert.assertEquals(jobStatus.isShouldRetry(), true);
    // generation 1, attempt 1: the retry is running
    properties = createAttemptsProperties(1, 1, false);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_START_TIME, JOB_START_TIME, properties);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME, properties);
    jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId);
    jobStatus = jobStatusIterator.next();
    if (jobStatus.getJobName().equals(JobStatusRetriever.NA_KEY)) {
      jobStatus = jobStatusIterator.next();
    }
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.RUNNING.name());
    Assert.assertEquals(jobStatus.isShouldRetry(), false);
    Assert.assertEquals(jobStatus.getCurrentAttempts(), 1);
    // a PENDING_RESUME of generation 2 outranks a COMPLETE of generation 1, even if the
    // COMPLETE event is written afterwards (out-of-order delivery)
    Properties properties_new = createAttemptsProperties(2, 0, false);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.PENDING_RESUME.name(), JOB_START_TIME, JOB_START_TIME, properties_new);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name(), JOB_END_TIME, JOB_END_TIME, properties);
    jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId);
    jobStatus = jobStatusIterator.next();
    if (jobStatus.getJobName().equals(JobStatusRetriever.NA_KEY)) {
      jobStatus = jobStatusIterator.next();
    }
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.PENDING_RESUME.name());
    // a COMPLETE of the current generation finally settles the status
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name(), JOB_END_TIME, JOB_END_TIME, properties_new);
    jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId);
    jobStatus = jobStatusIterator.next();
    if (jobStatus.getJobName().equals(JobStatusRetriever.NA_KEY)) {
      jobStatus = jobStatusIterator.next();
    }
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.COMPLETE.name());
  }

  /**
   * Verifies basic retrieval: flow-level (NA_KEY) statuses, filtering by job name/group,
   * flow-status aggregation, and iteration over multiple jobs of one flow execution.
   */
  @Test
  public void testGetJobStatusesForFlowExecution() throws IOException {
    long flowExecutionId = 1234L;
    // a flow-level COMPILED record with no job-level data yet
    addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
    List<JobStatus> jobStatuses = ImmutableList.copyOf(this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId));
    Iterator<JobStatus> jobStatusIterator = jobStatuses.iterator();
    Assert.assertTrue(jobStatusIterator.hasNext());
    JobStatus jobStatus = jobStatusIterator.next();
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.COMPILED.name());
    Assert.assertEquals(jobStatus.getJobName(), JobStatusRetriever.NA_KEY);
    Assert.assertEquals(jobStatus.getJobGroup(), JobStatusRetriever.NA_KEY);
    Assert.assertEquals(jobStatus.getProcessedCount(), 0);
    Assert.assertEquals(jobStatus.getLowWatermark(), "");
    Assert.assertEquals(jobStatus.getHighWatermark(), "");
    // add a RUNNING flow-level record and a RUNNING job; filter by job name/group
    addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.RUNNING.name());
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_START_TIME, JOB_START_TIME);
    jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId, MY_JOB_NAME_1, MY_JOB_GROUP);
    jobStatus = jobStatusIterator.next();
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.RUNNING.name());
    Assert.assertEquals(jobStatus.getJobName(), MY_JOB_NAME_1);
    Assert.assertEquals(jobStatus.getJobGroup(), jobGroup);
    Assert.assertFalse(jobStatusIterator.hasNext());
    // the aggregated flow status should be RUNNING
    Assert.assertEquals(ExecutionStatus.RUNNING,
        this.jobStatusRetriever.getFlowStatusFromJobStatuses(this.jobStatusRetriever.dagManagerEnabled, this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
    // with a second job, iteration returns both jobs (order between them is not fixed)
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_2, ExecutionStatus.RUNNING.name());
    jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId);
    Assert.assertTrue(jobStatusIterator.hasNext());
    jobStatus = jobStatusIterator.next();
    if (JobStatusRetriever.isFlowStatus(jobStatus)) {
      jobStatus = jobStatusIterator.next();
    }
    Assert.assertTrue(jobStatus.getJobName().equals(MY_JOB_NAME_1) || jobStatus.getJobName().equals(MY_JOB_NAME_2));
    String jobName = jobStatus.getJobName();
    String nextExpectedJobName = (MY_JOB_NAME_1.equals(jobName)) ? MY_JOB_NAME_2 : MY_JOB_NAME_1;
    Assert.assertTrue(jobStatusIterator.hasNext());
    jobStatus = jobStatusIterator.next();
    if (JobStatusRetriever.isFlowStatus(jobStatus)) {
      Assert.assertTrue(jobStatusIterator.hasNext());
      jobStatus = jobStatusIterator.next();
    }
    Assert.assertEquals(jobStatus.getJobName(), nextExpectedJobName);
  }

  /** Verifies orchestrated/start/end timestamps survive the ORCHESTRATED->RUNNING->COMPLETE sequence. */
  @Test (dependsOnMethods = "testGetJobStatusesForFlowExecution")
  public void testJobTiming() throws Exception {
    long flowExecutionId = 1233L;
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_START_TIME, JOB_START_TIME);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name(), JOB_END_TIME, JOB_END_TIME);
    Iterator<JobStatus>
        jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId, MY_JOB_NAME_1, MY_JOB_GROUP);
    JobStatus jobStatus = jobStatusIterator.next();
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.COMPLETE.name());
    Assert.assertEquals(jobStatus.getStartTime(), JOB_START_TIME);
    Assert.assertEquals(jobStatus.getEndTime(), JOB_END_TIME);
    Assert.assertEquals(jobStatus.getOrchestratedTime(), JOB_ORCHESTRATED_TIME);
  }

  /** Same as {@link #testJobTiming} but with RUNNING delivered before ORCHESTRATED (out of order). */
  @Test (dependsOnMethods = "testJobTiming")
  public void testOutOfOrderJobTimingEvents() throws IOException {
    long flowExecutionId = 1232L;
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_START_TIME, JOB_START_TIME);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name(), JOB_END_TIME, JOB_END_TIME);
    Iterator<JobStatus>
        jobStatusIterator = this.jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId);
    JobStatus jobStatus = jobStatusIterator.next();
    // skip the flow-level (NA_KEY) record if present
    if (jobStatus.getJobName().equals(JobStatusRetriever.NA_KEY)) {
      jobStatus = jobStatusIterator.next();
    }
    Assert.assertEquals(jobStatus.getEventName(), ExecutionStatus.COMPLETE.name());
    Assert.assertEquals(jobStatus.getStartTime(), JOB_START_TIME);
    Assert.assertEquals(jobStatus.getEndTime(), JOB_END_TIME);
    Assert.assertEquals(jobStatus.getOrchestratedTime(), JOB_ORCHESTRATED_TIME);
  }

  /** Verifies all identifying fields of a job status written by earlier tests for execution 1234. */
  @Test (dependsOnMethods = "testJobTiming")
  public void testGetJobStatusesForFlowExecution1() {
    long flowExecutionId = 1234L;
    Iterator<JobStatus> jobStatusIterator = this.jobStatusRetriever.
        getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId, MY_JOB_NAME_1, MY_JOB_GROUP);
    Assert.assertTrue(jobStatusIterator.hasNext());
    JobStatus jobStatus = jobStatusIterator.next();
    Assert.assertEquals(jobStatus.getJobName(), MY_JOB_NAME_1);
    Assert.assertEquals(jobStatus.getJobGroup(), MY_JOB_GROUP);
    Assert.assertEquals(jobStatus.getJobExecutionId(), JOB_EXECUTION_ID);
    Assert.assertEquals(jobStatus.getFlowName(), FLOW_NAME);
    Assert.assertEquals(jobStatus.getFlowGroup(), FLOW_GROUP);
    Assert.assertEquals(jobStatus.getFlowExecutionId(), flowExecutionId);
    Assert.assertEquals(jobStatus.getMessage(), MESSAGE);
  }

  /**
   * Verifies the latest-N execution-id queries: ids are returned most-recent-first and each
   * execution is counted once even when it has multiple jobs; an empty store yields no ids / -1.
   */
  @Test (dependsOnMethods = "testGetJobStatusesForFlowExecution1")
  public void testGetLatestExecutionIdsForFlow() throws Exception {
    //Add new flow execution to state store
    long flowExecutionId1 = 1235L;
    addJobStatusToStateStore(flowExecutionId1, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name());
    long latestExecutionIdForFlow = this.jobStatusRetriever.getLatestExecutionIdForFlow(FLOW_NAME, FLOW_GROUP);
    Assert.assertEquals(latestExecutionIdForFlow, flowExecutionId1);
    long flowExecutionId2 = 1236L;
    //IMPORTANT: multiple jobs for latest flow verifies that flow executions counted exactly once, not once per constituent job
    addJobStatusToStateStore(flowExecutionId2, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name());
    addJobStatusToStateStore(flowExecutionId2, MY_JOB_NAME_2, ExecutionStatus.RUNNING.name());
    //State store now has 3 flow executions - 1234, 1235, 1236. Get the latest 2 executions i.e. 1235 and 1236.
    List<Long> latestFlowExecutionIds = this.jobStatusRetriever.getLatestExecutionIdsForFlow(FLOW_NAME, FLOW_GROUP, 2);
    Assert.assertEquals(latestFlowExecutionIds.size(), 2);
    Assert.assertEquals(latestFlowExecutionIds, ImmutableList.of(flowExecutionId2, flowExecutionId1));
    //Remove all flow executions from state store
    cleanUpDir();
    Assert.assertEquals(this.jobStatusRetriever.getLatestExecutionIdsForFlow(FLOW_NAME, FLOW_GROUP, 1).size(), 0);
    Assert.assertEquals(this.jobStatusRetriever.getLatestExecutionIdForFlow(FLOW_NAME, FLOW_GROUP), -1L);
  }

  /**
   * Verifies group-wide retrieval of the latest N executions per flow: results are ordered by
   * flow name, then by execution id descending, and include dependent job-level statuses.
   */
  @Test
  public void testGetFlowStatusesForFlowGroupExecutions() throws IOException {
    // a.) simplify to begin, in `FLOW_GROUP_ALT_A`, leaving out job-level status
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME, 101L, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME, 102L, JobStatusRetriever.NA_KEY, ExecutionStatus.RUNNING.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_1, 111L, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_2, 121L, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPLETE.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_2, 122L, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_3, 131L, JobStatusRetriever.NA_KEY, ExecutionStatus.FAILED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_3, 132L, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPLETE.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_3, 133L, JobStatusRetriever.NA_KEY, ExecutionStatus.PENDING_RESUME.name());
    // b.) include job-level status, in `FLOW_GROUP_ALT_B`
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_1, 211L, JobStatusRetriever.NA_KEY, ExecutionStatus.FAILED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_1, 211L, MY_JOB_NAME_2, ExecutionStatus.ORCHESTRATED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 231L, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPLETE.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 231L, MY_JOB_NAME_1, ExecutionStatus.FAILED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 231L, MY_JOB_NAME_2, ExecutionStatus.COMPLETE.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 232L, JobStatusRetriever.NA_KEY, ExecutionStatus.FAILED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 233L, JobStatusRetriever.NA_KEY, ExecutionStatus.ORCHESTRATED.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 233L, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name());
    addFlowIdJobStatusToStateStore(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 233L, MY_JOB_NAME_2, ExecutionStatus.ORCHESTRATED.name());
    // latest 2 executions per flow: 2 (FLOW_NAME) + 1 (ALT_1) + 2 (ALT_2) + 2 (ALT_3)
    List<FlowStatus> flowStatusesForGroupAltA = this.jobStatusRetriever.getFlowStatusesForFlowGroupExecutions(FLOW_GROUP_ALT_A, 2);
    Assert.assertEquals(flowStatusesForGroupAltA.size(), 2 + 1 + 2 + 2);
    assertThat(flowStatusesForGroupAltA.get(0), FlowStatusMatch.of(FLOW_GROUP_ALT_A, FLOW_NAME, 102L, ExecutionStatus.RUNNING));
    assertThat(flowStatusesForGroupAltA.get(1), FlowStatusMatch.of(FLOW_GROUP_ALT_A, FLOW_NAME, 101L, ExecutionStatus.COMPILED));
    assertThat(flowStatusesForGroupAltA.get(2), FlowStatusMatch.of(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_1, 111L, ExecutionStatus.COMPILED));
    assertThat(flowStatusesForGroupAltA.get(3), FlowStatusMatch.of(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_2, 122L, ExecutionStatus.COMPILED));
    assertThat(flowStatusesForGroupAltA.get(4), FlowStatusMatch.of(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_2, 121L, ExecutionStatus.COMPLETE));
    assertThat(flowStatusesForGroupAltA.get(5), FlowStatusMatch.of(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_3, 133L, ExecutionStatus.PENDING_RESUME));
    assertThat(flowStatusesForGroupAltA.get(6), FlowStatusMatch.of(FLOW_GROUP_ALT_A, FLOW_NAME_ALT_3, 132L, ExecutionStatus.COMPLETE));
    // latest 1 execution per flow, now with dependent job statuses attached
    List<FlowStatus> flowStatusesForGroupAltB = this.jobStatusRetriever.getFlowStatusesForFlowGroupExecutions(FLOW_GROUP_ALT_B, 1);
    Assert.assertEquals(flowStatusesForGroupAltB.size(), 1 + 1);
    assertThat(flowStatusesForGroupAltB.get(0), FlowStatusMatch.withDependentJobStatuses(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_1, 211L, ExecutionStatus.FAILED,
        ImmutableList.of(JobStatusMatch.Dependent.of(MY_JOB_GROUP, MY_JOB_NAME_2, 1111L, ExecutionStatus.ORCHESTRATED.name()))));
    assertThat(flowStatusesForGroupAltB.get(1), FlowStatusMatch.withDependentJobStatuses(FLOW_GROUP_ALT_B, FLOW_NAME_ALT_3, 233L, ExecutionStatus.ORCHESTRATED,
        ImmutableList.of(
            JobStatusMatch.Dependent.of(MY_JOB_GROUP, MY_JOB_NAME_1, 1111L, ExecutionStatus.COMPLETE.name()),
            JobStatusMatch.Dependent.of(MY_JOB_GROUP, MY_JOB_NAME_2, 1111L, ExecutionStatus.ORCHESTRATED.name()))));
  }

  /** Removes all state written by this test class from the backing store. */
  abstract void cleanUpDir() throws Exception;

  @AfterClass
  public void tearDown() throws Exception {
    cleanUpDir();
  }
}
| 3,765 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.io.IOException;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metastore.MysqlJobStatusStateStore;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.ServiceConfigKeys;
import static org.mockito.Mockito.mock;
/**
 * {@link JobStatusRetrieverTest} variant backed by a MySQL state store with the DagManager
 * disabled. Flow status is aggregated differently when the DagManager is not in use, so this
 * class adds {@link #testGetFlowStatusFromJobStatuses()} to pin down that behavior.
 */
public class MysqlJobStatusRetrieverTestWithoutDagManager extends JobStatusRetrieverTest {
  // handle to the retriever's backing store, kept so cleanUpDir() can wipe it between tests
  private MysqlJobStatusStateStore<State> dbJobStateStore;
  private static final String TEST_USER = "testUser";
  private static final String TEST_PASSWORD = "testPassword";

  @BeforeClass
  @Override
  public void setUp() throws Exception {
    // NOTE(review): the embedded test database is a method-local and is never closed here;
    // presumably it is reclaimed when the JVM exits — confirm ITestMetastoreDatabase's lifecycle
    ITestMetastoreDatabase testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
    String jdbcUrl = testMetastoreDatabase.getJdbcUrl();
    ConfigBuilder configBuilder = ConfigBuilder.create();
    configBuilder.addPrimitive(MysqlJobStatusRetriever.MYSQL_JOB_STATUS_RETRIEVER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_URL_KEY, jdbcUrl);
    configBuilder.addPrimitive(MysqlJobStatusRetriever.MYSQL_JOB_STATUS_RETRIEVER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_USER_KEY, TEST_USER);
    configBuilder.addPrimitive(MysqlJobStatusRetriever.MYSQL_JOB_STATUS_RETRIEVER_PREFIX + "." + ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, TEST_PASSWORD);
    // the distinguishing setting of this test class: DagManager OFF
    configBuilder.addPrimitive(ServiceConfigKeys.GOBBLIN_SERVICE_DAG_MANAGER_ENABLED_KEY, "false");
    this.jobStatusRetriever =
        new MysqlJobStatusRetriever(configBuilder.build(), mock(MultiContextIssueRepository.class));
    this.dbJobStateStore = ((MysqlJobStatusRetriever) this.jobStatusRetriever).getStateStore();
    cleanUpDir();
  }

  // The following overrides exist only to re-declare the TestNG dependsOnMethods ordering
  // for the inherited tests; each simply delegates to the base-class implementation.

  @Test
  public void testGetJobStatusesForFlowExecution() throws IOException {
    super.testGetJobStatusesForFlowExecution();
  }

  @Test (dependsOnMethods = "testGetJobStatusesForFlowExecution")
  public void testJobTiming() throws Exception {
    super.testJobTiming();
  }

  @Test (dependsOnMethods = "testJobTiming")
  public void testOutOfOrderJobTimingEvents() throws IOException {
    super.testOutOfOrderJobTimingEvents();
  }

  @Test (dependsOnMethods = "testJobTiming")
  public void testGetJobStatusesForFlowExecution1() {
    super.testGetJobStatusesForFlowExecution1();
  }

  @Test (dependsOnMethods = "testGetJobStatusesForFlowExecution1")
  public void testGetLatestExecutionIdsForFlow() throws Exception {
    super.testGetLatestExecutionIdsForFlow();
  }

  /**
   * With the DagManager disabled, flow-level-only statuses (COMPILED/ORCHESTRATED with no
   * job-level record) aggregate to $UNKNOWN; once job-level statuses exist, the flow status
   * follows the jobs rather than the flow-level events.
   */
  @Test (dependsOnMethods = "testGetLatestExecutionIdsForFlow")
  public void testGetFlowStatusFromJobStatuses() throws Exception {
    long flowExecutionId = 1237L;
    // flow-level COMPILED alone: unknown
    addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
    Assert.assertEquals(ExecutionStatus.$UNKNOWN,
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
    // flow-level ORCHESTRATED alone: still unknown
    addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.ORCHESTRATED.name());
    Assert.assertEquals(ExecutionStatus.$UNKNOWN,
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
    // a job-level ORCHESTRATED drives the flow status
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
    Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
    // a flow-level RUNNING does not advance the flow status by itself
    addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.RUNNING.name());
    Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
    // job-level RUNNING does
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
    Assert.assertEquals(ExecutionStatus.RUNNING,
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
    // flow-level COMPLETE does not complete the flow while the job is still RUNNING
    addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPLETE.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
    Assert.assertEquals(ExecutionStatus.RUNNING,
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
    // job-level COMPLETE finally completes the flow
    addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name());
    Assert.assertEquals(ExecutionStatus.COMPLETE,
        JobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
  }

  /** Drops the job-status table for the default flow, emptying the store between tests. */
  @Override
  void cleanUpDir() throws Exception {
    this.dbJobStateStore.delete(KafkaJobStatusMonitor.jobStatusStoreName(FLOW_GROUP, FLOW_NAME));
  }
}
| 3,766 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.ServiceConfigKeys;
import static org.mockito.Mockito.mock;
public class FsJobStatusRetrieverTest extends JobStatusRetrieverTest {
private String stateStoreDir = "/tmp/jobStatusRetrieverTest/statestore";
@BeforeClass
public void setUp() throws Exception {
cleanUpDir();
Config config = ConfigFactory.empty()
.withValue(FsJobStatusRetriever.CONF_PREFIX + "." + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY,
ConfigValueFactory.fromAnyRef(stateStoreDir))
.withValue(ServiceConfigKeys.GOBBLIN_SERVICE_DAG_MANAGER_ENABLED_KEY,
ConfigValueFactory.fromAnyRef("true"));
this.jobStatusRetriever = new FsJobStatusRetriever(config, mock(MultiContextIssueRepository.class));
}
@Test
public void testGetJobStatusesForFlowExecution() throws IOException {
super.testGetJobStatusesForFlowExecution();
}
@Test (dependsOnMethods = "testGetJobStatusesForFlowExecution")
public void testJobTiming() throws Exception {
super.testJobTiming();
}
@Test (dependsOnMethods = "testJobTiming")
public void testOutOfOrderJobTimingEvents() throws IOException {
super.testOutOfOrderJobTimingEvents();
}
@Test (dependsOnMethods = "testJobTiming")
public void testGetJobStatusesForFlowExecution1() {
super.testGetJobStatusesForFlowExecution1();
}
@Test (dependsOnMethods = "testGetJobStatusesForFlowExecution1")
public void testGetLatestExecutionIdsForFlow() throws Exception {
super.testGetLatestExecutionIdsForFlow();
}
@Test (dependsOnMethods = "testGetLatestExecutionIdsForFlow")
public void testGetFlowStatusFromJobStatuses() throws Exception {
long flowExecutionId = 1237L;
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
Assert.assertEquals(ExecutionStatus.COMPILED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.COMPILED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.ORCHESTRATED.name());
Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.RUNNING.name());
Assert.assertEquals(ExecutionStatus.RUNNING,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.RUNNING,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPLETE.name());
Assert.assertEquals(ExecutionStatus.COMPLETE,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
}
@Override
protected void cleanUpDir() throws Exception {
File specStoreDir = new File(this.stateStoreDir);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
}
} | 3,767 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.io.FileUtils;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flow.MultiHopFlowCompiler;
import org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest;
import org.apache.gobblin.service.modules.flowgraph.BaseFlowGraph;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowEdge;
import org.apache.gobblin.service.modules.flowgraph.FlowGraph;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.service.modules.template_catalog.UpdatableFSFlowTemplateCatalog;
import org.apache.hadoop.fs.Path;
import org.eclipse.jgit.transport.RefSpec;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class FsFlowGraphMonitorTest {
private static final Logger logger = LoggerFactory.getLogger(FsFlowGraphMonitorTest.class);
private final File TEST_DIR = new File(FileUtils.getTempDirectory(), "flowGraphTemplates");
private final File flowGraphTestDir = new File(TEST_DIR, "fsFlowGraphTestDir");
private final File flowGraphDir = new File(flowGraphTestDir, "gobblin-flowgraph");
private static final String NODE_1_FILE = "node1.properties";
private final File node1Dir = new File(FileUtils.getTempDirectory(), "node1");
private final File node1File = new File(node1Dir, NODE_1_FILE);
private static final String NODE_2_FILE = "node2.properties";
private final File node2Dir = new File(FileUtils.getTempDirectory(), "node2");
private final File node2File = new File(node2Dir, NODE_2_FILE);
private final File edge1Dir = new File(node1Dir, "node2");
private final File edge1File = new File(edge1Dir, "edge1.properties");
private final File sharedNodeFolder = new File(flowGraphTestDir, "nodes");
private RefSpec masterRefSpec = new RefSpec("master");
private Optional<UpdatableFSFlowTemplateCatalog> flowCatalog;
private Config config;
private AtomicReference<FlowGraph> flowGraph;
private FsFlowGraphMonitor flowGraphMonitor;
private Map<URI, TopologySpec> topologySpecMap;
private File flowTemplateCatalogFolder;
@BeforeClass
public void setUp() throws Exception {
cleanUpDir(TEST_DIR.toString());
TEST_DIR.mkdirs();
URI topologyCatalogUri = this.getClass().getClassLoader().getResource("topologyspec_catalog").toURI();
this.topologySpecMap = MultiHopFlowCompilerTest.buildTopologySpecMap(topologyCatalogUri);
this.config = ConfigBuilder.create()
.addPrimitive(FsFlowGraphMonitor.FS_FLOWGRAPH_MONITOR_PREFIX + "."
+ ConfigurationKeys.FLOWGRAPH_ABSOLUTE_DIR, flowGraphTestDir.getAbsolutePath())
.addPrimitive(FsFlowGraphMonitor.FS_FLOWGRAPH_MONITOR_PREFIX + "." + ConfigurationKeys.FLOWGRAPH_BASE_DIR, "gobblin-flowgraph")
.addPrimitive(FsFlowGraphMonitor.FS_FLOWGRAPH_MONITOR_PREFIX + "." + ConfigurationKeys.FLOWGRAPH_POLLING_INTERVAL, 1)
.addPrimitive(FsFlowGraphMonitor.FS_FLOWGRAPH_MONITOR_PREFIX + "." + FsFlowGraphMonitor.MONITOR_TEMPLATE_CATALOG_CHANGES, true)
.build();
// Create a FSFlowTemplateCatalog instance
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
this.flowTemplateCatalogFolder = new File(TEST_DIR, "template_catalog");
this.flowTemplateCatalogFolder.mkdirs();
FileUtils.copyDirectory(new File(flowTemplateCatalogUri.getPath()), this.flowTemplateCatalogFolder);
Properties properties = new Properties();
this.flowGraphDir.mkdirs();
properties.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, this.flowTemplateCatalogFolder.getAbsolutePath());
Config config = ConfigFactory.parseProperties(properties);
Config templateCatalogCfg = config
.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
this.flowCatalog = Optional.of(new UpdatableFSFlowTemplateCatalog(templateCatalogCfg, new ReentrantReadWriteLock(true)));
//Create a FlowGraph instance with defaults
this.flowGraph = new AtomicReference<>(new BaseFlowGraph());
MultiHopFlowCompiler mhfc = new MultiHopFlowCompiler(config, this.flowGraph);
this.flowGraphMonitor = new FsFlowGraphMonitor(this.config, this.flowCatalog, mhfc, topologySpecMap, new CountDownLatch(1), true);
this.flowGraphMonitor.startUp();
this.flowGraphMonitor.setActive(true);
}
@Test
public void testAddNode() throws Exception {
String file1Contents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam1=value1\n";
String file2Contents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam2=value2\n";
addNode(this.node1Dir, this.node1File, file1Contents);
addNode(this.node2Dir, this.node2File, file2Contents);
// Let the monitor pick up the nodes that were recently added
Thread.sleep(3000);
for (int i = 0; i < 2; i++) {
String nodeId = "node" + (i + 1);
String paramKey = "param" + (i + 1);
String paramValue = "value" + (i + 1);
//Check if nodes have been added to the FlowGraph
DataNode dataNode = this.flowGraph.get().getNode(nodeId);
Assert.assertEquals(dataNode.getId(), nodeId);
Assert.assertTrue(dataNode.isActive());
Assert.assertEquals(dataNode.getRawConfig().getString(paramKey), paramValue);
}
}
@Test (dependsOnMethods = "testAddNode")
public void testAddEdge() throws Exception {
//Build contents of edge file
String fileContents = buildEdgeFileContents("node1", "node2", "edge1", "value1");
addEdge(this.edge1Dir, this.edge1File, fileContents);
// Let the monitor pick up the edges that were recently added
Thread.sleep(3000);
//Check if edge1 has been added to the FlowGraph
testIfEdgeSuccessfullyAdded("node1", "node2", "edge1", "value1");
}
@Test (dependsOnMethods = "testAddEdge")
public void testUpdateEdge() throws Exception {
//Update edge1 file
String fileContents = buildEdgeFileContents("node1", "node2", "edge1", "value2");
addEdge(this.edge1Dir, this.edge1File, fileContents);
// Let the monitor pick up the edges that were recently added
Thread.sleep(3000);
//Check if new edge1 has been added to the FlowGraph
testIfEdgeSuccessfullyAdded("node1", "node2", "edge1", "value2");
}
@Test (dependsOnMethods = "testUpdateEdge")
public void testUpdateNode() throws Exception {
//Update param1 value in node1 and check if updated node is added to the graph
String fileContents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam1=value3\n";
addNode(this.node1Dir, this.node1File, fileContents);
// Let the monitor pick up the edges that were recently added
Thread.sleep(3000);
//Check if node has been updated in the FlowGraph
DataNode dataNode = this.flowGraph.get().getNode("node1");
Assert.assertEquals(dataNode.getId(), "node1");
Assert.assertTrue(dataNode.isActive());
Assert.assertEquals(dataNode.getRawConfig().getString("param1"), "value3");
}
@Test (dependsOnMethods = "testUpdateNode")
public void testSetUpExistingGraph() throws Exception {
// Create a FlowGraph instance with defaults
this.flowGraphMonitor.shutDown();
this.flowGraph = new AtomicReference<>(new BaseFlowGraph());
MultiHopFlowCompiler mhfc = new MultiHopFlowCompiler(config, this.flowGraph);
this.flowGraphMonitor = new FsFlowGraphMonitor(this.config, this.flowCatalog, mhfc, this.topologySpecMap, new CountDownLatch(1), true);
this.flowGraphMonitor.startUp();
this.flowGraphMonitor.setActive(true);
// Let the monitor repopulate the flowgraph
Thread.sleep(3000);
Assert.assertNotNull(this.flowGraph.get().getNode("node1"));
Assert.assertNotNull(this.flowGraph.get().getNode("node2"));
Assert.assertEquals(this.flowGraph.get().getEdges("node1").size(), 1);
}
@Test (dependsOnMethods = "testSetUpExistingGraph")
public void testSharedFlowgraphHelper() throws Exception {
this.flowGraphMonitor.shutDown();
Config sharedFlowgraphConfig = ConfigFactory.empty()
.withValue(ServiceConfigKeys.GOBBLIN_SERVICE_FLOWGRAPH_HELPER_KEY, ConfigValueFactory.fromAnyRef("org.apache.gobblin.service.modules.flowgraph.SharedFlowGraphHelper"))
.withFallback(this.config);
this.flowGraph = new AtomicReference<>(new BaseFlowGraph());
MultiHopFlowCompiler mhfc = new MultiHopFlowCompiler(config, this.flowGraph);
// Set up node 3
File node3Folder = new File(this.flowGraphDir, "node3");
node3Folder.mkdirs();
File node3File = new File(this.sharedNodeFolder, "node3.conf");
String file3Contents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam3=value3\n";
// Have different default values for node 1
File node1File = new File(this.sharedNodeFolder, "node1.properties");
String file1Contents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam2=value10\n";
createNewFile(this.sharedNodeFolder, node3File, file3Contents);
createNewFile(this.sharedNodeFolder, node1File, file1Contents);
this.flowGraphMonitor = new FsFlowGraphMonitor(sharedFlowgraphConfig, this.flowCatalog, mhfc, this.topologySpecMap, new CountDownLatch(1), true);
this.flowGraphMonitor.startUp();
this.flowGraphMonitor.setActive(true);
// Let the monitor repopulate the flowgraph
Thread.sleep(3000);
Assert.assertNotNull(this.flowGraph.get().getNode("node3"));
DataNode node1 = this.flowGraph.get().getNode("node1");
Assert.assertTrue(node1.isActive());
Assert.assertEquals(node1.getRawConfig().getString("param2"), "value10");
}
@Test (dependsOnMethods = "testSharedFlowgraphHelper")
public void testUpdateOnlyTemplates() throws Exception {
Assert.assertEquals(this.flowGraph.get().getEdges("node1").size(), 1);
//If deleting all the templates, the cache of flow templates will be cleared and the flowgraph will be unable to add edges on reload.
cleanUpDir(this.flowTemplateCatalogFolder.getAbsolutePath());
Thread.sleep(3000);
Assert.assertEquals(this.flowGraph.get().getEdges("node1").size(), 0);
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
// Adding the flowtemplates back will make the edges eligible to be added again on reload.
FileUtils.copyDirectory(new File(flowTemplateCatalogUri.getPath()), this.flowTemplateCatalogFolder);
Thread.sleep(3000);
Assert.assertEquals(this.flowGraph.get().getEdges("node1").size(), 1);
}
@Test (dependsOnMethods = "testUpdateOnlyTemplates")
public void testRemoveEdge() throws Exception {
//Node1 has 1 edge before delete
Collection<FlowEdge> edgeSet = this.flowGraph.get().getEdges("node1");
Assert.assertEquals(edgeSet.size(), 1);
File edgeFile = new File(this.flowGraphDir.getAbsolutePath(), node1Dir.getName() + Path.SEPARATOR_CHAR + edge1Dir.getName() + Path.SEPARATOR_CHAR + edge1File.getName());
edgeFile.delete();
// Let the monitor pick up the edges that were recently deleted
Thread.sleep(3000);
//Check if edge1 has been deleted from the graph
edgeSet = this.flowGraph.get().getEdges("node1");
Assert.assertEquals(edgeSet.size(), 0);
}
@Test (dependsOnMethods = "testRemoveEdge")
public void testRemoveNode() throws Exception {
//Ensure node1 and node2 are present in the graph before delete
DataNode node1 = this.flowGraph.get().getNode("node1");
Assert.assertNotNull(node1);
DataNode node2 = this.flowGraph.get().getNode("node2");
Assert.assertNotNull(node2);
File node1FlowGraphFile = new File(this.flowGraphDir.getAbsolutePath(), node1Dir.getName());
File node2FlowGraphFile = new File(this.flowGraphDir.getAbsolutePath(), node2Dir.getName());
//delete node files
FileUtils.deleteDirectory(node1FlowGraphFile);
FileUtils.deleteDirectory(node2FlowGraphFile);
// Let the monitor pick up the edges that were recently deleted
Thread.sleep(3000);
//Check if node1 and node 2 have been deleted from the graph
node1 = this.flowGraph.get().getNode("node1");
Assert.assertNull(node1);
node2 = this.flowGraph.get().getNode("node2");
Assert.assertNull(node2);
}
@AfterClass
public void tearDown() throws Exception {
cleanUpDir(TEST_DIR.toString());
}
private void createNewFile(File dir, File file, String fileContents) throws IOException {
if (!dir.exists()) {
dir.mkdirs();
}
file.createNewFile();
Files.write(fileContents, file, Charsets.UTF_8);
}
private void addNode(File nodeDir, File nodeFile, String fileContents) throws IOException {
createNewFile(nodeDir, nodeFile, fileContents);
File destinationFile = new File(this.flowGraphDir.getAbsolutePath(), nodeDir.getName() + Path.SEPARATOR_CHAR + nodeFile.getName());
logger.info(destinationFile.toString());
if (destinationFile.exists()) {
// clear file
Files.write(new byte[0], destinationFile);
Files.write(fileContents, destinationFile, Charsets.UTF_8);
} else {
FileUtils.moveDirectory(nodeDir, destinationFile.getParentFile());
}
}
private void addEdge(File edgeDir, File edgeFile, String fileContents) throws Exception {
createNewFile(edgeDir, edgeFile, fileContents);
File destinationFile = new File(this.flowGraphDir.getAbsolutePath(), edgeDir.getParentFile().getName() + Path.SEPARATOR_CHAR + edgeDir.getName() + Path.SEPARATOR_CHAR + edgeFile.getName());
if (destinationFile.exists()) {
// clear old properties file
Files.write(new byte[0], destinationFile);
Files.write(fileContents, destinationFile, Charsets.UTF_8);
} else {
FileUtils.moveDirectory(edgeDir, destinationFile.getParentFile());
}
}
private String buildEdgeFileContents(String node1, String node2, String edgeName, String value) {
String fileContents = FlowGraphConfigurationKeys.FLOW_EDGE_SOURCE_KEY + "=" + node1 + "\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_DESTINATION_KEY + "=" + node2 + "\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_NAME_KEY + "=" + edgeName + "\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_IS_ACTIVE_KEY + "=true\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_TEMPLATE_DIR_URI_KEY + "=FS:///flowEdgeTemplate\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_SPEC_EXECUTORS_KEY + "=testExecutor1,testExecutor2\n"
+ "key1=" + value + "\n";
return fileContents;
}
private void testIfEdgeSuccessfullyAdded(String node1, String node2, String edgeName, String value) throws ExecutionException, InterruptedException {
Collection<FlowEdge> edgeSet = this.flowGraph.get().getEdges(node1);
Assert.assertEquals(edgeSet.size(), 1);
FlowEdge flowEdge = edgeSet.iterator().next();
Assert.assertEquals(flowEdge.getId(), Joiner.on("_").join(node1, node2, edgeName));
Assert.assertEquals(flowEdge.getSrc(), node1);
Assert.assertEquals(flowEdge.getDest(), node2);
Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specStore.fs.dir"), "/tmp1");
Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specExecInstance.capabilities"), "s1:d1");
Assert.assertEquals(flowEdge.getExecutors().get(0).getClass().getSimpleName(), "InMemorySpecExecutor");
Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specStore.fs.dir"), "/tmp2");
Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specExecInstance.capabilities"), "s2:d2");
Assert.assertEquals(flowEdge.getExecutors().get(1).getClass().getSimpleName(), "InMemorySpecExecutor");
Assert.assertEquals(flowEdge.getConfig().getString("key1"), value);
}
private void cleanUpDir(String dir) {
File dirToDelete = new File(dir);
// cleanup is flaky on Travis, so retry a few times and then suppress the error if unsuccessful
for (int i = 0; i < 5; i++) {
try {
if (dirToDelete.exists()) {
FileUtils.deleteDirectory(dirToDelete);
}
// if delete succeeded then break out of loop
break;
} catch (IOException e) {
logger.warn("Cleanup delete directory failed for directory: " + dir, e);
}
}
}
} | 3,768 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/monitoring/FsJobStatusRetrieverTestWithoutDagManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.service.ExecutionStatus;
import static org.mockito.Mockito.mock;
public class FsJobStatusRetrieverTestWithoutDagManager extends JobStatusRetrieverTest {
private String stateStoreDir = "/tmp/jobStatusRetrieverTest/statestore";
@BeforeClass
public void setUp() throws Exception {
cleanUpDir();
Config config = ConfigFactory.empty().withValue(FsJobStatusRetriever.CONF_PREFIX + "." + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY,
ConfigValueFactory.fromAnyRef(stateStoreDir));
this.jobStatusRetriever = new FsJobStatusRetriever(config, mock(MultiContextIssueRepository.class));
}
@Test
public void testGetJobStatusesForFlowExecution() throws IOException {
super.testGetJobStatusesForFlowExecution();
}
@Test (dependsOnMethods = "testGetJobStatusesForFlowExecution")
public void testJobTiming() throws Exception {
super.testJobTiming();
}
@Test (dependsOnMethods = "testJobTiming")
public void testOutOfOrderJobTimingEvents() throws IOException {
super.testOutOfOrderJobTimingEvents();
}
@Test (dependsOnMethods = "testJobTiming")
public void testGetJobStatusesForFlowExecution1() {
super.testGetJobStatusesForFlowExecution1();
}
@Test (dependsOnMethods = "testGetJobStatusesForFlowExecution1")
public void testGetLatestExecutionIdsForFlow() throws Exception {
super.testGetLatestExecutionIdsForFlow();
}
@Test (dependsOnMethods = "testGetLatestExecutionIdsForFlow")
public void testGetFlowStatusFromJobStatuses() throws Exception {
long flowExecutionId = 1237L;
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPILED.name());
Assert.assertEquals(ExecutionStatus.$UNKNOWN,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.ORCHESTRATED.name());
Assert.assertEquals(ExecutionStatus.$UNKNOWN,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.ORCHESTRATED.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.RUNNING.name());
Assert.assertEquals(ExecutionStatus.ORCHESTRATED,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.RUNNING.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.RUNNING,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, JobStatusRetriever.NA_KEY, ExecutionStatus.COMPLETE.name(), JOB_ORCHESTRATED_TIME, JOB_ORCHESTRATED_TIME);
Assert.assertEquals(ExecutionStatus.RUNNING,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
addJobStatusToStateStore(flowExecutionId, MY_JOB_NAME_1, ExecutionStatus.COMPLETE.name());
Assert.assertEquals(ExecutionStatus.COMPLETE,
jobStatusRetriever.getFlowStatusFromJobStatuses(jobStatusRetriever.dagManagerEnabled, jobStatusRetriever.getJobStatusesForFlowExecution(FLOW_NAME, FLOW_GROUP, flowExecutionId)));
}
@Override
protected void cleanUpDir() throws Exception {
File specStoreDir = new File(this.stateStoreDir);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
}
} | 3,769 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/monitoring/GitConfigMonitorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.SystemUtils;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.ResetCommand;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.dircache.DirCache;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.RepositoryCache;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.transport.RefSpec;
import org.eclipse.jgit.util.FS;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.ServiceConfigKeys;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class GitConfigMonitorTest {
private static final Logger logger = LoggerFactory.getLogger(GitConfigMonitorTest.class);
private Repository remoteRepo;
private Git gitForPush;
private static final String TEST_DIR = "/tmp/gitConfigTestDir/";
private final File remoteDir = new File(TEST_DIR + "/remote");
private final File cloneDir = new File(TEST_DIR + "/clone");
private final File configDir = new File(cloneDir, "/gobblin-config");
private static final String TEST_FLOW_FILE = "testFlow.pull";
private static final String TEST_FLOW_FILE2 = "testFlow2.pull";
private static final String TEST_FLOW_FILE3 = "testFlow3.pull";
private final File testGroupDir = new File(configDir, "testGroup");
private final File testFlowFile = new File(testGroupDir, TEST_FLOW_FILE);
private final File testFlowFile2 = new File(testGroupDir, TEST_FLOW_FILE2);
private final File testFlowFile3 = new File(testGroupDir, TEST_FLOW_FILE3);
private RefSpec masterRefSpec = new RefSpec("master");
private FlowCatalog flowCatalog;
private SpecCatalogListener mockListener;
private Config config;
private GitConfigMonitor gitConfigMonitor;
  /**
   * Creates a bare git repo plus a working-tree clone, pushes a baseline commit, and starts an
   * active {@link GitConfigMonitor} wired to a {@link FlowCatalog} with a mocked scheduler listener.
   * Test methods commit/push .pull files from the clone and then invoke the monitor directly.
   */
  @BeforeClass
  public void setup() throws Exception {
    // Start from a clean temp dir so stale repos from prior runs cannot interfere.
    cleanUpDir(TEST_DIR);
    // Create a bare repository
    RepositoryCache.FileKey fileKey = RepositoryCache.FileKey.exact(remoteDir, FS.DETECTED);
    this.remoteRepo = fileKey.open(false);
    this.remoteRepo.create(true);
    // Clone the bare repo; this working tree is where config files get authored and pushed from.
    this.gitForPush = Git.cloneRepository().setURI(this.remoteRepo.getDirectory().getAbsolutePath()).setDirectory(cloneDir).call();
    // push an empty commit as a base for detecting changes
    this.gitForPush.commit().setMessage("First commit").call();
    this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
    this.config = ConfigBuilder.create()
        .addPrimitive(GitConfigMonitor.GIT_CONFIG_MONITOR_PREFIX + "." + ConfigurationKeys.GIT_MONITOR_REPO_URI,
            this.remoteRepo.getDirectory().getAbsolutePath())
        .addPrimitive(GitConfigMonitor.GIT_CONFIG_MONITOR_PREFIX + "." + ConfigurationKeys.GIT_MONITOR_REPO_DIR, TEST_DIR + "/jobConfig")
        .addPrimitive(FlowCatalog.FLOWSPEC_STORE_DIR_KEY, TEST_DIR + "flowCatalog")
        .addPrimitive(ConfigurationKeys.GIT_MONITOR_POLLING_INTERVAL, 5)
        .build();
    this.flowCatalog = new FlowCatalog(config);
    // Stub listener so FlowCatalog add/delete callbacks succeed without a real scheduler.
    this.mockListener = mock(SpecCatalogListener.class);
    when(mockListener.getName()).thenReturn(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS);
    when(mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(""));
    this.flowCatalog.addListener(mockListener);
    this.flowCatalog.startAsync().awaitRunning();
    this.gitConfigMonitor = new GitConfigMonitor(this.config, this.flowCatalog);
    this.gitConfigMonitor.setActive(true);
  }
private void cleanUpDir(String dir) {
File specStoreDir = new File(dir);
// cleanup is flaky on Travis, so retry a few times and then suppress the error if unsuccessful
for (int i = 0; i < 5; i++) {
try {
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
// if delete succeeded then break out of loop
break;
} catch (IOException e) {
logger.warn("Cleanup delete directory failed for directory: " + dir, e);
}
}
}
@AfterClass
public void cleanUp() {
if (this.flowCatalog != null) {
this.flowCatalog.stopAsync().awaitTerminated();
}
cleanUpDir(TEST_DIR);
}
private String formConfigFilePath(String groupDir, String fileName) {
return this.configDir.getName() + SystemUtils.FILE_SEPARATOR + groupDir + SystemUtils.FILE_SEPARATOR + fileName;
}
  /**
   * Pushing a new .pull file to the remote and processing changes should create exactly one
   * FlowSpec whose URI and config mirror the file's flow.name/flow.group/param entries.
   */
  @Test
  public void testAddConfig() throws IOException, GitAPIException, URISyntaxException {
    // push a new config file
    this.testGroupDir.mkdirs();
    this.testFlowFile.createNewFile();
    Files.write("flow.name=testFlow\nflow.group=testGroup\nparam1=value1\n", testFlowFile, Charsets.UTF_8);
    // add, commit, push
    this.gitForPush.add().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile.getName()))
        .call();
    this.gitForPush.commit().setMessage("Second commit").call();
    this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
    // Pull the pushed change and apply it to the flow catalog (invoked directly, no polling wait).
    this.gitConfigMonitor.processGitConfigChanges();
    Collection<Spec> specs = this.flowCatalog.getSpecs();
    Assert.assertTrue(specs.size() == 1);
    FlowSpec spec = (FlowSpec) (specs.iterator().next());
    Assert.assertEquals(spec.getUri(), new URI("gobblin-flow:/testGroup/testFlow"));
    Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), "testFlow");
    Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), "testGroup");
    Assert.assertEquals(spec.getConfig().getString("param1"), "value1");
  }
  /**
   * Pushing an updated version of the same .pull file should replace the existing FlowSpec's
   * config in place (param1 changes from value1 to value2) without adding a second spec.
   */
  @Test(dependsOnMethods = "testAddConfig")
  public void testUpdateConfig() throws IOException, GitAPIException, URISyntaxException {
    // push an updated config file
    Files.write("flow.name=testFlow\nflow.group=testGroup\nparam1=value2\n", testFlowFile, Charsets.UTF_8);
    // add, commit, push
    this.gitForPush.add().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile.getName()))
        .call();
    this.gitForPush.commit().setMessage("Third commit").call();
    this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
    // Pull the pushed change and apply it to the flow catalog.
    this.gitConfigMonitor.processGitConfigChanges();
    Collection<Spec> specs = this.flowCatalog.getSpecs();
    Assert.assertTrue(specs.size() == 1);
    FlowSpec spec = (FlowSpec) (specs.iterator().next());
    Assert.assertEquals(spec.getUri(), new URI("gobblin-flow:/testGroup/testFlow"));
    Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), "testFlow");
    Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), "testGroup");
    Assert.assertEquals(spec.getConfig().getString("param1"), "value2");
  }
@Test(dependsOnMethods = "testUpdateConfig")
public void testDeleteConfig() throws IOException, GitAPIException, URISyntaxException {
// delete a config file
testFlowFile.delete();
// flow catalog has 1 entry before the config is deleted
Collection<Spec> specs = this.flowCatalog.getSpecs();
Assert.assertTrue(specs.size() == 1);
// add, commit, push
DirCache ac = this.gitForPush.rm().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile.getName()))
.call();
RevCommit cc = this.gitForPush.commit().setMessage("Fourth commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
this.gitConfigMonitor.processGitConfigChanges();
specs = this.flowCatalog.getSpecs();
Assert.assertTrue(specs.size() == 0);
}
@Test(dependsOnMethods = "testDeleteConfig")
public void testForcedPushConfig() throws IOException, GitAPIException, URISyntaxException {
  // Push two new config files.
  this.testGroupDir.mkdirs();
  this.testFlowFile.createNewFile();
  Files.write("flow.name=testFlow\nflow.group=testGroup\nparam1=value1\n", testFlowFile, Charsets.UTF_8);
  this.testFlowFile2.createNewFile();
  Files.write("flow.name=testFlow2\nflow.group=testGroup\nparam1=value2\n", testFlowFile2, Charsets.UTF_8);
  // add, commit, push
  this.gitForPush.add().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile.getName()))
      .call();
  this.gitForPush.add().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile2.getName()))
      .call();
  this.gitForPush.commit().setMessage("Fifth commit").call();
  this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
  this.gitConfigMonitor.processGitConfigChanges();
  Collection<Spec> specs = this.flowCatalog.getSpecs();
  Assert.assertEquals(specs.size(), 2);
  // Sort by URI so the assertions are deterministic regardless of catalog iteration order.
  List<Spec> specList = Lists.newArrayList(specs);
  specList.sort(Comparator.comparing(Spec::getUri));
  FlowSpec spec = (FlowSpec) specList.get(0);
  Assert.assertEquals(spec.getUri(), new URI("gobblin-flow:/testGroup/testFlow"));
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), "testFlow");
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), "testGroup");
  Assert.assertEquals(spec.getConfig().getString("param1"), "value1");
  spec = (FlowSpec) specList.get(1);
  Assert.assertEquals(spec.getUri(), new URI("gobblin-flow:/testGroup/testFlow2"));
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), "testFlow2");
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), "testGroup");
  Assert.assertEquals(spec.getConfig().getString("param1"), "value2");
  // Rewind the branch one commit and force-push to simulate a non-fast-forward (conflicting)
  // update of the remote.
  this.gitForPush.reset().setMode(ResetCommand.ResetType.HARD).setRef("HEAD~1").call();
  this.gitForPush.push().setForce(true).setRemote("origin").setRefSpecs(this.masterRefSpec).call();
  // Add new files on top of the rewritten history.
  this.testGroupDir.mkdirs();
  this.testFlowFile2.createNewFile();
  Files.write("flow.name=testFlow2\nflow.group=testGroup\nparam1=value4\n", testFlowFile2, Charsets.UTF_8);
  this.testFlowFile3.createNewFile();
  Files.write("flow.name=testFlow3\nflow.group=testGroup\nparam1=value5\n", testFlowFile3, Charsets.UTF_8);
  // add, commit, push
  this.gitForPush.add().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile2.getName()))
      .call();
  this.gitForPush.add().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile3.getName()))
      .call();
  this.gitForPush.commit().setMessage("Sixth commit").call();
  this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
  this.gitConfigMonitor.processGitConfigChanges();
  // The monitor should have recovered from the forced push and reflect the rewritten history.
  specs = this.flowCatalog.getSpecs();
  Assert.assertEquals(specs.size(), 2);
  specList = Lists.newArrayList(specs);
  specList.sort(Comparator.comparing(Spec::getUri));
  spec = (FlowSpec) specList.get(0);
  Assert.assertEquals(spec.getUri(), new URI("gobblin-flow:/testGroup/testFlow2"));
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), "testFlow2");
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), "testGroup");
  Assert.assertEquals(spec.getConfig().getString("param1"), "value4");
  spec = (FlowSpec) specList.get(1);
  Assert.assertEquals(spec.getUri(), new URI("gobblin-flow:/testGroup/testFlow3"));
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), "testFlow3");
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), "testGroup");
  Assert.assertEquals(spec.getConfig().getString("param1"), "value5");
  // Rewind history completely so the next test starts from an empty config state.
  this.gitForPush.reset().setMode(ResetCommand.ResetType.HARD).setRef("HEAD~4").call();
  this.gitForPush.push().setForce(true).setRemote("origin").setRefSpecs(this.masterRefSpec).call();
  this.gitConfigMonitor.processGitConfigChanges();
  specs = this.flowCatalog.getSpecs();
  Assert.assertEquals(specs.size(), 0);
}
@Test(dependsOnMethods = "testForcedPushConfig")
public void testPollingConfig() throws IOException, GitAPIException, URISyntaxException, InterruptedException {
  // Push a new config file.
  this.testGroupDir.mkdirs();
  this.testFlowFile.createNewFile();
  Files.write("flow.name=testFlow\nflow.group=testGroup\nparam1=value20\n", testFlowFile, Charsets.UTF_8);
  // add, commit, push
  this.gitForPush.add().addFilepattern(formConfigFilePath(this.testGroupDir.getName(), this.testFlowFile.getName()))
      .call();
  this.gitForPush.commit().setMessage("Seventh commit").call();
  this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
  // Nothing is picked up until the monitor's polling loop is started.
  Collection<Spec> specs = this.flowCatalog.getSpecs();
  Assert.assertEquals(specs.size(), 0);
  this.gitConfigMonitor.startAsync().awaitRunning();
  // The monitor is configured with a 5-second polling interval (see setup), so waiting twice
  // that guarantees at least one poll has fired. NOTE(review): a fixed sleep is inherently
  // timing-sensitive; consider polling-with-timeout for the assertion if this proves flaky.
  TimeUnit.SECONDS.sleep(10);
  specs = this.flowCatalog.getSpecs();
  Assert.assertEquals(specs.size(), 1);
  FlowSpec spec = (FlowSpec) (specs.iterator().next());
  Assert.assertEquals(spec.getUri(), new URI("gobblin-flow:/testGroup/testFlow"));
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), "testFlow");
  Assert.assertEquals(spec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), "testGroup");
  Assert.assertEquals(spec.getConfig().getString("param1"), "value20");
}
}
| 3,770 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/monitoring/GitFlowGraphMonitorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.monitoring;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.gobblin.service.modules.flow.MultiHopFlowCompiler;
import org.apache.gobblin.service.modules.flowgraph.FlowGraph;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.eclipse.jgit.dircache.DirCache;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.RepositoryCache;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.transport.RefSpec;
import org.eclipse.jgit.util.FS;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flow.MultiHopFlowCompilerTest;
import org.apache.gobblin.service.modules.flowgraph.BaseFlowGraph;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowEdge;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog;
public class GitFlowGraphMonitorTest {
private static final Logger logger = LoggerFactory.getLogger(GitFlowGraphMonitor.class);
private Repository remoteRepo;
private Git gitForPush;
private static final String TEST_DIR = "/tmp/gitFlowGraphTestDir";
private final File remoteDir = new File(TEST_DIR + "/remote");
private final File cloneDir = new File(TEST_DIR + "/clone");
private final File flowGraphDir = new File(cloneDir, "/gobblin-flowgraph");
private static final String NODE_1_FILE = "node1.properties";
private final File node1Dir = new File(flowGraphDir, "node1");
private final File node1File = new File(node1Dir, NODE_1_FILE);
private static final String NODE_2_FILE = "node2.properties";
private final File node2Dir = new File(flowGraphDir, "node2");
private final File node2File = new File(node2Dir, NODE_2_FILE);
private final File edge1Dir = new File(node1Dir, "node2");
private final File edge1File = new File(edge1Dir, "edge1.properties");
private RefSpec masterRefSpec = new RefSpec("master");
private Optional<FSFlowTemplateCatalog> flowCatalog;
private Config config;
private AtomicReference<FlowGraph> flowGraph;
private GitFlowGraphMonitor gitFlowGraphMonitor;
@BeforeClass
public void setUp() throws Exception {
cleanUpDir(TEST_DIR);
// Create a bare repository
RepositoryCache.FileKey fileKey = RepositoryCache.FileKey.exact(remoteDir, FS.DETECTED);
this.remoteRepo = fileKey.open(false);
this.remoteRepo.create(true);
this.gitForPush = Git.cloneRepository().setURI(this.remoteRepo.getDirectory().getAbsolutePath()).setDirectory(cloneDir).call();
// push an empty commit as a base for detecting changes
this.gitForPush.commit().setMessage("First commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
URI topologyCatalogUri = this.getClass().getClassLoader().getResource("topologyspec_catalog").toURI();
Map<URI, TopologySpec> topologySpecMap = MultiHopFlowCompilerTest.buildTopologySpecMap(topologyCatalogUri);
this.config = ConfigBuilder.create()
.addPrimitive(GitFlowGraphMonitor.GIT_FLOWGRAPH_MONITOR_PREFIX + "."
+ ConfigurationKeys.GIT_MONITOR_REPO_URI, this.remoteRepo.getDirectory().getAbsolutePath())
.addPrimitive(GitFlowGraphMonitor.GIT_FLOWGRAPH_MONITOR_PREFIX + "." + ConfigurationKeys.GIT_MONITOR_REPO_DIR, TEST_DIR + "/git-flowgraph")
.addPrimitive(GitFlowGraphMonitor.GIT_FLOWGRAPH_MONITOR_PREFIX + "." + ConfigurationKeys.GIT_MONITOR_POLLING_INTERVAL, 5)
.build();
// Create a FSFlowTemplateCatalog instance
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
Properties properties = new Properties();
properties.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, flowTemplateCatalogUri.toString());
Config config = ConfigFactory.parseProperties(properties);
Config templateCatalogCfg = config
.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
this.flowCatalog = Optional.of(new FSFlowTemplateCatalog(templateCatalogCfg));
this.flowGraph = new AtomicReference<>(new BaseFlowGraph());
MultiHopFlowCompiler mhfc = new MultiHopFlowCompiler(config, this.flowGraph);
this.gitFlowGraphMonitor = new GitFlowGraphMonitor(this.config, this.flowCatalog, mhfc, topologySpecMap, new CountDownLatch(1), true);
this.gitFlowGraphMonitor.setActive(true);
}
@Test
public void testAddNode() throws IOException, GitAPIException {
String file1Contents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam1=value1\n";
String file2Contents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam2=value2\n";
addNode(this.node1Dir, this.node1File, file1Contents);
addNode(this.node2Dir, this.node2File, file2Contents);
this.gitFlowGraphMonitor.processGitConfigChanges();
for (int i = 0; i < 1; i++) {
String nodeId = "node" + (i + 1);
String paramKey = "param" + (i + 1);
String paramValue = "value" + (i + 1);
//Check if nodes have been added to the FlowGraph
DataNode dataNode = this.flowGraph.get().getNode(nodeId);
Assert.assertEquals(dataNode.getId(), nodeId);
Assert.assertTrue(dataNode.isActive());
Assert.assertEquals(dataNode.getRawConfig().getString(paramKey), paramValue);
}
}
@Test (dependsOnMethods = "testAddNode")
public void testAddEdge()
throws IOException, GitAPIException, ExecutionException, InterruptedException {
//Build contents of edge file
String fileContents = buildEdgeFileContents("node1", "node2", "edge1", "value1");
addEdge(this.edge1Dir, this.edge1File, fileContents);
this.gitFlowGraphMonitor.processGitConfigChanges();
//Check if edge1 has been added to the FlowGraph
testIfEdgeSuccessfullyAdded("node1", "node2", "edge1", "value1");
}
@Test (dependsOnMethods = "testAddNode")
public void testUpdateEdge()
throws IOException, GitAPIException, URISyntaxException, ExecutionException, InterruptedException {
//Update edge1 file
String fileContents = buildEdgeFileContents("node1", "node2", "edge1", "value2");
addEdge(this.edge1Dir, this.edge1File, fileContents);
// add, commit, push
this.gitForPush.add().addFilepattern(formEdgeFilePath(this.edge1Dir.getParentFile().getName(), this.edge1Dir.getName(), this.edge1File.getName())).call();
this.gitForPush.commit().setMessage("Edge commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
this.gitFlowGraphMonitor.processGitConfigChanges();
//Check if new edge1 has been added to the FlowGraph
testIfEdgeSuccessfullyAdded("node1", "node2", "edge1", "value2");
}
@Test (dependsOnMethods = "testUpdateEdge")
public void testUpdateNode()
throws IOException, GitAPIException, URISyntaxException, ExecutionException, InterruptedException {
//Update param1 value in node1 and check if updated node is added to the graph
String fileContents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam1=value3\n";
addNode(this.node1Dir, this.node1File, fileContents);
this.gitFlowGraphMonitor.processGitConfigChanges();
//Check if node has been updated in the FlowGraph
DataNode dataNode = this.flowGraph.get().getNode("node1");
Assert.assertEquals(dataNode.getId(), "node1");
Assert.assertTrue(dataNode.isActive());
Assert.assertEquals(dataNode.getRawConfig().getString("param1"), "value3");
}
@Test (dependsOnMethods = "testUpdateNode")
public void testRemoveEdge() throws GitAPIException, IOException {
// delete a config file
edge1File.delete();
//Node1 has 1 edge before delete
Collection<FlowEdge> edgeSet = this.flowGraph.get().getEdges("node1");
Assert.assertEquals(edgeSet.size(), 1);
// delete, commit, push
DirCache ac = this.gitForPush.rm().addFilepattern(formEdgeFilePath(this.edge1Dir.getParentFile().getName(),
this.edge1Dir.getName(), this.edge1File.getName())).call();
RevCommit cc = this.gitForPush.commit().setMessage("Edge remove commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
this.gitFlowGraphMonitor.processGitConfigChanges();
//Check if edge1 has been deleted from the graph
edgeSet = this.flowGraph.get().getEdges("node1");
Assert.assertTrue(edgeSet.size() == 0);
}
@Test (dependsOnMethods = "testRemoveEdge")
public void testRemoveNode() throws GitAPIException, IOException {
//delete node files
node1File.delete();
node2File.delete();
//Ensure node1 and node2 are present in the graph before delete
DataNode node1 = this.flowGraph.get().getNode("node1");
Assert.assertNotNull(node1);
DataNode node2 = this.flowGraph.get().getNode("node2");
Assert.assertNotNull(node2);
// delete, commit, push
this.gitForPush.rm().addFilepattern(formNodeFilePath(this.node1Dir.getName(), this.node1File.getName())).call();
this.gitForPush.rm().addFilepattern(formNodeFilePath(this.node2Dir.getName(), this.node2File.getName())).call();
this.gitForPush.commit().setMessage("Node remove commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
this.gitFlowGraphMonitor.processGitConfigChanges();
//Check if node1 and node 2 have been deleted from the graph
node1 = this.flowGraph.get().getNode("node1");
Assert.assertNull(node1);
node2 = this.flowGraph.get().getNode("node2");
Assert.assertNull(node2);
}
@Test (dependsOnMethods = "testRemoveNode")
public void testChangesReorder() throws GitAPIException, IOException, ExecutionException, InterruptedException {
String node1FileContents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam1=value1\n";
String node2FileContents = FlowGraphConfigurationKeys.DATA_NODE_IS_ACTIVE_KEY + "=true\nparam2=value2\n";
String edgeFileContents = buildEdgeFileContents("node1", "node2", "edge1", "value1");
createNewFile(this.node1Dir, this.node1File, node1FileContents);
createNewFile(this.node2Dir, this.node2File, node2FileContents);
createNewFile(this.edge1Dir, this.edge1File, edgeFileContents);
// add, commit, push
this.gitForPush.add().addFilepattern(formNodeFilePath(this.node1Dir.getName(), this.node1File.getName())).call();
this.gitForPush.add().addFilepattern(formNodeFilePath(this.node2Dir.getName(), this.node2File.getName())).call();
this.gitForPush.commit().setMessage("Add nodes commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
this.gitForPush.add().addFilepattern(formEdgeFilePath(this.edge1Dir.getParentFile().getName(), this.edge1Dir.getName(), this.edge1File.getName())).call();
this.gitForPush.commit().setMessage("Add nodes and edges commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
this.gitFlowGraphMonitor.processGitConfigChanges();
//Ensure node1 and node2 are present in the graph
DataNode node1 = this.flowGraph.get().getNode("node1");
Assert.assertNotNull(node1);
DataNode node2 = this.flowGraph.get().getNode("node2");
Assert.assertNotNull(node2);
testIfEdgeSuccessfullyAdded("node1", "node2", "edge1", "value1");
//Delete node1, edge node1->node2 files
node1File.delete();
edge1File.delete();
//Commit1: delete node1 and edge node1->node2
this.gitForPush.rm().addFilepattern(formNodeFilePath(this.node1Dir.getName(), this.node1File.getName())).call();
this.gitForPush.rm().addFilepattern(formEdgeFilePath(this.edge1Dir.getParentFile().getName(), this.edge1Dir.getName(), this.edge1File.getName())).call();
this.gitForPush.commit().setMessage("Delete node1 and edge1 commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
//Commit2: add node1 back
createNewFile(this.node1Dir, this.node1File, node1FileContents);
this.gitForPush.add().addFilepattern(formNodeFilePath(this.node1Dir.getName(), this.node1File.getName())).call();
this.gitForPush.commit().setMessage("Add node1 commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
this.gitFlowGraphMonitor.processGitConfigChanges();
node1 = this.flowGraph.get().getNode("node1");
Assert.assertNotNull(node1);
Assert.assertEquals(this.flowGraph.get().getEdges(node1).size(), 0);
}
@AfterClass
public void tearDown() throws Exception {
cleanUpDir(TEST_DIR);
}
private void createNewFile(File dir, File file, String fileContents) throws IOException {
dir.mkdirs();
file.createNewFile();
Files.write(fileContents, file, Charsets.UTF_8);
}
private void addNode(File nodeDir, File nodeFile, String fileContents) throws IOException, GitAPIException {
createNewFile(nodeDir, nodeFile, fileContents);
// add, commit, push node
this.gitForPush.add().addFilepattern(formNodeFilePath(nodeDir.getName(), nodeFile.getName())).call();
this.gitForPush.commit().setMessage("Node commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
}
private void addEdge(File edgeDir, File edgeFile, String fileContents) throws IOException, GitAPIException {
createNewFile(edgeDir, edgeFile, fileContents);
// add, commit, push edge
this.gitForPush.add().addFilepattern(formEdgeFilePath(edgeDir.getParentFile().getName(), edgeDir.getName(), edgeFile.getName())).call();
this.gitForPush.commit().setMessage("Edge commit").call();
this.gitForPush.push().setRemote("origin").setRefSpecs(this.masterRefSpec).call();
}
private String buildEdgeFileContents(String node1, String node2, String edgeName, String value) {
String fileContents = FlowGraphConfigurationKeys.FLOW_EDGE_SOURCE_KEY + "=" + node1 + "\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_DESTINATION_KEY + "=" + node2 + "\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_NAME_KEY + "=" + edgeName + "\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_IS_ACTIVE_KEY + "=true\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_TEMPLATE_DIR_URI_KEY + "=FS:///flowEdgeTemplate\n"
+ FlowGraphConfigurationKeys.FLOW_EDGE_SPEC_EXECUTORS_KEY + "=testExecutor1,testExecutor2\n"
+ "key1=" + value + "\n";
return fileContents;
}
private void testIfEdgeSuccessfullyAdded(String node1, String node2, String edgeName, String value) throws ExecutionException, InterruptedException {
Collection<FlowEdge> edgeSet = this.flowGraph.get().getEdges(node1);
Assert.assertEquals(edgeSet.size(), 1);
FlowEdge flowEdge = edgeSet.iterator().next();
Assert.assertEquals(flowEdge.getId(), Joiner.on("_").join(node1, node2, edgeName));
Assert.assertEquals(flowEdge.getSrc(), node1);
Assert.assertEquals(flowEdge.getDest(), node2);
Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specStore.fs.dir"), "/tmp1");
Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specExecInstance.capabilities"), "s1:d1");
Assert.assertEquals(flowEdge.getExecutors().get(0).getClass().getSimpleName(), "InMemorySpecExecutor");
Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specStore.fs.dir"), "/tmp2");
Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specExecInstance.capabilities"), "s2:d2");
Assert.assertEquals(flowEdge.getExecutors().get(1).getClass().getSimpleName(), "InMemorySpecExecutor");
Assert.assertEquals(flowEdge.getConfig().getString("key1"), value);
}
private String formNodeFilePath(String groupDir, String fileName) {
return this.flowGraphDir.getName() + SystemUtils.FILE_SEPARATOR + groupDir + SystemUtils.FILE_SEPARATOR + fileName;
}
private String formEdgeFilePath(String parentDir, String groupDir, String fileName) {
return this.flowGraphDir.getName() + SystemUtils.FILE_SEPARATOR + parentDir + SystemUtils.FILE_SEPARATOR + groupDir + SystemUtils.FILE_SEPARATOR + fileName;
}
private void cleanUpDir(String dir) {
File specStoreDir = new File(dir);
// cleanup is flaky on Travis, so retry a few times and then suppress the error if unsuccessful
for (int i = 0; i < 5; i++) {
try {
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
// if delete succeeded then break out of loop
break;
} catch (IOException e) {
logger.warn("Cleanup delete directory failed for directory: " + dir, e);
}
}
}
} | 3,771 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/topology/ConfigBasedTopologySpecFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.topology;
import java.util.Collection;
import java.util.Iterator;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.util.ConfigUtils;
public class ConfigBasedTopologySpecFactoryTest {
private Config _config;
private ConfigBasedTopologySpecFactory _configBasedTopologySpecFactory;
@BeforeClass
public void setup() throws Exception {
String topology1 = "cluster1";
String topology2 = "azkaban1";
// Global properties
Properties properties = new Properties();
properties.put(ServiceConfigKeys.TOPOLOGYSPEC_FACTORY_KEY, ConfigBasedTopologySpecFactory.class.getCanonicalName());
properties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY, topology1 + "," + topology2);
// Topology Cluster1 properties
String topology1Prefix = ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + topology1 + ".";
properties.put(topology1Prefix + ServiceConfigKeys.TOPOLOGYSPEC_DESCRIPTION_KEY, "Topology for cluster");
properties.put(topology1Prefix + ServiceConfigKeys.TOPOLOGYSPEC_VERSION_KEY, "1");
properties.put(topology1Prefix + ServiceConfigKeys.TOPOLOGYSPEC_URI_KEY, "/mySpecs/" + topology1);
properties.put(topology1Prefix + ServiceConfigKeys.SPEC_EXECUTOR_KEY,
ServiceConfigKeys.DEFAULT_SPEC_EXECUTOR);
properties.put(topology1Prefix + ConfigurationKeys.SPECEXECUTOR_INSTANCE_CAPABILITIES_KEY, "salesforce:nosql");
// Topology Azkaban1 properties
String topology2Prefix = ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + topology2 + ".";
properties.put(topology2Prefix + ServiceConfigKeys.TOPOLOGYSPEC_DESCRIPTION_KEY, "Topology for Azkaban");
properties.put(topology2Prefix + ServiceConfigKeys.TOPOLOGYSPEC_VERSION_KEY, "2");
properties.put(topology2Prefix + ServiceConfigKeys.TOPOLOGYSPEC_URI_KEY, "/mySpecs/" + topology2);
properties.put(topology2Prefix + ServiceConfigKeys.SPEC_EXECUTOR_KEY,
ServiceConfigKeys.DEFAULT_SPEC_EXECUTOR);
properties.put(topology2Prefix + ConfigurationKeys.SPECEXECUTOR_INSTANCE_CAPABILITIES_KEY, "nosql:hdfs");
_config = ConfigUtils.propertiesToConfig(properties);
_configBasedTopologySpecFactory = new ConfigBasedTopologySpecFactory(_config);
}
@AfterClass
public void cleanUp() throws Exception {
}
@Test
public void testGetTopologies() {
Collection<TopologySpec> topologySpecs = _configBasedTopologySpecFactory.getTopologies();
Assert.assertTrue(topologySpecs.size() == 2, "Expected 2 topologies but received: " + topologySpecs.size());
Iterator<TopologySpec> topologySpecIterator = topologySpecs.iterator();
TopologySpec topologySpec1 = topologySpecIterator.next();
Assert.assertTrue(topologySpec1.getDescription().equals("Topology for cluster"),
"Description did not match with construction");
Assert.assertTrue(topologySpec1.getVersion().equals("1"),
"Version did not match with construction");
TopologySpec topologySpec2 = topologySpecIterator.next();
Assert.assertTrue(topologySpec2.getDescription().equals("Topology for Azkaban"),
"Description did not match with construction");
Assert.assertTrue(topologySpec2.getVersion().equals("2"),
"Version did not match with construction");
}
} | 3,772 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/core/GobblinServiceRedirectTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.core;
import java.io.File;
import java.net.InetAddress;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.containers.MySQLContainer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import com.linkedin.data.template.StringMap;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.RestLiResponseException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.MysqlJobStatusStateStoreFactory;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.FlowConfig;
import org.apache.gobblin.service.FlowConfigClient;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.Schedule;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.TestServiceDatabaseConfig;
import org.apache.gobblin.service.modules.utils.HelixUtils;
import org.apache.gobblin.service.monitoring.FsJobStatusRetriever;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PortUtils;
@Test
public class GobblinServiceRedirectTest {
private static final Logger logger = LoggerFactory.getLogger(GobblinServiceRedirectTest.class);
private static final String QUARTZ_INSTANCE_NAME = "org.quartz.scheduler.instanceName";
private static final String QUARTZ_THREAD_POOL_COUNT = "org.quartz.threadPool.threadCount";
private static final File BASE_PATH1 = Files.createTempDir();
private static final String NODE_1_SERVICE_WORK_DIR = new Path(BASE_PATH1.getAbsolutePath(), "serviceWorkDirNode1").toString();
// Node 1 spec/state store locations, rooted under the temp dir BASE_PATH1 (declared above this window).
private static final String NODE_1_TOPOLOGY_SPEC_STORE_DIR = new Path(BASE_PATH1.getAbsolutePath(), "topologyTestSpecStoreNode1").toString();
private static final String NODE_1_FLOW_SPEC_STORE_DIR = new Path(BASE_PATH1.getAbsolutePath(), "flowTestSpecStore").toString();
private static final String NODE_1_JOB_STATUS_STATE_STORE_DIR = new Path(BASE_PATH1.getAbsolutePath(), "fsJobStatusRetriever").toString();
// Node 2 gets its own temp root so the two service instances keep separate local state.
private static final File BASE_PATH2 = Files.createTempDir();
private static final String NODE_2_SERVICE_WORK_DIR = new Path(BASE_PATH2.getAbsolutePath(), "serviceWorkDirNode2").toString();
private static final String NODE_2_TOPOLOGY_SPEC_STORE_DIR = new Path(BASE_PATH2.getAbsolutePath(), "topologyTestSpecStoreNode2").toString();
private static final String NODE_2_FLOW_SPEC_STORE_DIR = new Path(BASE_PATH2.getAbsolutePath(), "flowTestSpecStore").toString();
private static final String NODE_2_JOB_STATUS_STATE_STORE_DIR = new Path(BASE_PATH2.getAbsolutePath(), "fsJobStatusRetriever").toString();
// Helix cluster shared by both nodes; leader election decides which node accepts writes.
private static final String TEST_HELIX_CLUSTER_NAME = "testRedirectGobblinServiceCluster";
// Two flow identities: one created on the leader, one attempted on the non-leader.
private static final String TEST_GROUP_NAME_1 = "testRedirectGroup1";
private static final String TEST_FLOW_NAME_1 = "testRedirectFlow1";
private static final String TEST_SCHEDULE_1 = "0 1/0 * ? * *";
private static final String TEST_TEMPLATE_URI_1 = "FS:///templates/test.template";
private static final String TEST_GROUP_NAME_2 = "testRedirectGroup2";
private static final String TEST_FLOW_NAME_2 = "testRedirectFlow2";
private static final String TEST_SCHEDULE_2 = "0 1/0 * ? * *";
private static final String TEST_TEMPLATE_URI_2 = "FS:///templates/test.template";
private static final String TEST_GOBBLIN_EXECUTOR_NAME = "testRedirectGobblinExecutor";
private static final String TEST_SOURCE_NAME = "testSource";
private static final String TEST_SINK_NAME = "testSink";
// Default ports; overwritten with random free ports in setup().
private String port1 = "10000";
private String port2 = "20000";
// PREFIX + SERVICE_NAME are used to build the leader URL the non-leader returns in error details.
private static final String PREFIX = "https://";
private static final String SERVICE_NAME = "gobblinServiceTest";
private GobblinServiceManager node1GobblinServiceManager;
private FlowConfigClient node1FlowConfigClient;
private GobblinServiceManager node2GobblinServiceManager;
private FlowConfigClient node2FlowConfigClient;
// Embedded ZooKeeper backing the Helix cluster.
private TestingServer testingZKServer;
private Properties node1ServiceCoreProperties;
private Properties node2ServiceCoreProperties;
// MySQL test container backing the service database.
private MySQLContainer mysql;
@BeforeClass
public void setup() throws Exception {
port1 = Integer.toString(new PortUtils.ServerSocketPortLocator().random());
port2 = Integer.toString(new PortUtils.ServerSocketPortLocator().random());
BASE_PATH1.deleteOnExit();
BASE_PATH2.deleteOnExit();
// Use a random ZK port
this.testingZKServer = new TestingServer(-1);
logger.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());
HelixUtils.createGobblinHelixCluster(testingZKServer.getConnectString(), TEST_HELIX_CLUSTER_NAME);
ITestMetastoreDatabase testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
Properties commonServiceCoreProperties = new Properties();
mysql = new MySQLContainer("mysql:" + TestServiceDatabaseConfig.MysqlVersion);
mysql.start();
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_URL_KEY, mysql.getJdbcUrl());
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_USERNAME, mysql.getUsername());
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_PASSWORD, mysql.getPassword());
commonServiceCoreProperties.put(ServiceConfigKeys.ZK_CONNECTION_STRING_KEY, testingZKServer.getConnectString());
commonServiceCoreProperties.put(ServiceConfigKeys.HELIX_CLUSTER_NAME_KEY, TEST_HELIX_CLUSTER_NAME);
commonServiceCoreProperties.put(ServiceConfigKeys.HELIX_INSTANCE_NAME_KEY, "GaaS_" + UUID.randomUUID().toString());
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY , TEST_GOBBLIN_EXECUTOR_NAME);
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".description",
"StandaloneTestExecutor");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".version",
"1");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".uri",
"gobblinExecutor");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".specExecutorInstance",
"org.gobblin.service.InMemorySpecExecutor");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".specExecInstance.capabilities",
TEST_SOURCE_NAME + ":" + TEST_SINK_NAME);
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_USER_KEY, "testUser");
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, "testPassword");
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_URL_KEY, testMetastoreDatabase.getJdbcUrl());
commonServiceCoreProperties.put("zookeeper.connect", testingZKServer.getConnectString());
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_FACTORY_CLASS_KEY, MysqlJobStatusStateStoreFactory.class.getName());
commonServiceCoreProperties.put(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_STATUS_MONITOR_ENABLED_KEY, false);
commonServiceCoreProperties.put(ServiceConfigKeys.FORCE_LEADER, true);
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_URL_PREFIX, PREFIX);
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_NAME, SERVICE_NAME);
node1ServiceCoreProperties = new Properties();
node1ServiceCoreProperties.putAll(commonServiceCoreProperties);
node1ServiceCoreProperties.put(ConfigurationKeys.TOPOLOGYSPEC_STORE_DIR_KEY, NODE_1_TOPOLOGY_SPEC_STORE_DIR);
node1ServiceCoreProperties.put(FlowCatalog.FLOWSPEC_STORE_DIR_KEY, NODE_1_FLOW_SPEC_STORE_DIR);
node1ServiceCoreProperties.put(FsJobStatusRetriever.CONF_PREFIX + "." + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, NODE_1_JOB_STATUS_STATE_STORE_DIR);
node1ServiceCoreProperties.put(QUARTZ_INSTANCE_NAME, "RedirectQuartzScheduler1");
node1ServiceCoreProperties.put(QUARTZ_THREAD_POOL_COUNT, 3);
node1ServiceCoreProperties.put(ServiceConfigKeys.SERVICE_PORT, port1);
node2ServiceCoreProperties = new Properties();
node2ServiceCoreProperties.putAll(commonServiceCoreProperties);
node2ServiceCoreProperties.put(ConfigurationKeys.TOPOLOGYSPEC_STORE_DIR_KEY, NODE_2_TOPOLOGY_SPEC_STORE_DIR);
node2ServiceCoreProperties.put(FlowCatalog.FLOWSPEC_STORE_DIR_KEY, NODE_2_FLOW_SPEC_STORE_DIR);
node2ServiceCoreProperties.put(FsJobStatusRetriever.CONF_PREFIX + "." + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, NODE_2_JOB_STATUS_STATE_STORE_DIR);
node2ServiceCoreProperties.put(QUARTZ_INSTANCE_NAME, "RedirectQuartzScheduler2");
node2ServiceCoreProperties.put(QUARTZ_THREAD_POOL_COUNT, 3);
node2ServiceCoreProperties.put(ServiceConfigKeys.SERVICE_PORT, port2);
// Start Node 1
this.node1GobblinServiceManager = GobblinServiceManager.create("RedirectCoreService1", "1",
ConfigUtils.propertiesToConfig(node1ServiceCoreProperties), new Path(NODE_1_SERVICE_WORK_DIR));
this.node1GobblinServiceManager.start();
// Start Node 2
this.node2GobblinServiceManager = GobblinServiceManager.create("RedirectCoreService2", "2",
ConfigUtils.propertiesToConfig(node2ServiceCoreProperties), new Path(NODE_2_SERVICE_WORK_DIR));
this.node2GobblinServiceManager.start();
// Initialize Node 1 Client
Map<String, String> transportClientProperties = Maps.newHashMap();
transportClientProperties.put(HttpClientFactory.HTTP_REQUEST_TIMEOUT, "10000");
this.node1FlowConfigClient = new FlowConfigClient(String.format("http://localhost:%s/",
this.node1GobblinServiceManager.restliServer.getPort()), transportClientProperties);
// Initialize Node 2 Client
this.node2FlowConfigClient = new FlowConfigClient(String.format("http://localhost:%s/",
this.node2GobblinServiceManager.restliServer.getPort()), transportClientProperties);
}
@AfterClass
public void cleanUp() throws Exception {
// Tears down both service nodes, the embedded ZooKeeper, and the MySQL container.
// Each step is isolated so one failure cannot abort the remaining teardown.
// Shutdown Node 1
try {
logger.info("+++++++++++++++++++ start shutdown node1");
this.node1GobblinServiceManager.stop();
} catch (Exception e) {
logger.warn("Could not cleanly stop Node 1 of Gobblin Service", e);
}
// Shutdown Node 2
try {
logger.info("+++++++++++++++++++ start shutdown node2");
this.node2GobblinServiceManager.stop();
} catch (Exception e) {
logger.warn("Could not cleanly stop Node 2 of Gobblin Service", e);
}
// Stop Zookeeper
try {
this.testingZKServer.close();
} catch (Exception e) {
logger.warn("Could not cleanly stop Testing Zookeeper", e);
}
// Guarded: if setup() failed before the container was created/started, a bare
// mysql.stop() would throw and mask the original failure.
try {
if (mysql != null) {
mysql.stop();
}
} catch (Exception e) {
logger.warn("Could not cleanly stop MySQL test container", e);
}
}
@Test
public void testCreate() throws Exception {
// Flow properties shared by both flow configs.
Map<String, String> props = Maps.newHashMap();
props.put("param1", "value1");
props.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, TEST_SOURCE_NAME);
props.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, TEST_SINK_NAME);
// One config targeted at the leader, one at the non-leader.
FlowConfig configForLeader = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_GROUP_NAME_1).setFlowName(TEST_FLOW_NAME_1))
.setTemplateUris(TEST_TEMPLATE_URI_1)
.setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_1).setRunImmediately(true))
.setProperties(new StringMap(props));
FlowConfig configForSlave = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_GROUP_NAME_2).setFlowName(TEST_FLOW_NAME_2))
.setTemplateUris(TEST_TEMPLATE_URI_2)
.setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_2).setRunImmediately(true))
.setProperties(new StringMap(props));
// Resolve which node currently holds Helix leadership.
boolean node1IsLeader = this.node1GobblinServiceManager.isLeader();
GobblinServiceManager leader = node1IsLeader ? this.node1GobblinServiceManager : this.node2GobblinServiceManager;
FlowConfigClient leaderClient = node1IsLeader ? this.node1FlowConfigClient : this.node2FlowConfigClient;
FlowConfigClient slaveClient = node1IsLeader ? this.node2FlowConfigClient : this.node1FlowConfigClient;
// Create on the leader must succeed outright.
leaderClient.createFlowConfig(configForLeader);
// Create on the non-leader must fail with error details carrying the leader URL.
try {
slaveClient.createFlowConfig(configForSlave);
} catch (RestLiResponseException e) {
Assert.assertTrue(e.hasErrorDetails());
Assert.assertTrue(e.getErrorDetails().containsKey(ServiceConfigKeys.LEADER_URL));
String expectedUrl = PREFIX + InetAddress.getLocalHost().getHostName() + ":" + leader.restliServer.getPort() + "/" + SERVICE_NAME;
Assert.assertEquals(e.getErrorDetails().get(ServiceConfigKeys.LEADER_URL), expectedUrl);
return;
}
throw new RuntimeException("Slave should have thrown an error");
}
} | 3,773 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.core;
import java.io.File;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import org.apache.commons.io.FileUtils;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.fs.Path;
import org.eclipse.jetty.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testcontainers.containers.MySQLContainer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import com.linkedin.data.template.StringMap;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.RestLiResponseException;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.MysqlJobStatusStateStoreFactory;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.service.FlowConfig;
import org.apache.gobblin.service.FlowConfigClient;
import org.apache.gobblin.service.FlowId;
import org.apache.gobblin.service.Schedule;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.TestServiceDatabaseConfig;
import org.apache.gobblin.service.modules.utils.HelixUtils;
import org.apache.gobblin.service.monitoring.FsJobStatusRetriever;
import org.apache.gobblin.util.ConfigUtils;
@Test
public class GobblinServiceHATest {
private static final Logger logger = LoggerFactory.getLogger(GobblinServiceHATest.class);
private static final String QUARTZ_INSTANCE_NAME = "org.quartz.scheduler.instanceName";
private static final String QUARTZ_THREAD_POOL_COUNT = "org.quartz.threadPool.threadCount";
private static final String COMMON_SPEC_STORE_PARENT_DIR = "/tmp/serviceCoreCommon/";
private static final String NODE_1_SERVICE_WORK_DIR = "/tmp/serviceWorkDirNode1/";
private static final String NODE_1_SPEC_STORE_PARENT_DIR = "/tmp/serviceCoreNode1/";
private static final String NODE_1_TOPOLOGY_SPEC_STORE_DIR = "/tmp/serviceCoreNode1/topologyTestSpecStoreNode1";
private static final String NODE_1_FLOW_SPEC_STORE_DIR = "/tmp/serviceCoreCommon/flowTestSpecStore";
private static final String NODE_1_JOB_STATUS_STATE_STORE_DIR = "/tmp/serviceCoreNode1/fsJobStatusRetriever";
private static final String NODE_2_SERVICE_WORK_DIR = "/tmp/serviceWorkDirNode2/";
private static final String NODE_2_SPEC_STORE_PARENT_DIR = "/tmp/serviceCoreNode2/";
private static final String NODE_2_TOPOLOGY_SPEC_STORE_DIR = "/tmp/serviceCoreNode2/topologyTestSpecStoreNode2";
private static final String NODE_2_FLOW_SPEC_STORE_DIR = "/tmp/serviceCoreCommon/flowTestSpecStore";
private static final String NODE_2_JOB_STATUS_STATE_STORE_DIR = "/tmp/serviceCoreNode2/fsJobStatusRetriever";
private static final String TEST_HELIX_CLUSTER_NAME = "testGobblinServiceCluster";
private static final String TEST_GROUP_NAME_1 = "testGroup1";
private static final String TEST_FLOW_NAME_1 = "testFlow1";
private static final String TEST_SCHEDULE_1 = "0 1/0 * ? * *";
private static final String TEST_TEMPLATE_URI_1 = "FS:///templates/test.template";
private static final String TEST_DUMMY_GROUP_NAME_1 = "dummyGroup";
private static final String TEST_DUMMY_FLOW_NAME_1 = "dummyFlow";
private static final String TEST_GROUP_NAME_2 = "testGroup2";
private static final String TEST_FLOW_NAME_2 = "testFlow2";
private static final String TEST_SCHEDULE_2 = "0 1/0 * ? * *";
private static final String TEST_TEMPLATE_URI_2 = "FS:///templates/test.template";
private static final String TEST_GOBBLIN_EXECUTOR_NAME = "testGobblinExecutor";
private static final String TEST_SOURCE_NAME = "testSource";
private static final String TEST_SINK_NAME = "testSink";
private GobblinServiceManager node1GobblinServiceManager;
private FlowConfigClient node1FlowConfigClient;
private GobblinServiceManager node2GobblinServiceManager;
private FlowConfigClient node2FlowConfigClient;
private TestingServer testingZKServer;
private MySQLContainer mysql;
@BeforeClass
public void setup() throws Exception {
// Clean up common Flow Spec Dir
cleanUpDir(COMMON_SPEC_STORE_PARENT_DIR);
// Clean up work dir for Node 1
cleanUpDir(NODE_1_SERVICE_WORK_DIR);
cleanUpDir(NODE_1_SPEC_STORE_PARENT_DIR);
// Clean up work dir for Node 2
cleanUpDir(NODE_2_SERVICE_WORK_DIR);
cleanUpDir(NODE_2_SPEC_STORE_PARENT_DIR);
// Use a random ZK port
this.testingZKServer = new TestingServer(-1);
logger.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());
HelixUtils.createGobblinHelixCluster(testingZKServer.getConnectString(), TEST_HELIX_CLUSTER_NAME);
ITestMetastoreDatabase testMetastoreDatabase = TestMetastoreDatabaseFactory.get();
Properties commonServiceCoreProperties = new Properties();
mysql = new MySQLContainer("mysql:" + TestServiceDatabaseConfig.MysqlVersion);
mysql.start();
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_URL_KEY, mysql.getJdbcUrl());
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_USERNAME, mysql.getUsername());
commonServiceCoreProperties.put(ServiceConfigKeys.SERVICE_DB_PASSWORD, mysql.getPassword());
commonServiceCoreProperties.put(ServiceConfigKeys.ZK_CONNECTION_STRING_KEY, testingZKServer.getConnectString());
commonServiceCoreProperties.put(ServiceConfigKeys.HELIX_CLUSTER_NAME_KEY, TEST_HELIX_CLUSTER_NAME);
commonServiceCoreProperties.put(ServiceConfigKeys.HELIX_INSTANCE_NAME_KEY, "GaaS_" + UUID.randomUUID().toString());
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY , TEST_GOBBLIN_EXECUTOR_NAME);
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".description",
"StandaloneTestExecutor");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".version",
"1");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".uri",
"gobblinExecutor");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".specExecutorInstance",
"org.gobblin.service.InMemorySpecExecutor");
commonServiceCoreProperties.put(ServiceConfigKeys.TOPOLOGY_FACTORY_PREFIX + TEST_GOBBLIN_EXECUTOR_NAME + ".specExecInstance.capabilities",
TEST_SOURCE_NAME + ":" + TEST_SINK_NAME);
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_USER_KEY, "testUser");
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, "testPassword");
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_DB_URL_KEY, testMetastoreDatabase.getJdbcUrl());
commonServiceCoreProperties.put("zookeeper.connect", testingZKServer.getConnectString());
commonServiceCoreProperties.put(ConfigurationKeys.STATE_STORE_FACTORY_CLASS_KEY, MysqlJobStatusStateStoreFactory.class.getName());
commonServiceCoreProperties.put(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_STATUS_MONITOR_ENABLED_KEY, false);
commonServiceCoreProperties.put(ServiceConfigKeys.GOBBLIN_SERVICE_GIT_CONFIG_MONITOR_ENABLED_KEY, false);
commonServiceCoreProperties.put(ServiceConfigKeys.GOBBLIN_SERVICE_FLOW_CATALOG_LOCAL_COMMIT, false);
Properties node1ServiceCoreProperties = new Properties();
node1ServiceCoreProperties.putAll(commonServiceCoreProperties);
node1ServiceCoreProperties.put(ConfigurationKeys.TOPOLOGYSPEC_STORE_DIR_KEY, NODE_1_TOPOLOGY_SPEC_STORE_DIR);
node1ServiceCoreProperties.put(FlowCatalog.FLOWSPEC_STORE_DIR_KEY, NODE_1_FLOW_SPEC_STORE_DIR);
node1ServiceCoreProperties.put(FsJobStatusRetriever.CONF_PREFIX + "." + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, NODE_1_JOB_STATUS_STATE_STORE_DIR);
node1ServiceCoreProperties.put(QUARTZ_INSTANCE_NAME, "QuartzScheduler1");
node1ServiceCoreProperties.put(QUARTZ_THREAD_POOL_COUNT, 3);
Properties node2ServiceCoreProperties = new Properties();
node2ServiceCoreProperties.putAll(commonServiceCoreProperties);
node2ServiceCoreProperties.put(ConfigurationKeys.TOPOLOGYSPEC_STORE_DIR_KEY, NODE_2_TOPOLOGY_SPEC_STORE_DIR);
node2ServiceCoreProperties.put(FlowCatalog.FLOWSPEC_STORE_DIR_KEY, NODE_2_FLOW_SPEC_STORE_DIR);
node2ServiceCoreProperties.put(FsJobStatusRetriever.CONF_PREFIX + "." + ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, NODE_2_JOB_STATUS_STATE_STORE_DIR);
node2ServiceCoreProperties.put(QUARTZ_INSTANCE_NAME, "QuartzScheduler2");
node2ServiceCoreProperties.put(QUARTZ_THREAD_POOL_COUNT, 3);
// Start Node 1
this.node1GobblinServiceManager = GobblinServiceManager.create("CoreService1", "1",
ConfigUtils.propertiesToConfig(node1ServiceCoreProperties), new Path(NODE_1_SERVICE_WORK_DIR));
this.node1GobblinServiceManager.start();
// Start Node 2
this.node2GobblinServiceManager = GobblinServiceManager.create("CoreService2", "2",
ConfigUtils.propertiesToConfig(node2ServiceCoreProperties), new Path(NODE_2_SERVICE_WORK_DIR));
this.node2GobblinServiceManager.start();
// Initialize Node 1 Client
Map<String, String> transportClientProperties = Maps.newHashMap();
transportClientProperties.put(HttpClientFactory.HTTP_REQUEST_TIMEOUT, "10000");
this.node1FlowConfigClient = new FlowConfigClient(String.format("http://localhost:%s/",
this.node1GobblinServiceManager.restliServer.getPort()), transportClientProperties);
// Initialize Node 2 Client
this.node2FlowConfigClient = new FlowConfigClient(String.format("http://localhost:%s/",
this.node2GobblinServiceManager.restliServer.getPort()), transportClientProperties);
}
private void cleanUpDir(String dir) throws Exception {
File specStoreDir = new File(dir);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
}
@AfterClass
public void cleanUp() throws Exception {
// Shutdown Node 1
try {
logger.info("+++++++++++++++++++ start shutdown noad1");
this.node1GobblinServiceManager.stop();
} catch (Exception e) {
logger.warn("Could not cleanly stop Node 1 of Gobblin Service", e);
}
// Shutdown Node 2
try {
logger.info("+++++++++++++++++++ start shutdown noad2");
this.node2GobblinServiceManager.stop();
} catch (Exception e) {
logger.warn("Could not cleanly stop Node 2 of Gobblin Service", e);
}
// Stop Zookeeper
try {
this.testingZKServer.close();
} catch (Exception e) {
logger.warn("Could not cleanly stop Testing Zookeeper", e);
}
// Cleanup Node 1
try {
cleanUpDir(NODE_1_SERVICE_WORK_DIR);
} catch (Exception e) {
logger.warn("Could not completely cleanup Node 1 Work Dir");
}
try {
cleanUpDir(NODE_1_SPEC_STORE_PARENT_DIR);
} catch (Exception e) {
logger.warn("Could not completely cleanup Node 1 Spec Store Parent Dir");
}
// Cleanup Node 2
try {
cleanUpDir(NODE_2_SERVICE_WORK_DIR);
} catch (Exception e) {
logger.warn("Could not completely cleanup Node 2 Work Dir");
}
try {
cleanUpDir(NODE_2_SPEC_STORE_PARENT_DIR);
} catch (Exception e) {
logger.warn("Could not completely cleanup Node 2 Spec Store Parent Dir");
}
cleanUpDir(COMMON_SPEC_STORE_PARENT_DIR);
mysql.stop();
}
@Test
public void testCreate() throws Exception {
logger.info("+++++++++++++++++++ testCreate START");
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1");
flowProperties.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, TEST_SOURCE_NAME);
flowProperties.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, TEST_SINK_NAME);
FlowConfig flowConfig1 = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_GROUP_NAME_1).setFlowName(TEST_FLOW_NAME_1))
.setTemplateUris(TEST_TEMPLATE_URI_1).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_1).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
FlowConfig flowConfig2 = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_GROUP_NAME_2).setFlowName(TEST_FLOW_NAME_2))
.setTemplateUris(TEST_TEMPLATE_URI_2).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_2).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
// Try create on both nodes
long schedulingStartTime = System.currentTimeMillis();
this.node1FlowConfigClient.createFlowConfig(flowConfig1);
this.node2FlowConfigClient.createFlowConfig(flowConfig2);
// Check if created on master
GobblinServiceManager master;
if (this.node1GobblinServiceManager.isLeader()) {
master = this.node1GobblinServiceManager;
logger.info("#### node 1 is manager");
} else if (this.node2GobblinServiceManager.isLeader()) {
master = this.node2GobblinServiceManager;
logger.info("#### node 2 is manager");
} else {
Assert.fail("No leader found in service cluster");
return;
}
int attempt = 0;
boolean assertSuccess = false;
// Below while-loop will read all flow specs, but some of them are being persisted.
// We have seen CRC file java.io.EOFException when reading and writing at the same time.
// Wait for a few seconds to guarantee all the flow specs are persisted.
Thread.sleep(3000);
while (attempt < 800) {
int masterJobs = master.flowCatalog.getSpecs().size();
if (masterJobs == 2) {
assertSuccess = true;
break;
}
Thread.sleep(5);
attempt ++;
}
long schedulingEndTime = System.currentTimeMillis();
logger.info("Total scheduling time in ms: " + (schedulingEndTime - schedulingStartTime));
Assert.assertTrue(assertSuccess, "Flow that was created is not reflecting in FlowCatalog");
logger.info("+++++++++++++++++++ testCreate END");
}
@Test (dependsOnMethods = "testCreate")
public void testCreateAgain() throws Exception {
logger.info("+++++++++++++++++++ testCreateAgain START");
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1");
flowProperties.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, TEST_SOURCE_NAME);
flowProperties.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, TEST_SINK_NAME);
FlowConfig flowConfig1 = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_GROUP_NAME_1).setFlowName(TEST_FLOW_NAME_1))
.setTemplateUris(TEST_TEMPLATE_URI_1).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_1).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
FlowConfig flowConfig2 = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_GROUP_NAME_2).setFlowName(TEST_FLOW_NAME_2))
.setTemplateUris(TEST_TEMPLATE_URI_2).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_2).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
// Try create on both nodes
try {
this.node1FlowConfigClient.createFlowConfig(flowConfig1);
} catch (RestLiResponseException e) {
Assert.fail("Create Again should pass without complaining that the spec already exists.");
}
try {
this.node2FlowConfigClient.createFlowConfig(flowConfig2);
} catch (RestLiResponseException e) {
Assert.fail("Create Again should pass without complaining that the spec already exists.");
}
logger.info("+++++++++++++++++++ testCreateAgain END");
}
@Test (dependsOnMethods = "testCreateAgain")
public void testGet() throws Exception {
logger.info("+++++++++++++++++++ testGet START");
FlowId flowId1 = new FlowId().setFlowGroup(TEST_GROUP_NAME_1).setFlowName(TEST_FLOW_NAME_1);
FlowConfig flowConfig1 = this.node1FlowConfigClient.getFlowConfig(flowId1);
Assert.assertEquals(flowConfig1.getId().getFlowGroup(), TEST_GROUP_NAME_1);
Assert.assertEquals(flowConfig1.getId().getFlowName(), TEST_FLOW_NAME_1);
Assert.assertEquals(flowConfig1.getSchedule().getCronSchedule(), TEST_SCHEDULE_1);
Assert.assertEquals(flowConfig1.getTemplateUris(), TEST_TEMPLATE_URI_1);
Assert.assertTrue(flowConfig1.getSchedule().isRunImmediately());
Assert.assertEquals(flowConfig1.getProperties().get("param1"), "value1");
flowConfig1 = this.node2FlowConfigClient.getFlowConfig(flowId1);
Assert.assertEquals(flowConfig1.getId().getFlowGroup(), TEST_GROUP_NAME_1);
Assert.assertEquals(flowConfig1.getId().getFlowName(), TEST_FLOW_NAME_1);
Assert.assertEquals(flowConfig1.getSchedule().getCronSchedule(), TEST_SCHEDULE_1);
Assert.assertEquals(flowConfig1.getTemplateUris(), TEST_TEMPLATE_URI_1);
Assert.assertTrue(flowConfig1.getSchedule().isRunImmediately());
Assert.assertEquals(flowConfig1.getProperties().get("param1"), "value1");
logger.info("+++++++++++++++++++ testGet END");
}
@Test (dependsOnMethods = "testGet")
public void testUpdate() throws Exception {
logger.info("+++++++++++++++++++ testUpdate START");
// Update on one node and retrieve from another
FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME_1).setFlowName(TEST_FLOW_NAME_1);
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1b");
flowProperties.put("param2", "value2b");
flowProperties.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, TEST_SOURCE_NAME);
flowProperties.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, TEST_SINK_NAME);
FlowConfig flowConfig = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_GROUP_NAME_1).setFlowName(TEST_FLOW_NAME_1))
.setTemplateUris(TEST_TEMPLATE_URI_1).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_1))
.setProperties(new StringMap(flowProperties));
this.node1FlowConfigClient.updateFlowConfig(flowConfig);
FlowConfig retrievedFlowConfig = this.node2FlowConfigClient.getFlowConfig(flowId);
Assert.assertEquals(retrievedFlowConfig.getId().getFlowGroup(), TEST_GROUP_NAME_1);
Assert.assertEquals(retrievedFlowConfig.getId().getFlowName(), TEST_FLOW_NAME_1);
Assert.assertEquals(retrievedFlowConfig.getSchedule().getCronSchedule(), TEST_SCHEDULE_1);
Assert.assertEquals(retrievedFlowConfig.getTemplateUris(), TEST_TEMPLATE_URI_1);
Assert.assertFalse(retrievedFlowConfig.getSchedule().isRunImmediately());
Assert.assertEquals(retrievedFlowConfig.getProperties().get("param1"), "value1b");
Assert.assertEquals(retrievedFlowConfig.getProperties().get("param2"), "value2b");
logger.info("+++++++++++++++++++ testUpdate END");
}
@Test (dependsOnMethods = "testUpdate")
public void testDelete() throws Exception {
logger.info("+++++++++++++++++++ testDelete START");
FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME_1).setFlowName(TEST_FLOW_NAME_1);
// make sure flow config exists
FlowConfig flowConfig = this.node1FlowConfigClient.getFlowConfig(flowId);
Assert.assertEquals(flowConfig.getId().getFlowGroup(), TEST_GROUP_NAME_1);
Assert.assertEquals(flowConfig.getId().getFlowName(), TEST_FLOW_NAME_1);
this.node1FlowConfigClient.deleteFlowConfig(flowId);
// Check if deletion is reflected on both nodes
try {
this.node1FlowConfigClient.getFlowConfig(flowId);
Assert.fail("Get should have gotten a 404 error");
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
try {
this.node2FlowConfigClient.getFlowConfig(flowId);
Assert.fail("Get should have gotten a 404 error");
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
logger.info("+++++++++++++++++++ testDelete END");
}
@Test (dependsOnMethods = "testDelete")
public void testBadGet() throws Exception {
logger.info("+++++++++++++++++++ testBadGet START");
FlowId flowId = new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME_1).setFlowName(TEST_DUMMY_FLOW_NAME_1);
try {
this.node1FlowConfigClient.getFlowConfig(flowId);
Assert.fail("Get should have raised a 404 error");
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
try {
this.node2FlowConfigClient.getFlowConfig(flowId);
Assert.fail("Get should have raised a 404 error");
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
logger.info("+++++++++++++++++++ testBadGet END");
}
@Test (dependsOnMethods = "testBadGet")
public void testBadDelete() throws Exception {
logger.info("+++++++++++++++++++ testBadDelete START");
FlowId flowId = new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME_1).setFlowName(TEST_DUMMY_FLOW_NAME_1);
try {
this.node1FlowConfigClient.getFlowConfig(flowId);
Assert.fail("Get should have raised a 404 error");
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
try {
this.node2FlowConfigClient.getFlowConfig(flowId);
Assert.fail("Get should have raised a 404 error");
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
logger.info("+++++++++++++++++++ testBadDelete END");
}
@Test (dependsOnMethods = "testBadDelete")
public void testBadUpdate() throws Exception {
logger.info("+++++++++++++++++++ testBadUpdate START");
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1b");
flowProperties.put("param2", "value2b");
FlowConfig flowConfig = new FlowConfig()
.setId(new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME_1).setFlowName(TEST_DUMMY_FLOW_NAME_1))
.setTemplateUris(TEST_TEMPLATE_URI_1).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE_1))
.setProperties(new StringMap(flowProperties));
try {
this.node1FlowConfigClient.updateFlowConfig(flowConfig);
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
try {
this.node2FlowConfigClient.updateFlowConfig(flowConfig);
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.NOT_FOUND_404);
}
logger.info("+++++++++++++++++++ testBadUpdate END");
}
@Test (dependsOnMethods = "testBadUpdate")
public void testKillNode() throws Exception {
logger.info("+++++++++++++++++++ testKillNode START");
GobblinServiceManager master, secondary;
if (this.node1GobblinServiceManager.isLeader()) {
master = this.node1GobblinServiceManager;
secondary = this.node2GobblinServiceManager;
} else {
master = this.node2GobblinServiceManager;
secondary = this.node1GobblinServiceManager;
}
int initialMasterJobs = master.getScheduler().getScheduledFlowSpecs().size();
int initialSecondaryJobs = secondary.getScheduler().getScheduledFlowSpecs().size();
Assert.assertTrue(initialMasterJobs > 0, "Master initially should have a few jobs by now in test suite.");
Assert.assertTrue(initialSecondaryJobs == 0, "Secondary node should not schedule any jobs initially.");
// Stop current master
long failOverStartTime = System.currentTimeMillis();
master.stop();
// Wait until secondary becomes master, max 4 seconds
int attempt = 0;
while (!secondary.isLeader()) {
if (attempt > 800) {
Assert.fail("Timeout waiting for Secondary to become master.");
}
Thread.sleep(5);
attempt ++;
}
long failOverOwnerShipTransferTime = System.currentTimeMillis();
attempt = 0;
boolean assertSuccess = false;
while (attempt < 800) {
int newMasterJobs = secondary.getScheduler().getScheduledFlowSpecs().size();
if (newMasterJobs == initialMasterJobs) {
assertSuccess = true;
break;
}
Thread.sleep(5);
attempt ++;
}
long failOverEndTime = System.currentTimeMillis();
logger.info("Total ownership transfer time in ms: " + (failOverOwnerShipTransferTime - failOverStartTime));
logger.info("Total rescheduling time in ms: " + (failOverEndTime - failOverOwnerShipTransferTime));
logger.info("Total failover time in ms: " + (failOverEndTime - failOverStartTime));
Assert.assertTrue(assertSuccess, "New master should take over all old master jobs.");
logger.info("+++++++++++++++++++ testKillNode END");
}
}
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/core/IdentityFlowToJobSpecCompilerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.core;
import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flow.IdentityFlowToJobSpecCompiler;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.PathUtils;
public class IdentityFlowToJobSpecCompilerTest {
private static final Logger logger = LoggerFactory.getLogger(IdentityFlowToJobSpecCompilerTest.class);
private static final String TEST_TEMPLATE_CATALOG_PATH = "/tmp/gobblinTestTemplateCatalog_" + System.currentTimeMillis();
private static final String TEST_TEMPLATE_CATALOG_URI = "file://" + TEST_TEMPLATE_CATALOG_PATH;
private static final String TEST_TEMPLATE_NAME = "test.template";
private static final String TEST_TEMPLATE_URI = "FS:///test.template";
private static final String TEST_SOURCE_NAME = "testSource";
private static final String TEST_SINK_NAME = "testSink";
private static final String TEST_FLOW_GROUP = "testFlowGroup";
private static final String TEST_FLOW_NAME = "testFlowName";
private static final String SPEC_STORE_PARENT_DIR = "/tmp/orchestrator/";
private static final String SPEC_DESCRIPTION = "Test Orchestrator";
private static final String SPEC_VERSION = FlowSpec.Builder.DEFAULT_VERSION;
private static final String TOPOLOGY_SPEC_STORE_DIR = "/tmp/orchestrator/topologyTestSpecStore_" + System.currentTimeMillis();
private static final String FLOW_SPEC_STORE_DIR = "/tmp/orchestrator/flowTestSpecStore_" + System.currentTimeMillis();
private IdentityFlowToJobSpecCompiler compilerWithTemplateCalague;
private IdentityFlowToJobSpecCompiler compilerWithoutTemplateCalague;
@BeforeClass
public void setup() throws Exception {
// Create dir for template catalog
setupDir(TEST_TEMPLATE_CATALOG_PATH);
// Create template to use in test
List<String> templateEntries = new ArrayList<>();
templateEntries.add("testProperty1 = \"testValue1\"");
templateEntries.add("testProperty2 = \"test.Value1\"");
templateEntries.add("testProperty3 = 100");
FileUtils.writeLines(new File(TEST_TEMPLATE_CATALOG_PATH + "/" + TEST_TEMPLATE_NAME), templateEntries);
// Initialize compiler with template catalog
Properties compilerWithTemplateCatalogProperties = new Properties();
compilerWithTemplateCatalogProperties.setProperty(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, TEST_TEMPLATE_CATALOG_URI);
this.compilerWithTemplateCalague = new IdentityFlowToJobSpecCompiler(ConfigUtils.propertiesToConfig(compilerWithTemplateCatalogProperties));
// Add a topology to compiler
this.compilerWithTemplateCalague.onAddSpec(initTopologySpec());
// Initialize compiler without template catalog
this.compilerWithoutTemplateCalague = new IdentityFlowToJobSpecCompiler(ConfigUtils.propertiesToConfig(new Properties()));
// Add a topology to compiler
this.compilerWithoutTemplateCalague.onAddSpec(initTopologySpec());
}
private void setupDir(String dir) throws Exception {
FileUtils.forceMkdir(new File(dir));
}
private void cleanUpDir(String dir) throws Exception {
File specStoreDir = new File(dir);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
}
private TopologySpec initTopologySpec() {
Properties properties = new Properties();
properties.put("specStore.fs.dir", TOPOLOGY_SPEC_STORE_DIR);
properties.put("specExecInstance.capabilities", TEST_SOURCE_NAME + ":" + TEST_SINK_NAME);
Config config = ConfigUtils.propertiesToConfig(properties);
SpecExecutor specExecutorInstance = new InMemorySpecExecutor(config);
TopologySpec.Builder topologySpecBuilder = TopologySpec.builder(computeTopologySpecURI(SPEC_STORE_PARENT_DIR,
TOPOLOGY_SPEC_STORE_DIR))
.withConfig(config)
.withDescription(SPEC_DESCRIPTION)
.withVersion(SPEC_VERSION)
.withSpecExecutor(specExecutorInstance);
return topologySpecBuilder.build();
}
private FlowSpec initFlowSpec() {
return initFlowSpec(TEST_FLOW_GROUP, TEST_FLOW_NAME, TEST_SOURCE_NAME, TEST_SINK_NAME);
}
private FlowSpec initFlowSpec(String flowGroup, String flowName, String source, String destination) {
Properties properties = new Properties();
properties.put(ConfigurationKeys.JOB_SCHEDULE_KEY, "* * * * *");
properties.put(ConfigurationKeys.FLOW_GROUP_KEY, flowGroup);
properties.put(ConfigurationKeys.FLOW_NAME_KEY, flowName);
properties.put(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, source);
properties.put(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, destination);
Config config = ConfigUtils.propertiesToConfig(properties);
FlowSpec.Builder flowSpecBuilder = null;
try {
flowSpecBuilder = FlowSpec.builder(computeTopologySpecURI(SPEC_STORE_PARENT_DIR,
FLOW_SPEC_STORE_DIR))
.withConfig(config)
.withDescription("dummy description")
.withVersion(SPEC_VERSION)
.withTemplate(new URI(TEST_TEMPLATE_URI));
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
return flowSpecBuilder.build();
}
public static URI computeTopologySpecURI(String parent, String current) {
// Make sure this is relative
return PathUtils.relativizePath(new Path(current), new Path(parent)).toUri();
}
@AfterClass
public void cleanUp() throws Exception {
// Cleanup Template Catalog
try {
cleanUpDir(TEST_TEMPLATE_CATALOG_PATH);
} catch (Exception e) {
logger.warn("Could not completely cleanup Template catalog dir");
}
// Cleanup ToplogySpec Dir
try {
cleanUpDir(TOPOLOGY_SPEC_STORE_DIR);
} catch (Exception e) {
logger.warn("Could not completely cleanup ToplogySpec catalog dir");
}
// Cleanup FlowSpec Dir
try {
cleanUpDir(FLOW_SPEC_STORE_DIR);
} catch (Exception e) {
logger.warn("Could not completely cleanup FlowSpec catalog dir");
}
}
@Test
public void testCompilerWithTemplateCatalog() {
FlowSpec flowSpec = initFlowSpec();
// Run compiler on flowSpec
Dag<JobExecutionPlan> jobExecutionPlanDag = this.compilerWithTemplateCalague.compileFlow(flowSpec);
// Assert pre-requisites
Assert.assertNotNull(jobExecutionPlanDag, "Expected non null dag.");
Assert.assertTrue(jobExecutionPlanDag.getNodes().size() == 1, "Exepected 1 executor for FlowSpec.");
// Assert FlowSpec compilation
Dag.DagNode<JobExecutionPlan> dagNode = jobExecutionPlanDag.getStartNodes().get(0);
Spec spec = dagNode.getValue().getJobSpec();
Assert.assertTrue(spec instanceof JobSpec, "Expected JobSpec compiled from FlowSpec.");
// Assert JobSpec properties
JobSpec jobSpec = (JobSpec) spec;
Assert.assertEquals(jobSpec.getConfig().getString("testProperty1"), "testValue1");
Assert.assertEquals(jobSpec.getConfig().getString("testProperty2"), "test.Value1");
Assert.assertEquals(jobSpec.getConfig().getString("testProperty3"), "100");
Assert.assertEquals(jobSpec.getConfig().getString(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY), TEST_SOURCE_NAME);
Assert.assertFalse(jobSpec.getConfig().hasPath(ConfigurationKeys.JOB_SCHEDULE_KEY));
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), TEST_FLOW_NAME);
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.JOB_GROUP_KEY), TEST_FLOW_GROUP);
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), TEST_FLOW_NAME);
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), TEST_FLOW_GROUP);
Assert.assertTrue(jobSpec.getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
//Assert the start node has no children.
Assert.assertEquals(jobExecutionPlanDag.getChildren(dagNode).size(), 0);
}
@Test
public void testCompilerWithoutTemplateCatalog() {
FlowSpec flowSpec = initFlowSpec();
// Run compiler on flowSpec
Dag<JobExecutionPlan> jobExecutionPlanDag = this.compilerWithoutTemplateCalague.compileFlow(flowSpec);
// Assert pre-requisites
Assert.assertNotNull(jobExecutionPlanDag, "Expected non null dag.");
Assert.assertTrue(jobExecutionPlanDag.getNodes().size() == 1, "Exepected 1 executor for FlowSpec.");
// Assert FlowSpec compilation
Assert.assertEquals(jobExecutionPlanDag.getStartNodes().size(), 1);
Dag.DagNode<JobExecutionPlan> dagNode = jobExecutionPlanDag.getStartNodes().get(0);
Spec spec = dagNode.getValue().getJobSpec();
Assert.assertTrue(spec instanceof JobSpec, "Expected JobSpec compiled from FlowSpec.");
// Assert JobSpec properties
JobSpec jobSpec = (JobSpec) spec;
Assert.assertTrue(!jobSpec.getConfig().hasPath("testProperty1"));
Assert.assertTrue(!jobSpec.getConfig().hasPath("testProperty2"));
Assert.assertTrue(!jobSpec.getConfig().hasPath("testProperty3"));
Assert.assertEquals(jobSpec.getConfig().getString(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY), TEST_SOURCE_NAME);
Assert.assertFalse(jobSpec.getConfig().hasPath(ConfigurationKeys.JOB_SCHEDULE_KEY));
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.JOB_NAME_KEY), TEST_FLOW_NAME);
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.JOB_GROUP_KEY), TEST_FLOW_GROUP);
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), TEST_FLOW_NAME);
Assert.assertEquals(jobSpec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), TEST_FLOW_GROUP);
Assert.assertTrue(jobSpec.getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
//Assert the start node has no children.
Assert.assertEquals(jobExecutionPlanDag.getChildren(dagNode).size(), 0);
}
@Test
public void testNoJobSpecCompilation() {
FlowSpec flowSpec = initFlowSpec(TEST_FLOW_GROUP, TEST_FLOW_NAME, "unsupportedSource", "unsupportedSink");
// Run compiler on flowSpec
Dag<JobExecutionPlan> jobExecutionPlanDag = this.compilerWithTemplateCalague.compileFlow(flowSpec);
// Assert pre-requisites
Assert.assertNotNull(jobExecutionPlanDag, "Expected non null dag.");
Assert.assertTrue(jobExecutionPlanDag.getNodes().size() == 0, "Exepected 1 executor for FlowSpec.");
}
} | 3,775 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph;
import java.lang.reflect.Field;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.Properties;
import org.junit.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.testng.collections.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.template.FlowTemplate;
import org.apache.gobblin.service.modules.template.StaticFlowTemplate;
import org.apache.gobblin.util.ConfigUtils;
public class BaseFlowGraphTest {
private DataNode node1;
private DataNode node2;
private DataNode node3;
private DataNode node3c;
private FlowEdge edge1;
private FlowEdge edge2;
private FlowEdge edge3;
private FlowEdge edge3c;
private String edgeId1;
private String edgeId2;
private String edgeId3;
BaseFlowGraph graph;
@BeforeClass
public void setUp() throws URISyntaxException, DataNode.DataNodeCreationException {
Properties properties = new Properties();
properties.put("key1", "val1");
Config node1Config = ConfigUtils.propertiesToConfig(properties).withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY,
ConfigValueFactory.fromAnyRef("node1"));
node1 = new BaseDataNode(node1Config);
properties = new Properties();
properties.put("key2", "val2");
Config node2Config = ConfigUtils.propertiesToConfig(properties).withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY,
ConfigValueFactory.fromAnyRef("node2"));
node2 = new BaseDataNode(node2Config);
properties = new Properties();
properties.put("key3", "val3");
Config node3Config = ConfigUtils.propertiesToConfig(properties).withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY,
ConfigValueFactory.fromAnyRef("node3"));
node3 = new BaseDataNode(node3Config);
//Create a clone of node3
node3c = new BaseDataNode(node3Config);
FlowTemplate flowTemplate1 = new StaticFlowTemplate(new URI("FS:///uri1"), "", "", ConfigFactory.empty(), null, null);
FlowTemplate flowTemplate2 = new StaticFlowTemplate(new URI("FS:///uri2"), "", "", ConfigFactory.empty(), null, null);
FlowTemplate flowTemplate3 = new StaticFlowTemplate(new URI("FS:///uri3"), "", "", ConfigFactory.empty(), null, null);
//Create edge instances
edgeId1 = "node1:node2:edge1";
edgeId2 = "node2:node3:edge2";
edgeId3 = "node3:node1:edge3";
edge1 = new BaseFlowEdge(Lists.newArrayList("node1", "node2"), edgeId1, flowTemplate1, null, ConfigFactory.empty(), true);
edge2 = new BaseFlowEdge(Lists.newArrayList("node2", "node3"), edgeId2, flowTemplate2, null, ConfigFactory.empty(), true);
edge3 = new BaseFlowEdge(Lists.newArrayList("node3", "node1"), edgeId3, flowTemplate3, null, ConfigFactory.empty(), true);
//Create a clone of edge3
edge3c = new BaseFlowEdge(Lists.newArrayList("node3", "node1"), edgeId3, flowTemplate3, null, ConfigFactory.empty(), true);
//Create a FlowGraph
graph = new BaseFlowGraph();
//Add nodes
Assert.assertTrue(graph.addDataNode(node1));
Assert.assertTrue(graph.addDataNode(node2));
Assert.assertTrue(graph.addDataNode(node3));
Assert.assertEquals(graph.getEdges(node1).size(), 0);
Assert.assertEquals(graph.getEdges(node2).size(), 0);
Assert.assertEquals(graph.getEdges(node3).size(), 0);
//Add edges
Assert.assertTrue(graph.addFlowEdge(edge1));
Assert.assertTrue(graph.addFlowEdge(edge2));
Assert.assertTrue(graph.addFlowEdge(edge3));
}
@Test
public void testAddDataNode() throws Exception {
//Check contents of dataNodeMap
Field field = BaseFlowGraph.class.getDeclaredField("dataNodeMap");
field.setAccessible(true);
Map<String, DataNode> dataNodeMap = (Map<String, DataNode>) field.get(graph);
Assert.assertEquals(dataNodeMap.get("node1"), node1);
Assert.assertEquals(dataNodeMap.get("node2"), node2);
Assert.assertEquals(dataNodeMap.get("node3"), node3);
graph.addDataNode(node3c);
Assert.assertEquals(graph.getNode("node3"), node3);
Assert.assertEquals(graph.getNode("node3"), node3c);
//Ensure the cloned node overwrites the original
Assert.assertTrue(graph.getNode("node3") == node3c);
Assert.assertTrue(graph.getNode("node3") != node3);
//Add back original node
graph.addDataNode(node3);
}
@Test (dependsOnMethods = "testAddDataNode")
public void testAddFlowEdge() throws Exception {
//Check nodesToEdges
Assert.assertEquals(graph.getEdges("node1").size(), 1);
Assert.assertEquals(graph.getEdges("node2").size(), 1);
Assert.assertEquals(graph.getEdges("node3").size(), 1);
Assert.assertTrue(graph.getEdges("node1").contains(edge1));
Assert.assertTrue(graph.getEdges("node2").contains(edge2));
Assert.assertTrue(graph.getEdges("node3").contains(edge3));
//Try adding an edge that already exists
Assert.assertTrue(graph.addFlowEdge(edge3c));
Assert.assertTrue(graph.getEdges("node3").contains(edge3));
//graph should contain the new copy of the edge
Assert.assertTrue(graph.getEdges("node3").iterator().next() == edge3c);
Assert.assertTrue(edge3 != edge3c);
//Add back original edge
Assert.assertTrue(graph.addFlowEdge(edge3));
//Check contents of flowEdgeMap
Field field = BaseFlowGraph.class.getDeclaredField("flowEdgeMap");
field.setAccessible(true);
Map<String, FlowEdge> flowEdgeMap = (Map<String, FlowEdge>) field.get(graph);
Assert.assertEquals(flowEdgeMap.get(edge1.getId()), edge1);
Assert.assertEquals(flowEdgeMap.get(edge2.getId()), edge2);
Assert.assertEquals(flowEdgeMap.get(edge3.getId()), edge3);
}
@Test (dependsOnMethods = "testAddFlowEdge")
public void testDeleteDataNode() throws Exception {
//Delete node1 from graph
Assert.assertTrue(graph.deleteDataNode("node1"));
//Check contents of dataNodeMap
Assert.assertEquals(graph.getNode(node1.getId()), null);
Assert.assertEquals(graph.getNode(node2.getId()), node2);
Assert.assertEquals(graph.getNode(node3.getId()), node3);
//Check contents of nodesToEdges
Assert.assertEquals(graph.getEdges(node1), null);
//Check contents of dataNodeMap
Field field = BaseFlowGraph.class.getDeclaredField("dataNodeMap");
field.setAccessible(true);
Map<String, DataNode> dataNodeMap = (Map<String, DataNode>) field.get(graph);
Assert.assertTrue(!dataNodeMap.containsKey("node1"));
Assert.assertEquals(dataNodeMap.get("node2"), node2);
Assert.assertEquals(dataNodeMap.get("node3"), node3);
//Check contents of flowEdgeMap. Ensure edge1 is no longer in flowEdgeMap
Assert.assertTrue(!graph.deleteFlowEdge(edge1));
field = BaseFlowGraph.class.getDeclaredField("flowEdgeMap");
field.setAccessible(true);
Map<String, FlowEdge> flowEdgeMap = (Map<String, FlowEdge>) field.get(graph);
Assert.assertTrue(!flowEdgeMap.containsKey(edge1.getId()));
Assert.assertEquals(flowEdgeMap.get(edge2.getId()), edge2);
Assert.assertEquals(flowEdgeMap.get(edge3.getId()), edge3);
//Add node1 and edge1 back to the graph
graph.addDataNode(node1);
graph.addFlowEdge(edge1);
}
@Test (dependsOnMethods = "testDeleteDataNode")
public void testDeleteFlowEdgeById() throws Exception {
Assert.assertTrue(graph.deleteFlowEdge(edgeId1));
Assert.assertEquals(graph.getEdges("node1").size(), 0);
Assert.assertEquals(graph.getEdges("node2").size(), 1);
Assert.assertEquals(graph.getEdges("node3").size(), 1);
Assert.assertTrue(!graph.getEdges("node1").contains(edge1));
Assert.assertTrue(graph.getEdges("node2").contains(edge2));
Assert.assertTrue(graph.getEdges("node3").contains(edge3));
Assert.assertTrue(graph.deleteFlowEdge(edgeId2));
Assert.assertEquals(graph.getEdges("node1").size(), 0);
Assert.assertEquals(graph.getEdges("node2").size(), 0);
Assert.assertEquals(graph.getEdges("node3").size(), 1);
Assert.assertTrue(!graph.getEdges("node1").contains(edge1));
Assert.assertTrue(!graph.getEdges("node2").contains(edge2));
Assert.assertTrue(graph.getEdges("node3").contains(edge3));
Assert.assertTrue(graph.deleteFlowEdge(edgeId3));
Assert.assertEquals(graph.getEdges("node1").size(), 0);
Assert.assertEquals(graph.getEdges("node2").size(), 0);
Assert.assertEquals(graph.getEdges("node3").size(), 0);
Assert.assertTrue(!graph.getEdges("node1").contains(edge1));
Assert.assertTrue(!graph.getEdges("node2").contains(edge2));
Assert.assertTrue(!graph.getEdges("node3").contains(edge3));
Assert.assertTrue(!graph.deleteFlowEdge(edgeId1));
Assert.assertTrue(!graph.deleteFlowEdge(edgeId2));
Assert.assertTrue(!graph.deleteFlowEdge(edgeId3));
}
} | 3,776 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.testng.collections.Sets;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.service.modules.flowgraph.Dag.DagNode;
@Slf4j
public class DagTest {
@Test
public void testInitialize() {
DagNode<String> dagNode1 = new DagNode<>("val1");
DagNode<String> dagNode2 = new DagNode<>("val2");
DagNode<String> dagNode3 = new DagNode<>("val3");
DagNode<String> dagNode4 = new DagNode<>("val4");
DagNode<String> dagNode5 = new DagNode<>("val5");
dagNode2.addParentNode(dagNode1);
dagNode3.addParentNode(dagNode1);
dagNode4.addParentNode(dagNode2);
dagNode4.addParentNode(dagNode3);
dagNode5.addParentNode(dagNode3);
List<DagNode<String>> dagNodeList = Lists.newArrayList(dagNode1, dagNode2, dagNode3, dagNode4, dagNode5);
Dag<String> dag = new Dag<>(dagNodeList);
//Test startNodes and endNodes
Assert.assertEquals(dag.getStartNodes().size(), 1);
Assert.assertEquals(dag.getStartNodes().get(0).getValue(), "val1");
Assert.assertEquals(dag.getEndNodes().size(), 2);
Assert.assertEquals(dag.getEndNodes().get(0).getValue(), "val4");
Assert.assertEquals(dag.getEndNodes().get(1).getValue(), "val5");
DagNode startNode = dag.getStartNodes().get(0);
Assert.assertEquals(dag.getChildren(startNode).size(), 2);
Set<String> childSet = new HashSet<>();
for (DagNode<String> node: dag.getChildren(startNode)) {
childSet.add(node.getValue());
}
Assert.assertTrue(childSet.contains("val2"));
Assert.assertTrue(childSet.contains("val3"));
dagNode2 = dag.getChildren(startNode).get(0);
dagNode3 = dag.getChildren(startNode).get(1);
Assert.assertEquals(dag.getChildren(dagNode2).size(), 1);
Assert.assertEquals(dag.getChildren(dagNode2).get(0).getValue(), "val4");
for (DagNode<String> node: dag.getChildren(dagNode3)) {
childSet.add(node.getValue());
}
Assert.assertTrue(childSet.contains("val4"));
Assert.assertTrue(childSet.contains("val5"));
//Ensure end nodes have no children
Assert.assertEquals(dag.getChildren(dagNode4).size(), 0);
Assert.assertEquals(dag.getChildren(dagNode5).size(), 0);
}
@Test
public void testConcatenate() {
DagNode<String> dagNode1 = new DagNode<>("val1");
DagNode<String> dagNode2 = new DagNode<>("val2");
DagNode<String> dagNode3 = new DagNode<>("val3");
DagNode<String> dagNode4 = new DagNode<>("val4");
DagNode<String> dagNode5 = new DagNode<>("val5");
dagNode2.addParentNode(dagNode1);
dagNode3.addParentNode(dagNode1);
dagNode4.addParentNode(dagNode2);
dagNode4.addParentNode(dagNode3);
dagNode5.addParentNode(dagNode3);
List<DagNode<String>> dagNodeList = Lists.newArrayList(dagNode1, dagNode2, dagNode3, dagNode4, dagNode5);
Dag<String> dag1 = new Dag<>(dagNodeList);
DagNode<String> dagNode6 = new DagNode<>("val6");
DagNode<String> dagNode7 = new DagNode<>("val7");
DagNode<String> dagNode8 = new DagNode<>("val8");
dagNode8.addParentNode(dagNode6);
dagNode8.addParentNode(dagNode7);
Dag<String> dag2 = new Dag<>(Lists.newArrayList(dagNode6, dagNode7, dagNode8));
Dag<String> dagNew = dag1.concatenate(dag2);
//Ensure end nodes of first dag are no longer end nodes
for (DagNode<String> dagNode : Lists.newArrayList(dagNode6, dagNode7)) {
Assert.assertEquals(dagNew.getParents(dagNode).size(), 2);
Set<String> set = new HashSet<>();
set.add(dagNew.getParents(dagNode).get(0).getValue());
set.add(dagNew.getParents(dagNode).get(1).getValue());
Assert.assertTrue(set.contains("val4"));
Assert.assertTrue(set.contains("val5"));
}
for (DagNode<String> dagNode : Lists.newArrayList(dagNode4, dagNode5)) {
Assert.assertEquals(dagNew.getChildren(dagNode).size(), 2);
Set<String> set = new HashSet<>();
set.add(dagNew.getChildren(dagNode).get(0).getValue());
set.add(dagNew.getChildren(dagNode).get(1).getValue());
Assert.assertTrue(set.contains("val6"));
Assert.assertTrue(set.contains("val7"));
}
for (DagNode<String> dagNode : Lists.newArrayList(dagNode6, dagNode7)) {
List<DagNode<String>> nextNodes = dagNew.getChildren(dagNode);
Assert.assertEquals(nextNodes.size(), 1);
Assert.assertEquals(nextNodes.get(0).getValue(), "val8");
}
//Test new start and end nodes.
Assert.assertEquals(dagNew.getStartNodes().size(), 1);
Assert.assertEquals(dagNew.getStartNodes().get(0).getValue(), "val1");
Assert.assertEquals(dagNew.getEndNodes().size(), 1);
Assert.assertEquals(dagNew.getEndNodes().get(0).getValue(), "val8");
}
@Test
public void testConcatenateForkNodes() {
DagNode<String> dagNode1 = new DagNode<>("val1");
DagNode<String> dagNode2 = new DagNode<>("val2");
DagNode<String> dagNode3 = new DagNode<>("val3");
dagNode2.addParentNode(dagNode1);
dagNode3.addParentNode(dagNode1);
Dag<String> dag1 = new Dag<>(Lists.newArrayList(dagNode1, dagNode2, dagNode3));
DagNode<String> dagNode4 = new DagNode<>("val4");
Dag<String> dag2 = new Dag<>(Lists.newArrayList(dagNode4));
Set<DagNode<String>> forkNodes = Sets.newHashSet();
forkNodes.add(dagNode3);
Dag<String> dagNew = dag1.concatenate(dag2, forkNodes);
Assert.assertEquals(dagNew.getChildren(dagNode2).size(), 1);
Assert.assertEquals(dagNew.getChildren(dagNode2).get(0), dagNode4);
Assert.assertEquals(dagNew.getParents(dagNode4).size(), 1);
Assert.assertEquals(dagNew.getParents(dagNode4).get(0), dagNode2);
Assert.assertEquals(dagNew.getEndNodes().size(), 2);
Assert.assertEquals(dagNew.getEndNodes().get(0).getValue(), "val4");
Assert.assertEquals(dagNew.getEndNodes().get(1).getValue(), "val3");
Assert.assertEquals(dagNew.getChildren(dagNode3).size(), 0);
}
@Test
public void testMerge() {
DagNode<String> dagNode1 = new DagNode<>("val1");
DagNode<String> dagNode2 = new DagNode<>("val2");
DagNode<String> dagNode3 = new DagNode<>("val3");
DagNode<String> dagNode4 = new DagNode<>("val4");
DagNode<String> dagNode5 = new DagNode<>("val5");
dagNode2.addParentNode(dagNode1);
dagNode3.addParentNode(dagNode1);
dagNode4.addParentNode(dagNode2);
dagNode4.addParentNode(dagNode3);
dagNode5.addParentNode(dagNode3);
List<DagNode<String>> dagNodeList = Lists.newArrayList(dagNode1, dagNode2, dagNode3, dagNode4, dagNode5);
Dag<String> dag1 = new Dag<>(dagNodeList);
DagNode<String> dagNode6 = new DagNode<>("val6");
DagNode<String> dagNode7 = new DagNode<>("val7");
DagNode<String> dagNode8 = new DagNode<>("val8");
dagNode8.addParentNode(dagNode6);
dagNode8.addParentNode(dagNode7);
Dag<String> dag2 = new Dag<>(Lists.newArrayList(dagNode6, dagNode7, dagNode8));
//Merge the two dags
Dag<String> dagNew = dag1.merge(dag2);
//Test the startNodes
Assert.assertEquals(dagNew.getStartNodes().size(), 3);
for (DagNode<String> dagNode: Lists.newArrayList(dagNode1, dagNode6, dagNode7)) {
Assert.assertTrue(dagNew.getStartNodes().contains(dagNode));
Assert.assertEquals(dagNew.getParents(dagNode).size(), 0);
if (dagNode == dagNode1) {
List<DagNode<String>> nextNodes = dagNew.getChildren(dagNode);
Assert.assertEquals(nextNodes.size(), 2);
Assert.assertTrue(nextNodes.contains(dagNode2));
Assert.assertTrue(nextNodes.contains(dagNode3));
} else {
Assert.assertEquals(dagNew.getChildren(dagNode).size(), 1);
Assert.assertTrue(dagNew.getChildren(dagNode).contains(dagNode8));
}
}
//Test the endNodes
Assert.assertEquals(dagNew.getEndNodes().size(), 3);
for (DagNode<String> dagNode: Lists.newArrayList(dagNode4, dagNode5, dagNode8)) {
Assert.assertTrue(dagNew.getEndNodes().contains(dagNode));
Assert.assertEquals(dagNew.getChildren(dagNode).size(), 0);
if (dagNode == dagNode8) {
Assert.assertEquals(dagNew.getParents(dagNode).size(), 2);
Assert.assertTrue(dagNew.getParents(dagNode).contains(dagNode6));
Assert.assertTrue(dagNew.getParents(dagNode).contains(dagNode7));
} else {
Assert.assertTrue(dagNew.getParents(dagNode).contains(dagNode3));
if (dagNode == dagNode4) {
Assert.assertEquals(dagNew.getParents(dagNode).size(), 2);
Assert.assertTrue(dagNew.getParents(dagNode).contains(dagNode2));
} else {
Assert.assertEquals(dagNew.getParents(dagNode).size(), 1);
}
}
}
//Test the other nodes
Assert.assertEquals(dagNew.getChildren(dagNode2).size(), 1);
Assert.assertTrue(dagNew.getChildren(dagNode2).contains(dagNode4));
Assert.assertEquals(dagNew.getChildren(dagNode3).size(), 2);
Assert.assertTrue(dagNew.getChildren(dagNode3).contains(dagNode4));
Assert.assertTrue(dagNew.getChildren(dagNode3).contains(dagNode5));
}
} | 3,777 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.util.ConfigUtils;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class BaseFlowEdgeFactoryTest {
@Test
public void testCreateFlowEdge() throws Exception {
Properties properties = new Properties();
properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_SOURCE_KEY,"node1");
properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_DESTINATION_KEY, "node2");
properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_NAME_KEY, "edge1");
properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "node1:node2:edge1");
properties.put(FlowGraphConfigurationKeys.FLOW_EDGE_TEMPLATE_DIR_URI_KEY, "FS:///flowEdgeTemplate");
List<SpecExecutor> specExecutorList = new ArrayList<>();
Config config1 = ConfigFactory.empty().withValue("specStore.fs.dir", ConfigValueFactory.fromAnyRef("/tmp1")).
withValue("specExecInstance.capabilities", ConfigValueFactory.fromAnyRef("s1:d1"));
specExecutorList.add(new InMemorySpecExecutor(config1));
Config config2 = ConfigFactory.empty().withValue("specStore.fs.dir", ConfigValueFactory.fromAnyRef("/tmp2")).
withValue("specExecInstance.capabilities", ConfigValueFactory.fromAnyRef("s2:d2"));
specExecutorList.add(new InMemorySpecExecutor(config2));
FlowEdgeFactory flowEdgeFactory = new BaseFlowEdge.Factory();
Properties props = new Properties();
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
props.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, flowTemplateCatalogUri.toString());
Config config = ConfigFactory.parseProperties(props);
Config templateCatalogCfg = config
.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
FSFlowTemplateCatalog catalog = new FSFlowTemplateCatalog(templateCatalogCfg);
Config edgeProps = ConfigUtils.propertiesToConfig(properties);
FlowEdge flowEdge = flowEdgeFactory.createFlowEdge(edgeProps, catalog, specExecutorList);
Assert.assertEquals(flowEdge.getSrc(), "node1");
Assert.assertEquals(flowEdge.getDest(), "node2");
Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specStore.fs.dir"),"/tmp1");
Assert.assertEquals(flowEdge.getExecutors().get(0).getConfig().get().getString("specExecInstance.capabilities"),"s1:d1");
Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specStore.fs.dir"),"/tmp2");
Assert.assertEquals(flowEdge.getExecutors().get(1).getConfig().get().getString("specExecInstance.capabilities"),"s2:d2");
Assert.assertEquals(flowEdge.getExecutors().get(0).getClass().getSimpleName(),"InMemorySpecExecutor");
Assert.assertEquals(flowEdge.getExecutors().get(1).getClass().getSimpleName(),"InMemorySpecExecutor");
}
} | 3,778 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/pathfinder/AbstractPathFinderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph.pathfinder;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.restli.FlowConfigUtils;
public class AbstractPathFinderTest {
  /**
   * Verifies that {@link FlowConfigUtils#getDataNodes} resolves aliased data-node names to
   * their canonical names and passes un-aliased nodes through unchanged.
   */
  @Test
  public void convertDataNodesTest() {
    // Destination identifier lists one aliased node and one un-aliased node.
    Config flowConfig = ConfigFactory.empty()
        .withValue(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, ConfigValueFactory.fromAnyRef("node1-alpha,node2"));
    Map<String, String> dataNodeAliasMap = new HashMap<>();
    dataNodeAliasMap.put("node1-alpha", "node1");
    dataNodeAliasMap.put("node1-beta", "node1");
    dataNodeAliasMap.put("node3-alpha", "node3");
    // Fixed: this entry previously reused the key "node1-beta", silently overwriting the
    // "node1-beta" -> "node1" mapping above. "node3-beta" matches the node3 alias family.
    dataNodeAliasMap.put("node3-beta", "node3");

    List<String> dataNodes = FlowConfigUtils.getDataNodes(flowConfig, ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, dataNodeAliasMap);

    // "node1-alpha" resolves to "node1"; "node2" has no alias and is kept as-is.
    Assert.assertEquals(dataNodes.size(), 2);
    Assert.assertTrue(dataNodes.contains("node1"));
    Assert.assertTrue(dataNodes.contains("node2"));
  }
}
| 3,779 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes/HttpDataNodeTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph.datanodes;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
public class HttpDataNodeTest {
@Test
public void testConfig() throws DataNode.DataNodeCreationException {
String expectedNodeId = "some-node-id";
String expectedHttpDomain = "https://a.b.c";
String expectedHttpAuthType = "oauth";
Config config = ConfigFactory.empty()
.withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef(expectedNodeId))
.withValue(FlowGraphConfigurationKeys.DATA_NODE_HTTP_DOMAIN_KEY, ConfigValueFactory.fromAnyRef(expectedHttpDomain))
.withValue(FlowGraphConfigurationKeys.DATA_NODE_HTTP_AUTHENTICATION_TYPE_KEY, ConfigValueFactory.fromAnyRef(expectedHttpAuthType));
HttpDataNode node = new HttpDataNode(config);
// Verify the node id
String id = node.getId();
Assert.assertTrue(id.equals(expectedNodeId));
Config rawConfig = node.getRawConfig();
String httpDomain = ConfigUtils.getString(rawConfig, FlowGraphConfigurationKeys.DATA_NODE_HTTP_DOMAIN_KEY, "");
String httpAuthType = ConfigUtils.getString(rawConfig, FlowGraphConfigurationKeys.DATA_NODE_HTTP_AUTHENTICATION_TYPE_KEY, "");
// Verify config saved to the node successfully
Assert.assertTrue(httpDomain.equals(expectedHttpDomain));
Assert.assertTrue(httpAuthType.equals(expectedHttpAuthType));
}
} | 3,780 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes/iceberg/IcebergDataNodeTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph.datanodes.iceberg;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
public class IcebergDataNodeTest {
  // Base config for an Iceberg data node, rebuilt before every test by setUp().
  Config config = null;
  @BeforeMethod
  public void setUp() {
    // A well-formed node: id, filesystem URI, and a non-empty Iceberg catalog URI.
    String sampleNodeId = "some-iceberg-node-id";
    String sampleAdlFsUri = "hdfs://data.hdfs.core.windows.net";
    String sampleCatalogUri = "https://xyz.company.com/clusters/db/catalog:443";
    config = ConfigFactory.empty()
        .withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef(sampleNodeId))
        .withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "fs.uri", ConfigValueFactory.fromAnyRef(sampleAdlFsUri))
        .withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "iceberg.catalog.uri", ConfigValueFactory.fromAnyRef(sampleCatalogUri));
  }
  @AfterMethod
  public void tearDown() {
  }
  /** A config with a non-empty catalog URI yields a usable {@link IcebergDataNode}. */
  @Test
  public void testIcebergDataNodeWithValidCatalogUri() throws DataNode.DataNodeCreationException {
    IcebergDataNode icebergDataNode = new IcebergDataNode(config);
    Assert.assertNotNull(icebergDataNode);
  }
  /** An empty catalog URI must make the constructor throw DataNodeCreationException. */
  @Test(expectedExceptions = DataNode.DataNodeCreationException.class)
  public void testIcebergDataNodeWithInvalidCatalogUri() throws DataNode.DataNodeCreationException {
    String emptyCatalogUri = "";
    config = config.withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "iceberg.catalog.uri", ConfigValueFactory.fromAnyRef(emptyCatalogUri));
    // Constructor is expected to throw; the variable is intentionally unused.
    IcebergDataNode icebergDataNode = new IcebergDataNode(config);
  }
}
| 3,781 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes/iceberg/IcebergOnHiveDataNodeTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph.datanodes.iceberg;
import java.net.URI;
import java.net.URISyntaxException;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
public class IcebergOnHiveDataNodeTest {
  // Base config for an Iceberg-on-Hive data node, rebuilt before every test by setUp().
  Config config = null;
  @BeforeMethod
  public void setUp() {
    // A well-formed node: id, filesystem URI, and a thrift:// Hive metastore URI.
    String sampleNodeId = "some-iceberg-node-id";
    String sampleAdlFsUri = "hdfs://data.hdfs.core.windows.net";
    String sampleHiveMetastoreUri = "thrift://hcat.company.com:7552";
    config = ConfigFactory.empty()
        .withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef(sampleNodeId))
        .withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "fs.uri", ConfigValueFactory.fromAnyRef(sampleAdlFsUri))
        .withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "hive.metastore.uri", ConfigValueFactory.fromAnyRef(sampleHiveMetastoreUri));
  }
  @AfterMethod
  public void tearDown() {
  }
  /** A thrift:// metastore URI passes {@code isMetastoreUriValid} on a constructed node. */
  @Test
  public void testIcebergDataNodeWithValidMetastoreUri() throws DataNode.DataNodeCreationException, URISyntaxException {
    IcebergOnHiveDataNode icebergDataNode = new IcebergOnHiveDataNode(config);
    URI uri = new URI(config.getString(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "hive.metastore.uri"));
    Assert.assertTrue(icebergDataNode.isMetastoreUriValid(uri));
  }
  /** A non-thrift scheme must make construction throw DataNodeCreationException. */
  @Test(expectedExceptions = DataNode.DataNodeCreationException.class)
  public void testIcebergDataNodeWithInvalidMetastoreUri() throws DataNode.DataNodeCreationException, URISyntaxException {
    String bogusHiveMetastoreUri = "not-thrift://hcat.company.com:7552";
    config = config.withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "hive.metastore.uri", ConfigValueFactory.fromAnyRef(bogusHiveMetastoreUri));
    // Expected to throw here; the following lines are unreachable when the test passes.
    IcebergOnHiveDataNode icebergDataNode = new IcebergOnHiveDataNode(config);
    URI uri = new URI(config.getString(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "hive.metastore.uri"));
    icebergDataNode.isMetastoreUriValid(uri);
  }
}
| 3,782 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes/hive/HiveDataNodeTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph.datanodes.hive;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.junit.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
public class HiveDataNodeTest {
Config config = null;
@BeforeMethod
public void setUp() {
String expectedNodeId = "some-node-id";
String expectedAdlFsUri = "abfs://data@adl.dfs.core.windows.net";
String expectedHiveMetastoreUri = "thrift://hcat.company.com:7552";
config = ConfigFactory.empty()
.withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef(expectedNodeId))
.withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "fs.uri", ConfigValueFactory.fromAnyRef(expectedAdlFsUri))
.withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "hive.metastore.uri", ConfigValueFactory.fromAnyRef(expectedHiveMetastoreUri));
}
@AfterMethod
public void tearDown() {
}
@Test
public void testIsMetastoreUriValid() throws Exception {
HiveDataNode hiveDataNode = new HiveDataNode(config);
Assert.assertNotNull(hiveDataNode);
}
@Test(expectedExceptions = DataNode.DataNodeCreationException.class)
public void testIsMetastoreUriInValid() throws Exception {
String expectedHiveMetastoreUri = "thrift-1://hcat.company.com:7552";
config = config.withValue(FlowGraphConfigurationKeys.DATA_NODE_PREFIX + "hive.metastore.uri", ConfigValueFactory.fromAnyRef(expectedHiveMetastoreUri));
HiveDataNode hiveDataNode = new HiveDataNode(config);
}
} | 3,783 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/flowgraph/datanodes/fs/SftpDataNodeTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.flowgraph.datanodes.fs;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor;
import org.apache.gobblin.service.modules.flowgraph.DataNode;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
public class SftpDataNodeTest {
@Test
public void testCreate() throws DataNode.DataNodeCreationException {
//Create a SFTP DataNode with default SFTP port
Config config = ConfigFactory.empty().withValue(SftpDataNode.SFTP_HOSTNAME,
ConfigValueFactory.fromAnyRef("testHost"))
.withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY, ConfigValueFactory.fromAnyRef("testId"));
SftpDataNode dataNode = new SftpDataNode(config);
Assert.assertEquals(dataNode.getId(), "testId");
Assert.assertEquals(dataNode.getHostName(), "testHost");
Assert.assertEquals(dataNode.getPort().intValue(), ConfigurationKeys.SOURCE_CONN_DEFAULT_PORT);
Assert.assertEquals(dataNode.getDefaultDatasetDescriptorPlatform(), SftpDataNode.PLATFORM);
Assert.assertEquals(dataNode.getDefaultDatasetDescriptorClass(), FSDatasetDescriptor.class.getCanonicalName());
config = config.withValue(SftpDataNode.SFTP_PORT, ConfigValueFactory.fromAnyRef(143));
SftpDataNode dataNodeWithPort = new SftpDataNode(config);
Assert.assertEquals(dataNode.getId(), "testId");
Assert.assertEquals(dataNode.getHostName(), "testHost");
Assert.assertEquals(dataNodeWithPort.getPort().intValue(), 143);
Assert.assertEquals(dataNode.getDefaultDatasetDescriptorPlatform(), SftpDataNode.PLATFORM);
Assert.assertEquals(dataNode.getDefaultDatasetDescriptorClass(), FSDatasetDescriptor.class.getCanonicalName());
Config configMissingProps = ConfigFactory.empty().withValue(FlowGraphConfigurationKeys.DATA_NODE_ID_KEY,
ConfigValueFactory.fromAnyRef("testId"));
try {
DataNode sftpNode = new SftpDataNode(configMissingProps);
Assert.fail("Unexpected success in creating Sftp node.");
} catch (DataNode.DataNodeCreationException e) {
//Expected exception.
}
}
} | 3,784 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/dataset/SqlDatasetDescriptorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.dataset;
import java.io.IOException;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import static org.testng.Assert.*;
public class SqlDatasetDescriptorTest {
@Test
public void testContains() throws IOException {
Config config1 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("sqlserver"))
.withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef("testDb_Db1"))
.withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef("testTable_Table1"));
SqlDatasetDescriptor descriptor1 = new SqlDatasetDescriptor(config1);
Config config2 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("sqlserver"));
SqlDatasetDescriptor descriptor2 = new SqlDatasetDescriptor(config2);
Assert.assertEquals(descriptor2.contains(descriptor1).size(), 0);
Config config3 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("sqlserver"))
.withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef("testDb_.*"))
.withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef("testTable_.*"));
SqlDatasetDescriptor descriptor3 = new SqlDatasetDescriptor(config3);
Assert.assertEquals(descriptor3.contains(descriptor1).size(), 0);
Config config4 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("sqlserver"))
.withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef("Db_.*"))
.withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef("Table_.*"));
SqlDatasetDescriptor descriptor4 = new SqlDatasetDescriptor(config4);
Assert.assertNotEquals(descriptor4.contains(descriptor1).size(), 0);
}
} | 3,785 |
0 | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules | Create_ds/gobblin/gobblin-service/src/test/java/org/apache/gobblin/service/modules/dataset/FSDatasetDescriptorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.dataset;
import java.io.IOException;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
public class FSDatasetDescriptorTest {
  /**
   * Exercises {@link FSDatasetDescriptor#contains} across progressively more specific
   * descriptors (path regex, format/codec, encryption, partition, compaction/retention,
   * subpaths, fs.uri). An empty result from contains() is treated as containment; a
   * non-empty result as a mismatch. Each configN builds on the previous via withFallback,
   * so descriptor order matters.
   */
  @Test
  public void testContains() throws IOException {
    //Ensure descriptor2's path is matched by the regular expression in descriptor1's path
    Config config1 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"));
    FSDatasetDescriptor descriptor1 = new FSDatasetDescriptor(config1);
    Config config2 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/d"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"))
        .withValue(DatasetDescriptorConfigKeys.FORMAT_KEY, ConfigValueFactory.fromAnyRef("avro"))
        .withValue(DatasetDescriptorConfigKeys.CODEC_KEY, ConfigValueFactory.fromAnyRef("gzip"));
    FSDatasetDescriptor descriptor2 = new FSDatasetDescriptor(config2);
    Assert.assertEquals(descriptor1.contains(descriptor2).size(), 0);
    //Add encryption config
    // NOTE(review): the constant is spelled ENCYPTION_PREFIX in DatasetDescriptorConfigKeys.
    Config encConfig = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.ENCRYPTION_LEVEL_KEY, ConfigValueFactory.fromAnyRef("file"))
        .withValue(DatasetDescriptorConfigKeys.ENCRYPTION_ALGORITHM_KEY, ConfigValueFactory.fromAnyRef("aes_rotating"))
        .atPath(DatasetDescriptorConfigKeys.ENCYPTION_PREFIX);
    Config config3 = config2.withFallback(encConfig);
    FSDatasetDescriptor descriptor3 = new FSDatasetDescriptor(config3);
    // The more-specific encrypted descriptor is still contained by the looser ones.
    Assert.assertEquals(descriptor2.contains(descriptor3).size(), 0);
    Assert.assertEquals(descriptor1.contains(descriptor3).size(), 0);
    //Add partition config
    Config partitionConfig = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY, ConfigValueFactory.fromAnyRef("datetime"))
        .withValue(DatasetDescriptorConfigKeys.PARTITION_PATTERN_KEY, ConfigValueFactory.fromAnyRef("yyyy/MM/dd"))
        .atPath(DatasetDescriptorConfigKeys.PARTITION_PREFIX)
    Config config4 = config3.withFallback(partitionConfig);
    FSDatasetDescriptor descriptor4 = new FSDatasetDescriptor(config4);
    Assert.assertEquals(descriptor3.contains(descriptor4).size(), 0);
    Assert.assertEquals(descriptor2.contains(descriptor4).size(), 0);
    Assert.assertEquals(descriptor1.contains(descriptor4).size(), 0);
    //Add compaction/retention config
    // Compacted/retention-applied flags make descriptor5 incompatible with all the
    // earlier descriptors: contains() now reports mismatches (non-empty result).
    Config miscConfig = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.IS_COMPACTED_AND_DEDUPED_KEY, ConfigValueFactory.fromAnyRef("true"))
        .withValue(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, ConfigValueFactory.fromAnyRef("true"));
    Config config5 = config4.withFallback(miscConfig);
    FSDatasetDescriptor descriptor5 = new FSDatasetDescriptor(config5);
    Assert.assertNotEquals(descriptor4.contains(descriptor5).size(), 0);
    Assert.assertNotEquals(descriptor3.contains(descriptor5).size(), 0);
    Assert.assertNotEquals(descriptor2.contains(descriptor5).size(), 0);
    Assert.assertNotEquals(descriptor1.contains(descriptor5).size(), 0);
    // Test subpaths
    // "/a/b/c" with subpaths {e,f,g} is contained by descriptor1's "/a/b/c/*" glob,
    // but not by descriptor2's single concrete path "/a/b/c/d".
    Config subPathConfig = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c"))
        .withValue(DatasetDescriptorConfigKeys.SUBPATHS_KEY, ConfigValueFactory.fromAnyRef("{e,f,g}"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"));
    FSDatasetDescriptor descriptor6 = new FSDatasetDescriptor(subPathConfig);
    Assert.assertEquals(descriptor1.contains(descriptor6).size(), 0);
    Assert.assertNotEquals(descriptor2.contains(descriptor6).size(), 0);
    //Test fs.uri
    // Volume descriptors additionally compare fs.uri: a volume descriptor without a URI
    // (from config1) contains one with a URI, but not vice versa, and two different URIs
    // (test-cluster vs test-cluster_1) do not contain each other.
    Config config7 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/d"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"))
        .withValue(DatasetDescriptorConfigKeys.FORMAT_KEY, ConfigValueFactory.fromAnyRef("avro"))
        .withValue(DatasetDescriptorConfigKeys.CODEC_KEY, ConfigValueFactory.fromAnyRef("gzip"))
        .withValue(DatasetDescriptorConfigKeys.FS_URI_KEY, ConfigValueFactory.fromAnyRef("hdfs://test-cluster:9000"));
    Config config8 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/d"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"))
        .withValue(DatasetDescriptorConfigKeys.FORMAT_KEY, ConfigValueFactory.fromAnyRef("avro"))
        .withValue(DatasetDescriptorConfigKeys.CODEC_KEY, ConfigValueFactory.fromAnyRef("gzip"))
        .withValue(DatasetDescriptorConfigKeys.FS_URI_KEY, ConfigValueFactory.fromAnyRef("hdfs://test-cluster_1:9000"));
    FSVolumeDatasetDescriptor descriptor7 = new FSVolumeDatasetDescriptor(config7);
    FSVolumeDatasetDescriptor volumeDescriptor = new FSVolumeDatasetDescriptor(config1);
    FSVolumeDatasetDescriptor descriptor8 = new FSVolumeDatasetDescriptor(config8);
    Assert.assertEquals(descriptor1.contains(descriptor6).size(), 0);
    Assert.assertNotEquals(descriptor2.contains(descriptor6).size(), 0);
    Assert.assertEquals(volumeDescriptor.contains(descriptor7).size(), 0);
    Assert.assertNotEquals(descriptor7.contains(volumeDescriptor).size(), 0);
    Assert.assertNotEquals(descriptor8.contains(descriptor7).size(), 0);
  }
@Test
public void testContainsMatchingPaths() throws IOException {
// Paths that match exactly should be accepted, and that should allow glob patterns as input paths for the self serve edges
Config config1 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
.withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"));
FSDatasetDescriptor descriptor1 = new FSDatasetDescriptor(config1);
Config config2 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
.withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"));
FSDatasetDescriptor descriptor2 = new FSDatasetDescriptor(config2);
Assert.assertEquals(descriptor1.contains(descriptor2).size(), 0);
}
  /**
   * Verifies the equals/hashCode contract of {@link FSDatasetDescriptor}: identically
   * configured descriptors are symmetric-equal with equal hash codes; explicit defaults
   * ("any" format, retention false) do not break equality; and swapping the values of the
   * two boolean flags yields unequal descriptors with distinct hash codes.
   */
  @Test
  public void testEquals() throws IOException {
    Config config1 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"));
    FSDatasetDescriptor descriptor1 = new FSDatasetDescriptor(config1);
    Config config2 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"));
    FSDatasetDescriptor descriptor2 = new FSDatasetDescriptor(config2);
    // Symmetric equality and matching hash codes for identical configs.
    Assert.assertEquals(descriptor2, descriptor1);
    Assert.assertEquals(descriptor1, descriptor2);
    Assert.assertEquals(descriptor1.hashCode(), descriptor2.hashCode());
    // Explicitly stating defaults (format "any", retention false) keeps equality intact.
    Config config3 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"))
        .withValue(DatasetDescriptorConfigKeys.FORMAT_KEY, ConfigValueFactory.fromAnyRef("any"))
        .withValue(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, ConfigValueFactory.fromAnyRef("false"));
    FSDatasetDescriptor descriptor3 = new FSDatasetDescriptor(config3);
    Assert.assertEquals(descriptor3, descriptor1);
    Assert.assertEquals(descriptor1.hashCode(), descriptor3.hashCode());
    //Ensure switching booleans between 2 boolean member variables does not produce the same hashcode.
    Config config4 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"))
        .withValue(DatasetDescriptorConfigKeys.FORMAT_KEY, ConfigValueFactory.fromAnyRef("any"))
        .withValue(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, ConfigValueFactory.fromAnyRef("false"))
        .withValue(DatasetDescriptorConfigKeys.IS_COMPACTED_KEY, ConfigValueFactory.fromAnyRef("true"));
    FSDatasetDescriptor descriptor4 = new FSDatasetDescriptor(config4);
    Config config5 = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"))
        .withValue(DatasetDescriptorConfigKeys.FORMAT_KEY, ConfigValueFactory.fromAnyRef("any"))
        .withValue(DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, ConfigValueFactory.fromAnyRef("true"))
        .withValue(DatasetDescriptorConfigKeys.IS_COMPACTED_KEY, ConfigValueFactory.fromAnyRef("false"));
    FSDatasetDescriptor descriptor5 = new FSDatasetDescriptor(config5);
    Assert.assertNotEquals(descriptor5, descriptor4);
    Assert.assertNotEquals(descriptor4.hashCode(), descriptor5.hashCode());
  }
@Test
public void testInitFails() {
  Config baseConfig = ConfigFactory.empty()
      .withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("/a/b/c/*"))
      .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hdfs"));
  // Datetime partition type with a pattern that is not a valid datetime format.
  Config badDatetime = baseConfig.withFallback(ConfigFactory.empty()
      .withValue(DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY, ConfigValueFactory.fromAnyRef("datetime"))
      .withValue(DatasetDescriptorConfigKeys.PARTITION_PATTERN_KEY, ConfigValueFactory.fromAnyRef("BBBB/MM/dd"))
      .atPath(DatasetDescriptorConfigKeys.PARTITION_PREFIX));
  Assert.assertThrows(IOException.class, () -> new FSDatasetDescriptor(badDatetime));
  // Regex partition type with a pattern that is not a valid regular expression.
  Config badRegex = baseConfig.withFallback(ConfigFactory.empty()
      .withValue(DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY, ConfigValueFactory.fromAnyRef("regex"))
      .withValue(DatasetDescriptorConfigKeys.PARTITION_PATTERN_KEY, ConfigValueFactory.fromAnyRef("["))
      .atPath(DatasetDescriptorConfigKeys.PARTITION_PREFIX));
  Assert.assertThrows(IOException.class, () -> new FSDatasetDescriptor(badRegex));
  // Partition config with an unrecognized partition type.
  Config badType = baseConfig.withFallback(ConfigFactory.empty()
      .withValue(DatasetDescriptorConfigKeys.PARTITION_TYPE_KEY, ConfigValueFactory.fromAnyRef("invalidType"))
      .withValue(DatasetDescriptorConfigKeys.PARTITION_PATTERN_KEY, ConfigValueFactory.fromAnyRef("aaaa"))
      .atPath(DatasetDescriptorConfigKeys.PARTITION_PREFIX));
  Assert.assertThrows(IOException.class, () -> new FSDatasetDescriptor(badType));
  // Encryption config with an unrecognized encryption level.
  Config badLevel = baseConfig.withFallback(ConfigFactory.empty()
      .withValue(DatasetDescriptorConfigKeys.ENCRYPTION_LEVEL_KEY, ConfigValueFactory.fromAnyRef("aaaa"))
      .atPath(DatasetDescriptorConfigKeys.ENCYPTION_PREFIX));
  Assert.assertThrows(IOException.class, () -> new FSDatasetDescriptor(badLevel));
  // "field"-level encryption but no encrypted-fields list supplied.
  Config fieldLevelNoFields = baseConfig.withFallback(ConfigFactory.empty()
      .withValue(DatasetDescriptorConfigKeys.ENCRYPTION_LEVEL_KEY, ConfigValueFactory.fromAnyRef("field"))
      .atPath(DatasetDescriptorConfigKeys.ENCYPTION_PREFIX));
  Assert.assertThrows(IOException.class, () -> new FSDatasetDescriptor(fieldLevelNoFields));
  // Encryption level "none" combined with an encrypted-fields list.
  Config noneWithFields = baseConfig.withFallback(ConfigFactory.empty()
      .withValue(DatasetDescriptorConfigKeys.ENCRYPTION_LEVEL_KEY, ConfigValueFactory.fromAnyRef("none"))
      .withValue(DatasetDescriptorConfigKeys.ENCRYPTED_FIELDS, ConfigValueFactory.fromAnyRef("field1"))
      .atPath(DatasetDescriptorConfigKeys.ENCYPTION_PREFIX));
  Assert.assertThrows(IOException.class, () -> new FSDatasetDescriptor(noneWithFields));
}
} | 3,786 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.dataset;
import java.io.IOException;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
public class HttpDatasetDescriptorTest {
@Test
public void testContains() throws IOException {
Config config1 = ConfigFactory.empty()
.withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("https"))
.withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("https://a.com/b"));
HttpDatasetDescriptor descriptor1 = new HttpDatasetDescriptor(config1);
// Verify that same path points to same dataset
Config config2 = ConfigFactory.empty()
.withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("https"))
.withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("https://a.com/b"));
HttpDatasetDescriptor descriptor2 = new HttpDatasetDescriptor(config2);
Assert.assertEquals(descriptor2.contains(descriptor1).size(), 0);
// Verify that same path but different platform points to different dataset
Config config3 = ConfigFactory.empty()
.withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("http"))
.withValue(DatasetDescriptorConfigKeys.PATH_KEY, ConfigValueFactory.fromAnyRef("https://a.com/b"));
HttpDatasetDescriptor descriptor3 = new HttpDatasetDescriptor(config3);
Assert.assertNotEquals(descriptor3.contains(descriptor1).size(), 0);
}
} | 3,787 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.dataset;
import java.io.IOException;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
public class HiveDatasetDescriptorTest {
Config baseConfig = ConfigFactory.empty().withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef("hive"))
.withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef("testDb_Db1"))
.withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef("testTable_Table1"));;
@Test
public void objectCreation() throws IOException {
SqlDatasetDescriptor descriptor1 = new HiveDatasetDescriptor(baseConfig.withValue(HiveDatasetDescriptor.IS_PARTITIONED_KEY, ConfigValueFactory.fromAnyRef(true)));
SqlDatasetDescriptor descriptor2 = new HiveDatasetDescriptor(baseConfig.withValue(HiveDatasetDescriptor.IS_PARTITIONED_KEY, ConfigValueFactory.fromAnyRef(false)));
Assert.assertNotEquals(descriptor1, descriptor2);
}
@Test
public void testWhitelist() throws IOException {
HiveDatasetDescriptor descriptor = new HiveDatasetDescriptor(baseConfig
.withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef("test*"))
.withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef("abc,def,fgh"))
);
Assert.assertTrue(descriptor.whitelistBlacklist.acceptTable("testDb", "abc"));
Assert.assertTrue(descriptor.whitelistBlacklist.acceptTable("testDb", "abca"));
Assert.assertFalse(descriptor.whitelistBlacklist.acceptTable("otherTestDb", "abc"));
descriptor = new HiveDatasetDescriptor(baseConfig
.withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef("testDb"))
.withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef("abc,def,ghi"))
);
Assert.assertTrue(descriptor.whitelistBlacklist.acceptTable("testDb", "abc"));
Assert.assertFalse(descriptor.whitelistBlacklist.acceptTable("testDb", "abca"));
Assert.assertFalse(descriptor.whitelistBlacklist.acceptTable("otherTestDb", "abc"));
descriptor = new HiveDatasetDescriptor(baseConfig
.withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef("testDb"))
.withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef("abc*,ghi"))
);
Assert.assertTrue(descriptor.whitelistBlacklist.acceptTable("testDb", "abc"));
Assert.assertTrue(descriptor.whitelistBlacklist.acceptTable("testDb", "ghi"));
Assert.assertTrue(descriptor.whitelistBlacklist.acceptTable("testDb", "abcabc"));
Assert.assertFalse(descriptor.whitelistBlacklist.acceptTable("otherTestDb", "abc"));
}
} | 3,788 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.dataset;
import java.io.IOException;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
public class IcebergDatasetDescriptorTest {
  // Reference descriptor config the containment checks compare against.
  Config baseConfig = createDatasetDescriptorConfig("iceberg", "testDb_Db1", "testTable_Table1");

  @Test
  public void testIsPathContaining() throws IOException {
    IcebergDatasetDescriptor current = new IcebergDatasetDescriptor(
        createDatasetDescriptorConfig("iceberg", "testDb_Db1", "testTable_Table1"));
    IcebergDatasetDescriptor other = new IcebergDatasetDescriptor(
        createDatasetDescriptorConfig("iceberg", "testDb_Db1", "testTable_Table2"));
    IcebergDatasetDescriptor reference = new IcebergDatasetDescriptor(baseConfig);
    // Identical db and table: no containment errors reported.
    Assert.assertEquals(reference.isPathContaining(current).size(), 0);
    // Different table name: containment check reports errors.
    Assert.assertNotEquals(reference.isPathContaining(other).size(), 0);
  }

  /** Assembles a descriptor config from platform, database, and table names. */
  private Config createDatasetDescriptorConfig(String platform, String dbName, String tableName) {
    return ConfigFactory.empty()
        .withValue(DatasetDescriptorConfigKeys.PLATFORM_KEY, ConfigValueFactory.fromAnyRef(platform))
        .withValue(DatasetDescriptorConfigKeys.DATABASE_KEY, ConfigValueFactory.fromAnyRef(dbName))
        .withValue(DatasetDescriptorConfigKeys.TABLE_KEY, ConfigValueFactory.fromAnyRef(tableName));
  }
}
| 3,789 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.spec;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.flowgraph.FlowGraphConfigurationKeys;
import org.apache.gobblin.service.modules.template.FlowTemplate;
import org.apache.gobblin.service.modules.template_catalog.FSFlowTemplateCatalog;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Tests for {@link JobExecutionPlanDagFactory}: verifies that lists of
 * {@link JobExecutionPlan}s are assembled into {@link Dag}s with the expected
 * start/end nodes, parent-child edges, and propagated job-level configuration.
 */
public class JobExecutionPlanDagFactoryTest {
// Name and "FS:///" URI of the flow template loaded from the test resources in setUp().
private static final String TEST_TEMPLATE_NAME = "flowEdgeTemplate";
private static final String TEST_TEMPLATE_URI = "FS:///" + TEST_TEMPLATE_NAME;
// In-memory spec executor shared by the tests; built in setUp().
private SpecExecutor specExecutor;
// Job templates extracted from the flow template in setUp().
private List<JobTemplate> jobTemplates;
/**
 * Loads the flow template from the "template_catalog" classpath resource and
 * creates the shared {@link InMemorySpecExecutor}.
 */
@BeforeClass
public void setUp() throws URISyntaxException, IOException, SpecNotFoundException, JobTemplate.TemplateException {
// Create a FSFlowTemplateCatalog instance
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
Properties properties = new Properties();
properties.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, flowTemplateCatalogUri.toString());
Config config = ConfigFactory.parseProperties(properties);
Config templateCatalogCfg = config
.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
FSFlowTemplateCatalog catalog = new FSFlowTemplateCatalog(templateCatalogCfg);
FlowTemplate flowTemplate = catalog.getFlowTemplate(new URI(TEST_TEMPLATE_URI));
this.jobTemplates = flowTemplate.getJobTemplates();
//Create a spec executor instance
properties = new Properties();
properties.put("specStore.fs.dir", "/tmp/testSpecStoreDir");
properties.put("specExecInstance.capabilities", "source:destination");
properties.put(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, "testSpecExecutorInstanceUri");
Config specExecutorConfig = ConfigUtils.propertiesToConfig(properties);
this.specExecutor = new InMemorySpecExecutor(specExecutorConfig);
}
/**
 * Builds a DAG from the template's jobs and checks its shape: one start node
 * ("job1"), one end node ("job4"), four nodes total with "job2"/"job3" in the
 * middle layer, and flow-edge id plus spec-executor URI carried into the job specs.
 */
@Test
public void testCreateDag() throws Exception {
//Create a list of JobExecutionPlans
List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
for (JobTemplate jobTemplate: this.jobTemplates) {
Config config = jobTemplate.getRawTemplateConfig()
.withValue(ConfigurationKeys.FLOW_NAME_KEY, ConfigValueFactory.fromAnyRef("testFlowName"))
.withValue(ConfigurationKeys.FLOW_GROUP_KEY, ConfigValueFactory.fromAnyRef("testFlowGroup"))
.withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(System.currentTimeMillis()))
.withValue(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, ConfigValueFactory.fromAnyRef("source:destination:edgeName1"));
// The spec URI is the template file's base name (e.g. "job1" from "job1.job").
String jobSpecUri = Files.getNameWithoutExtension(new Path(jobTemplate.getUri()).getName());
jobExecutionPlans.add(new JobExecutionPlan(JobSpec.builder(jobSpecUri).withConfig(config).
withVersion("1").withTemplate(jobTemplate.getUri()).build(), specExecutor));
}
//Create a DAG from job execution plans.
Dag<JobExecutionPlan> dag = new JobExecutionPlanDagFactory().createDag(jobExecutionPlans);
//Test DAG properties
Assert.assertEquals(dag.getStartNodes().size(), 1);
Assert.assertEquals(dag.getEndNodes().size(), 1);
Assert.assertEquals(dag.getNodes().size(), 4);
String startNodeName = new Path(dag.getStartNodes().get(0).getValue().getJobSpec().getUri()).getName();
Assert.assertEquals(startNodeName, "job1");
String templateUri = new Path(dag.getStartNodes().get(0).getValue().getJobSpec().getTemplateURI().get()).getName();
Assert.assertEquals(templateUri, "job1.job");
String endNodeName = new Path(dag.getEndNodes().get(0).getValue().getJobSpec().getUri()).getName();
Assert.assertEquals(endNodeName, "job4");
templateUri = new Path(dag.getEndNodes().get(0).getValue().getJobSpec().getTemplateURI().get()).getName();
Assert.assertEquals(templateUri, "job4.job");
// Flow-edge id and spec-executor URI set above must survive into the start node's spec.
String flowEdgeId = dag.getStartNodes().get(0).getValue().getJobSpec().getConfig().getString(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY);
Assert.assertEquals(flowEdgeId, "source:destination:edgeName1");
String specExecutorId = dag.getStartNodes().get(0).getValue().getSpecExecutor().getUri().toString();
Assert.assertEquals(specExecutorId, "testSpecExecutorInstanceUri");
// Both children of the start node must themselves lead to "job4".
Dag.DagNode<JobExecutionPlan> startNode = dag.getStartNodes().get(0);
List<Dag.DagNode<JobExecutionPlan>> nextNodes = dag.getChildren(startNode);
Set<String> nodeSet = new HashSet<>();
for (Dag.DagNode<JobExecutionPlan> node: nextNodes) {
nodeSet.add(new Path(node.getValue().getJobSpec().getUri()).getName());
Dag.DagNode<JobExecutionPlan> nextNode = dag.getChildren(node).get(0);
Assert.assertEquals(new Path(nextNode.getValue().getJobSpec().getUri()).getName(), "job4");
}
Assert.assertTrue(nodeSet.contains("job2"));
Assert.assertTrue(nodeSet.contains("job3"));
}
/**
 * Three plans where the first and third share the job name "job1": chained via
 * job dependencies, the resulting DAG must still be a linear three-node chain
 * with each node's only parent being the previous node.
 */
@Test
public void testCreateDagWithDuplicateJobNames() throws Exception {
Config flowConfig1 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flowName")
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "flowGroup").build();
Config flowConfig2 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flowName")
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "flowGroup").build();
Config flowConfig3 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flowName")
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "flowGroup").build();
List<Config> flowConfigs = Arrays.asList(flowConfig1, flowConfig2, flowConfig3);
Config jobConfig1 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.JOB_NAME_KEY, "job1")
.addPrimitive(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "source:destination:edgeName1").build();
Config jobConfig2 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.JOB_NAME_KEY, "job2")
.addPrimitive(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "source:destination:edgeName2").build();
Config jobConfig3 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.JOB_NAME_KEY, "job1")
.addPrimitive(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "source:destination:edgeName3").build();
List<Config> jobConfigs = Arrays.asList(jobConfig1, jobConfig2, jobConfig3);
List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
for (int i = 0; i < 3; i++) {
Config jobConfig = jobConfigs.get(i);
// Each job after the first declares the previous job as its dependency.
if (i > 0) {
String previousJobName = jobExecutionPlans.get(jobExecutionPlans.size() - 1).getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY);
jobConfig = jobConfig.withValue(ConfigurationKeys.JOB_DEPENDENCIES, ConfigValueFactory.fromAnyRef(previousJobName));
}
FlowSpec flowSpec = FlowSpec.builder("testFlowSpec").withConfig(flowConfigs.get(i)).build();
jobExecutionPlans.add(new JobExecutionPlan.Factory().createPlan(flowSpec, jobConfig.withValue(ConfigurationKeys.JOB_TEMPLATE_PATH,
ConfigValueFactory.fromAnyRef("testUri")), new InMemorySpecExecutor(ConfigFactory.empty()), 0L, ConfigFactory.empty()));
}
Dag<JobExecutionPlan> dag = new JobExecutionPlanDagFactory().createDag(jobExecutionPlans);
Assert.assertEquals(dag.getStartNodes().size(), 1);
Assert.assertEquals(dag.getEndNodes().size(), 1);
Assert.assertEquals(dag.getNodes().size(), 3);
Assert.assertNull(dag.getNodes().get(0).getParentNodes());
Assert.assertEquals(dag.getNodes().get(1).getParentNodes().size(), 1);
Assert.assertEquals(dag.getNodes().get(2).getParentNodes().size(), 1);
Assert.assertEquals(dag.getNodes().get(1).getParentNodes().get(0), dag.getNodes().get(0));
Assert.assertEquals(dag.getNodes().get(2).getParentNodes().get(0), dag.getNodes().get(1));
}
/**
 * Builds two single-node DAGs, one from a scheduled flow and one without a
 * schedule, and checks the job-level-metrics flag is set only for the scheduled one.
 */
@Test
public void testCreateDagAdhoc() throws Exception {
Config flowConfig1 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flowName")
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "flowGroup")
.addPrimitive(ConfigurationKeys.JOB_SCHEDULE_KEY, "0/2 * * * * ?").build();
Config flowConfig2 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flowName")
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "flowGroup").build();
List<Config> flowConfigs = Arrays.asList(flowConfig1, flowConfig2);
Config jobConfig1 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.JOB_NAME_KEY, "job1")
.addPrimitive(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "source:destination:edgeName1")
.addPrimitive(ConfigurationKeys.JOB_SCHEDULE_KEY, "0/2 * * * * ?").build();
Config jobConfig2 = ConfigBuilder.create().addPrimitive(ConfigurationKeys.JOB_NAME_KEY, "job2")
.addPrimitive(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "source:destination:edgeName2").build();
List<Config> jobConfigs = Arrays.asList(jobConfig1, jobConfig2);
List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
for (int i = 0; i < 2; i++) {
Config jobConfig = jobConfigs.get(i);
FlowSpec flowSpec = FlowSpec.builder("testFlowSpec").withConfig(flowConfigs.get(i)).build();
jobExecutionPlans.add(new JobExecutionPlan.Factory().createPlan(flowSpec, jobConfig.withValue(ConfigurationKeys.JOB_TEMPLATE_PATH,
ConfigValueFactory.fromAnyRef("testUri")), new InMemorySpecExecutor(ConfigFactory.empty()), 0L, ConfigFactory.empty()));
}
Dag<JobExecutionPlan> dag1 = new JobExecutionPlanDagFactory().createDag(Arrays.asList(jobExecutionPlans.get(0)));
Dag<JobExecutionPlan> dag2 = new JobExecutionPlanDagFactory().createDag(Arrays.asList(jobExecutionPlans.get(1)));
Assert.assertEquals(dag1.getStartNodes().size(), 1);
Assert.assertEquals(dag1.getEndNodes().size(), 1);
Assert.assertEquals(dag1.getNodes().size(), 1);
Assert.assertEquals(dag2.getStartNodes().size(), 1);
Assert.assertEquals(dag2.getEndNodes().size(), 1);
Assert.assertEquals(dag2.getNodes().size(), 1);
// dag1 carries a schedule and is expected to output job-level metrics; unscheduled dag2 is not.
Assert.assertTrue(dag1.getStartNodes().get(0).getValue().getJobSpec().getConfig().getBoolean(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS));
Assert.assertFalse(dag2.getStartNodes().get(0).getValue().getJobSpec().getConfig().getBoolean(ConfigurationKeys.GOBBLIN_OUTPUT_JOB_LEVEL_METRICS));
}
/**
 * With flowName and flowGroup at the 128-character maximum, the generated
 * job name in the spec config ends up 139 characters long.
 */
@Test
public void testCreateDagLongName() throws Exception {
// flowName and flowGroup are both 128 characters long, the maximum for flowName and flowGroup
Config flowConfig = ConfigBuilder.create().addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "uwXJwZPAPygvmSAfhtrzXL7ovIEKOBZdulBiNIGzaT7vILrK9QB5EDJj0fc4pkgNHuIKZ3d18TZzyH6a9HpaZACwpWpIpf8SYcSfKtXeoF8IJY064BqEUXR32k3ox31G")
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "4mdfSGSv6GoFW7ICWubN2ORK4s5PMTQ60yIWkcbJOVneTSPn12cXT5ueEgij907tjzLlbcjdVjWFITFf9Y5sB9i0EvKGmTbUF98hJGoQlAhmottaipDEFTdbyzt5Loxg")
.addPrimitive(ConfigurationKeys.JOB_SCHEDULE_KEY, "0/2 * * * * ?").build();
Config jobConfig = ConfigBuilder.create()
.addPrimitive(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "source:destination:edgeName1")
.addPrimitive(ConfigurationKeys.JOB_SCHEDULE_KEY, "0/2 * * * * ?").build();
FlowSpec flowSpec = FlowSpec.builder("testFlowSpec").withConfig(flowConfig).build();
JobExecutionPlan jobExecutionPlan = new JobExecutionPlan.Factory().createPlan(flowSpec, jobConfig.withValue(ConfigurationKeys.JOB_TEMPLATE_PATH,
ConfigValueFactory.fromAnyRef("testUri")), new InMemorySpecExecutor(ConfigFactory.empty()), 0L, ConfigFactory.empty());
Dag<JobExecutionPlan> dag1 = new JobExecutionPlanDagFactory().createDag(Arrays.asList(jobExecutionPlan));
Assert.assertEquals(dag1.getStartNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.JOB_NAME_KEY).length(), 139);
}
/**
 * Verifies that the flow's modification time and the flow-edge id are copied
 * into the generated job spec's config.
 */
@Test
public void testCreateJobSpecAdditionalProps() throws Exception {
long currentTime = System.currentTimeMillis();
Config flowConfig = ConfigBuilder.create().addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flowName")
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "flowGroup")
.addPrimitive(ConfigurationKeys.JOB_SCHEDULE_KEY, "0/2 * * * * ?")
.addPrimitive(FlowSpec.MODIFICATION_TIME_KEY, currentTime).build();
Config jobConfig = ConfigBuilder.create()
.addPrimitive(FlowGraphConfigurationKeys.FLOW_EDGE_ID_KEY, "source:destination:edgeName1")
.addPrimitive(ConfigurationKeys.JOB_SCHEDULE_KEY, "0/2 * * * * ?").build();
FlowSpec flowSpec = FlowSpec.builder("testFlowSpec").withConfig(flowConfig).build();
JobExecutionPlan jobExecutionPlan = new JobExecutionPlan.Factory().createPlan(flowSpec, jobConfig.withValue(ConfigurationKeys.JOB_TEMPLATE_PATH,
ConfigValueFactory.fromAnyRef("testUri")), new InMemorySpecExecutor(ConfigFactory.empty()), 0L, ConfigFactory.empty());
Dag<JobExecutionPlan> dag1 = new JobExecutionPlanDagFactory().createDag(Arrays.asList(jobExecutionPlan));
Assert.assertEquals(dag1.getStartNodes().get(0).getValue().getJobSpec().getConfig().getLong(FlowSpec.MODIFICATION_TIME_KEY), currentTime);
Assert.assertEquals(dag1.getStartNodes().get(0).getValue().getJobSpec().getConfig().getString(ConfigurationKeys.FLOW_EDGE_ID_KEY), "source:destination:edgeName1");
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.scheduler;
import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.mockito.Mockito;
import org.mockito.invocation.Invocation;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.Spec;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.app.ServiceBasedAppLauncher;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalogTest;
import org.apache.gobblin.runtime.spec_catalog.TopologyCatalog;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.scheduler.SchedulerService;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.flow.MockedSpecCompiler;
import org.apache.gobblin.service.modules.flow.SpecCompiler;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.orchestration.AbstractUserQuotaManager;
import org.apache.gobblin.service.modules.orchestration.InMemoryUserQuotaManager;
import org.apache.gobblin.service.modules.orchestration.Orchestrator;
import org.apache.gobblin.service.modules.orchestration.FlowTriggerHandler;
import org.apache.gobblin.service.modules.orchestration.UserQuotaManager;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.runtime.spec_catalog.FlowCatalog.FLOWSPEC_STORE_DIR_KEY;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.when;
public class GobblinServiceJobSchedulerTest {
// Flow identifiers used by the scheduler tests.
private static final String TEST_GROUP_NAME = "testGroup";
private static final String TEST_FLOW_NAME = "testFlow";
// Cron-style schedule string for tests that need a scheduled (non-adhoc) flow.
private static final String TEST_SCHEDULE = "0 1/0 * ? * *";
// URI of a test flow template; presumably resolved by the compiler under test — confirm against usage.
private static final String TEST_TEMPLATE_URI = "FS:///templates/test.template";
// Per-flow-group quota config; initialized in setUp() to limit "group1" to one flow.
private Config quotaConfig;
@BeforeClass
public void setUp() {
  // Seed the per-flow-group quota used by quota-aware tests: flow group "group1" is limited to one flow.
  Config quota = ConfigFactory.empty()
      .withValue(AbstractUserQuotaManager.PER_FLOWGROUP_QUOTA, ConfigValueFactory.fromAnyRef("group1:1"));
  this.quotaConfig = quota;
}
@Test
public void testIsNextRunWithinRangeToSchedule() throws Throwable {
  int threshold = 100;
  // Schedules whose next fire time is decades away fall outside the range.
  Assert.assertFalse(GobblinServiceJobScheduler.isWithinRange("0 0 0 ? 1 1 2050", threshold));
  Assert.assertFalse(GobblinServiceJobScheduler.isWithinRange("0 0 0 ? 1 1 2030", threshold));
  // A daily 3:20am schedule fires within the range.
  Assert.assertTrue(GobblinServiceJobScheduler.isWithinRange("0 20 3 * * ?", threshold));
  // A Sunday/Monday 4am schedule fires within the range.
  Assert.assertTrue(GobblinServiceJobScheduler.isWithinRange("0 * 4 ? * 1,2", threshold));
  // Adhoc flows carry an empty schedule string and are always considered in range.
  Assert.assertTrue(GobblinServiceJobScheduler.isWithinRange("", threshold));
  // With a zero threshold, today's midnight schedule is already in the past.
  Assert.assertFalse(GobblinServiceJobScheduler.isWithinRange("0 0 0 * * ?", 0));
  // Schedules whose valid fire times lie entirely in the past are rejected.
  Assert.assertFalse(GobblinServiceJobScheduler.isWithinRange("0 0 0 ? * 6L 2022", threshold));
}
/**
* Test whenever JobScheduler is calling setActive, the FlowSpec is loading into scheduledFlowSpecs (eventually)
*/
@Test
public void testJobSchedulerInit() throws Throwable {
// Mock a FlowCatalog.
File specDir = Files.createTempDir();
Properties properties = new Properties();
properties.setProperty(FLOWSPEC_STORE_DIR_KEY, specDir.getAbsolutePath());
FlowCatalog flowCatalog = new FlowCatalog(ConfigUtils.propertiesToConfig(properties));
SpecCatalogListener mockListener = Mockito.mock(SpecCatalogListener.class);
when(mockListener.getName()).thenReturn(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS);
when(mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(""));
flowCatalog.addListener(mockListener);
ServiceBasedAppLauncher serviceLauncher = new ServiceBasedAppLauncher(properties, "GaaSJobSchedulerTest");
serviceLauncher.addService(flowCatalog);
serviceLauncher.start();
FlowSpec flowSpec0 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec0"));
FlowSpec flowSpec1 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec1"));
flowCatalog.put(flowSpec0, true);
flowCatalog.put(flowSpec1, true);
Assert.assertEquals(flowCatalog.getSpecs().size(), 2);
Orchestrator mockOrchestrator = Mockito.mock(Orchestrator.class);
UserQuotaManager quotaManager = new InMemoryUserQuotaManager(quotaConfig);
// Mock a GaaS scheduler.
TestGobblinServiceJobScheduler scheduler = new TestGobblinServiceJobScheduler("testscheduler",
ConfigFactory.empty(), Optional.of(flowCatalog), null, mockOrchestrator, Optional.of(quotaManager), null, false);
SpecCompiler mockCompiler = Mockito.mock(SpecCompiler.class);
Mockito.when(mockOrchestrator.getSpecCompiler()).thenReturn(mockCompiler);
Mockito.doAnswer((Answer<Void>) a -> {
scheduler.isCompilerHealthy = true;
return null;
}).when(mockCompiler).awaitHealthy();
scheduler.setActive(true);
AssertWithBackoff.create().timeoutMs(20000).maxSleepMs(2000).backoffFactor(2)
.assertTrue(new Predicate<Void>() {
@Override
public boolean apply(Void input) {
Map<String, Spec> scheduledFlowSpecs = scheduler.scheduledFlowSpecs;
if (scheduledFlowSpecs != null && scheduledFlowSpecs.size() == 2) {
return scheduler.scheduledFlowSpecs.containsKey("spec0") &&
scheduler.scheduledFlowSpecs.containsKey("spec1");
} else {
return false;
}
}
}, "Waiting all flowSpecs to be scheduled");
}
@Test
public void testDisableFlowRunImmediatelyOnStart()
throws Exception {
Properties properties = new Properties();
properties.setProperty(ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "true");
properties.setProperty(ConfigurationKeys.JOB_SCHEDULE_KEY, TEST_SCHEDULE);
properties.setProperty(ConfigurationKeys.JOB_GROUP_KEY, TEST_GROUP_NAME);
properties.setProperty(ConfigurationKeys.JOB_NAME_KEY, TEST_FLOW_NAME);
Config config = ConfigFactory.parseProperties(properties);
FlowSpec spec = FlowSpec.builder().withTemplate(new URI(TEST_TEMPLATE_URI)).withVersion("version")
.withConfigAsProperties(properties).withConfig(config).build();
FlowSpec modifiedSpec = (FlowSpec) GobblinServiceJobScheduler.disableFlowRunImmediatelyOnStart(spec);
for (URI templateURI : modifiedSpec.getTemplateURIs().get()) {
Assert.assertEquals(templateURI.toString(), TEST_TEMPLATE_URI);
}
Assert.assertEquals(modifiedSpec.getVersion(), "version");
Config modifiedConfig = modifiedSpec.getConfig();
Assert.assertFalse(modifiedConfig.getBoolean(ConfigurationKeys.FLOW_RUN_IMMEDIATELY));
Assert.assertEquals(modifiedConfig.getString(ConfigurationKeys.JOB_GROUP_KEY), TEST_GROUP_NAME);
Assert.assertEquals(modifiedConfig.getString(ConfigurationKeys.JOB_NAME_KEY), TEST_FLOW_NAME);
}
/**
* Test that flowSpecs that throw compilation errors do not block the scheduling of other flowSpecs
*/
@Test
public void testJobSchedulerInitWithFailedSpec() throws Throwable {
// Mock a FlowCatalog.
File specDir = Files.createTempDir();
Properties properties = new Properties();
properties.setProperty(FLOWSPEC_STORE_DIR_KEY, specDir.getAbsolutePath());
FlowCatalog flowCatalog = new FlowCatalog(ConfigUtils.propertiesToConfig(properties));
ServiceBasedAppLauncher serviceLauncher = new ServiceBasedAppLauncher(properties, "GaaSJobSchedulerTest");
// Assume that the catalog can store corrupted flows
SpecCatalogListener mockListener = Mockito.mock(SpecCatalogListener.class);
when(mockListener.getName()).thenReturn(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS);
when(mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(""));
flowCatalog.addListener(mockListener);
serviceLauncher.addService(flowCatalog);
serviceLauncher.start();
FlowSpec flowSpec0 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec0"),
MockedSpecCompiler.UNCOMPILABLE_FLOW);
FlowSpec flowSpec1 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec1"));
FlowSpec flowSpec2 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec2"));
// Ensure that these flows are scheduled
flowCatalog.put(flowSpec0, true);
flowCatalog.put(flowSpec1, true);
flowCatalog.put(flowSpec2, true);
Assert.assertEquals(flowCatalog.getSpecs().size(), 3);
Orchestrator mockOrchestrator = Mockito.mock(Orchestrator.class);
// Mock a GaaS scheduler.
TestGobblinServiceJobScheduler scheduler = new TestGobblinServiceJobScheduler("testscheduler",
ConfigFactory.empty(), Optional.of(flowCatalog), null, mockOrchestrator, Optional.of(new InMemoryUserQuotaManager(quotaConfig)), null, false);
SpecCompiler mockCompiler = Mockito.mock(SpecCompiler.class);
Mockito.when(mockOrchestrator.getSpecCompiler()).thenReturn(mockCompiler);
Mockito.doAnswer((Answer<Void>) a -> {
scheduler.isCompilerHealthy = true;
return null;
}).when(mockCompiler).awaitHealthy();
scheduler.setActive(true);
AssertWithBackoff.create().timeoutMs(20000).maxSleepMs(2000).backoffFactor(2)
.assertTrue(new Predicate<Void>() {
@Override
public boolean apply(Void input) {
Map<String, Spec> scheduledFlowSpecs = scheduler.scheduledFlowSpecs;
if (scheduledFlowSpecs != null && scheduledFlowSpecs.size() == 2) {
return scheduler.scheduledFlowSpecs.containsKey("spec1") &&
scheduler.scheduledFlowSpecs.containsKey("spec2");
} else {
return false;
}
}
}, "Waiting all flowSpecs to be scheduled");
}
/**
* Test that flowSpecs that throw compilation errors do not block the scheduling of other flowSpecs
*/
@Test
public void testJobSchedulerUnschedule() throws Throwable {
// Mock a FlowCatalog.
File specDir = Files.createTempDir();
Properties properties = new Properties();
properties.setProperty(FLOWSPEC_STORE_DIR_KEY, specDir.getAbsolutePath());
FlowCatalog flowCatalog = new FlowCatalog(ConfigUtils.propertiesToConfig(properties));
ServiceBasedAppLauncher serviceLauncher = new ServiceBasedAppLauncher(properties, "GaaSJobSchedulerTest");
// Assume that the catalog can store corrupted flows
SpecCatalogListener mockListener = Mockito.mock(SpecCatalogListener.class);
when(mockListener.getName()).thenReturn(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS);
when(mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(""));
flowCatalog.addListener(mockListener);
serviceLauncher.addService(flowCatalog);
serviceLauncher.start();
FlowSpec flowSpec0 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec0"));
FlowSpec flowSpec1 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec1"));
FlowSpec flowSpec2 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec2"));
// Ensure that these flows are scheduled
flowCatalog.put(flowSpec0, true);
flowCatalog.put(flowSpec1, true);
flowCatalog.put(flowSpec2, true);
Assert.assertEquals(flowCatalog.getSpecs().size(), 3);
Orchestrator mockOrchestrator = Mockito.mock(Orchestrator.class);
SchedulerService schedulerService = new SchedulerService(new Properties());
// Mock a GaaS scheduler.
TestGobblinServiceJobScheduler scheduler = new TestGobblinServiceJobScheduler("testscheduler",
ConfigFactory.empty(), Optional.of(flowCatalog), null, mockOrchestrator, Optional.of(new InMemoryUserQuotaManager(quotaConfig)), schedulerService, false);
schedulerService.startAsync().awaitRunning();
scheduler.startUp();
SpecCompiler mockCompiler = Mockito.mock(SpecCompiler.class);
Mockito.when(mockOrchestrator.getSpecCompiler()).thenReturn(mockCompiler);
Mockito.doAnswer((Answer<Void>) a -> {
scheduler.isCompilerHealthy = true;
return null;
}).when(mockCompiler).awaitHealthy();
scheduler.setActive(true);
AssertWithBackoff.create().timeoutMs(20000).maxSleepMs(2000).backoffFactor(2)
.assertTrue(new Predicate<Void>() {
@Override
public boolean apply(Void input) {
Map<String, Spec> scheduledFlowSpecs = scheduler.scheduledFlowSpecs;
if (scheduledFlowSpecs != null && scheduledFlowSpecs.size() == 3) {
return scheduler.scheduledFlowSpecs.containsKey("spec0") &&
scheduler.scheduledFlowSpecs.containsKey("spec1") &&
scheduler.scheduledFlowSpecs.containsKey("spec2");
} else {
return false;
}
}
}, "Waiting all flowSpecs to be scheduled");
// set scheduler to be inactive and unschedule flows
scheduler.setActive(false);
Collection<Invocation> invocations = Mockito.mockingDetails(mockOrchestrator).getInvocations();
for (Invocation invocation: invocations) {
// ensure that orchestrator is not calling remove
Assert.assertFalse(invocation.getMethod().getName().equals("remove"));
}
Assert.assertEquals(scheduler.scheduledFlowSpecs.size(), 0);
Assert.assertEquals(schedulerService.getScheduler().getJobGroupNames().size(), 0);
}
@Test
public void testJobSchedulerAddFlowQuotaExceeded() throws Exception {
File specDir = Files.createTempDir();
Properties properties = new Properties();
properties.setProperty(FLOWSPEC_STORE_DIR_KEY, specDir.getAbsolutePath());
FlowCatalog flowCatalog = new FlowCatalog(ConfigUtils.propertiesToConfig(properties));
ServiceBasedAppLauncher serviceLauncher = new ServiceBasedAppLauncher(properties, "GaaSJobSchedulerTest");
serviceLauncher.addService(flowCatalog);
serviceLauncher.start();
FlowSpec flowSpec0 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec0"), "flowName0", "group1",
ConfigFactory.empty(), true);
FlowSpec flowSpec1 = FlowCatalogTest.initFlowSpec(specDir.getAbsolutePath(), URI.create("spec1"), "flowName1", "group1",
ConfigFactory.empty(), true);
Orchestrator mockOrchestrator = Mockito.mock(Orchestrator.class);
SpecCompiler mockSpecCompiler = Mockito.mock(SpecCompiler.class);
when(mockOrchestrator.getSpecCompiler()).thenReturn(mockSpecCompiler);
Dag<JobExecutionPlan> mockDag0 = this.buildDag(flowSpec0.getConfig(), "0");
Dag<JobExecutionPlan> mockDag1 = this.buildDag(flowSpec1.getConfig(), "1");
when(mockSpecCompiler.compileFlow(flowSpec0)).thenReturn(mockDag0);
when(mockSpecCompiler.compileFlow(flowSpec1)).thenReturn(mockDag1);
SchedulerService schedulerService = new SchedulerService(new Properties());
// Mock a GaaS scheduler not in warm standby mode
GobblinServiceJobScheduler scheduler = new GobblinServiceJobScheduler("testscheduler",
ConfigFactory.empty(), Optional.absent(), Optional.of(flowCatalog), null, mockOrchestrator, schedulerService, Optional.of(new InMemoryUserQuotaManager(quotaConfig)), Optional.absent(), false, Optional.of(Mockito.mock(
FlowTriggerHandler.class)));
schedulerService.startAsync().awaitRunning();
scheduler.startUp();
scheduler.setActive(true);
scheduler.onAddSpec(flowSpec0); //Ignore the response for this request
Assert.assertThrows(RuntimeException.class, () -> scheduler.onAddSpec(flowSpec1));
Assert.assertEquals(scheduler.scheduledFlowSpecs.size(), 1);
// Second flow should not be added to scheduled flows since it was rejected
Assert.assertEquals(scheduler.scheduledFlowSpecs.size(), 1);
// set scheduler to be inactive and unschedule flows
scheduler.setActive(false);
Assert.assertEquals(scheduler.scheduledFlowSpecs.size(), 0);
//Mock a GaaS scheduler in warm standby mode, where we don't check quota
GobblinServiceJobScheduler schedulerWithWarmStandbyEnabled = new GobblinServiceJobScheduler("testscheduler",
ConfigFactory.empty(), Optional.absent(), Optional.of(flowCatalog), null, mockOrchestrator, schedulerService, Optional.of(new InMemoryUserQuotaManager(quotaConfig)), Optional.absent(), true, Optional.of(Mockito.mock(
FlowTriggerHandler.class)));
schedulerWithWarmStandbyEnabled.startUp();
schedulerWithWarmStandbyEnabled.setActive(true);
schedulerWithWarmStandbyEnabled.onAddSpec(flowSpec0); //Ignore the response for this request
Assert.assertEquals(schedulerWithWarmStandbyEnabled.scheduledFlowSpecs.size(), 1);
schedulerWithWarmStandbyEnabled.onAddSpec(flowSpec1);
// Second flow should be added to scheduled flows since no quota check in this case
Assert.assertEquals(schedulerWithWarmStandbyEnabled.scheduledFlowSpecs.size(), 2);
// set scheduler to be inactive and unschedule flows
schedulerWithWarmStandbyEnabled.setActive(false);
Assert.assertEquals(schedulerWithWarmStandbyEnabled.scheduledFlowSpecs.size(), 0);
}
class TestGobblinServiceJobScheduler extends GobblinServiceJobScheduler {
public boolean isCompilerHealthy = false;
private boolean hasScheduler = false;
public TestGobblinServiceJobScheduler(String serviceName, Config config,
Optional<FlowCatalog> flowCatalog, Optional<TopologyCatalog> topologyCatalog, Orchestrator orchestrator, Optional<UserQuotaManager> quotaManager,
SchedulerService schedulerService, boolean isWarmStandbyEnabled) throws Exception {
super(serviceName, config, Optional.absent(), flowCatalog, topologyCatalog, orchestrator, schedulerService, quotaManager, Optional.absent(), isWarmStandbyEnabled, Optional.of(Mockito.mock(
FlowTriggerHandler.class)));
if (schedulerService != null) {
hasScheduler = true;
}
}
/**
* Override super method to only add spec into in-memory containers but not scheduling anything to simplify testing.
*/
@Override
public AddSpecResponse onAddSpec(Spec addedSpec) {
String flowName = (String) ((FlowSpec) addedSpec).getConfigAsProperties().get(ConfigurationKeys.FLOW_NAME_KEY);
if (flowName.equals(MockedSpecCompiler.UNCOMPILABLE_FLOW)) {
throw new RuntimeException("Could not compile flow");
}
super.scheduledFlowSpecs.put(addedSpec.getUri().toString(), addedSpec);
if (hasScheduler) {
try {
scheduleJob(((FlowSpec) addedSpec).getConfigAsProperties(), null);
} catch (JobException e) {
throw new RuntimeException(e);
}
}
// Check that compiler is healthy at time of scheduling flows
Assert.assertTrue(isCompilerHealthy);
return new AddSpecResponse(addedSpec.getDescription());
}
}
Dag<JobExecutionPlan> buildDag(Config additionalConfig, String id) throws URISyntaxException {
Config config = ConfigFactory.empty().
withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, ConfigValueFactory.fromAnyRef(System.currentTimeMillis()));
config = additionalConfig.withFallback(config);
List<JobExecutionPlan> jobExecutionPlans = new ArrayList<>();
JobSpec js = JobSpec.builder("test_job_" + id).withVersion(id).withConfig(config).
withTemplate(new URI("job_" + id)).build();
SpecExecutor specExecutor = InMemorySpecExecutor.createDummySpecExecutor(new URI("jobExecutor"));
JobExecutionPlan jobExecutionPlan = new JobExecutionPlan(js, specExecutor);
jobExecutionPlans.add(jobExecutionPlan);
return new JobExecutionPlanDagFactory().createDag(jobExecutionPlans);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.utils;
import com.google.common.base.Optional;
import java.net.URISyntaxException;
import java.util.HashMap;
import org.apache.gobblin.metrics.event.TimingEvent;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.orchestration.DagTestUtils;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.junit.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Tests for {@link FlowCompilationValidationHelper}, the flow-compilation helper shared by the
 * DagManager and the Orchestrator.
 */
public class FlowCompilationValidationHelperTest {
  // Inputs for building the test dag.
  private String testDagId = "testDag";
  private Long specFlowExecutionId = 1234L;
  // A caller-supplied id that should win over the one embedded in the job spec.
  private String overridingFlowExecutionId = "5678";
  // An id already present in flow metadata, which must never be overwritten.
  private String preExistingFlowExecutionId = "9999";
  private Dag<JobExecutionPlan> planDag;

  @BeforeClass
  public void setup() throws URISyntaxException {
    planDag = DagTestUtils.buildDag(testDagId, specFlowExecutionId);
  }

  /*
  Tests that addFlowExecutionIdIfAbsent adds flowExecutionId to a flowMetadata object when it is absent, prioritizing
  the optional flowExecutionId over the one from the job spec
  */
  @Test
  public void testAddFlowExecutionIdWhenAbsent() {
    HashMap<String, String> metadata = new HashMap<>();
    FlowCompilationValidationHelper.addFlowExecutionIdIfAbsent(metadata, Optional.of(overridingFlowExecutionId), planDag);
    String recordedId = metadata.get(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD);
    Assert.assertEquals(recordedId, overridingFlowExecutionId);
  }

  /*
  Tests that addFlowExecutionIdIfAbsent does not update an existing flowExecutionId in a flowMetadata object
  */
  @Test
  public void testSkipAddingFlowExecutionIdWhenPresent() {
    HashMap<String, String> metadata = new HashMap<>();
    metadata.put(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD, preExistingFlowExecutionId);
    FlowCompilationValidationHelper.addFlowExecutionIdIfAbsent(metadata, Optional.of(overridingFlowExecutionId), planDag);
    String recordedId = metadata.get(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD);
    Assert.assertEquals(recordedId, preExistingFlowExecutionId);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.template_catalog;
import java.net.URI;
import java.util.List;
import java.util.Properties;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.gobblin.service.modules.dataset.DatasetDescriptor;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.dataset.FSDatasetDescriptor;
import org.apache.gobblin.service.modules.flowgraph.DatasetDescriptorConfigKeys;
import org.apache.gobblin.service.modules.template.FlowTemplate;
import org.testng.collections.Lists;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class FSFlowTemplateCatalogTest {
public static final String TEST_TEMPLATE_NAME = "flowEdgeTemplate";
public static final String TEST_TEMPLATE_DIR_URI = "FS:///" + TEST_TEMPLATE_NAME;
@Test
public void testGetFlowTemplate() throws Exception {
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
// Create a FSFlowTemplateCatalog instance
Properties properties = new Properties();
properties.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, flowTemplateCatalogUri.toString());
Config config = ConfigFactory.parseProperties(properties);
Config templateCatalogCfg = config
.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
FSFlowTemplateCatalog catalog = new FSFlowTemplateCatalog(templateCatalogCfg);
FlowTemplate flowTemplate = catalog.getFlowTemplate(new URI(TEST_TEMPLATE_DIR_URI));
//Basic sanity check for the FlowTemplate
List<JobTemplate> jobTemplates = flowTemplate.getJobTemplates();
Assert.assertEquals(jobTemplates.size(), 4);
for (int i = 0; i < 4; i++) {
String uri = new Path(jobTemplates.get(i).getUri()).getName().split("\\.")[0];
String templateId = uri.substring(uri.length() - 1);
for (int j = 0; j < 2; j++) {
Config jobTemplateConfig = jobTemplates.get(i).getRawTemplateConfig();
String suffix = templateId + Integer.toString(j + 1);
Assert.assertEquals(jobTemplateConfig.getString("key" + suffix), "val" + suffix);
}
}
Config flowConfig = ConfigFactory.empty().withValue("team.name", ConfigValueFactory.fromAnyRef("test-team"))
.withValue("dataset.name", ConfigValueFactory.fromAnyRef("test-dataset"));
List<Pair<DatasetDescriptor, DatasetDescriptor>> inputOutputDescriptors = flowTemplate.getDatasetDescriptors(flowConfig, true);
Assert.assertTrue(inputOutputDescriptors.size() == 2);
List<String> dirs = Lists.newArrayList("inbound", "outbound");
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
FSDatasetDescriptor datasetDescriptor;
if (j == 0) {
datasetDescriptor = (FSDatasetDescriptor) inputOutputDescriptors.get(i).getLeft();
} else {
datasetDescriptor = (FSDatasetDescriptor) inputOutputDescriptors.get(i).getRight();
}
Assert.assertEquals(datasetDescriptor.getPlatform(), "hdfs");
Assert.assertEquals(datasetDescriptor.getFormatConfig().getFormat(), "avro");
Assert.assertEquals(datasetDescriptor.getPath(), "/data/" + dirs.get(i) + "/test-team/test-dataset");
}
}
Config flowTemplateConfig = flowTemplate.getRawTemplateConfig();
Assert.assertEquals(flowTemplateConfig.getString(DatasetDescriptorConfigKeys.FLOW_EDGE_INPUT_DATASET_DESCRIPTOR_PREFIX + ".0."
+ DatasetDescriptorConfigKeys.CLASS_KEY), FSDatasetDescriptor.class.getCanonicalName());
Assert.assertEquals(flowTemplateConfig.getString(DatasetDescriptorConfigKeys.FLOW_EDGE_OUTPUT_DATASET_DESCRIPTOR_PREFIX
+ ".0." + DatasetDescriptorConfigKeys.CLASS_KEY), FSDatasetDescriptor.class.getCanonicalName());
}
} | 3,793 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.template_catalog;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.google.common.util.concurrent.ServiceManager;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobTemplate;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.template.FlowTemplate;
import org.apache.gobblin.testing.AssertWithBackoff;
@Slf4j
public class ObservingFSFlowEdgeTemplateCatalogTest {
private File templateDir;
private Config templateCatalogCfg;
@BeforeClass
public void setUp() throws Exception {
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
this.templateDir = Files.createTempDir();
FileUtils.forceDeleteOnExit(templateDir);
FileUtils.copyDirectory(new File(flowTemplateCatalogUri.getPath()), templateDir);
Properties properties = new Properties();
properties.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, templateDir.toURI().toString());
properties.put(ConfigurationKeys.JOB_CONFIG_FILE_MONITOR_POLLING_INTERVAL_KEY, "1000");
Config config = ConfigFactory.parseProperties(properties);
this.templateCatalogCfg = config.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
}
@Test
public void testModifyFlowTemplate() throws Exception {
ObservingFSFlowEdgeTemplateCatalog catalog = new ObservingFSFlowEdgeTemplateCatalog(this.templateCatalogCfg, new ReentrantReadWriteLock());
ServiceManager serviceManager = new ServiceManager(Lists.newArrayList(catalog));
serviceManager.startAsync().awaitHealthy(5, TimeUnit.SECONDS);
// Check cached flow template is returned
FlowTemplate flowTemplate1 = catalog.getFlowTemplate(new URI(FSFlowTemplateCatalogTest.TEST_TEMPLATE_DIR_URI));
FlowTemplate flowTemplate2 = catalog.getFlowTemplate(new URI(FSFlowTemplateCatalogTest.TEST_TEMPLATE_DIR_URI));
Assert.assertSame(flowTemplate1, flowTemplate2);
// Update a file flow catalog and check that the getFlowTemplate returns the new value
Path flowConfPath = new File(new File(this.templateDir, FSFlowTemplateCatalogTest.TEST_TEMPLATE_NAME), "flow.conf").toPath();
List<String> lines = java.nio.file.Files.readAllLines(flowConfPath);
for (int i = 0; i < lines.size(); i++) {
if (lines.get(i).equals("gobblin.flow.edge.input.dataset.descriptor.0.format=avro")) {
lines.set(i, "gobblin.flow.edge.input.dataset.descriptor.0.format=any");
break;
}
}
java.nio.file.Files.write(flowConfPath, lines);
Function testFunction = new GetFlowTemplateConfigFunction(new URI(FSFlowTemplateCatalogTest.TEST_TEMPLATE_DIR_URI), catalog,
"gobblin.flow.edge.input.dataset.descriptor.0.format");
AssertWithBackoff.create().timeoutMs(10000).assertEquals(testFunction, "any", "flow template updated");
}
@AllArgsConstructor
private class GetFlowTemplateConfigFunction implements Function<Void, String> {
private URI flowTemplateCatalogUri;
private FSFlowTemplateCatalog flowTemplateCatalog;
private String configKey;
@Override
public String apply(Void input) {
try {
return this.flowTemplateCatalog.getFlowTemplate(this.flowTemplateCatalogUri).getRawTemplateConfig().getString(this.configKey);
} catch (SpecNotFoundException | JobTemplate.TemplateException | IOException | URISyntaxException e) {
throw new RuntimeException(e);
}
}
}
} | 3,794 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.template_catalog;
import java.io.File;
import java.net.URI;
import java.nio.file.Path;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.modules.template.FlowTemplate;
@Slf4j
public class UpdatableFSFFlowTemplateCatalogTest {
private File templateDir;
private Config templateCatalogCfg;
@BeforeClass
public void setUp() throws Exception {
URI flowTemplateCatalogUri = this.getClass().getClassLoader().getResource("template_catalog").toURI();
this.templateDir = Files.createTempDir();
FileUtils.forceDeleteOnExit(templateDir);
FileUtils.copyDirectory(new File(flowTemplateCatalogUri.getPath()), templateDir);
Properties properties = new Properties();
properties.put(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY, templateDir.toURI().toString());
Config config = ConfigFactory.parseProperties(properties);
this.templateCatalogCfg = config.withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
config.getValue(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY));
}
@Test
public void testModifyFlowTemplate() throws Exception {
UpdatableFSFlowTemplateCatalog catalog = new UpdatableFSFlowTemplateCatalog(this.templateCatalogCfg, new ReentrantReadWriteLock());
// Check cached flow template is returned
FlowTemplate flowTemplate1 = catalog.getFlowTemplate(new URI(FSFlowTemplateCatalogTest.TEST_TEMPLATE_DIR_URI));
FlowTemplate flowTemplate2 = catalog.getFlowTemplate(new URI(FSFlowTemplateCatalogTest.TEST_TEMPLATE_DIR_URI));
Assert.assertSame(flowTemplate1, flowTemplate2);
// Update a file flow catalog and check that the getFlowTemplate returns the new value
Path flowConfPath = new File(new File(this.templateDir, FSFlowTemplateCatalogTest.TEST_TEMPLATE_NAME), "flow.conf").toPath();
List<String> lines = java.nio.file.Files.readAllLines(flowConfPath);
for (int i = 0; i < lines.size(); i++) {
if (lines.get(i).equals("gobblin.flow.edge.input.dataset.descriptor.0.format=avro")) {
lines.set(i, "gobblin.flow.edge.input.dataset.descriptor.0.format=any");
break;
}
}
java.nio.file.Files.write(flowConfPath, lines);
catalog.clearTemplates();
Assert.assertEquals(catalog.getFlowTemplate(new URI(FSFlowTemplateCatalogTest.TEST_TEMPLATE_DIR_URI)).
getRawTemplateConfig().getString("gobblin.flow.edge.input.dataset.descriptor.0.format"), "any");
}
} | 3,795 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.troubleshooter;
import java.time.Duration;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang.StringUtils;
import org.eclipse.jetty.util.ConcurrentHashSet;
import org.testcontainers.containers.MySQLContainer;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.base.Stopwatch;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.runtime.troubleshooter.IssueSeverity;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterException;
import org.apache.gobblin.runtime.troubleshooter.TroubleshooterUtils;
import org.apache.gobblin.service.ServiceConfigKeys;
import org.apache.gobblin.service.TestServiceDatabaseConfig;
import org.apache.gobblin.service.modules.db.ServiceDatabaseManager;
import org.apache.gobblin.service.modules.db.ServiceDatabaseProviderImpl;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
public class MySqlMultiContextIssueRepositoryTest {
private int testId = 1;
private MySQLContainer mysql;
private ServiceDatabaseProviderImpl databaseProvider;
private ServiceDatabaseManager databaseManager;
private MySqlMultiContextIssueRepository repository;
@BeforeMethod
public void setup() {
testId++;
}
@BeforeClass
public void classSetUp() {
mysql = new MySQLContainer("mysql:" + TestServiceDatabaseConfig.MysqlVersion);
mysql.start();
ServiceDatabaseProviderImpl.Configuration dbConfig =
ServiceDatabaseProviderImpl.Configuration.builder().url(mysql.getJdbcUrl()).userName(mysql.getUsername())
.password(mysql.getPassword()).build();
databaseProvider = new ServiceDatabaseProviderImpl(dbConfig);
databaseManager = new ServiceDatabaseManager(databaseProvider);
databaseManager.startAsync().awaitRunning();
repository = new MySqlMultiContextIssueRepository(databaseProvider);
}
@AfterClass
public void classTearDown() {
databaseManager.stopAsync().awaitTerminated();
mysql.stop();
}
@Test
public void canReadEmptyRepository()
throws Exception {
List<Issue> issues = repository.getAll("test-nonexistent");
assertThat(issues).isEmpty();
}
@Test
public void canCreateWithEmptyConfiguration()
throws Exception {
MySqlMultiContextIssueRepository.Configuration configuration =
new MySqlMultiContextIssueRepository.Configuration(ConfigFactory.empty());
MySqlMultiContextIssueRepository newRepo = new MySqlMultiContextIssueRepository(databaseProvider, configuration);
newRepo.startAsync().awaitRunning();
List<Issue> issues = newRepo.getAll("test-nonexistent");
assertThat(issues).isEmpty();
newRepo.stopAsync().awaitTerminated();
}
@Test
public void canPutAndGetFullIssue()
throws Exception {
HashMap<String, String> properties = new HashMap<>();
properties.put("test.prop1", "test value 1");
properties.put("test.prop2", "test value 2");
// Mysql date has less precision than Java date, so we zero sub-second component of the date to get the same
// value after retrieval from db
Issue issue = Issue.builder().summary("Test summary \" ' -- ").code("CODE1")
.time(ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC)).severity(IssueSeverity.ERROR)
.details("details for test issue").exceptionClass("java.io.IOException")
.sourceClass("org.apache.gobblin.service.modules.troubleshooter.AutoTroubleshooterLogAppender")
.properties(properties).build();
String contextId = "context-" + testId;
repository.put(contextId, issue);
List<Issue> issues = repository.getAll(contextId);
assertThat(issues).hasSize(1);
assertThat(issues.get(0)).usingRecursiveComparison().isEqualTo(issue);
}
@Test
public void canPutAndGetMinimalIssue()
throws Exception {
Issue issue = Issue.builder().summary("Test summary").code("CODE1")
.time(ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC)).severity(IssueSeverity.WARN).build();
String contextId = "context-" + testId;
repository.put(contextId, issue);
List<Issue> issues = repository.getAll(contextId);
assertThat(issues).hasSize(1);
assertThat(issues.get(0)).usingRecursiveComparison().isEqualTo(issue);
}
@Test
public void canPutIssueWithMaximumFieldLengths()
throws Exception {
// summary and details are bounded at 16MB, so we just put reasonably large values there
Issue issue = Issue.builder().summary(StringUtils.repeat("s", 100000)).details(StringUtils.repeat("s", 100000))
.code(StringUtils.repeat("c", Issue.MAX_ISSUE_CODE_LENGTH))
.exceptionClass(StringUtils.repeat("e", Issue.MAX_CLASSNAME_LENGTH))
.sourceClass(StringUtils.repeat("s", Issue.MAX_CLASSNAME_LENGTH))
.time(ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC)).severity(IssueSeverity.WARN).build();
String contextId = TroubleshooterUtils
.getContextIdForJob(StringUtils.repeat("g", ServiceConfigKeys.MAX_FLOW_GROUP_LENGTH),
StringUtils.repeat("f", ServiceConfigKeys.MAX_FLOW_NAME_LENGTH),
String.valueOf(Long.MAX_VALUE),
StringUtils.repeat("j", ServiceConfigKeys.MAX_JOB_NAME_LENGTH));
repository.put(contextId, issue);
List<Issue> issues = repository.getAll(contextId);
assertThat(issues).hasSize(1);
assertThat(issues.get(0)).usingRecursiveComparison().isEqualTo(issue);
}
@Test
public void willGetMeaningfulErrorOnOversizedData()
throws Exception {
Issue issue = Issue.builder().summary("Test summary").code(StringUtils.repeat("c", Issue.MAX_ISSUE_CODE_LENGTH * 2))
.time(ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC)).severity(IssueSeverity.WARN).build();
String contextId = "context-" + testId;
assertThatThrownBy(() -> {
repository.put(contextId, issue);
}).isInstanceOf(TroubleshooterException.class).getRootCause()
.hasMessageContaining("Data too long for column 'code'");
}
@Test
public void willRollbackWhenSomeIssuesAreInvalid()
throws Exception {
Issue validIssue = getTestIssue("test", "test1");
Issue invalidIssue =
Issue.builder().summary("Test summary").code(StringUtils.repeat("c", Issue.MAX_ISSUE_CODE_LENGTH * 2))
.time(ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC)).severity(IssueSeverity.WARN)
.build();
String contextId = "context-" + testId;
try {
repository.put(contextId, Arrays.asList(validIssue, invalidIssue));
} catch (TroubleshooterException ex) {
// exception is expected
}
List<Issue> issues = repository.getAll(contextId);
assertThat(issues).isEmpty();
}
@Test
public void canPutIssueRepeatedly()
throws Exception {
Issue issue = getTestIssue("test", "test1");
String contextId = "context-" + testId;
repository.put(contextId, issue);
repository.put(contextId, issue);
List<Issue> issues = repository.getAll(contextId);
assertThat(issues).hasSize(1);
assertThat(issues.get(0)).usingRecursiveComparison().isEqualTo(issue);
}
@Test
public void canPutAndGetMultipleIssues()
throws Exception {
Issue issue1 = getTestIssue("test-1", "code1");
Issue issue2 = getTestIssue("test-2", "code2");
Issue issue3 = getTestIssue("test-3", "code3");
repository.put("context-1-" + testId, issue1);
repository.put("context-1-" + testId, issue2);
repository.put("context-2-" + testId, issue2);
repository.put("context-2-" + testId, issue3);
List<Issue> context1Issues = repository.getAll("context-1-" + testId);
assertThat(context1Issues).hasSize(2);
assertThat(context1Issues.get(0)).usingRecursiveComparison().isEqualTo(issue1);
assertThat(context1Issues.get(1)).usingRecursiveComparison().isEqualTo(issue2);
List<Issue> context2Issues = repository.getAll("context-2-" + testId);
assertThat(context2Issues).hasSize(2);
assertThat(context2Issues.get(0)).usingRecursiveComparison().isEqualTo(issue2);
assertThat(context2Issues.get(1)).usingRecursiveComparison().isEqualTo(issue3);
}
@Test
public void canRemoveIssue()
throws Exception {
Issue issue1 = getTestIssue("test-1", "code1");
Issue issue2 = getTestIssue("test-2", "code2");
Issue issue3 = getTestIssue("test-3", "code3");
String contextId = "context-1-" + testId;
repository.put(contextId, issue1);
repository.put(contextId, issue2);
repository.put(contextId, issue3);
repository.remove(contextId, issue2.getCode());
List<Issue> issues = repository.getAll(contextId);
assertThat(issues).hasSize(2);
assertThat(issues.get(0)).usingRecursiveComparison().isEqualTo(issue1);
assertThat(issues.get(1)).usingRecursiveComparison().isEqualTo(issue3);
}
@Test
public void willPreserveIssueOrder()
throws Exception {
Random random = new Random(1);
List<Issue> issues = new ArrayList<>();
String contextId = "context-" + testId;
for (int i = 0; i < 100; i++) {
Issue issue = getTestIssue("test-" + random.nextInt(), "code-" + random.nextInt());
issues.add(issue);
repository.put(contextId, issue);
}
List<Issue> retrievedIssues = repository.getAll(contextId);
assertThat(retrievedIssues).usingRecursiveComparison().isEqualTo(issues);
}
@Test
public void canRemoveIssuesAboveSpecifiedCount()
throws Exception {
String contextId = "context-" + testId;
for (int i = 0; i < 100; i++) {
Issue issue = getTestIssue("test-" + i, "code-" + i);
repository.put(contextId, issue);
}
repository.deleteOldIssuesOverTheCount(20);
List<Issue> retrievedIssues = repository.getAll(contextId);
assertThat(retrievedIssues).hasSize(20);
assertThat(retrievedIssues.get(0).getCode()).isEqualTo("code-80");
assertThat(retrievedIssues.get(19).getCode()).isEqualTo("code-99");
}
@Test
public void canRemoveOlderIssues()
throws Exception {
String contextId = "context-" + testId;
int issueCount = 100;
ZonedDateTime startTime = ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC);
for (int i = 0; i < 100; i++) {
Issue issue = Issue.builder().summary("test summary").code("code-" + i)
.time(startTime.minus(Duration.ofDays(issueCount - i))).severity(IssueSeverity.ERROR).build();
repository.put(contextId, issue);
}
repository.deleteIssuesOlderThan(startTime.minus(Duration.ofDays(20).plus(Duration.ofHours(1))));
List<Issue> retrievedIssues = repository.getAll(contextId);
assertThat(retrievedIssues).hasSize(20);
}
@Test(enabled = false) // Load test takes several minutes to run and is disabled by default
public void canWriteLotsOfIssuesConcurrently()
throws Exception {
canWriteLotsOfIssuesConcurrently(false);
}
@Test(enabled = false) // Load test takes several minutes to run and is disabled by default
public void canWriteLotsOfIssuesConcurrentlyWithBatching()
throws Exception {
canWriteLotsOfIssuesConcurrently(true);
}
private void canWriteLotsOfIssuesConcurrently(boolean useBatching)
throws Exception {
int threadCount = 10;
int contextsPerThread = 100;
int issuesPerContext = 10;
Stopwatch stopwatch = Stopwatch.createStarted();
ConcurrentHashSet<Exception> exceptions = new ConcurrentHashSet<>();
ForkJoinPool forkJoinPool = new ForkJoinPool(threadCount);
for (int i = 0; i < threadCount; i++) {
int threadId = i;
forkJoinPool.submit(() -> {
try {
runLoadTestThread(repository, threadId, contextsPerThread, issuesPerContext, useBatching);
} catch (Exception ex) {
exceptions.add(ex);
}
});
}
forkJoinPool.shutdown();
assertThat(forkJoinPool.awaitTermination(30, TimeUnit.MINUTES)).isTrue();
if (!exceptions.isEmpty()) {
throw exceptions.stream().findFirst().get();
}
int totalIssues = threadCount * contextsPerThread * issuesPerContext;
System.out.printf("Created %d issues in %d ms. Speed: %d issues/second%n", totalIssues,
stopwatch.elapsed(TimeUnit.MILLISECONDS), totalIssues / stopwatch.elapsed(TimeUnit.SECONDS));
}
private void runLoadTestThread(MySqlMultiContextIssueRepository repository, int threadNumber, int contextsPerThread,
int issuesPerContext, boolean useBatching) {
Random random = new Random(threadNumber);
try {
for (int i = 0; i < contextsPerThread; i++) {
String contextId = "load-test-" + testId + "-thread-" + threadNumber + "-context-" + i;
List<Issue> issues = new ArrayList<>();
for (int j = 0; j < issuesPerContext; j++) {
Issue issue = getLargeTestIssue("load-test-1-" + random.nextInt(), "code-" + random.nextInt());
issues.add(issue);
if (!useBatching) {
repository.put(contextId, issue);
}
}
if (useBatching) {
repository.put(contextId, issues);
}
List<Issue> retrievedIssues = repository.getAll(contextId);
assertThat(retrievedIssues).usingRecursiveComparison().isEqualTo(issues);
}
} catch (TroubleshooterException e) {
throw new RuntimeException(e);
}
}
private Issue getTestIssue(String summary, String code) {
return Issue.builder().summary(summary).code(code)
.time(ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC)).severity(IssueSeverity.ERROR)
.details("test details for " + summary).build();
}
private Issue getLargeTestIssue(String summary, String code) {
HashMap<String, String> properties = new HashMap<>();
for (int i = 0; i < 5; i++) {
properties.put("test.property" + i, RandomStringUtils.random(100));
}
Issue.IssueBuilder issue = Issue.builder();
issue.summary(summary);
issue.code(code);
issue.time(ZonedDateTime.now().withNano(0).withZoneSameInstant(ZoneOffset.UTC));
issue.severity(IssueSeverity.ERROR);
issue.details(RandomStringUtils.random(3000));
issue.sourceClass(RandomStringUtils.random(100));
issue.exceptionClass(RandomStringUtils.random(100));
issue.properties(properties);
return issue.build();
}
} | 3,796 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metastore.testing.ITestMetastoreDatabase;
import org.apache.gobblin.metastore.testing.TestMetastoreDatabaseFactory;
/**
 * Tests for {@link MysqlUserQuotaManager} against a test metastore database: running-dag
 * bookkeeping, per-user and per-flow-group counters, and concurrent counter updates.
 */
public class MysqlUserQuotaManagerTest {
  private static final String USER = "testUser";
  private static final String PASSWORD = "testPassword";
  private static final String TABLE = "quotas";
  private static final String PROXY_USER = "abora";
  private MysqlUserQuotaManager quotaManager;
  // Number of increments (and later decrements) each worker thread performs in the concurrency test.
  public static int INCREMENTS = 1000;

  @BeforeClass
  public void setUp() throws Exception {
    ITestMetastoreDatabase testDb = TestMetastoreDatabaseFactory.get();
    Config config = ConfigBuilder.create()
        .addPrimitive(MysqlUserQuotaManager.CONFIG_PREFIX + '.' + ConfigurationKeys.STATE_STORE_DB_URL_KEY, testDb.getJdbcUrl())
        .addPrimitive(MysqlUserQuotaManager.CONFIG_PREFIX + '.' + ConfigurationKeys.STATE_STORE_DB_USER_KEY, USER)
        .addPrimitive(MysqlUserQuotaManager.CONFIG_PREFIX + '.' + ConfigurationKeys.STATE_STORE_DB_PASSWORD_KEY, PASSWORD)
        .addPrimitive(MysqlUserQuotaManager.CONFIG_PREFIX + '.' + ConfigurationKeys.STATE_STORE_DB_TABLE_KEY, TABLE)
        .build();
    this.quotaManager = new MysqlUserQuotaManager(config);
  }

  @Test
  public void testRunningDagStore() throws Exception {
    String dagId = DagManagerUtils.generateDagId(DagManagerTest.buildDag("dagId", 1234L, "", 1).getNodes().get(0)).toString();
    // try-with-resources ensures the connection is released even if an assertion fails.
    try (Connection connection = this.quotaManager.quotaStore.dataSource.getConnection()) {
      Assert.assertFalse(this.quotaManager.containsDagId(dagId));
      this.quotaManager.addDagId(connection, dagId);
      connection.commit();
      Assert.assertTrue(this.quotaManager.containsDagId(dagId));
      // First removal succeeds; removing the same id again must report false.
      Assert.assertTrue(this.quotaManager.removeDagId(connection, dagId));
      connection.commit();
      Assert.assertFalse(this.quotaManager.containsDagId(dagId));
      Assert.assertFalse(this.quotaManager.removeDagId(connection, dagId));
      connection.commit();
    }
  }

  @Test
  public void testIncreaseCount() throws Exception {
    try (Connection connection = this.quotaManager.quotaStore.dataSource.getConnection()) {
      // incrementJobCount returns the count *before* the increment.
      int prevCount = this.quotaManager.incrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT);
      connection.commit();
      Assert.assertEquals(prevCount, 0);
      prevCount = this.quotaManager.incrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT);
      connection.commit();
      Assert.assertEquals(prevCount, 1);
      Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT), 2);
      // USER_COUNT and FLOWGROUP_COUNT are tracked independently.
      prevCount = this.quotaManager.incrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.FLOWGROUP_COUNT);
      connection.commit();
      Assert.assertEquals(prevCount, 0);
      prevCount = this.quotaManager.incrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.FLOWGROUP_COUNT);
      connection.commit();
      Assert.assertEquals(prevCount, 1);
    }
  }

  @Test(dependsOnMethods = "testIncreaseCount")
  public void testDecreaseCount() throws Exception {
    // try-with-resources fixes a connection leak: this connection was previously never closed.
    try (Connection connection = this.quotaManager.quotaStore.dataSource.getConnection()) {
      this.quotaManager.decrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT);
      connection.commit();
      Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT), 1);
      this.quotaManager.decrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT);
      connection.commit();
      Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT), 0);
      // Decrementing past zero must not go negative.
      this.quotaManager.decrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT);
      connection.commit();
      Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT), 0);
      this.quotaManager.decrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.FLOWGROUP_COUNT);
      connection.commit();
      Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.FLOWGROUP_COUNT), 1);
      this.quotaManager.decrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.FLOWGROUP_COUNT);
      connection.commit();
      // on count reduced to zero, the row should get deleted and the get call should return -1 instead of 0.
      Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.FLOWGROUP_COUNT), -1);
    }
  }

  /** Worker that repeatedly increments (or decrements) the USER_COUNT for {@code PROXY_USER}. */
  class ChangeCountRunnable implements Runnable {
    boolean increaseOrDecrease;

    public ChangeCountRunnable(boolean increaseOrDecrease) {
      this.increaseOrDecrease = increaseOrDecrease;
    }

    @Override
    public void run() {
      int i = 0;
      while (i++ < INCREMENTS) {
        try (Connection connection = MysqlUserQuotaManagerTest.this.quotaManager.quotaStore.dataSource.getConnection();) {
          if (increaseOrDecrease) {
            MysqlUserQuotaManagerTest.this.quotaManager.incrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT);
          } else {
            MysqlUserQuotaManagerTest.this.quotaManager.decrementJobCount(connection, PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT);
          }
          connection.commit();
        } catch (IOException | SQLException e) {
          Assert.fail("Thread got an exception.", e);
        }
      }
    }
  }

  @Test(dependsOnMethods = "testDecreaseCount")
  public void testConcurrentChanges() throws IOException, InterruptedException {
    // Three incrementing threads followed by three decrementing threads; the final count should
    // return to "no row" (-1). (Removed an unused local that misleadingly claimed 3 total threads.)
    Thread thread1 = new Thread(new ChangeCountRunnable(true));
    Thread thread2 = new Thread(new ChangeCountRunnable(true));
    Thread thread3 = new Thread(new ChangeCountRunnable(true));
    Thread thread4 = new Thread(new ChangeCountRunnable(false));
    Thread thread5 = new Thread(new ChangeCountRunnable(false));
    Thread thread6 = new Thread(new ChangeCountRunnable(false));
    thread1.start();
    thread2.start();
    thread3.start();
    thread1.join();
    thread2.join();
    thread3.join();
    Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT),
        INCREMENTS * 3);
    thread4.start();
    thread5.start();
    thread6.start();
    thread4.join();
    thread5.join();
    thread6.join();
    Assert.assertEquals(this.quotaManager.getCount(PROXY_USER, AbstractUserQuotaManager.CountType.USER_COUNT), -1);
  }
}
| 3,797 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
import org.apache.gobblin.service.modules.spec.JobExecutionPlanDagFactory;
import org.apache.gobblin.util.CompletedFuture;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Static helpers for building simple {@link TopologySpec}s and two-node {@link Dag}s of
 * {@link JobExecutionPlan}s for DagManager-related tests.
 */
public class DagTestUtils {
  private DagTestUtils() {
    // Utility class; not instantiable.
  }

  /**
   * Builds a minimal {@link TopologySpec} backed by an {@link InMemorySpecExecutor}.
   *
   * @param specUriInString URI string used for both the spec executor and the spec itself
   */
  public static TopologySpec buildNaiveTopologySpec(String specUriInString) {
    String specStoreDir = "/tmp/specStoreDir";
    Properties execProps = new Properties();
    execProps.put("specStore.fs.dir", specStoreDir);
    execProps.put("specExecInstance.capabilities", "source:destination");
    execProps.put("specExecInstance.uri", specUriInString);
    execProps.put("uri", specUriInString);
    Config execConfig = ConfigUtils.propertiesToConfig(execProps);
    SpecExecutor executor = new InMemorySpecExecutor(execConfig);
    return TopologySpec.builder(new Path(specStoreDir).toUri())
        .withConfig(execConfig)
        .withDescription("test")
        .withVersion("1")
        .withSpecExecutor(executor)
        .build();
  }

  /**
   * Create a {@link Dag < JobExecutionPlan >} with one parent and one child.
   * Both jobs share the given flow group/name/execution id and start in RUNNING state.
   *
   * @return a Dag.
   */
  public static Dag<JobExecutionPlan> buildDag(String id, Long flowExecutionId) throws URISyntaxException {
    List<JobExecutionPlan> plans = new ArrayList<>();
    for (int jobIdx = 0; jobIdx < 2; jobIdx++) {
      String suffix = Integer.toString(jobIdx);
      Config jobConfig = ConfigBuilder.create().
          addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, "group" + id).
          addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, "flow" + id).
          addPrimitive(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, flowExecutionId).
          addPrimitive(ConfigurationKeys.JOB_NAME_KEY, "job" + suffix).build();
      if (jobIdx > 0) {
        // Every job after the first depends on its predecessor, forming a linear chain.
        jobConfig = jobConfig.withValue(ConfigurationKeys.JOB_DEPENDENCIES, ConfigValueFactory.fromAnyRef("job" + (jobIdx - 1)));
      }
      JobSpec jobSpec = JobSpec.builder("test_job" + suffix)
          .withVersion(suffix)
          .withConfig(jobConfig)
          .withTemplate(new URI("job" + suffix))
          .build();
      SpecExecutor specExecutor = buildNaiveTopologySpec("mySpecExecutor").getSpecExecutor();
      JobExecutionPlan plan = new JobExecutionPlan(jobSpec, specExecutor);
      plan.setExecutionStatus(ExecutionStatus.RUNNING);
      // Future of type CompletedFuture is used because in tests InMemorySpecProducer is used
      // and that responds with CompletedFuture.
      plan.setJobFuture(Optional.of(new CompletedFuture<>(Boolean.TRUE, null)));
      plans.add(plan);
    }
    return new JobExecutionPlanDagFactory().createDag(plans);
  }
}
| 3,798 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.modules.orchestration;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.commons.io.FileUtils;
import org.junit.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.TopologySpec;
import org.apache.gobblin.service.ExecutionStatus;
import org.apache.gobblin.service.modules.flowgraph.Dag;
import org.apache.gobblin.service.modules.spec.JobExecutionPlan;
public class FSDagStateStoreTest {
private DagStateStore _dagStateStore;
private final String dagStateStoreDir = "/tmp/fsDagStateStoreTest/dagStateStore";
private File checkpointDir;
private Map<URI, TopologySpec> topologySpecMap;
private TopologySpec topologySpec;
private URI specExecURI;
/**
 * Prepares a fresh {@link FSDagStateStore} rooted at {@code dagStateStoreDir}: deletes any
 * leftover checkpoint directory, then registers a single in-memory spec executor in the
 * topology-spec map that the store uses to resolve spec-executor URIs during deserialization.
 */
@BeforeClass
public void setUp()
throws IOException, URISyntaxException {
// Start from a clean slate so stale checkpoint files from earlier runs cannot leak in.
this.checkpointDir = new File(dagStateStoreDir);
FileUtils.deleteDirectory(this.checkpointDir);
Config config = ConfigFactory.empty().withValue(FSDagStateStore.DAG_STATESTORE_DIR, ConfigValueFactory.fromAnyRef(
this.dagStateStoreDir));
this.topologySpecMap = new HashMap<>();
// Construct the TopologySpec and its map.
String specExecInstanceUriInString = "mySpecExecutor";
this.topologySpec = DagTestUtils.buildNaiveTopologySpec(specExecInstanceUriInString);
this.specExecURI = new URI(specExecInstanceUriInString);
this.topologySpecMap.put(this.specExecURI, topologySpec);
this._dagStateStore = new FSDagStateStore(config, this.topologySpecMap);
}
/**
 * Verifies that a dag persisted with {@code writeCheckpoint} round-trips through its on-disk
 * file: node count, start/end nodes, parent-child links, per-job flow config, and RUNNING
 * execution status all survive deserialization.
 */
@Test
public void testWriteCheckpoint() throws IOException, URISyntaxException {
long flowExecutionId = System.currentTimeMillis();
String flowGroupId = "0";
// DagTestUtils.buildDag produces a two-node parent->child dag (see DagTestUtils).
Dag<JobExecutionPlan> dag = DagTestUtils.buildDag(flowGroupId, flowExecutionId);
this._dagStateStore.writeCheckpoint(dag);
// The checkpoint file name is the dag id plus the store's file extension.
String fileName = DagManagerUtils.generateDagId(dag) + FSDagStateStore.DAG_FILE_EXTENSION;
File dagFile = new File(this.checkpointDir, fileName);
Dag<JobExecutionPlan> dagDeserialized = ((FSDagStateStore) this._dagStateStore).getDag(dagFile);
Assert.assertEquals(dagDeserialized.getNodes().size(), 2);
Assert.assertEquals(dagDeserialized.getStartNodes().size(), 1);
Assert.assertEquals(dagDeserialized.getEndNodes().size(), 1);
Dag.DagNode<JobExecutionPlan> child = dagDeserialized.getEndNodes().get(0);
Dag.DagNode<JobExecutionPlan> parent = dagDeserialized.getStartNodes().get(0);
// Exactly one parent->child edge should exist after deserialization.
Assert.assertEquals(dagDeserialized.getParentChildMap().size(), 1);
Assert.assertTrue(dagDeserialized.getParentChildMap().get(parent).contains(child));
// Both jobs must retain the flow identity and execution status they were created with.
for (int i = 0; i < 2; i++) {
JobExecutionPlan plan = dagDeserialized.getNodes().get(i).getValue();
Config jobConfig = plan.getJobSpec().getConfig();
Assert.assertEquals(jobConfig.getString(ConfigurationKeys.FLOW_GROUP_KEY), "group" + flowGroupId);
Assert.assertEquals(jobConfig.getString(ConfigurationKeys.FLOW_NAME_KEY), "flow" + flowGroupId);
Assert.assertEquals(jobConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY), flowExecutionId);
Assert.assertEquals(plan.getExecutionStatus(), ExecutionStatus.RUNNING);
}
}
@Test (dependsOnMethods = "testWriteCheckpoint")
public void testCleanUp() throws IOException, URISyntaxException {
long flowExecutionId = System.currentTimeMillis();
String flowGroupId = "0";
Dag<JobExecutionPlan> dag = DagTestUtils.buildDag(flowGroupId, flowExecutionId);
this._dagStateStore.writeCheckpoint(dag);
String fileName = DagManagerUtils.generateDagId(dag) + FSDagStateStore.DAG_FILE_EXTENSION;
File dagFile = new File(this.checkpointDir, fileName);
Assert.assertTrue(dagFile.exists());
this._dagStateStore.cleanUp(dag);
Assert.assertFalse(dagFile.exists());
this._dagStateStore.writeCheckpoint(dag);
Assert.assertTrue(dagFile.exists());
this._dagStateStore.cleanUp(DagManagerUtils.generateDagId(dag).toString());
Assert.assertFalse(dagFile.exists());
}
@Test (dependsOnMethods = "testCleanUp")
public void testGetDags() throws IOException, URISyntaxException, ExecutionException, InterruptedException {
//Set up a new FSDagStateStore instance.
setUp();
List<Long> flowExecutionIds = Lists.newArrayList(System.currentTimeMillis(), System.currentTimeMillis() + 1);
for (int i = 0; i < 2; i++) {
String flowGroupId = Integer.toString(i);
Dag<JobExecutionPlan> dag = DagTestUtils.buildDag(flowGroupId, flowExecutionIds.get(i));
this._dagStateStore.writeCheckpoint(dag);
}
List<Dag<JobExecutionPlan>> dags = this._dagStateStore.getDags();
Assert.assertEquals(dags.size(), 2);
Dag<JobExecutionPlan> singleDag = this._dagStateStore.getDag(DagManagerUtils.generateDagId(dags.get(0)).toString());
dags.add(singleDag);
for (Dag<JobExecutionPlan> dag: dags) {
Assert.assertEquals(dag.getNodes().size(), 2);
Assert.assertEquals(dag.getStartNodes().size(), 1);
Assert.assertEquals(dag.getEndNodes().size(), 1);
Assert.assertEquals(dag.getParentChildMap().size(), 1);
Assert.assertEquals(dag.getNodes().get(0).getValue().getSpecExecutor().getUri(), specExecURI);
Assert.assertEquals(dag.getNodes().get(1).getValue().getSpecExecutor().getUri(), specExecURI);
Assert.assertTrue(Boolean.parseBoolean(dag.getNodes().get(0).getValue().getJobFuture().get().get().toString()));
Assert.assertTrue(Boolean.parseBoolean(dag.getNodes().get(1).getValue().getJobFuture().get().get().toString()));
}
Set<String> dagIds = this._dagStateStore.getDagIds();
Assert.assertEquals(dagIds.size(), 2);
for (Dag<JobExecutionPlan> dag: dags) {
Assert.assertTrue(dagIds.contains(DagManagerUtils.generateDagId(dag).toString()));
}
}
  @AfterClass
  public void cleanUp() throws IOException {
    // Recursively delete the checkpoint directory used by all tests in this class.
    FileUtils.deleteDirectory(this.checkpointDir);
  }
} | 3,799 |