index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/ThrottlingPolicyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import com.typesafe.config.Config;
import org.apache.gobblin.broker.TTLResourceEntry;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
/**
 * A {@link SharedResourceFactory} that creates {@link ThrottlingPolicy}s for the throttling server.
 *
 * <p>The policy class (or alias) for a resource is read from the {@value #POLICY_KEY} key of the
 * resource's configuration. When no policy is configured, the factory either fails (if
 * {@value #FAIL_ON_UNKNOWN_RESOURCE_ID} is set to true) or falls back to a {@link NoopPolicy}.
 * Created policies are wrapped in a {@link TTLResourceEntry} so they are periodically reloaded.
 */
public class ThrottlingPolicyFactory implements SharedResourceFactory<ThrottlingPolicy, SharedLimiterKey, ThrottlingServerScopes> {

  public static final String NAME = "throttlingPolicy";
  /** Config key naming the {@link SpecificPolicyFactory} class or alias to use for a resource. */
  public static final String POLICY_KEY = "policy";
  // NOTE(review): the key is misspelled ("fai..." instead of "fail..."), but it is part of the
  // public configuration surface, so the value is preserved for backward compatibility.
  public static final String FAIL_ON_UNKNOWN_RESOURCE_ID = "faiOnUnknownResourceId";
  /** Config key controlling how often the policy is reloaded, in milliseconds. */
  public static final String RELOAD_FREQUENCY_KEY = "reloadFrequencyMillis";
  public static final long DEFAULT_RELOAD_FREQUENCY = 5 * 60 * 1000L; // 5 minutes

  /** Resolves {@link SpecificPolicyFactory} classes by fully-qualified name or {@code @Alias}. */
  public static final ClassAliasResolver<SpecificPolicyFactory> POLICY_CLASS_RESOLVER = new
      ClassAliasResolver<>(SpecificPolicyFactory.class);

  @Override
  public String getName() {
    return NAME;
  }

  /**
   * Creates the {@link ThrottlingPolicy} for the resource identified by the config view's key.
   *
   * @throws NotConfiguredException if no policy is configured and {@value #FAIL_ON_UNKNOWN_RESOURCE_ID}
   *         is set to true.
   */
  @Override
  public SharedResourceFactoryResponse<ThrottlingPolicy> createResource(SharedResourcesBroker<ThrottlingServerScopes> broker,
      ScopedConfigView<ThrottlingServerScopes, SharedLimiterKey> configView) throws NotConfiguredException {
    Config config = configView.getConfig();

    if (!config.hasPath(POLICY_KEY)) {
      if (config.hasPath(FAIL_ON_UNKNOWN_RESOURCE_ID) && config.getBoolean(FAIL_ON_UNKNOWN_RESOURCE_ID)) {
        throw new NotConfiguredException("Missing key " + POLICY_KEY);
      } else {
        // No policy configured but failing is not requested: serve a no-op (unlimited) policy.
        return new TTLResourceEntry<ThrottlingPolicy>(new NoopPolicy(),
            ConfigUtils.getLong(config, RELOAD_FREQUENCY_KEY, DEFAULT_RELOAD_FREQUENCY), false);
      }
    }

    try {
      // Class.newInstance() is deprecated (it bypasses compile-time exception checking); use the
      // declared no-arg constructor. All thrown types remain ReflectiveOperationExceptions.
      SpecificPolicyFactory factory =
          POLICY_CLASS_RESOLVER.resolveClass(config.getString(POLICY_KEY)).getDeclaredConstructor().newInstance();
      return new TTLResourceEntry<>(factory.createPolicy(configView.getKey(), broker, config),
          ConfigUtils.getLong(config, RELOAD_FREQUENCY_KEY, DEFAULT_RELOAD_FREQUENCY), false);
    } catch (ReflectiveOperationException roe) {
      throw new RuntimeException(roe);
    }
  }

  @Override
  public ThrottlingServerScopes getAutoScope(SharedResourcesBroker<ThrottlingServerScopes> broker,
      ConfigView<ThrottlingServerScopes, SharedLimiterKey> config) {
    return ThrottlingServerScopes.GLOBAL;
  }

  /**
   * A factory for a concrete {@link ThrottlingPolicy} implementation, resolved via
   * {@link #POLICY_CLASS_RESOLVER}. Implementations must have a public no-arg constructor.
   */
  public interface SpecificPolicyFactory {
    /**
     * @param sharedLimiterKey The {@link SharedLimiterKey} for the resource limited.
     * @param broker The {@link SharedResourcesBroker} used by the throttling server. Can be used to acquire resources
     *               shared among different threads / policies in the server.
     * @param config The resource configuration.
     */
    ThrottlingPolicy createPolicy(SharedLimiterKey sharedLimiterKey, SharedResourcesBroker<ThrottlingServerScopes> broker, Config config);
  }
}
| 1,700 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/LimiterServerResource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.util.concurrent.ExecutionException;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.linkedin.common.callback.Callback;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.data.DataMap;
import com.linkedin.data.template.GetMode;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.RestLiServiceException;
import com.linkedin.restli.server.annotations.CallbackParam;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.annotations.RestMethod;
import com.linkedin.restli.server.resources.ComplexKeyResourceAsyncTemplate;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.broker.MetricContextFactory;
import org.apache.gobblin.metrics.broker.SubTaggedMetricContextKey;
import org.apache.gobblin.util.ClosableTimerContext;
import org.apache.gobblin.util.NoopCloseable;
import org.apache.gobblin.util.Sleeper;
import org.apache.gobblin.util.limiter.Limiter;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import javax.inject.Inject;
import javax.inject.Named;
import lombok.extern.slf4j.Slf4j;
/**
 * Rest.li resource for allocating permits through Rest calls. Simply calls a {@link Limiter} in the server configured
 * through {@link SharedResourcesBroker}.
 */
@Alpha
@Slf4j
@RestLiCollection(name = "permits", namespace = "org.apache.gobblin.restli.throttling")
public class LimiterServerResource extends ComplexKeyResourceAsyncTemplate<PermitRequest, EmptyRecord, PermitAllocation> {

  public static final long TIMEOUT_MILLIS = 7000; // rest.li client times out after 10 seconds

  // Names used to @Inject dependencies into this resource.
  public static final String BROKER_INJECT_NAME = "broker";
  public static final String METRIC_CONTEXT_INJECT_NAME = "limiterResourceMetricContext";
  public static final String REQUEST_TIMER_INJECT_NAME = "limiterResourceRequestTimer";
  public static final String LEADER_FINDER_INJECT_NAME = "leaderFinder";

  // Metric names published into the per-resource MetricContext.
  public static final String REQUEST_TIMER_NAME = "limiterServer.requestTimer";
  public static final String PERMITS_REQUESTED_METER_NAME = "limiterServer.permitsRequested";
  public static final String PERMITS_GRANTED_METER_NAME = "limiterServer.permitsGranted";
  public static final String LIMITER_TIMER_NAME = "limiterServer.limiterTimer";
  public static final String RESOURCE_ID_TAG = "resourceId";
  /** Error-details key used to communicate the leader location on a 301 response. */
  public static final String LOCATION_301 = "Location";

  @Inject @Named(BROKER_INJECT_NAME)
  SharedResourcesBroker broker;
  @Inject @Named(METRIC_CONTEXT_INJECT_NAME)
  MetricContext metricContext;
  @Inject @Named(REQUEST_TIMER_INJECT_NAME)
  Timer requestTimer;
  @Inject @Named(LEADER_FINDER_INJECT_NAME)
  Optional<LeaderFinder<URIMetadata>> leaderFinderOpt;
  @Inject
  Sleeper sleeper;

  /**
   * Request permits from the limiter server. The returned {@link PermitAllocation} specifies the number of permits
   * that the client can use.
   *
   * <p>If this server is not the cluster leader, the request fails with a 301 redirect pointing at
   * the current leader. If no policy is configured for the resource, the request fails with a 422.
   */
  @Override
  @RestMethod.Get
  public void get(
      ComplexResourceKey<PermitRequest, EmptyRecord> key,
      @CallbackParam final Callback<PermitAllocation> callback) {
    try (Closeable context = (this.requestTimer == null ? NoopCloseable.INSTANCE : new ClosableTimerContext(this.requestTimer.time()))) {
      long startNanos = System.nanoTime();

      PermitRequest request = key.getKey();
      String resourceId = request.getResource();

      // Per-resource MetricContext tagged with the resource id.
      MetricContext resourceContext = (MetricContext) broker.getSharedResource(new MetricContextFactory(),
          new SubTaggedMetricContextKey(resourceId, ImmutableMap.of(RESOURCE_ID_TAG, resourceId)));
      Meter permitsRequestedMeter = resourceContext.meter(PERMITS_REQUESTED_METER_NAME);
      Meter permitsGrantedMeter = resourceContext.meter(PERMITS_GRANTED_METER_NAME);
      Timer limiterTimer = resourceContext.timer(LIMITER_TIMER_NAME);

      permitsRequestedMeter.mark(request.getPermits());

      if (this.leaderFinderOpt.isPresent() && !this.leaderFinderOpt.get().isLeader()) {
        // Not the leader: redirect the client to the current leader via a 301.
        URI leaderUri = this.leaderFinderOpt.get().getLeaderMetadata().getUri();

        RestLiServiceException exception = new RestLiServiceException(HttpStatus.S_301_MOVED_PERMANENTLY,
            String.format("New leader <a href=\"%s\">%s</a>", leaderUri, leaderUri));
        exception.setErrorDetails(new DataMap(ImmutableMap.of(LOCATION_301, leaderUri.toString())));
        throw exception;
      } else {
        ThrottlingPolicy policy = (ThrottlingPolicy) this.broker.getSharedResource(new ThrottlingPolicyFactory(),
            new SharedLimiterKey(request.getResource()));

        PermitAllocation allocation;
        try (Closeable thisContext = new ClosableTimerContext(limiterTimer.time())) {
          allocation = policy.computePermitAllocation(request);
        }

        if (request.getVersion(GetMode.DEFAULT) < ThrottlingProtocolVersion.WAIT_ON_CLIENT.ordinal()) {
          // If the client does not understand "waitForPermitsUse", delay the response at the server side.
          // This has a detrimental effect on server performance.
          long wait = allocation.getWaitForPermitUseMillis(GetMode.DEFAULT);
          allocation.setWaitForPermitUseMillis(0);
          if (wait > 0) {
            try {
              this.sleeper.sleep(wait);
            } catch (InterruptedException ie) {
              // Restore the interrupt flag and grant no permits.
              Thread.currentThread().interrupt();
              allocation.setPermits(0);
            }
          }
        }

        permitsGrantedMeter.mark(allocation.getPermits());
        log.debug("Request: {}, allocation: {}, elapsedTime: {} ns", request, allocation, System.nanoTime() - startNanos);
        callback.onSuccess(allocation);
      }
    } catch (NotConfiguredException nce) {
      // Preserve the cause so server logs show why the resource lookup failed.
      throw new RestLiServiceException(HttpStatus.S_422_UNPROCESSABLE_ENTITY,
          "No configuration for the requested resource.", nce);
    } catch (IOException ioe) {
      // Failed to close timer context. This should never happen
      throw new RuntimeException(ioe);
    }
  }

  /**
   * Synchronous variant of {@link #get(ComplexResourceKey, Callback)}: blocks until the allocation
   * is available and re-throws {@link RestLiServiceException}s as-is so callers see the intended
   * HTTP status.
   */
  public PermitAllocation getSync(ComplexResourceKey<PermitRequest, EmptyRecord> key) {
    try {
      FutureCallback<PermitAllocation> callback = new FutureCallback<>();
      get(key, callback);
      return callback.get();
    } catch (ExecutionException ee) {
      Throwable t = ee.getCause();
      if (t instanceof RestLiServiceException) {
        throw (RestLiServiceException) t;
      } else {
        throw new RuntimeException(t);
      }
    } catch (InterruptedException ie) {
      // Restore the interrupt flag before propagating.
      Thread.currentThread().interrupt();
      throw new RuntimeException(ie);
    }
  }
}
| 1,701 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/DynamicTokenBucket.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import lombok.Data;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
 * A wrapper around a {@link TokenBucket} that returns different number of tokens following an internal heuristic.
 *
 * The heuristic is as follows:
 * * The calling process specifies an ideal and minimum number of tokens it requires, as well as a timeout.
 * * If there is a large number of tokens stored (i.e. underutilization), this class may return more than the requested
 *   ideal number of tokens (up to 1/2 of the stored tokens). This reduces unnecessary slowdown when there is no
 *   contention.
 * * The object computes a target timeout equal to the minimum time needed to fulfill the minimum requested permits
 *   (according to the configured qps) plus a {@link #baseTimeout}.
 * * The object will return as many permits as it can using that timeout, bounded by minimum and desired number of permits.
 */
@Slf4j
public class DynamicTokenBucket {

  /**
   * Contains the number of allocated permits, the delay before they can be used, and whether the
   * request could possibly be satisfied within the caller's timeout.
   */
  @Data
  public static class PermitsAndDelay {
    private final long permits;
    private final long delay;
    private final boolean possibleToSatisfy;
  }

  @VisibleForTesting
  @Getter
  private final TokenBucket tokenBucket;
  // Extra time added on top of the minimum time needed to produce minPermits.
  private final long baseTimeout;

  /**
   * @param qps the average qps desired.
   * @param fullRequestTimeoutMillis max time to fully satisfy a token request. This is generally a small timeout, on the
   *                                 order of the network latency (e.g. ~100 ms).
   * @param maxBucketSizeMillis maximum number of unused tokens that can be stored during under-utilization time, in
   *                            milliseconds. The actual tokens stored will be 1000 * qps * maxBucketSizeMillis.
   */
  DynamicTokenBucket(long qps, long fullRequestTimeoutMillis, long maxBucketSizeMillis) {
    this.tokenBucket = new TokenBucket(qps, maxBucketSizeMillis);
    this.baseTimeout = fullRequestTimeoutMillis;
  }

  /**
   * Request tokens.
   * @param requestedPermits the ideal number of tokens to acquire.
   * @param minPermits the minimum number of tokens useful for the calling process. If this many tokens cannot be acquired,
   *                   the method will return 0 instead.
   * @param timeoutMillis the maximum wait the calling process is willing to wait for tokens.
   * @return a {@link PermitsAndDelay} for the allocated permits.
   */
  public PermitsAndDelay getPermitsAndDelay(long requestedPermits, long minPermits, long timeoutMillis) {
    try {
      // Under-utilization: if many tokens are stored, eagerly grant up to half of them without waiting.
      long storedTokens = this.tokenBucket.getStoredTokens();
      long eagerTokens = storedTokens / 2;
      if (eagerTokens > requestedPermits && this.tokenBucket.getTokens(eagerTokens, 0, TimeUnit.MILLISECONDS)) {
        return new PermitsAndDelay(eagerTokens, 0, true);
      }

      long millisToSatisfyMinPermits = (long) (minPermits / this.tokenBucket.getTokensPerMilli());
      if (millisToSatisfyMinPermits > timeoutMillis) {
        // Even the minimum useful number of permits cannot be produced within the caller's timeout.
        return new PermitsAndDelay(0, 0, false);
      }
      long allowedTimeout = Math.min(millisToSatisfyMinPermits + this.baseTimeout, timeoutMillis);

      // Try progressively smaller requests, halving each time, until reaching minPermits.
      while (requestedPermits > minPermits) {
        long wait = this.tokenBucket.tryReserveTokens(requestedPermits, allowedTimeout);
        if (wait >= 0) {
          return new PermitsAndDelay(requestedPermits, wait, true);
        }
        requestedPermits /= 2;
      }
      long wait = this.tokenBucket.tryReserveTokens(minPermits, allowedTimeout);
      if (wait >= 0) {
        // Report the permits actually reserved (minPermits). The previous code returned
        // requestedPermits, which after the halving loop can be smaller than minPermits, silently
        // leaking the difference of reserved-but-unreported tokens.
        return new PermitsAndDelay(minPermits, wait, true);
      }
    } catch (InterruptedException ie) {
      // Restore the interrupt flag and fall through to returning 0 permits.
      Thread.currentThread().interrupt();
    }
    return new PermitsAndDelay(0, 0, true);
  }

  /**
   * Request tokens. Like {@link #getPermitsAndDelay(long, long, long)} but block until the wait time passes.
   */
  public long getPermits(long requestedPermits, long minPermits, long timeoutMillis) {
    PermitsAndDelay permitsAndDelay = getPermitsAndDelay(requestedPermits, minPermits, timeoutMillis);
    if (permitsAndDelay.delay > 0) {
      try {
        Thread.sleep(permitsAndDelay.delay);
      } catch (InterruptedException ie) {
        // Restore the interrupt flag; the caller gets no permits.
        Thread.currentThread().interrupt();
        return 0;
      }
    }
    return permitsAndDelay.permits;
  }
}
| 1,702 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/URIMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.net.URI;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * A {@link org.apache.gobblin.restli.throttling.LeaderFinder.Metadata} that carries the {@link URI} of the process.
 */
public class URIMetadata implements LeaderFinder.Metadata {

  private final URI uri;

  public URIMetadata(URI uri) {
    this.uri = uri;
  }

  /** @return the {@link URI} of the process this metadata describes. */
  public URI getUri() {
    return this.uri;
  }

  /**
   * Short name derived from the URI, with characters invalid in hostnames (':' and '/')
   * replaced by underscores.
   */
  @Override
  public String getShortName() {
    return this.uri.toString().replaceAll("[:/]", "_");
  }
}
| 1,703 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/NoopPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
/**
 * A {@link ThrottlingPolicy} that performs no throttling: every request is eagerly granted an
 * effectively unlimited number of permits.
 */
@Alpha
public class NoopPolicy implements ThrottlingPolicy {

  public static final String FACTORY_ALIAS = "noop";

  /** A {@link ThrottlingPolicyFactory.SpecificPolicyFactory} that produces {@link NoopPolicy} instances. */
  @Alias(FACTORY_ALIAS)
  public static class Factory implements ThrottlingPolicyFactory.SpecificPolicyFactory {
    @Override
    public ThrottlingPolicy createPolicy(SharedLimiterKey key,
        SharedResourcesBroker<ThrottlingServerScopes> broker, Config config) {
      return new NoopPolicy();
    }
  }

  @Override
  public PermitAllocation computePermitAllocation(PermitRequest request) {
    // Grant an effectively infinite number of permits. The default grant is capped at
    // Long.MAX_VALUE / 100 (rather than Long.MAX_VALUE) to leave headroom against overflow in
    // downstream arithmetic, but a larger explicit request is always honored in full.
    long grantedPermits = Math.max(Long.MAX_VALUE / 100, request.getPermits());

    PermitAllocation allocation = new PermitAllocation();
    allocation.setPermits(grantedPermits);
    allocation.setExpiration(Long.MAX_VALUE);
    return allocation;
  }

  @Override
  public Map<String, String> getParameters() {
    return ImmutableMap.of();
  }

  @Override
  public String getDescription() {
    return "Noop policy. Infinite permits available.";
  }
}
| 1,704 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/LeaderFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.io.Serializable;
import com.google.common.util.concurrent.Service;
/**
 * An interface to find the leader in a cluster of processes.
 *
 * Multiple processes that instantiate equivalent {@link LeaderFinder}s automatically elect a single leader at all times.
 * Each process can verify whether it is the leader using {@link #isLeader()}, and it can obtain the metadata of the
 * current leader using {@link #getLeaderMetadata()}.
 *
 * The metadata is application specific, but it might include the {@link java.net.URI} of the leader for example.
 *
 * Implementations extend Guava's {@link Service}, so they must be started before use and stopped
 * when no longer needed.
 *
 * @param <T> the concrete {@link Metadata} type exchanged between processes in the cluster.
 */
public interface LeaderFinder<T extends LeaderFinder.Metadata> extends Service {

  /**
   * @return true if the current process is the leader.
   */
  boolean isLeader();

  /**
   * @return The metadata of the current leader.
   */
  T getLeaderMetadata();

  /**
   * @return The metadata of the current process.
   */
  T getLocalMetadata();

  /**
   * An interface for process-specific metadata in a cluster using {@link LeaderFinder}. In general, this metadata will
   * contain information useful for the non-leaders (for example, the {@link java.net.URI} of the leader).
   *
   * Metadata is {@link Serializable} so it can be shared between processes (e.g. via a coordination
   * service).
   */
  interface Metadata extends Serializable {
    /**
     * @return A short, sanitized name for the current process. In general, the name should only include characters
     *         that are valid in a hostname.
     */
    String getShortName();
  }
}
| 1,705 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/PoliciesResource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Map;
import com.codahale.metrics.Meter;
import com.google.common.collect.ImmutableMap;
import com.linkedin.data.template.StringMap;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.RestLiServiceException;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.resources.CollectionResourceTemplate;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.broker.MetricContextFactory;
import org.apache.gobblin.metrics.broker.SubTaggedMetricContextKey;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import javax.inject.Inject;
import javax.inject.Named;
import static org.apache.gobblin.restli.throttling.LimiterServerResource.*;
/**
 * A Rest.li endpoint for getting the {@link ThrottlingPolicy} associated with a resource id,
 * including the policy's parameters and the one-minute rates of its meters.
 */
@RestLiCollection(name = "policies", namespace = "org.apache.gobblin.restli.throttling")
public class PoliciesResource extends CollectionResourceTemplate<String, Policy> {

  @Inject
  @Named(BROKER_INJECT_NAME)
  SharedResourcesBroker broker;

  /**
   * @param resourceId id of the throttled resource.
   * @return a {@link Policy} record describing the configured {@link ThrottlingPolicy}.
   * @throws RestLiServiceException with 404 status when no policy is configured for the resource.
   */
  public Policy get(String resourceId) {
    try {
      ThrottlingPolicy throttlingPolicy =
          (ThrottlingPolicy) this.broker.getSharedResource(new ThrottlingPolicyFactory(), new SharedLimiterKey(resourceId));

      Policy restliPolicy = new Policy();
      restliPolicy.setPolicyName(throttlingPolicy.getClass().getSimpleName());
      restliPolicy.setResource(resourceId);
      restliPolicy.setParameters(new StringMap(throttlingPolicy.getParameters()));
      restliPolicy.setPolicyDetails(throttlingPolicy.getDescription());

      MetricContext resourceContext = (MetricContext) broker.getSharedResource(new MetricContextFactory(),
          new SubTaggedMetricContextKey(resourceId, ImmutableMap.of(RESOURCE_ID_TAG, resourceId)));

      // Export the one-minute rate of every meter in the resource's metric context.
      StringMap metrics = new StringMap();
      for (Map.Entry<String, Meter> meter : resourceContext.getMeters().entrySet()) {
        metrics.put(meter.getKey(), Double.toString(meter.getValue().getOneMinuteRate()));
      }
      restliPolicy.setMetrics(metrics);

      return restliPolicy;
    } catch (NotConfiguredException nce) {
      // Preserve the cause so server logs show why the policy lookup failed.
      throw new RestLiServiceException(HttpStatus.S_404_NOT_FOUND, "Policy not found for resource " + resourceId, nce);
    }
  }
}
| 1,706 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/ThrottlingServerScopes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.gobblin.broker.SimpleScope;
import org.apache.gobblin.broker.iface.ScopeInstance;
import org.apache.gobblin.broker.iface.ScopeType;
import javax.annotation.Nullable;
/**
 * The {@link ScopeType}s understood by the throttling server.
 */
public enum ThrottlingServerScopes implements ScopeType<ThrottlingServerScopes> {
  GLOBAL("global"),
  LEADER("leader", GLOBAL),
  SLAVE("slave", LEADER);

  // Scopes confined to a single process: every scope except GLOBAL.
  private static final Set<ThrottlingServerScopes> LOCAL_SCOPES = Sets.newHashSet(LEADER, SLAVE);

  private final String defaultId;
  private final List<ThrottlingServerScopes> parentScopes;

  ThrottlingServerScopes(String defaultId, ThrottlingServerScopes... parentScopes) {
    this.defaultId = defaultId;
    this.parentScopes = Lists.newArrayList(parentScopes);
  }

  /** Whether this scope is local to a single process. */
  @Override
  public boolean isLocal() {
    return LOCAL_SCOPES.contains(this);
  }

  @Override
  public Collection<ThrottlingServerScopes> parentScopes() {
    return this.parentScopes;
  }

  /** Default {@link ScopeInstance} for this scope, or null when no default id exists. */
  @Nullable
  @Override
  public ScopeInstance defaultScopeInstance() {
    if (this.defaultId == null) {
      return null;
    }
    return new SimpleScope<>(this, this.defaultId);
  }

  @Override
  public ThrottlingServerScopes rootScope() {
    return GLOBAL;
  }
}
| 1,707 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/ConfigClientBasedPolicyFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.net.URI;
import java.net.URISyntaxException;
import com.typesafe.config.Config;
import org.apache.gobblin.broker.TTLResourceEntry;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.config.client.ConfigClient;
import org.apache.gobblin.config.client.api.ConfigStoreFactoryDoesNotExistsException;
import org.apache.gobblin.config.client.api.VersionStabilityPolicy;
import org.apache.gobblin.config.store.api.ConfigStoreCreationException;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
/**
 * A {@link org.apache.gobblin.restli.throttling.ThrottlingPolicyFactory.SpecificPolicyFactory} that looks up policies using a
 * {@link ConfigClient}.
 *
 * The config store prefix should be specified at key {@link #CONFIG_KEY_URI_PREFIX_KEY}
 * (e.g. "simple-hdfs://myCluster.com/myConfigStore"), and keys in the config store should be prepended with
 * {@link #THROTTLING_CONFIG_PREFIX}.
 */
public class ConfigClientBasedPolicyFactory implements ThrottlingPolicyFactory.SpecificPolicyFactory {

  private static final long CONFIG_CLIENT_TTL_IN_MILLIS = 60000;
  // Cached ConfigClient wrapped in a TTL entry so it is periodically recreated.
  // Mutable; guarded by the class lock in getConfigClient().
  private static TTLResourceEntry<ConfigClient> configClientEntry;

  public static final String CONFIG_KEY_URI_PREFIX_KEY = "configKeyUriPrefix";
  public static final String THROTTLING_CONFIG_PREFIX = "globalThrottling";
  public static final String POLICY_KEY = THROTTLING_CONFIG_PREFIX + "." + ThrottlingPolicyFactory.POLICY_KEY;

  /**
   * Resolves the resource's configuration from the config store and delegates policy creation to the
   * {@link ThrottlingPolicyFactory.SpecificPolicyFactory} named at {@value #POLICY_KEY}.
   */
  @Override
  public ThrottlingPolicy createPolicy(SharedLimiterKey key, SharedResourcesBroker<ThrottlingServerScopes> broker, Config config) {
    try {
      Config resourceConfig =
          getConfigClient().getConfig(new URI(config.getString(CONFIG_KEY_URI_PREFIX_KEY) + key.getResourceLimitedPath()));
      // Class.newInstance() is deprecated (it bypasses compile-time exception checking); use the
      // declared no-arg constructor. All thrown types remain ReflectiveOperationExceptions.
      ThrottlingPolicyFactory.SpecificPolicyFactory factory =
          ThrottlingPolicyFactory.POLICY_CLASS_RESOLVER.resolveClass(resourceConfig.getString(POLICY_KEY))
              .getDeclaredConstructor().newInstance();
      return factory.createPolicy(key, broker, ConfigUtils.getConfigOrEmpty(resourceConfig, THROTTLING_CONFIG_PREFIX));
    } catch (URISyntaxException | ConfigStoreFactoryDoesNotExistsException | ConfigStoreCreationException |
        ReflectiveOperationException exc) {
      throw new RuntimeException(exc);
    }
  }

  /** Lazily creates (and periodically refreshes) the shared {@link ConfigClient}. */
  private static synchronized ConfigClient getConfigClient() {
    if (configClientEntry == null || !configClientEntry.isValid()) {
      configClientEntry = new TTLResourceEntry<>(ConfigClient.createConfigClient(VersionStabilityPolicy.READ_FRESHEST),
          CONFIG_CLIENT_TTL_IN_MILLIS, false);
    }
    return configClientEntry.getResource();
  }
}
| 1,708 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/CountBasedPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Map;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.RestLiServiceException;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.limiter.CountBasedLimiter;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import lombok.Getter;
/**
* A count based {@link ThrottlingPolicy} used for testing. Not recommended as an actual policy.
*/
/**
 * A count based {@link ThrottlingPolicy}: grants permits from a fixed pool until it is exhausted.
 */
public class CountBasedPolicy implements ThrottlingPolicy {
  public static final String COUNT_KEY = "count";
  public static final String FACTORY_ALIAS = "countForTesting";

  /** Factory resolving this policy from config; requires {@link #COUNT_KEY}. */
  @Alias(FACTORY_ALIAS)
  public static class Factory implements ThrottlingPolicyFactory.SpecificPolicyFactory {
    @Override
    public ThrottlingPolicy createPolicy(SharedLimiterKey key, SharedResourcesBroker<ThrottlingServerScopes> broker, Config config) {
      Preconditions.checkArgument(config.hasPath(COUNT_KEY), "Missing key " + COUNT_KEY);
      return new CountBasedPolicy(config.getLong(COUNT_KEY));
    }
  }

  private final CountBasedLimiter limiter;
  // Total number of permits this policy may ever grant.
  @Getter
  private final long count;

  public CountBasedPolicy(long count) {
    this.count = count;
    this.limiter = new CountBasedLimiter(count);
  }

  /**
   * Grants either all requested permits or none. When the underlying limiter cannot satisfy the
   * request, responds with HTTP 403 via {@link RestLiServiceException}.
   */
  @Override
  public PermitAllocation computePermitAllocation(PermitRequest request) {
    long permits = request.getPermits();
    long allocated = 0;
    try {
      if (limiter.acquirePermits(permits) != null) {
        allocated = permits;
      } else {
        throw new RestLiServiceException(HttpStatus.S_403_FORBIDDEN, "Not enough permits.");
      }
    } catch (InterruptedException ie) {
      // Restore the interrupt flag so callers up the stack can observe the interruption,
      // then fall through and return zero permits.
      Thread.currentThread().interrupt();
    }
    PermitAllocation allocation = new PermitAllocation();
    allocation.setPermits(allocated);
    allocation.setExpiration(Long.MAX_VALUE);
    if (allocated <= 0) {
      // Nothing granted: tell the client to back off before retrying.
      allocation.setMinRetryDelayMillis(60000);
    }
    return allocation;
  }

  @Override
  public Map<String, String> getParameters() {
    return ImmutableMap.of("maxPermits", Long.toString(this.count));
  }

  @Override
  public String getDescription() {
    return "Count based policy. Max permits: " + this.count;
  }
}
| 1,709 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/QPSPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.linkedin.data.template.GetMode;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.limiter.broker.SharedLimiterKey;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* A {@link ThrottlingPolicy} based on a QPS (queries per second). It internally uses a {@link DynamicTokenBucket}.
*/
@Alpha
@Slf4j
public class QPSPolicy implements ThrottlingPolicy {
  public static final String FACTORY_ALIAS = "qps";

  /** Max time we will ask clients to wait for a new allocation. By default set to the Rest.li timeout because
   * that is how we determine whether permits can be granted. */
  public static final long MAX_WAIT_MILLIS = LimiterServerResource.TIMEOUT_MILLIS;

  /**
   * The qps the policy should enforce.
   */
  public static final String QPS = "qps";

  /**
   * The time the policy should spend trying to satisfy the full permit request.
   */
  public static final String FULL_REQUEST_TIMEOUT_MILLIS = "fullRequestTimeoutMillis";
  public static final long DEFAULT_FULL_REQUEST_TIMEOUT = 50;

  /**
   * Maximum number of tokens (in milliseconds) that can be accumulated when underutilized.
   */
  public static final String MAX_BUCKET_SIZE_MILLIS = "maxBucketSizeMillis";
  public static final long DEFAULT_MAX_BUCKET_SIZE = 10000;

  // The enforced queries-per-second rate, read from config at construction.
  @Getter
  private final long qps;

  // Token bucket that implements the actual rate limiting; exposed for tests only.
  @VisibleForTesting
  @Getter
  private final DynamicTokenBucket tokenBucket;

  /** Factory that builds a {@link QPSPolicy} directly from the resource's config. */
  @Alias(FACTORY_ALIAS)
  public static class Factory implements ThrottlingPolicyFactory.SpecificPolicyFactory {
    @Override
    public ThrottlingPolicy createPolicy(SharedLimiterKey key, SharedResourcesBroker<ThrottlingServerScopes> broker, Config config) {
      return new QPSPolicy(config);
    }
  }

  /**
   * @param config must contain {@link #QPS}; {@link #FULL_REQUEST_TIMEOUT_MILLIS} and
   *               {@link #MAX_BUCKET_SIZE_MILLIS} are optional and fall back to their defaults.
   */
  public QPSPolicy(Config config) {
    Preconditions.checkArgument(config.hasPath(QPS), "QPS required.");
    this.qps = config.getLong(QPS);
    long fullRequestTimeoutMillis = config.hasPath(FULL_REQUEST_TIMEOUT_MILLIS)
        ? config.getLong(FULL_REQUEST_TIMEOUT_MILLIS) : DEFAULT_FULL_REQUEST_TIMEOUT;
    long maxBucketSizeMillis = config.hasPath(MAX_BUCKET_SIZE_MILLIS)
        ? config.getLong(MAX_BUCKET_SIZE_MILLIS) : DEFAULT_MAX_BUCKET_SIZE;
    this.tokenBucket = new DynamicTokenBucket(qps, fullRequestTimeoutMillis, maxBucketSizeMillis);
  }

  /**
   * Asks the token bucket for the requested permits (falling back to the client's stated minimum)
   * and translates the result into a {@link PermitAllocation}, including retry-delay hints.
   */
  @Override
  public PermitAllocation computePermitAllocation(PermitRequest request) {
    long permitsRequested = request.getPermits();
    // If the client did not specify a minimum, treat the full request as the minimum.
    Long minPermits = request.getMinPermits(GetMode.NULL);
    if (minPermits == null) {
      minPermits = permitsRequested;
    }
    DynamicTokenBucket.PermitsAndDelay permitsGranted =
        this.tokenBucket.getPermitsAndDelay(permitsRequested, minPermits, LimiterServerResource.TIMEOUT_MILLIS);
    PermitAllocation allocation = new PermitAllocation();
    allocation.setPermits(permitsGranted.getPermits());
    allocation.setExpiration(Long.MAX_VALUE);
    allocation.setWaitForPermitUseMillis(permitsGranted.getDelay());
    if (!permitsGranted.isPossibleToSatisfy()) {
      // Tell the client its stated minimum cannot be satisfied by this policy.
      allocation.setUnsatisfiablePermits(request.getMinPermits(GetMode.DEFAULT));
    }
    if (permitsRequested > 0) {
      // This heuristic asks clients to wait before making any requests
      // for an amount of time based on the percentage of their requested permits that were granted
      double fractionGranted = Math.min(1.0, ((double) permitsGranted.getPermits() / permitsRequested));
      double wait = MAX_WAIT_MILLIS * (1 - fractionGranted);
      allocation.setMinRetryDelayMillis((long) wait);
    }
    return allocation;
  }

  @Override
  public Map<String, String> getParameters() {
    return ImmutableMap.of("qps", Long.toString(this.qps));
  }

  @Override
  public String getDescription() {
    return "QPS based policy. QPS: " + this.qps;
  }
}
| 1,710 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli | Create_ds/gobblin/gobblin-restli/gobblin-throttling-service/gobblin-throttling-service-server/src/main/java/org/apache/gobblin/restli/throttling/ThrottlingGuiceServletConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli.throttling;
import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.TypeLiteral;
import com.google.inject.name.Names;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.linkedin.r2.filter.FilterChain;
import com.linkedin.r2.filter.FilterChains;
import com.linkedin.r2.filter.compression.EncodingType;
import com.linkedin.r2.filter.compression.ServerCompressionFilter;
import com.linkedin.r2.filter.logging.SimpleLoggingFilter;
import com.linkedin.r2.filter.message.rest.RestFilter;
import com.linkedin.r2.filter.message.stream.StreamFilter;
import com.linkedin.restli.server.RestLiConfig;
import com.linkedin.restli.server.guice.GuiceRestliServlet;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.broker.MetricContextFactory;
import org.apache.gobblin.metrics.broker.MetricContextKey;
import org.apache.gobblin.util.Sleeper;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* {@link GuiceServletContextListener} for creating an injector in a gobblin-throttling-server servlet.
*/
@Slf4j
@Getter
public class ThrottlingGuiceServletConfig extends GuiceServletContextListener implements Closeable {
  public static final String THROTTLING_SERVER_PREFIX = "throttlingServer.";
  public static final String LISTENING_PORT = THROTTLING_SERVER_PREFIX + "listeningPort";
  public static final String HOSTNAME = THROTTLING_SERVER_PREFIX + "hostname";
  public static final String ZK_STRING_KEY = THROTTLING_SERVER_PREFIX + "ha.zkString";
  public static final String HA_CLUSTER_NAME = THROTTLING_SERVER_PREFIX + "ha.clusterName";

  // Present only when HA is configured (ZK_STRING_KEY set). Remains null until initialize() runs.
  private Optional<LeaderFinder<URIMetadata>> _leaderFinder;
  private Config _config;
  // Sleeper bound into the injector; replaced with a mock via mockSleeper() in tests.
  private Sleeper _sleeper = null;
  private Injector _injector;

  /** Collects servlet init parameters into a {@link Config} and runs {@link #initialize(Config)}. */
  @Override
  public void contextInitialized(ServletContextEvent servletContextEvent) {
    ServletContext context = servletContextEvent.getServletContext();
    Enumeration<String> parameters = context.getInitParameterNames();
    Map<String, String> configMap = Maps.newHashMap();
    while (parameters.hasMoreElements()) {
      String key = parameters.nextElement();
      configMap.put(key, context.getInitParameter(key));
    }
    initialize(ConfigFactory.parseMap(configMap));
    super.contextInitialized(servletContextEvent);
  }

  /**
   * Use a mock sleeper for testing. Note this should be called before initialization.
   */
  public Sleeper.MockSleeper mockSleeper() {
    this._sleeper = new Sleeper.MockSleeper();
    return (Sleeper.MockSleeper) this._sleeper;
  }

  /**
   * Stores the config, starts the HA leader finder when one is configured, and builds the injector.
   *
   * @throws RuntimeException wrapping any URI, IO, or startup-timeout failure.
   */
  public void initialize(Config config) {
    try {
      this._config = config;
      this._leaderFinder = getLeaderFinder(this._config);
      if (this._leaderFinder.isPresent()) {
        this._leaderFinder.get().startAsync();
        this._leaderFinder.get().awaitRunning(100, TimeUnit.SECONDS);
      }
      this._injector = createInjector(this._config, this._leaderFinder);
    } catch (URISyntaxException | IOException | TimeoutException exc) {
      log.error(String.format("Error in %s initialization.", ThrottlingGuiceServletConfig.class.getSimpleName()), exc);
      throw new RuntimeException(exc);
    }
  }

  @Override
  public Injector getInjector() {
    return this._injector;
  }

  /** Builds the Guice injector: Rest.li config, shared broker, metrics, leader finder, and filters. */
  private Injector createInjector(final Config config, final Optional<LeaderFinder<URIMetadata>> leaderFinder) {
    final SharedResourcesBroker<ThrottlingServerScopes> topLevelBroker =
        SharedResourcesBrokerFactory.createDefaultTopLevelBroker(config, ThrottlingServerScopes.GLOBAL.defaultScopeInstance());
    return Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        try {
          if (_sleeper == null) {
            _sleeper = new Sleeper();
          }
          RestLiConfig restLiConfig = new RestLiConfig();
          restLiConfig.setResourcePackageNames("org.apache.gobblin.restli.throttling");
          bind(RestLiConfig.class).toInstance(restLiConfig);
          bind(Sleeper.class).toInstance(_sleeper);
          bind(SharedResourcesBroker.class).annotatedWith(Names.named(LimiterServerResource.BROKER_INJECT_NAME)).toInstance(topLevelBroker);
          MetricContext metricContext =
              topLevelBroker.getSharedResource(new MetricContextFactory<ThrottlingServerScopes>(), new MetricContextKey());
          Timer timer = metricContext.timer(LimiterServerResource.REQUEST_TIMER_NAME);
          bind(MetricContext.class).annotatedWith(Names.named(LimiterServerResource.METRIC_CONTEXT_INJECT_NAME)).toInstance(metricContext);
          bind(Timer.class).annotatedWith(Names.named(LimiterServerResource.REQUEST_TIMER_INJECT_NAME)).toInstance(timer);
          bind(new TypeLiteral<Optional<LeaderFinder<URIMetadata>>>() {
          }).annotatedWith(Names.named(LimiterServerResource.LEADER_FINDER_INJECT_NAME)).toInstance(leaderFinder);
          // Snappy-compress rest responses and log stream traffic.
          List<RestFilter> restFilters = new ArrayList<>();
          restFilters.add(new ServerCompressionFilter(EncodingType.SNAPPY.getHttpName()));
          List<StreamFilter> streamFilters = new ArrayList<>();
          streamFilters.add(new SimpleLoggingFilter());
          FilterChain filterChain = FilterChains.create(restFilters, streamFilters);
          bind(FilterChain.class).toInstance(filterChain);
        } catch (NotConfiguredException nce) {
          throw new RuntimeException(nce);
        }
      }
    }, new ServletModule() {
      @Override
      protected void configureServlets() {
        serve("/*").with(GuiceRestliServlet.class);
      }
    });
  }

  /**
   * Builds a zookeeper-backed leader finder when HA is configured (i.e. {@link #ZK_STRING_KEY} set);
   * returns {@link Optional#absent()} otherwise.
   */
  private static Optional<LeaderFinder<URIMetadata>> getLeaderFinder(Config config) throws URISyntaxException,
      IOException {
    if (config.hasPath(ZK_STRING_KEY)) {
      Preconditions.checkArgument(config.hasPath(LISTENING_PORT), "Missing required config " + LISTENING_PORT);
      Preconditions.checkArgument(config.hasPath(HA_CLUSTER_NAME), "Missing required config " + HA_CLUSTER_NAME);
      int port = config.getInt(LISTENING_PORT);
      String hostname = config.hasPath(HOSTNAME) ? config.getString(HOSTNAME) : InetAddress.getLocalHost().getCanonicalHostName();
      String clusterName = config.getString(HA_CLUSTER_NAME);
      String zkString = config.getString(ZK_STRING_KEY);
      return Optional.<LeaderFinder<URIMetadata>>of(new ZookeeperLeaderElection<>(zkString, clusterName,
          new URIMetadata(new URI("http", null, hostname, port, null, null, null))));
    }
    return Optional.absent();
  }

  @Override
  public void contextDestroyed(ServletContextEvent servletContextEvent) {
    close();
    super.contextDestroyed(servletContextEvent);
  }

  @Override
  public void close() {
    try {
      // _leaderFinder is only assigned in initialize(); the servlet container may invoke
      // contextDestroyed() (and thus close()) even when initialization never ran or failed,
      // so guard against null to avoid an NPE during shutdown.
      if (this._leaderFinder != null && this._leaderFinder.isPresent()) {
        this._leaderFinder.get().stopAsync();
        this._leaderFinder.get().awaitTerminated(2, TimeUnit.SECONDS);
      }
    } catch (TimeoutException te) {
      // Best-effort shutdown: ignore a timeout waiting for the leader finder to stop.
    }
  }

  /**
   * Get an instance of {@link LimiterServerResource}.
   */
  public LimiterServerResource getLimiterResource() {
    return this._injector.getInstance(LimiterServerResource.class);
  }
}
| 1,711 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin/service/FlowConfigResourceLocalHandlerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Maps;
import com.linkedin.data.template.StringMap;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FlowSpec;
public class FlowConfigResourceLocalHandlerTest {
private static final String TEST_GROUP_NAME = "testGroup1";
private static final String TEST_FLOW_NAME = "testFlow1";
private static final String TEST_SCHEDULE = "0 1/0 * ? * *";
private static final String TEST_TEMPLATE_URI = "FS:///templates/test.template";
@Test
public void testCreateFlowSpecForConfig() throws URISyntaxException {
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "a:b:c*.d");
FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
.setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
FlowSpec flowSpec = FlowConfigResourceLocalHandler.createFlowSpecForConfig(flowConfig);
Assert.assertEquals(flowSpec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), TEST_GROUP_NAME);
Assert.assertEquals(flowSpec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), TEST_FLOW_NAME);
Assert.assertEquals(flowSpec.getConfig().getString(ConfigurationKeys.JOB_SCHEDULE_KEY), TEST_SCHEDULE);
Assert.assertEquals(flowSpec.getConfig().getBoolean(ConfigurationKeys.FLOW_RUN_IMMEDIATELY), true);
Assert.assertEquals(flowSpec.getConfig().getString("param1"), "a:b:c*.d");
Assert.assertEquals(flowSpec.getTemplateURIs().get().size(), 1);
Assert.assertTrue(flowSpec.getTemplateURIs().get().contains(new URI(TEST_TEMPLATE_URI)));
flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1");
flowProperties.put("param2", "${param1}-123");
flowProperties.put("param3", "\"a:b:c*.d\"");
flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
.setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
flowSpec = FlowConfigResourceLocalHandler.createFlowSpecForConfig(flowConfig);
Assert.assertEquals(flowSpec.getConfig().getString(ConfigurationKeys.FLOW_GROUP_KEY), TEST_GROUP_NAME);
Assert.assertEquals(flowSpec.getConfig().getString(ConfigurationKeys.FLOW_NAME_KEY), TEST_FLOW_NAME);
Assert.assertEquals(flowSpec.getConfig().getString(ConfigurationKeys.JOB_SCHEDULE_KEY), TEST_SCHEDULE);
Assert.assertEquals(flowSpec.getConfig().getBoolean(ConfigurationKeys.FLOW_RUN_IMMEDIATELY), true);
Assert.assertEquals(flowSpec.getConfig().getString("param1"),"value1");
Assert.assertEquals(flowSpec.getConfig().getString("param2"),"value1-123");
Assert.assertEquals(flowSpec.getConfig().getString("param3"), "a:b:c*.d");
Assert.assertEquals(flowSpec.getTemplateURIs().get().size(), 1);
Assert.assertTrue(flowSpec.getTemplateURIs().get().contains(new URI(TEST_TEMPLATE_URI)));
}
} | 1,712 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin/service/FlowExecutionResourceLocalHandlerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import org.testng.Assert;
import org.testng.annotations.Test;
public class FlowExecutionResourceLocalHandlerTest {
@Test
public void testEstimateCopyTimeLeftSanityCheck() throws Exception {
long currentTime = 10000;
long startTime = 0;
int copyPercentage = 50;
long timeLeft = FlowExecutionResourceLocalHandler.estimateCopyTimeLeft(currentTime, startTime, copyPercentage);
Assert.assertEquals(timeLeft, 10);
}
@Test
public void testEstimateCopyTimeLeftSimple() throws Exception {
long currentTime = 50000;
long startTime = 20000;
int copyPercentage = 10;
long timeLeft = FlowExecutionResourceLocalHandler.estimateCopyTimeLeft(currentTime, startTime, copyPercentage);
Assert.assertEquals(timeLeft, 270);
}
@Test
public void testEstimateCopyTimeLeftMedium() throws Exception {
long currentTime = 5000000;
long startTime = 1500000;
int copyPercentage = 25;
long timeLeft = FlowExecutionResourceLocalHandler.estimateCopyTimeLeft(currentTime, startTime, copyPercentage);
Assert.assertEquals(timeLeft, 10500);
}
@Test
public void testEstimateCopyTimeRealData() throws Exception {
long currentTime = 1626717751099L;
long startTime = 1626716510626L;
int copyPercentage = 24;
long timeLeft = FlowExecutionResourceLocalHandler.estimateCopyTimeLeft(currentTime, startTime, copyPercentage);
Assert.assertEquals(timeLeft, 3926L);
}
} | 1,713 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin/service/LocalGroupOwnershipServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import org.apache.gobblin.config.ConfigBuilder;
@Test(groups = { "gobblin.service" })
public class LocalGroupOwnershipServiceTest {
  private File _testDirectory;
  private GroupOwnershipService groupOwnershipService;
  private File groupConfigFile;

  /** Creates a temp directory with a group membership json and points the service at it. */
  @BeforeClass
  public void setUp() throws Exception {
    _testDirectory = Files.createTempDir();
    this.groupConfigFile = new File(_testDirectory + "/TestGroups.json");
    String groups = "{\"testGroup\": \"testName,testName2\"}";
    Files.write(groups.getBytes(), this.groupConfigFile);
    Config groupServiceConfig = ConfigBuilder.create()
        .addPrimitive(LocalGroupOwnershipService.GROUP_MEMBER_LIST, this.groupConfigFile.getAbsolutePath())
        .build();
    groupOwnershipService = new LocalGroupOwnershipService(groupServiceConfig);
  }

  @Test
  public void testLocalGroupOwnershipUpdates() throws Exception {
    List<ServiceRequester> testRequester = new ArrayList<>();
    testRequester.add(new ServiceRequester("testName", "USER_PRINCIPAL", "testFrom"));
    Assert.assertFalse(this.groupOwnershipService.isMemberOfGroup(testRequester, "testGroup2"));
    // Replace the membership file on disk with one that does contain testGroup2.
    String filePath = this.groupConfigFile.getAbsolutePath();
    this.groupConfigFile.delete();
    this.groupConfigFile = new File(filePath);
    String groups = "{\"testGroup2\": \"testName,testName3\"}";
    Files.write(groups.getBytes(), this.groupConfigFile);
    // this should return true now as the localGroupOwnership service should have updated as the file changed
    Assert.assertTrue(this.groupOwnershipService.isMemberOfGroup(testRequester, "testGroup2"));
  }

  @AfterClass(alwaysRun = true)
  public void tearDown() {
    // File.delete() on a non-empty directory is a silent no-op, so the config file must be
    // removed first; otherwise the temp directory (and file) leaks after every run.
    if (this.groupConfigFile != null && !this.groupConfigFile.delete()) {
      this.groupConfigFile.deleteOnExit();
    }
    if (this._testDirectory != null && !this._testDirectory.delete()) {
      this._testDirectory.deleteOnExit();
    }
  }
}
| 1,714 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/test/java/org/apache/gobblin/service/ServiceRequesterSerDerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.util.ConfigUtils;
@Test
public class ServiceRequesterSerDerTest {

  /**
   * Serializes {@code requesters}, pushes the serialized form through the same
   * Properties -> Config -> Properties round trip that FlowConfigResourceLocalHandler performs
   * (config creation must happen this way because that handler reads the flowconfig like this),
   * then asserts that deserializing yields the original list.
   */
  private static void assertSerDeRoundTrip(List<ServiceRequester> requesters) throws IOException {
    String serialized = RequesterService.serialize(requesters);
    Properties props = new Properties();
    props.put(RequesterService.REQUESTER_LIST, serialized);
    Config config = ConfigBuilder.create().build()
        .withFallback(ConfigFactory.parseString(props.toString()).resolve());
    Properties roundTripped = ConfigUtils.configToProperties(config);
    String reserialized = roundTripped.getProperty(RequesterService.REQUESTER_LIST);
    // The re-serialized text is not compared to the original: textual equality may not hold
    // unless we use/write a json comparator.
    // Assert.assertEquals(reserialized, serialized);
    Assert.assertEquals(RequesterService.deserialize(serialized), requesters);
  }

  public void testSerDerWithEmptyRequester() throws IOException {
    assertSerDeRoundTrip(new ArrayList<>());
  }

  public void testSerDerWithConfig() throws IOException {
    ServiceRequester sr1 = new ServiceRequester("kafkaetl", "user", "dv");
    ServiceRequester sr2 = new ServiceRequester("gobblin", "group", "dv");
    ServiceRequester sr3 = new ServiceRequester("crm-backend", "service", "cert");
    // A property value with config-substitution syntax must survive the round trip verbatim.
    sr1.getProperties().put("customKey", "${123}");
    List<ServiceRequester> requesters = new ArrayList<>();
    requesters.add(sr1);
    requesters.add(sr2);
    requesters.add(sr3);
    assertSerDeRoundTrip(requesters);
  }

  public void testOldSerde() throws IOException {
    // test for backward compatibility
    String serialized = "W3sibmFtZSI6ImNocmxpIiwidHlwZSI6IlVTRVJfUFJJTkNJUEFMIiwiZnJvbSI6ImR2X3Rva2VuIiwicHJvcGVydGllcyI6e319XQ%3D%3D";
    List<ServiceRequester> deserialized = RequesterService.deserialize(serialized);
    List<ServiceRequester> expected = Collections.singletonList(new ServiceRequester("chrli", "USER_PRINCIPAL", "dv_token"));
    Assert.assertEquals(deserialized, expected);
  }
}
| 1,715 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/NoopGroupOwnershipService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.List;
import com.typesafe.config.Config;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.gobblin.annotation.Alias;
@Alias("noop")
@Singleton
public class NoopGroupOwnershipService extends GroupOwnershipService{
@Inject
public NoopGroupOwnershipService(Config config) {
}
public boolean isMemberOfGroup(List<ServiceRequester> serviceRequesters, String group) {
return true;
}
}
| 1,716 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowConfigsResource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableSet;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.resources.ComplexKeyResourceTemplate;
import javax.inject.Inject;
import javax.inject.Named;
/**
 * Resource for handling flow configuration requests
 */
@RestLiCollection(name = "flowconfigs", namespace = "org.apache.gobblin.service", keyName = "id")
public class FlowConfigsResource extends ComplexKeyResourceTemplate<FlowId, EmptyRecord, FlowConfig> {
  private static final Logger LOG = LoggerFactory.getLogger(FlowConfigsResource.class);
  // NOTE(review): the value reads "readToUse" — possibly a typo for "readyToUse". Renaming
  // would change the injection binding name, so confirm with the binding module before fixing.
  public static final String INJECT_READY_TO_USE = "readToUse";
  // Request headers allowed to pass through to the delete handler (see getHeaders()).
  private static final Set<String> ALLOWED_METADATA = ImmutableSet.of("delete.state.store");
  // Handler that performs the actual create/read/update/delete work for flow configs.
  @Inject
  private FlowConfigsResourceHandler flowConfigsResourceHandler;
  // For getting who sends the request
  @Inject
  private RequesterService requesterService;
  // For blocking use of this resource until it is ready
  @Inject
  @Named(INJECT_READY_TO_USE)
  private Boolean readyToUse;
  public FlowConfigsResource() {
  }
  /**
   * Retrieve the flow configuration with the given key
   * @param key flow config id key containing group name and flow name
   * @return {@link FlowConfig} with flow configuration
   */
  @Override
  public FlowConfig get(ComplexResourceKey<FlowId, EmptyRecord> key) {
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    FlowId flowId = new FlowId().setFlowGroup(flowGroup).setFlowName(flowName);
    return this.flowConfigsResourceHandler.getFlowConfig(flowId);
  }
  /**
   * Create a flow configuration that the service will forward to execution instances for execution
   * @param flowConfig flow configuration
   * @return {@link CreateResponse}
   * @throws FlowConfigLoggedException if the owning group is set (a V2-only feature) or the
   *         requester list cannot be serialized
   */
  @Override
  public CreateResponse create(FlowConfig flowConfig) {
    // Owning-group support exists only in the V2 API; reject it here so callers cannot
    // bypass the V2 group-membership checks.
    if (flowConfig.hasOwningGroup()) {
      throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "Owning group property may "
          + "not be set through flowconfigs API, use flowconfigsV2");
    }
    List<ServiceRequester> requesterList = this.requesterService.findRequesters(this);
    try {
      // Persist the creator's identity inside the config itself; update/delete later
      // verify against this list (see checkRequester).
      String serialized = RequesterService.serialize(requesterList);
      flowConfig.getProperties().put(RequesterService.REQUESTER_LIST, serialized);
      LOG.info("Rest requester list is " + serialized);
    } catch (IOException e) {
      // NOTE(review): 401 looks odd for a serialization failure — a non-auth 4xx/5xx may be
      // more accurate; confirm clients do not depend on this status before changing.
      throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "cannot get who is the requester", e);
    }
    return this.flowConfigsResourceHandler.createFlowConfig(flowConfig);
  }
  /**
   * Update the flow configuration with the specified key. Running flows are not affected.
   * An error is raised if the flow configuration does not exist.
   * @param key composite key containing group name and flow name that identifies the flow to update
   * @param flowConfig new flow configuration
   * @return {@link UpdateResponse}
   */
  @Override
  public UpdateResponse update(ComplexResourceKey<FlowId, EmptyRecord> key, FlowConfig flowConfig) {
    if (flowConfig.hasOwningGroup()) {
      throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "Owning group property may "
          + "not be set through flowconfigs API, use flowconfigsV2");
    }
    // The stored requester list is the authorization record; only the V2 API may rewrite it.
    if (flowConfig.getProperties().containsKey(RequesterService.REQUESTER_LIST)) {
      throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, RequesterService.REQUESTER_LIST + " property may "
          + "not be set through flowconfigs API, use flowconfigsV2");
    }
    // get(key) fetches the stored config so we can compare its recorded requesters
    // against the current caller.
    checkRequester(this.requesterService, get(key), this.requesterService.findRequesters(this));
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    FlowId flowId = new FlowId().setFlowGroup(flowGroup).setFlowName(flowName);
    return this.flowConfigsResourceHandler.updateFlowConfig(flowId, flowConfig);
  }
  /**
   * Delete a configured flow. Running flows are not affected. The schedule will be removed for scheduled flows.
   * @param key composite key containing flow group and flow name that identifies the flow to remove from the flow catalog
   * @return {@link UpdateResponse}
   */
  @Override
  public UpdateResponse delete(ComplexResourceKey<FlowId, EmptyRecord> key) {
    checkRequester(this.requesterService, get(key), this.requesterService.findRequesters(this));
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    FlowId flowId = new FlowId().setFlowGroup(flowGroup).setFlowName(flowName);
    return this.flowConfigsResourceHandler.deleteFlowConfig(flowId, getHeaders());
  }
  /**
   * Check that all {@link ServiceRequester}s in this request are contained within the original service requester list
   * when the flow was submitted. If they are not, throw a {@link FlowConfigLoggedException} with {@link HttpStatus#S_401_UNAUTHORIZED}.
   * If there is a failure when deserializing the original requester list, throw a {@link FlowConfigLoggedException} with
   * {@link HttpStatus#S_400_BAD_REQUEST}.
   *
   * @param requesterService the {@link RequesterService} used to verify the requester
   * @param originalFlowConfig original flow config to find original requester
   * @param requesterList list of requesters for this request
   */
  public static void checkRequester(
      RequesterService requesterService, FlowConfig originalFlowConfig, List<ServiceRequester> requesterList) {
    // Whitelisted requesters (e.g. admins) bypass the ownership check entirely.
    if (requesterService.isRequesterWhitelisted(requesterList)) {
      return;
    }
    try {
      String serializedOriginalRequesterList = originalFlowConfig.getProperties().get(RequesterService.REQUESTER_LIST);
      // A config with no recorded requester list is treated as unowned: anyone may modify it.
      if (serializedOriginalRequesterList != null) {
        List<ServiceRequester> originalRequesterList = RequesterService.deserialize(serializedOriginalRequesterList);
        if (!requesterService.isRequesterAllowed(originalRequesterList, requesterList)) {
          throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "Requester not allowed to make this request");
        }
      }
    } catch (IOException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "Failed to get original requester list", e);
    }
  }
  // Copies only the whitelisted (ALLOWED_METADATA) request headers into Properties
  // for the delete handler; all other headers are dropped.
  private Properties getHeaders() {
    Properties headerProperties = new Properties();
    for (Map.Entry<String, String> entry : getContext().getRequestHeaders().entrySet()) {
      if (ALLOWED_METADATA.contains(entry.getKey())) {
        headerProperties.put(entry.getKey(), entry.getValue());
      }
    }
    return headerProperties;
  }
}
| 1,717 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/ServiceRequester.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.HashMap;
import java.util.Map;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Setter;
/**
 * A {@code ServiceRequester} represents who sends a request
 * via a rest api. The requester have multiple attributes.
 *
 * 'name' indicates who is the sender.
 * 'type' indicates if the sender is a user or a group or a specific application.
 * 'from' indicates where this sender information is extracted.
 *
 * Please note that 'name' should be unique for the same 'type' of requester(s).
 */
@Getter
@Setter
@EqualsAndHashCode
public class ServiceRequester {
  private String name; // requester name
  private String type; // requester can be user name, service name, group name, etc.
  private String from; // the location or context where this requester info is obtained
  private Map<String, String> properties = new HashMap<>(); // additional information for future expansion

  /*
   * Default constructor is required for deserialization from json
   */
  public ServiceRequester() {
  }

  /**
   * @param name requester name (unique within a given {@code type})
   * @param type kind of requester: user, group, service, etc.
   * @param from the location or context the requester info was obtained from
   */
  public ServiceRequester(String name, String type, String from) {
    this.name = name;
    this.type = type;
    this.from = from;
  }

  /** Human-readable form used in log lines and error messages. */
  @Override
  public String toString() {
    return "[name : " + name + " type : " + type + " from : " + from + " additional : " + properties + "]";
  }
}
| 1,718 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowExecutionResource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.List;
import com.google.inject.Inject;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.server.PagingContext;
import com.linkedin.restli.server.PathKeys;
import com.linkedin.restli.server.ResourceLevel;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.annotations.Action;
import com.linkedin.restli.server.annotations.Context;
import com.linkedin.restli.server.annotations.Finder;
import com.linkedin.restli.server.annotations.Optional;
import com.linkedin.restli.server.annotations.PathKeysParam;
import com.linkedin.restli.server.annotations.QueryParam;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.resources.ComplexKeyResourceTemplate;
/**
 * Resource for handling flow execution requests
 */
@RestLiCollection(name = "flowexecutions", namespace = "org.apache.gobblin.service", keyName = "id")
public class FlowExecutionResource extends ComplexKeyResourceTemplate<FlowStatusId, EmptyRecord, FlowExecution> {
  // All operations delegate to this handler; this class only maps Rest.li endpoints
  // onto handler calls.
  @Inject
  FlowExecutionResourceHandler flowExecutionResourceHandler;
  public FlowExecutionResource() {}
  /**
   * Retrieve the FlowExecution with the given key
   * @param key {@link FlowStatusId} of flow to get
   * @return corresponding {@link FlowExecution}
   */
  @Override
  public FlowExecution get(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    return this.flowExecutionResourceHandler.get(key);
  }
  /**
   * Retrieve the most recent matching FlowExecution(s) of the identified FlowId
   * @param flowId identifies the flow whose executions are listed
   * @param count (maximum) number of executions to return; handler default applies when absent
   * @param tag optional tag filter
   * @param executionStatus optional execution-status filter
   * @param includeIssues include job issues in the response. Otherwise empty array of issues will be returned.
   * @return most recent matching {@link FlowExecution}s
   */
  @Finder("latestFlowExecution")
  public List<FlowExecution> getLatestFlowExecution(@Context PagingContext context, @QueryParam("flowId") FlowId flowId,
      @Optional @QueryParam("count") Integer count, @Optional @QueryParam("tag") String tag, @Optional @QueryParam("executionStatus") String executionStatus,
      @Optional("false") @QueryParam("includeIssues") Boolean includeIssues) {
    return this.flowExecutionResourceHandler.getLatestFlowExecution(context, flowId, count, tag, executionStatus, includeIssues);
  }
  /**
   * Retrieve the most recent matching FlowExecution(s) for each flow in the identified flowGroup
   * @param flowGroup group whose flows' executions are listed
   * @param countPerFlow (maximum) number of FlowExecutions for each flow in flowGroup
   * @param tag optional tag filter
   * @param includeIssues include job issues in the response. Otherwise empty array of issues will be returned.
   * @return most recent matching {@link FlowExecution}s per flow
   */
  @Finder("latestFlowGroupExecutions")
  public List<FlowExecution> getLatestFlowGroupExecutions(@Context PagingContext context, @QueryParam("flowGroup") String flowGroup,
      @Optional @QueryParam("countPerFlow") Integer countPerFlow, @Optional @QueryParam("tag") String tag,
      @Optional("false") @QueryParam("includeIssues") Boolean includeIssues) {
    return this.flowExecutionResourceHandler.getLatestFlowGroupExecutions(context, flowGroup, countPerFlow, tag, includeIssues);
  }
  /**
   * Resume a failed {@link FlowExecution} from the point before failure.
   * @param pathKeys key of {@link FlowExecution} specified in path
   */
  @Action(name="resume",resourceLevel= ResourceLevel.ENTITY)
  public void resume(@PathKeysParam PathKeys pathKeys) {
    this.flowExecutionResourceHandler.resume(pathKeys.get("id"));
  }
  /**
   * Kill the FlowExecution with the given key
   * @param key {@link FlowStatusId} of flow to kill
   * @return {@link UpdateResponse}
   */
  @Override
  public UpdateResponse delete(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    return this.flowExecutionResourceHandler.delete(key);
  }
}
| 1,719 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowConfigsV2Resource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableSet;
import com.linkedin.data.DataMap;
import com.linkedin.data.transform.DataProcessingException;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.internal.server.util.DataMapUtils;
import com.linkedin.restli.server.CreateKVResponse;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.PagingContext;
import com.linkedin.restli.server.PathKeys;
import com.linkedin.restli.server.ResourceLevel;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.annotations.Action;
import com.linkedin.restli.server.annotations.Context;
import com.linkedin.restli.server.annotations.Finder;
import com.linkedin.restli.server.annotations.Optional;
import com.linkedin.restli.server.annotations.PathKeysParam;
import com.linkedin.restli.server.annotations.QueryParam;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.annotations.ReturnEntity;
import com.linkedin.restli.server.resources.ComplexKeyResourceTemplate;
import com.linkedin.restli.server.util.PatchApplier;
import javax.inject.Inject;
import javax.inject.Named;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.runtime.api.FlowSpecSearchObject;
/**
 * Resource for handling flow configuration requests
 */
@Slf4j
@RestLiCollection(name = "flowconfigsV2", namespace = "org.apache.gobblin.service", keyName = "id")
public class FlowConfigsV2Resource extends ComplexKeyResourceTemplate<FlowId, FlowStatusId, FlowConfig> {
  private static final Logger LOG = LoggerFactory.getLogger(FlowConfigsV2Resource.class);
  public static final String INJECT_READY_TO_USE = "v2ReadyToUse";
  // Request headers allowed to pass through to the delete handler (see getHeaders()).
  private static final Set<String> ALLOWED_METADATA = ImmutableSet.of("delete.state.store");
  // Optional process-wide override of the injected handler; when non-null it takes
  // precedence (see getFlowConfigResourceHandler()).
  @edu.umd.cs.findbugs.annotations.SuppressWarnings("MS_SHOULD_BE_FINAL")
  public static FlowConfigsResourceHandler global_flowConfigsResourceHandler = null;
  @Inject
  private FlowConfigsV2ResourceHandler flowConfigsResourceHandler;
  // For getting who sends the request
  @Inject
  private RequesterService requesterService;
  // For blocking use of this resource until it is ready
  @Inject
  @Named(INJECT_READY_TO_USE)
  private Boolean readyToUse;
  // Resolves group membership for owning-group based authorization.
  @Inject
  private GroupOwnershipService groupOwnershipService;

  public FlowConfigsV2Resource() {
  }

  /**
   * Retrieve the flow configuration with the given key
   * @param key flow config id key containing group name and flow name
   * @return {@link FlowConfig} with flow configuration
   */
  @Override
  public FlowConfig get(ComplexResourceKey<FlowId, FlowStatusId> key) {
    return this.getFlowConfigResourceHandler().getFlowConfig(key.getKey());
  }

  /**
   * Retrieve all the flow configurations
   */
  @Override
  public List<FlowConfig> getAll(@Context PagingContext pagingContext) {
    // Check to see if the count and start parameters are user defined or default from the framework
    if (!pagingContext.hasCount() && !pagingContext.hasStart()) {
      return (List) this.getFlowConfigResourceHandler().getAllFlowConfigs();
    } else {
      return (List) this.getFlowConfigResourceHandler().getAllFlowConfigs(pagingContext.getStart(), pagingContext.getCount());
    }
  }

  /**
   * Get all {@link FlowConfig}s that matches the provided parameters. All the parameters are optional.
   * If a parameter is null, it is ignored. {@see FlowConfigV2Resource#getFilteredFlows}
   */
  @Finder("filterFlows")
  public List<FlowConfig> getFilteredFlows(@Context PagingContext context,
      @Optional @QueryParam("flowGroup") String flowGroup,
      @Optional @QueryParam("flowName") String flowName,
      @Optional @QueryParam("templateUri") String templateUri,
      @Optional @QueryParam("userToProxy") String userToProxy,
      @Optional @QueryParam("sourceIdentifier") String sourceIdentifier,
      @Optional @QueryParam("destinationIdentifier") String destinationIdentifier,
      @Optional @QueryParam("schedule") String schedule,
      @Optional @QueryParam("isRunImmediately") Boolean isRunImmediately,
      @Optional @QueryParam("owningGroup") String owningGroup,
      @Optional @QueryParam("propertyFilter") String propertyFilter) {
    FlowSpecSearchObject flowSpecSearchObject;
    // Check to see if the count and start parameters are user defined or default from the framework
    // Start is the index of the first specStore configurations to return
    // Count is the total number of specStore configurations to return
    if (!context.hasCount() && !context.hasStart()) {
      // -1/-1 signals the handler to apply no paging.
      flowSpecSearchObject = new FlowSpecSearchObject(null, flowGroup, flowName,
          templateUri, userToProxy, sourceIdentifier, destinationIdentifier, schedule, null,
          isRunImmediately, owningGroup, propertyFilter, -1, -1);
    } else {
      flowSpecSearchObject = new FlowSpecSearchObject(null, flowGroup, flowName,
          templateUri, userToProxy, sourceIdentifier, destinationIdentifier, schedule, null,
          isRunImmediately, owningGroup, propertyFilter, context.getStart(), context.getCount());
    }
    return (List) this.getFlowConfigResourceHandler().getFlowConfig(flowSpecSearchObject);
  }

  /**
   * Create a flow configuration that the service will forward to execution instances for execution
   * @param flowConfig flow configuration
   * @return {@link CreateResponse}
   * @throws FlowConfigLoggedException if the requester is not a member of the declared owning
   *         group, or the requester list cannot be serialized
   */
  @ReturnEntity
  @Override
  public CreateKVResponse create(FlowConfig flowConfig) {
    List<ServiceRequester> requesterList = this.requesterService.findRequesters(this);
    try {
      // Persist the creator's identity inside the config itself; later update/delete
      // requests are authorized against this list (see checkRequester).
      String serialized = RequesterService.serialize(requesterList);
      flowConfig.getProperties().put(RequesterService.REQUESTER_LIST, serialized);
      LOG.info("Rest requester list is " + serialized);
      if (flowConfig.hasOwningGroup() && !this.groupOwnershipService.isMemberOfGroup(requesterList, flowConfig.getOwningGroup())) {
        throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "Requester not part of owning group specified");
      }
    } catch (IOException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "cannot get who is the requester", e);
    }
    return (CreateKVResponse) this.getFlowConfigResourceHandler().createFlowConfig(flowConfig);
  }

  /**
   * Update the flow configuration with the specified key. Running flows are not affected.
   * An error is raised if the flow configuration does not exist.
   * @param key composite key containing group name and flow name that identifies the flow to update
   * @param flowConfig new flow configuration
   * @return {@link UpdateResponse}
   */
  @Override
  public UpdateResponse update(ComplexResourceKey<FlowId, FlowStatusId> key, FlowConfig flowConfig) {
    checkUpdateDeleteAllowed(get(key), flowConfig);
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    FlowId flowId = new FlowId().setFlowGroup(flowGroup).setFlowName(flowName);
    return this.getFlowConfigResourceHandler().updateFlowConfig(flowId, flowConfig);
  }

  /**
   * Partial update the flowConfig specified
   * @param key composite key containing group name and flow name that identifies the flow to update
   * @param flowConfigPatch patch describing what fields to change
   * @return {@link UpdateResponse}
   */
  @Override
  public UpdateResponse update(ComplexResourceKey<FlowId, FlowStatusId> key, PatchRequest<FlowConfig> flowConfigPatch) {
    // Apply patch to an empty FlowConfig just to check which properties are being set
    FlowConfig flowConfig = new FlowConfig();
    try {
      PatchApplier.applyPatch(flowConfig, flowConfigPatch);
    } catch (DataProcessingException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "Failed to apply patch", e);
    }
    checkUpdateDeleteAllowed(get(key), flowConfig);
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    FlowId flowId = new FlowId().setFlowGroup(flowGroup).setFlowName(flowName);
    return this.getFlowConfigResourceHandler().partialUpdateFlowConfig(flowId, flowConfigPatch);
  }

  /**
   * Delete a configured flow. Running flows are not affected. The schedule will be removed for scheduled flows.
   * @param key composite key containing flow group and flow name that identifies the flow to remove from the flow catalog
   * @return {@link UpdateResponse}
   */
  @Override
  public UpdateResponse delete(ComplexResourceKey<FlowId, FlowStatusId> key) {
    checkUpdateDeleteAllowed(get(key), null);
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    FlowId flowId = new FlowId().setFlowGroup(flowGroup).setFlowName(flowName);
    return this.getFlowConfigResourceHandler().deleteFlowConfig(flowId, getHeaders());
  }

  /**
   * Trigger a new execution of an existing flow
   * @param pathKeys key of {@link FlowId} specified in path
   */
  @Action(name="runImmediately", resourceLevel=ResourceLevel.ENTITY)
  public String runImmediately(@PathKeysParam PathKeys pathKeys) {
    // Implemented as a partial update that flips schedule.runImmediately to true, so the
    // usual update authorization path (checkUpdateDeleteAllowed) is exercised.
    String patchJson = "{\"schedule\":{\"$set\":{\"runImmediately\":true}}}";
    DataMap dataMap = DataMapUtils.readMap(IOUtils.toInputStream(patchJson, Charset.defaultCharset()));
    PatchRequest<FlowConfig> flowConfigPatch = PatchRequest.createFromPatchDocument(dataMap);
    ComplexResourceKey<FlowId, FlowStatusId> id = pathKeys.get("id");
    update(id, flowConfigPatch);
    return "Successfully triggered flow " + id.getKey().toString();
  }

  // Prefers the static global handler override (when set) over the injected one.
  private FlowConfigsResourceHandler getFlowConfigResourceHandler() {
    if (global_flowConfigsResourceHandler != null) {
      return global_flowConfigsResourceHandler;
    }
    return flowConfigsResourceHandler;
  }

  // Copies only the whitelisted (ALLOWED_METADATA) request headers into Properties for
  // the delete handler; all other headers are dropped.
  private Properties getHeaders() {
    Properties headerProperties = new Properties();
    for (Map.Entry<String, String> entry : getContext().getRequestHeaders().entrySet()) {
      if (ALLOWED_METADATA.contains(entry.getKey())) {
        headerProperties.put(entry.getKey(), entry.getValue());
      }
    }
    return headerProperties;
  }

  /**
   * Check that this update or delete operation is allowed, throw a {@link FlowConfigLoggedException} if not.
   *
   * @param originalFlowConfig the stored flow config being modified
   * @param updatedFlowConfig the incoming config, or {@code null} for a delete
   */
  public void checkUpdateDeleteAllowed(FlowConfig originalFlowConfig, FlowConfig updatedFlowConfig) {
    List<ServiceRequester> requesterList = this.requesterService.findRequesters(this);
    if (updatedFlowConfig != null) {
      checkPropertyUpdatesAllowed(requesterList, updatedFlowConfig);
    }
    checkRequester(originalFlowConfig, requesterList);
  }

  /**
   * Check that the properties being updated are allowed to be updated. This includes:
   * 1. Checking that the requester is part of the owningGroup if it is being modified
   * 2. Checking if the {@link RequesterService#REQUESTER_LIST} is being modified, and only allow it if a user is changing
   * it to themselves.
   */
  public void checkPropertyUpdatesAllowed(List<ServiceRequester> requesterList, FlowConfig updatedFlowConfig) {
    // Whitelisted requesters (e.g. admins) bypass all property-update restrictions.
    if (this.requesterService.isRequesterWhitelisted(requesterList)) {
      return;
    }
    // Check that requester is part of owning group if owning group is being updated
    if (updatedFlowConfig.hasOwningGroup() && !this.groupOwnershipService.isMemberOfGroup(requesterList, updatedFlowConfig.getOwningGroup())) {
      throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "Requester not part of owning group specified. Requester " + requesterList
          + " should join group " + updatedFlowConfig.getOwningGroup() + " and retry.");
    }
    if (updatedFlowConfig.hasProperties() && updatedFlowConfig.getProperties().containsKey(RequesterService.REQUESTER_LIST)) {
      List<ServiceRequester> updatedRequesterList;
      try {
        updatedRequesterList = RequesterService.deserialize(updatedFlowConfig.getProperties().get(RequesterService.REQUESTER_LIST));
      } catch (Exception e) {
        // Best-effort: include a well-formed example in the error message to help the caller.
        String exampleRequester = "";
        try {
          List<ServiceRequester> exampleRequesterList = new ArrayList<>();
          exampleRequesterList.add(new ServiceRequester("name", "type", "from"));
          exampleRequester = " An example requester is " + RequesterService.serialize(exampleRequesterList);
        } catch (IOException ioe) {
          // Fix: log the serialization failure itself (ioe), not the outer deserialization error.
          log.error("Failed to serialize example requester list", ioe);
        }
        throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, RequesterService.REQUESTER_LIST + " property was "
            + "provided but could not be deserialized." + exampleRequester, e);
      }
      // A non-whitelisted user may only rewrite the requester list to exactly themselves.
      if (!updatedRequesterList.equals(requesterList)) {
        throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, RequesterService.REQUESTER_LIST + " property may "
            + "only be updated to yourself. Requesting user: " + requesterList + ", updated requester: " + updatedRequesterList);
      }
    }
  }

  /**
   * Check that all {@link ServiceRequester}s in this request are contained within the original service requester list
   * or is part of the original requester's owning group when the flow was submitted. If they are not, throw a {@link FlowConfigLoggedException} with {@link HttpStatus#S_401_UNAUTHORIZED}.
   * If there is a failure when deserializing the original requester list, throw a {@link FlowConfigLoggedException} with
   * {@link HttpStatus#S_400_BAD_REQUEST}.
   * @param originalFlowConfig original flow config to find original requester
   * @param requesterList list of requesters for this request
   */
  public void checkRequester(FlowConfig originalFlowConfig, List<ServiceRequester> requesterList) {
    if (this.requesterService.isRequesterWhitelisted(requesterList)) {
      return;
    }
    try {
      String serializedOriginalRequesterList = originalFlowConfig.getProperties().get(RequesterService.REQUESTER_LIST);
      // A config with no recorded requester list is treated as unowned: anyone may modify it.
      if (serializedOriginalRequesterList != null) {
        List<ServiceRequester> originalRequesterList = RequesterService.deserialize(serializedOriginalRequesterList);
        if (!requesterService.isRequesterAllowed(originalRequesterList, requesterList)) {
          // if the requester is not whitelisted or the original requester, reject the requester if it is not part of the owning group
          // of the original requester
          if (!(originalFlowConfig.hasOwningGroup() && this.groupOwnershipService.isMemberOfGroup(
              requesterList, originalFlowConfig.getOwningGroup()))) {
            throw new FlowConfigLoggedException(HttpStatus.S_401_UNAUTHORIZED, "Requester not allowed to make this request");
          }
        }
      }
    } catch (IOException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "Failed to get original requester list", e);
    }
  }
}
| 1,720 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowExecutionResourceLocalHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.time.Duration;
import java.time.Instant;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.ObjectUtils;
import com.google.common.base.Strings;
import com.linkedin.data.template.SetMode;
import com.linkedin.data.template.StringMap;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.PagingContext;
import com.linkedin.restli.server.RestLiServiceException;
import com.linkedin.restli.server.UpdateResponse;
import javax.inject.Inject;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.runtime.troubleshooter.Issue;
import org.apache.gobblin.service.monitoring.FlowStatus;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
import org.apache.gobblin.service.monitoring.JobStatusRetriever;
@Slf4j
public class FlowExecutionResourceLocalHandler implements FlowExecutionResourceHandler {

  private final FlowStatusGenerator flowStatusGenerator;

  @Inject
  public FlowExecutionResourceLocalHandler(FlowStatusGenerator flowStatusGenerator) {
    this.flowStatusGenerator = flowStatusGenerator;
  }

  /**
   * Looks up a single flow execution (including per-job issues) by its composite key.
   *
   * @param key flow group, flow name and flow execution id identifying the execution
   * @return the matching {@link FlowExecution}
   * @throws RestLiServiceException with 404 if no execution exists for the key
   */
  @Override
  public FlowExecution get(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    FlowExecution flowExecution = convertFlowStatus(getFlowStatusFromGenerator(key, this.flowStatusGenerator), true);
    if (flowExecution == null) {
      throw new RestLiServiceException(HttpStatus.S_404_NOT_FOUND, "No flow execution found for flowStatusId " + key.getKey()
          + ". The flowStatusId may be incorrect, or the flow execution may have been cleaned up.");
    }
    return flowExecution;
  }

  /**
   * Returns the most recent executions of a single flow, optionally filtered by tag and execution status.
   *
   * @param context Rest.li paging context (unused by this handler)
   * @param flowId flow to query
   * @param count maximum number of executions to return; defaults to 1 when null
   * @param tag optional tag filter
   * @param executionStatus optional execution status filter
   * @param includeIssues whether to populate per-job issue lists
   * @throws RestLiServiceException with 404 if no matching execution is found
   */
  @Override
  public List<FlowExecution> getLatestFlowExecution(PagingContext context, FlowId flowId, Integer count, String tag,
      String executionStatus, Boolean includeIssues) {
    List<org.apache.gobblin.service.monitoring.FlowStatus> flowStatuses =
        getLatestFlowStatusesFromGenerator(flowId, count, tag, executionStatus, this.flowStatusGenerator);
    if (flowStatuses != null) {
      return flowStatuses.stream()
          .map((FlowStatus monitoringFlowStatus) -> convertFlowStatus(monitoringFlowStatus, includeIssues))
          .collect(Collectors.toList());
    }
    throw new RestLiServiceException(HttpStatus.S_404_NOT_FOUND, "No flow execution found for flowId " + flowId
        + ". The flowId may be incorrect, the flow execution may have been cleaned up, or not matching tag (" + tag
        + ") and/or execution status (" + executionStatus + ").");
  }

  /**
   * Returns the most recent executions for every flow in a group.
   *
   * @param context Rest.li paging context (unused by this handler)
   * @param flowGroup group whose flows should be queried
   * @param countPerFlow maximum number of executions to return per flow; defaults to 1 when null
   * @param tag optional tag filter
   * @param includeIssues whether to populate per-job issue lists
   * @throws RestLiServiceException with 404 if no matching execution is found
   */
  @Override
  public List<FlowExecution> getLatestFlowGroupExecutions(PagingContext context, String flowGroup, Integer countPerFlow,
      String tag, Boolean includeIssues) {
    List<org.apache.gobblin.service.monitoring.FlowStatus> flowStatuses =
        getLatestFlowGroupStatusesFromGenerator(flowGroup, countPerFlow, tag, this.flowStatusGenerator);
    if (flowStatuses != null) {
      // todo: flow end time will be incorrect when dag manager is not used
      // and FLOW_SUCCEEDED/FLOW_CANCELLED/FlowFailed events are not sent
      return flowStatuses.stream()
          .map((FlowStatus monitoringFlowStatus) -> convertFlowStatus(monitoringFlowStatus, includeIssues))
          .collect(Collectors.toList());
    }
    throw new RestLiServiceException(HttpStatus.S_404_NOT_FOUND, "No flow executions found for flowGroup " + flowGroup
        + ". The group name may be incorrect, the flow execution may have been cleaned up, or not matching tag (" + tag
        + ").");
  }

  /** Resume is not supported by the local handler; GobblinServiceFlowConfigResourceHandler owns it. */
  @Override
  public void resume(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    throw new UnsupportedOperationException("Resume should be handled in GobblinServiceFlowConfigResourceHandler");
  }

  /** Delete is not supported by the local handler; GobblinServiceFlowConfigResourceHandler owns it. */
  @Override
  public UpdateResponse delete(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    throw new UnsupportedOperationException("Delete should be handled in GobblinServiceFlowConfigResourceHandler");
  }

  /** Fetches the monitoring-side {@link org.apache.gobblin.service.monitoring.FlowStatus} for the given key. */
  public static org.apache.gobblin.service.monitoring.FlowStatus getFlowStatusFromGenerator(ComplexResourceKey<FlowStatusId, EmptyRecord> key,
      FlowStatusGenerator flowStatusGenerator) {
    String flowGroup = key.getKey().getFlowGroup();
    String flowName = key.getKey().getFlowName();
    long flowExecutionId = key.getKey().getFlowExecutionId();
    // Parameterized SLF4J logging instead of eager string concatenation; output is unchanged.
    log.info("Get called with flowGroup {} flowName {} flowExecutionId {}", flowGroup, flowName, flowExecutionId);
    return flowStatusGenerator.getFlowStatus(flowName, flowGroup, flowExecutionId, null);
  }

  /** Fetches up to {@code count} (default 1) latest statuses of a single flow, with optional filters. */
  public static List<FlowStatus> getLatestFlowStatusesFromGenerator(FlowId flowId,
      Integer count, String tag, String executionStatus, FlowStatusGenerator flowStatusGenerator) {
    if (count == null) {
      count = 1;
    }
    log.info("get latest called with flowGroup {} flowName {} count {}", flowId.getFlowGroup(), flowId.getFlowName(), count);
    return flowStatusGenerator.getLatestFlowStatus(flowId.getFlowName(), flowId.getFlowGroup(), count, tag, executionStatus);
  }

  /** Fetches up to {@code countPerFlowName} (default 1) latest statuses per flow across a whole group. */
  public static List<FlowStatus> getLatestFlowGroupStatusesFromGenerator(String flowGroup,
      Integer countPerFlowName, String tag, FlowStatusGenerator flowStatusGenerator) {
    if (countPerFlowName == null) {
      countPerFlowName = 1;
    }
    log.info("get latest (for group) called with flowGroup {} count {}", flowGroup, countPerFlowName);
    return flowStatusGenerator.getFlowStatusesAcrossGroup(flowGroup, countPerFlowName, tag);
  }

  /**
   * Forms a {@link FlowExecution} from a {@link org.apache.gobblin.service.monitoring.FlowStatus}
   * @param monitoringFlowStatus monitoring-side status; may be null, in which case null is returned
   * @param includeIssues whether to copy each job's issue list into the result
   * @return a {@link FlowExecution} converted from a {@link org.apache.gobblin.service.monitoring.FlowStatus}
   */
  public static FlowExecution convertFlowStatus(org.apache.gobblin.service.monitoring.FlowStatus monitoringFlowStatus,
      boolean includeIssues) {
    if (monitoringFlowStatus == null) {
      return null;
    }
    Iterator<org.apache.gobblin.service.monitoring.JobStatus> jobStatusIter = monitoringFlowStatus.getJobStatusIterator();
    JobStatusArray jobStatusArray = new JobStatusArray();
    FlowId flowId = new FlowId().setFlowName(monitoringFlowStatus.getFlowName())
        .setFlowGroup(monitoringFlowStatus.getFlowGroup());
    long flowEndTime = 0L;  // 0 == "no flow-level end time observed yet"
    long maxJobEndTime = Long.MIN_VALUE;
    String flowMessage = "";
    while (jobStatusIter.hasNext()) {
      org.apache.gobblin.service.monitoring.JobStatus queriedJobStatus = jobStatusIter.next();
      // Check if this is the flow status instead of a single job status
      if (JobStatusRetriever.isFlowStatus(queriedJobStatus)) {
        flowEndTime = queriedJobStatus.getEndTime();
        if (queriedJobStatus.getMessage() != null) {
          flowMessage = queriedJobStatus.getMessage();
        }
        continue;
      }
      maxJobEndTime = Math.max(maxJobEndTime, queriedJobStatus.getEndTime());
      JobStatus jobStatus = new JobStatus();
      Long timeLeft = estimateCopyTimeLeft(queriedJobStatus.getLastProgressEventTime(), queriedJobStatus.getStartTime(),
          queriedJobStatus.getProgressPercentage());
      jobStatus.setFlowId(flowId)
          .setJobId(new JobId()
              .setJobName(queriedJobStatus.getJobName())
              .setJobGroup(queriedJobStatus.getJobGroup()))
          .setJobTag(queriedJobStatus.getJobTag(), SetMode.IGNORE_NULL)
          .setExecutionStatistics(new JobStatistics()
              .setExecutionStartTime(queriedJobStatus.getStartTime())
              .setExecutionEndTime(queriedJobStatus.getEndTime())
              .setProcessedCount(queriedJobStatus.getProcessedCount())
              .setJobProgress(queriedJobStatus.getProgressPercentage())
              .setEstimatedSecondsToCompletion(timeLeft))
          .setExecutionStatus(ExecutionStatus.valueOf(queriedJobStatus.getEventName()))
          .setMessage(queriedJobStatus.getMessage())
          .setJobState(new JobState()
              .setLowWatermark(queriedJobStatus.getLowWatermark())
              .setHighWatermark(queriedJobStatus.getHighWatermark()));
      if (includeIssues) {
        jobStatus.setIssues(new IssueArray(queriedJobStatus.getIssues().get().stream()
            .map(FlowExecutionResourceLocalHandler::convertIssueToRestApiObject)
            .collect(Collectors.toList())));
      } else {
        jobStatus.setIssues(new IssueArray());
      }
      if (!Strings.isNullOrEmpty(queriedJobStatus.getMetrics())) {
        jobStatus.setMetrics(queriedJobStatus.getMetrics());
      }
      jobStatusArray.add(jobStatus);
    }
    // If DagManager is not enabled, we have to determine flow end time by individual job's end times.
    // Clamp to 0 so Long.MIN_VALUE cannot leak out when there were no job statuses at all.
    if (flowEndTime == 0L) {
      flowEndTime = Math.max(maxJobEndTime, 0L);
    }
    jobStatusArray.sort(Comparator.comparing((JobStatus js) -> js.getExecutionStatistics().getExecutionStartTime()));
    return new FlowExecution()
        .setId(new FlowStatusId().setFlowGroup(flowId.getFlowGroup()).setFlowName(flowId.getFlowName())
            .setFlowExecutionId(monitoringFlowStatus.getFlowExecutionId()))
        .setExecutionStatistics(new FlowStatistics().setExecutionStartTime(getFlowStartTime(monitoringFlowStatus))
            .setExecutionEndTime(flowEndTime))
        .setMessage(flowMessage)
        .setExecutionStatus(monitoringFlowStatus.getFlowExecutionStatus())
        .setJobStatuses(jobStatusArray);
  }

  /** Copies a troubleshooter {@link Issue} into its Rest.li API representation, null-filling text fields. */
  private static org.apache.gobblin.service.Issue convertIssueToRestApiObject(Issue issue) {
    org.apache.gobblin.service.Issue converted = new org.apache.gobblin.service.Issue();
    converted.setCode(issue.getCode())
        .setSummary(ObjectUtils.firstNonNull(issue.getSummary(), ""))
        .setDetails(ObjectUtils.firstNonNull(issue.getDetails(), ""))
        .setSeverity(IssueSeverity.valueOf(issue.getSeverity().name()))
        .setTime(issue.getTime().toInstant().toEpochMilli());
    if (issue.getProperties() != null) {
      converted.setProperties(new StringMap(issue.getProperties()));
    } else {
      converted.setProperties(new StringMap());
    }
    return converted;
  }

  /**
   * Return the flow start time given a {@link org.apache.gobblin.service.monitoring.FlowStatus}. Flow execution ID is
   * assumed to be the flow start time.
   */
  private static long getFlowStartTime(org.apache.gobblin.service.monitoring.FlowStatus flowStatus) {
    return flowStatus.getFlowExecutionId();
  }

  /**
   * Estimate the time left to complete the copy based on the following formula -
   * timeLeft = (100/completionPercentage - 1) * timeElapsed
   * @param currentTime as an epoch; may be null when no progress event was recorded
   * @param startTime as an epoch; may be null
   * @param completionPercentage of the job
   * @return time left in seconds, or 0 when no estimate can be made
   */
  public static long estimateCopyTimeLeft(Long currentTime, Long startTime, int completionPercentage) {
    // No progress yet, or timestamps unavailable (boxed Longs may be null): no meaningful estimate.
    if (completionPercentage == 0 || currentTime == null || startTime == null) {
      return 0;
    }
    Instant current = Instant.ofEpochMilli(currentTime);
    Instant start = Instant.ofEpochMilli(startTime);
    long timeElapsed = Duration.between(start, current).getSeconds();
    return (long) (timeElapsed * (100.0 / (double) completionPercentage - 1));
  }
}
| 1,721 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowConfigV2ResourceLocalHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.Map;
import org.apache.commons.lang3.StringEscapeUtils;
import com.linkedin.data.template.StringMap;
import com.linkedin.data.transform.DataProcessingException;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.server.CreateKVResponse;
import com.linkedin.restli.server.RestLiServiceException;
import com.linkedin.restli.server.UpdateResponse;
import com.linkedin.restli.server.util.PatchApplier;
import javax.inject.Inject;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.exception.QuotaExceededException;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
@Slf4j
public class FlowConfigV2ResourceLocalHandler extends FlowConfigResourceLocalHandler implements FlowConfigsV2ResourceHandler {
  @Inject
  public FlowConfigV2ResourceLocalHandler(FlowCatalog flowCatalog) {
    super(flowCatalog);
  }
  /**
   * Add flowConfig locally and trigger all listeners iff {@code triggerListener} is set to true.
   *
   * <p>Behavior by request type:
   * <ul>
   *   <li>409 CONFLICT response (no action) when a spec with the same URI already exists;</li>
   *   <li>"explain" requests compile the flow, attach the compiled form to the returned config's
   *       properties, and return 200 without persisting anything;</li>
   *   <li>otherwise 201 CREATED on successful compilation, 400 on compilation failure,
   *       503 when the quota is exceeded, 500 on any other catalog failure.</li>
   * </ul>
   *
   * @param flowConfig flow configuration to add
   * @param triggerListener whether catalog listeners should be notified of the new spec
   * @return response carrying the (flowId, flowStatusId) key and the (possibly enriched) config
   */
  @Override
  public CreateKVResponse createFlowConfig(FlowConfig flowConfig, boolean triggerListener) throws FlowConfigLoggedException {
    String createLog = "[GAAS-REST] Create called with flowGroup " + flowConfig.getId().getFlowGroup() + " flowName " + flowConfig.getId().getFlowName();
    this.createFlow.mark();
    if (flowConfig.hasExplain()) {
      createLog += " explain " + flowConfig.isExplain();
    }
    log.info(createLog);
    FlowSpec flowSpec = createFlowSpecForConfig(flowConfig);
    FlowStatusId flowStatusId =
        new FlowStatusId().setFlowName(flowSpec.getConfigAsProperties().getProperty(ConfigurationKeys.FLOW_NAME_KEY))
            .setFlowGroup(flowSpec.getConfigAsProperties().getProperty(ConfigurationKeys.FLOW_GROUP_KEY));
    if (flowSpec.getConfigAsProperties().containsKey(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
      flowStatusId.setFlowExecutionId(Long.valueOf(flowSpec.getConfigAsProperties().getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)));
    } else {
      // -1 signals that no execution id was assigned at creation time.
      flowStatusId.setFlowExecutionId(-1L);
    }
    // Return conflict and take no action if flowSpec has already been created
    if (this.flowCatalog.exists(flowSpec.getUri())) {
      log.warn("FlowSpec with URI {} already exists, no action will be taken", flowSpec.getUri());
      return new CreateKVResponse<>(new RestLiServiceException(HttpStatus.S_409_CONFLICT,
          "FlowSpec with URI " + flowSpec.getUri() + " already exists, no action will be taken"));
    }
    Map<String, AddSpecResponse> responseMap;
    try {
      responseMap = this.flowCatalog.put(flowSpec, triggerListener);
    } catch (QuotaExceededException e) {
      throw new RestLiServiceException(HttpStatus.S_503_SERVICE_UNAVAILABLE, e.getMessage());
    } catch (Throwable e) {
      // TODO: Compilation errors should fall under throwable exceptions as well instead of checking for strings
      log.warn(String.format("Failed to add flow configuration %s.%s to catalog due to", flowConfig.getId().getFlowGroup(), flowConfig.getId().getFlowName()), e);
      throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR, e.getMessage());
    }
    HttpStatus httpStatus;
    if (flowConfig.hasExplain() && flowConfig.isExplain()) {
      //This is an Explain request. So no resource is actually created.
      //Enrich original FlowConfig entity by adding the compiledFlow to the properties map.
      StringMap props = flowConfig.getProperties();
      AddSpecResponse<String> addSpecResponse = responseMap.getOrDefault(ServiceConfigKeys.COMPILATION_RESPONSE, null);
      props.put("gobblin.flow.compiled",
          addSpecResponse != null && addSpecResponse.getValue() != null ? StringEscapeUtils.escapeJson(addSpecResponse.getValue()) : "");
      flowConfig.setProperties(props);
      httpStatus = HttpStatus.S_200_OK;
    } else if (Boolean.parseBoolean(responseMap.getOrDefault(ServiceConfigKeys.COMPILATION_SUCCESSFUL, new AddSpecResponse<>("false")).getValue().toString())) {
      // Compilation succeeded and the spec was persisted.
      httpStatus = HttpStatus.S_201_CREATED;
    } else {
      throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST, getErrorMessage(flowSpec));
    }
    return new CreateKVResponse<>(new ComplexResourceKey<>(flowConfig.getId(), flowStatusId), flowConfig, httpStatus);
  }
  /**
   * Note: this method is only implemented for testing, normally partial update would be called in
   * GobblinServiceFlowConfigResourceHandler.partialUpdateFlowConfig
   *
   * @param flowId flow to patch
   * @param flowConfigPatch Rest.li patch to apply on top of the stored config
   * @throws FlowConfigLoggedException with 400 when the patch cannot be applied
   */
  @Override
  public UpdateResponse partialUpdateFlowConfig(FlowId flowId, PatchRequest<FlowConfig> flowConfigPatch) throws FlowConfigLoggedException {
    FlowConfig flowConfig = getFlowConfig(flowId);
    try {
      PatchApplier.applyPatch(flowConfig, flowConfigPatch);
    } catch (DataProcessingException e) {
      throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "Failed to apply partial update", e);
    }
    return updateFlowConfig(flowId, flowConfig);
  }
}
| 1,722 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/LocalGroupOwnershipService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Splitter;
import com.google.gson.JsonObject;
import com.typesafe.config.Config;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.util.filesystem.PathAlterationObserver;
/**
 * Reads and updates from a JSON where keys denote group names
 * and values denote a comma-separated list of group members. The backing file is re-checked
 * for modifications on every membership query, so edits take effect without a restart.
 */
@Alias("local")
@Singleton
public class LocalGroupOwnershipService extends GroupOwnershipService {
  /** Config key pointing at the JSON file mapping group names to comma-separated member lists. */
  public static final String GROUP_MEMBER_LIST = "groupOwnershipService.groupMembers.path";

  LocalGroupOwnershipPathAlterationListener listener;
  PathAlterationObserver observer;

  @Inject
  public LocalGroupOwnershipService(Config config) {
    Path groupOwnershipFilePath = new Path(config.getString(GROUP_MEMBER_LIST));
    try {
      // Watch the parent directory so changes to the ownership file are noticed at query time.
      observer = new PathAlterationObserver(groupOwnershipFilePath.getParent());
      this.listener = new LocalGroupOwnershipPathAlterationListener(groupOwnershipFilePath);
      observer.addListener(this.listener);
    } catch (IOException e) {
      throw new RuntimeException("Could not initialize PathAlterationObserver at " + groupOwnershipFilePath, e);
    }
  }

  /**
   * Returns true if any of the {@code serviceRequesters} is listed as a member of {@code group}
   * in the backing JSON file; false when the group is absent or no requester matches.
   */
  @Override
  public boolean isMemberOfGroup(List<ServiceRequester> serviceRequesters, String group) {
    // Ensure that the group ownership file is up to date before consulting it.
    try {
      this.observer.checkAndNotify();
    } catch (IOException e) {
      throw new RuntimeException("Group Ownership observer could not check for file changes", e);
    }
    JsonObject groupOwnerships = this.listener.getGroupOwnerships();
    if (!groupOwnerships.has(group)) {
      return false;
    }
    List<String> groupMembers = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(
        groupOwnerships.get(group).getAsString());
    for (ServiceRequester requester : serviceRequesters) {
      if (groupMembers.contains(requester.getName())) {
        return true;
      }
    }
    return false;
  }
}
| 1,723 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/RequesterService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.IOException;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.List;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import com.google.gson.Gson;
import com.google.gson.JsonSyntaxException;
import com.google.gson.reflect.TypeToken;
import com.linkedin.restli.server.resources.BaseResource;
import com.typesafe.config.Config;
/**
 * Use this class to get who sends the request. Concrete implementations resolve the
 * {@link ServiceRequester}s behind a Rest.li call; the static helpers (de)serialize requester
 * lists to/from the {@link #REQUESTER_LIST} flow config property.
 */
public abstract class RequesterService {
  // System configuration made available to concrete implementations.
  protected Config sysConfig;

  public RequesterService(Config config) {
    sysConfig = config;
  }

  /** Flow config property key under which the serialized requester list is stored. */
  public static final String REQUESTER_LIST = "gobblin.service.requester.list";

  // Gson instances are thread-safe, so a single shared instance suffices.
  private static final Gson gson = new Gson();

  /**
   * <p> This implementation converts a given list to a json string.
   *
   * @param requesterList requesters to serialize
   * @throws IOException if Gson fails to serialize the list (wrapped RuntimeException)
   */
  public static String serialize(List<ServiceRequester> requesterList) throws IOException {
    try {
      return gson.toJson(requesterList);
    } catch (RuntimeException e) {
      // Surface serialization problems as IOException to match the method's contract.
      throw new IOException(e);
    }
  }

  /**
   * <p> This implementation decode a given string encoded by
   * {@link #serialize(List)}.
   *
   * <p>On a JSON parse failure, falls back to the legacy on-disk format (URL-encoded Base64 of a
   * Jackson-serialized list) for backward compatibility with configs written by older versions.
   *
   * @throws IOException if the input cannot be parsed in either format
   */
  public static List<ServiceRequester> deserialize(String encodedString) throws IOException {
    try {
      return gson.fromJson(encodedString, new TypeToken<List<ServiceRequester>>() {}.getType());
    } catch (JsonSyntaxException e) {
      // For backward compatibility
      String base64Str = URLDecoder.decode(encodedString, StandardCharsets.UTF_8.name());
      byte[] decodedBytes = Base64.getDecoder().decode(base64Str);
      String jsonList = new String(decodedBytes, StandardCharsets.UTF_8);
      TypeReference<List<ServiceRequester>> mapType = new TypeReference<List<ServiceRequester>>() {};
      return new ObjectMapper().readValue(jsonList, mapType);
    } catch (RuntimeException e) {
      throw new IOException(e);
    }
  }

  // Resolves the requesters behind the given Rest.li resource invocation.
  protected abstract List<ServiceRequester> findRequesters(BaseResource resource);

  /**
   * Return true if the requester is whitelisted to always be accepted.
   * This default implementation whitelists no one.
   */
  public boolean isRequesterWhitelisted(List<ServiceRequester> requesterList) {
    return false;
  }

  /**
   * returns true if the requester is allowed to make this request.
   * This default implementation accepts all requesters.
   * @param originalRequesterList original requester list
   * @param currentRequesterList current requester list
   * @return true if the requester is allowed to make this request, false otherwise
   */
  protected boolean isRequesterAllowed(
      List<ServiceRequester> originalRequesterList, List<ServiceRequester> currentRequesterList) {
    return true;
  }
}
| 1,724 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/LdapGroupOwnershipService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.List;
import java.util.Set;
import org.apache.log4j.Logger;
import com.typesafe.config.Config;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.naming.NamingException;
import javax.naming.PartialResultException;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.util.LdapUtils;
/**
 * {@link GroupOwnershipService} backed by an external Active Directory (LDAP) service.
 * Each membership check queries the directory for the group's current member list.
 */
@Alias("ldap")
@Singleton
public class LdapGroupOwnershipService extends GroupOwnershipService {
  LdapUtils ldapUtils;
  private static final Logger logger = Logger.getLogger(LdapGroupOwnershipService.class);

  @Inject
  public LdapGroupOwnershipService(Config config) {
    this.ldapUtils = new LdapUtils(config);
  }

  /**
   * Returns true if any requester's name appears in the LDAP group's member set.
   * A {@link PartialResultException} (typically a missing group) is treated as "not a member";
   * any other naming failure is rethrown as a {@link RuntimeException}.
   */
  @Override
  public boolean isMemberOfGroup(List<ServiceRequester> serviceRequesters, String group) {
    Set<String> groupMemberships;
    try {
      groupMemberships = ldapUtils.getGroupMembers(group);
    } catch (NamingException e) {
      logger.warn(String.format("Caught naming exception when parsing results from LDAP server. Message: %s",
          e.getExplanation()));
      if (e instanceof PartialResultException) {
        logger.warn("Check that the Ldap group exists");
        return false;
      }
      throw new RuntimeException(e);
    }
    // An empty member set can never match, so anyMatch covers the original empty-check too.
    return serviceRequesters.stream()
        .anyMatch(requester -> groupMemberships.contains(requester.getName()));
  }
}
| 1,725 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowConfigsV2ResourceHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
/**
 * Handler contract for the V2 flow configs Rest.li resource. Currently a marker interface that
 * inherits every operation from {@link FlowConfigsResourceHandler}; V2-only operations would be
 * declared here.
 */
public interface FlowConfigsV2ResourceHandler extends FlowConfigsResourceHandler {
}
| 1,726 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/GroupOwnershipService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.List;
/**
 * Service for handling group ownership of flows. Implementations answer whether a set of
 * requesters belongs to a named group (e.g. backed by a local JSON file or an LDAP directory).
 */
public abstract class GroupOwnershipService {
  /**
   * Checks group membership for a request.
   *
   * @param serviceRequesters requesters attached to the incoming request
   * @param group name of the group to check against
   * @return true if any of the serviceRequesters belong in the group
   */
  public abstract boolean isMemberOfGroup(List<ServiceRequester> serviceRequesters, String group);
}
| 1,727 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowConfigLoggedException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.RestLiServiceException;
/**
 * Exception thrown by {@link FlowConfigsResourceHandler} when it cannot handle Restli gracefully.
 * The message (and cause, when present) is logged at ERROR level at construction time so the
 * failure appears in the server log as well as in the Rest.li error response.
 */
public class FlowConfigLoggedException extends RestLiServiceException {
  private static final Logger log = LoggerFactory.getLogger(FlowConfigLoggedException.class);

  /**
   * @param status HTTP status to return to the client
   * @param message error detail; logged and included in the response
   */
  public FlowConfigLoggedException(final HttpStatus status, final String message) {
    super(status, message);
    log.error(message);
  }

  /**
   * @param status HTTP status to return to the client
   * @param message error detail; logged and included in the response
   * @param cause underlying failure, logged together with the message
   */
  public FlowConfigLoggedException(final HttpStatus status, final String message, final Throwable cause) {
    super(status, message, cause);
    log.error(message, cause);
  }
}
| 1,728 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowConfigResourceLocalHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.commons.lang.StringUtils;
import com.codahale.metrics.MetricRegistry;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.RestLiServiceException;
import com.linkedin.restli.server.UpdateResponse;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import javax.inject.Inject;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.exception.QuotaExceededException;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metrics.ContextAwareMeter;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.ServiceMetricNames;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.FlowSpecSearchObject;
import org.apache.gobblin.runtime.api.SpecNotFoundException;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.util.ConfigUtils;
/**
* A {@link FlowConfigsResourceHandler} that handles Restli locally.
*/
@Slf4j
public class FlowConfigResourceLocalHandler implements FlowConfigsResourceHandler {
public static final Schedule NEVER_RUN_CRON_SCHEDULE = new Schedule().setCronSchedule("0 0 0 ? 1 1 2050");
@Getter
protected FlowCatalog flowCatalog;
protected final ContextAwareMeter createFlow;
protected final ContextAwareMeter deleteFlow;
protected final ContextAwareMeter runImmediatelyFlow;
@Inject
public FlowConfigResourceLocalHandler(FlowCatalog flowCatalog) {
this.flowCatalog = flowCatalog;
MetricContext metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(ConfigFactory.empty()), getClass());
this.createFlow = metricContext.contextAwareMeter(
MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.CREATE_FLOW_METER));
this.deleteFlow = metricContext.contextAwareMeter(
MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.DELETE_FLOW_METER));
this.runImmediatelyFlow = metricContext.contextAwareMeter(
MetricRegistry.name(ServiceMetricNames.GOBBLIN_SERVICE_PREFIX, ServiceMetricNames.RUN_IMMEDIATELY_FLOW_METER));
}
/**
* Get flow config given a {@link FlowId}
*/
public FlowConfig getFlowConfig(FlowId flowId) throws FlowConfigLoggedException {
log.info("[GAAS-REST] Get called with flowGroup {} flowName {}", flowId.getFlowGroup(), flowId.getFlowName());
try {
URI flowUri = FlowSpec.Utils.createFlowSpecUri(flowId);
FlowSpec spec = (FlowSpec) flowCatalog.getSpecs(flowUri);
return FlowSpec.Utils.toFlowConfig(spec);
} catch (URISyntaxException e) {
throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "bad URI " + flowId.getFlowName(), e);
} catch (SpecNotFoundException e) {
throw new FlowConfigLoggedException(HttpStatus.S_404_NOT_FOUND, "Flow requested does not exist: " + flowId.getFlowName(), null);
}
}
/**
* Get flow config given a {@link FlowSpecSearchObject}
* @return all the {@link FlowConfig}s that satisfy the {@link FlowSpecSearchObject}
*/
public Collection<FlowConfig> getFlowConfig(FlowSpecSearchObject flowSpecSearchObject) throws FlowConfigLoggedException {
log.info("[GAAS-REST] Get called with flowSpecSearchObject {}", flowSpecSearchObject);
return flowCatalog.getSpecs(flowSpecSearchObject).stream().map(FlowSpec.Utils::toFlowConfig).collect(Collectors.toList());
}
/**
* Get all flow configs
*/
public Collection<FlowConfig> getAllFlowConfigs() {
log.info("[GAAS-REST] GetAll called");
return flowCatalog.getAllSpecs().stream().map(FlowSpec.Utils::toFlowConfig).collect(Collectors.toList());
}
/**
* Get all flow configs in between start and start + count - 1
*/
public Collection<FlowConfig> getAllFlowConfigs(int start, int count) {
return flowCatalog.getSpecsPaginated(start, count).stream().map(FlowSpec.Utils::toFlowConfig).collect(Collectors.toList());
}
/**
* Add flowConfig locally and trigger all listeners iff @param triggerListener is set to true
*/
public CreateResponse createFlowConfig(FlowConfig flowConfig, boolean triggerListener) throws FlowConfigLoggedException {
log.info("[GAAS-REST] Create called with flowGroup " + flowConfig.getId().getFlowGroup() + " flowName " + flowConfig.getId().getFlowName());
this.createFlow.mark();
if (!flowConfig.hasSchedule() || StringUtils.isEmpty(flowConfig.getSchedule().getCronSchedule())) {
this.runImmediatelyFlow.mark();
}
if (flowConfig.hasExplain()) {
//Return Error if FlowConfig has explain set. Explain request is only valid for v2 FlowConfig.
return new CreateResponse(new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST, "FlowConfig with explain not supported."));
}
FlowSpec flowSpec = createFlowSpecForConfig(flowConfig);
// Existence of a flow spec in the flow catalog implies that the flow is currently running.
// If the new flow spec has a schedule we should allow submission of the new flow to accept the new schedule.
// However, if the new flow spec does not have a schedule, we should allow submission only if it is not running.
if (!flowConfig.hasSchedule() && this.flowCatalog.exists(flowSpec.getUri())) {
return new CreateResponse(new ComplexResourceKey<>(flowConfig.getId(), new EmptyRecord()), HttpStatus.S_409_CONFLICT);
} else {
try {
this.flowCatalog.put(flowSpec, triggerListener);
} catch (QuotaExceededException e) {
throw new RestLiServiceException(HttpStatus.S_503_SERVICE_UNAVAILABLE, e.getMessage());
} catch (Throwable e) {
// TODO: Compilation errors should fall under throwable exceptions as well instead of checking for strings
log.warn(String.format("Failed to add flow configuration %s.%s to catalog due to", flowConfig.getId().getFlowGroup(), flowConfig.getId().getFlowName()), e);
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR, e.getMessage());
}
return new CreateResponse(new ComplexResourceKey<>(flowConfig.getId(), new EmptyRecord()), HttpStatus.S_201_CREATED);
}
}
/**
* Add flowConfig locally and trigger all listeners
*/
public CreateResponse createFlowConfig(FlowConfig flowConfig) throws FlowConfigLoggedException {
return this.createFlowConfig(flowConfig, true);
}
public UpdateResponse updateFlowConfig(FlowId flowId, FlowConfig flowConfig, boolean triggerListener) {
// Set the max version to be the largest value so that we blindly update the flow spec in this case
return updateFlowConfig(flowId, flowConfig, triggerListener, Long.MAX_VALUE);
}
/**
* Update flowConfig locally and trigger all listeners iff @param triggerListener is set to true
*/
public UpdateResponse updateFlowConfig(FlowId flowId, FlowConfig flowConfig, boolean triggerListener, long modifiedWatermark) {
log.info("[GAAS-REST] Update called with flowGroup {} flowName {}", flowId.getFlowGroup(), flowId.getFlowName());
if (!flowId.getFlowGroup().equals(flowConfig.getId().getFlowGroup()) || !flowId.getFlowName().equals(flowConfig.getId().getFlowName())) {
throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST,
"flowName and flowGroup cannot be changed in update", null);
}
FlowConfig originalFlowConfig = getFlowConfig(flowId);
if (!flowConfig.getProperties().containsKey(RequesterService.REQUESTER_LIST)) {
// Carry forward the requester list property if it is not being updated since it was added at time of creation
flowConfig.getProperties().put(RequesterService.REQUESTER_LIST, originalFlowConfig.getProperties().get(RequesterService.REQUESTER_LIST));
}
if (isUnscheduleRequest(flowConfig)) {
// flow config is not changed if it is just a request to un-schedule
originalFlowConfig.setSchedule(NEVER_RUN_CRON_SCHEDULE);
flowConfig = originalFlowConfig;
}
FlowSpec flowSpec = createFlowSpecForConfig(flowConfig);
Map<String, AddSpecResponse> responseMap;
try {
responseMap = this.flowCatalog.update(flowSpec, triggerListener, modifiedWatermark);
} catch (QuotaExceededException e) {
throw new RestLiServiceException(HttpStatus.S_503_SERVICE_UNAVAILABLE, e.getMessage());
} catch (Throwable e) {
// TODO: Compilation errors should fall under throwable exceptions as well instead of checking for strings
log.warn(String.format("Failed to add flow configuration %s.%s to catalog due to", flowId.getFlowGroup(), flowId.getFlowName()), e);
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR, e.getMessage());
}
if (Boolean.parseBoolean(responseMap.getOrDefault(ServiceConfigKeys.COMPILATION_SUCCESSFUL, new AddSpecResponse<>("false")).getValue().toString())) {
return new UpdateResponse(HttpStatus.S_200_OK);
} else {
throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST, getErrorMessage(flowSpec));
}
}
private boolean isUnscheduleRequest(FlowConfig flowConfig) {
return Boolean.parseBoolean(flowConfig.getProperties().getOrDefault(ConfigurationKeys.FLOW_UNSCHEDULE_KEY, "false"));
}
/**
* Update flowConfig locally and trigger all listeners
*/
public UpdateResponse updateFlowConfig(FlowId flowId, FlowConfig flowConfig) throws FlowConfigLoggedException {
return updateFlowConfig(flowId, flowConfig, true);
}
@Override
public UpdateResponse partialUpdateFlowConfig(FlowId flowId, PatchRequest<FlowConfig> flowConfigPatch) throws FlowConfigLoggedException {
throw new UnsupportedOperationException("Partial update only supported by GobblinServiceFlowConfigResourceHandler");
}
/**
* Delete flowConfig locally and trigger all listeners iff @param triggerListener is set to true
*/
public UpdateResponse deleteFlowConfig(FlowId flowId, Properties header, boolean triggerListener) throws FlowConfigLoggedException {
log.info("[GAAS-REST] Delete called with flowGroup {} flowName {}", flowId.getFlowGroup(), flowId.getFlowName());
this.deleteFlow.mark();
URI flowUri = null;
try {
flowUri = FlowSpec.Utils.createFlowSpecUri(flowId);
this.flowCatalog.remove(flowUri, header, triggerListener);
return new UpdateResponse(HttpStatus.S_200_OK);
} catch (URISyntaxException e) {
throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "bad URI " + flowUri, e);
}
}
/**
* Delete flowConfig locally and trigger all listeners
*/
public UpdateResponse deleteFlowConfig(FlowId flowId, Properties header) throws FlowConfigLoggedException {
return deleteFlowConfig(flowId, header, true);
}
/**
* Build a {@link FlowSpec} from a {@link FlowConfig}
* @param flowConfig flow configuration
* @return {@link FlowSpec} created with attributes from flowConfig
*/
public static FlowSpec createFlowSpecForConfig(FlowConfig flowConfig) {
ConfigBuilder configBuilder = ConfigBuilder.create()
.addPrimitive(ConfigurationKeys.FLOW_GROUP_KEY, flowConfig.getId().getFlowGroup())
.addPrimitive(ConfigurationKeys.FLOW_NAME_KEY, flowConfig.getId().getFlowName());
if (flowConfig.hasSchedule()) {
Schedule schedule = flowConfig.getSchedule();
configBuilder.addPrimitive(ConfigurationKeys.JOB_SCHEDULE_KEY, schedule.getCronSchedule());
configBuilder.addPrimitive(ConfigurationKeys.FLOW_RUN_IMMEDIATELY, schedule.isRunImmediately());
} else {
// If the job does not have schedule, it is a run-once job.
// In this case, we add flow execution id to the flow spec now to be able to send this id back to the user for
// flow status tracking purpose.
// If it is not a run-once job, we should not add flow execution id here,
// because execution id is generated for every scheduled execution of the flow and cannot be materialized to
// the flow catalog. In this case, this id is added during flow compilation.
String flowExecutionId;
if (flowConfig.getProperties().containsKey(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
flowExecutionId = flowConfig.getProperties().get(ConfigurationKeys.FLOW_EXECUTION_ID_KEY);
// FLOW_EXECUTION_ID may already be present in FlowSpec in cases
// where the FlowSpec is forwarded by a slave to the master.
log.info("Using the existing flowExecutionId {} for {},{}", flowExecutionId, flowConfig.getId().getFlowGroup(), flowConfig.getId().getFlowName());
} else {
flowExecutionId = String.valueOf(System.currentTimeMillis());
log.info("Created a flowExecutionId {} for {},{}", flowExecutionId, flowConfig.getId().getFlowGroup(), flowConfig.getId().getFlowName());
}
flowConfig.getProperties().put(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, flowExecutionId);
configBuilder.addPrimitive(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, flowExecutionId);
}
if (flowConfig.hasExplain()) {
configBuilder.addPrimitive(ConfigurationKeys.FLOW_EXPLAIN_KEY, flowConfig.isExplain());
}
if (flowConfig.hasOwningGroup()) {
configBuilder.addPrimitive(ConfigurationKeys.FLOW_OWNING_GROUP_KEY, flowConfig.getOwningGroup());
}
Config config = configBuilder.build();
Config configWithFallback;
// We first attempt to process the REST.li request as a HOCON string. If the request is not a valid HOCON string
// (e.g. when certain special characters such as ":" or "*" are not properly escaped), we catch the Typesafe ConfigException and
// fallback to assuming that values are literal strings.
try {
// We first convert the StringMap object to a String object and then use ConfigFactory#parseString() to parse the
// HOCON string.
configWithFallback = config.withFallback(ConfigFactory.parseString(flowConfig.getProperties().toString()).resolve());
} catch (Exception e) {
configWithFallback = config.withFallback(ConfigFactory.parseMap(flowConfig.getProperties()));
}
try {
URI templateURI = new URI(flowConfig.getTemplateUris());
return FlowSpec.builder().withConfig(configWithFallback).withTemplate(templateURI).build();
} catch (URISyntaxException e) {
throw new FlowConfigLoggedException(HttpStatus.S_400_BAD_REQUEST, "bad URI " + flowConfig.getTemplateUris(), e);
}
}
protected String getErrorMessage(FlowSpec flowSpec) {
StringBuilder message = new StringBuilder("Flow was not compiled successfully.");
Map<String, ArrayList<String>> allErrors = new HashMap<>();
if (!flowSpec.getCompilationErrors().isEmpty()) {
message.append(" Compilation errors encountered (Sorted by relevance): ");
FlowSpec.CompilationError[] errors = flowSpec.getCompilationErrors().stream().distinct().toArray(FlowSpec.CompilationError[]::new);
Arrays.sort(errors, Comparator.comparingInt(c -> ((FlowSpec.CompilationError)c).errorPriority));
int errorIdSingleHop = 1;
int errorIdMultiHop = 1;
ArrayList<String> singleHopErrors = new ArrayList<>();
ArrayList<String> multiHopErrors = new ArrayList<>();
for (FlowSpec.CompilationError error: errors) {
if (error.errorPriority == 0) {
singleHopErrors.add(String.format("ERROR %s of single-step data movement: ", errorIdSingleHop) + error.errorMessage.replace("\n", " ").replace("\t", ""));
errorIdSingleHop++;
} else {
multiHopErrors.add(String.format("ERROR %s of multi-step data movement: ", errorIdMultiHop) + error.errorMessage.replace("\n", " ").replace("\t", ""));
errorIdMultiHop++;
}
}
allErrors.put("singleHopErrors", singleHopErrors);
allErrors.put("multiHopErrors", multiHopErrors);
}
allErrors.put("message", new ArrayList<>(Collections.singletonList(message.toString())));
ObjectMapper mapper = new ObjectMapper();
try {
return mapper.writeValueAsString(allErrors);
} catch (JsonProcessingException e) {
log.error(String.format("FlowSpec %s errored on Json processing", flowSpec.toString()), e);
}
return "Could not form JSON in " + getClass().getSimpleName();
}
}
| 1,729 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/NoopRequesterService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.List;
import com.google.common.collect.Lists;
import com.linkedin.restli.server.resources.BaseResource;
import com.typesafe.config.Config;
import javax.inject.Inject;
/**
 * Default requester service which does not track any requester information.
 *
 * Always reports an empty requester list, so no requester metadata is
 * attached to flow configs when this implementation is in use.
 */
public class NoopRequesterService extends RequesterService {
  @Inject
  public NoopRequesterService(Config config) {
    super(config);
  }

  /**
   * @param resource the REST.li resource handling the current request (unused)
   * @return an empty, mutable list; no requesters are ever reported
   */
  @Override
  public List<ServiceRequester> findRequesters(BaseResource resource) {
    return Lists.newArrayList();
  }
}
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowStatusResource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.List;
import java.util.stream.Collectors;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.server.PagingContext;
import com.linkedin.restli.server.annotations.Context;
import com.linkedin.restli.server.annotations.Finder;
import com.linkedin.restli.server.annotations.Optional;
import com.linkedin.restli.server.annotations.QueryParam;
import com.linkedin.restli.server.annotations.RestLiCollection;
import com.linkedin.restli.server.resources.ComplexKeyResourceTemplate;
import javax.inject.Inject;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
/**
 * Resource for handling flow status requests.
 *
 * NOTE(review): per the comment on {@link #convertFlowStatus}, this resource is deprecated;
 * its logic is delegated to {@link FlowExecutionResourceLocalHandler}.
 */
@RestLiCollection(name = "flowstatuses", namespace = "org.apache.gobblin.service", keyName = "id")
public class FlowStatusResource extends ComplexKeyResourceTemplate<FlowStatusId, EmptyRecord, FlowStatus> {
  public static final String MESSAGE_SEPARATOR = ", ";

  // Field-injected generator used to look up flow statuses.
  @Inject
  FlowStatusGenerator _flowStatusGenerator;

  public FlowStatusResource() {}

  /**
   * Retrieve the FlowStatus with the given key
   * @param key flow status id key containing group name and flow name
   * @return {@link FlowStatus} with flow status for the latest execution of the flow,
   *         or null (mapped to a 404 by REST.li) when no status exists
   */
  @Override
  public FlowStatus get(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
    // this returns null to raise a 404 error if flowStatus is null
    return convertFlowStatus(FlowExecutionResourceLocalHandler.getFlowStatusFromGenerator(key, this._flowStatusGenerator));
  }

  /**
   * Finder returning the latest flow statuses for the given flow.
   *
   * @param flowId flow group/name to look up
   * @param count maximum number of statuses to return (optional)
   * @param tag only consider executions with this tag (optional)
   * @return converted statuses, or null (mapped to a 404 by REST.li) when none are found
   */
  @Finder("latestFlowStatus")
  public List<FlowStatus> getLatestFlowStatus(@Context PagingContext context,
      @QueryParam("flowId") FlowId flowId, @Optional @QueryParam("count") Integer count, @Optional @QueryParam("tag") String tag) {
    List<org.apache.gobblin.service.monitoring.FlowStatus> flowStatuses = FlowExecutionResourceLocalHandler
        .getLatestFlowStatusesFromGenerator(flowId, count, tag, null, this._flowStatusGenerator);
    if (flowStatuses != null) {
      return flowStatuses.stream().map(this::convertFlowStatus).collect(Collectors.toList());
    }
    // will return 404 status code
    return null;
  }

  /**
   * Forms a {@link org.apache.gobblin.service.FlowStatus} from a {@link org.apache.gobblin.service.monitoring.FlowStatus}
   * Logic is used from {@link FlowExecutionResource} since this class is deprecated
   * @param monitoringFlowStatus status from the monitoring layer; may be null
   * @return a {@link org.apache.gobblin.service.FlowStatus} converted from a {@link org.apache.gobblin.service.monitoring.FlowStatus},
   *         or null when the input is null
   */
  private FlowStatus convertFlowStatus(org.apache.gobblin.service.monitoring.FlowStatus monitoringFlowStatus) {
    if (monitoringFlowStatus == null) {
      return null;
    }
    FlowExecution flowExecution = FlowExecutionResourceLocalHandler.convertFlowStatus(monitoringFlowStatus, false);
    return new FlowStatus()
        .setId(flowExecution.getId())
        .setExecutionStatistics(flowExecution.getExecutionStatistics())
        .setMessage(flowExecution.getMessage())
        .setExecutionStatus(flowExecution.getExecutionStatus())
        .setJobStatuses(flowExecution.getJobStatuses());
  }
}
| 1,731 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowConfigsResourceHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.Collection;
import java.util.Properties;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.server.CreateResponse;
import com.linkedin.restli.server.UpdateResponse;
import org.apache.gobblin.runtime.api.FlowSpecSearchObject;
/**
 * Handler interface for {@link FlowConfig} CRUD operations backing the flow config REST resources.
 */
public interface FlowConfigsResourceHandler {
  /**
   * Get the {@link FlowConfig} identified by {@code flowId}.
   *
   * @throws FlowConfigLoggedException if the flow cannot be retrieved
   */
  FlowConfig getFlowConfig(FlowId flowId) throws FlowConfigLoggedException;

  /**
   * Get all {@link FlowConfig}s matching the given search criteria.
   *
   * @return the {@link FlowConfig}s that satisfy the {@link FlowSpecSearchObject}
   */
  Collection<FlowConfig> getFlowConfig(FlowSpecSearchObject flowSpecSearchObject) throws FlowConfigLoggedException;

  /**
   * Get all {@link FlowConfig}s.
   */
  Collection<FlowConfig> getAllFlowConfigs();

  /**
   * Get all {@link FlowConfig}s with pagination.
   *
   * @param start offset of the first config to return
   * @param count maximum number of configs to return
   */
  Collection<FlowConfig> getAllFlowConfigs(int start, int count);

  /**
   * Add a {@link FlowConfig}.
   */
  CreateResponse createFlowConfig(FlowConfig flowConfig) throws FlowConfigLoggedException;

  /**
   * Update a {@link FlowConfig}.
   */
  UpdateResponse updateFlowConfig(FlowId flowId, FlowConfig flowConfig) throws FlowConfigLoggedException;

  /**
   * Partially update a {@link FlowConfig}.
   */
  UpdateResponse partialUpdateFlowConfig(FlowId flowId, PatchRequest<FlowConfig> flowConfig) throws FlowConfigLoggedException;

  /**
   * Delete a {@link FlowConfig}.
   *
   * @param header properties forwarded to the delete operation
   */
  UpdateResponse deleteFlowConfig(FlowId flowId, Properties header) throws FlowConfigLoggedException;
}
| 1,732 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowExecutionResourceHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.List;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.server.PagingContext;
import com.linkedin.restli.server.UpdateResponse;
/**
 * Handler interface for {@link FlowExecution} operations (read, resume, kill) backing the
 * flow execution REST resources.
 */
public interface FlowExecutionResourceHandler {
  /**
   * Get the {@link FlowExecution} identified by {@code key}.
   */
  FlowExecution get(ComplexResourceKey<FlowStatusId, EmptyRecord> key);

  /**
   * Get the latest {@link FlowExecution}s of the given flow.
   *
   * @param count maximum number of executions to return (optional)
   * @param tag only return executions carrying this tag (optional)
   * @param executionStatus only return executions in this status (optional)
   * @param includeIssues whether job issues should be included in the response
   */
  List<FlowExecution> getLatestFlowExecution(PagingContext context, FlowId flowId, Integer count, String tag,
      String executionStatus, Boolean includeIssues);

  /**
   * Get latest {@link FlowExecution} for every flow in {@code flowGroup}.
   *
   * NOTE: `executionStatus` param not provided yet, without justifying use case, due to complexity of interaction
   * with `countPerFlow` and resulting efficiency concern of performing across many flows sharing the single named group.
   *
   * @param countPerFlow maximum number of executions to return per flow in the group
   */
  List<FlowExecution> getLatestFlowGroupExecutions(PagingContext context, String flowGroup, Integer countPerFlow,
      String tag, Boolean includeIssues);

  /**
   * Resume a failed {@link FlowExecution} from the point before failure.
   */
  void resume(ComplexResourceKey<FlowStatusId, EmptyRecord> key);

  /**
   * Kill a running {@link FlowExecution}.
   */
  UpdateResponse delete(ComplexResourceKey<FlowStatusId, EmptyRecord> key);
}
| 1,733 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/LocalGroupOwnershipPathAlterationListener.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.commons.io.IOUtils;
import org.apache.gobblin.util.filesystem.PathAlterationListenerAdaptor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Watches a local group ownership JSON file and keeps an in-memory copy of its
 * contents, refreshing whenever the file is created or modified and clearing it
 * when the file is deleted. Events for other files in the watched directory are ignored.
 */
public class LocalGroupOwnershipPathAlterationListener extends PathAlterationListenerAdaptor {
  private static final Logger LOG = LoggerFactory.getLogger(LocalGroupOwnershipPathAlterationListener.class);

  // Most recently parsed contents of the group ownership file.
  private JsonObject groupOwnerships;
  FileSystem fs;
  Path groupOwnershipFilePath;

  LocalGroupOwnershipPathAlterationListener(Path filePath) {
    this.groupOwnershipFilePath = filePath;
    try {
      this.fs = FileSystem.get(new Configuration());
      // Load the initial state eagerly so getGroupOwnerships() is usable before any file event fires.
      updateGroupOwnerships(filePath);
    } catch (IOException e) {
      throw new RuntimeException("Could not get local filesystem", e);
    }
  }

  public JsonObject getGroupOwnerships() {
    return groupOwnerships;
  }

  /** @return whether {@code candidate} refers to the watched group ownership file. */
  private boolean isGroupOwnershipFile(Path candidate) {
    return candidate.toUri().getPath().equals(this.groupOwnershipFilePath.toString());
  }

  void updateGroupOwnerships(Path path) {
    // only update if the group ownership file is changed
    if (!isGroupOwnershipFile(path)) {
      return;
    }
    LOG.info("Detected change in group ownership file, updating groups");
    try (FSDataInputStream stream = this.fs.open(path)) {
      String contents = IOUtils.toString(stream, Charset.defaultCharset());
      this.groupOwnerships = new JsonParser().parse(contents).getAsJsonObject();
    } catch (IOException e) {
      throw new RuntimeException("Could not open group ownership file at " + path.toString(), e);
    }
  }

  @Override
  public void onFileCreate(Path path) {
    updateGroupOwnerships(path);
  }

  @Override
  public void onFileChange(Path path) {
    updateGroupOwnerships(path);
  }

  @Override
  public void onFileDelete(Path path) {
    // ignore if another file in same directory is deleted
    if (isGroupOwnershipFile(path)) {
      this.groupOwnerships = new JsonObject();
    }
  }
}
| 1,734 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/validator/TemplateUriValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.validator;
import java.net.URI;
import java.net.URISyntaxException;
import com.google.common.base.Splitter;
import com.linkedin.data.DataMap;
import com.linkedin.data.element.DataElement;
import com.linkedin.data.message.Message;
import com.linkedin.data.schema.validator.AbstractValidator;
import com.linkedin.data.schema.validator.ValidatorContext;
/**
* Validates the String value to ensure that it is a comma separated list of FS scheme URIs
*/
public class TemplateUriValidator extends AbstractValidator
{
private static final String FS_SCHEME = "FS";
public TemplateUriValidator(DataMap config)
{
super(config);
}
@Override
public void validate(ValidatorContext ctx)
{
DataElement element = ctx.dataElement();
Object value = element.getValue();
String str = String.valueOf(value);
boolean valid = true;
try {
Iterable<String> uriStrings = Splitter.on(",").omitEmptyStrings().trimResults().split(str);
for (String uriString : uriStrings) {
URI uri = new URI(uriString);
if (!uri.getScheme().equalsIgnoreCase(FS_SCHEME)) {
throw new URISyntaxException(uriString, "Scheme is not FS");
}
}
}
catch (URISyntaxException e) {
valid = false;
}
if (!valid)
{
ctx.addResult(new Message(element.path(), "\"%1$s\" is not a well-formed comma-separated list of URIs", str));
}
}
} | 1,735 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/validator/CronValidator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service.validator;
import org.quartz.CronExpression;
import com.linkedin.data.DataMap;
import com.linkedin.data.element.DataElement;
import com.linkedin.data.message.Message;
import com.linkedin.data.schema.validator.AbstractValidator;
import com.linkedin.data.schema.validator.ValidatorContext;
/**
* Validates the String value to ensure it is a valid Cron expression
*/
public class CronValidator extends AbstractValidator
{
public CronValidator(DataMap config)
{
super(config);
}
@Override
public void validate(ValidatorContext ctx)
{
DataElement element = ctx.dataElement();
Object value = element.getValue();
String str = String.valueOf(value);
if (!CronExpression.isValidExpression(str))
{
ctx.addResult(new Message(element.path(), "\"%1$s\" is not in Cron format", str));
}
}
} | 1,736 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/test/java/org/apache/gobblin/service/FlowConfigV2Test.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.File;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.mortbay.jetty.HttpStatus;
import org.mockito.ArgumentMatchers;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.name.Names;
import com.linkedin.data.DataMap;
import com.linkedin.data.template.StringMap;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.RestLiResponseException;
import com.linkedin.restli.common.PatchRequest;
import com.linkedin.restli.internal.server.util.DataMapUtils;
import com.linkedin.restli.server.resources.BaseResource;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Setter;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.restli.EmbeddedRestliServer;
import org.apache.gobblin.runtime.api.FlowSpec;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.runtime.spec_store.FSSpecStore;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@Test(groups = { "gobblin.service" }, singleThreaded = true)
public class FlowConfigV2Test {
  private FlowConfigV2Client _client;
  private EmbeddedRestliServer _server;
  private File _testDirectory;
  private TestRequesterService _requesterService;
  private GroupOwnershipService groupOwnershipService;
  private File groupConfigFile;
  // Flow spec URI paths for which the mocked scheduler listener simulates a compilation failure.
  private Set<String> _compilationFailureFlowPaths = Sets.newHashSet();
  private static final String TEST_SPEC_STORE_DIR = "/tmp/flowConfigV2Test/";
  private static final String TEST_GROUP_NAME = "testGroup1";
  private static final String TEST_FLOW_NAME = "testFlow1";
  private static final String TEST_FLOW_NAME_2 = "testFlow2";
  private static final String TEST_FLOW_NAME_3 = "testFlow3";
  private static final String TEST_FLOW_NAME_4 = "testFlow4";
  private static final String TEST_FLOW_NAME_5 = "testFlow5";
  private static final String TEST_FLOW_NAME_6 = "testFlow6";
  private static final String TEST_FLOW_NAME_7 = "testFlow7";
  private static final String TEST_FLOW_NAME_8 = "testFlow8";
  private static final String TEST_FLOW_NAME_9 = "testFlow9";
  private static final String TEST_FLOW_NAME_10 = "testFlow10";
  private static final String TEST_FLOW_NAME_11 = "testFlow11";
  private static final String TEST_SCHEDULE = "0 1/0 * ? * *";
  private static final String TEST_TEMPLATE_URI = "FS:///templates/test.template";
  private static final ServiceRequester TEST_REQUESTER = new ServiceRequester("testName", "USER_PRINCIPAL", "testFrom");
  private static final ServiceRequester TEST_REQUESTER2 = new ServiceRequester("testName2", "USER_PRINCIPAL", "testFrom");

  /**
   * Starts an embedded rest.li server backed by a file-system flow catalog, with a mocked
   * scheduler listener whose compilation outcome is keyed off {@link #_compilationFailureFlowPaths},
   * and creates the client used by all tests.
   */
  @BeforeClass
  public void setUp() throws Exception {
    ConfigBuilder configBuilder = ConfigBuilder.create();
    _testDirectory = Files.createTempDir();
    configBuilder
        .addPrimitive(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY, _testDirectory.getAbsolutePath())
        .addPrimitive(FSSpecStore.SPECSTORE_FS_DIR_KEY, TEST_SPEC_STORE_DIR);
    cleanUpDir(TEST_SPEC_STORE_DIR);
    Config config = configBuilder.build();
    final FlowCatalog flowCatalog = new FlowCatalog(config);
    final SpecCatalogListener mockListener = mock(SpecCatalogListener.class);
    when(mockListener.getName()).thenReturn(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS);
    // NOTE: more general `ArgumentMatchers` (indicating compilation unsuccessful) must precede the specific
    when(mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(null));
    when(mockListener.onAddSpec(ArgumentMatchers.argThat((FlowSpec flowSpec) -> {
      return !_compilationFailureFlowPaths.contains(flowSpec.getUri().getPath());
    }))).thenReturn(new AddSpecResponse(""));
    flowCatalog.addListener(mockListener);
    flowCatalog.startAsync();
    flowCatalog.awaitRunning();
    _requesterService = new TestRequesterService(ConfigFactory.empty());
    // Group ownership file used by the group authorization tests below.
    this.groupConfigFile = new File(_testDirectory + "/TestGroups.json");
    String groups = "{\"testGroup\": \"testName,testName2\"}";
    Files.write(groups.getBytes(), this.groupConfigFile);
    Config groupServiceConfig = ConfigBuilder.create()
        .addPrimitive(LocalGroupOwnershipService.GROUP_MEMBER_LIST, this.groupConfigFile.getAbsolutePath())
        .build();
    groupOwnershipService = new LocalGroupOwnershipService(groupServiceConfig);
    Injector injector = Guice.createInjector(new Module() {
      @Override
      public void configure(Binder binder) {
        binder.bind(FlowConfigsV2ResourceHandler.class).toInstance(new FlowConfigV2ResourceLocalHandler(flowCatalog));
        // indicate that we are in unit testing since the resource is being blocked until flow catalog changes have
        // been made
        binder.bindConstant().annotatedWith(Names.named(FlowConfigsV2Resource.INJECT_READY_TO_USE)).to(Boolean.TRUE);
        binder.bind(RequesterService.class).toInstance(_requesterService);
        binder.bind(GroupOwnershipService.class).toInstance(groupOwnershipService);
      }
    });
    _server = EmbeddedRestliServer.builder().resources(
        Lists.<Class<? extends BaseResource>>newArrayList(FlowConfigsV2Resource.class)).injector(injector).build();
    _server.startAsync();
    _server.awaitRunning();
    Map<String, String> transportClientProperties = Maps.newHashMap();
    transportClientProperties.put(HttpClientFactory.HTTP_REQUEST_TIMEOUT, "10000");
    _client =
        new FlowConfigV2Client(String.format("http://localhost:%s/", _server.getPort()), transportClientProperties);
  }

  /** Deletes {@code dir} recursively if it exists. */
  protected void cleanUpDir(String dir) throws Exception {
    File specStoreDir = new File(dir);
    if (specStoreDir.exists()) {
      FileUtils.deleteDirectory(specStoreDir);
    }
  }

  @Test
  public void testCheckFlowExecutionId() throws Exception {
    Map<String, String> flowProperties = Maps.newHashMap();
    flowProperties.put("param1", "value1");
    // An unscheduled flow runs immediately and reports a real execution id.
    FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
        .setTemplateUris(TEST_TEMPLATE_URI).setProperties(new StringMap(flowProperties));
    FlowStatusId flowStatusId = _client.createFlowConfig(flowConfig);
    Assert.assertEquals(TEST_GROUP_NAME, flowStatusId.getFlowGroup());
    Assert.assertEquals(TEST_FLOW_NAME, flowStatusId.getFlowName());
    Assert.assertTrue(flowStatusId.getFlowExecutionId() != -1);
    // A scheduled flow has no immediate execution, signalled by execution id -1.
    flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_2))
        .setTemplateUris(TEST_TEMPLATE_URI).setProperties(new StringMap(flowProperties))
        .setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(true));
    Assert.assertEquals(_client.createFlowConfig(flowConfig).getFlowExecutionId().longValue(), -1L);
  }

  @Test
  public void testCreateRejectedWhenFailsCompilation() throws Exception {
    FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_10);
    _requesterService.setRequester(TEST_REQUESTER);
    Map<String, String> flowProperties = Maps.newHashMap();
    flowProperties.put("param1", "value1");
    flowProperties.put("param2", "value2");
    flowProperties.put("param3", "value3");
    FlowConfig flowConfig = new FlowConfig().setId(flowId)
        .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
        .setProperties(new StringMap(flowProperties));
    // inform mock that this flow should fail compilation
    _compilationFailureFlowPaths.add(String.format("/%s/%s", TEST_GROUP_NAME, TEST_FLOW_NAME_10));
    try {
      _client.createFlowConfig(flowConfig);
      Assert.fail("create seemingly accepted (despite anticipated flow compilation failure)");
    } catch (RestLiResponseException e) {
      Assert.assertEquals(e.getStatus(), HttpStatus.ORDINAL_400_Bad_Request);
      Assert.assertTrue(e.getMessage().contains("Flow was not compiled successfully."));
    }
  }

  @Test
  public void testPartialUpdate() throws Exception {
    FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_3);
    _requesterService.setRequester(TEST_REQUESTER);
    Map<String, String> flowProperties = Maps.newHashMap();
    flowProperties.put("param1", "value1");
    flowProperties.put("param2", "value2");
    flowProperties.put("param3", "value3");
    FlowConfig flowConfig = new FlowConfig().setId(flowId)
        .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
        .setProperties(new StringMap(flowProperties));
    // Set some initial config
    _client.createFlowConfig(flowConfig);
    // Change param2 to value4, delete param3
    String patchJson = "{\"schedule\":{\"$set\":{\"runImmediately\":true}},"
        + "\"properties\":{\"$set\":{\"param2\":\"value4\"},\"$delete\":[\"param3\"]}}";
    DataMap dataMap = DataMapUtils.readMap(IOUtils.toInputStream(patchJson));
    PatchRequest<FlowConfig> flowConfigPatch = PatchRequest.createFromPatchDocument(dataMap);
    _client.partialUpdateFlowConfig(flowId, flowConfigPatch);
    FlowConfig retrievedFlowConfig = _client.getFlowConfig(flowId);
    Assert.assertTrue(retrievedFlowConfig.getSchedule().isRunImmediately());
    Assert.assertEquals(retrievedFlowConfig.getProperties().get("param1"), "value1");
    Assert.assertEquals(retrievedFlowConfig.getProperties().get("param2"), "value4");
    Assert.assertFalse(retrievedFlowConfig.getProperties().containsKey("param3"));
  }

  @Test (expectedExceptions = RestLiResponseException.class)
  public void testPartialUpdateNotPossibleWithoutCreateFirst() throws Exception {
    FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
    String patchJson = "{\"schedule\":{\"$set\":{\"runImmediately\":true}},"
        + "\"properties\":{\"$set\":{\"param2\":\"value4\"},\"$delete\":[\"param3\"]}}";
    DataMap dataMap = DataMapUtils.readMap(IOUtils.toInputStream(patchJson));
    PatchRequest<FlowConfig> flowConfigPatch = PatchRequest.createFromPatchDocument(dataMap);
    // Throws exception since flow was not created first, prior to partial update
    _client.partialUpdateFlowConfig(flowId, flowConfigPatch);
  }

  @Test
  public void testPartialUpdateRejectedWhenFailsCompilation() throws Exception {
    FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_11);
    _requesterService.setRequester(TEST_REQUESTER);
    Map<String, String> flowProperties = Maps.newHashMap();
    flowProperties.put("param1", "value1");
    flowProperties.put("param2", "value2");
    flowProperties.put("param3", "value3");
    FlowConfig flowConfig = new FlowConfig().setId(flowId)
        .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
        .setProperties(new StringMap(flowProperties));
    // Set some initial config
    _client.createFlowConfig(flowConfig);
    // Change param2 to value4, delete param3, add param5=value5
    String patchJson = "{\"schedule\":{\"$set\":{\"runImmediately\":true}},"
        + "\"properties\":{\"$set\":{\"param2\":\"value4\",\"param5\":\"value5\"},\"$delete\":[\"param3\"]}}";
    DataMap dataMap = DataMapUtils.readMap(IOUtils.toInputStream(patchJson));
    PatchRequest<FlowConfig> flowConfigPatch = PatchRequest.createFromPatchDocument(dataMap);
    // inform mock that this flow should hereafter fail compilation
    _compilationFailureFlowPaths.add(String.format("/%s/%s", TEST_GROUP_NAME, TEST_FLOW_NAME_11));
    try {
      _client.partialUpdateFlowConfig(flowId, flowConfigPatch);
      Assert.fail("update seemingly accepted (despite anticipated flow compilation failure)");
    } catch (RestLiResponseException e) {
      Assert.assertEquals(e.getStatus(), HttpStatus.ORDINAL_400_Bad_Request);
      Assert.assertTrue(e.getMessage().contains("Flow was not compiled successfully."));
    }
    // verify that prior state of flow config still retained: that updates had no effect
    FlowConfig retrievedFlowConfig = _client.getFlowConfig(flowId);
    Assert.assertTrue(!retrievedFlowConfig.getSchedule().isRunImmediately());
    Assert.assertEquals(retrievedFlowConfig.getProperties().get("param1"), "value1");
    Assert.assertEquals(retrievedFlowConfig.getProperties().get("param2"), "value2");
    Assert.assertEquals(retrievedFlowConfig.getProperties().get("param3"), "value3");
    Assert.assertFalse(retrievedFlowConfig.getProperties().containsKey("param5"));
  }

  @Test
  public void testDisallowedRequester() throws Exception {
    try {
      ServiceRequester testRequester = new ServiceRequester("testName", "testType", "testFrom");
      _requesterService.setRequester(testRequester);
      Map<String, String> flowProperties = Maps.newHashMap();
      flowProperties.put("param1", "value1");
      FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_4))
          .setTemplateUris(TEST_TEMPLATE_URI)
          .setProperties(new StringMap(flowProperties));
      _client.createFlowConfig(flowConfig);
      testRequester.setName("testName2");
      _client.deleteFlowConfig(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_4));
      // Without this fail the test would silently pass even if the unauthorized
      // requester were allowed to delete the flow.
      Assert.fail("Expected an unauthorized requester to be rejected");
    } catch (RestLiResponseException e) {
      Assert.assertEquals(e.getStatus(), HttpStatus.ORDINAL_401_Unauthorized);
    }
  }

  @Test
  public void testGroupRequesterAllowed() throws Exception {
    ServiceRequester testRequester = new ServiceRequester("testName", "USER_PRINCIPAL", "testFrom");
    _requesterService.setRequester(testRequester);
    Map<String, String> flowProperties = Maps.newHashMap();
    FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_5))
        .setTemplateUris(TEST_TEMPLATE_URI)
        .setProperties(new StringMap(flowProperties))
        .setOwningGroup("testGroup");
    _client.createFlowConfig(flowConfig);
    // testName2 belongs to testGroup, so the delete must succeed.
    testRequester.setName("testName2");
    _client.deleteFlowConfig(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_5));
  }

  @Test
  public void testGroupRequesterRejected() throws Exception {
    try {
      ServiceRequester testRequester = new ServiceRequester("testName", "USER_PRINCIPAL", "testFrom");
      _requesterService.setRequester(testRequester);
      Map<String, String> flowProperties = Maps.newHashMap();
      FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_6))
          .setTemplateUris(TEST_TEMPLATE_URI)
          .setProperties(new StringMap(flowProperties))
          .setOwningGroup("testGroup");
      _client.createFlowConfig(flowConfig);
      // testName3 is not a member of testGroup, so the delete must be rejected.
      testRequester.setName("testName3");
      _client.deleteFlowConfig(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_6));
      // Without this fail the test would silently pass even if the non-member
      // were allowed to delete the flow.
      Assert.fail("Expected a requester outside the owning group to be rejected");
    } catch (RestLiResponseException e) {
      Assert.assertEquals(e.getStatus(), HttpStatus.ORDINAL_401_Unauthorized);
    }
  }

  @Test
  public void testGroupUpdateRejected() throws Exception {
    _requesterService.setRequester(TEST_REQUESTER);
    Map<String, String> flowProperties = Maps.newHashMap();
    FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_7))
        .setTemplateUris(TEST_TEMPLATE_URI)
        .setProperties(new StringMap(flowProperties))
        .setOwningGroup("testGroup");
    _client.createFlowConfig(flowConfig);
    // Update should be rejected because testName is not part of dummyGroup
    flowConfig.setOwningGroup("dummyGroup");
    try {
      _client.updateFlowConfig(flowConfig);
      Assert.fail("Expected update to be rejected");
    } catch (RestLiResponseException e) {
      Assert.assertEquals(e.getStatus(), HttpStatus.ORDINAL_401_Unauthorized);
    }
  }

  @Test
  public void testRequesterUpdate() throws Exception {
    _requesterService.setRequester(TEST_REQUESTER);
    Map<String, String> flowProperties = Maps.newHashMap();
    FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_8);
    FlowConfig flowConfig = new FlowConfig().setId(flowId)
        .setTemplateUris(TEST_TEMPLATE_URI)
        .setProperties(new StringMap(flowProperties))
        .setOwningGroup("testGroup");
    _client.createFlowConfig(flowConfig);
    // testName2 takes ownership of the flow
    flowProperties.put(RequesterService.REQUESTER_LIST, RequesterService.serialize(Lists.newArrayList(TEST_REQUESTER2)));
    flowConfig.setProperties(new StringMap(flowProperties));
    _requesterService.setRequester(TEST_REQUESTER2);
    _client.updateFlowConfig(flowConfig);
    // Check that the requester list was actually updated
    FlowConfig updatedFlowConfig = _client.getFlowConfig(flowId);
    Assert.assertEquals(RequesterService.deserialize(updatedFlowConfig.getProperties().get(RequesterService.REQUESTER_LIST)),
        Lists.newArrayList(TEST_REQUESTER2));
  }

  @Test
  public void testRequesterUpdateRejected() throws Exception {
    _requesterService.setRequester(TEST_REQUESTER);
    Map<String, String> flowProperties = Maps.newHashMap();
    FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME_9))
        .setTemplateUris(TEST_TEMPLATE_URI)
        .setProperties(new StringMap(flowProperties));
    _client.createFlowConfig(flowConfig);
    // Update should be rejected because testName is not allowed to update the owner to testName2
    flowProperties.put(RequesterService.REQUESTER_LIST, RequesterService.serialize(Lists.newArrayList(TEST_REQUESTER2)));
    flowConfig.setProperties(new StringMap(flowProperties));
    try {
      _client.updateFlowConfig(flowConfig);
      Assert.fail("Expected update to be rejected");
    } catch (RestLiResponseException e) {
      Assert.assertEquals(e.getStatus(), HttpStatus.ORDINAL_401_Unauthorized);
    }
  }

  @Test
  public void testInvalidFlowId() throws Exception {
    Map<String, String> flowProperties = Maps.newHashMap();
    flowProperties.put("param1", "value1");
    StringBuilder sb1 = new StringBuilder();
    StringBuilder sb2 = new StringBuilder();
    int maxFlowNameLength = ServiceConfigKeys.MAX_FLOW_NAME_LENGTH;
    int maxFlowGroupLength = ServiceConfigKeys.MAX_FLOW_GROUP_LENGTH;
    // >= makes each identifier exactly one character longer than the allowed maximum.
    while (maxFlowGroupLength-- >= 0) {
      sb1.append("A");
    }
    while (maxFlowNameLength-- >= 0) {
      sb2.append("A");
    }
    String TOO_LONG_FLOW_GROUP = sb1.toString();
    String TOO_LONG_FLOW_NAME = sb2.toString();
    FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TOO_LONG_FLOW_GROUP).setFlowName(TOO_LONG_FLOW_NAME))
        .setTemplateUris(TEST_TEMPLATE_URI).setProperties(new StringMap(flowProperties));
    try {
      _client.createFlowConfig(flowConfig);
    } catch (RestLiResponseException e) {
      Assert.assertEquals(e.getStatus(), HttpStatus.ORDINAL_422_Unprocessable_Entity);
      Assert.assertTrue(e.getMessage().contains("is out of range"));
      return;
    }
    Assert.fail();
  }

  @Test
  public void testRunFlow() throws Exception {
    String flowName = "testRunFlow";
    FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(flowName);
    _requesterService.setRequester(TEST_REQUESTER);
    Map<String, String> flowProperties = Maps.newHashMap();
    flowProperties.put("param1", "value1");
    FlowConfig flowConfig = new FlowConfig().setId(flowId)
        .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).setRunImmediately(false))
        .setProperties(new StringMap(flowProperties));
    // Create initial flowConfig
    _client.createFlowConfig(flowConfig);
    // Trigger flow
    _client.runImmediately(flowId);
    // Verify runImmediately was changed to true
    Assert.assertTrue(_client.getFlowConfig(flowId).getSchedule().isRunImmediately());
  }

  @AfterClass(alwaysRun = true)
  public void tearDown() throws Exception {
    if (_client != null) {
      _client.close();
    }
    if (_server != null) {
      _server.stopAsync();
      _server.awaitTerminated();
    }
    // File.delete() fails silently on a non-empty directory (the group config file
    // lives inside _testDirectory), so remove it recursively instead.
    FileUtils.deleteDirectory(_testDirectory);
    cleanUpDir(TEST_SPEC_STORE_DIR);
  }

  /**
   * {@link RequesterService} test double whose requester is set directly by each test
   * and which authorizes a request when any current requester appears in the original list.
   */
  public class TestRequesterService extends RequesterService {
    @Setter
    private ServiceRequester requester;

    public TestRequesterService(Config config) {
      super(config);
    }

    @Override
    public List<ServiceRequester> findRequesters(BaseResource resource) {
      return requester == null ? Lists.newArrayList() : Lists.newArrayList(requester);
    }

    @Override
    public boolean isRequesterAllowed(
        List<ServiceRequester> originalRequesterList, List<ServiceRequester> currentRequesterList) {
      for (ServiceRequester s : currentRequesterList) {
        if (originalRequesterList.contains(s)) {
          return true;
        }
      }
      return false;
    }
  }
}
| 1,737 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/test/java/org/apache/gobblin/service/FlowConfigTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.File;
import java.util.ArrayList;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.name.Names;
import com.linkedin.data.template.StringMap;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.RestLiResponseException;
import com.linkedin.restli.common.HttpStatus;
import com.linkedin.restli.server.resources.BaseResource;
import com.typesafe.config.Config;
import org.apache.gobblin.config.ConfigBuilder;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.restli.EmbeddedRestliServer;
import org.apache.gobblin.runtime.api.SpecCatalogListener;
import org.apache.gobblin.runtime.spec_catalog.AddSpecResponse;
import org.apache.gobblin.runtime.spec_catalog.FlowCatalog;
import org.apache.gobblin.runtime.spec_store.FSSpecStore;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@Test(groups = { "gobblin.service" }, singleThreaded = true)
public class FlowConfigTest {
private FlowConfigClient _client;
private EmbeddedRestliServer _server;
private File _testDirectory;
private static final String TEST_SPEC_STORE_DIR = "/tmp/flowConfigTest/";
private static final String TEST_GROUP_NAME = "testGroup1";
private static final String TEST_FLOW_NAME = "testFlow1";
private static final String TEST_SCHEDULE = "0 1/0 * ? * *";
private static final String TEST_TEMPLATE_URI = "FS:///templates/test.template";
private static final String TEST_DUMMY_GROUP_NAME = "dummyGroup";
private static final String TEST_DUMMY_FLOW_NAME = "dummyFlow";
  /**
   * Boots the test fixture: a {@link FlowCatalog} over a temp directory with a mocked
   * scheduler listener that always reports successful compilation, an embedded rest.li
   * server exposing {@link FlowConfigsResource}, and the client the tests call.
   * Statement order matters: the spec store is cleaned before the catalog is created,
   * the listener is registered before the catalog starts, and the server must be
   * running before the client is constructed.
   */
  @BeforeClass
  public void setUp() throws Exception {
    ConfigBuilder configBuilder = ConfigBuilder.create();
    _testDirectory = Files.createTempDir();
    configBuilder
        .addPrimitive(ConfigurationKeys.JOB_CONFIG_FILE_DIR_KEY, _testDirectory.getAbsolutePath())
        .addPrimitive(FSSpecStore.SPECSTORE_FS_DIR_KEY, TEST_SPEC_STORE_DIR);
    // Remove any spec-store leftovers from a previous run before the catalog reads them.
    cleanUpDir(TEST_SPEC_STORE_DIR);
    Config config = configBuilder.build();
    final FlowCatalog flowCatalog = new FlowCatalog(config);
    final SpecCatalogListener mockListener = mock(SpecCatalogListener.class);
    when(mockListener.getName()).thenReturn(ServiceConfigKeys.GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS);
    // Non-null AddSpecResponse means every spec "compiles" successfully in these tests.
    when(mockListener.onAddSpec(any())).thenReturn(new AddSpecResponse(""));
    flowCatalog.addListener(mockListener);
    flowCatalog.startAsync();
    flowCatalog.awaitRunning();
    Injector injector = Guice.createInjector(new Module() {
      @Override
      public void configure(Binder binder) {
        binder.bind(FlowConfigsResourceHandler.class)
            .toInstance(new FlowConfigResourceLocalHandler(flowCatalog));
        // indicate that we are in unit testing since the resource is being blocked until flow catalog changes have
        // been made
        binder.bindConstant().annotatedWith(Names.named(FlowConfigsResource.INJECT_READY_TO_USE)).to(Boolean.TRUE);
        binder.bind(RequesterService.class).toInstance(new NoopRequesterService(config));
      }
    });
    _server = EmbeddedRestliServer.builder().resources(
        Lists.<Class<? extends BaseResource>>newArrayList(FlowConfigsResource.class)).injector(injector).build();
    _server.startAsync();
    _server.awaitRunning();
    Map<String, String> transportClientProperties = Maps.newHashMap();
    transportClientProperties.put(HttpClientFactory.HTTP_REQUEST_TIMEOUT, "10000");
    _client =
        new FlowConfigClient(String.format("http://localhost:%s/", _server.getPort()), transportClientProperties);
  }
private void cleanUpDir(String dir) throws Exception {
File specStoreDir = new File(dir);
if (specStoreDir.exists()) {
FileUtils.deleteDirectory(specStoreDir);
}
}
@Test
public void testCreateBadSchedule() throws Exception {
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1");
FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
.setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule("bad schedule").
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
try {
_client.createFlowConfig(flowConfig);
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.S_422_UNPROCESSABLE_ENTITY.getCode());
return;
}
Assert.fail("Get should have gotten a 422 error");
}
@Test
public void testCreateBadTemplateUri() throws Exception {
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1");
FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
.setTemplateUris("FILE://bad/uri").setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
try {
_client.createFlowConfig(flowConfig);
} catch (RestLiResponseException e) {
Assert.assertEquals(e.getStatus(), HttpStatus.S_422_UNPROCESSABLE_ENTITY.getCode());
return;
}
Assert.fail("Get should have gotten a 422 error");
}
@Test
public void testCreate() throws Exception {
Map<String, String> flowProperties = Maps.newHashMap();
flowProperties.put("param1", "value1");
FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
.setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).
setRunImmediately(true))
.setProperties(new StringMap(flowProperties));
_client.createFlowConfig(flowConfig);
}
/** Re-creating an already-existing flow config must succeed (idempotent create), not error out. */
@Test (dependsOnMethods = "testCreate")
public void testCreateAgain() throws Exception {
  Map<String, String> props = Maps.newHashMap();
  props.put("param1", "value1");
  FlowConfig duplicate = new FlowConfig()
      .setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE))
      .setProperties(new StringMap(props));
  try {
    _client.createFlowConfig(duplicate);
  } catch (RestLiResponseException e) {
    Assert.fail("Create Again should pass without complaining that the spec already exists.");
  }
}
/**
 * Retrieves the flow config persisted by {@link #testCreateAgain()} and verifies
 * that id, schedule, template URI and properties round-trip intact.
 */
@Test (dependsOnMethods = "testCreateAgain")
public void testGet() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
  FlowConfig flowConfig = _client.getFlowConfig(flowId);
  Assert.assertEquals(flowConfig.getId().getFlowGroup(), TEST_GROUP_NAME);
  Assert.assertEquals(flowConfig.getId().getFlowName(), TEST_FLOW_NAME);
  Assert.assertEquals(flowConfig.getSchedule().getCronSchedule(), TEST_SCHEDULE );
  Assert.assertEquals(flowConfig.getTemplateUris(), TEST_TEMPLATE_URI);
  // The most recent create (testCreateAgain) did not set runImmediately, so it must read false.
  Assert.assertFalse(flowConfig.getSchedule().isRunImmediately());
  // Add this assert back when getFlowSpec() is changed to return the raw flow spec
  //Assert.assertEquals(flowConfig.getProperties().size(), 1);
  Assert.assertEquals(flowConfig.getProperties().get("param1"), "value1");
}
/**
 * Updates the existing flow config's properties and verifies the new values are
 * persisted, along with an (empty) serialized requester list added by the service.
 */
@Test (dependsOnMethods = "testGet")
public void testUpdate() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
  Map<String, String> flowProperties = Maps.newHashMap();
  flowProperties.put("param1", "value1b");
  flowProperties.put("param2", "value2b");
  FlowConfig flowConfig = new FlowConfig().setId(new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME))
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE))
      .setProperties(new StringMap(flowProperties));
  _client.updateFlowConfig(flowConfig);
  // Read the config back to confirm the update was actually persisted server-side.
  FlowConfig retrievedFlowConfig = _client.getFlowConfig(flowId);
  Assert.assertEquals(retrievedFlowConfig.getId().getFlowGroup(), TEST_GROUP_NAME);
  Assert.assertEquals(retrievedFlowConfig.getId().getFlowName(), TEST_FLOW_NAME);
  Assert.assertEquals(retrievedFlowConfig.getSchedule().getCronSchedule(), TEST_SCHEDULE );
  Assert.assertEquals(retrievedFlowConfig.getTemplateUris(), TEST_TEMPLATE_URI);
  // Add this assert back when getFlowSpec() is changed to return the raw flow spec
  //Assert.assertEquals(flowConfig.getProperties().size(), 2);
  Assert.assertEquals(retrievedFlowConfig.getProperties().get("param1"), "value1b");
  Assert.assertEquals(retrievedFlowConfig.getProperties().get("param2"), "value2b");
  Assert.assertEquals(retrievedFlowConfig.getProperties().get(RequesterService.REQUESTER_LIST), RequesterService.serialize(new ArrayList<>()));
}
/**
 * Sending an update with {@link ConfigurationKeys#FLOW_UNSCHEDULE_KEY}=true should
 * unschedule the flow: the marker key itself is not persisted, and the stored cron
 * expression is replaced by the "never run" sentinel schedule.
 */
@Test (dependsOnMethods = "testUpdate")
public void testUnschedule() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
  Map<String, String> flowProperties = Maps.newHashMap();
  flowProperties.put("param1", "value1");
  flowProperties.put(ConfigurationKeys.FLOW_UNSCHEDULE_KEY, "true");
  FlowConfig flowConfig = new FlowConfig().setId(flowId)
      .setTemplateUris(TEST_TEMPLATE_URI).setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE).
          setRunImmediately(true))
      .setProperties(new StringMap(flowProperties));
  _client.updateFlowConfig(flowConfig);
  FlowConfig persistedFlowConfig = _client.getFlowConfig(flowId);
  // The unschedule marker must be consumed by the handler, not stored with the spec.
  Assert.assertFalse(persistedFlowConfig.getProperties().containsKey(ConfigurationKeys.FLOW_UNSCHEDULE_KEY));
  Assert.assertEquals(persistedFlowConfig.getSchedule().getCronSchedule(), FlowConfigResourceLocalHandler.NEVER_RUN_CRON_SCHEDULE.getCronSchedule());
}
/** Deletes the flow config created earlier and verifies a subsequent get returns 404. */
@Test (dependsOnMethods = "testUnschedule")
public void testDelete() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_GROUP_NAME).setFlowName(TEST_FLOW_NAME);
  // Sanity-check the config is present before deleting it.
  FlowConfig existing = _client.getFlowConfig(flowId);
  Assert.assertEquals(existing.getId().getFlowGroup(), TEST_GROUP_NAME);
  Assert.assertEquals(existing.getId().getFlowName(), TEST_FLOW_NAME);
  _client.deleteFlowConfig(flowId);
  try {
    _client.getFlowConfig(flowId);
    Assert.fail("Get should have gotten a 404 error");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.S_404_NOT_FOUND.getCode());
  }
}
/** Fetching a flow config that was never created must surface a 404. */
@Test
public void testBadGet() throws Exception {
  FlowId missingId = new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME).setFlowName(TEST_DUMMY_FLOW_NAME);
  try {
    _client.getFlowConfig(missingId);
    Assert.fail("Get should have raised a 404 error");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.S_404_NOT_FOUND.getCode());
  }
}
/**
 * Deleting a flow config that does not exist must surface a 404 from the server.
 *
 * <p>Bug fix: this test previously called {@code getFlowConfig}, byte-duplicating
 * {@link #testBadGet()} and never exercising the delete path at all. It now calls
 * {@code deleteFlowConfig} as the name promises.
 */
@Test
public void testBadDelete() throws Exception {
  FlowId flowId = new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME).setFlowName(TEST_DUMMY_FLOW_NAME);
  try {
    _client.deleteFlowConfig(flowId);
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.S_404_NOT_FOUND.getCode());
    return;
  }
  Assert.fail("Delete should have raised a 404 error");
}
/** Updating a flow config that was never created must surface a 404. */
@Test
public void testBadUpdate() throws Exception {
  Map<String, String> props = Maps.newHashMap();
  props.put("param1", "value1b");
  props.put("param2", "value2b");
  FlowConfig missing = new FlowConfig()
      .setId(new FlowId().setFlowGroup(TEST_DUMMY_GROUP_NAME).setFlowName(TEST_DUMMY_FLOW_NAME))
      .setTemplateUris(TEST_TEMPLATE_URI)
      .setSchedule(new Schedule().setCronSchedule(TEST_SCHEDULE))
      .setProperties(new StringMap(props));
  try {
    _client.updateFlowConfig(missing);
    Assert.fail("Update should have raised a 404 error");
  } catch (RestLiResponseException e) {
    Assert.assertEquals(e.getStatus(), HttpStatus.S_404_NOT_FOUND.getCode());
  }
}
/**
 * Shuts down the REST client and embedded server, then removes the on-disk
 * test directory and spec store so reruns start clean.
 */
@AfterClass(alwaysRun = true)
public void tearDown() throws Exception {
  if (_client != null) {
    _client.close();
  }
  if (_server != null) {
    _server.stopAsync();
    _server.awaitTerminated();
  }
  _testDirectory.delete();
  cleanUpDir(TEST_SPEC_STORE_DIR);
}
}
| 1,738 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/test/java/org/apache/gobblin/service/FlowStatusTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Suppliers;
import com.google.common.collect.Lists;
import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metastore.StateStore;
import org.apache.gobblin.restli.EmbeddedRestliServer;
import org.apache.gobblin.runtime.troubleshooter.MultiContextIssueRepository;
import org.apache.gobblin.service.monitoring.FlowStatusGenerator;
import org.apache.gobblin.service.monitoring.JobStatusRetriever;
import static org.mockito.Mockito.mock;
/**
 * Tests for the flow-status REST endpoint, exercised via {@link FlowStatusClient}
 * against an {@link EmbeddedRestliServer} hosting {@link FlowStatusResource}.
 *
 * <p>Job statuses are served from the in-memory {@code _listOfJobStatusLists}
 * fixture (indexed by flow execution id) through {@link TestJobStatusRetriever}.
 */
@Test(groups = { "gobblin.service" }, singleThreaded = true)
public class FlowStatusTest {
  private FlowStatusClient _client;
  private EmbeddedRestliServer _server;
  // Fixture: element i holds the job statuses belonging to flow execution id i.
  private List<List<org.apache.gobblin.service.monitoring.JobStatus>> _listOfJobStatusLists;
  /** {@link JobStatusRetriever} stub backed entirely by the {@code _listOfJobStatusLists} fixture. */
  class TestJobStatusRetriever extends JobStatusRetriever {
    protected TestJobStatusRetriever(MultiContextIssueRepository issueRepository) {
      super(ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_DAG_MANAGER_ENABLED, issueRepository);
    }
    @Override
    public Iterator<org.apache.gobblin.service.monitoring.JobStatus> getJobStatusesForFlowExecution(String flowName,
        String flowGroup, long flowExecutionId) {
      // Execution id doubles as the fixture index.
      return _listOfJobStatusLists.get((int) flowExecutionId).iterator();
    }
    @Override
    public Iterator<org.apache.gobblin.service.monitoring.JobStatus> getJobStatusesForFlowExecution(String flowName,
        String flowGroup, long flowExecutionId, String jobGroup, String jobName) {
      return Collections.emptyIterator();
    }
    @Override
    public StateStore<State> getStateStore() {
      return null;
    }
    @Override
    public List<Long> getLatestExecutionIdsForFlow(String flowName, String flowGroup, int count) {
      if (_listOfJobStatusLists == null) {
        return null;
      }
      // Execution ids are the fixture indices; return the newest `count` of them, newest first.
      int startIndex = (_listOfJobStatusLists.size() >= count) ? _listOfJobStatusLists.size() - count : 0;
      List<Long> flowExecutionIds = IntStream.range(startIndex, _listOfJobStatusLists.size()).mapToObj(i -> (long) i)
          .collect(Collectors.toList());
      Collections.reverse(flowExecutionIds);
      return flowExecutionIds;
    }
    @Override
    public List<org.apache.gobblin.service.monitoring.FlowStatus> getFlowStatusesForFlowGroupExecutions(String flowGroup,
        int countJobStatusesPerFlowName) {
      return Lists.newArrayList(); // (as this method not exercised within `FlowStatusResource`)
    }
  }
  /** Starts an embedded rest.li server with the stub retriever wired in, and a client against it. */
  @BeforeClass
  public void setUp() throws Exception {
    JobStatusRetriever jobStatusRetriever = new TestJobStatusRetriever(mock(MultiContextIssueRepository.class));
    final FlowStatusGenerator flowStatusGenerator = new FlowStatusGenerator(jobStatusRetriever);
    // Bind the generator so FlowStatusResource receives it via Guice injection.
    Injector injector = Guice.createInjector(new Module() {
      @Override
      public void configure(Binder binder) {
        binder.bind(FlowStatusGenerator.class)
            .toInstance(flowStatusGenerator);
      }
    });
    _server = EmbeddedRestliServer.builder().resources(
        Lists.newArrayList(FlowStatusResource.class)).injector(injector).build();
    _server.startAsync();
    _server.awaitRunning();
    _client =
        new FlowStatusClient(String.format("http://localhost:%s/", _server.getPort()));
  }
  /**
   * Test finding the latest flow status
   * @throws Exception
   */
  @Test
  public void testFindLatest() throws Exception {
    // Execution 0: one job (js1) plus its flow-level status (fs1, jobGroup/jobName == NA_KEY).
    org.apache.gobblin.service.monitoring.JobStatus js1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 1").processedCount(100)
            .jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2")
            .issues(Suppliers.ofInstance(Collections.emptyList())).build();
    org.apache.gobblin.service.monitoring.JobStatus fs1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup(JobStatusRetriever.NA_KEY).jobName(JobStatusRetriever.NA_KEY).endTime(5000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0)
            .issues(Suppliers.ofInstance(Collections.emptyList())).build();
    // Execution 1 (the latest): two tagged jobs (js2 for dataset1, js3 for dataset2) plus flow status fs2.
    org.apache.gobblin.service.monitoring.JobStatus js2 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job1").jobTag("dataset1").startTime(2000L).endTime(6000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(1).message("Test message 2").processedCount(200)
            .jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus js3 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job2").jobTag("dataset2").startTime(2000L).endTime(6000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(1).message("Test message 3").processedCount(200)
            .jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus fs2 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup(JobStatusRetriever.NA_KEY).jobName(JobStatusRetriever.NA_KEY).endTime(7000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(1).message("Flow message")
            .issues(Suppliers.ofInstance(Collections.emptyList())).build();
    List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList1 = Lists.newArrayList(js1, fs1);
    List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList2 = Lists.newArrayList(js2, js3, fs2);
    _listOfJobStatusLists = Lists.newArrayList();
    _listOfJobStatusLists.add(jobStatusList1);
    _listOfJobStatusLists.add(jobStatusList2);
    FlowId flowId = new FlowId().setFlowGroup("fgroup1").setFlowName("flow1");
    // "Latest" must resolve to execution 1.
    FlowStatus flowStatus = _client.getLatestFlowStatus(flowId);
    Assert.assertEquals(flowStatus.getId().getFlowGroup(), "fgroup1");
    Assert.assertEquals(flowStatus.getId().getFlowName(), "flow1");
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionStartTime().longValue(), 1L);
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionEndTime().longValue(), 7000L);
    Assert.assertEquals(flowStatus.getMessage(), fs2.getMessage());
    Assert.assertEquals(flowStatus.getExecutionStatus(), ExecutionStatus.COMPLETE);
    JobStatusArray jobStatuses = flowStatus.getJobStatuses();
    // The flow-level status entry (NA_KEY) is folded into the FlowStatus, hence the +1.
    Assert.assertEquals(jobStatusList2.size(), jobStatuses.size() + 1);
    for (int i = 0; i < jobStatuses.size(); i++) {
      org.apache.gobblin.service.monitoring.JobStatus mjs = jobStatusList2.get(i);
      JobStatus js = jobStatuses.get(i);
      compareJobStatus(js, mjs);
    }
    // Asking for the last two executions returns them newest-first.
    List<FlowStatus> flowStatusList = _client.getLatestFlowStatus(flowId, 2, null);
    Assert.assertEquals(flowStatusList.size(), 2);
    Assert.assertEquals(flowStatusList.get(0).getId().getFlowExecutionId(), (Long) 1L);
    Assert.assertEquals(flowStatusList.get(1).getId().getFlowExecutionId(), (Long) 0L);
    Assert.assertEquals(flowStatusList.get(0).getJobStatuses().size(), 2);
    // Filtering by jobTag keeps only the dataset1 job.
    List<FlowStatus> flowStatusList2 = _client.getLatestFlowStatus(flowId, 1, "dataset1");
    Assert.assertEquals(flowStatusList2.get(0).getJobStatuses().size(), 1);
    Assert.assertEquals(flowStatusList2.get(0).getJobStatuses().get(0).getJobTag(), "dataset1");
  }
  /**
   * Test a flow that has all jobs completed
   * @throws Exception
   */
  @Test
  public void testGetCompleted() throws Exception {
    org.apache.gobblin.service.monitoring.JobStatus js1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 1").processedCount(100)
            .jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus js2 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job2").startTime(2000L).endTime(6000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 2").processedCount(200)
            .jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus fs1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup(JobStatusRetriever.NA_KEY).jobName(JobStatusRetriever.NA_KEY).endTime(7000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Flow message")
            .issues(Suppliers.ofInstance(Collections.emptyList())).build();
    List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList = Lists.newArrayList(js1, js2, fs1);
    _listOfJobStatusLists = Lists.newArrayList();
    _listOfJobStatusLists.add(jobStatusList);
    FlowStatusId flowId = new FlowStatusId().setFlowGroup("fgroup1").setFlowName("flow1").setFlowExecutionId(0);
    FlowStatus flowStatus = _client.getFlowStatus(flowId);
    Assert.assertEquals(flowStatus.getId().getFlowGroup(), "fgroup1");
    Assert.assertEquals(flowStatus.getId().getFlowName(), "flow1");
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionStartTime().longValue(), 0L);
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionEndTime().longValue(), 7000L);
    Assert.assertEquals(flowStatus.getMessage(), fs1.getMessage());
    // All jobs COMPLETE -> overall flow status COMPLETE.
    Assert.assertEquals(flowStatus.getExecutionStatus(), ExecutionStatus.COMPLETE);
    JobStatusArray jobStatuses = flowStatus.getJobStatuses();
    // The flow-level status entry (NA_KEY) is folded into the FlowStatus, hence the +1.
    Assert.assertEquals(jobStatusList.size(), jobStatuses.size() + 1);
    for (int i = 0; i < jobStatuses.size(); i++) {
      org.apache.gobblin.service.monitoring.JobStatus mjs = jobStatusList.get(i);
      JobStatus js = jobStatuses.get(i);
      compareJobStatus(js, mjs);
    }
  }
  /**
   * Test a flow that has some jobs still running
   * @throws Exception
   */
  @Test
  public void testGetRunning() throws Exception {
    org.apache.gobblin.service.monitoring.JobStatus js1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
            .eventName(ExecutionStatus.RUNNING.name()).flowExecutionId(0).message("Test message 1").processedCount(100)
            .jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus js2 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job2").startTime(2000L).endTime(6000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 2").processedCount(200)
            .jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus fs1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup(JobStatusRetriever.NA_KEY).jobName(JobStatusRetriever.NA_KEY)
            .eventName(ExecutionStatus.RUNNING.name()).flowExecutionId(0).message("Flow message")
            .issues(Suppliers.ofInstance(Collections.emptyList())).build();
    List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList = Lists.newArrayList(js1, js2, fs1);
    _listOfJobStatusLists = Lists.newArrayList();
    _listOfJobStatusLists.add(jobStatusList);
    FlowStatusId flowId = new FlowStatusId().setFlowGroup("fgroup1").setFlowName("flow1").setFlowExecutionId(0);
    FlowStatus flowStatus = _client.getFlowStatus(flowId);
    Assert.assertEquals(flowStatus.getId().getFlowGroup(), "fgroup1");
    Assert.assertEquals(flowStatus.getId().getFlowName(), "flow1");
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionStartTime().longValue(), 0L);
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionEndTime().longValue(), 6000L);
    Assert.assertEquals(flowStatus.getMessage(), fs1.getMessage());
    // One job still RUNNING -> overall flow status RUNNING.
    Assert.assertEquals(flowStatus.getExecutionStatus(), ExecutionStatus.RUNNING);
    JobStatusArray jobStatuses = flowStatus.getJobStatuses();
    // The flow-level status entry (NA_KEY) is folded into the FlowStatus, hence the +1.
    Assert.assertEquals(jobStatusList.size(), jobStatuses.size() + 1);
    for (int i = 0; i < jobStatuses.size(); i++) {
      org.apache.gobblin.service.monitoring.JobStatus mjs = jobStatusList.get(i);
      JobStatus js = jobStatuses.get(i);
      compareJobStatus(js, mjs);
    }
  }
  /**
   * Test a flow that has some failed jobs
   * @throws Exception
   */
  @Test
  public void testGetFailed() throws Exception {
    org.apache.gobblin.service.monitoring.JobStatus js1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
            .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 1").processedCount(100)
            .jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus js2 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup("jgroup1").jobName("job2").startTime(2000L).endTime(6000L)
            .eventName(ExecutionStatus.FAILED.name()).flowExecutionId(0).message("Test message 2").processedCount(200)
            .jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3")
            .issues(Suppliers.ofInstance(Collections.emptyList()))
            .build();
    org.apache.gobblin.service.monitoring.JobStatus fs1 =
        org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1").flowName("flow1")
            .jobGroup(JobStatusRetriever.NA_KEY).jobName(JobStatusRetriever.NA_KEY).endTime(7000L)
            .eventName(ExecutionStatus.FAILED.name()).flowExecutionId(0).message("Flow message")
            .issues(Suppliers.ofInstance(Collections.emptyList())).build();
    List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList = Lists.newArrayList(js1, js2, fs1);
    _listOfJobStatusLists = Lists.newArrayList();
    _listOfJobStatusLists.add(jobStatusList);
    FlowStatusId flowId = new FlowStatusId().setFlowGroup("fgroup1").setFlowName("flow1").setFlowExecutionId(0);
    FlowStatus flowStatus = _client.getFlowStatus(flowId);
    Assert.assertEquals(flowStatus.getId().getFlowGroup(), "fgroup1");
    Assert.assertEquals(flowStatus.getId().getFlowName(), "flow1");
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionStartTime().longValue(), 0L);
    Assert.assertEquals(flowStatus.getExecutionStatistics().getExecutionEndTime().longValue(), 7000L);
    Assert.assertEquals(flowStatus.getMessage(), fs1.getMessage());
    // One job FAILED -> overall flow status FAILED.
    Assert.assertEquals(flowStatus.getExecutionStatus(), ExecutionStatus.FAILED);
    JobStatusArray jobStatuses = flowStatus.getJobStatuses();
    // The flow-level status entry (NA_KEY) is folded into the FlowStatus, hence the +1.
    Assert.assertEquals(jobStatusList.size(), jobStatuses.size() + 1);
    for (int i = 0; i < jobStatuses.size(); i++) {
      org.apache.gobblin.service.monitoring.JobStatus mjs = jobStatusList.get(i);
      JobStatus js = jobStatuses.get(i);
      compareJobStatus(js, mjs);
    }
  }
  /** Stops the client and the embedded server. */
  @AfterClass(alwaysRun = true)
  public void tearDown() throws Exception {
    if (_client != null) {
      _client.close();
    }
    if (_server != null) {
      _server.stopAsync();
      _server.awaitTerminated();
    }
  }
  /**
   * compare the job status objects from the REST call and monitoring service
   * @param js JobStatus from REST
   * @param mjs JobStatus from monitoring
   */
  // NOTE(review): TestNG's assertEquals signature is (actual, expected); here the
  // monitoring-side value occupies the "actual" slot. Harmless, but error messages
  // will read with the roles swapped.
  private void compareJobStatus(JobStatus js, org.apache.gobblin.service.monitoring.JobStatus mjs) {
    Assert.assertEquals(mjs.getFlowGroup(), js.getFlowId().getFlowGroup());
    Assert.assertEquals(mjs.getFlowName(), js.getFlowId().getFlowName());
    Assert.assertEquals(mjs.getJobGroup(), js.getJobId().getJobGroup());
    Assert.assertEquals(mjs.getJobName(), js.getJobId().getJobName());
    Assert.assertEquals(mjs.getMessage(), js.getMessage());
    Assert.assertEquals(mjs.getStartTime(), js.getExecutionStatistics().getExecutionStartTime().longValue());
    Assert.assertEquals(mjs.getEndTime(), js.getExecutionStatistics().getExecutionEndTime().longValue());
    Assert.assertEquals(mjs.getProcessedCount(), js.getExecutionStatistics().getProcessedCount().longValue());
    Assert.assertEquals(mjs.getLowWatermark(), js.getJobState().getLowWatermark());
    Assert.assertEquals(mjs.getHighWatermark(), js.getJobState().getHighWatermark());
  }
}
| 1,739 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin/service/FlowConfigClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.r2.RemoteInvocationException;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.CreateIdRequest;
import com.linkedin.restli.client.DeleteRequest;
import com.linkedin.restli.client.GetRequest;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.ResponseFuture;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.client.UpdateRequest;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.IdResponse;
/**
* Flow Configuration client for REST flow configuration server
*/
public class FlowConfigClient implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(FlowConfigClient.class);
private Optional<HttpClientFactory> _httpClientFactory;
private Optional<RestClient> _restClient;
private final FlowconfigsRequestBuilders _flowconfigsRequestBuilders;
public static final String DELETE_STATE_STORE_KEY = "delete.state.store";
/**
* Construct a {@link FlowConfigClient} to communicate with http flow config server at URI serverUri
* @param serverUri address and port of the REST server
*/
public FlowConfigClient(String serverUri) {
this(serverUri, Collections.emptyMap());
}
public FlowConfigClient(String serverUri, Map<String, String> properties) {
LOG.debug("FlowConfigClient with serverUri " + serverUri);
_httpClientFactory = Optional.of(new HttpClientFactory());
Client r2Client = new TransportClientAdapter(_httpClientFactory.get().getClient(properties));
_restClient = Optional.of(new RestClient(r2Client, serverUri));
_flowconfigsRequestBuilders = createRequestBuilders();
}
/**
* Construct a {@link FlowConfigClient} to communicate with http flow config server at URI serverUri
* @param restClient restClient to send restli request
*/
public FlowConfigClient(RestClient restClient) {
LOG.debug("FlowConfigClient with restClient " + restClient);
_httpClientFactory = Optional.absent();
_restClient = Optional.of(restClient);
_flowconfigsRequestBuilders = createRequestBuilders();
}
// Clients using different service name can override this method
// RequestBuilders decide the name of the service requests go to.
protected FlowconfigsRequestBuilders createRequestBuilders() {
return new FlowconfigsRequestBuilders();
}
/**
* Create a flow configuration
* @param flowConfig flow configuration attributes
* @throws RemoteInvocationException
*/
public void createFlowConfig(FlowConfig flowConfig)
throws RemoteInvocationException {
LOG.debug("createFlowConfig with groupName " + flowConfig.getId().getFlowGroup() + " flowName " +
flowConfig.getId().getFlowName());
CreateIdRequest<ComplexResourceKey<FlowId, EmptyRecord>, FlowConfig> request =
_flowconfigsRequestBuilders.create().input(flowConfig).build();
ResponseFuture<IdResponse<ComplexResourceKey<FlowId, EmptyRecord>>> flowConfigResponseFuture =
_restClient.get().sendRequest(request);
flowConfigResponseFuture.getResponse();
}
/**
* Update a flow configuration
* @param flowConfig flow configuration attributes
* @throws RemoteInvocationException
*/
public void updateFlowConfig(FlowConfig flowConfig)
throws RemoteInvocationException {
LOG.debug("updateFlowConfig with groupName " + flowConfig.getId().getFlowGroup() + " flowName " +
flowConfig.getId().getFlowName());
FlowId flowId = new FlowId().setFlowGroup(flowConfig.getId().getFlowGroup())
.setFlowName(flowConfig.getId().getFlowName());
UpdateRequest<FlowConfig> updateRequest =
_flowconfigsRequestBuilders.update().id(new ComplexResourceKey<>(flowId, new EmptyRecord()))
.input(flowConfig).build();
ResponseFuture<EmptyRecord> response = _restClient.get().sendRequest(updateRequest);
response.getResponse();
}
/**
* Get a flow configuration
* @param flowId identifier of flow configuration to get
* @return a {@link FlowConfig} with the flow configuration
* @throws RemoteInvocationException
*/
public FlowConfig getFlowConfig(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("getFlowConfig with groupName " + flowId.getFlowGroup() + " flowName " + flowId.getFlowName());
GetRequest<FlowConfig> getRequest = _flowconfigsRequestBuilders.get()
.id(new ComplexResourceKey<>(flowId, new EmptyRecord())).build();
Response<FlowConfig> response =
_restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity();
}
/**
* Delete a flow configuration
* @param flowId identifier of flow configuration to delete
* @throws RemoteInvocationException
*/
public void deleteFlowConfig(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("deleteFlowConfig with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName());
DeleteRequest<FlowConfig> deleteRequest = _flowconfigsRequestBuilders.delete()
.id(new ComplexResourceKey<>(flowId, new EmptyRecord())).build();
ResponseFuture<EmptyRecord> response = _restClient.get().sendRequest(deleteRequest);
response.getResponse();
}
/**
* Delete a flow configuration
* @param flowId identifier of flow configuration to delete
* @throws RemoteInvocationException
*/
public void deleteFlowConfigWithStateStore(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("deleteFlowConfig and state store with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName());
DeleteRequest<FlowConfig> deleteRequest = _flowconfigsRequestBuilders.delete()
.id(new ComplexResourceKey<>(flowId, new EmptyRecord())).setHeader(DELETE_STATE_STORE_KEY, Boolean.TRUE.toString()).build();
ResponseFuture<EmptyRecord> response = _restClient.get().sendRequest(deleteRequest);
response.getResponse();
}
@Override
public void close()
throws IOException {
if (_restClient.isPresent()) {
_restClient.get().shutdown(new FutureCallback<None>());
}
if (_httpClientFactory.isPresent()) {
_httpClientFactory.get().shutdown(new FutureCallback<None>());
}
}
} | 1,740 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin/service/FlowConfigV2Client.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import com.google.common.collect.Maps;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.r2.RemoteInvocationException;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.ActionRequest;
import com.linkedin.restli.client.CreateIdEntityRequest;
import com.linkedin.restli.client.DeleteRequest;
import com.linkedin.restli.client.FindRequest;
import com.linkedin.restli.client.GetAllRequest;
import com.linkedin.restli.client.GetRequest;
import com.linkedin.restli.client.PartialUpdateRequest;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.ResponseFuture;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.client.UpdateRequest;
import com.linkedin.restli.common.CollectionResponse;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
import com.linkedin.restli.common.PatchRequest;
/**
* Flow Configuration client for REST flow configuration server
*/
public class FlowConfigV2Client implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(FlowConfigV2Client.class);
private Optional<HttpClientFactory> _httpClientFactory;
private Optional<RestClient> _restClient;
private final FlowconfigsV2RequestBuilders _flowconfigsV2RequestBuilders;
public static final String DELETE_STATE_STORE_KEY = "delete.state.store";
private static final Pattern flowStatusIdParams = Pattern.compile(".*params:\\((?<flowStatusIdParams>.*?)\\)");
/**
* Construct a {@link FlowConfigV2Client} to communicate with http flow config server at URI serverUri
* @param serverUri address and port of the REST server
*/
public FlowConfigV2Client(String serverUri) {
this(serverUri, Collections.emptyMap());
}
public FlowConfigV2Client(String serverUri, Map<String, String> properties) {
LOG.debug("FlowConfigClient with serverUri " + serverUri);
_httpClientFactory = Optional.of(new HttpClientFactory());
Client r2Client = new TransportClientAdapter(_httpClientFactory.get().getClient(properties));
_restClient = Optional.of(new RestClient(r2Client, serverUri));
_flowconfigsV2RequestBuilders = createRequestBuilders();
}
/**
* Construct a {@link FlowConfigV2Client} to communicate with http flow config server at URI serverUri
* @param restClient restClient to send restli request
*/
public FlowConfigV2Client(RestClient restClient) {
LOG.debug("FlowConfigV2Client with restClient " + restClient);
_httpClientFactory = Optional.absent();
_restClient = Optional.of(restClient);
_flowconfigsV2RequestBuilders = createRequestBuilders();
}
// Clients using different service name can override this method
// RequestBuilders decide the name of the service requests go to.
protected FlowconfigsV2RequestBuilders createRequestBuilders() {
return new FlowconfigsV2RequestBuilders();
}
/**
* Create a flow configuration
* It differs from {@link FlowConfigClient} in a way that it returns FlowStatusId,
* which can be used to find the FlowExecutionId
* @param flowConfig FlowConfig to be used to create the flow
* @return FlowStatusId
* @throws RemoteInvocationException
*/
public FlowStatusId createFlowConfig(FlowConfig flowConfig)
throws RemoteInvocationException {
LOG.debug("createFlowConfig with groupName " + flowConfig.getId().getFlowGroup() + " flowName " +
flowConfig.getId().getFlowName());
CreateIdEntityRequest<ComplexResourceKey<FlowId, FlowStatusId>, FlowConfig> request =
_flowconfigsV2RequestBuilders.createAndGet().input(flowConfig).build();
Response<?> response = FlowClientUtils.sendRequestWithRetry(_restClient.get(), request, FlowconfigsV2RequestBuilders.getPrimaryResource());
return createFlowStatusId(response.getLocation().toString());
}
private FlowStatusId createFlowStatusId(String locationHeader) {
Matcher matcher = flowStatusIdParams.matcher(locationHeader);
matcher.find();
String allFields = matcher.group("flowStatusIdParams");
String[] flowStatusIdParams = allFields.split(",");
Map<String, String> paramsMap = Maps.newHashMapWithExpectedSize(flowStatusIdParams.length);
for (String flowStatusIdParam : flowStatusIdParams) {
paramsMap.put(flowStatusIdParam.split(":")[0], flowStatusIdParam.split(":")[1]);
}
FlowStatusId flowStatusId = new FlowStatusId()
.setFlowName(paramsMap.get("flowName"))
.setFlowGroup(paramsMap.get("flowGroup"));
if (paramsMap.containsKey("flowExecutionId")) {
flowStatusId.setFlowExecutionId(Long.parseLong(paramsMap.get("flowExecutionId")));
}
return flowStatusId;
}
/**
* Update a flow configuration
* @param flowConfig flow configuration attributes
* @throws RemoteInvocationException
*/
public void updateFlowConfig(FlowConfig flowConfig)
throws RemoteInvocationException {
LOG.debug("updateFlowConfig with groupName " + flowConfig.getId().getFlowGroup() + " flowName " +
flowConfig.getId().getFlowName());
FlowId flowId = new FlowId().setFlowGroup(flowConfig.getId().getFlowGroup())
.setFlowName(flowConfig.getId().getFlowName());
UpdateRequest<FlowConfig> updateRequest =
_flowconfigsV2RequestBuilders.update().id(new ComplexResourceKey<>(flowId, new FlowStatusId()))
.input(flowConfig).build();
FlowClientUtils.sendRequestWithRetry(_restClient.get(), updateRequest, FlowconfigsV2RequestBuilders.getPrimaryResource());
}
/**
* Partially update a flow configuration
* @param flowId flow ID to update
* @param flowConfigPatch {@link PatchRequest} containing changes to the flowConfig
* @throws RemoteInvocationException
*/
public void partialUpdateFlowConfig(FlowId flowId, PatchRequest<FlowConfig> flowConfigPatch) throws RemoteInvocationException {
LOG.debug("partialUpdateFlowConfig with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName());
PartialUpdateRequest<FlowConfig> partialUpdateRequest =
_flowconfigsV2RequestBuilders.partialUpdate().id(new ComplexResourceKey<>(flowId, new FlowStatusId()))
.input(flowConfigPatch).build();
FlowClientUtils.sendRequestWithRetry(_restClient.get(), partialUpdateRequest, FlowconfigsV2RequestBuilders.getPrimaryResource());
}
/**
* Get a flow configuration
* @param flowId identifier of flow configuration to get
* @return a {@link FlowConfig} with the flow configuration
* @throws RemoteInvocationException
*/
public FlowConfig getFlowConfig(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("getFlowConfig with groupName " + flowId.getFlowGroup() + " flowName " + flowId.getFlowName());
GetRequest<FlowConfig> getRequest = _flowconfigsV2RequestBuilders.get()
.id(new ComplexResourceKey<>(flowId, new FlowStatusId())).build();
Response<FlowConfig> response = _restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity();
}
/**
* Get all {@link FlowConfig}s
* @return all {@link FlowConfig}s
* @throws RemoteInvocationException
*/
public Collection<FlowConfig> getAllFlowConfigs() throws RemoteInvocationException {
LOG.debug("getAllFlowConfigs called");
GetAllRequest<FlowConfig> getRequest = _flowconfigsV2RequestBuilders.getAll().build();
Response<CollectionResponse<FlowConfig>> response = _restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity().getElements();
}
/**
* Get all {@link FlowConfig}s
* @return all {@link FlowConfig}s within the range of start + count - 1, inclusive
* @throws RemoteInvocationException
*/
public Collection<FlowConfig> getAllFlowConfigs(int start, int count) throws RemoteInvocationException {
LOG.debug("getAllFlowConfigs with pagination called. Start: {}. Count: {}.", start, count);
GetAllRequest<FlowConfig> getRequest = _flowconfigsV2RequestBuilders.getAll().paginate(start, count).build();
Response<CollectionResponse<FlowConfig>> response = _restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity().getElements();
}
/**
* Get all {@link FlowConfig}s that matches the provided parameters. All the parameters are optional.
* If a parameter is null, it is ignored. {@see FlowConfigV2Resource#getFilteredFlows}
*/
public Collection<FlowConfig> getFlowConfigs(String flowGroup, String flowName, String templateUri, String userToProxy,
String sourceIdentifier, String destinationIdentifier, String schedule, Boolean isRunImmediately, String owningGroup,
String propertyFilter) throws RemoteInvocationException {
LOG.debug("getAllFlowConfigs called");
FindRequest<FlowConfig> getRequest = _flowconfigsV2RequestBuilders.findByFilterFlows()
.flowGroupParam(flowGroup).flowNameParam(flowName).templateUriParam(templateUri).userToProxyParam(userToProxy)
.sourceIdentifierParam(sourceIdentifier).destinationIdentifierParam(destinationIdentifier).scheduleParam(schedule)
.isRunImmediatelyParam(isRunImmediately).owningGroupParam(owningGroup).propertyFilterParam(propertyFilter).build();
Response<CollectionResponse<FlowConfig>> response = _restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity().getElements();
}
/**
* Get all {@link FlowConfig}s that matches the provided parameters. All the parameters are optional.
* If a parameter is null, it is ignored. {@see FlowConfigV2Resource#getFilteredFlows}
*/
public Collection<FlowConfig> getFlowConfigs(String flowGroup, String flowName, String templateUri, String userToProxy,
String sourceIdentifier, String destinationIdentifier, String schedule, Boolean isRunImmediately, String owningGroup,
String propertyFilter, int start, int count) throws RemoteInvocationException {
LOG.debug("getFilteredFlows pagination called. flowGroup: {}, flowName: {}, start: {}, count: {}.", flowGroup, flowName, start, count);
FindRequest<FlowConfig> getRequest = _flowconfigsV2RequestBuilders.findByFilterFlows()
.flowGroupParam(flowGroup).flowNameParam(flowName).templateUriParam(templateUri).userToProxyParam(userToProxy)
.sourceIdentifierParam(sourceIdentifier).destinationIdentifierParam(destinationIdentifier).scheduleParam(schedule)
.isRunImmediatelyParam(isRunImmediately).owningGroupParam(owningGroup).propertyFilterParam(propertyFilter).paginate(start, count).build();
Response<CollectionResponse<FlowConfig>> response = _restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity().getElements();
}
/**
* Delete a flow configuration
* @param flowId identifier of flow configuration to delete
* @throws RemoteInvocationException
*/
public void deleteFlowConfig(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("deleteFlowConfig with groupName {}, flowName {}", flowId.getFlowGroup(), flowId.getFlowName());
DeleteRequest<FlowConfig> deleteRequest = _flowconfigsV2RequestBuilders.delete()
.id(new ComplexResourceKey<>(flowId, new FlowStatusId())).build();
FlowClientUtils.sendRequestWithRetry(_restClient.get(), deleteRequest, FlowconfigsV2RequestBuilders.getPrimaryResource());
}
/**
* Delete a flow configuration
* @param flowId identifier of flow configuration to delete
* @throws RemoteInvocationException
*/
public void deleteFlowConfigWithStateStore(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("deleteFlowConfig and state store with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName());
DeleteRequest<FlowConfig> deleteRequest = _flowconfigsV2RequestBuilders.delete()
.id(new ComplexResourceKey<>(flowId, new FlowStatusId())).setHeader(DELETE_STATE_STORE_KEY, Boolean.TRUE.toString()).build();
ResponseFuture<EmptyRecord> response = _restClient.get().sendRequest(deleteRequest);
response.getResponse();
}
public String runImmediately(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("runImmediately with groupName " + flowId.getFlowGroup() + " flowName " + flowId.getFlowName());
ActionRequest<String> runImmediatelyRequest = _flowconfigsV2RequestBuilders.actionRunImmediately()
.id(new ComplexResourceKey<>(flowId, new FlowStatusId())).build();
Response<String> response = (Response<String>) FlowClientUtils.sendRequestWithRetry(_restClient.get(), runImmediatelyRequest,
FlowconfigsV2RequestBuilders.getPrimaryResource());
return response.getEntity();
}
@Override
public void close()
throws IOException {
if (_restClient.isPresent()) {
_restClient.get().shutdown(new FutureCallback<None>());
}
if (_httpClientFactory.isPresent()) {
_httpClientFactory.get().shutdown(new FutureCallback<None>());
}
}
} | 1,741 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin/service/FlowExecutionClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.r2.RemoteInvocationException;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.ActionRequest;
import com.linkedin.restli.client.DeleteRequest;
import com.linkedin.restli.client.FindRequest;
import com.linkedin.restli.client.GetRequest;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.common.CollectionResponse;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
/**
* Flow execution client for REST flow execution server
*/
public class FlowExecutionClient implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(FlowExecutionClient.class);
private Optional<HttpClientFactory> _httpClientFactory;
private Optional<RestClient> _restClient;
private final FlowexecutionsRequestBuilders _flowexecutionsRequestBuilders;
/**
* Construct a {@link FlowExecutionClient} to communicate with http flow execution server at URI serverUri
* @param serverUri address and port of the REST server
*/
public FlowExecutionClient(String serverUri) {
LOG.debug("FlowExecutionClient with serverUri " + serverUri);
_httpClientFactory = Optional.of(new HttpClientFactory());
Client r2Client = new TransportClientAdapter(_httpClientFactory.get().getClient(Collections.<String, String>emptyMap()));
_restClient = Optional.of(new RestClient(r2Client, serverUri));
_flowexecutionsRequestBuilders = createRequestBuilders();
}
/**
* Construct a {@link FlowExecutionClient} to communicate with http flow execution server at URI serverUri
* @param restClient restClient to send restli request
*/
public FlowExecutionClient(RestClient restClient) {
LOG.debug("FlowExecutionClient with restClient " + restClient);
_httpClientFactory = Optional.absent();
_restClient = Optional.of(restClient);
_flowexecutionsRequestBuilders = createRequestBuilders();
}
protected FlowexecutionsRequestBuilders createRequestBuilders() {
return new FlowexecutionsRequestBuilders();
}
/**
* Get a flow execution
* @param flowStatusId identifier of flow execution to get
* @return a {@link FlowExecution} with the flow execution
* @throws RemoteInvocationException
*/
public FlowExecution getFlowExecution(FlowStatusId flowStatusId)
throws RemoteInvocationException {
LOG.debug("getFlowExecution with groupName " + flowStatusId.getFlowGroup() + " flowName " +
flowStatusId.getFlowName());
GetRequest<FlowExecution> getRequest = _flowexecutionsRequestBuilders.get()
.id(new ComplexResourceKey<>(flowStatusId, new EmptyRecord())).build();
Response<FlowExecution> response =
_restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity();
}
/**
* Get the latest flow execution
* @param flowId identifier of flow execution to get
* @return a {@link FlowExecution}
* @throws RemoteInvocationException
*/
public FlowExecution getLatestFlowExecution(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("getFlowExecution with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName());
FindRequest<FlowExecution> findRequest = _flowexecutionsRequestBuilders.findByLatestFlowExecution().flowIdParam(flowId).build();
Response<CollectionResponse<FlowExecution>> response =
_restClient.get().sendRequest(findRequest).getResponse();
List<FlowExecution> flowExecutionList = response.getEntity().getElements();
if (flowExecutionList.isEmpty()) {
return null;
} else {
Preconditions.checkArgument(flowExecutionList.size() == 1);
return flowExecutionList.get(0);
}
}
public List<FlowExecution> getLatestFlowExecution(FlowId flowId, Integer count, String tag) throws RemoteInvocationException {
return getLatestFlowExecution(flowId, count, tag, null);
}
/**
* Get the latest k flow executions
* @param flowId identifier of flow execution to get
* @return a list of {@link FlowExecution}es corresponding to the latest <code>count</code> executions, containing only
* jobStatuses that match the given tag. If <code>executionStatus</code> is not null, only flows with that status are
* returned.
* @throws RemoteInvocationException
*/
public List<FlowExecution> getLatestFlowExecution(FlowId flowId, Integer count, String tag, String executionStatus)
throws RemoteInvocationException {
LOG.debug("getFlowExecution with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName() + " count " + Integer.toString(count));
FindRequest<FlowExecution> findRequest = _flowexecutionsRequestBuilders.findByLatestFlowExecution().flowIdParam(flowId).
addReqParam("count", count, Integer.class).addParam("tag", tag, String.class).addParam("executionStatus", executionStatus, String.class).build();
Response<CollectionResponse<FlowExecution>> response =
_restClient.get().sendRequest(findRequest).getResponse();
List<FlowExecution> flowExecutionList = response.getEntity().getElements();
if (flowExecutionList.isEmpty()) {
return null;
} else {
return flowExecutionList;
}
}
/**
* Resume the flow with given FlowStatusId from it's state before failure
* @param flowStatusId identifier of flow execution to resume
* @throws RemoteInvocationException
*/
public void resumeFlowExecution(FlowStatusId flowStatusId)
throws RemoteInvocationException {
LOG.debug("resumeFlowExecution with groupName " + flowStatusId.getFlowGroup() + " flowName " +
flowStatusId.getFlowName() + " flowExecutionId " + flowStatusId.getFlowExecutionId());
ActionRequest<Void> resumeRequest = _flowexecutionsRequestBuilders.actionResume()
.id(new ComplexResourceKey<>(flowStatusId, new EmptyRecord())).build();
FlowClientUtils.sendRequestWithRetry(_restClient.get(), resumeRequest, FlowexecutionsRequestBuilders.getPrimaryResource());
}
/**
* Kill the flow with given FlowStatusId
* @param flowStatusId identifier of flow execution to kill
* @throws RemoteInvocationException
*/
public void deleteFlowExecution(FlowStatusId flowStatusId)
throws RemoteInvocationException {
LOG.debug("deleteFlowExecution with groupName " + flowStatusId.getFlowGroup() + " flowName " +
flowStatusId.getFlowName() + " flowExecutionId " + flowStatusId.getFlowExecutionId());
DeleteRequest<FlowExecution> deleteRequest = _flowexecutionsRequestBuilders.delete()
.id(new ComplexResourceKey<>(flowStatusId, new EmptyRecord())).build();
FlowClientUtils.sendRequestWithRetry(_restClient.get(), deleteRequest, FlowexecutionsRequestBuilders.getPrimaryResource());
}
@Override
public void close()
throws IOException {
if (_restClient.isPresent()) {
_restClient.get().shutdown(new FutureCallback<None>());
}
if (_httpClientFactory.isPresent()) {
_httpClientFactory.get().shutdown(new FutureCallback<None>());
}
}
} | 1,742 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin/service/FlowClientUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.net.URI;
import java.net.URISyntaxException;
import com.linkedin.r2.RemoteInvocationException;
import com.linkedin.r2.message.RequestContext;
import com.linkedin.restli.client.Request;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.client.RestLiResponseException;
/**
* Utils to be used by clients
*/
public class FlowClientUtils {
/**
* Send a restli {@link Request} to the server through a {@link RestClient}, but if the request is rejected due to not
* being sent to a leader node, get the leader node from the errorDetails and retry the request with that node by setting
* the D2-Hint-TargetService attribute.
* @param restClient rest client to use to send the request
* @param request request to send
* @param primaryResource resource part of the request URL (e.g. flowconfigsV2, which can be taken from
* {@link FlowconfigsV2RequestBuilders#getPrimaryResource()}
* @return {@link Response} returned from the request
* @throws RemoteInvocationException
*/
public static Response<?> sendRequestWithRetry(RestClient restClient, Request<?> request, String primaryResource) throws RemoteInvocationException {
Response<?> response;
try {
response = restClient.sendRequest(request).getResponse();
} catch (RestLiResponseException exception) {
if (exception.hasErrorDetails() && exception.getErrorDetails().containsKey(ServiceConfigKeys.LEADER_URL)) {
String leaderUrl = exception.getErrorDetails().getString(ServiceConfigKeys.LEADER_URL);
RequestContext requestContext = new RequestContext();
try {
requestContext.putLocalAttr("D2-Hint-TargetService", new URI(leaderUrl + "/" + primaryResource));
} catch (URISyntaxException e) {
throw new RuntimeException("Could not build URI for for url " + leaderUrl, e);
}
response = restClient.sendRequest(request, requestContext).getResponse();
} else {
throw exception;
}
}
return response;
}
} | 1,743 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-client/src/main/java/org/apache/gobblin/service/FlowStatusClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.r2.RemoteInvocationException;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.FindRequest;
import com.linkedin.restli.client.GetRequest;
import com.linkedin.restli.client.Response;
import com.linkedin.restli.client.RestClient;
import com.linkedin.restli.common.CollectionResponse;
import com.linkedin.restli.common.ComplexResourceKey;
import com.linkedin.restli.common.EmptyRecord;
/**
* Flow status client for REST flow status server
* @deprecated Use {@link FlowExecutionClient}
*/
@Deprecated
public class FlowStatusClient implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(FlowStatusClient.class);
private Optional<HttpClientFactory> _httpClientFactory;
private Optional<RestClient> _restClient;
private final FlowstatusesRequestBuilders _flowstatusesRequestBuilders;
/**
* Construct a {@link FlowStatusClient} to communicate with http flow status server at URI serverUri
* @param serverUri address and port of the REST server
*/
public FlowStatusClient(String serverUri) {
LOG.debug("FlowStatusClient with serverUri " + serverUri);
_httpClientFactory = Optional.of(new HttpClientFactory());
Client r2Client = new TransportClientAdapter(_httpClientFactory.get().getClient(Collections.<String, String>emptyMap()));
_restClient = Optional.of(new RestClient(r2Client, serverUri));
_flowstatusesRequestBuilders = createRequestBuilders();
}
/**
* Construct a {@link FlowStatusClient} to communicate with http flow status server at URI serverUri
* @param restClient restClient to send restli request
*/
public FlowStatusClient(RestClient restClient) {
LOG.debug("FlowStatusClient with restClient " + restClient);
_httpClientFactory = Optional.absent();
_restClient = Optional.of(restClient);
_flowstatusesRequestBuilders = createRequestBuilders();
}
protected FlowstatusesRequestBuilders createRequestBuilders() {
return new FlowstatusesRequestBuilders();
}
/**
* Get a flow status
* @param flowStatusId identifier of flow status to get
* @return a {@link FlowStatus} with the flow status
* @throws RemoteInvocationException
*/
public FlowStatus getFlowStatus(FlowStatusId flowStatusId)
throws RemoteInvocationException {
LOG.debug("getFlowStatus with groupName " + flowStatusId.getFlowGroup() + " flowName " +
flowStatusId.getFlowName());
GetRequest<FlowStatus> getRequest = _flowstatusesRequestBuilders.get()
.id(new ComplexResourceKey<>(flowStatusId, new EmptyRecord())).build();
Response<FlowStatus> response =
_restClient.get().sendRequest(getRequest).getResponse();
return response.getEntity();
}
/**
* Get the latest flow status
* @param flowId identifier of flow status to get
* @return a {@link FlowStatus} with the flow status
* @throws RemoteInvocationException
*/
public FlowStatus getLatestFlowStatus(FlowId flowId)
throws RemoteInvocationException {
LOG.debug("getFlowStatus with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName());
FindRequest<FlowStatus> findRequest = _flowstatusesRequestBuilders.findByLatestFlowStatus().flowIdParam(flowId).build();
Response<CollectionResponse<FlowStatus>> response =
_restClient.get().sendRequest(findRequest).getResponse();
List<FlowStatus> flowStatusList = response.getEntity().getElements();
if (flowStatusList.isEmpty()) {
return null;
} else {
Preconditions.checkArgument(flowStatusList.size() == 1);
return flowStatusList.get(0);
}
}
/**
* Get the latest k flow statuses
* @param flowId identifier of flow status to get
* @return a list of {@link FlowStatus}es corresponding to the latest <code>count</code> executions, containing only
* jobStatuses that match the given tag.
* @throws RemoteInvocationException
*/
public List<FlowStatus> getLatestFlowStatus(FlowId flowId, Integer count, String tag)
throws RemoteInvocationException {
LOG.debug("getFlowStatus with groupName " + flowId.getFlowGroup() + " flowName " +
flowId.getFlowName() + " count " + Integer.toString(count));
FindRequest<FlowStatus> findRequest = _flowstatusesRequestBuilders.findByLatestFlowStatus().flowIdParam(flowId).
addReqParam("count", count, Integer.class).addParam("tag", tag, String.class).build();
Response<CollectionResponse<FlowStatus>> response =
_restClient.get().sendRequest(findRequest).getResponse();
List<FlowStatus> flowStatusList = response.getEntity().getElements();
if (flowStatusList.isEmpty()) {
return null;
} else {
return flowStatusList;
}
}
@Override
public void close()
throws IOException {
if (_restClient.isPresent()) {
_restClient.get().shutdown(new FutureCallback<None>());
}
if (_httpClientFactory.isPresent()) {
_httpClientFactory.get().shutdown(new FutureCallback<None>());
}
}
} | 1,744 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin/restli/SharedRestClientKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli;
import org.apache.gobblin.broker.iface.SharedResourceKey;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
/**
 * {@link SharedResourceKey} for {@link SharedRestClientFactory}. Contains an identifier for type of service
 * (e.g. throttling).
 */
@AllArgsConstructor
@EqualsAndHashCode
public class SharedRestClientKey implements SharedResourceKey {
  // Logical name of the restli service this key scopes clients to (e.g. "throttling").
  // Lombok generates the constructor and equals/hashCode over this field.
  public final String serviceName;

  /** The service name doubles as the configuration sub-key for this resource. */
  @Override
  public String toConfigurationKey() {
    return this.serviceName;
  }
}
| 1,745 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin/restli/SharedRestClientFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Executors;
import org.apache.gobblin.util.ConfigUtils;
import org.slf4j.Logger;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.linkedin.r2.filter.FilterChains;
import com.linkedin.r2.transport.common.Client;
import com.linkedin.r2.transport.common.bridge.client.TransportClientAdapter;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.restli.client.RestClient;
import com.typesafe.config.Config;
import org.apache.gobblin.broker.ResourceCoordinate;
import org.apache.gobblin.broker.ResourceInstance;
import org.apache.gobblin.broker.iface.ConfigView;
import org.apache.gobblin.broker.iface.NotConfiguredException;
import org.apache.gobblin.broker.iface.ScopeType;
import org.apache.gobblin.broker.iface.ScopedConfigView;
import org.apache.gobblin.broker.iface.SharedResourceFactory;
import org.apache.gobblin.broker.iface.SharedResourceFactoryResponse;
import org.apache.gobblin.broker.iface.SharedResourcesBroker;
import org.apache.gobblin.util.ExecutorsUtils;
import io.netty.channel.nio.NioEventLoopGroup;
import lombok.extern.slf4j.Slf4j;
/**
 * A {@link SharedResourceFactory} to create {@link RestClient}s.
 *
 * <p>To configure, specify the rest server uri at key "serverUri". The uri must use the "http" or
 * "https" scheme. Multiple comma-separated uris may be supplied; one is chosen at random per client.
 */
@Slf4j
public class SharedRestClientFactory<S extends ScopeType<S>> implements SharedResourceFactory<RestClient, SharedRestClientKey, S> {

  public static final String FACTORY_NAME = "restli";
  public static final String SERVER_URI_KEY = "serverUri";

  // Only plain http(s) endpoints are supported; other schemes (e.g. d2) are rejected.
  private static final Set<String> RESTLI_SCHEMES = Sets.newHashSet("http", "https");

  @Override
  public String getName() {
    return FACTORY_NAME;
  }

  /**
   * Creates a {@link RestClient} for the requested service.
   *
   * <p>Generic {@link SharedRestClientKey}s are first redirected (via a {@link ResourceCoordinate})
   * to a {@link UriRestClientKey} carrying the resolved uri, so that all requests resolving to the
   * same uri share a single client instance.
   */
  @Override
  public SharedResourceFactoryResponse<RestClient>
      createResource(SharedResourcesBroker<S> broker, ScopedConfigView<S, SharedRestClientKey> config) throws NotConfiguredException {
    try {
      SharedRestClientKey key = config.getKey();

      if (!(key instanceof UriRestClientKey)) {
        return new ResourceCoordinate<>(this, new UriRestClientKey(key.serviceName, resolveUriPrefix(config.getConfig(), key)),
            config.getScope());
      }

      String uriPrefix = ((UriRestClientKey) key).getUri();
      log.info(String.format("Creating a brand new rest client for service name %s and uri prefix %s", key.serviceName, uriPrefix));

      HttpClientFactory http = new HttpClientFactory(FilterChains.empty(),
          new NioEventLoopGroup(0 /* use default settings */,
              ExecutorsUtils.newDaemonThreadFactory(Optional.<Logger>absent(), Optional.of("R2 Nio Event Loop-%d"))),
          true,
          Executors.newSingleThreadScheduledExecutor(
              ExecutorsUtils.newDaemonThreadFactory(Optional.<Logger>absent(), Optional.of("R2 Netty Scheduler"))),
          true);

      Properties props = ConfigUtils.configToProperties(config.getConfig());
      if (!props.containsKey(HttpClientFactory.HTTP_REQUEST_TIMEOUT)) {
        // Rest.li changed the default timeout from 10s to 1s. Since some clients (e.g. throttling) relied on the longer
        // timeout, override this property unless set by the user explicitly
        props.setProperty(HttpClientFactory.HTTP_REQUEST_TIMEOUT, "10000");
      }

      Client r2Client = new TransportClientAdapter(http.getClient(Maps.fromProperties(props)));

      return new ResourceInstance<>(new RestClient(r2Client, uriPrefix));
    } catch (URISyntaxException use) {
      // Preserve the underlying parse failure as the cause; previously it was dropped, hiding the
      // offending uri from the stack trace.
      throw new RuntimeException(
          "Could not create a rest client for key " + Optional.fromNullable(config.getKey().toConfigurationKey()).or("null"), use);
    }
  }

  @Override
  public S getAutoScope(SharedResourcesBroker<S> broker, ConfigView<S, SharedRestClientKey> config) {
    return broker.selfScope().getType().rootScope();
  }

  /**
   * Get a uri prefix from the input configuration. When multiple uris are configured, one is chosen
   * at random to spread load among servers.
   */
  public static String resolveUriPrefix(Config config, SharedRestClientKey key) throws URISyntaxException, NotConfiguredException {
    List<String> connectionPrefixes = parseConnectionPrefixes(config, key);
    Preconditions.checkArgument(connectionPrefixes.size() > 0, "No uris found for service " + key.serviceName);
    return connectionPrefixes.get(new Random().nextInt(connectionPrefixes.size()));
  }

  /**
   * Parse the list of available uri prefixes from the input configuration.
   *
   * @throws NotConfiguredException if a generic key is supplied and {@value #SERVER_URI_KEY} is absent.
   */
  public static List<String> parseConnectionPrefixes(Config config, SharedRestClientKey key) throws URISyntaxException, NotConfiguredException {
    // A uri-bearing key already carries its resolved prefix; no configuration lookup needed.
    if (key instanceof UriRestClientKey) {
      return Lists.newArrayList(((UriRestClientKey) key).getUri());
    }
    if (!config.hasPath(SERVER_URI_KEY)) {
      throw new NotConfiguredException("Missing key " + SERVER_URI_KEY);
    }
    List<String> uris = Lists.newArrayList();
    for (String uri : Splitter.on(",").omitEmptyStrings().trimResults().splitToList(config.getString(SERVER_URI_KEY))) {
      uris.add(resolveUriPrefix(new URI(uri)));
    }
    return uris;
  }

  /**
   * Convert the input URI into a correctly formatted uri prefix (scheme + authority + trailing slash).
   * In the future, may also resolve d2 uris.
   *
   * @throws RuntimeException if the scheme is not http or https.
   */
  public static String resolveUriPrefix(URI serverURI)
      throws URISyntaxException {
    if (RESTLI_SCHEMES.contains(serverURI.getScheme())) {
      return new URI(serverURI.getScheme(), serverURI.getAuthority(), null, null, null).toString() + "/";
    }
    throw new RuntimeException("Unrecognized scheme for URI " + serverURI);
  }
}
| 1,746 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin/restli/UriRestClientKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli;
import java.net.URI;
import java.net.URISyntaxException;
import com.google.common.base.Preconditions;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
 * A {@link SharedRestClientKey} that explicitly specifies the {@link URI} of the remote server.
 *
 * <p>Equality includes both the service name (from the superclass) and the resolved uri prefix,
 * so clients targeting different servers for the same service are kept separate.
 */
@EqualsAndHashCode(callSuper = true)
public class UriRestClientKey extends SharedRestClientKey {
// Normalized uri prefix (scheme + authority + trailing slash) of the remote server.
@Getter
private final String uri;
public UriRestClientKey(String serviceName, URI uri) {
super(serviceName);
try {
Preconditions.checkNotNull(uri, "URI cannot be null.");
this.uri = SharedRestClientFactory.resolveUriPrefix(uri);
} catch (URISyntaxException use) {
// This should never happen: a well-formed java.net.URI was supplied, and resolveUriPrefix
// only rebuilds it from its scheme and authority.
throw new RuntimeException(use);
}
}
/**
 * This constructor assumes uriPrefix is already correctly formatted (see
 * {@link SharedRestClientFactory#resolveUriPrefix(URI)}). Most use cases should use the constructor
 * {@link #UriRestClientKey(String, URI)} instead.
 */
public UriRestClientKey(String serviceName, String uriPrefix) {
super(serviceName);
this.uri = uriPrefix;
}
}
| 1,747 |
0 | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-restli/gobblin-restli-utils/src/main/java/org/apache/gobblin/restli/EmbeddedRestliServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.restli;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.Random;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.google.inject.Singleton;
import com.linkedin.r2.filter.FilterChain;
import com.linkedin.r2.filter.FilterChains;
import com.linkedin.r2.filter.compression.EncodingType;
import com.linkedin.r2.filter.compression.ServerCompressionFilter;
import com.linkedin.r2.transport.common.bridge.server.TransportDispatcher;
import com.linkedin.r2.transport.http.server.HttpNettyServerFactory;
import com.linkedin.r2.transport.http.server.HttpServer;
import com.linkedin.restli.docgen.DefaultDocumentationRequestHandler;
import com.linkedin.restli.server.DelegatingTransportDispatcher;
import com.linkedin.restli.server.RestLiConfig;
import com.linkedin.restli.server.RestLiServer;
import com.linkedin.restli.server.guice.GuiceInjectResourceFactory;
import com.linkedin.restli.server.resources.BaseResource;
import com.linkedin.restli.server.resources.ResourceFactory;
import com.linkedin.restli.server.validation.RestLiValidationFilter;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
/**
 * An embedded Rest.li server using Netty.
 *
 * Usage:
 *   EmbeddedRestliServer server = EmbeddedRestliServer.builder().resources(List&lt;RestliResource&gt;).build();
 *   server.startAsync()
 *
 * The server is a {@link com.google.common.util.concurrent.Service} that provides access to a collection of Rest.li
 * resources. The following are optional settings (available through builder pattern):
 * * port - defaults to randomly chosen port between {@link #MIN_PORT} and {@link #MAX_PORT}.
 * * log - defaults to class Logger.
 * * name - defaults to the name of the first resource in the resource collection.
 * * injector - an {@link Injector} to inject dependencies into the Rest.li resources.
 */
@Singleton
public class EmbeddedRestliServer extends AbstractIdleService {

  private static final int MAX_PORT = 65535;
  private static final int MIN_PORT = 1024;

  private static final Logger LOGGER = LoggerFactory.getLogger(EmbeddedRestliServer.class);

  @Getter
  private final URI serverUri;
  @Getter @Setter
  private int port;
  @Getter
  private final Injector injector;
  private final Logger log;
  @Getter
  private final String name;
  private final Collection<Class<? extends BaseResource>> resources;

  // Initialized to absent so shutDown() is safe even if startUp() never ran or failed before
  // assigning the server (previously this field was null until startUp, causing an NPE).
  private volatile Optional<HttpServer> httpServer = Optional.absent();

  @Builder
  public EmbeddedRestliServer(URI serverUri, int port, Injector injector, Logger log, String name,
      Collection<Class<? extends BaseResource>> resources) {
    this.resources = resources;
    if (this.resources.isEmpty()) {
      throw new RuntimeException("No resources specified for embedded server.");
    }
    try {
      this.serverUri = serverUri == null ? new URI("http://localhost") : serverUri;
    } catch (URISyntaxException use) {
      throw new RuntimeException("Invalid URI. This is an error in code.", use);
    }
    this.port = computePort(port, this.serverUri);
    // Default to an empty injector so GuiceInjectResourceFactory always has something to delegate to.
    this.injector = injector == null ? Guice.createInjector(new Module() {
      @Override
      public void configure(Binder binder) {
      }
    }) : injector;
    this.log = log == null ? LOGGER : log;
    this.name = Strings.isNullOrEmpty(name) ? this.resources.iterator().next().getSimpleName() : name;
  }

  /**
   * Resolve the listening port: an explicit port wins, then the port embedded in the uri,
   * otherwise a random ephemeral-range port.
   */
  private int computePort(int port, URI uri) {
    if (port > 0) {
      return port;
    } else if (uri.getPort() > 0) {
      return uri.getPort();
    } else {
      return new Random().nextInt(MAX_PORT - MIN_PORT + 1) + MIN_PORT;
    }
  }

  @Override
  protected void startUp() throws Exception {
    RestLiConfig config = new RestLiConfig();
    Set<String> resourceClassNames = Sets.newHashSet();
    for (Class<? extends BaseResource> resClass : this.resources) {
      resourceClassNames.add(resClass.getName());
    }
    config.addResourceClassNames(resourceClassNames);
    config.setServerNodeUri(this.serverUri);
    config.setDocumentationRequestHandler(new DefaultDocumentationRequestHandler());
    config.addFilter(new RestLiValidationFilter());
    ResourceFactory factory = new GuiceInjectResourceFactory(this.injector);
    TransportDispatcher dispatcher = new DelegatingTransportDispatcher(new RestLiServer(config, factory));
    // Accept snappy and gzip compressed requests from clients.
    String acceptedFilters = EncodingType.SNAPPY.getHttpName() + "," + EncodingType.GZIP.getHttpName();
    FilterChain filterChain = FilterChains.createRestChain(new ServerCompressionFilter(acceptedFilters));
    this.httpServer = Optional.of(new HttpNettyServerFactory(filterChain).createServer(this.port, dispatcher));
    this.log.info("Starting the {} embedded server at port {}.", this.name, this.port);
    this.httpServer.get().start();
  }

  @Override
  protected void shutDown() throws Exception {
    if (this.httpServer.isPresent()) {
      this.log.info("Stopping the {} embedded server at port {}", this.name, this.port);
      this.httpServer.get().stop();
      this.httpServer.get().waitForStop();
    }
  }

  /**
   * Get the scheme and authority at which this server is listening.
   */
  public URI getListeningURI() {
    try {
      return new URI(this.serverUri.getScheme(), this.serverUri.getUserInfo(), this.serverUri.getHost(), this.port,
          null, null, null);
    } catch (URISyntaxException use) {
      throw new RuntimeException("Invalid URI. This is an error in code.", use);
    }
  }

  /**
   * Get uri prefix that should be used to create a {@link com.linkedin.restli.client.RestClient}.
   */
  public String getURIPrefix() {
    return getListeningURI().toString() + "/";
  }
}
| 1,748 |
0 | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg/predicates/DatasetHiveSchemaContainsNonOptionalUnionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.predicates;
import java.io.File;
import java.util.Collections;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat;
import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat;
import org.apache.hadoop.hive.serde2.avro.AvroSerDe;
import org.apache.iceberg.hive.HiveMetastoreTest;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.test.SimpleDatasetForTesting;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.util.ConfigUtils;
@Slf4j
// depends on icebergMetadataWriterTest to avoid concurrency between other HiveMetastoreTest(s) in CI.
// You can uncomment the dependsOnGroups if you want to test this class in isolation
@Test(dependsOnGroups = "icebergMetadataWriterTest")
public class DatasetHiveSchemaContainsNonOptionalUnionTest extends HiveMetastoreTest {

  private static String dbName = "dbName";
  private static File tmpDir;
  private static State state;
  private static String dbUri;
  private static String testTable = "test_table01";
  // Urn whose db/table components must match the PATTERN regex configured in setup().
  private static String datasetUrn = String.format("/data/%s/streaming/test-Table01/hourly/2023/01/01", dbName);

  @AfterSuite
  public void clean() throws Exception {
    FileUtils.forceDeleteOnExit(tmpDir);
  }

  /**
   * Boots a local (Derby-backed) metastore, registers an avro-backed table whose schema contains a
   * non-optional union ({@code ["string", "int"]} without a null branch), and prepares the predicate state.
   */
  @BeforeSuite
  public void setup() throws Exception {
    // Load the embedded Derby driver; getDeclaredConstructor avoids the deprecated Class.newInstance().
    Class.forName("org.apache.derby.jdbc.EmbeddedDriver").getDeclaredConstructor().newInstance();
    try {
      startMetastore();
    } catch (AlreadyExistsException ignored) { }
    tmpDir = Files.createTempDir();
    dbUri = String.format("%s/%s/%s", tmpDir.getAbsolutePath(), "metastore", dbName);
    try {
      metastoreClient.getDatabase(dbName);
    } catch (NoSuchObjectException e) {
      metastoreClient.createDatabase(
          new Database(dbName, "database", dbUri, Collections.emptyMap()));
    }
    final State serdeProps = new State();
    final String avroSchema = "{\"type\": \"record\", \"name\": \"TestEvent\",\"namespace\": \"test.namespace\", \"fields\": "
        + "[{\"name\":\"fieldName\", \"type\": %s}]}";
    // ["string", "int"] is a union with no null branch, i.e. a non-optional union.
    serdeProps.setProp("avro.schema.literal", String.format(avroSchema, "[\"string\", \"int\"]"));
    // Renamed from 'testTable' to avoid shadowing the static String field of the same name.
    HiveTable hiveTestTable = createTestHiveTable_Avro(serdeProps);
    metastoreClient.createTable(HiveMetaStoreUtils.getTable(hiveTestTable));
    state = ConfigUtils.configToState(ConfigUtils.propertiesToConfig(hiveConf.getAllProperties()));
    state.setProp(DatasetHiveSchemaContainsNonOptionalUnion.PATTERN, "/data/(\\w+)/.*/([\\w\\d_-]+)/hourly.*");
    Assert.assertNotNull(metastoreClient.getTable(dbName, testTable));
  }

  @Test
  public void testContainsNonOptionalUnion() throws Exception {
    DatasetHiveSchemaContainsNonOptionalUnion predicate = new DatasetHiveSchemaContainsNonOptionalUnion(state.getProperties());
    Dataset dataset = new SimpleDatasetForTesting(datasetUrn);
    Assert.assertTrue(predicate.test(dataset));
  }

  /** Builds an avro-serde {@link HiveTable} carrying the given serde properties. */
  private HiveTable createTestHiveTable_Avro(State props) {
    HiveTable.Builder builder = new HiveTable.Builder();
    HiveTable hiveTable = builder.withDbName(dbName).withTableName(testTable).withProps(props).build();
    hiveTable.setInputFormat(AvroContainerInputFormat.class.getName());
    hiveTable.setOutputFormat(AvroContainerOutputFormat.class.getName());
    hiveTable.setSerDeType(AvroSerDe.class.getName());
    // Serialize then deserialize as a way to quickly setup table object
    Table table = HiveMetaStoreUtils.getTable(hiveTable);
    return HiveMetaStoreUtils.getHiveTable(table);
  }
}
| 1,749 |
0 | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg/publisher/GobblinMCEPublisherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.publisher;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.Metrics;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.io.Closer;
import com.google.common.io.Files;
import azkaban.jobExecutor.AbstractJob;
import gobblin.configuration.WorkUnitState;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.policy.HiveSnapshotRegistrationPolicy;
import org.apache.gobblin.iceberg.GobblinMCEProducer;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.writer.FsDataWriterBuilder;
import org.apache.gobblin.writer.GobblinOrcWriter;
import org.apache.gobblin.writer.PartitionedDataWriter;
import org.apache.gobblin.writer.partitioner.TimeBasedWriterPartitioner;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.anyMap;
import static org.mockito.Mockito.when;
public class GobblinMCEPublisherTest {
Schema avroDataSchema = SchemaBuilder.record("test")
.fields()
.name("id")
.type()
.longType()
.noDefault()
.name("data")
.type()
.optional()
.stringType()
.endRecord();
Schema _avroPartitionSchema;
private String dbName = "hivedb";
static File tmpDir;
static File dataDir;
static File dataFile;
static File datasetDir;
static Path orcFilePath;
static String orcSchema;
public static final List<GenericRecord> deserializeAvroRecords(Class clazz, Schema schema, String schemaPath)
throws IOException {
List<GenericRecord> records = new ArrayList<>();
GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
InputStream dataInputStream = clazz.getClassLoader().getResourceAsStream(schemaPath);
Decoder decoder = DecoderFactory.get().jsonDecoder(schema, dataInputStream);
GenericRecord recordContainer = reader.read(null, decoder);
try {
while (recordContainer != null) {
records.add(recordContainer);
recordContainer = reader.read(null, decoder);
}
} catch (IOException ioe) {
dataInputStream.close();
}
return records;
}
@BeforeClass
public void setUp() throws Exception {
tmpDir = Files.createTempDir();
datasetDir = new File(tmpDir, "/data/tracking/testTable");
dataFile = new File(datasetDir, "/hourly/2020/03/17/08/data.avro");
Files.createParentDirs(dataFile);
dataDir = new File(dataFile.getParent());
Assert.assertTrue(dataDir.exists());
writeRecord();
_avroPartitionSchema =
SchemaBuilder.record("partitionTest").fields().name("ds").type().optional().stringType().endRecord();
//Write ORC file for test
Schema schema =
new Schema.Parser().parse(this.getClass().getClassLoader().getResourceAsStream("publisherTest/schema.avsc"));
orcSchema = schema.toString();
List<GenericRecord> recordList = deserializeAvroRecords(this.getClass(), schema, "publisherTest/data.json");
// Mock WriterBuilder, bunch of mocking behaviors to work-around precondition checks in writer builder
FsDataWriterBuilder<Schema, GenericRecord> mockBuilder =
(FsDataWriterBuilder<Schema, GenericRecord>) Mockito.mock(FsDataWriterBuilder.class);
when(mockBuilder.getSchema()).thenReturn(schema);
State dummyState = new WorkUnit();
String stagingDir = new File(tmpDir, "/orc/staging").getAbsolutePath();
String outputDir = new File(tmpDir, "/orc/output").getAbsolutePath();
dummyState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir);
dummyState.setProp(ConfigurationKeys.WRITER_FILE_PATH, "simple");
dummyState.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir);
dummyState.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir);
when(mockBuilder.getFileName(dummyState)).thenReturn("file.orc");
orcFilePath = new Path(outputDir, "simple/file.orc");
// Having a closer to manage the life-cycle of the writer object.
// Will verify if scenarios like double-close could survive.
Closer closer = Closer.create();
GobblinOrcWriter orcWriter = closer.register(new GobblinOrcWriter(mockBuilder, dummyState));
for (GenericRecord record : recordList) {
orcWriter.write(record);
}
orcWriter.commit();
orcWriter.close();
// Verify ORC file contains correct records.
FileSystem fs = FileSystem.getLocal(new Configuration());
Assert.assertTrue(fs.exists(orcFilePath));
}
@AfterClass
public void cleanUp() throws Exception {
FileUtils.forceDeleteOnExit(tmpDir);
}
@Test
public void testPublishGMCEForAvro() throws IOException {
GobblinMCEProducer producer = Mockito.mock(GobblinMCEProducer.class);
Mockito.doCallRealMethod()
.when(producer)
.getGobblinMetadataChangeEvent(anyMap(), anyList(), anyList(), anyMap(), any(), any(), any());
Mockito.doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
GobblinMetadataChangeEvent gmce =
producer.getGobblinMetadataChangeEvent((Map<Path, Metrics>) args[0], null, null,
(Map<String, String>) args[1], OperationType.add_files, SchemaSource.SCHEMAREGISTRY, null);
Assert.assertEquals(gmce.getNewFiles().size(), 1);
FileSystem fs = FileSystem.get(new Configuration());
Assert.assertEquals(gmce.getNewFiles().get(0).getFilePath(),
new Path(dataFile.getAbsolutePath()).makeQualified(fs.getUri(), new Path("/")).toString());
return null;
}
}).when(producer).sendGMCE(anyMap(), anyList(), anyList(), anyMap(), any(), any());
WorkUnitState state = new WorkUnitState();
setGMCEPublisherStateForAvroFile(state);
Mockito.doCallRealMethod().when(producer).setState(state);
producer.setState(state);
GobblinMCEPublisher publisher = new GobblinMCEPublisher(state, producer);
publisher.publishData(Arrays.asList(state));
}
@Test
public void testPublishGMCEForORC() throws IOException {
GobblinMCEProducer producer = Mockito.mock(GobblinMCEProducer.class);
Mockito.doCallRealMethod()
.when(producer)
.getGobblinMetadataChangeEvent(anyMap(), anyList(), anyList(), anyMap(), any(), any(), any());
Mockito.doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
GobblinMetadataChangeEvent gmce =
producer.getGobblinMetadataChangeEvent((Map<Path, Metrics>) args[0], null, null,
(Map<String, String>) args[1], OperationType.add_files, SchemaSource.SCHEMAREGISTRY, null);
Assert.assertEquals(gmce.getNewFiles().size(), 1);
FileSystem fs = FileSystem.get(new Configuration());
Charset charset = Charset.forName("UTF-8");
CharsetEncoder encoder = charset.newEncoder();
Assert.assertEquals(gmce.getNewFiles().get(0).getFilePath(),
orcFilePath.makeQualified(fs.getUri(), new Path("/")).toString());
Assert.assertEquals(gmce.getNewFiles().get(0).getFileMetrics().getLowerBounds().get(1).getValue(),
encoder.encode(CharBuffer.wrap("Alyssa")));
Assert.assertEquals(gmce.getNewFiles().get(0).getFileMetrics().getUpperBounds().get(1).getValue(),
encoder.encode(CharBuffer.wrap("Bob")));
return null;
}
}).when(producer).sendGMCE(anyMap(), anyList(), anyList(), anyMap(), any(), any());
WorkUnitState state = new WorkUnitState();
setGMCEPublisherStateForOrcFile(state);
Mockito.doCallRealMethod().when(producer).setState(state);
producer.setState(state);
GobblinMCEPublisher publisher = new GobblinMCEPublisher(state, producer);
publisher.publishData(Arrays.asList(state));
}
@Test (dependsOnMethods = {"testPublishGMCEForAvro"})
public void testPublishGMCEWithoutFile() throws IOException {
GobblinMCEProducer producer = Mockito.mock(GobblinMCEProducer.class);
Mockito.doCallRealMethod()
.when(producer)
.getGobblinMetadataChangeEvent(anyMap(), anyList(), anyList(), anyMap(), any(), any(), any());
Mockito.doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
GobblinMetadataChangeEvent gmce =
producer.getGobblinMetadataChangeEvent((Map<Path, Metrics>) args[0], null, null,
(Map<String, String>) args[1], OperationType.change_property, SchemaSource.NONE, null);
Assert.assertEquals(gmce.getNewFiles().size(), 1);
Assert.assertNull(gmce.getOldFiles());
Assert.assertNull(gmce.getOldFilePrefixes());
Assert.assertEquals(gmce.getOperationType(), OperationType.change_property);
return null;
}
}).when(producer).sendGMCE(anyMap(), anyList(), anyList(), anyMap(), any(), any());
WorkUnitState state = new WorkUnitState();
setGMCEPublisherStateWithoutNewFile(state);
Mockito.doCallRealMethod().when(producer).setState(state);
producer.setState(state);
GobblinMCEPublisher publisher = new GobblinMCEPublisher(state, producer);
publisher.publishData(Arrays.asList(state));
}
private void setGMCEPublisherStateForOrcFile(WorkUnitState state) {
state.setProp(GobblinMCEPublisher.NEW_FILES_LIST, orcFilePath.toString());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, "ORC");
state.setProp(GobblinMCEPublisher.OFFSET_RANGE_KEY, "testTopic-1:0-1000");
state.setProp(ConfigurationKeys.HIVE_REGISTRATION_POLICY,
HiveSnapshotRegistrationPolicy.class.getCanonicalName());
state.setProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR, datasetDir.toString());
state.setProp(AbstractJob.JOB_ID, "testFlow");
state.setProp(PartitionedDataWriter.WRITER_LATEST_SCHEMA, orcSchema);
}
private void setGMCEPublisherStateWithoutNewFile(WorkUnitState state) {
//state.setProp(GobblinMCEPublisher.NEW_FILES_LIST, dataFile.toString());
state.setProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, "AVRO");
state.setProp(GobblinMCEPublisher.OFFSET_RANGE_KEY, "testTopic-1:0-1000");
state.setProp(ConfigurationKeys.HIVE_REGISTRATION_POLICY,
HiveSnapshotRegistrationPolicy.class.getCanonicalName());
state.setProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR, datasetDir.toString());
state.setProp(AbstractJob.JOB_ID, "testFlow");
state.setProp(PartitionedDataWriter.WRITER_LATEST_SCHEMA, _avroPartitionSchema);
state.setProp(TimeBasedWriterPartitioner.WRITER_PARTITION_PREFIX, "hourly");
}
/**
 * Populates {@code state} with the publisher configuration for the Avro-file test case:
 * a single new Avro data file, its partition schema, a Kafka offset range, and the
 * snapshot Hive registration policy.
 */
private void setGMCEPublisherStateForAvroFile(WorkUnitState state) {
// Job identity and registration target.
state.setProp(AbstractJob.JOB_ID, "testFlow");
state.setProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR, datasetDir.toString());
state.setProp(ConfigurationKeys.HIVE_REGISTRATION_POLICY,
    HiveSnapshotRegistrationPolicy.class.getCanonicalName());
// File payload: one Avro file plus its schema and the topic partition offset range.
state.setProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, "AVRO");
state.setProp(GobblinMCEPublisher.NEW_FILES_LIST, dataFile.toString());
state.setProp(GobblinMCEPublisher.OFFSET_RANGE_KEY, "testTopic-1:0-1000");
state.setProp(PartitionedDataWriter.WRITER_LATEST_SCHEMA, _avroPartitionSchema);
}
/**
 * Writes a single test record ({@code id=1L, data="data"}) with schema
 * {@code avroDataSchema} into {@code dataFile}.
 *
 * @return the path of the written file as a String
 * @throws IOException if the Avro file cannot be created or written
 */
private String writeRecord() throws IOException {
GenericData.Record record = new GenericData.Record(avroDataSchema);
record.put("id", 1L);
record.put("data", "data");
String path = dataFile.toString();
DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(avroDataSchema);
// try-with-resources guarantees the writer (and its underlying stream) is closed
// even when create() or append() throws; the original leaked on exception.
try (DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
  dataFileWriter.create(avroDataSchema, dataFile);
  dataFileWriter.append(record);
}
return path;
}
}
| 1,750 |
0 | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg/writer/GobblinMCEWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.RejectedExecutionException;
import java.util.function.BiConsumer;
import org.apache.hadoop.fs.FileSystem;
import org.mockito.Mock;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import com.google.common.collect.Sets;
import lombok.SneakyThrows;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.writer.MetadataWriter;
import org.apache.gobblin.metadata.DatasetIdentifier;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamingExtractor;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link GobblinMCEWriter}: writer selection via the GMCE's
 * allowed-metadata-writers list, per-dataset fault tolerance on writer failures,
 * and transient-exception matching.
 *
 * <p>Two distinct mock {@link MetadataWriter} classes are used throughout because
 * allowed-writer filtering and the mocked constructor reflection are keyed on class names.
 */
public class GobblinMCEWriterTest {
// Db/table names the mocked HiveTable reports.
private String dbName = "hivedb";
private String tableName = "testTable";
// Writer under test; rebuilt in setUp() and closed in clean().
private GobblinMCEWriter gobblinMCEWriter;
// Kafka watermark attached to every RecordEnvelope written by these tests.
private KafkaStreamingExtractor.KafkaWatermark watermark;
private GobblinMetadataChangeEvent.Builder gmceBuilder;
// Not using field injection because they must be different classes
private MetadataWriter mockWriter;
private MetadataWriter exceptionWriter;
@Mock
private FileSystem fs;
@Mock
private HiveSpec mockHiveSpec;
@Mock
private HiveTable mockTable;
// Static mocks; each must be closed after every test to deregister it.
private MockedStatic<GobblinConstructorUtils> mockConstructorUtils;
private MockedStatic<FileSystem> mockedFileSystem;
@AfterMethod
public void clean() throws Exception {
gobblinMCEWriter.close();
// Release the static mocks so subsequent tests see the real classes again.
mockConstructorUtils.close();
mockedFileSystem.close();
}
/**
 * Builds a fresh GMCE builder, fresh mocks, and a {@link GobblinMCEWriter} whose two
 * configured metadata writers are a no-op mock and a mock that always throws on
 * {@code writeEnvelope}.
 */
@BeforeMethod
public void setUp() throws Exception {
initMocks();
gmceBuilder = GobblinMetadataChangeEvent.newBuilder()
.setDatasetIdentifier(DatasetIdentifier.newBuilder()
.setDataPlatformUrn("urn:namespace:dataPlatform:hdfs")
.setNativeName("testDB/testTable")
.build())
.setFlowId("testFlow")
.setSchemaSource(SchemaSource.EVENT)
.setOperationType(OperationType.add_files)
.setCluster(ClustersNames.getInstance().getClusterName());
watermark = new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(10L));
State state = new State();
// Configure both mock writer classes; GobblinMCEWriter instantiates them by name below.
String metadataWriters = String.join(",",
Arrays.asList(mockWriter.getClass().getName(), exceptionWriter.getClass().getName()));
state.setProp("gmce.metadata.writer.classes", metadataWriters);
Mockito.doNothing().when(mockWriter)
.writeEnvelope(
any(RecordEnvelope.class), anyMap(), anyMap(), any(HiveSpec.class));
Mockito.doThrow(new IOException("Test Exception")).when(exceptionWriter)
.writeEnvelope(
any(RecordEnvelope.class), anyMap(), anyMap(), any(HiveSpec.class));
// Intercept constructor reflection so the writer receives our mocks instead of real instances.
mockConstructorUtils = Mockito.mockStatic(GobblinConstructorUtils.class);
mockConstructorUtils.when(() -> GobblinConstructorUtils.invokeConstructor(
eq(MetadataWriter.class), eq(mockWriter.getClass().getName()), any(State.class)))
.thenReturn(mockWriter);
when(GobblinConstructorUtils.invokeConstructor(
eq(MetadataWriter.class), eq(exceptionWriter.getClass().getName()), any(State.class)))
.thenReturn(exceptionWriter);
// Any FileSystem.get(...) call inside the writer returns the mocked FileSystem.
mockedFileSystem = Mockito.mockStatic(FileSystem.class);
mockedFileSystem.when(() -> FileSystem.get(any()))
.thenReturn(fs);
when(mockTable.getDbName()).thenReturn(dbName);
when(mockTable.getTableName()).thenReturn(tableName);
when(mockHiveSpec.getTable()).thenReturn(mockTable);
gobblinMCEWriter = new GobblinMCEWriter(new GobblinMCEWriterBuilder(), state);
}
// When the GMCE names only mockWriter as allowed, exceptionWriter must never be invoked.
@Test
public void testWriteWhenWriterSpecified() throws IOException {
gmceBuilder.setAllowedMetadataWriters(Arrays.asList(mockWriter.getClass().getName()));
writeWithMetadataWriters(gmceBuilder.build());
Mockito.verify(mockWriter, Mockito.times(1)).writeEnvelope(
any(RecordEnvelope.class), anyMap(), anyMap(), any(HiveSpec.class));
Mockito.verify(exceptionWriter, never()).writeEnvelope(
any(RecordEnvelope.class), anyMap(), anyMap(), any(HiveSpec.class));
}
/**
 * A failing writer should mark the dataset as errored (resetting the writers for it)
 * rather than failing the write — until the number of errored datasets exceeds the
 * configured maximum, at which point an IOException is expected.
 */
@Test
public void testFaultTolerance() throws IOException {
gobblinMCEWriter.setMaxErrorDataset(1);
gobblinMCEWriter.metadataWriters = Arrays.asList(mockWriter, exceptionWriter, mockWriter);
gobblinMCEWriter.tableOperationTypeMap = new HashMap<>();
String dbName2 = dbName + "2";
String otherDb = "someOtherDB";
// dbName and dbName2 share "datasetPath", so their failures count against one dataset.
addTableStatus(dbName, "datasetPath");
addTableStatus(dbName2, "datasetPath");
addTableStatus(otherDb, "otherDatasetPath");
// Stateful verifier: writeEnvelope counts grow per call, reset happens once per table.
BiConsumer<String, String> verifyMocksCalled = new BiConsumer<String, String>(){
private int timesCalled = 0;
@Override
@SneakyThrows
public void accept(String dbName, String tableName) {
timesCalled++;
// also validates that order is maintained since all writers after an exception should reset instead of write
Mockito.verify(mockWriter, Mockito.times(timesCalled)).writeEnvelope(
any(RecordEnvelope.class), anyMap(), anyMap(), any(HiveSpec.class));
Mockito.verify(exceptionWriter, Mockito.times(timesCalled)).writeEnvelope(
any(RecordEnvelope.class), anyMap(), anyMap(), any(HiveSpec.class));
Mockito.verify(exceptionWriter, Mockito.times(1)).reset(dbName, tableName);
Mockito.verify(mockWriter, Mockito.times(1)).reset(dbName, tableName);
}
};
writeWithMetadataWriters(gmceBuilder.build());
verifyMocksCalled.accept(dbName, tableName);
// Another exception for same dataset but different db
when(mockTable.getDbName()).thenReturn(dbName2);
writeWithMetadataWriters(gmceBuilder.build());
verifyMocksCalled.accept(dbName2, tableName);
// exception thrown because exceeds max number of datasets with errors
when(mockTable.getDbName()).thenReturn(otherDb);
Assert.expectThrows(IOException.class, () -> writeWithMetadataWriters(gmceBuilder.setDatasetIdentifier(DatasetIdentifier.newBuilder()
.setDataPlatformUrn("urn:namespace:dataPlatform:hdfs")
.setNativeName("someOtherDB/testTable")
.build()).build()));
}
// Both an explicit two-writer allow-list and an empty list must yield both writers, in order.
@Test(dataProvider = "AllowMockMetadataWriter")
public void testGetAllowedMetadataWriters(List<String> metadataWriters) {
Assert.assertNotEquals(mockWriter.getClass().getName(), exceptionWriter.getClass().getName());
gmceBuilder.setAllowedMetadataWriters(metadataWriters);
List<MetadataWriter> allowedWriters = GobblinMCEWriter.getAllowedMetadataWriters(
gmceBuilder.build(),
Arrays.asList(mockWriter, exceptionWriter));
Assert.assertEquals(allowedWriters.size(), 2);
Assert.assertEquals(allowedWriters.get(0).getClass().getName(), mockWriter.getClass().getName());
Assert.assertEquals(allowedWriters.get(1).getClass().getName(), exceptionWriter.getClass().getName());
}
// exceptionMatches is expected to match message substrings, walk the cause chain,
// and match a RejectedExecutionException even with an empty message.
@Test
public void testDetectTransientException() {
Set<String> transientExceptions = Sets.newHashSet("Filesystem closed", "Hive timeout", "RejectedExecutionException");
IOException transientException = new IOException("test1 Filesystem closed test");
IOException wrapperException = new IOException("wrapper exception", transientException);
Assert.assertTrue(GobblinMCEWriter.exceptionMatches(transientException, transientExceptions));
Assert.assertTrue(GobblinMCEWriter.exceptionMatches(wrapperException, transientExceptions));
IOException nonTransientException = new IOException("Write failed due to bad schema");
Assert.assertFalse(GobblinMCEWriter.exceptionMatches(nonTransientException, transientExceptions));
RejectedExecutionException rejectedExecutionException = new RejectedExecutionException("");
Assert.assertTrue(GobblinMCEWriter.exceptionMatches(rejectedExecutionException, transientExceptions));
}
@DataProvider(name="AllowMockMetadataWriter")
public Object[][] allowMockMetadataWriterParams() {
// Data providers run before @BeforeMethod, so the mocks must be created here too.
initMocks();
return new Object[][] {
{Arrays.asList(mockWriter.getClass().getName(), exceptionWriter.getClass().getName())},
{Collections.emptyList()}
};
}
private void initMocks() {
MockitoAnnotations.openMocks(this);
// Hacky way to have 2 mock MetadataWriter "classes" with different underlying names
mockWriter = Mockito.mock(MetadataWriter.class);
exceptionWriter = Mockito.mock(TestExceptionMetadataWriter.class);
}
// Distinct abstract subtype so its mock's class name differs from mockWriter's.
private static abstract class TestExceptionMetadataWriter implements MetadataWriter { }
// Routes a GMCE through the writer's allow-list filtering and write path.
private void writeWithMetadataWriters(GobblinMetadataChangeEvent gmce) throws IOException {
List<MetadataWriter> allowedMetadataWriters = GobblinMCEWriter.getAllowedMetadataWriters(
gmce, gobblinMCEWriter.getMetadataWriters());
gobblinMCEWriter.writeWithMetadataWriters(new RecordEnvelope<>(gmce, watermark), allowedMetadataWriters,
new ConcurrentHashMap(), new ConcurrentHashMap(), mockHiveSpec);
}
// Seeds a pre-existing table status so fault tolerance can attribute errors to a dataset.
private void addTableStatus(String dbName, String datasetPath) {
gobblinMCEWriter.tableOperationTypeMap.put(dbName + "." + tableName, new GobblinMCEWriter.TableStatus(
OperationType.add_files, datasetPath, "GobblinMetadataChangeEvent_test-1", 0, 50));
}
}
| 1,751 |
0 | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg/writer/CompletenessWatermarkUpdaterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoUnit;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.gobblin.completeness.verifier.KafkaAuditCountVerifier;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.time.TimeIterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.Table;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.apache.gobblin.iceberg.writer.IcebergMetadataWriterConfigKeys.*;
import static org.mockito.Mockito.*;
/**
 * Tests for {@link CompletenessWatermarkUpdater}: drives the updater with a mocked
 * {@link KafkaAuditCountVerifier} over hour-sized windows and checks that the classic
 * completion watermark and the optional total-count completion watermark advance, and
 * that the table metadata, table properties, and job state are updated consistently.
 */
public class CompletenessWatermarkUpdaterTest {
static final String TOPIC = "testTopic";
static final String TABLE_NAME = "testTopic_tableName";
static final String TIME_ZONE = "America/Los_Angeles";
static final String AUDIT_CHECK_GRANULARITY = "HOUR";
// Hour-aligned reference instants used to build the mocked audit verification windows.
static final ZonedDateTime NOW = ZonedDateTime.now(ZoneId.of(TIME_ZONE)).truncatedTo(ChronoUnit.HOURS);
static final ZonedDateTime ONE_HOUR_AGO = TimeIterator.dec(NOW, TimeIterator.Granularity.valueOf(AUDIT_CHECK_GRANULARITY), 1);
static final ZonedDateTime TWO_HOUR_AGO = TimeIterator.dec(NOW, TimeIterator.Granularity.valueOf(AUDIT_CHECK_GRANULARITY), 2);
static final ZonedDateTime THREE_HOUR_AGO = TimeIterator.dec(NOW, TimeIterator.Granularity.valueOf(AUDIT_CHECK_GRANULARITY), 3);
@Test
public void testClassicWatermarkOnly() throws IOException {
TestParams params = createTestParams();
// Round 1: the expected completion watermark bootstraps to ONE_HOUR_AGO
// total completion watermark is not enabled
KafkaAuditCountVerifier verifier = mockKafkaAuditCountVerifier(ImmutableList.of(
new AuditCountVerificationResult(TWO_HOUR_AGO, ONE_HOUR_AGO, true /* isCompleteClassic */, false /* isCompleteTotalCount */),
new AuditCountVerificationResult(THREE_HOUR_AGO, TWO_HOUR_AGO, true, true)));
CompletenessWatermarkUpdater updater =
new CompletenessWatermarkUpdater("testTopic", AUDIT_CHECK_GRANULARITY, TIME_ZONE, params.tableMetadata, params.props, params.state, verifier);
// Timestamps are processed newest-first (reverse order).
SortedSet<ZonedDateTime> timestamps = new TreeSet<>(Collections.reverseOrder());
timestamps.add(ONE_HOUR_AGO);
timestamps.add(TWO_HOUR_AGO);
boolean includeTotalCountCompletionWatermark = false;
updater.run(timestamps, includeTotalCountCompletionWatermark);
validateCompletionWaterMark(ONE_HOUR_AGO, params);
validateEmptyTotalCompletionWatermark(params);
// Round 2: the expected completion watermark moves from ONE_HOUR_AGO to NOW
// total completion watermark is not enabled
verifier = mockKafkaAuditCountVerifier(ImmutableList.of(
new AuditCountVerificationResult(ONE_HOUR_AGO, NOW, true /* isCompleteClassic */, false /* isCompleteTotalCount */),
new AuditCountVerificationResult(TWO_HOUR_AGO, ONE_HOUR_AGO, true, true),
new AuditCountVerificationResult(THREE_HOUR_AGO, TWO_HOUR_AGO, true, true)));
updater.setAuditCountVerifier(verifier);
timestamps.add(NOW);
updater.run(timestamps, includeTotalCountCompletionWatermark);
validateCompletionWaterMark(NOW, params);
validateEmptyTotalCompletionWatermark(params);
}
@Test
public void testClassicAndTotalCountWatermark() throws IOException {
TestParams params = createTestParams();
// Round 1: the expected completion watermark bootstraps to ONE_HOUR_AGO
// the expected total completion watermark bootstraps to TWO_HOUR_AGO
KafkaAuditCountVerifier verifier = mockKafkaAuditCountVerifier(ImmutableList.of(
new AuditCountVerificationResult(TWO_HOUR_AGO, ONE_HOUR_AGO, true /* isCompleteClassic */, false /* isCompleteTotalCount */),
new AuditCountVerificationResult(THREE_HOUR_AGO, TWO_HOUR_AGO, true, true)));
CompletenessWatermarkUpdater updater =
new CompletenessWatermarkUpdater("testTopic", AUDIT_CHECK_GRANULARITY, TIME_ZONE, params.tableMetadata, params.props, params.state, verifier);
SortedSet<ZonedDateTime> timestamps = new TreeSet<>(Collections.reverseOrder());
timestamps.add(ONE_HOUR_AGO);
timestamps.add(TWO_HOUR_AGO);
boolean includeTotalCountCompletionWatermark = true;
updater.run(timestamps, includeTotalCountCompletionWatermark);
validateCompletionWaterMark(ONE_HOUR_AGO, params);
validateTotalCompletionWatermark(TWO_HOUR_AGO, params);
// Round 2: the expected completion watermark moves from ONE_HOUR_AGO to NOW
// the expected total completion watermark moves from TWO_HOUR_AGO to ONE_HOUR_AGO
verifier = mockKafkaAuditCountVerifier(ImmutableList.of(
new AuditCountVerificationResult(ONE_HOUR_AGO, NOW, true /* isCompleteClassic */, false /* isCompleteTotalCount */),
new AuditCountVerificationResult(TWO_HOUR_AGO, ONE_HOUR_AGO, true, true),
new AuditCountVerificationResult(THREE_HOUR_AGO, TWO_HOUR_AGO, true, true)));
updater.setAuditCountVerifier(verifier);
timestamps.add(NOW);
updater.run(timestamps, includeTotalCountCompletionWatermark);
validateCompletionWaterMark(NOW, params);
validateTotalCompletionWatermark(ONE_HOUR_AGO, params);
}
// Asserts that the classic completion watermark was propagated to all three sinks:
// table metadata, table properties, and job state.
static void validateCompletionWaterMark(ZonedDateTime expectedDT, TestParams params) {
long expected = expectedDT.toInstant().toEpochMilli();
// 1. assert updated tableMetadata.completionWatermark
Assert.assertEquals(params.tableMetadata.completionWatermark, expected);
// 2. assert updated property
Assert.assertEquals(params.props.get(COMPLETION_WATERMARK_KEY), String.valueOf(expected));
Assert.assertEquals(params.props.get(COMPLETION_WATERMARK_TIMEZONE_KEY), TIME_ZONE);
// 3. assert updated state
String watermarkKey = String.format(STATE_COMPLETION_WATERMARK_KEY_OF_TABLE,
params.tableMetadata.table.get().name().toLowerCase(Locale.ROOT));
Assert.assertEquals(params.state.getProp(watermarkKey), String.valueOf(expected));
}
// Same three-sink check for the total-count completion watermark.
static void validateTotalCompletionWatermark(ZonedDateTime expectedDT, TestParams params) {
long expected = expectedDT.toInstant().toEpochMilli();
// 1. expect updated tableMetadata.totalCountCompletionWatermark
Assert.assertEquals(params.tableMetadata.totalCountCompletionWatermark, expected);
// 2. expect updated property
Assert.assertEquals(params.props.get(TOTAL_COUNT_COMPLETION_WATERMARK_KEY), String.valueOf(expected));
// 3. expect updated state
String totalCountWatermarkKey = String.format(STATE_TOTAL_COUNT_COMPLETION_WATERMARK_KEY_OF_TABLE,
params.tableMetadata.table.get().name().toLowerCase(Locale.ROOT));
Assert.assertEquals(params.state.getProp(totalCountWatermarkKey), String.valueOf(expected));
}
// When total-count tracking is disabled, none of the three sinks may carry a value.
static void validateEmptyTotalCompletionWatermark(TestParams params) {
Assert.assertEquals(params.tableMetadata.totalCountCompletionWatermark, DEFAULT_COMPLETION_WATERMARK);
Assert.assertNull(params.props.get(TOTAL_COUNT_COMPLETION_WATERMARK_KEY));
String totalCountWatermarkKey = String.format(STATE_TOTAL_COUNT_COMPLETION_WATERMARK_KEY_OF_TABLE,
params.tableMetadata.table.get().name().toLowerCase(Locale.ROOT));
Assert.assertNull(params.state.getProp(totalCountWatermarkKey));
}
// Bundle of the three mutable sinks the updater writes to.
static class TestParams {
IcebergMetadataWriter.TableMetadata tableMetadata;
Map<String, String> props;
State state;
}
static TestParams createTestParams() throws IOException {
TestParams params = new TestParams();
params.tableMetadata = new IcebergMetadataWriter.TableMetadata(new Configuration());
Table table = mock(Table.class);
when(table.name()).thenReturn(TABLE_NAME);
params.tableMetadata.table = Optional.of(table);
params.props = new HashMap<>();
params.state = new State();
return params;
}
// One mocked verification window [start, end) with its per-type completeness outcome.
static class AuditCountVerificationResult {
AuditCountVerificationResult(ZonedDateTime start, ZonedDateTime end, boolean isCompleteClassic, boolean isCompleteTotalCount) {
this.start = start;
this.end = end;
this.isCompleteClassic = isCompleteClassic;
this.isCompleteTotalCount = isCompleteTotalCount;
}
ZonedDateTime start;
ZonedDateTime end;
boolean isCompleteClassic;
boolean isCompleteTotalCount;
}
// Builds a verifier mock that answers calculateCompleteness for exactly the given windows.
static KafkaAuditCountVerifier mockKafkaAuditCountVerifier(List<AuditCountVerificationResult> resultsToMock)
throws IOException {
KafkaAuditCountVerifier verifier = mock(IcebergMetadataWriterTest.TestAuditCountVerifier.class);
for (AuditCountVerificationResult result : resultsToMock) {
Mockito.when(verifier.calculateCompleteness(TOPIC, result.start.toInstant().toEpochMilli(), result.end.toInstant().toEpochMilli()))
.thenReturn(ImmutableMap.of(
KafkaAuditCountVerifier.CompletenessType.ClassicCompleteness, result.isCompleteClassic,
KafkaAuditCountVerifier.CompletenessType.TotalCountCompleteness, result.isCompleteTotalCount));
}
return verifier;
}
}
| 1,752 |
0 | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/test/java/org/apache/gobblin/iceberg/writer/HiveMetadataWriterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumWriter;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.iceberg.hive.HiveMetastoreTest;
import org.apache.iceberg.hive.TestHiveMetastore;
import org.apache.thrift.TException;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeSuite;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;
import org.apache.gobblin.hive.HiveMetastoreClientPool;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.spec.SimpleHiveSpec;
import org.apache.gobblin.hive.writer.HiveMetadataWriter;
import org.apache.gobblin.metadata.DataFile;
import org.apache.gobblin.metadata.DataMetrics;
import org.apache.gobblin.metadata.DataOrigin;
import org.apache.gobblin.metadata.DatasetIdentifier;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamTestUtils;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamingExtractor;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.function.CheckedExceptionFunction;
import static org.mockito.Mockito.eq;
public class HiveMetadataWriterTest extends HiveMetastoreTest {
org.apache.avro.Schema avroDataSchema = SchemaBuilder.record("test")
.fields()
.name("id")
.type()
.longType()
.noDefault()
.name("data")
.type()
.optional()
.stringType()
.endRecord();
org.apache.avro.Schema _avroPartitionSchema;
private String dbName = "hivedb";
private String dedupedDbName = "hivedb_deduped";
private String tableName = "testTable";
private GobblinMCEWriter gobblinMCEWriter;
GobblinMetadataChangeEvent.Builder gmceBuilder;
GobblinMetadataChangeEvent gmce;
static File tmpDir;
static File dataDir;
static File hourlyDataFile_2;
static File hourlyDataFile_1;
static File dailyDataFile;
HiveMetastoreClientPool hc;
IMetaStoreClient client;
private static TestHiveMetastore testHiveMetastore;
// Suite teardown: close the writer first, then schedule the temp dir for deletion,
// and finally stop the embedded Hive metastore.
@AfterSuite
public void clean() throws Exception {
gobblinMCEWriter.close();
FileUtils.forceDeleteOnExit(tmpDir);
//Finally stop the metaStore
stopMetastore();
}
/**
 * Suite setup: starts the embedded (Derby-backed) Hive metastore, creates the primary and
 * "deduped" databases, writes three Avro data files (two hourly partitions and one daily),
 * builds the baseline add_files GMCE, and constructs the {@link GobblinMCEWriter} under test.
 */
@BeforeSuite
public void setUp() throws Exception {
// Ensure the Derby JDBC driver backing the embedded metastore is loaded.
Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
try {
startMetastore();
} catch (AlreadyExistsException ignored) { }
State state = ConfigUtils.configToState(ConfigUtils.propertiesToConfig(hiveConf.getAllProperties()));
Optional<String> metastoreUri = Optional.fromNullable(state.getProperties().getProperty(HiveRegister.HIVE_METASTORE_URI_KEY));
hc = HiveMetastoreClientPool.get(state.getProperties(), metastoreUri);
client = hc.getClient().get();
tmpDir = Files.createTempDir();
// Create the primary database if this is the first suite run against the metastore.
try {
client.getDatabase(dbName);
} catch (NoSuchObjectException e) {
client.createDatabase(
new Database(dbName, "database", tmpDir.getAbsolutePath() + "/metastore", Collections.emptyMap()));
}
// Likewise for the additional ("deduped") database used by the rewrite/delete tests.
try {
client.getDatabase(dedupedDbName);
} catch (NoSuchObjectException e) {
client.createDatabase(
new Database(dedupedDbName, "dedupeddatabase", tmpDir.getAbsolutePath() + "/metastore_deduped", Collections.emptyMap()));
}
// Two hourly files (08 and 09) plus one daily file; the rewrite test replaces the
// hourly partitions with the daily one.
hourlyDataFile_1 = new File(tmpDir, "testDB/testTable/hourly/2020/03/17/08/data.avro");
Files.createParentDirs(hourlyDataFile_1);
hourlyDataFile_2 = new File(tmpDir, "testDB/testTable/hourly/2020/03/17/09/data.avro");
Files.createParentDirs(hourlyDataFile_2);
dailyDataFile = new File(tmpDir, "testDB/testTable/daily/2020/03/17/data.avro");
Files.createParentDirs(dailyDataFile);
dataDir = new File(hourlyDataFile_1.getParent());
Assert.assertTrue(dataDir.exists());
writeRecord(hourlyDataFile_1);
writeRecord(hourlyDataFile_2);
writeRecord(dailyDataFile);
Map<String, String> registrationState = new HashMap();
registrationState.put("hive.database.name", dbName);
// Baseline add_files GMCE pointing at the first hourly file; individual tests mutate it.
gmceBuilder = GobblinMetadataChangeEvent.newBuilder()
.setDatasetIdentifier(DatasetIdentifier.newBuilder()
.setDataOrigin(DataOrigin.EI)
.setDataPlatformUrn("urn:namespace:dataPlatform:hdfs")
.setNativeName("/testDB/testTable")
.build())
.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopic-1", "0-1000").build())
.setFlowId("testFlow")
.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
.setFilePath(hourlyDataFile_1.toString())
.setFileFormat("avro")
.setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
.build()))
.setSchemaSource(SchemaSource.EVENT)
.setOperationType(OperationType.add_files)
.setTableSchema(avroDataSchema.toString())
.setCluster(ClustersNames.getInstance().getClusterName())
.setPartitionColumns(Lists.newArrayList("testpartition"))
.setRegistrationPolicy(TestHiveRegistrationPolicy.class.getName())
.setRegistrationProperties(registrationState)
.setAllowedMetadataWriters(Collections.singletonList(TestHiveMetadataWriter.class.getName()));
gmce = gmceBuilder.build();
// Use a mock schema registry and route writes through the test Hive metadata writer.
state.setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS,
KafkaStreamTestUtils.MockSchemaRegistry.class.getName());
state.setProp("default.hive.registration.policy",
TestHiveRegistrationPolicy.class.getName());
state.setProp("gmce.metadata.writer.classes", TestHiveMetadataWriter.class.getName());
gobblinMCEWriter = new GobblinMCEWriter(new GobblinMCEWriterBuilder(), state);
}
/**
 * Verifies that add_files GMCEs register the table and one Hive partition per written
 * hourly file, and that flushing twice (with a second file in between) works.
 */
@Test
public void testHiveWriteAddFileGMCE() throws IOException {
// Two envelopes for the same GMCE at increasing watermarks, then flush.
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(gmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(10L))));
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(gmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(20L))));
gobblinMCEWriter.flush();
/*test flush twice*/
gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopic-1", "2000-3000").build());
gmce.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
.setFilePath(hourlyDataFile_2.toString())
.setFileFormat("avro")
.setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
.build()));
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(gmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(30L))));
gobblinMCEWriter.flush();
//Test Hive writer can register partition
try {
Assert.assertTrue(client.tableExists("hivedb", "testTable"));
Assert.assertTrue(client.getPartition("hivedb", "testTable",Lists.newArrayList("2020-03-17-09")) != null);
Assert.assertTrue(client.getPartition("hivedb", "testTable",Lists.newArrayList("2020-03-17-08")) != null);
} catch (TException e) {
throw new IOException(e);
}
}
/**
 * Verifies a rewrite_files GMCE: the hourly partitions registered by
 * {@link #testHiveWriteAddFileGMCE()} are de-registered, the new daily partition
 * (2020-03-17-00) is registered, and the table is additionally registered in the
 * configured extra database ({@code dedupedDbName}).
 */
@Test(dependsOnMethods = {"testHiveWriteAddFileGMCE"}, groups={"hiveMetadataWriterTest"})
public void testHiveWriteRewriteFileGMCE() throws IOException {
gmce.setTopicPartitionOffsetsRange(null);
Map<String, String> registrationState = gmce.getRegistrationProperties();
// Also register into the deduped database, sourcing the schema from the primary db.
registrationState.put("additional.hive.database.names", dedupedDbName);
registrationState.put(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB, dbName);
gmce.setRegistrationProperties(registrationState);
gmce.setSchemaSource(SchemaSource.NONE);
// Old-file prefixes cover both hourly partition directories written by the previous test.
String filePath = new Path(hourlyDataFile_1.getParentFile().getAbsolutePath()).toString();
String filePath_1 = new Path(hourlyDataFile_2.getParentFile().getAbsolutePath()).toString();
DataFile dailyFile = DataFile.newBuilder()
    .setFilePath(dailyDataFile.toString())
    .setFileFormat("avro")
    .setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
    .build();
gmce.setNewFiles(Lists.newArrayList(dailyFile));
gmce.setOldFilePrefixes(Lists.newArrayList(filePath, filePath_1));
gmce.setOperationType(OperationType.rewrite_files);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(gmce,
    new KafkaStreamingExtractor.KafkaWatermark(
        new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
        new LongWatermark(40L))));
gobblinMCEWriter.flush();
//Test hive writer re-write operation can de-register old partitions and register new one
try {
  Assert.assertNotNull(client.getPartition(dbName, "testTable", Lists.newArrayList("2020-03-17-00")));
  // Test additional table been registered
  Assert.assertTrue(client.tableExists(dedupedDbName, "testTable"));
} catch (TException e) {
  throw new IOException(e);
}
// The old hourly partition must be gone. Expect the specific metastore exception rather
// than any Throwable, consistent with testHiveWriteDeleteFileGMCE.
Assert.assertThrows(NoSuchObjectException.class, () ->
    client.getPartition(dbName, "testTable", Lists.newArrayList("2020-03-17-08")));
}
/**
 * Goal: General test for de-registering a partition created in
 * {@link HiveMetadataWriterTest#testHiveWriteRewriteFileGMCE()}
 *
 * A {@code drop_files} GMCE should drop the matching partitions from both the primary and the
 * additional database, leave the tables themselves registered, and be a no-op (not an error)
 * when the partitions are already gone.
 */
@Test(dependsOnMethods = {"testHiveWriteRewriteFileGMCE"}, groups={"hiveMetadataWriterTest"})
public void testHiveWriteDeleteFileGMCE() throws IOException, TException {
// partitions should exist from the previous test
Assert.assertNotNull(client.getPartition(dbName, "testTable", Lists.newArrayList("2020-03-17-00")));
Assert.assertNotNull(client.getPartition(dedupedDbName, "testTable", Lists.newArrayList("2020-03-17-00")));
gmce.setTopicPartitionOffsetsRange(null);
Map<String, String> registrationState = gmce.getRegistrationProperties();
registrationState.put("additional.hive.database.names", dedupedDbName);
registrationState.put(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB, dbName);
gmce.setRegistrationProperties(registrationState);
gmce.setSchemaSource(SchemaSource.NONE);
// Drop everything under the daily file's path prefix.
gmce.setOldFilePrefixes(Lists.newArrayList(dailyDataFile.toString()));
gmce.setOperationType(OperationType.drop_files);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(gmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(40L))));
gobblinMCEWriter.flush();
// Partition created in previous test should now be dropped in the DB and the additional DB
Assert.assertThrows(NoSuchObjectException.class, () ->
client.getPartition(dbName, "testTable",Lists.newArrayList("2020-03-17-00")));
Assert.assertThrows(NoSuchObjectException.class, () ->
client.getPartition(dedupedDbName, "testTable",Lists.newArrayList("2020-03-17-00")));
// Test additional table still registered, since this operation should only drop partitions but not table
Assert.assertTrue(client.tableExists(dedupedDbName, "testTable"));
Assert.assertTrue(client.tableExists(dbName, "testTable"));
// dropping a partition that does not exist anymore should be safe
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(gmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(40L))));
}
/**
 * Goal: to ensure that errors when creating a table do not bubble up any exceptions (which
 * would otherwise cause the container to fail and metadata registration to be blocked).
 * Writing an add-files GMCE against a database that does not exist must return normally.
 */
@Test(dependsOnMethods = {"testHiveWriteDeleteFileGMCE"}, groups={"hiveMetadataWriterTest"})
public void testHiveWriteSwallowsExceptionOnCreateTable() throws IOException {
gmce.setOperationType(OperationType.add_files);
// Target a database that was never created so table registration is guaranteed to fail.
HiveTable missingDbTable = new HiveTable.Builder()
    .withDbName("dbWhichDoesNotExist")
    .withTableName("testTable")
    .build();
HiveSpec specForMissingDb =
    new SimpleHiveSpec.Builder(new org.apache.hadoop.fs.Path("pathString"))
        .withTable(missingDbTable)
        .build();
HiveMetadataWriter writerUnderTest = (HiveMetadataWriter) gobblinMCEWriter.getMetadataWriters().get(0);
// The call must not propagate the underlying metastore failure.
writerUnderTest.write(gmce, null, null, specForMissingDb, "someTopicPartition");
}
/**
 * A drop_files GMCE with no old file prefixes has nothing to delete, so the writer should not
 * touch the hive register at all (i.e. dropping files must never create a table).
 *
 * NOTE(review): {@code mockRegister} is never injected into {@code hiveWriter}, so
 * {@code verifyNoInteractions} passes vacuously regardless of writer behavior — consider
 * wiring the mock into the writer (or asserting on the writer's own register) to make this
 * test meaningful. TODO confirm against HiveMetadataWriter's construction.
 */
@Test(dependsOnMethods = {"testHiveWriteSwallowsExceptionOnCreateTable"}, groups={"hiveMetadataWriterTest"})
public void testDropFilesDoesNotCreateTable() throws IOException {
HiveMetadataWriter hiveWriter = (HiveMetadataWriter) gobblinMCEWriter.getMetadataWriters().get(0);
HiveRegister mockRegister = Mockito.mock(HiveRegister.class);
HiveSpec spec = new SimpleHiveSpec.Builder(new org.apache.hadoop.fs.Path("pathString"))
.withTable(new HiveTable.Builder().withDbName("stubDB").withTableName("stubTable").build()).build();
// Since there are no old file prefixes, there are no files to delete. And the writer shouldn't touch the hive register
// i.e. dropping files will not create a table
gmce.setOperationType(OperationType.drop_files);
gmce.setOldFilePrefixes(null);
hiveWriter.write(gmce, null, null, spec, "someTopicPartition");
Mockito.verifyNoInteractions(mockRegister);
}
/**
 * Goal: Ensure the logic for always using the latest schema in Hive table is working properly:
 * <ul>
 *   <li>deny listed topics should fetch the schema once, and then use a cached version for all future calls</li>
 *   <li>allow listed topics should fetch the schema each time</li>
 *   <li>a table whose serde props lack a schema literal should raise {@link IllegalStateException}</li>
 * </ul>
 */
@Test
public void testUpdateLatestSchemaWithExistingSchema() throws IOException {
final String tableNameAllowed = "tableAllowed";
final String tableNameDenied = "tableDenied";
// Whitelist = always re-fetch from hive; blacklist = fetch once then cache.
final WhitelistBlacklist useExistingTableSchemaAllowDenyList = new WhitelistBlacklist(
"hivedb.tableAllowed", "hivedb.tableDenied", true);
final HiveRegister hiveRegister = Mockito.mock(HiveRegister.class);
final HashMap<String, String> latestSchemaMap = new HashMap<>();
final Function<String,String> getTableKey = (tableName) -> String.format("%s.%s", dbName, tableName);
final HiveTable mockTable = Mockito.mock(HiveTable.class);
final State avroSchemaProp = Mockito.mock(State.class);
final String avroSchema = "avro schema";
// Both tables resolve to the same mock table whose serde props carry the schema literal.
Mockito.when(hiveRegister.getTable(eq(dbName), eq(tableNameAllowed))).thenReturn(Optional.of(mockTable));
Mockito.when(hiveRegister.getTable(eq(dbName), eq(tableNameDenied))).thenReturn(Optional.of(mockTable));
Mockito.when(avroSchemaProp.getProp(eq(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName()))).thenReturn(avroSchema);
Mockito.when(mockTable.getSerDeProps()).thenReturn(avroSchemaProp);
CheckedExceptionFunction<String, Boolean, IOException> updateLatestSchema = (tableName) ->
TestHiveMetadataWriter.updateLatestSchemaMapWithExistingSchema(dbName, tableName, getTableKey.apply(tableName),
useExistingTableSchemaAllowDenyList, hiveRegister, latestSchemaMap);
// Tables part of deny list, schema only fetched from hive on the first time and the all future calls will use the cache
Assert.assertTrue(updateLatestSchema.apply(tableNameDenied));
Assert.assertFalse(updateLatestSchema.apply(tableNameDenied));
Assert.assertEquals(latestSchemaMap, ImmutableMap.of(
getTableKey.apply(tableNameDenied), avroSchema
));
Mockito.verify(hiveRegister, Mockito.times(1)).getTable(eq(dbName), eq(tableNameDenied));
// For tables included in the allow list, hive should be called and schema map should be updated with the latest schema
Assert.assertTrue(updateLatestSchema.apply(tableNameAllowed));
Assert.assertEquals(latestSchemaMap, ImmutableMap.of(
getTableKey.apply(tableNameAllowed), avroSchema,
getTableKey.apply(tableNameDenied), avroSchema
));
Assert.assertTrue(updateLatestSchema.apply(tableNameAllowed));
Mockito.verify(hiveRegister, Mockito.times(2)).getTable(eq(dbName), eq(tableNameAllowed));
// A table without a schema literal in its serde props is a configuration error.
HiveTable tableThatHasNoSchemaLiteral = Mockito.mock(HiveTable.class);
String nameOfTableThatHasNoSchemaLiteral = "improperlyConfiguredTable";
Mockito.when(hiveRegister.getTable(eq(dbName), eq(nameOfTableThatHasNoSchemaLiteral))).thenReturn(Optional.of(tableThatHasNoSchemaLiteral));
Mockito.when(tableThatHasNoSchemaLiteral.getSerDeProps()).thenReturn(new State());
Assert.assertThrows(IllegalStateException.class, () -> updateLatestSchema.apply(nameOfTableThatHasNoSchemaLiteral));
}
/**
 * Topic-name extraction must yield the same topic regardless of how many kafka-identifier
 * prefixes precede the topic in the offset-range key, and must strip the partition suffix.
 */
@Test
public void testGetTopicName() {
final String expectedTopicName = "123-topic-Name-123_v2";
// Builds a GMCE whose offset-range map has a single key produced from the given format.
Function<String, GobblinMetadataChangeEvent> buildGmceWithOffsetKey = (keyFormat) -> {
  Map<String, String> offsets = new HashMap<>();
  offsets.put(String.format(keyFormat, expectedTopicName), "0-100");
  return GobblinMetadataChangeEvent.newBuilder(gmceBuilder).setTopicPartitionOffsetsRange(offsets).build();
};
TestHiveMetadataWriter writerUnderTest = (TestHiveMetadataWriter) gobblinMCEWriter.getMetadataWriters().get(0);
// Bare key, one-level prefix, and two-level prefix must all resolve to the same topic name.
for (String keyFormat : new String[] {"%s-0", "kafkaIdentifier.%s-0", "kafkaIdentifier.foobar.%s-0"}) {
  Assert.assertEquals(writerUnderTest.getTopicName(buildGmceWithOffsetKey.apply(keyFormat)), expectedTopicName);
}
}
/**
 * Writes a single avro record ({@code id=1L, data="data"}) with {@link #avroDataSchema} to the
 * given file and returns the file's path as a string.
 *
 * @param file destination file (parent directories must already exist)
 * @return {@code file.toString()}
 * @throws IOException if the avro file cannot be created or written
 */
private String writeRecord(File file) throws IOException {
GenericData.Record record = new GenericData.Record(avroDataSchema);
record.put("id", 1L);
record.put("data", "data");
// try-with-resources guarantees the writer is closed (and the file flushed) even if
// create/append throws — the previous version leaked the writer on failure.
try (DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(new GenericDatumWriter<>())) {
  dataFileWriter.create(avroDataSchema, file);
  dataFileWriter.append(record);
}
return file.toString();
}
/**
 * Test class for exposing internal {@link HiveMetadataWriter} functions without making them public.
 * Although the ultimate fix would be to break up the logic in the hive metadata writer into smaller pieces,
 * this a stop gap way to make testing internal logic easier.
 *
 * This approach was taken because the writer lives in a separate module from this test class, and dependencies make
 * putting the test and implementation classes in the same module difficult
 */
public static class TestHiveMetadataWriter extends HiveMetadataWriter {
public TestHiveMetadataWriter(State state) throws IOException {
super(state);
}
/** Widens visibility of {@link HiveMetadataWriter}'s topic-name extraction for tests. */
public String getTopicName(GobblinMetadataChangeEvent gmce) {
return super.getTopicName(gmce);
}
/** Widens visibility of the latest-schema-map update helper for tests; simply delegates. */
public static boolean updateLatestSchemaMapWithExistingSchema(
String dbName,
String tableName,
String tableKey,
WhitelistBlacklist useExistingTableSchemaAllowDenyList,
HiveRegister hiveRegister,
HashMap<String, String> latestSchemaMap
) throws IOException{
return HiveMetadataWriter.updateLatestSchemaMapWithExistingSchema(dbName,
tableName,
tableKey,
useExistingTableSchemaAllowDenyList,
hiveRegister,
latestSchemaMap
);
}
}
/**
 * Registration policy stub: maps the known test data paths onto fixed partition values, forces
 * every table to a single string "testpartition" key located at the shared tmp dir, and always
 * registers under the table name "testTable".
 */
public static class TestHiveRegistrationPolicy extends HiveRegistrationPolicyBase {

public TestHiveRegistrationPolicy(State props) throws IOException {
  super(props);
}

protected Optional<HivePartition> getPartition(Path path, HiveTable table) throws IOException {
  String location = path.toString();
  HivePartition partition = new HivePartition.Builder()
      .withPartitionValues(Lists.newArrayList(partitionValueForPath(location)))
      .withDbName(table.getDbName())
      .withTableName(table.getTableName())
      .build();
  partition.setLocation(location);
  return Optional.of(partition);
}

/** Maps the known hourly/daily test paths to their hard-coded partition values; "" otherwise. */
private static String partitionValueForPath(String pathString) {
  if (pathString.contains("hourly/2020/03/17/08")) {
    return "2020-03-17-08";
  }
  if (pathString.contains("hourly/2020/03/17/09")) {
    return "2020-03-17-09";
  }
  if (pathString.contains("daily/2020/03/17")) {
    return "2020-03-17-00";
  }
  return "";
}

@Override
protected List<HiveTable> getTables(Path path) throws IOException {
  List<HiveTable> tables = super.getTables(path);
  // Every table gets the same single string partition key and is rooted at the shared tmp dir.
  tables.forEach(table -> {
    table.setPartitionKeys(ImmutableList.<HiveRegistrationUnit.Column>of(
        new HiveRegistrationUnit.Column("testpartition", serdeConstants.STRING_TYPE_NAME, StringUtils.EMPTY)));
    table.setLocation(tmpDir.getAbsolutePath());
  });
  return tables;
}

protected List<String> getTableNames(Optional<String> dbPrefix, Path path) {
  return Lists.newArrayList("testTable");
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import java.io.File;
import java.io.IOException;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.TimeUnit;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.specific.SpecificData;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.iceberg.FindFiles;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.hive.HiveMetastoreTest;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.gobblin.completeness.audit.AuditCountClient;
import org.apache.gobblin.completeness.audit.AuditCountClientFactory;
import org.apache.gobblin.completeness.audit.TestAuditClientFactory;
import org.apache.gobblin.completeness.verifier.KafkaAuditCountVerifier;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.hive.writer.MetadataWriter;
import org.apache.gobblin.hive.writer.MetadataWriterKeys;
import org.apache.gobblin.metadata.DataFile;
import org.apache.gobblin.metadata.DataMetrics;
import org.apache.gobblin.metadata.DataOrigin;
import org.apache.gobblin.metadata.DatasetIdentifier;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaPartition;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamTestUtils;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaStreamingExtractor;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.ConfigUtils;
import static org.apache.gobblin.iceberg.writer.IcebergMetadataWriterConfigKeys.*;
public class IcebergMetadataWriterTest extends HiveMetastoreTest {
// Avro schema for the test data files: a required long "id" plus an optional string "data".
org.apache.avro.Schema avroDataSchema = SchemaBuilder.record("test")
.fields()
.name("id")
.type()
.longType()
.noDefault()
.name("data")
.type()
.optional()
.stringType()
.endRecord();
// Schema carrying the optional "ds" partition column; assigned in setUp().
org.apache.avro.Schema _avroPartitionSchema;
// Hive database all test tables are registered under.
private String dbName = "hivedb";
// Writer under test, plus variants with completeness verification and a cluster allow-list.
private GobblinMCEWriter gobblinMCEWriter;
private GobblinMCEWriter gobblinMCEWriterWithCompletness;
private GobblinMCEWriter gobblinMCEWriterWithAcceptClusters;
// Mutable GMCE template that the ordered test methods share and mutate in sequence.
GobblinMetadataChangeEvent gmce;
static File tmpDir;
static File dataDir;
static File hourlyDataFile_2;
static File hourlyDataFile_1;
static File dailyDataFile;
// Collects events submitted through the mocked EventSubmitter (see setUp()).
List<GobblinEventBuilder> eventsSent = new ArrayList<>();
/**
 * Closes every writer opened in {@link #setUp()} and schedules the temp data dir for removal.
 */
@AfterClass
public void clean() throws Exception {
gobblinMCEWriter.close();
// Fix: this writer was opened in setUp() but previously never closed (resource leak).
gobblinMCEWriterWithCompletness.close();
gobblinMCEWriterWithAcceptClusters.close();
FileUtils.forceDeleteOnExit(tmpDir);
}
/**
 * Creates the shared temp data files (two hourly, one daily), builds the baseline GMCE
 * template, and constructs the three writer variants (plain, completeness-enabled, and
 * cluster-allow-list) used by the ordered tests. Also mocks the event submitter so emitted
 * events are captured in {@link #eventsSent}.
 */
@BeforeClass
public void setUp() throws Exception {
// Embedded Derby backs the test hive metastore.
Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
tmpDir = Files.createTempDir();
hourlyDataFile_1 = new File(tmpDir, "testDB/testTopic/hourly/2020/03/17/08/data.avro");
Files.createParentDirs(hourlyDataFile_1);
hourlyDataFile_2 = new File(tmpDir, "testDB/testTopic/hourly/2020/03/17/09/data.avro");
Files.createParentDirs(hourlyDataFile_2);
dailyDataFile = new File(tmpDir, "testDB/testTopic/daily/2020/03/17/data.avro");
Files.createParentDirs(dailyDataFile);
dataDir = new File(hourlyDataFile_1.getParent());
Assert.assertTrue(dataDir.exists());
writeRecord(hourlyDataFile_1);
writeRecord(hourlyDataFile_2);
writeRecord(dailyDataFile);
// Baseline GMCE: add the first hourly file, restricted to the iceberg metadata writer only.
gmce = GobblinMetadataChangeEvent.newBuilder()
.setDatasetIdentifier(DatasetIdentifier.newBuilder()
.setDataOrigin(DataOrigin.EI)
.setDataPlatformUrn("urn:namespace:dataPlatform:hdfs")
.setNativeName(new File(tmpDir, "testDB/testTopic").getAbsolutePath())
.build())
.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopic-1", "0-1000").build())
.setFlowId("testFlow")
.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
.setFilePath(hourlyDataFile_1.toString())
.setFileFormat("avro")
.setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
.build()))
.setSchemaSource(SchemaSource.EVENT)
.setOperationType(OperationType.add_files)
.setTableSchema(avroDataSchema.toString())
.setCluster(ClustersNames.getInstance().getClusterName())
.setPartitionColumns(Lists.newArrayList("testpartition"))
.setRegistrationPolicy(TestHiveRegistrationPolicyForIceberg.class.getName())
.setRegistrationProperties(ImmutableMap.<String, String>builder().put("hive.database.name", dbName).build())
.setAllowedMetadataWriters(Arrays.asList(IcebergMetadataWriter.class.getName()))
.build();
State state = getState();
gobblinMCEWriter = new GobblinMCEWriter(new GobblinMCEWriterBuilder(), state);
// Point the iceberg writer at the shared in-test hive catalog.
((IcebergMetadataWriter) gobblinMCEWriter.getMetadataWriters().iterator().next()).setCatalog(
HiveMetastoreTest.catalog);
State stateWithCompletenessConfig = getStateWithCompletenessConfig();
gobblinMCEWriterWithCompletness = new GobblinMCEWriter(new GobblinMCEWriterBuilder(), stateWithCompletenessConfig);
((IcebergMetadataWriter) gobblinMCEWriterWithCompletness.getMetadataWriters().iterator().next()).setCatalog(
HiveMetastoreTest.catalog);
// This writer only accepts GMCEs from "randomCluster", so events from this cluster are skipped.
state.setProp(GobblinMCEWriter.ACCEPTED_CLUSTER_NAMES, "randomCluster");
gobblinMCEWriterWithAcceptClusters = new GobblinMCEWriter(new GobblinMCEWriterBuilder(), state);
_avroPartitionSchema =
SchemaBuilder.record("partitionTest").fields().name("ds").type().optional().stringType().endRecord();
// Capture submitted events instead of emitting them.
gobblinMCEWriter.eventSubmitter = Mockito.mock(EventSubmitter.class);
Mockito.doAnswer(invocation -> eventsSent.add(invocation.getArgument(0, GobblinEventBuilder.class)))
.when(gobblinMCEWriter.eventSubmitter).submit(Mockito.any(GobblinEventBuilder.class));
}
/**
 * Builds the baseline writer {@link State}: the hive conf properties plus a mock schema
 * registry, the iceberg test registration policy, and data-path-based table locations.
 */
private State getState() {
State baseState = ConfigUtils.configToState(ConfigUtils.propertiesToConfig(hiveConf.getAllProperties()));
baseState.setProp("use.data.path.as.table.location", true);
baseState.setProp("default.hive.registration.policy",
    TestHiveRegistrationPolicyForIceberg.class.getName());
baseState.setProp(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS,
    KafkaStreamTestUtils.MockSchemaRegistry.class.getName());
return baseState;
}
/**
 * Extends {@link #getState()} with completeness-verification settings: the "late" int
 * partition column, completeness and total-count completeness enabled, and a stub
 * audit-count client with gobblin→producer tier comparison.
 */
private State getStateWithCompletenessConfig() {
State completenessState = getState();
completenessState.setProp(ICEBERG_COMPLETENESS_ENABLED, true);
completenessState.setProp(ICEBERG_TOTAL_COUNT_COMPLETENESS_ENABLED, true);
completenessState.setProp(ICEBERG_NEW_PARTITION_ENABLED, true);
completenessState.setProp(NEW_PARTITION_KEY, "late");
completenessState.setProp(NEW_PARTITION_TYPE_KEY, "int");
completenessState.setProp(AuditCountClientFactory.AUDIT_COUNT_CLIENT_FACTORY, TestAuditClientFactory.class.getName());
completenessState.setProp(KafkaAuditCountVerifier.SOURCE_TIER, "gobblin");
completenessState.setProp(KafkaAuditCountVerifier.REFERENCE_TIERS, "producer");
return completenessState;
}
/**
 * End-to-end add_files flow for the iceberg writer: cluster filtering skips foreign-cluster
 * events, the table is created on first write, offset ranges and GMCE low/high watermarks are
 * tracked across flushes, and events whose watermark does not advance are ignored.
 */
@Test(dependsOnGroups={"hiveMetadataWriterTest"})
public void testWriteAddFileGMCE() throws IOException {
// Creating a copy of gmce with static type in GenericRecord to work with writeEnvelop method
// without risking running into type cast runtime error.
GenericRecord genericGmce = GenericData.get().deepCopy(gmce.getSchema(), gmce);
gobblinMCEWriterWithAcceptClusters.writeEnvelope(new RecordEnvelope<>(genericGmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(10L))));
//Test when accept clusters does not contain the gmce cluster, we will skip
Assert.assertEquals(catalog.listTables(Namespace.of(dbName)).size(), 0);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(10L))));
// First accepted write creates the table; properties are not published until flush.
Assert.assertEquals(catalog.listTables(Namespace.of(dbName)).size(), 1);
Table table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
Assert.assertFalse(table.properties().containsKey("offset.range.testTopic-1"));
Assert.assertEquals(table.location(),
new File(tmpDir, "testDB/testTopic/_iceberg_metadata/").getAbsolutePath() + "/" + dbName);
Assert.assertFalse(table.properties().containsKey(COMPLETION_WATERMARK_KEY));
Assert.assertFalse(table.properties().containsKey(TOTAL_COUNT_COMPLETION_WATERMARK_KEY));
gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopic-1", "1000-2000").build());
GenericRecord genericGmce_1000_2000 = GenericData.get().deepCopy(gmce.getSchema(), gmce);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce_1000_2000,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(20L))));
gobblinMCEWriter.flush();
// After flush the adjacent offset ranges are merged and one manifest is committed.
table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
Assert.assertEquals(table.properties().get("offset.range.testTopic-1"), "0-2000");
Assert.assertEquals(table.currentSnapshot().allManifests(table.io()).size(), 1);
// Assert low watermark and high watermark set properly
Assert.assertEquals(table.properties().get("gmce.low.watermark.GobblinMetadataChangeEvent_test-1"), "9");
Assert.assertEquals(table.properties().get("gmce.high.watermark.GobblinMetadataChangeEvent_test-1"), "20");
Assert.assertFalse(table.properties().containsKey(COMPLETION_WATERMARK_KEY));
Assert.assertFalse(table.properties().containsKey(TOTAL_COUNT_COMPLETION_WATERMARK_KEY));
/*test flush twice*/
gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopic-1", "2000-3000").build());
gmce.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
.setFilePath(hourlyDataFile_2.toString())
.setFileFormat("avro")
.setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
.build()));
GenericRecord genericGmce_2000_3000 = GenericData.get().deepCopy(gmce.getSchema(), gmce);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce_2000_3000,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(30L))));
gobblinMCEWriter.flush();
table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
Assert.assertEquals(table.properties().get("offset.range.testTopic-1"), "0-3000");
Assert.assertEquals(table.currentSnapshot().allManifests(table.io()).size(), 2);
Assert.assertEquals(table.properties().get("gmce.low.watermark.GobblinMetadataChangeEvent_test-1"), "20");
Assert.assertEquals(table.properties().get("gmce.high.watermark.GobblinMetadataChangeEvent_test-1"), "30");
Assert.assertFalse(table.properties().containsKey(COMPLETION_WATERMARK_KEY));
Assert.assertFalse(table.properties().containsKey(TOTAL_COUNT_COMPLETION_WATERMARK_KEY));
/* Test it will skip event with lower watermark*/
gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopic-1", "3000-4000").build());
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(30L))));
gobblinMCEWriter.flush();
// Watermark did not advance past 30, so offsets and manifests are unchanged.
table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
Assert.assertEquals(table.properties().get("offset.range.testTopic-1"), "0-3000");
Assert.assertEquals(table.currentSnapshot().allManifests(table.io()).size(), 2);
Assert.assertFalse(table.properties().containsKey(COMPLETION_WATERMARK_KEY));
Assert.assertFalse(table.properties().containsKey(TOTAL_COUNT_COMPLETION_WATERMARK_KEY));
}
//Make sure hive test execute later and close the metastore
/**
 * A rewrite_files GMCE must atomically swap the two hourly data files for the compacted daily
 * file in the iceberg table: afterwards only the daily file is findable and both hourly file
 * prefixes match nothing.
 */
@Test(dependsOnMethods={"testWriteAddFileGMCE"}, groups={"icebergMetadataWriterTest"})
public void testWriteRewriteFileGMCE() throws IOException {
gmce.setTopicPartitionOffsetsRange(null);
FileSystem fs = FileSystem.get(new Configuration());
String filePath = new Path(hourlyDataFile_1.getParentFile().getAbsolutePath()).toString();
String filePath_1 = new Path(hourlyDataFile_2.getParentFile().getAbsolutePath()).toString();
DataFile dailyFile = DataFile.newBuilder()
.setFilePath(dailyDataFile.toString())
.setFileFormat("avro")
.setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
.build();
gmce.setNewFiles(Lists.newArrayList(dailyFile));
gmce.setOldFilePrefixes(Lists.newArrayList(filePath, filePath_1));
gmce.setOperationType(OperationType.rewrite_files);
// Pre-condition: the second hourly file is still present in the table.
Table table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
Iterator<org.apache.iceberg.DataFile>
result = FindFiles.in(table).withMetadataMatching(Expressions.startsWith("file_path", filePath_1)).collect().iterator();
Assert.assertEquals(table.currentSnapshot().allManifests(table.io()).size(), 2);
Assert.assertTrue(result.hasNext());
GenericRecord genericGmce = GenericData.get().deepCopy(gmce.getSchema(), gmce);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(40L))));
gobblinMCEWriter.flush();
// Post-condition: only the daily file remains; both hourly prefixes match nothing.
table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
String dailyFilePath = new Path(dailyDataFile.toString()).toString();
result = FindFiles.in(table).withMetadataMatching(Expressions.startsWith("file_path", dailyFilePath)).collect().iterator();
Assert.assertEquals(result.next().path(), dailyFilePath);
Assert.assertFalse(result.hasNext());
result = FindFiles.in(table).withMetadataMatching(Expressions.startsWith("file_path", filePath)).collect().iterator();
Assert.assertFalse(result.hasNext());
result = FindFiles.in(table).withMetadataMatching(Expressions.startsWith("file_path", filePath_1)).collect().iterator();
Assert.assertFalse(result.hasNext());
}
/**
 * A change_property GMCE should only update table properties (offset range and GMCE low/high
 * watermarks) without committing a new manifest.
 */
@Test(dependsOnMethods={"testWriteRewriteFileGMCE"}, groups={"icebergMetadataWriterTest"} )
public void testChangeProperty() throws IOException {
// State left behind by the rewrite test: 3 manifests, offsets 0-3000, watermarks 30/40.
Table table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
Assert.assertEquals(table.properties().get("offset.range.testTopic-1"), "0-3000");
Assert.assertEquals(table.currentSnapshot().allManifests(table.io()).size(), 3);
Assert.assertEquals(table.properties().get("gmce.low.watermark.GobblinMetadataChangeEvent_test-1"), "30");
Assert.assertEquals(table.properties().get("gmce.high.watermark.GobblinMetadataChangeEvent_test-1"), "40");
gmce.setOldFilePrefixes(null);
DataFile dailyFile = DataFile.newBuilder()
.setFilePath(dailyDataFile.toString())
.setFileFormat("avro")
.setFileMetrics(DataMetrics.newBuilder().setRecordCount(0L).build())
.build();
gmce.setNewFiles(Lists.newArrayList(dailyFile));
gmce.setOperationType(OperationType.change_property)
;
gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopic-1", "2000-4000").build());
GenericRecord genericGmce = GenericData.get().deepCopy(gmce.getSchema(), gmce);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(45L))));
gobblinMCEWriter.flush();
table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(0));
// Assert the offset has been updated
Assert.assertEquals(table.properties().get("offset.range.testTopic-1"), "0-4000");
Assert.assertEquals(table.currentSnapshot().allManifests(table.io()).size(), 3);
// Assert low watermark and high watermark set properly
Assert.assertEquals(table.properties().get("gmce.low.watermark.GobblinMetadataChangeEvent_test-1"), "40");
Assert.assertEquals(table.properties().get("gmce.high.watermark.GobblinMetadataChangeEvent_test-1"), "45");
}
/**
 * With fault tolerance enabled (max one error dataset), a metadata writer that always throws
 * must not fail the container: errors accumulate per dataset, a failure event is emitted on
 * flush (with the GMCE low/high watermarks), the error state is cleared, and only when a
 * second dataset errors is the limit exceeded and an IOException raised.
 */
@Test(dependsOnMethods={"testWriteAddFileGMCECompleteness"}, groups={"icebergMetadataWriterTest"})
public void testFaultTolerant() throws Exception {
// Set fault tolerant dataset number to be 1
gobblinMCEWriter.setMaxErrorDataset(1);
// Add a mock writer that always throws exception so that write will fail
MetadataWriter mockWriter = Mockito.mock(MetadataWriter.class);
Mockito.doThrow(new IOException("Test failure")).when(mockWriter).writeEnvelope(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
gobblinMCEWriter.metadataWriters.add(0, mockWriter);
// Register write/flush timers for the injected writer so metric lookups do not fail.
gobblinMCEWriter.metadataWriterWriteTimers.put(mockWriter.getClass().getName(), gobblinMCEWriter.metricContext
.contextAwareTimer(mockWriter.getClass().getName() + ".write", 1, TimeUnit.HOURS));
gobblinMCEWriter.metadataWriterFlushTimers.put(mockWriter.getClass().getName(), gobblinMCEWriter.metricContext
.contextAwareTimer(mockWriter.getClass().getName() + ".flush", 1, TimeUnit.HOURS));
GobblinMetadataChangeEvent gmceWithMockWriter = SpecificData.get().deepCopy(gmce.getSchema(), gmce);
gmceWithMockWriter.setAllowedMetadataWriters(Arrays.asList(IcebergMetadataWriter.class.getName(), mockWriter.getClass().getName()));
GenericRecord genericGmce = GenericData.get().deepCopy(gmceWithMockWriter.getSchema(), gmceWithMockWriter);
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(51L))));
gobblinMCEWriter.writeEnvelope(new RecordEnvelope<>(genericGmce,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(52L))));
// Both failed writes fold into a single error entry for the one affected table.
Assert.assertEquals(gobblinMCEWriter.getDatasetErrorMap().size(), 1);
Assert.assertEquals(gobblinMCEWriter.getDatasetErrorMap().values().iterator().next().size(), 1);
Assert.assertEquals(gobblinMCEWriter.getDatasetErrorMap()
.get(new File(tmpDir, "testDB/testTopic").getAbsolutePath())
.get("hivedb.testTopicCompleteness").get(0).getMessage(), "failed to flush table hivedb, testTopicCompleteness");
// No events sent yet since the topic has not been flushed
Assert.assertEquals(eventsSent.size(), 0);
//We should not see exception as we have fault tolerant
gobblinMCEWriter.flush();
// Since this topic has been flushed, there should be an event sent for previous failure, and the table
// should be removed from the error map
Assert.assertEquals(eventsSent.size(), 1);
Assert.assertEquals(eventsSent.get(0).getMetadata().get(MetadataWriterKeys.TABLE_NAME_KEY), "testTopicCompleteness");
Assert.assertEquals(eventsSent.get(0).getMetadata().get(MetadataWriterKeys.GMCE_LOW_WATERMARK), "50");
Assert.assertEquals(eventsSent.get(0).getMetadata().get(MetadataWriterKeys.GMCE_HIGH_WATERMARK), "52");
Assert.assertEquals(gobblinMCEWriter.getDatasetErrorMap().values().iterator().next().size(), 0);
// A failure on a second dataset exceeds maxErrorDataset=1 and must surface as IOException.
gmceWithMockWriter.getDatasetIdentifier().setNativeName("testDB/testFaultTolerant");
GenericRecord genericGmce_differentDb = GenericData.get().deepCopy(gmceWithMockWriter.getSchema(), gmceWithMockWriter);
Assert.expectThrows(IOException.class, () -> gobblinMCEWriter.writeEnvelope((new RecordEnvelope<>(genericGmce_differentDb,
new KafkaStreamingExtractor.KafkaWatermark(
new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
new LongWatermark(54L))))));
// Remove the injected failing writer so later tests are unaffected.
gobblinMCEWriter.metadataWriters.remove(0);
}
  /**
   * Verifies completion-watermark handling of the Iceberg metadata writer for {@code add_files} GMCEs
   * across three flushes:
   * 1) bootstrap: watermark starts at -1 and advances to 2021-09-16-10 once the audit verifier reports complete;
   * 2) a late file for the already-complete hour 09 does not move the watermark and lands in the "late" partition;
   * 3) a file for hour 11 advances the watermark once that hour is verified complete.
   *
   * @throws IOException on write/flush failures
   */
  @Test(dependsOnMethods={"testChangeProperty"}, groups={"icebergMetadataWriterTest"})
  public void testWriteAddFileGMCECompleteness() throws IOException {
    // Creating a copy of gmce with static type in GenericRecord to work with writeEnvelop method
    // without risking running into type cast runtime error.
    gmce.setOperationType(OperationType.add_files);
    File hourlyFile = new File(tmpDir, "testDB/testTopicCompleteness/hourly/2021/09/16/10/data.avro");
    // 1631811600000L == 2021-09-16 10:00 America/Los_Angeles (the hour of hourlyFile's path)
    long timestampMillis = 1631811600000L;
    Files.createParentDirs(hourlyFile);
    writeRecord(hourlyFile);
    gmce.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
        .setFilePath(hourlyFile.toString())
        .setFileFormat("avro")
        .setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
        .build()));
    gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopicCompleteness-1", "3000-4000").build());
    GenericRecord genericGmce_3000_4000 = GenericData.get().deepCopy(gmce.getSchema(), gmce);
    gobblinMCEWriterWithCompletness.writeEnvelope(new RecordEnvelope<>(genericGmce_3000_4000,
        new KafkaStreamingExtractor.KafkaWatermark(
            new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
            new LongWatermark(50L))));
    Table table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(1));
    // The completeness-enabled table gets a second partition column ("late") in addition to the date partition.
    Assert.assertTrue(table.spec().fields().size() == 2);
    Assert.assertEquals(table.spec().fields().get(1).name(), "late");
    // Test when completeness watermark = -1 bootstrap case
    KafkaAuditCountVerifier verifier = Mockito.mock(TestAuditCountVerifier.class);
    // Report the (09:00, 10:00] window as fully complete so the watermark can bootstrap.
    Mockito.when(verifier.calculateCompleteness("testTopicCompleteness", timestampMillis - TimeUnit.HOURS.toMillis(1), timestampMillis))
        .thenReturn(ImmutableMap.of(KafkaAuditCountVerifier.CompletenessType.ClassicCompleteness, true,
            KafkaAuditCountVerifier.CompletenessType.TotalCountCompleteness, true));
    IcebergMetadataWriter imw = (IcebergMetadataWriter) gobblinMCEWriterWithCompletness.metadataWriters.iterator().next();
    imw.setAuditCountVerifier(verifier);
    gobblinMCEWriterWithCompletness.flush();
    table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(1));
    // completeness watermark = "2021-09-16-10"
    Assert.assertEquals(table.properties().get(TOPIC_NAME_KEY), "testTopic");
    Assert.assertEquals(table.properties().get(COMPLETION_WATERMARK_TIMEZONE_KEY), "America/Los_Angeles");
    Assert.assertEquals(table.properties().get(COMPLETION_WATERMARK_KEY), String.valueOf(timestampMillis));
    Assert.assertEquals(table.properties().get(TOTAL_COUNT_COMPLETION_WATERMARK_KEY), String.valueOf(timestampMillis));
    // 1631811600000L corresponds to 2021-09-16-10 in PT
    Assert.assertEquals(imw.state.getPropAsLong(String.format(STATE_COMPLETION_WATERMARK_KEY_OF_TABLE, table.name().toLowerCase(Locale.ROOT))), 1631811600000L);
    Assert.assertEquals(imw.state.getPropAsLong(String.format(STATE_TOTAL_COUNT_COMPLETION_WATERMARK_KEY_OF_TABLE, table.name().toLowerCase(Locale.ROOT))), 1631811600000L);
    Iterator<org.apache.iceberg.DataFile> dfl = FindFiles.in(table).withMetadataMatching(Expressions.startsWith("file_path", hourlyFile.getAbsolutePath())).collect().iterator();
    Assert.assertTrue(dfl.hasNext());
    // Test when completeness watermark is still "2021-09-16-10" but have a late file for "2021-09-16-09"
    File hourlyFile1 = new File(tmpDir, "testDB/testTopicCompleteness/hourly/2021/09/16/09/data1.avro");
    Files.createParentDirs(hourlyFile1);
    writeRecord(hourlyFile1);
    gmce.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
        .setFilePath(hourlyFile1.toString())
        .setFileFormat("avro")
        .setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
        .build()));
    gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopicCompleteness-1", "4000-5000").build());
    GenericRecord genericGmce_4000_5000 = GenericData.get().deepCopy(gmce.getSchema(), gmce);
    gobblinMCEWriterWithCompletness.writeEnvelope(new RecordEnvelope<>(genericGmce_4000_5000,
        new KafkaStreamingExtractor.KafkaWatermark(
            new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
            new LongWatermark(55L))));
    gobblinMCEWriterWithCompletness.flush();
    table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(1));
    // Watermark must NOT move: the new file belongs to an hour that was already marked complete.
    Assert.assertEquals(table.properties().get(COMPLETION_WATERMARK_KEY), String.valueOf(timestampMillis));
    Assert.assertEquals(table.properties().get(TOTAL_COUNT_COMPLETION_WATERMARK_KEY), String.valueOf(timestampMillis));
    dfl = FindFiles.in(table).withMetadataMatching(Expressions.startsWith("file_path", hourlyFile1.getAbsolutePath())).collect().iterator();
    Assert.assertTrue(dfl.hasNext());
    // late = 1 marks the file as arriving after its hour was declared complete.
    Assert.assertEquals((int) dfl.next().partition().get(1, Integer.class), 1);
    // Test when completeness watermark will advance to "2021-09-16-11"
    File hourlyFile2 = new File(tmpDir, "testDB/testTopicCompleteness/hourly/2021/09/16/11/data.avro");
    long timestampMillis1 = timestampMillis + TimeUnit.HOURS.toMillis(1);
    Files.createParentDirs(hourlyFile2);
    writeRecord(hourlyFile2);
    gmce.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
        .setFilePath(hourlyFile2.toString())
        .setFileFormat("avro")
        .setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
        .build()));
    gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopicCompleteness-1", "5000-6000").build());
    GenericRecord genericGmce_5000_6000 = GenericData.get().deepCopy(gmce.getSchema(), gmce);
    gobblinMCEWriterWithCompletness.writeEnvelope(new RecordEnvelope<>(genericGmce_5000_6000,
        new KafkaStreamingExtractor.KafkaWatermark(
            new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
            new LongWatermark(60L))));
    // Declare the (10:00, 11:00] window complete so the watermark can advance one hour.
    Mockito.when(verifier.calculateCompleteness("testTopicCompleteness", timestampMillis1 - TimeUnit.HOURS.toMillis(1), timestampMillis1))
        .thenReturn(ImmutableMap.of(KafkaAuditCountVerifier.CompletenessType.ClassicCompleteness, true,
            KafkaAuditCountVerifier.CompletenessType.TotalCountCompleteness, true));
    gobblinMCEWriterWithCompletness.flush();
    table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(1));
    Assert.assertEquals(table.properties().get(COMPLETION_WATERMARK_KEY), String.valueOf(timestampMillis1));
    Assert.assertEquals(table.properties().get(TOTAL_COUNT_COMPLETION_WATERMARK_KEY), String.valueOf(timestampMillis1));
    // watermark 1631815200000L correspond to 2021-09-16-11 in PT
    Assert.assertEquals(imw.state.getPropAsLong(String.format(STATE_COMPLETION_WATERMARK_KEY_OF_TABLE, table.name().toLowerCase(Locale.ROOT))), 1631815200000L);
    Assert.assertEquals(imw.state.getPropAsLong(String.format(STATE_TOTAL_COUNT_COMPLETION_WATERMARK_KEY_OF_TABLE, table.name().toLowerCase(Locale.ROOT))), 1631815200000L);
    dfl = FindFiles.in(table).withMetadataMatching(Expressions.startsWith("file_path", hourlyFile2.getAbsolutePath())).collect().iterator();
    Assert.assertTrue(dfl.hasNext());
    // late = 0: the file arrived within its own (not-yet-complete) hour.
    Assert.assertTrue(dfl.next().partition().get(1, Integer.class) == 0);
  }
  /**
   * Verifies completion-watermark handling for a {@code change_property} GMCE on a quiet topic
   * (no new data): the watermark is expected to advance to the start of the current hour once the
   * verifier reports the previous hour's window complete, and the offset ranges recorded on the
   * table are merged across the earlier tests (3000-7000).
   *
   * @throws IOException on write/flush failures
   */
  @Test(dependsOnMethods={"testWriteAddFileGMCECompleteness"}, groups={"icebergMetadataWriterTest"})
  public void testChangePropertyGMCECompleteness() throws IOException {
    ZonedDateTime expectedCWDt = ZonedDateTime.now(ZoneId.of(DEFAULT_TIME_ZONE)).truncatedTo(ChronoUnit.HOURS);
    // For quiet topics, watermark should always be beginning of current hour
    long expectedWatermark = expectedCWDt.toInstant().toEpochMilli();
    File hourlyFile2 = new File(tmpDir, "testDB/testTopicCompleteness/hourly/2021/09/16/11/data.avro");
    gmce.setOldFilePrefixes(null);
    gmce.setNewFiles(Lists.newArrayList(DataFile.newBuilder()
        .setFilePath(hourlyFile2.toString())
        .setFileFormat("avro")
        .setFileMetrics(DataMetrics.newBuilder().setRecordCount(10L).build())
        .build()));
    gmce.setOperationType(OperationType.change_property);
    gmce.setTopicPartitionOffsetsRange(ImmutableMap.<String, String>builder().put("testTopicCompleteness-1", "6000-7000").build());
    GenericRecord genericGmce = GenericData.get().deepCopy(gmce.getSchema(), gmce);
    gobblinMCEWriterWithCompletness.writeEnvelope(new RecordEnvelope<>(genericGmce,
        new KafkaStreamingExtractor.KafkaWatermark(
            new KafkaPartition.Builder().withTopicName("GobblinMetadataChangeEvent_test").withId(1).build(),
            new LongWatermark(65L))));
    KafkaAuditCountVerifier verifier = Mockito.mock(TestAuditCountVerifier.class);
    // For quiet topics always check for previous hour window
    Mockito.when(verifier.calculateCompleteness("testTopicCompleteness", expectedCWDt.minusHours(1).toInstant().toEpochMilli(), expectedWatermark))
        .thenReturn(ImmutableMap.of(KafkaAuditCountVerifier.CompletenessType.ClassicCompleteness, true,
            KafkaAuditCountVerifier.CompletenessType.TotalCountCompleteness, true));
    ((IcebergMetadataWriter) gobblinMCEWriterWithCompletness.metadataWriters.iterator().next()).setAuditCountVerifier(verifier);
    gobblinMCEWriterWithCompletness.flush();
    Table table = catalog.loadTable(catalog.listTables(Namespace.of(dbName)).get(1));
    // Offset ranges from the preceding tests (3000-4000, 4000-5000, 5000-6000, 6000-7000) merge into one span.
    Assert.assertEquals(table.properties().get("offset.range.testTopicCompleteness-1"), "3000-7000");
    Assert.assertEquals(table.spec().fields().get(1).name(), "late");
    Assert.assertEquals(table.properties().get(TOPIC_NAME_KEY), "testTopic");
    Assert.assertEquals(table.properties().get(COMPLETION_WATERMARK_TIMEZONE_KEY), "America/Los_Angeles");
    Assert.assertEquals(table.properties().get(COMPLETION_WATERMARK_KEY), String.valueOf(expectedWatermark));
    Assert.assertEquals(table.properties().get(TOTAL_COUNT_COMPLETION_WATERMARK_KEY), String.valueOf(expectedWatermark));
  }
private String writeRecord(File file) throws IOException {
GenericData.Record record = new GenericData.Record(avroDataSchema);
record.put("id", 1L);
record.put("data", "data");
String path = file.toString();
DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>();
DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter);
dataFileWriter.create(avroDataSchema, file);
dataFileWriter.append(record);
dataFileWriter.close();
return path;
}
public static class TestHiveRegistrationPolicyForIceberg extends HiveRegistrationPolicyBase {
public TestHiveRegistrationPolicyForIceberg(State props) throws IOException {
super(props);
}
protected Optional<HivePartition> getPartition(Path path, HiveTable table) throws IOException {
String partitionValue = "";
if (path.toString().contains("hourly/2020/03/17/08")) {
partitionValue = "2020-03-17-08";
} else if (path.toString().contains("hourly/2020/03/17/09")) {
partitionValue = "2020-03-17-09";
} else if (path.toString().contains("hourly/2021/09/16/09")) {
partitionValue = "2021-09-16-09";
} else if (path.toString().contains("hourly/2021/09/16/10")) {
partitionValue = "2021-09-16-10";
} else if (path.toString().contains("hourly/2021/09/16/11")) {
partitionValue = "2021-09-16-11";
} else if (path.toString().contains("daily/2020/03/17")) {
partitionValue = "2020-03-17-00";
}
return Optional.of(new HivePartition.Builder().withPartitionValues(Lists.newArrayList(partitionValue))
.withDbName("hivedb").withTableName(table.getTableName()).build());
}
@Override
protected List<HiveTable> getTables(Path path) throws IOException {
List<HiveTable> tables = super.getTables(path);
for (HiveTable table : tables) {
if (table.getTableName().equals("testTopicCompleteness")) {
table.setPartitionKeys(ImmutableList.<HiveRegistrationUnit.Column>of(
new HiveRegistrationUnit.Column("datepartition", serdeConstants.STRING_TYPE_NAME, StringUtils.EMPTY)
, new HiveRegistrationUnit.Column("late", serdeConstants.INT_TYPE_NAME, StringUtils.EMPTY)));
} else {
table.setPartitionKeys(ImmutableList.<HiveRegistrationUnit.Column>of(new HiveRegistrationUnit.Column("datepartition", serdeConstants.STRING_TYPE_NAME, StringUtils.EMPTY)));
//table.setLocation(tmpDir.getAbsolutePath());
}
}
return tables;
}
protected Iterable<String> getDatabaseNames(Path path) {
return Lists.newArrayList("hivedb");
}
protected List<String> getTableNames(Optional<String> dbPrefix, Path path) {
if (path.toString().contains("testFaultTolerant")) {
return Lists.newArrayList("testFaultTolerantIcebergTable");
}
else if (path.toString().contains("testTopicCompleteness")) {
return Lists.newArrayList("testTopicCompleteness");
}
return Lists.newArrayList("testTopic");
}
}
  /**
   * Thin {@link KafkaAuditCountVerifier} subclass used only as a Mockito mock target in these
   * tests; both constructors simply delegate to the superclass.
   */
  static class TestAuditCountVerifier extends KafkaAuditCountVerifier {
    public TestAuditCountVerifier(State state) {
      super(state);
    }
    public TestAuditCountVerifier(State state, AuditCountClient client) {
      super(state, client);
    }
  }
}
| 1,754 |
0 | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg/GobblinMCEProducer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg;
import azkaban.jobExecutor.AbstractJob;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.hive.HiveSerDeManager;
import org.apache.gobblin.hive.metastore.HiveMetaStoreBasedRegister;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.iceberg.Utils.IcebergUtils;
import org.apache.gobblin.iceberg.publisher.GobblinMCEPublisher;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metadata.DataFile;
import org.apache.gobblin.metadata.DataMetrics;
import org.apache.gobblin.metadata.DataOrigin;
import org.apache.gobblin.metadata.DatasetIdentifier;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.IntegerBytesPair;
import org.apache.gobblin.metadata.IntegerLongPair;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.source.extractor.extract.kafka.KafkaSource;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.writer.PartitionedDataWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.Metrics;
import static org.apache.gobblin.iceberg.writer.GobblinMCEWriter.HIVE_PARTITION_NAME;
/**
* A class running along with data ingestion pipeline for emitting GobblinMCE (Gobblin Metadata Change Event
* that includes the information of the file metadata change, i.e., add or delete file, and the column min/max value of the added file.
* GMCE will be consumed by another metadata ingestion pipeline to register/de-register hive/iceberg metadata)
*
* This is an abstract class, we need a sub system like Kakfa, which support at least once delivery, to emit the event
*/
@Slf4j
public abstract class GobblinMCEProducer implements Closeable {

  // Property key naming the concrete producer subclass to instantiate (see getGobblinMCEProducer).
  public static final String GMCE_PRODUCER_CLASS = "GobblinMCEProducer.class.name";
  // Optional override for the cluster name stamped on each GMCE; defaults to ClustersNames lookup.
  public static final String GMCE_CLUSTER_NAME = "GobblinMCE.cluster.name";
  // Hive registration policy used for old (rewritten/dropped) files; optional, warned if absent.
  public static final String OLD_FILES_HIVE_REGISTRATION_KEY = "old.files.hive.registration.policy";
  private static final String HDFS_PLATFORM_URN = "urn:li:dataPlatform:hdfs";
  private static final String DATASET_ORIGIN_KEY = "dataset.origin";
  private static final String DEFAULT_DATASET_ORIGIN = "PROD";

  @Setter
  protected State state;
  protected MetricContext metricContext;

  /**
   * @param state job state supplying all configuration read by this producer
   */
  public GobblinMCEProducer(State state) {
    this.state = state;
    this.metricContext = Instrumented.getMetricContext(state, this.getClass());
  }

  /** Convenience overload of {@link #sendGMCE(Map, List, List, Map, OperationType, SchemaSource, String)} with no audit count map. */
  public void sendGMCE(Map<Path, Metrics> newFiles, List<String> oldFiles, List<String> oldFilePrefixes,
      Map<String, String> offsetRange, OperationType operationType, SchemaSource schemaSource) throws IOException {
    sendGMCE(newFiles, oldFiles, oldFilePrefixes, offsetRange, operationType, schemaSource, null);
  }

  /**
   * This method will use the files to compute the table name and dataset name, for each table it will generate one GMCE and send that to kafka so
   * the metadata ingestion pipeline can use the information to register metadata
   * @param newFiles The map of new files' path and metrics
   * @param oldFiles the list of old file to be dropped
   * @param oldFilePrefixes path prefixes identifying old files to be dropped (alternative to oldFiles)
   * @param offsetRange offset range of the new data, can be null
   * @param operationType The opcode of gmce emitted by this method.
   * @param schemaSource where the table schema for the event originates
   * @param serializedAuditCountMap Audit count map to be used by {@link org.apache.gobblin.iceberg.writer.IcebergMetadataWriter} to track iceberg
   * registration counts
   * @throws IOException
   */
  public void sendGMCE(Map<Path, Metrics> newFiles, List<String> oldFiles, List<String> oldFilePrefixes,
      Map<String, String> offsetRange, OperationType operationType, SchemaSource schemaSource, String serializedAuditCountMap) throws IOException {
    GobblinMetadataChangeEvent gmce =
        getGobblinMetadataChangeEvent(newFiles, oldFiles, oldFilePrefixes, offsetRange, operationType, schemaSource, serializedAuditCountMap);
    // NOTE(review): getGobblinMetadataChangeEvent returns null when verifyInput rejects the
    // arguments, so implementations presumably tolerate a null gmce — TODO confirm.
    underlyingSendGMCE(gmce);
  }

  /**
   * Use the producer to send GMCE, the implementation need to make sure the emitting is at-least once in-order delivery
   * (i.e. use kafka producer to send event and config it to be at-least once delivery)
   * @param gmce GMCE that contains information of the metadata change
   */
  public abstract void underlyingSendGMCE(GobblinMetadataChangeEvent gmce);

  /**
   * Populates the event fields that are derived purely from job state/config (dataset identity,
   * cluster, flow id, registration policy and properties, schema, offset range).
   */
  private void setBasicInformationForGMCE(GobblinMetadataChangeEvent.Builder gmceBuilder,
      Map<String, String> offsetRange, SchemaSource schemaSource) {
    String origin = state.getProp(DATASET_ORIGIN_KEY, DEFAULT_DATASET_ORIGIN);
    gmceBuilder.setDatasetIdentifier(DatasetIdentifier.newBuilder()
        .setDataPlatformUrn(HDFS_PLATFORM_URN)
        .setDataOrigin(DataOrigin.valueOf(origin))
        .setNativeName(state.getProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR))
        .build());
    gmceBuilder.setCluster(state.getProp(GMCE_CLUSTER_NAME, ClustersNames.getInstance().getClusterName()));
    //retention job does not have job.id
    gmceBuilder.setFlowId(
        state.getProp(AbstractJob.JOB_ID, new Configuration().get(ConfigurationKeys.AZKABAN_FLOW_ID)));
    gmceBuilder.setRegistrationPolicy(state.getProp(ConfigurationKeys.HIVE_REGISTRATION_POLICY));
    gmceBuilder.setSchemaSource(schemaSource);
    gmceBuilder.setPartitionColumns(Lists.newArrayList(state.getProp(HIVE_PARTITION_NAME, "")));
    if (offsetRange != null) {
      gmceBuilder.setTopicPartitionOffsetsRange(offsetRange);
    }
    String schemaString = state.getProp(PartitionedDataWriter.WRITER_LATEST_SCHEMA);
    if (schemaString != null) {
      gmceBuilder.setTableSchema(schemaString);
    }
    if (state.contains(GobblinMCEPublisher.AVRO_SCHEMA_WITH_ICEBERG_ID)) {
      gmceBuilder.setAvroSchemaWithIcebergSchemaID(state.getProp(GobblinMCEPublisher.AVRO_SCHEMA_WITH_ICEBERG_ID));
    }
    if (state.contains(OLD_FILES_HIVE_REGISTRATION_KEY)) {
      gmceBuilder.setRegistrationPolicyForOldData(state.getProp(OLD_FILES_HIVE_REGISTRATION_KEY));
    } else {
      log.warn(
          "properties {} does not set, if it's for rewrite/drop operation, there may be trouble to get partition value for old data",
          OLD_FILES_HIVE_REGISTRATION_KEY);
    }
    // Copy over only the registration-related properties that are actually present in state,
    // so the emitted event carries no empty/absent keys.
    Map<String, String> regProperties = new HashMap<>();
    if (state.contains(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME)) {
      regProperties.put(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME,
          state.getProp(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME));
    }
    if (state.contains(HiveRegistrationPolicyBase.HIVE_TABLE_NAME)) {
      regProperties.put(HiveRegistrationPolicyBase.HIVE_TABLE_NAME,
          state.getProp(HiveRegistrationPolicyBase.HIVE_TABLE_NAME));
    }
    if (state.contains(KafkaSource.TOPIC_NAME)) {
      regProperties.put(KafkaSource.TOPIC_NAME,
          state.getProp(KafkaSource.TOPIC_NAME));
    }
    if (state.contains(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES)) {
      regProperties.put(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES,
          state.getProp(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_DATABASE_NAMES));
    }
    if (state.contains(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_TABLE_NAMES)) {
      regProperties.put(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_TABLE_NAMES,
          state.getProp(HiveRegistrationPolicyBase.ADDITIONAL_HIVE_TABLE_NAMES));
    }
    if (state.contains(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB)) {
      regProperties.put(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB,
          state.getProp(HiveMetaStoreBasedRegister.SCHEMA_SOURCE_DB));
    }
    if (state.contains(HiveSerDeManager.HIVE_ROW_FORMAT)) {
      regProperties.put(HiveSerDeManager.HIVE_ROW_FORMAT,
          state.getProp(HiveSerDeManager.HIVE_ROW_FORMAT));
    }
    if (!regProperties.isEmpty()) {
      gmceBuilder.setRegistrationProperties(regProperties);
    }
  }

  /**
   * Builds the {@link GobblinMetadataChangeEvent} for the given file changes.
   *
   * @return the built event, or {@code null} if the inputs fail {@link #verifyInput} for the
   *         given operation type
   */
  public GobblinMetadataChangeEvent getGobblinMetadataChangeEvent(Map<Path, Metrics> newFiles, List<String> oldFiles,
      List<String> oldFilePrefixes, Map<String, String> offsetRange, OperationType operationType,
      SchemaSource schemaSource, String serializedAuditCountMap) {
    if (!verifyInput(newFiles, oldFiles, oldFilePrefixes, operationType)) {
      return null;
    }
    GobblinMetadataChangeEvent.Builder gmceBuilder = GobblinMetadataChangeEvent.newBuilder();
    setBasicInformationForGMCE(gmceBuilder, offsetRange, schemaSource);
    // File lists are only set when non-empty; consumers distinguish "absent" from "empty".
    if (newFiles != null && !newFiles.isEmpty()) {
      gmceBuilder.setNewFiles(toGobblinDataFileList(newFiles));
    }
    if (oldFiles != null && !oldFiles.isEmpty()) {
      gmceBuilder.setOldFiles(oldFiles);
    }
    if (oldFilePrefixes != null && !oldFilePrefixes.isEmpty()) {
      gmceBuilder.setOldFilePrefixes(oldFilePrefixes);
    }
    gmceBuilder.setOperationType(operationType);
    gmceBuilder.setAuditCountMap(serializedAuditCountMap);
    return gmceBuilder.build();
  }

  /**
   * Validates that the file arguments are consistent with the operation type; logs and returns
   * {@code false} on violation rather than throwing.
   */
  private boolean verifyInput(Map<Path, Metrics> newFiles, List<String> oldFiles, List<String> oldFilePrefixes,
      OperationType operationType) {
    switch (operationType) {
      case rewrite_files: {
        // Rewrite needs both sides: new files plus old files OR old file prefixes.
        if (newFiles == null || ((oldFiles == null || oldFiles.isEmpty()) && (oldFilePrefixes == null || oldFilePrefixes
            .isEmpty())) || newFiles.isEmpty()) {
          log.error("Rewrite files operation must contain newFiles to be added and oldFiles to be deleted");
          return false;
        }
        break;
      }
      case add_files: {
        if (newFiles == null || newFiles.isEmpty()) {
          log.error("Add files operation must contain newFiles to be added");
          return false;
        }
        break;
      }
      case drop_files: {
        if ((oldFiles == null || oldFiles.isEmpty()) && (oldFilePrefixes == null || oldFilePrefixes.isEmpty())) {
          log.error("Drop files operation must contain old files to be deleted");
          return false;
        }
        break;
      }
      case change_property: {
        // Property changes touch no files; old files are tolerated with a warning.
        if(oldFiles != null) {
          log.warn("{} old files detected while no file alteration is performed", oldFiles.size());
        }
        log.info("Setting GMCE while no file changes need to be performed.");
        break;
      }
      default: {
        //unsupported operation
        log.error("Unsupported operation type {}", operationType);
        return false;
      }
    }
    return true;
  }

  /** Converts the path->metrics map into the Avro {@link DataFile} list carried by the event. */
  private List<DataFile> toGobblinDataFileList(Map<Path, Metrics> files) {
    return Lists.newArrayList(Iterables.transform(files.entrySet(), file ->
        {
          DataFile.Builder builder = createBuilderWithFilePath(file.getKey());
          addMetricsToFileBuilder(builder, file.getValue());
          return builder.build();
        }
    ));
  }

  /** Starts a {@link DataFile} builder with the path and the file format configured in state. */
  private DataFile.Builder createBuilderWithFilePath(Path filePath) {
    return DataFile.newBuilder()
        .setFilePath(filePath.toString())
        .setFileFormat(IcebergUtils.getIcebergFormat(state).toString());
  }

  /**
   * Copies Iceberg {@link Metrics} onto the file builder; falls back to a zero-record dummy
   * when metrics are missing so the (required) fileMetrics field is always populated.
   */
  private void addMetricsToFileBuilder(DataFile.Builder builder, Metrics metrics) {
    // If metrics is null or empty, set FileMetrics a dummy one
    if(metrics == null || metrics.recordCount() == null) {
      builder.setFileMetrics(DataMetrics.newBuilder().setRecordCount(0)
          .build());
      return;
    }
    // If metrics is concrete, fill all fields
    builder.setFileMetrics(DataMetrics.newBuilder()
        .setRecordCount(metrics.recordCount())
        .setColumnSizes(getIntegerLongPairsFromMap(metrics.columnSizes()))
        .setValueCounts(getIntegerLongPairsFromMap(metrics.valueCounts()))
        .setNullValueCounts(getIntegerLongPairsFromMap(metrics.nullValueCounts()))
        .setLowerBounds(getIntegerBytesPairsFromMap(metrics.lowerBounds()))
        .setUpperBounds(getIntegerBytesPairsFromMap(metrics.upperBounds()))
        .build());
  }

  /** @return pair list for the map, or {@code null} for a null/empty map (Avro-optional field). */
  private List<IntegerLongPair> getIntegerLongPairsFromMap(Map<Integer, Long> map) {
    if (map == null || map.size() == 0) {
      return null;
    }
    Iterable<Map.Entry<Integer, Long>> entries = map.entrySet();
    Iterable<IntegerLongPair> pairs =
        Iterables.transform(entries, entry -> new IntegerLongPair(entry.getKey(), entry.getValue()));
    return Lists.newArrayList(pairs);
  }

  /** @return pair list for the map, or {@code null} for a null/empty map (Avro-optional field). */
  private List<IntegerBytesPair> getIntegerBytesPairsFromMap(Map<Integer, ByteBuffer> map) {
    if (map == null || map.size() == 0) {
      return null;
    }
    Iterable<Map.Entry<Integer, ByteBuffer>> entries = map.entrySet();
    Iterable<IntegerBytesPair> pairs =
        Iterables.transform(entries, entry -> new IntegerBytesPair(entry.getKey(), entry.getValue()));
    return Lists.newArrayList(pairs);
  }

  /**
   * Factory: instantiates the producer subclass named by {@link #GMCE_PRODUCER_CLASS},
   * passing the state to its constructor.
   */
  public static GobblinMCEProducer getGobblinMCEProducer(State state) {
    return GobblinConstructorUtils.invokeConstructor(GobblinMCEProducer.class,
        state.getProp(GMCE_PRODUCER_CLASS), state);
  }

  @Override
  public void close() throws IOException {
    //producer close will handle by the cache
    this.metricContext.close();
  }
}
| 1,755 |
0 | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg/predicates/DatasetHiveSchemaContainsNonOptionalUnion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.predicates;
import java.io.IOException;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.base.Optional;
import gobblin.configuration.State;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.util.function.CheckedExceptionPredicate;
/**
* Determines if a dataset's hive schema contains a non optional union
*/
@Slf4j
public class DatasetHiveSchemaContainsNonOptionalUnion<T extends Dataset> implements CheckedExceptionPredicate<T, IOException> {

  public static final String PREFIX = DatasetHiveSchemaContainsNonOptionalUnion.class.getName();
  /**
   * 1st match group is assumed to be the DB and the 2nd match group the Table for the pattern
   */
  public static final String PATTERN = PREFIX + ".db.table.pattern";

  private final HiveRegister hiveRegister;
  private final Pattern pattern;

  /**
   * @param properties must contain {@link #PATTERN}, a regex whose two capture groups extract
   *                   the Hive DB and table names from a dataset URN
   */
  public DatasetHiveSchemaContainsNonOptionalUnion(Properties properties) {
    this.hiveRegister = getHiveRegister(new State(properties));
    this.pattern = Pattern.compile(properties.getProperty(PATTERN));
  }

  /**
   * Returns whether the dataset's registered Hive table declares a non-optional union column.
   * Datasets with no matching table are logged and treated as {@code false}.
   */
  @Override
  public boolean test(T dataset) throws IOException {
    Optional<HiveTable> table = getTable(dataset);
    if (table.isPresent()) {
      return containsNonOptionalUnion(table.get());
    }
    log.error("No matching table for dataset={}", dataset);
    return false;
  }

  // Looks up the dataset's Hive table via the register using the parsed DB/table pair.
  private Optional<HiveTable> getTable(T dataset) throws IOException {
    DbAndTable parsed = getDbAndTable(dataset);
    return this.hiveRegister.getTable(parsed.getDb(), parsed.getTable());
  }

  // Parses the URN with the configured pattern; the URN must match and yield exactly two groups.
  private DbAndTable getDbAndTable(T dataset) {
    Matcher matcher = pattern.matcher(dataset.getUrn());
    if (matcher.matches() && matcher.groupCount() == 2) {
      return new DbAndTable(matcher.group(1), HiveMetaStoreUtils.getHiveTableName(matcher.group(2)));
    }
    throw new IllegalStateException(String.format("Dataset urn [%s] doesn't follow expected pattern. " +
        "Expected pattern = %s", dataset.getUrn(), pattern.pattern()));
  }

  // Package-visible for testing; delegates the schema inspection to the metastore utils.
  boolean containsNonOptionalUnion(HiveTable table) {
    return HiveMetaStoreUtils.containsNonOptionalUnionTypeColumn(table);
  }

  // Extracted for overriding/mocking in tests.
  private HiveRegister getHiveRegister(State state){
    return HiveRegister.get(state);
  }

  /** Immutable (db, table) pair parsed from a dataset URN. */
  @Data
  private static class DbAndTable {
    private final String db;
    private final String table;
  }
}
| 1,756 |
0 | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg/publisher/GobblinMCEPublisher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.publisher;
import com.google.common.io.Closer;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.iceberg.GobblinMCEProducer;
import org.apache.gobblin.iceberg.Utils.IcebergUtils;
import org.apache.gobblin.iceberg.writer.GobblinMCEWriter;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metadata.SchemaSource;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.filters.HiddenFilter;
import org.apache.gobblin.writer.PartitionedDataWriter;
import org.apache.gobblin.writer.partitioner.TimeBasedWriterPartitioner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.Schema;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.mapping.MappingUtil;
import org.apache.iceberg.mapping.NameMapping;
import org.apache.iceberg.orc.OrcMetrics;
/**
* A {@link DataPublisher} that compute and emit GobblinMetadataChangeEvent to kafka and rely on metadata ingestion pipeline
* to register metadata.
*
* <p>
* This publisher is not responsible for publishing data, and it relies on another publisher
* to document the published paths in property {@link NEW_FILES_LIST}.
* This publisher will use {@link GobblinMCEProducer} to emit GMCE events.
* </p>
*/
@Slf4j
public class GobblinMCEPublisher extends DataPublisher {
public static final String OFFSET_RANGE_KEY = "offset.range";
public static final String MAP_DELIMITER_KEY = ":";
public static final String NEW_FILES_LIST = "new.files.list";
public static final String AVRO_SCHEMA_WITH_ICEBERG_ID = "avro.schema.with.iceberg.id";
private final GobblinMCEProducer producer;
private final Closer closer = Closer.create();
private final Configuration conf;
private static final PathFilter HIDDEN_FILES_FILTER = new HiddenFilter();
private static final Metrics DUMMY_METRICS = new Metrics(100000000L, null, null, null, null);
public static final String SERIALIZED_AUDIT_COUNT_MAP_KEY = "serializedAuditCountMap";
public GobblinMCEPublisher(State state) throws IOException {
this(state, GobblinMCEProducer.getGobblinMCEProducer(state));
}
public GobblinMCEPublisher(State state, GobblinMCEProducer producer) {
super(state);
this.producer = this.closer.register(producer);
conf = HadoopUtils.getConfFromState(state);
}
public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
// First aggregate the new files by partition
for (State state : states) {
Map<Path, Metrics> newFiles = computeFileMetrics(state, state.getPropAsList(NEW_FILES_LIST, ""));
Map<String, String> offsetRange = getPartitionOffsetRange(OFFSET_RANGE_KEY);
if (newFiles.isEmpty()) {
// There'll be only one dummy file here. This file is parsed for DB and table name calculation.
newFiles = computeDummyFile(state);
if (!newFiles.isEmpty()) {
log.info("Dummy file: " + newFiles.keySet().iterator().next());
this.producer.sendGMCE(newFiles, null, null, offsetRange, OperationType.change_property, SchemaSource.NONE);
} else {
log.info("No dummy file created. Not sending GMCE");
}
} else {
this.producer.sendGMCE(newFiles, null, null, offsetRange, OperationType.add_files, SchemaSource.SCHEMAREGISTRY,
state.getProp(SERIALIZED_AUDIT_COUNT_MAP_KEY));
}
}
}
protected Map<String, String> getPartitionOffsetRange(String offsetKey) {
return state.getPropAsList(offsetKey)
.stream()
.collect(Collectors.toMap(s -> s.split(MAP_DELIMITER_KEY)[0], s -> s.split(MAP_DELIMITER_KEY)[1]));
}
/**
* For each publish path, get all the data files under path
* and calculate the hive spec for each datafile and submit the task to register that datafile
* @throws IOException
*/
protected Map<Path, Metrics> computeFileMetrics(State state, List<String> fileList) throws IOException {
Map<Path, Metrics> newFiles = new HashMap<>();
NameMapping mapping = getNameMapping();
FileSystem fs = FileSystem.get(conf);
for (final String pathString : fileList) {
Path path = new Path(pathString);
LinkedList<FileStatus> fileStatuses = new LinkedList<>();
fileStatuses.add(fs.getFileStatus(path));
// Only register files
while (!fileStatuses.isEmpty()) {
FileStatus fileStatus = fileStatuses.pollFirst();
if (fileStatus.isDirectory()) {
fileStatuses.addAll(Arrays.asList(fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_FILTER)));
} else {
Path filePath = fileStatus.getPath();
Metrics metrics = getMetrics(state, filePath, conf, mapping);
newFiles.put(filePath, metrics);
}
}
}
return newFiles;
}
/**
* Choose the latest file from the work unit state. There will be no modification to the file.
* It's used in GMCE writer {@link GobblinMCEWriter} merely for getting the DB and table name.
* @throws IOException
*/
protected Map<Path, Metrics> computeDummyFile(State state) throws IOException {
Map<Path, Metrics> newFiles = new HashMap<>();
FileSystem fs = FileSystem.get(conf);
if (!state.contains(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR)) {
return newFiles;
}
String baseDatasetString = state.getProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR);
Path searchPath = new Path(baseDatasetString);
if (state.contains(TimeBasedWriterPartitioner.WRITER_PARTITION_PREFIX)) {
searchPath = new Path(searchPath, state.getProp(TimeBasedWriterPartitioner.WRITER_PARTITION_PREFIX));
}
PriorityQueue<FileStatus> fileStatuses = new PriorityQueue<>((x, y) -> Long.compare(y.getModificationTime(), x.getModificationTime()));
if (fs.exists(searchPath)) {
fileStatuses.add(fs.getFileStatus(searchPath));
}
// Only register files
while (!fileStatuses.isEmpty()) {
FileStatus fileStatus = fileStatuses.poll();
if (fileStatus.isDirectory()) {
fileStatuses.addAll(Arrays.asList(fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_FILTER)));
} else {
Path filePath = fileStatus.getPath();
newFiles.put(filePath, null);
// Only one concrete file from the path is needed
return newFiles;
}
}
return newFiles;
}
protected NameMapping getNameMapping() {
String writerSchema = state.getProp(PartitionedDataWriter.WRITER_LATEST_SCHEMA);
if (writerSchema == null) {
return null;
}
try {
org.apache.iceberg.shaded.org.apache.avro.Schema avroSchema =
new org.apache.iceberg.shaded.org.apache.avro.Schema.Parser().parse(writerSchema);
Schema icebergSchema = AvroSchemaUtil.toIceberg(avroSchema);
//This conversion is to make sure the schema has the iceberg id setup
state.setProp(AVRO_SCHEMA_WITH_ICEBERG_ID, AvroSchemaUtil.convert(icebergSchema.asStruct()).toString());
return MappingUtil.create(icebergSchema);
} catch (Exception e) {
//This means table schema is not compatible with iceberg, so directly return null
log.warn("Dataset {} contains schema that does not compatible with iceberg, will not emit file metrics for it",
state.getProp(ConfigurationKeys.DATA_PUBLISHER_DATASET_DIR));
return null;
}
}
public static Metrics getMetrics(State state, Path path, Configuration conf, NameMapping mapping) {
switch (IcebergUtils.getIcebergFormat(state)) {
case ORC: {
if (mapping == null) {
//This means the table is not compatible with iceberg, so return a dummy metric
return DUMMY_METRICS;
}
try {
return OrcMetrics.fromInputFile(HadoopInputFile.fromPath(path, conf), MetricsConfig.getDefault(), mapping);
} catch (Exception e) {
//This means the table is not compatible with iceberg, so return a dummy metric
return DUMMY_METRICS;
}
}
case AVRO: {
try {
return new Metrics(100000000L, null, null, null, null);
} catch (Exception e) {
throw new RuntimeException("Cannot get file information for file " + path.toString(), e);
}
}
default: {
throw new IllegalArgumentException("Unsupported data format for file " + path);
}
}
}
public void publishMetadata(Collection<? extends WorkUnitState> states) {
}
@Deprecated
@Override
public void initialize() {
}
@Override
public void close() throws IOException {
this.closer.close();
}
}
| 1,757 |
0 | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg/Utils/TypeInfoToSchemaParser.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.Utils;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
import org.codehaus.jackson.node.JsonNodeFactory;
import com.linkedin.avroutil1.compatibility.AvroCompatibilityHelper;
/**
 * Converts Hive {@link TypeInfo} column descriptions into equivalent Avro {@link Schema}s.
 * Nested structs become Avro records (namespaced under the configured root namespace),
 * unions are flattened of their nullable wrappers, and primitives map to Avro primitives
 * with logical-type annotations where applicable (date, timestamp-millis, decimal, byte, short).
 *
 * <p>Not thread-safe: {@code _recordCounter} is mutated while generating unique record names.
 */
public final class TypeInfoToSchemaParser {
  private static final String DECIMAL_TYPE_NAME = "decimal";
  private static final String CHAR_TYPE_NAME = "char";
  private static final String VARCHAR_TYPE_NAME = "varchar";
  private static final String DATE_TYPE_NAME = "date";
  private static final String TIMESTAMP_TYPE_NAME = "timestamp-millis";
  private static final String AVRO_PROP_LOGICAL_TYPE = "logicalType";
  private static final String AVRO_PROP_PRECISION = "precision";
  private static final String AVRO_PROP_SCALE = "scale";
  private static final String AVRO_PROP_MAX_LENGTH = "maxLength";
  private static final String AVRO_STRING_TYPE_NAME = "string";
  private static final String AVRO_INT_TYPE_NAME = "int";
  private static final String AVRO_LONG_TYPE_NAME = "long";
  private static final String AVRO_SHORT_TYPE_NAME = "short";
  private static final String AVRO_BYTE_TYPE_NAME = "byte";

  // Counter appended to generated record names so that anonymous structs inside unions
  // get distinct names within one parse.
  private int _recordCounter = 0;
  // Root namespace prepended to every generated record's namespace.
  private final String _namespace;
  // When true, every generated field schema is wrapped in a [null, T] union.
  private final boolean _mkFieldsOptional;
  // Optional lower-case -> original-case field-name restoration map.
  private final Map<String, String> _downToUpCaseMap;

  public TypeInfoToSchemaParser(String namespace, boolean mkFieldsOptional, Map<String, String> downToUpCaseMap) {
    this._namespace = namespace;
    this._mkFieldsOptional = mkFieldsOptional;
    this._downToUpCaseMap = downToUpCaseMap;
  }

  /**
   * Builds an Avro record schema named {@code recordName} in namespace
   * {@code _namespace + recordNamespace} from parallel lists of field names and Hive type infos.
   *
   * @param recordNamespace namespace suffix for this record (appended to the root namespace)
   * @param recordName      name of the generated record
   * @param fieldNames      Hive column names (any {@code a.b.c} prefix is stripped to {@code c})
   * @param fieldTypeInfos  Hive type info per column, same order as {@code fieldNames}
   */
  public Schema parseSchemaFromFieldsTypeInfo(String recordNamespace, String recordName, List<String> fieldNames,
      List<TypeInfo> fieldTypeInfos) {
    List<Field> fields = new ArrayList<>();
    for (int i = 0; i < fieldNames.size(); ++i) {
      TypeInfo fieldTypeInfo = fieldTypeInfos.get(i);
      String fieldName = removePrefix(fieldNames.get(i));
      // Restore original casing if a mapping was provided.
      fieldName = this._downToUpCaseMap.getOrDefault(fieldName, fieldName);
      Schema schema = parseSchemaFromTypeInfo(fieldTypeInfo, recordNamespace + "." + recordName.toLowerCase(),
          StringUtils.capitalize(fieldName));
      fields.add(AvroCompatibilityHelper.createSchemaField(fieldName, schema, null, null));
    }
    Schema recordSchema = Schema.createRecord(recordName, null, this._namespace + recordNamespace, false);
    recordSchema.setFields(fields);
    return recordSchema;
  }

  /**
   * Dispatches on the Hive type category and converts to the corresponding Avro schema,
   * wrapping in a nullable union when {@code _mkFieldsOptional} is set.
   */
  Schema parseSchemaFromTypeInfo(TypeInfo typeInfo, String recordNamespace, String recordName) {
    Category category = typeInfo.getCategory();
    Schema schema;
    switch (category) {
      case STRUCT:
        schema = parseSchemaFromStruct((StructTypeInfo) typeInfo, recordNamespace, recordName);
        break;
      case LIST:
        schema = parseSchemaFromList((ListTypeInfo) typeInfo, recordNamespace, recordName);
        break;
      case MAP:
        schema = parseSchemaFromMap((MapTypeInfo) typeInfo, recordNamespace, recordName);
        break;
      case PRIMITIVE:
        schema = parseSchemaFromPrimitive((PrimitiveTypeInfo) typeInfo);
        break;
      case UNION:
        schema = parseSchemaFromUnion((UnionTypeInfo) typeInfo, recordNamespace, recordName);
        break;
      default:
        throw new UnsupportedOperationException("Conversion from " + category + " not supported");
    }
    return this._mkFieldsOptional ? wrapInNullableUnion(schema) : schema;
  }

  /**
   * Converts a Hive union to an Avro union. Each member schema that is itself a nullable
   * union is unwrapped to its non-null branch first (Avro forbids nested unions). Struct
   * members get unique generated names via {@code _recordCounter}.
   */
  private Schema parseSchemaFromUnion(UnionTypeInfo typeInfo, String recordNamespace, String recordName) {
    List<Schema> schemas = new ArrayList<>();
    for (TypeInfo memberTypeInfo : typeInfo.getAllUnionObjectTypeInfos()) {
      Schema candidate;
      if (memberTypeInfo instanceof StructTypeInfo) {
        String newRecordName = recordName + this._recordCounter;
        ++this._recordCounter;
        candidate = parseSchemaFromStruct((StructTypeInfo) memberTypeInfo, recordNamespace, newRecordName);
      } else {
        candidate = parseSchemaFromTypeInfo(memberTypeInfo, recordNamespace, recordName);
      }
      schemas.add(isNullableType(candidate) ? getOtherTypeFromNullableType(candidate) : candidate);
    }
    return Schema.createUnion(schemas);
  }

  /** Converts a Hive struct to an Avro record via {@link #parseSchemaFromFieldsTypeInfo}. */
  private Schema parseSchemaFromStruct(StructTypeInfo typeInfo, String recordNamespace, String recordName) {
    return parseSchemaFromFieldsTypeInfo(recordNamespace, recordName, typeInfo.getAllStructFieldNames(),
        typeInfo.getAllStructFieldTypeInfos());
  }

  /** Converts a Hive list to an Avro array of the converted element schema. */
  private Schema parseSchemaFromList(ListTypeInfo typeInfo, String recordNamespace, String recordName) {
    Schema elementSchema = parseSchemaFromTypeInfo(typeInfo.getListElementTypeInfo(), recordNamespace, recordName);
    return Schema.createArray(elementSchema);
  }

  /**
   * Converts a Hive map to an Avro map. Avro map keys are always strings, so any other
   * key type is rejected.
   */
  private Schema parseSchemaFromMap(MapTypeInfo typeInfo, String recordNamespace, String recordName) {
    TypeInfo keyTypeInfo = typeInfo.getMapKeyTypeInfo();
    PrimitiveCategory keyCategory = ((PrimitiveTypeInfo) keyTypeInfo).getPrimitiveCategory();
    if (keyCategory != PrimitiveCategory.STRING) {
      throw new UnsupportedOperationException("Key of Map can only be a String");
    }
    Schema valueSchema = parseSchemaFromTypeInfo(typeInfo.getMapValueTypeInfo(), recordNamespace, recordName);
    return Schema.createMap(valueSchema);
  }

  /**
   * Maps a Hive primitive to an Avro primitive, attaching logical-type properties
   * (date, timestamp-millis, decimal with precision/scale, byte, short) where needed.
   */
  private Schema parseSchemaFromPrimitive(PrimitiveTypeInfo primitiveTypeInfo) {
    Schema schema;
    switch (primitiveTypeInfo.getPrimitiveCategory()) {
      case LONG:
        schema = Schema.create(Type.LONG);
        break;
      case DATE:
        schema = Schema.create(Type.INT);
        schema.addProp(AVRO_PROP_LOGICAL_TYPE, DATE_TYPE_NAME);
        break;
      case TIMESTAMP:
        schema = Schema.create(Type.LONG);
        schema.addProp(AVRO_PROP_LOGICAL_TYPE, TIMESTAMP_TYPE_NAME);
        break;
      case BINARY:
        schema = Schema.create(Type.BYTES);
        break;
      case BOOLEAN:
        schema = Schema.create(Type.BOOLEAN);
        break;
      case DOUBLE:
        schema = Schema.create(Type.DOUBLE);
        break;
      case DECIMAL:
        DecimalTypeInfo dti = (DecimalTypeInfo) primitiveTypeInfo;
        JsonNodeFactory factory = JsonNodeFactory.instance;
        schema = Schema.create(Type.BYTES);
        schema.addProp(AVRO_PROP_LOGICAL_TYPE, DECIMAL_TYPE_NAME);
        schema.addProp(AVRO_PROP_PRECISION, factory.numberNode(dti.getPrecision()));
        schema.addProp(AVRO_PROP_SCALE, factory.numberNode(dti.getScale()));
        break;
      case FLOAT:
        schema = Schema.create(Type.FLOAT);
        break;
      case BYTE:
        schema = Schema.create(Type.INT);
        schema.addProp(AVRO_PROP_LOGICAL_TYPE, AVRO_BYTE_TYPE_NAME);
        break;
      case SHORT:
        schema = Schema.create(Type.INT);
        schema.addProp(AVRO_PROP_LOGICAL_TYPE, AVRO_SHORT_TYPE_NAME);
        break;
      case INT:
        schema = Schema.create(Type.INT);
        break;
      case CHAR:
      case STRING:
      case VARCHAR:
        // char/varchar length bounds are intentionally dropped; all map to plain string.
        schema = Schema.create(Type.STRING);
        break;
      case VOID:
        schema = Schema.create(Type.NULL);
        break;
      default:
        throw new UnsupportedOperationException(primitiveTypeInfo + " is not supported.");
    }
    return schema;
  }

  /**
   * Wraps {@code schema} in a {@code [null, schema]} union. NULL schemas are returned
   * unchanged; existing unions get null prepended rather than being nested.
   */
  @SuppressWarnings("checkstyle:FallThrough")
  private static Schema wrapInNullableUnion(Schema schema) {
    Schema wrappedSchema = schema;
    switch (schema.getType()) {
      case NULL:
        break;
      case UNION:
        List<Schema> unionSchemas = Lists.newArrayList(Schema.create(Type.NULL));
        unionSchemas.addAll(schema.getTypes());
        wrappedSchema = Schema.createUnion(unionSchemas);
        break;
      default:
        wrappedSchema = Schema.createUnion(Arrays.asList(Schema.create(Type.NULL), schema));
    }
    return wrappedSchema;
  }

  /** Strips any dotted prefix, e.g. {@code a.b.c -> c}; names without a dot pass through. */
  private static String removePrefix(String name) {
    int idx = name.lastIndexOf('.');
    return idx > 0 ? name.substring(idx + 1) : name;
  }

  /** Returns true when {@code schema} is a union of at least two branches containing NULL. */
  private static boolean isNullableType(Schema schema) {
    if (schema.getType() != Type.UNION) {
      return false;
    }
    List<Schema> itemSchemas = schema.getTypes();
    if (itemSchemas.size() < 2) {
      return false;
    }
    for (Schema itemSchema : itemSchemas) {
      if (Type.NULL.equals(itemSchema.getType())) {
        return true;
      }
    }
    return false;
  }

  /**
   * Strips the NULL branch from a nullable union: a two-branch union collapses to its
   * non-null branch, a larger one to a union of its non-null branches. A two-branch
   * union without a NULL branch is returned unchanged.
   */
  private static Schema getOtherTypeFromNullableType(Schema unionSchema) {
    List<Schema> types = unionSchema.getTypes();
    if (types.size() == 2) {
      if (types.get(0).getType() == Type.NULL) {
        return types.get(1);
      }
      return types.get(1).getType() == Type.NULL ? types.get(0) : unionSchema;
    }
    List<Schema> itemSchemas = new ArrayList<>();
    for (Schema itemSchema : types) {
      if (!Type.NULL.equals(itemSchema.getType())) {
        itemSchemas.add(itemSchema);
      }
    }
    return itemSchemas.size() > 1 ? Schema.createUnion(itemSchemas) : itemSchemas.get(0);
  }
}
| 1,758 |
0 | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg/Utils/IcebergUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.Utils;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.metadata.IntegerBytesPair;
import org.apache.gobblin.metadata.IntegerLongPair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.types.Conversions;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
@Slf4j
public class IcebergUtils {
private static final String AVRO_SCHEMA_URL = "avro.schema.url";
private static final String AVRO_SCHEMA_LITERAL = "avro.schema.literal";
private static final String[] RESTRICTED_PROPERTIES =
new String[]{AVRO_SCHEMA_URL, AVRO_SCHEMA_LITERAL};
private IcebergUtils() {
}
/**
* Calculate the {@Link PartitionSpec} used to create iceberg table
*/
public static PartitionSpec getPartitionSpec(Schema tableSchema, Schema partitionSchema) {
//TODO: Add more information into partition spec e.g. day, year, month, kafka partition ids, offset ranges for better consuming
PartitionSpec.Builder builder = PartitionSpec.builderFor(tableSchema);
partitionSchema.asStruct().fields().forEach(f -> builder.identity(f.name()));
return builder.build();
}
/**
* Given a avro schema string and a hive table,
* calculate the iceberg table schema and partition schema.
* (E.g. we use 'datepartition' as the partition column, which is not included inside the data schema,
* we'll need to add that column to data schema to construct table schema
*/
public static IcebergDataAndPartitionSchema getIcebergSchema(String schema,
org.apache.hadoop.hive.metastore.api.Table table) {
org.apache.iceberg.shaded.org.apache.avro.Schema icebergDataSchema =
new org.apache.iceberg.shaded.org.apache.avro.Schema.Parser().parse(schema);
Types.StructType dataStructType = AvroSchemaUtil.convert(icebergDataSchema).asStructType();
List<Types.NestedField> dataFields = Lists.newArrayList(dataStructType.fields());
org.apache.iceberg.shaded.org.apache.avro.Schema icebergPartitionSchema =
parseSchemaFromCols(table.getPartitionKeys(), table.getDbName(), table.getTableName(), true);
Types.StructType partitionStructType = AvroSchemaUtil.convert(icebergPartitionSchema).asStructType();
List<Types.NestedField> partitionFields = partitionStructType.fields();
Preconditions.checkArgument(partitionFields.stream().allMatch(f -> f.type().isPrimitiveType()),
"Only primitive fields are supported for partition columns");
dataFields.addAll(partitionFields);
Types.StructType updatedStructType = Types.StructType.of(dataFields);
updatedStructType =
(Types.StructType) TypeUtil.assignFreshIds(updatedStructType, new AtomicInteger(0)::incrementAndGet);
return new IcebergDataAndPartitionSchema(new org.apache.iceberg.Schema(updatedStructType.fields()),
new org.apache.iceberg.Schema(partitionFields));
}
private static org.apache.iceberg.shaded.org.apache.avro.Schema parseSchemaFromCols(List<FieldSchema> cols,
String namespace, String recordName, boolean mkFieldsOptional) {
final List<String> colNames = new ArrayList<>(cols.size());
final List<TypeInfo> colsTypeInfo = new ArrayList<>(cols.size());
cols.forEach(fs -> {
colNames.add(fs.getName());
colsTypeInfo.add(TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()));
});
final TypeInfoToSchemaParser parser =
new TypeInfoToSchemaParser(namespace, mkFieldsOptional, Collections.emptyMap());
return new org.apache.iceberg.shaded.org.apache.avro.Schema.Parser().parse(
parser.parseSchemaFromFieldsTypeInfo("", recordName, colNames, colsTypeInfo).toString());
}
/**
* Given a Hive table, get all the properties of the table, and drop unneeded ones and transfer to a map
*/
public static Map<String, String> getTableProperties(org.apache.hadoop.hive.metastore.api.Table table) {
final Map<String, String> parameters = getRawTableProperties(table);
// drop unneeded parameters
for (String k : RESTRICTED_PROPERTIES) {
parameters.remove(k);
}
return parameters;
}
private static Map<String, String> getRawTableProperties(org.apache.hadoop.hive.metastore.api.Table table) {
final Map<String, String> parameters = new HashMap<>();
// lowest to highest priority of updating tableProperties
parameters.putAll(table.getSd().getSerdeInfo().getParameters());
parameters.putAll(table.getSd().getParameters());
parameters.putAll(table.getParameters());
return parameters;
}
/**
* Get the iceberg partition value for given partition strings
*/
public static StructLike getPartition(Types.StructType partitionType, List<String> partitionValues) {
//TODO parse partitionValue as per partitionSchema
return new StructLike() {
@Override
public int size() {
return partitionValues.size();
}
@Override
public <T> T get(int pos, Class<T> javaClass) {
return partitionValue(partitionType.fields().get(pos), partitionValues.get(pos));
}
@Override
public <T> void set(int pos, T value) {
throw new UnsupportedOperationException();
}
};
}
private static <T> T partitionValue(Types.NestedField partitionField, String colAsString) {
Preconditions.checkState(partitionField.type().isPrimitiveType(), "Partition column {} is not of primitive type",
partitionField);
return (T) Conversions.fromPartitionString(partitionField.type(), colAsString);
}
/**
* Transfer list of {@Link IntegerLongPair} from origin id to long, to Map<Integer, Long> from real column id to long
* This method is mainly used to get parse the file metrics from GMCE
* @param list list of {@Link IntegerLongPair}
* @param schemaIdMap A map from origin ID (defined by data pipeline) to the real iceberg table column id
* @return A map from real id to long as the file metrics
*/
public static Map<Integer, Long> getMapFromIntegerLongPairs(
List<IntegerLongPair> list, Map<Integer, Integer> schemaIdMap) {
//If schemaIdMap is not set, we directly return null to avoid set wrong file metrics
if (list == null || list.size() == 0 || schemaIdMap == null) {
return null;
}
try {
return list.stream().filter(t -> schemaIdMap.containsKey(t.getKey()))
.collect(Collectors.toMap(t -> schemaIdMap.get(t.getKey()), IntegerLongPair::getValue));
} catch (Exception e) {
log.warn("get exception {} when calculate metrics", e);
return null;
}
}
/**
* Transfer list of {@Link IntegerBytesPair} from origin id to bytes, to Map<Integer, ByteBuffer> from real column id to ByteBuffer
* This method is mainly used to get parse the file metrics from GMCE
* @param list list of {@Link IntegerBytesPair} from origin id to bytes
* @param schemaIdMap A map from origin ID (defined by data pipeline) to the real iceberg table column id
* @return A map from real id to ByteBuffer as the file metrics
*/
public static Map<Integer, ByteBuffer> getMapFromIntegerBytesPairs(
List<IntegerBytesPair> list, Map<Integer, Integer> schemaIdMap) {
//If schemaWithOriginId is not set, we directly return null to avoid set wrong file metrics
if (list == null || list.size() == 0 || schemaIdMap == null) {
return null;
}
try {
return list.stream().filter(t -> schemaIdMap.containsKey(t.getKey()))
.collect(Collectors.toMap(t -> schemaIdMap.get(t.getKey()), IntegerBytesPair::getValue));
} catch (Exception e) {
log.warn("get exception {} when calculate metrics", e);
return null;
}
}
/**
* Method to get DataFile without format and metrics information
* This method is mainly used to get the file to be deleted
*/
public static DataFile getIcebergDataFileWithoutMetric(String file, PartitionSpec partitionSpec,
StructLike partitionVal) {
//Use raw Path to support federation.
String rawPath = new Path(file).toUri().getRawPath();
//Just want to remove the old files, so set the record number and file size to a random value
DataFiles.Builder dataFileBuilder =
DataFiles.builder(partitionSpec).withPath(rawPath).withFileSizeInBytes(0).withRecordCount(0);
if (partitionVal != null) {
dataFileBuilder.withPartition(partitionVal);
}
return dataFileBuilder.build();
}
/**
* Method to get DataFile with format and metrics information
* This method is mainly used to get the file to be added
*/
public static DataFile getIcebergDataFileWithMetric(org.apache.gobblin.metadata.DataFile file,
PartitionSpec partitionSpec, StructLike partition, Configuration conf, Map<Integer, Integer> schemaIdMap) {
Path filePath = new Path(file.getFilePath());
DataFiles.Builder dataFileBuilder = DataFiles.builder(partitionSpec);
try {
// Use absolute path to support federation
dataFileBuilder.withPath(filePath.toUri().getRawPath())
.withFileSizeInBytes(filePath.getFileSystem(conf).getFileStatus(filePath).getLen())
.withFormat(file.getFileFormat());
} catch (IOException exception) {
throw new RuntimeIOException(exception, "Failed to get dataFile for path: %s", filePath);
}
if (partition != null) {
dataFileBuilder.withPartition(partition);
}
Metrics metrics = new Metrics(file.getFileMetrics().getRecordCount(),
IcebergUtils.getMapFromIntegerLongPairs(file.getFileMetrics().getColumnSizes(), schemaIdMap),
IcebergUtils.getMapFromIntegerLongPairs(file.getFileMetrics().getValueCounts(), schemaIdMap),
IcebergUtils.getMapFromIntegerLongPairs(file.getFileMetrics().getNullValueCounts(), schemaIdMap),
// TODO: If required, handle NaN value count File metric conversion in ORC metrics with iceberg upgrade
IcebergUtils.getMapFromIntegerLongPairs(Lists.newArrayList(), schemaIdMap), // metric value will be null since Nan values are supported from avro version 1.10.*
IcebergUtils.getMapFromIntegerBytesPairs(file.getFileMetrics().getLowerBounds(), schemaIdMap),
IcebergUtils.getMapFromIntegerBytesPairs(file.getFileMetrics().getUpperBounds(), schemaIdMap));
return dataFileBuilder.withMetrics(metrics).build();
}
/**
* Calculate the schema id map from origin id (file metrics used) to the current table schema id
* @param schemaWithOriginId coming from GMCE, contains the original schema id which is used to calculate the file metrics
* @param tableSchema table schema, coming from iceberg
* @return Schema id map
*/
public static Map<Integer, Integer> getSchemaIdMap(Schema schemaWithOriginId, Schema tableSchema) {
if (schemaWithOriginId == null || tableSchema == null) {
return null;
}
Map<Integer, Integer> map = new HashMap();
Map<String, Integer> originNameIdMap = TypeUtil.indexByName(schemaWithOriginId.asStruct());
for (Map.Entry<String, Integer> nameIdPair : originNameIdMap.entrySet()) {
if (tableSchema.findField(nameIdPair.getKey()) != null) {
map.put(nameIdPair.getValue(), tableSchema.findField(nameIdPair.getKey()).fieldId());
} else {
log.warn("Cannot find field {}, will skip the metrics for this column", nameIdPair.getKey());
}
}
return map;
}
/**
* Method to get Iceberg format from state, the format is determined by {@Link ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY}
*/
public static FileFormat getIcebergFormat(State state) {
if (state.getProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY).equalsIgnoreCase("AVRO")) {
return FileFormat.AVRO;
} else if (state.getProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY).equalsIgnoreCase("ORC")) {
return FileFormat.ORC;
}
throw new IllegalArgumentException("Unsupported data format: " + state.getProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY));
}
public static class IcebergDataAndPartitionSchema {
public Schema tableSchema;
public Schema partitionSchema;
IcebergDataAndPartitionSchema(Schema tableSchema, Schema partitionSchema) {
this.tableSchema = tableSchema;
this.partitionSchema = partitionSchema;
}
}
}
| 1,759 |
0 | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg/writer/IcebergMetadataWriterConfigKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
/**
 * Configuration keys (and their defaults) used by the Iceberg metadata writer, chiefly for the
 * completeness-watermark feature: feature flags, table whitelists/blacklists, watermark property
 * names, and late-data partition settings.
 */
public class IcebergMetadataWriterConfigKeys {
// --- Completeness feature flags ---
public static final String ICEBERG_COMPLETENESS_ENABLED = "iceberg.completeness.enabled";
public static final boolean DEFAULT_ICEBERG_COMPLETENESS = false;
public static final String ICEBERG_TOTAL_COUNT_COMPLETENESS_ENABLED = "iceberg.completeness.totalCount.enabled";
public static final boolean DEFAULT_ICEBERG_TOTAL_COUNT_COMPLETENESS = false;
// --- Per-table whitelists/blacklists gating the completeness checks ---
public static final String ICEBERG_COMPLETENESS_WHITELIST = "iceberg.completeness.whitelist";
public static final String ICEBERG_TOTAL_COUNT_COMPLETENESS_WHITELIST = "iceberg.totalCount.completeness.whitelist";
public static final String ICEBERG_COMPLETENESS_BLACKLIST = "iceberg.completeness.blacklist";
public static final String ICEBERG_TOTAL_COUNT_COMPLETENESS_BLACKLIST = "iceberg.totalCount.completeness.blacklist";
// --- Table-property names holding the computed watermarks ---
public static final String COMPLETION_WATERMARK_KEY = "completionWatermark";
public static final String TOTAL_COUNT_COMPLETION_WATERMARK_KEY = "totalCountCompletionWatermark";
public static final String COMPLETION_WATERMARK_TIMEZONE_KEY = "completionWatermarkTimezone";
// Sentinel meaning "no completion watermark recorded yet".
public static final long DEFAULT_COMPLETION_WATERMARK = -1L;
// --- Time handling for audit checks ---
public static final String TIME_ZONE_KEY = "iceberg.completeness.timezone";
public static final String DEFAULT_TIME_ZONE = "America/Los_Angeles";
public static final String DATEPARTITION_FORMAT = "yyyy-MM-dd-HH";
// --- Late-data partition column added when completeness is enabled ---
public static final String NEW_PARTITION_KEY = "iceberg.completeness.add.partition";
public static final String DEFAULT_NEW_PARTITION = "late";
public static final String NEW_PARTITION_TYPE_KEY = "iceberg.completeness.add.partition.type";
public static final String DEFAULT_PARTITION_COLUMN_TYPE = "string";
public static final String TOPIC_NAME_KEY = "topic.name";
// Granularity (e.g. HOUR) at which audit counts are verified.
public static final String AUDIT_CHECK_GRANULARITY = "iceberg.completeness.audit.check.granularity";
public static final String DEFAULT_AUDIT_CHECK_GRANULARITY = "HOUR";
public static final String ICEBERG_NEW_PARTITION_ENABLED = "iceberg.new.partition.enabled";
public static final boolean DEFAULT_ICEBERG_NEW_PARTITION_ENABLED = false;
public static final String ICEBERG_NEW_PARTITION_WHITELIST = "iceberg.new.partition.whitelist";
public static final String ICEBERG_NEW_PARTITION_BLACKLIST = "iceberg.new.partition.blacklist";
// --- Per-table state keys (formatted with the lower-cased table name) ---
public static final String STATE_COMPLETION_WATERMARK_KEY_OF_TABLE = "completion.watermark.%s";
public static final String STATE_TOTAL_COUNT_COMPLETION_WATERMARK_KEY_OF_TABLE = "totalCount.completion.watermark.%s";
public static final String ICEBERG_ENABLE_CUSTOM_METADATA_RETENTION_POLICY = "iceberg.enable.custom.metadata.retention.policy";
public static final boolean DEFAULT_ICEBERG_ENABLE_CUSTOM_METADATA_RETENTION_POLICY = true;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.SortedSet;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.completeness.verifier.KafkaAuditCountVerifier;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.time.TimeIterator;
import static org.apache.gobblin.iceberg.writer.IcebergMetadataWriterConfigKeys.*;
/**
 * Computes new completeness watermarks for a table and, upon a successful audit check, updates:
 * <ol>
 *   <li>the property map destined for {@link IcebergMetadataWriter.TableMetadata}</li>
 *   <li>the supplied {@link org.apache.gobblin.configuration.State}</li>
 *   <li>the completionWatermark fields on {@link IcebergMetadataWriter.TableMetadata}</li>
 * </ol>
 */
@Slf4j
public class CompletenessWatermarkUpdater {
  // Kafka topic whose audit counts are checked; may be null when not configured on the table.
  private final String topic;
  // Name of a TimeIterator.Granularity value controlling the audit-check step size.
  private final String auditCheckGranularity;
  protected final String timeZone;
  protected final IcebergMetadataWriter.TableMetadata tableMetadata;
  // Table-property map that receives the updated watermark entries.
  protected final Map<String, String> propsToUpdate;
  // Job state that receives the updated per-table watermark entries.
  protected final State stateToUpdate;
  protected KafkaAuditCountVerifier auditCountVerifier;

  public CompletenessWatermarkUpdater(String topic, String auditCheckGranularity, String timeZone,
      IcebergMetadataWriter.TableMetadata tableMetadata, Map<String, String> propsToUpdate, State stateToUpdate,
      KafkaAuditCountVerifier auditCountVerifier) {
    this.tableMetadata = tableMetadata;
    this.topic = topic;
    this.auditCheckGranularity = auditCheckGranularity;
    this.timeZone = timeZone;
    this.propsToUpdate = propsToUpdate;
    this.stateToUpdate = stateToUpdate;
    this.auditCountVerifier = auditCountVerifier;
  }

  /**
   * Update TableMetadata with the new completion watermark upon a successful audit check.
   * @param timestamps Sorted set in reverse order of timestamps to check audit counts for
   * @param includeTotalCountCompletionWatermark If totalCountCompletionWatermark should be calculated
   */
  public void run(SortedSet<ZonedDateTime> timestamps, boolean includeTotalCountCompletionWatermark) {
    String tableName = tableMetadata.table.get().name();
    if (this.topic == null) {
      log.error(String.format("Not performing audit check. %s is null. Please set as table property of %s",
          TOPIC_NAME_KEY, tableName));
      // BUGFIX: actually skip the audit check when no topic is configured. Previously execution
      // fell through and ran the check with a null topic despite the log message above.
      return;
    }
    computeAndUpdateWatermark(tableName, timestamps, includeTotalCountCompletionWatermark);
  }

  private void computeAndUpdateWatermark(String tableName, SortedSet<ZonedDateTime> timestamps,
      boolean includeTotalCountWatermark) {
    log.info(String.format("Compute completion watermark for %s and timestamps %s with previous watermark %s, previous totalCount watermark %s, includeTotalCountWatermark=%b",
        this.topic, timestamps, tableMetadata.completionWatermark, tableMetadata.totalCountCompletionWatermark,
        includeTotalCountWatermark));
    // Nothing to iterate over; bail out before building the updater set.
    if (timestamps == null || timestamps.isEmpty()) {
      log.error("Cannot create time iterator. Empty for null timestamps");
      return;
    }
    WatermarkUpdaterSet updaterSet = new WatermarkUpdaterSet(this.tableMetadata, this.timeZone, this.propsToUpdate,
        this.stateToUpdate, includeTotalCountWatermark);
    ZonedDateTime now = ZonedDateTime.now(ZoneId.of(this.timeZone));
    TimeIterator.Granularity granularity = TimeIterator.Granularity.valueOf(this.auditCheckGranularity);
    // timestamps is sorted in reverse order: first() is the most recent, last() the oldest,
    // and the iterator walks backwards in time (descending=true).
    ZonedDateTime startDT = timestamps.first();
    ZonedDateTime endDT = timestamps.last();
    TimeIterator iterator = new TimeIterator(startDT, endDT, granularity, true);
    try {
      while (iterator.hasNext()) {
        ZonedDateTime timestampDT = iterator.next();
        updaterSet.checkForEarlyStop(timestampDT, now, granularity);
        if (updaterSet.allFinished()) {
          break;
        }
        // Audit counts are verified over the window (timestamp - 1 granularity, timestamp].
        ZonedDateTime auditCountCheckLowerBoundDT = TimeIterator.dec(timestampDT, granularity, 1);
        Map<KafkaAuditCountVerifier.CompletenessType, Boolean> results =
            this.auditCountVerifier.calculateCompleteness(this.topic,
                auditCountCheckLowerBoundDT.toInstant().toEpochMilli(),
                timestampDT.toInstant().toEpochMilli());
        updaterSet.computeAndUpdate(results, timestampDT);
      }
    } catch (IOException e) {
      log.warn("Exception during audit count check: ", e);
    }
  }

  /**
   * Groups the {@link ClassicWatermarkUpdater} and (optionally) the
   * {@link TotalCountWatermarkUpdater} so the iteration loop can drive both at once.
   */
  static class WatermarkUpdaterSet {
    private final List<WatermarkUpdater> updaters;

    WatermarkUpdaterSet(IcebergMetadataWriter.TableMetadata tableMetadata, String timeZone,
        Map<String, String> propsToUpdate, State stateToUpdate, boolean includeTotalCountWatermark) {
      this.updaters = new ArrayList<>();
      this.updaters.add(new ClassicWatermarkUpdater(tableMetadata.completionWatermark, timeZone, tableMetadata,
          propsToUpdate, stateToUpdate));
      if (includeTotalCountWatermark) {
        this.updaters.add(new TotalCountWatermarkUpdater(tableMetadata.totalCountCompletionWatermark, timeZone,
            tableMetadata, propsToUpdate, stateToUpdate));
      }
    }

    /** Propagates the early-stop check to every updater. */
    void checkForEarlyStop(ZonedDateTime timestampDT, ZonedDateTime now,
        TimeIterator.Granularity granularity) {
      this.updaters.forEach(updater -> updater.checkForEarlyStop(timestampDT, now, granularity));
    }

    /** Returns true once every updater has finished (watermark found or early-stopped). */
    boolean allFinished() {
      return this.updaters.stream().allMatch(WatermarkUpdater::isFinished);
    }

    /** Lets each unfinished updater consume the audit results for the given timestamp. */
    void computeAndUpdate(Map<KafkaAuditCountVerifier.CompletenessType, Boolean> results,
        ZonedDateTime timestampDT) {
      this.updaters.stream()
          .filter(updater -> !updater.isFinished())
          .forEach(updater -> updater.computeAndUpdate(results, timestampDT));
    }
  }

  /**
   * A stateful base class for watermark updaters.
   * The updater starts with finished=false state.
   * Then computeAndUpdate() is called multiple times with the parameters:
   * 1. The completeness audit results within (datepartition-1, datepartition)
   * 2. the datepartition timestamp
   * The method is called multiple times in descending order of the datepartition timestamp.
   * <p>
   * When the audit result is complete for a timestamp, it updates below entities:
   * 1. the properties in {@link IcebergMetadataWriter.TableMetadata}
   * 2. {@link gobblin.configuration.State}
   * 3. the completionWatermark in {@link IcebergMetadataWriter.TableMetadata}
   * And it turns into finished=true state, in which the following computeAndUpdate() calls are skipped.
   */
  static abstract class WatermarkUpdater {
    protected final long previousWatermark;
    protected final ZonedDateTime prevWatermarkDT;
    protected final String timeZone;
    protected boolean finished = false;
    protected final IcebergMetadataWriter.TableMetadata tableMetadata;
    protected final Map<String, String> propsToUpdate;
    protected final State stateToUpdate;

    public WatermarkUpdater(long previousWatermark, String timeZone, IcebergMetadataWriter.TableMetadata tableMetadata,
        Map<String, String> propsToUpdate, State stateToUpdate) {
      this.previousWatermark = previousWatermark;
      this.timeZone = timeZone;
      this.tableMetadata = tableMetadata;
      this.propsToUpdate = propsToUpdate;
      this.stateToUpdate = stateToUpdate;
      prevWatermarkDT = Instant.ofEpochMilli(previousWatermark).atZone(ZoneId.of(this.timeZone));
    }

    public void computeAndUpdate(Map<KafkaAuditCountVerifier.CompletenessType, Boolean> results,
        ZonedDateTime timestampDT) {
      if (finished) {
        return;
      }
      computeAndUpdateInternal(results, timestampDT);
    }

    protected abstract void computeAndUpdateInternal(Map<KafkaAuditCountVerifier.CompletenessType, Boolean> results,
        ZonedDateTime timestampDT);

    protected boolean isFinished() {
      return this.finished;
    }

    protected void setFinished() {
      this.finished = true;
    }

    // Marks the updater finished once the iteration reaches timestamps at or behind the previous
    // watermark (no further progress is possible beyond what was already recorded).
    protected void checkForEarlyStop(ZonedDateTime timestampDT, ZonedDateTime now,
        TimeIterator.Granularity granularity) {
      if (isFinished()
          || (timestampDT.isAfter(this.prevWatermarkDT)
          && TimeIterator.durationBetween(this.prevWatermarkDT, now, granularity) > 0)) {
        return;
      }
      setFinished();
    }
  }

  @VisibleForTesting
  void setAuditCountVerifier(KafkaAuditCountVerifier auditCountVerifier) {
    this.auditCountVerifier = auditCountVerifier;
  }

  /** Updater driven by the classic (tier-based) completeness audit result. */
  static class ClassicWatermarkUpdater extends WatermarkUpdater {
    public ClassicWatermarkUpdater(long previousWatermark, String timeZone,
        IcebergMetadataWriter.TableMetadata tableMetadata, Map<String, String> propsToUpdate, State stateToUpdate) {
      super(previousWatermark, timeZone, tableMetadata, propsToUpdate, stateToUpdate);
    }

    @Override
    protected void computeAndUpdateInternal(Map<KafkaAuditCountVerifier.CompletenessType, Boolean> results,
        ZonedDateTime timestampDT) {
      if (!results.getOrDefault(KafkaAuditCountVerifier.CompletenessType.ClassicCompleteness, false)) {
        return;
      }
      setFinished();
      long updatedWatermark = timestampDT.toInstant().toEpochMilli();
      // Always record in state; only advance the table-property watermark monotonically.
      this.stateToUpdate.setProp(
          String.format(STATE_COMPLETION_WATERMARK_KEY_OF_TABLE,
              this.tableMetadata.table.get().name().toLowerCase(Locale.ROOT)),
          updatedWatermark);
      if (updatedWatermark > this.previousWatermark) {
        log.info(String.format("Updating %s for %s from %s to %s", COMPLETION_WATERMARK_KEY,
            this.tableMetadata.table.get().name(), this.previousWatermark, updatedWatermark));
        this.propsToUpdate.put(COMPLETION_WATERMARK_KEY, String.valueOf(updatedWatermark));
        this.propsToUpdate.put(COMPLETION_WATERMARK_TIMEZONE_KEY, this.timeZone);
        this.tableMetadata.completionWatermark = updatedWatermark;
      }
    }
  }

  /** Updater driven by the total-count completeness audit result. */
  static class TotalCountWatermarkUpdater extends WatermarkUpdater {
    public TotalCountWatermarkUpdater(long previousWatermark, String timeZone,
        IcebergMetadataWriter.TableMetadata tableMetadata, Map<String, String> propsToUpdate, State stateToUpdate) {
      super(previousWatermark, timeZone, tableMetadata, propsToUpdate, stateToUpdate);
    }

    @Override
    protected void computeAndUpdateInternal(Map<KafkaAuditCountVerifier.CompletenessType, Boolean> results,
        ZonedDateTime timestampDT) {
      if (!results.getOrDefault(KafkaAuditCountVerifier.CompletenessType.TotalCountCompleteness, false)) {
        return;
      }
      setFinished();
      long updatedWatermark = timestampDT.toInstant().toEpochMilli();
      // Always record in state; only advance the table-property watermark monotonically.
      this.stateToUpdate.setProp(
          String.format(STATE_TOTAL_COUNT_COMPLETION_WATERMARK_KEY_OF_TABLE,
              this.tableMetadata.table.get().name().toLowerCase(Locale.ROOT)),
          updatedWatermark);
      if (updatedWatermark > this.previousWatermark) {
        log.info(String.format("Updating %s for %s from %s to %s", TOTAL_COUNT_COMPLETION_WATERMARK_KEY,
            this.tableMetadata.table.get().name(), this.previousWatermark, updatedWatermark));
        this.propsToUpdate.put(TOTAL_COUNT_COMPLETION_WATERMARK_KEY, String.valueOf(updatedWatermark));
        this.tableMetadata.totalCountCompletionWatermark = updatedWatermark;
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import java.io.IOException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DeleteFiles;
import org.apache.iceberg.ExpireSnapshots;
import org.apache.iceberg.FindFiles;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.UpdateProperties;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.hive.HiveCatalog;
import org.apache.iceberg.types.Types;
import org.joda.time.DateTime;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Range;
import com.google.common.collect.Sets;
import com.google.common.io.Closer;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.completeness.verifier.KafkaAuditCountVerifier;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.copy.hive.WhitelistBlacklist;
import org.apache.gobblin.hive.AutoCloseableHiveLock;
import org.apache.gobblin.hive.HiveLock;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.metastore.HiveMetaStoreUtils;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.writer.HiveMetadataWriter;
import org.apache.gobblin.hive.writer.MetadataWriter;
import org.apache.gobblin.hive.writer.MetadataWriterKeys;
import org.apache.gobblin.iceberg.Utils.IcebergUtils;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metrics.GobblinMetricsRegistry;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.metrics.kafka.KafkaSchemaRegistry;
import org.apache.gobblin.metrics.kafka.SchemaRegistryException;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.AvroUtils;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.WriterUtils;
import static org.apache.gobblin.iceberg.writer.IcebergMetadataWriterConfigKeys.*;
/**
* This writer is used to calculate iceberg metadata from GMCE and register to iceberg
* iceberg metadata here includes:
* 1. Data files that contained by the table and the metrics of the data files
* 2. Properties of the table (origin table properties, data offset range, high watermark of the GMCE and schema created time)
* 3. Latest schema of the table
*/
@Slf4j
public class IcebergMetadataWriter implements MetadataWriter {
// Critical when there's dataset-level ACL enforced for both data and Iceberg metadata
public static final String USE_DATA_PATH_AS_TABLE_LOCATION = "use.data.path.as.table.location";
public static final String TABLE_LOCATION_SUFFIX = "/_iceberg_metadata/%s";
// Per-GMCE-topic-partition watermark keys stored as Iceberg table properties.
public static final String GMCE_HIGH_WATERMARK_KEY = "gmce.high.watermark.%s";
public static final String GMCE_LOW_WATERMARK_KEY = "gmce.low.watermark.%s";
private final static String EXPIRE_SNAPSHOTS_LOOKBACK_TIME = "gobblin.iceberg.dataset.expire.snapshots.lookBackTime";
private final static String DEFAULT_EXPIRE_SNAPSHOTS_LOOKBACK_TIME = "3d";
// Whitelists/blacklists gating which tables get registered / audit-counted.
private static final String ICEBERG_REGISTRATION_BLACKLIST = "iceberg.registration.blacklist";
private static final String ICEBERG_REGISTRATION_WHITELIST = "iceberg.registration.whitelist";
private static final String ICEBERG_REGISTRATION_AUDIT_COUNT_BLACKLIST = "iceberg.registration.audit.count.blacklist";
private static final String ICEBERG_REGISTRATION_AUDIT_COUNT_WHITELIST = "iceberg.registration.audit.count.whitelist";
private static final String ICEBERG_METADATA_FILE_PERMISSION = "iceberg.metadata.file.permission";
private static final String CREATE_TABLE_TIME = "iceberg.create.table.time";
private static final String SCHEMA_CREATION_TIME_KEY = "schema.creation.time";
private static final String ADDED_FILES_CACHE_EXPIRING_TIME = "added.files.cache.expiring.time";
private static final int DEFAULT_ADDED_FILES_CACHE_EXPIRING_TIME = 1;
// Table-property keys recording consumed offset ranges, one per topic partition.
private static final String OFFSET_RANGE_KEY_PREFIX = "offset.range.";
private static final String OFFSET_RANGE_KEY_FORMAT = OFFSET_RANGE_KEY_PREFIX + "%s";
private static final String DEFAULT_CREATION_TIME = "0";
private static final String SNAPSHOT_EXPIRE_THREADS = "snapshot.expire.threads";
private static final long DEFAULT_WATERMARK = -1L;
/* one of the fields in DataFile entry to describe the location URI of a data file with FS Scheme */
private static final String ICEBERG_FILE_PATH_COLUMN = DataFile.FILE_PATH.name();
// --- Completeness configuration resolved from state in the constructor ---
private final boolean completenessEnabled;
private final boolean totalCountCompletenessEnabled;
private final WhitelistBlacklist completenessWhitelistBlacklist;
private final WhitelistBlacklist totalCountBasedCompletenessWhitelistBlacklist;
private final String timeZone;
private final DateTimeFormatter HOURLY_DATEPARTITION_FORMAT;
private final String newPartitionColumn;
private final String newPartitionColumnType;
private final boolean newPartitionEnabled;
private final WhitelistBlacklist newPartitionTableWhitelistBlacklist;
// Absent when completeness is disabled.
private Optional<KafkaAuditCountVerifier> auditCountVerifier;
private final String auditCheckGranularity;
protected final MetricContext metricContext;
protected EventSubmitter eventSubmitter;
private final WhitelistBlacklist whitelistBlacklist;
private final WhitelistBlacklist auditWhitelistBlacklist;
private final Closer closer = Closer.create();
// Mapping between table-id and currently processed watermark
private final Map<TableIdentifier, Long> tableCurrentWatermarkMap;
// Used to store the relationship between table and the gmce topicPartition
private final Map<TableIdentifier, String> tableTopicPartitionMap;
@Getter
private final KafkaSchemaRegistry schemaRegistry;
// Per-table aggregation buffer; written by write() and committed/cleared on flush.
protected final Map<TableIdentifier, TableMetadata> tableMetadataMap;
@Setter
protected Catalog catalog;
protected final Configuration conf;
// Guards concurrent write() (aggregation) vs. flush() (commit) — see constructor comment.
protected final ReadWriteLock readWriteLock;
private final HiveLock locks;
private final boolean useDataLocationAsTableLocation;
private final ParallelRunner parallelRunner;
// Only initialized when useDataLocationAsTableLocation is true.
private FsPermission permission;
protected State state;
/**
 * Builds the writer from job state: wires the schema registry, Iceberg catalog, metrics,
 * whitelists/blacklists, and (optionally) the completeness-audit machinery.
 *
 * @param state job state providing all configuration keys
 * @throws IOException if the filesystem for the snapshot-expiration runner cannot be obtained
 */
public IcebergMetadataWriter(State state) throws IOException {
this.state = state;
this.schemaRegistry = KafkaSchemaRegistry.get(state.getProperties());
conf = HadoopUtils.getConfFromState(state);
initializeCatalog();
tableTopicPartitionMap = new HashMap<>();
tableMetadataMap = new HashMap<>();
tableCurrentWatermarkMap = new HashMap<>();
List<Tag<?>> tags = Lists.newArrayList();
String clusterIdentifier = ClustersNames.getInstance().getClusterName();
tags.add(new Tag<>(MetadataWriterKeys.CLUSTER_IDENTIFIER_KEY_NAME, clusterIdentifier));
metricContext = closer.register(
GobblinMetricsRegistry.getInstance().getMetricContext(state, IcebergMetadataWriter.class, tags));
this.eventSubmitter =
new EventSubmitter.Builder(this.metricContext, MetadataWriterKeys.METRICS_NAMESPACE_ICEBERG_WRITER).build();
this.whitelistBlacklist = new WhitelistBlacklist(state.getProp(ICEBERG_REGISTRATION_WHITELIST, ""),
state.getProp(ICEBERG_REGISTRATION_BLACKLIST, ""));
this.auditWhitelistBlacklist = new WhitelistBlacklist(state.getProp(ICEBERG_REGISTRATION_AUDIT_COUNT_WHITELIST, ""),
state.getProp(ICEBERG_REGISTRATION_AUDIT_COUNT_BLACKLIST, ""));
// Use rw-lock to make it thread-safe when flush and write(which is essentially aggregate & reading metadata),
// are called in separate threads.
readWriteLock = new ReentrantReadWriteLock();
this.locks = new HiveLock(state.getProperties());
parallelRunner = closer.register(new ParallelRunner(state.getPropAsInt(SNAPSHOT_EXPIRE_THREADS, 20),
FileSystem.get(HadoopUtils.getConfFromState(state))));
useDataLocationAsTableLocation = state.getPropAsBoolean(USE_DATA_PATH_AS_TABLE_LOCATION, false);
if (useDataLocationAsTableLocation) {
// Metadata files live beside the data, so they need an explicit permission setting.
permission =
HadoopUtils.deserializeFsPermission(state, ICEBERG_METADATA_FILE_PERMISSION,
FsPermission.getDefault());
}
this.completenessEnabled = state.getPropAsBoolean(ICEBERG_COMPLETENESS_ENABLED, DEFAULT_ICEBERG_COMPLETENESS);
this.totalCountCompletenessEnabled = state.getPropAsBoolean(ICEBERG_TOTAL_COUNT_COMPLETENESS_ENABLED,
DEFAULT_ICEBERG_TOTAL_COUNT_COMPLETENESS);
this.completenessWhitelistBlacklist = new WhitelistBlacklist(state.getProp(ICEBERG_COMPLETENESS_WHITELIST, ""),
state.getProp(ICEBERG_COMPLETENESS_BLACKLIST, ""));
this.totalCountBasedCompletenessWhitelistBlacklist = new WhitelistBlacklist(
state.getProp(ICEBERG_TOTAL_COUNT_COMPLETENESS_WHITELIST, ""),
state.getProp(ICEBERG_TOTAL_COUNT_COMPLETENESS_BLACKLIST, ""));
this.timeZone = state.getProp(TIME_ZONE_KEY, DEFAULT_TIME_ZONE);
this.HOURLY_DATEPARTITION_FORMAT = DateTimeFormatter.ofPattern(DATEPARTITION_FORMAT)
.withZone(ZoneId.of(this.timeZone));
// Verifier is only constructed when completeness is enabled; Optional.absent() otherwise.
this.auditCountVerifier = Optional.fromNullable(this.completenessEnabled ? new KafkaAuditCountVerifier(state) : null);
this.newPartitionColumn = state.getProp(NEW_PARTITION_KEY, DEFAULT_NEW_PARTITION);
this.newPartitionColumnType = state.getProp(NEW_PARTITION_TYPE_KEY, DEFAULT_PARTITION_COLUMN_TYPE);
this.newPartitionEnabled = state.getPropAsBoolean(ICEBERG_NEW_PARTITION_ENABLED, DEFAULT_ICEBERG_NEW_PARTITION_ENABLED);
this.newPartitionTableWhitelistBlacklist = new WhitelistBlacklist(state.getProp(ICEBERG_NEW_PARTITION_WHITELIST, ""),
state.getProp(ICEBERG_NEW_PARTITION_BLACKLIST, ""));
this.auditCheckGranularity = state.getProp(AUDIT_CHECK_GRANULARITY, DEFAULT_AUDIT_CHECK_GRANULARITY);
}
// Test hook: replace the audit-count verifier (e.g. with a mock) after construction.
@VisibleForTesting
protected void setAuditCountVerifier(KafkaAuditCountVerifier verifier) {
this.auditCountVerifier = Optional.of(verifier);
}
// Loads the Iceberg HiveCatalog from the Hadoop configuration; overridable for other catalogs.
protected void initializeCatalog() {
catalog = CatalogUtil.loadCatalog(HiveCatalog.class.getName(), "HiveCatalog", new HashMap<>(), conf);
}
/**
 * Looks up (lazily loading and caching) the Iceberg {@link Table} for the given identifier on the
 * per-table {@link TableMetadata} entry in {@link #tableMetadataMap}.
 *
 * @throws NoSuchTableException if the catalog has no table for {@code tid}
 */
private org.apache.iceberg.Table getIcebergTable(TableIdentifier tid) throws NoSuchTableException {
  TableMetadata metadata = tableMetadataMap.computeIfAbsent(tid, ignored -> new TableMetadata(this.conf));
  if (!metadata.table.isPresent()) {
    // First access for this table: load it from the catalog and cache the handle.
    metadata.table = Optional.of(catalog.loadTable(tid));
  }
  return metadata.table.get();
}
/**
 * The method is used to get current watermark of the gmce topic partition for a table, and persist the value
 * in the {@link #tableMetadataMap} as a side effect.
 * NOTE(review): the persistence happens indirectly via {@link #getIcebergTable}'s computeIfAbsent
 * on tableMetadataMap; this method itself does not write to tableCurrentWatermarkMap — confirm
 * that the caller populates that map.
 *
 * Make the watermark config name contains topicPartition in case we change the gmce topic name for some reason
 */
private Long getAndPersistCurrentWatermark(TableIdentifier tid, String topicPartition) {
if (tableCurrentWatermarkMap.containsKey(tid)) {
return tableCurrentWatermarkMap.get(tid);
}
org.apache.iceberg.Table icebergTable;
Long currentWatermark = DEFAULT_WATERMARK;
try {
icebergTable = getIcebergTable(tid);
} catch (NoSuchTableException e) {
// Table not created yet: fall back to the sentinel watermark (-1).
return currentWatermark;
}
currentWatermark =
icebergTable.properties().containsKey(String.format(GMCE_HIGH_WATERMARK_KEY, topicPartition)) ? Long.parseLong(
icebergTable.properties().get(String.format(GMCE_HIGH_WATERMARK_KEY, topicPartition))) : DEFAULT_WATERMARK;
return currentWatermark;
}
/**
 * The write method will be responsible for processing gmce and aggregating the metadata.
 * The logic of this function will be:
 * 1. Check whether a table exists, if not then create a iceberg table
 *    - If completeness is enabled, Add new parititon column to
 *      table {#NEW_PARTITION_KEY}
 * 2. Compute schema from the gmce and update the cache for candidate schemas
 * 3. Do the required operation of the gmce, i.e. addFile, rewriteFile, dropFile or change_property.
 *
 * Note: this method only aggregates the metadata in cache without committing,
 * while the actual commit will be done in flush method (except rewrite and drop methods where preserving older file
 * information increases the memory footprints, therefore we would like to flush them eagerly).
 *
 * @param gmce the metadata change event to process
 * @param newSpecsMap hive specs for files being added/rewritten in
 * @param oldSpecsMap hive specs for files being rewritten out/dropped
 * @param tableSpec hive spec identifying the target table
 * @throws IOException on failures while aggregating metadata
 */
public void write(GobblinMetadataChangeEvent gmce, Map<String, Collection<HiveSpec>> newSpecsMap,
Map<String, Collection<HiveSpec>> oldSpecsMap, HiveSpec tableSpec) throws IOException {
TableIdentifier tid = TableIdentifier.of(tableSpec.getTable().getDbName(), tableSpec.getTable().getTableName());
TableMetadata tableMetadata = tableMetadataMap.computeIfAbsent(tid, t -> new TableMetadata(this.conf));
Table table;
try {
table = getIcebergTable(tid);
} catch (NoSuchTableException e) {
try {
// drop/change events on a non-existent table are no-ops; anything else creates the table.
if (gmce.getOperationType() == OperationType.drop_files ||
gmce.getOperationType() == OperationType.change_property) {
log.warn("Table {} does not exist, skip processing this {} event", tid.toString(), gmce.getOperationType());
return;
}
table = createTable(gmce, tableSpec);
tableMetadata.table = Optional.of(table);
} catch (Exception e1) {
// Best-effort: skip this event rather than fail the whole writer on table-creation errors.
log.error("skip processing {} for table {}.{} due to error when creating table", gmce.toString(),
tableSpec.getTable().getDbName(), tableSpec.getTable().getTableName());
log.debug(e1.toString());
return;
}
}
// Seed the in-memory watermarks from the table properties before aggregating this event.
if(tableMetadata.completenessEnabled) {
tableMetadata.completionWatermark = Long.parseLong(table.properties().getOrDefault(COMPLETION_WATERMARK_KEY,
String.valueOf(DEFAULT_COMPLETION_WATERMARK)));
if (tableMetadata.totalCountCompletenessEnabled) {
tableMetadata.totalCountCompletionWatermark = Long.parseLong(
table.properties().getOrDefault(TOTAL_COUNT_COMPLETION_WATERMARK_KEY,
String.valueOf(DEFAULT_COMPLETION_WATERMARK)));
}
}
computeCandidateSchema(gmce, tid, tableSpec);
tableMetadata.ensureTxnInit();
tableMetadata.lowestGMCEEmittedTime = Long.min(tableMetadata.lowestGMCEEmittedTime, gmce.getGMCEmittedTime());
switch (gmce.getOperationType()) {
case add_files: {
updateTableProperty(tableSpec, tid, gmce);
addFiles(gmce, newSpecsMap, table, tableMetadata);
// Audit counts are only tracked for tables accepted by the audit whitelist/blacklist.
if (gmce.getAuditCountMap() != null && auditWhitelistBlacklist.acceptTable(tableSpec.getTable().getDbName(),
tableSpec.getTable().getTableName())) {
tableMetadata.serializedAuditCountMaps.add(gmce.getAuditCountMap());
}
if (gmce.getTopicPartitionOffsetsRange() != null) {
mergeOffsets(gmce, tid);
}
break;
}
case rewrite_files: {
updateTableProperty(tableSpec, tid, gmce);
rewriteFiles(gmce, newSpecsMap, oldSpecsMap, table, tableMetadata);
break;
}
case drop_files: {
dropFiles(gmce, oldSpecsMap, table, tableMetadata, tid);
break;
}
case change_property: {
updateTableProperty(tableSpec, tid, gmce);
if (gmce.getTopicPartitionOffsetsRange() != null) {
mergeOffsets(gmce, tid);
}
log.info("No file operation need to be performed by Iceberg Metadata Writer at this point.");
break;
}
default: {
log.error("unsupported operation {}", gmce.getOperationType().toString());
return;
}
}
}
private HashMap<String, List<Range>> getLastOffset(TableMetadata tableMetadata) {
HashMap<String, List<Range>> offsets = new HashMap<>();
if (tableMetadata.lastProperties.isPresent()) {
for (Map.Entry<String, String> entry : tableMetadata.lastProperties.get().entrySet()) {
if (entry.getKey().startsWith(OFFSET_RANGE_KEY_PREFIX)) {
List<Range> ranges = Arrays.asList(entry.getValue().split(ConfigurationKeys.LIST_DELIMITER_KEY))
.stream()
.map(s -> {
List<String> rangePair = Splitter.on(ConfigurationKeys.RANGE_DELIMITER_KEY).splitToList(s);
return Range.openClosed(Long.parseLong(rangePair.get(0)), Long.parseLong(rangePair.get(1)));})
.collect(Collectors.toList());
offsets.put(entry.getKey().substring(OFFSET_RANGE_KEY_PREFIX.length()), ranges);
}
}
}
return offsets;
}
/**
* The side effect of this method is to update the offset-range of the table identified by
* the given {@link TableIdentifier} with the input {@link GobblinMetadataChangeEvent}
*/
private void mergeOffsets(GobblinMetadataChangeEvent gmce, TableIdentifier tid) {
TableMetadata tableMetadata = tableMetadataMap.computeIfAbsent(tid, t -> new TableMetadata(this.conf));
tableMetadata.dataOffsetRange = Optional.of(tableMetadata.dataOffsetRange.or(() -> getLastOffset(tableMetadata)));
Map<String, List<Range>> offsets = tableMetadata.dataOffsetRange.get();
for (Map.Entry<String, String> entry : gmce.getTopicPartitionOffsetsRange().entrySet()) {
List<String> rangePair = Splitter.on(ConfigurationKeys.RANGE_DELIMITER_KEY).splitToList(entry.getValue());
Range range = Range.openClosed(Long.parseLong(rangePair.get(0)), Long.parseLong(rangePair.get(1)));
if (range.lowerEndpoint().equals(range.upperEndpoint())) {
//Ignore this case
continue;
}
List<Range> existRanges = offsets.getOrDefault(entry.getKey(), new ArrayList<>());
List<Range> newRanges = new ArrayList<>();
for (Range r : existRanges) {
if (range.isConnected(r)) {
range = range.span(r);
} else {
newRanges.add(r);
}
}
newRanges.add(range);
Collections.sort(newRanges, new Comparator<Range>() {
@Override
public int compare(Range o1, Range o2) {
return o1.lowerEndpoint().compareTo(o2.lowerEndpoint());
}
});
offsets.put(entry.getKey(), newRanges);
}
}
protected void updateTableProperty(HiveSpec tableSpec, TableIdentifier tid, GobblinMetadataChangeEvent gmce) {
org.apache.hadoop.hive.metastore.api.Table table = HiveMetaStoreUtils.getTable(tableSpec.getTable());
TableMetadata tableMetadata = tableMetadataMap.computeIfAbsent(tid, t -> new TableMetadata(this.conf));
tableMetadata.newProperties = Optional.of(IcebergUtils.getTableProperties(table));
String nativeName = tableMetadata.datasetName;
String topic = nativeName.substring(nativeName.lastIndexOf("/") + 1);
tableMetadata.newProperties.get().put(TOPIC_NAME_KEY, topic);
}
  /**
   * Compute the candidate schema from the gmce.
   * If the schema source is schemaRegistry, we will use the schema creation time as the schema version to compute candidate schema and determine latest schema
   * If the schema does not contain creation time, we will treat it the same as when schema source is event
   * If the schema source is event, we will put it as default creation time, during flush, if we only have one candidate with default creation time,
   * we'll use that to update schema.
   * @param gmce the metadata change event carrying the (avro) table schema
   * @param tid identifier of the Iceberg table being written
   * @param spec Hive spec of the table, used to translate the avro schema into an Iceberg one
   */
  private void computeCandidateSchema(GobblinMetadataChangeEvent gmce, TableIdentifier tid, HiveSpec spec) {
    Table table = getIcebergTable(tid);
    TableMetadata tableMetadata = tableMetadataMap.computeIfAbsent(tid, t -> new TableMetadata(this.conf));
    org.apache.hadoop.hive.metastore.api.Table hiveTable = HiveMetaStoreUtils.getTable(spec.getTable());
    // Lazily seed the cached properties / last schema version from the live Iceberg table.
    tableMetadata.lastProperties = Optional.of(tableMetadata.lastProperties.or(() -> table.properties()));
    Map<String, String> props = tableMetadata.lastProperties.get();
    tableMetadata.lastSchemaVersion = Optional.of(
        tableMetadata.lastSchemaVersion.or(() -> props.getOrDefault(SCHEMA_CREATION_TIME_KEY, DEFAULT_CREATION_TIME)));
    String lastSchemaVersion = tableMetadata.lastSchemaVersion.get();
    // Candidate schemas live in a time-expiring cache keyed by schema creation time.
    tableMetadata.candidateSchemas = Optional.of(tableMetadata.candidateSchemas.or(() -> CacheBuilder.newBuilder()
        .expireAfterAccess(conf.getInt(MetadataWriter.CACHE_EXPIRING_TIME,
            MetadataWriter.DEFAULT_CACHE_EXPIRING_TIME), TimeUnit.HOURS)
        .build()));
    Cache<String, Pair<Schema, String>> candidate = tableMetadata.candidateSchemas.get();
    try {
      switch (gmce.getSchemaSource()) {
        case SCHEMAREGISTRY: {
          org.apache.avro.Schema schema = new org.apache.avro.Schema.Parser().parse(gmce.getTableSchema());
          String createdOn = AvroUtils.getSchemaCreationTime(schema);
          if (createdOn == null) {
            // No creation time available: treat like an EVENT-sourced schema (default key).
            candidate.put(DEFAULT_CREATION_TIME,
                Pair.of(IcebergUtils.getIcebergSchema(gmce.getTableSchema(), hiveTable).tableSchema, gmce.getTableSchema()));
          } else if (!createdOn.equals(lastSchemaVersion)) {
            // Only record schemas newer than (different from) the last applied version.
            candidate.put(createdOn, Pair.of(IcebergUtils.getIcebergSchema(gmce.getTableSchema(), hiveTable).tableSchema, gmce.getTableSchema()));
          }
          break;
        }
        case EVENT: {
          candidate.put(DEFAULT_CREATION_TIME,
              Pair.of(IcebergUtils.getIcebergSchema(gmce.getTableSchema(), hiveTable).tableSchema, gmce.getTableSchema()));
          break;
        }
        case NONE: {
          log.debug("Schema source set to be none, will ignore the schema");
          break;
        }
        default: {
          throw new IOException(String.format("unsupported schema source %s", gmce.getSchemaSource()));
        }
      }
    } catch (Exception e) {
      // Best-effort: a bad schema should not fail the whole write; flush simply won't see it.
      log.error("Cannot get candidate schema from event due to", e);
    }
  }
/**
* Add a partition column to the schema and partition spec
* @param table incoming iceberg table
* @param fieldName name of partition column
* @param type datatype of partition column
* @return table with updated schema and partition spec
*/
private Table addPartitionToIcebergTable(Table table, String fieldName, String type) {
boolean isTableUpdated = false;
if(!table.schema().columns().stream().anyMatch(x -> x.name().equalsIgnoreCase(fieldName))) {
table.updateSchema().addColumn(fieldName, Types.fromPrimitiveString(type)).commit();
isTableUpdated = true;
}
if(!table.spec().fields().stream().anyMatch(x -> x.name().equalsIgnoreCase(fieldName))) {
table.updateSpec().addField(fieldName).commit();
isTableUpdated = true;
}
if (isTableUpdated) {
table.refresh();
}
return table;
}
  /**
   * Create the Iceberg table described by the GMCE's schema and the Hive spec.
   * When {@code useDataLocationAsTableLocation} is set, the table location is derived from the
   * dataset's native name and its directory is pre-created with the configured permission.
   *
   * NOTE(review): when another process creates the table concurrently, the
   * {@link AlreadyExistsException} is swallowed and this method returns {@code null} —
   * confirm callers handle a null return (or re-fetch the table) in that race.
   *
   * @param gmce event carrying the table schema and dataset identifier
   * @param spec Hive spec providing db/table names and table properties
   * @return the newly created Iceberg table, or null if it already existed
   * @throws IOException if the table location cannot be prepared
   */
  protected Table createTable(GobblinMetadataChangeEvent gmce, HiveSpec spec) throws IOException {
    String schema = gmce.getTableSchema();
    org.apache.hadoop.hive.metastore.api.Table table = HiveMetaStoreUtils.getTable(spec.getTable());
    IcebergUtils.IcebergDataAndPartitionSchema schemas = IcebergUtils.getIcebergSchema(schema, table);
    TableIdentifier tid = TableIdentifier.of(table.getDbName(), table.getTableName());
    Schema tableSchema = schemas.tableSchema;
    Preconditions.checkState(tableSchema != null, "Table schema cannot be null when creating a table");
    PartitionSpec partitionSpec = IcebergUtils.getPartitionSpec(tableSchema, schemas.partitionSchema);
    Table icebergTable = null;
    String tableLocation = null;
    if (useDataLocationAsTableLocation) {
      tableLocation = gmce.getDatasetIdentifier().getNativeName() + String.format(TABLE_LOCATION_SUFFIX, table.getDbName());
      //Set the path permission
      Path tablePath = new Path(tableLocation);
      WriterUtils.mkdirsWithRecursivePermission(tablePath.getFileSystem(conf), tablePath, permission);
    }
    try (Timer.Context context = metricContext.timer(CREATE_TABLE_TIME).time()) {
      icebergTable =
          catalog.createTable(tid, tableSchema, partitionSpec, tableLocation, IcebergUtils.getTableProperties(table));
      // We should set the avro schema literal when creating the table.
      icebergTable.updateProperties().set(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), schema).commit();
      log.info("Created table {}, schema: {} partition spec: {}", tid, tableSchema, partitionSpec);
    } catch (AlreadyExistsException e) {
      log.warn("table {} already exist, there may be some other process try to create table concurrently", tid);
    }
    return icebergTable;
  }
  /**
   * Handle a rewrite_files GMCE: atomically replace a set of old data files with new ones.
   * Two special cases are handled when the old files are absent from the table:
   * the event is a duplicate (one of the new files already registered) and becomes a no-op,
   * or the old files never existed and the event degrades to a plain append.
   *
   * @param gmce event listing old/new files (or old-file prefixes)
   * @param newSpecsMap parent-path -> HiveSpecs for the new files
   * @param oldSpecsMap parent-path -> HiveSpecs for the old files
   * @param table live Iceberg table
   * @param tableMetadata accumulated in-memory state for this table
   * @throws IOException when partition values cannot be resolved
   */
  protected void rewriteFiles(GobblinMetadataChangeEvent gmce, Map<String, Collection<HiveSpec>> newSpecsMap,
      Map<String, Collection<HiveSpec>> oldSpecsMap, Table table, TableMetadata tableMetadata) throws IOException {
    PartitionSpec partitionSpec = table.spec();
    tableMetadata.ensureTxnInit();
    Set<DataFile> newDataFiles = new HashSet<>();
    getIcebergDataFilesToBeAddedHelper(gmce, table, newSpecsMap, tableMetadata)
        .forEach(dataFile -> {
          newDataFiles.add(dataFile);
          // Track the path so later events do not add the same file twice.
          tableMetadata.addedFiles.put(dataFile.path(), "");
        });
    Set<DataFile> oldDataFiles = getIcebergDataFilesToBeDeleted(gmce, table, newSpecsMap, oldSpecsMap, partitionSpec);
    // Dealing with the case when old file doesn't exist, in which it could either be converted into noop or AppendFile.
    if (oldDataFiles.isEmpty() && !newDataFiles.isEmpty()) {
      //We randomly check whether one of the new data files already exists in the db to avoid reprocessing re-write events
      DataFile dataFile = newDataFiles.iterator().next();
      Expression exp = Expressions.startsWith(ICEBERG_FILE_PATH_COLUMN, dataFile.path().toString());
      if (FindFiles.in(table).withMetadataMatching(exp).collect().iterator().hasNext()) {
        //This means this re-write event is duplicated with the one we already handled, so noop.
        return;
      }
      // This is the case when the files to be deleted do not exist in table
      // So we directly call addFiles interface to add new files into the table.
      // Note that this AppendFiles won't be committed here, in contrast to a real rewrite operation
      // where the commit will be called at once to save memory footprints.
      AppendFiles appendFiles = tableMetadata.getOrInitAppendFiles();
      newDataFiles.forEach(appendFiles::appendFile);
      return;
    }
    // True rewrite: commits immediately (unlike appends, which are committed at flush time).
    tableMetadata.transaction.get().newRewrite().rewriteFiles(oldDataFiles, newDataFiles).commit();
  }
/**
* Given the GMCE, get the iceberg schema with the origin ID specified by data pipeline which
* is corresponding to the file metrics index.
* @param gmce GMCE emitted by data pipeline
* @return iceberg schema with the origin ID
*/
private Schema getSchemaWithOriginId(GobblinMetadataChangeEvent gmce) {
Schema schemaWithOriginId = null;
if (gmce.getAvroSchemaWithIcebergSchemaID() != null) {
org.apache.iceberg.shaded.org.apache.avro.Schema avroSchema =
new org.apache.iceberg.shaded.org.apache.avro.Schema.Parser().parse(gmce.getAvroSchemaWithIcebergSchemaID());
schemaWithOriginId = AvroSchemaUtil.toIceberg(avroSchema);
}
return schemaWithOriginId;
}
/**
* Deal with both regular file deletions manifested by GMCE(aggregation but no commit),
* and expiring older snapshots(commit).
*/
protected void dropFiles(GobblinMetadataChangeEvent gmce, Map<String, Collection<HiveSpec>> oldSpecsMap, Table table,
TableMetadata tableMetadata, TableIdentifier tid) throws IOException {
PartitionSpec partitionSpec = table.spec();
// Update DeleteFiles in tableMetadata: This is regular file deletion
DeleteFiles deleteFiles = tableMetadata.getOrInitDeleteFiles();
Set<DataFile> oldDataFiles =
getIcebergDataFilesToBeDeleted(gmce, table, new HashMap<>(), oldSpecsMap, partitionSpec);
oldDataFiles.forEach(deleteFiles::deleteFile);
// Update ExpireSnapshots and commit the updates at once: This is for expiring snapshots that are
// beyond look-back allowance for time-travel.
parallelRunner.submitCallable(new Callable<Void>() {
@Override
public Void call() throws Exception {
try {
long olderThan = getExpireSnapshotTime();
long start = System.currentTimeMillis();
ExpireSnapshots expireSnapshots = table.expireSnapshots();
final Table tmpTable = table;
expireSnapshots.deleteWith(new Consumer<String>() {
@Override
public void accept(String file) {
if (file.startsWith(tmpTable.location())) {
tmpTable.io().deleteFile(file);
}
}
}).expireOlderThan(olderThan).commit();
//TODO: emit these metrics to Ingraphs, in addition to metrics for publishing new snapshots and other Iceberg metadata operations.
log.info("Spent {} ms to expire snapshots older than {} ({}) in table {}", System.currentTimeMillis() - start,
new DateTime(olderThan).toString(), olderThan, tid.toString());
} catch (Exception e) {
log.error(String.format("Fail to expire snapshots for table %s due to exception ", tid.toString()), e);
}
return null;
}
}, tid.toString());
}
private long getExpireSnapshotTime() {
PeriodFormatter periodFormatter = new PeriodFormatterBuilder().appendYears()
.appendSuffix("y")
.appendMonths()
.appendSuffix("M")
.appendDays()
.appendSuffix("d")
.appendHours()
.appendSuffix("h")
.appendMinutes()
.appendSuffix("m")
.toFormatter();
return DateTime.now()
.minus(periodFormatter.parsePeriod(
conf.get(EXPIRE_SNAPSHOTS_LOOKBACK_TIME, DEFAULT_EXPIRE_SNAPSHOTS_LOOKBACK_TIME)))
.getMillis();
}
protected void addFiles(GobblinMetadataChangeEvent gmce, Map<String, Collection<HiveSpec>> newSpecsMap, Table table,
TableMetadata tableMetadata) {
AppendFiles appendFiles = tableMetadata.getOrInitAppendFiles();
getIcebergDataFilesToBeAddedHelper(gmce, table, newSpecsMap, tableMetadata)
.forEach(dataFile -> {
appendFiles.appendFile(dataFile);
tableMetadata.addedFiles.put(dataFile.path(), "");
});
}
private Stream<DataFile> getIcebergDataFilesToBeAddedHelper(GobblinMetadataChangeEvent gmce, Table table,
Map<String, Collection<HiveSpec>> newSpecsMap,
TableMetadata tableMetadata) {
return getIcebergDataFilesToBeAdded(table, tableMetadata, gmce, gmce.getNewFiles(), table.spec(), newSpecsMap,
IcebergUtils.getSchemaIdMap(getSchemaWithOriginId(gmce), table.schema())).stream()
.filter(dataFile -> tableMetadata.addedFiles.getIfPresent(dataFile.path()) == null);
}
/**
* Method to get a {@link DataFile} collection without metrics information
* This method is used to get files to be deleted from iceberg
* If oldFilePrefixes is specified in gmce, this method will use those prefixes to find old file in iceberg,
* or the method will call method {IcebergUtils.getIcebergDataFileWithMetric} to get DataFile for specific file path
*/
private Set<DataFile> getIcebergDataFilesToBeDeleted(GobblinMetadataChangeEvent gmce, Table table,
Map<String, Collection<HiveSpec>> newSpecsMap, Map<String, Collection<HiveSpec>> oldSpecsMap,
PartitionSpec partitionSpec) throws IOException {
Set<DataFile> oldDataFiles = new HashSet<>();
if (gmce.getOldFilePrefixes() != null) {
Expression exp = Expressions.alwaysFalse();
for (String prefix : gmce.getOldFilePrefixes()) {
// Use both full path and raw path to filter old files
exp = Expressions.or(exp, Expressions.startsWith(ICEBERG_FILE_PATH_COLUMN, prefix));
String rawPathPrefix = new Path(prefix).toUri().getRawPath();
exp = Expressions.or(exp, Expressions.startsWith(ICEBERG_FILE_PATH_COLUMN, rawPathPrefix));
}
long start = System.currentTimeMillis();
oldDataFiles.addAll(Sets.newHashSet(FindFiles.in(table).withMetadataMatching(exp).collect().iterator()));
//Use INFO level log here to get better estimate.
//This shouldn't overwhelm the log since we receive limited number of rewrite_file gmces for one table in a day
log.info("Spent {}ms to query all old files in iceberg.", System.currentTimeMillis() - start);
} else {
for (String file : gmce.getOldFiles()) {
String specPath = new Path(file).getParent().toString();
// For the use case of recompaction, the old path may contains /daily path, in this case, we find the spec from newSpecsMap
StructLike partitionVal = getIcebergPartitionVal(
oldSpecsMap.containsKey(specPath) ? oldSpecsMap.get(specPath) : newSpecsMap.get(specPath), file,
partitionSpec);
oldDataFiles.add(IcebergUtils.getIcebergDataFileWithoutMetric(file, partitionSpec, partitionVal));
}
}
return oldDataFiles;
}
/**
* Method to get dataFiles with metrics information
* This method is used to get files to be added to iceberg
* if completeness is enabled a new field (late) is added to table schema and partition spec
* computed based on datepartition and completion watermark
* This method will call method {IcebergUtils.getIcebergDataFileWithMetric} to get DataFile for specific file path
*/
private Set<DataFile> getIcebergDataFilesToBeAdded(Table table, TableMetadata tableMetadata, GobblinMetadataChangeEvent gmce, List<org.apache.gobblin.metadata.DataFile> files,
PartitionSpec partitionSpec, Map<String, Collection<HiveSpec>> newSpecsMap, Map<Integer, Integer> schemaIdMap) {
Set<DataFile> dataFiles = new HashSet<>();
for (org.apache.gobblin.metadata.DataFile file : files) {
try {
Collection<HiveSpec> hiveSpecs = newSpecsMap.get(new Path(file.getFilePath()).getParent().toString());
StructLike partition = getIcebergPartitionVal(hiveSpecs, file.getFilePath(), partitionSpec);
if(tableMetadata.newPartitionColumnEnabled && gmce.getOperationType() == OperationType.add_files) {
// Assumes first partition value to be partitioned by date
// TODO Find better way to determine a partition value
String datepartition = partition.get(0, null);
partition = addLatePartitionValueToIcebergTable(table, tableMetadata,
hiveSpecs.iterator().next().getPartition().get(), datepartition);
tableMetadata.datePartitions.add(getDateTimeFromDatepartitionString(datepartition));
}
dataFiles.add(IcebergUtils.getIcebergDataFileWithMetric(file, table.spec(), partition, conf, schemaIdMap));
} catch (Exception e) {
log.warn("Cannot get DataFile for {} dur to {}", file.getFilePath(), e);
}
}
return dataFiles;
}
/**
* 1. Add "late" partition column to iceberg table if not exists
* 2. compute "late" partition value based on datepartition and completion watermark
* 3. Default to late=0 if completion watermark check is disabled
* @param table
* @param tableMetadata
* @param hivePartition
* @param datepartition
* @return new iceberg partition value for file
*/
private StructLike addLatePartitionValueToIcebergTable(Table table, TableMetadata tableMetadata, HivePartition hivePartition, String datepartition) {
table = addPartitionToIcebergTable(table, newPartitionColumn, newPartitionColumnType);
PartitionSpec partitionSpec = table.spec();
int late = !tableMetadata.completenessEnabled ? 0 : isLate(datepartition, tableMetadata.completionWatermark);
List<String> partitionValues = new ArrayList<>(hivePartition.getValues());
partitionValues.add(String.valueOf(late));
return IcebergUtils.getPartition(partitionSpec.partitionType(), partitionValues);
}
private int isLate(String datepartition, long previousWatermark) {
ZonedDateTime partitionDateTime = ZonedDateTime.parse(datepartition, HOURLY_DATEPARTITION_FORMAT);
long partitionEpochTime = partitionDateTime.toInstant().toEpochMilli();
if(partitionEpochTime >= previousWatermark) {
return 0;
} else if(partitionEpochTime < previousWatermark
&& partitionDateTime.toLocalDate().equals(getDateFromEpochMillis(previousWatermark))) {
return 1;
} else {
return 2;
}
}
private LocalDate getDateFromEpochMillis(long epochMillis) {
return ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMillis), ZoneId.of(timeZone)).toLocalDate();
}
  /** Parse an hourly date-partition string (per {@code HOURLY_DATEPARTITION_FORMAT}) into a {@link ZonedDateTime}. */
  private ZonedDateTime getDateTimeFromDatepartitionString(String datepartition) {
    return ZonedDateTime.parse(datepartition, HOURLY_DATEPARTITION_FORMAT);
  }
/**
* Obtain Iceberg partition value with a collection of {@link HiveSpec}.
* @param specs A collection of {@link HiveSpec}s.
* @param filePath URI of file, used for logging purpose in this method.
* @param partitionSpec The scheme of partition.
* @return The value of partition based on the given {@link PartitionSpec}.
* @throws IOException
*/
private StructLike getIcebergPartitionVal(Collection<HiveSpec> specs, String filePath, PartitionSpec partitionSpec)
throws IOException {
if (specs == null || specs.isEmpty()) {
throw new IOException("Cannot get hive spec for " + filePath);
}
HivePartition hivePartition = specs.iterator().next().getPartition().orNull();
StructLike partitionVal = hivePartition == null ? null
: IcebergUtils.getPartition(partitionSpec.partitionType(), hivePartition.getValues());
return partitionVal;
}
/**
* We will firstly try to use datasetOffsetRange to get the topic name, as the pattern for datasetOffsetRange key should be ({topicName}-{partitionNumber})
* In case there is no datasetOffsetRange, we fall back to the table property that we set previously for "topic.name"
* @return kafka topic name for this table
*/
protected String getTopicName(TableIdentifier tid, TableMetadata tableMetadata) {
if (tableMetadata.dataOffsetRange.isPresent() && tableMetadata.dataOffsetRange.get().size() != 0) {
String topicPartitionString = tableMetadata.dataOffsetRange.get().keySet().iterator().next();
//In case the topic name is not the table name or the topic name contains '-'
return HiveMetadataWriter.parseTopicNameFromOffsetRangeKey(topicPartitionString);
}
return tableMetadata.newProperties.or(
Maps.newHashMap(tableMetadata.lastProperties.or(getIcebergTable(tid).properties()))).get(TOPIC_NAME_KEY);
}
/**
* For flush of each table, we do the following logic:
* 1. Commit the appendFiles if it exist
* 2. Update the new table property: high watermark of GMCE, data offset range, schema versions
* 3. Update the schema
* 4. Commit the transaction
* 5. reset tableMetadata
* @param dbName
* @param tableName
*/
@Override
public void flush(String dbName, String tableName) throws IOException {
Lock writeLock = readWriteLock.writeLock();
writeLock.lock();
try {
TableIdentifier tid = TableIdentifier.of(dbName, tableName);
TableMetadata tableMetadata = tableMetadataMap.getOrDefault(tid, new TableMetadata(this.conf));
if (tableMetadata.transaction.isPresent()) {
Transaction transaction = tableMetadata.transaction.get();
Map<String, String> props = tableMetadata.newProperties.or(
Maps.newHashMap(tableMetadata.lastProperties.or(getIcebergTable(tid).properties())));
//Set data offset range
setDatasetOffsetRange(tableMetadata, props);
String topicName = getTopicName(tid, tableMetadata);
if (tableMetadata.appendFiles.isPresent()) {
tableMetadata.appendFiles.get().commit();
try (Timer.Context context = new Timer().time()) {
sendAuditCounts(topicName, tableMetadata.serializedAuditCountMaps);
log.info("Sending audit counts for {} took {} ms", topicName, TimeUnit.NANOSECONDS.toMillis(context.stop()));
}
if (tableMetadata.completenessEnabled) {
updateWatermarkWithFilesRegistered(topicName, tableMetadata, props,
tableMetadata.totalCountCompletenessEnabled);
}
}
if (tableMetadata.deleteFiles.isPresent()) {
tableMetadata.deleteFiles.get().commit();
}
// Check and update completion watermark when there are no files to be registered, typically for quiet topics
// The logic is to check the window [currentHour-1,currentHour] and update the watermark if there are no audit counts
if(!tableMetadata.appendFiles.isPresent() && !tableMetadata.deleteFiles.isPresent()
&& tableMetadata.completenessEnabled) {
updateWatermarkWithNoFilesRegistered(topicName, tableMetadata, props,
tableMetadata.totalCountCompletenessEnabled);
}
//Set high waterMark
Long highWatermark = tableCurrentWatermarkMap.get(tid);
props.put(String.format(GMCE_HIGH_WATERMARK_KEY, tableTopicPartitionMap.get(tid)), highWatermark.toString());
//Set low waterMark
props.put(String.format(GMCE_LOW_WATERMARK_KEY, tableTopicPartitionMap.get(tid)),
tableMetadata.lowWatermark.get().toString());
//Set whether to delete metadata files after commit
if (conf.getBoolean(ICEBERG_ENABLE_CUSTOM_METADATA_RETENTION_POLICY, DEFAULT_ICEBERG_ENABLE_CUSTOM_METADATA_RETENTION_POLICY)) {
props.put(TableProperties.METADATA_DELETE_AFTER_COMMIT_ENABLED, Boolean.toString(
conf.getBoolean(TableProperties.METADATA_DELETE_AFTER_COMMIT_ENABLED, TableProperties.METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT)));
props.put(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, Integer.toString(
conf.getInt(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, TableProperties.METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT)));
}
//Update schema(commit)
updateSchema(tableMetadata, props, topicName);
//Update properties
UpdateProperties updateProperties = transaction.updateProperties();
props.forEach(updateProperties::set);
updateProperties.commit();
try (AutoCloseableHiveLock lock = this.locks.getTableLock(dbName, tableName);
Timer.Context context = new Timer().time()) {
transaction.commitTransaction();
log.info("Committing transaction for table {} took {} ms", tid, TimeUnit.NANOSECONDS.toMillis(context.stop()));
}
// Emit GTE for snapshot commits
Snapshot snapshot = tableMetadata.table.get().currentSnapshot();
Map<String, String> currentProps = tableMetadata.table.get().properties();
try (Timer.Context context = new Timer().time()) {
submitSnapshotCommitEvent(snapshot, tableMetadata, dbName, tableName, currentProps, highWatermark);
log.info("Sending snapshot commit event for {} took {} ms", topicName, TimeUnit.NANOSECONDS.toMillis(context.stop()));
}
//Reset the table metadata for next accumulation period
tableMetadata.reset(currentProps, highWatermark);
log.info(String.format("Finish commit of new snapshot %s for table %s", snapshot.snapshotId(), tid));
} else {
log.info("There's no transaction initiated for the table {}", tid);
}
} catch (RuntimeException e) {
throw new IOException(String.format("Fail to flush table %s %s", dbName, tableName), e);
} catch (Exception e) {
throw new IOException(String.format("Fail to flush table %s %s", dbName, tableName), e);
} finally {
writeLock.unlock();
}
}
private CompletenessWatermarkUpdater getWatermarkUpdater(String topicName, TableMetadata tableMetadata,
Map<String, String> propsToUpdate) {
return new CompletenessWatermarkUpdater(topicName, this.auditCheckGranularity, this.timeZone,
tableMetadata, propsToUpdate, this.state, this.auditCountVerifier.get());
}
private void updateWatermarkWithFilesRegistered(String topicName, TableMetadata tableMetadata,
Map<String, String> propsToUpdate, boolean includeTotalCountCompletionWatermark) {
getWatermarkUpdater(topicName, tableMetadata, propsToUpdate)
.run(tableMetadata.datePartitions, includeTotalCountCompletionWatermark);
}
private void updateWatermarkWithNoFilesRegistered(String topicName, TableMetadata tableMetadata,
Map<String, String> propsToUpdate, boolean includeTotalCountCompletionWatermark) {
if (tableMetadata.completionWatermark > DEFAULT_COMPLETION_WATERMARK) {
log.info(String.format("Checking kafka audit for %s on change_property ", topicName));
SortedSet<ZonedDateTime> timestamps = new TreeSet<>();
ZonedDateTime dtAtBeginningOfHour = ZonedDateTime.now(ZoneId.of(this.timeZone)).truncatedTo(ChronoUnit.HOURS);
timestamps.add(dtAtBeginningOfHour);
getWatermarkUpdater(topicName, tableMetadata, propsToUpdate).run(timestamps, includeTotalCountCompletionWatermark);
} else {
log.info(String.format("Need valid watermark, current watermark is %s, Not checking kafka audit for %s",
tableMetadata.completionWatermark, topicName));
}
}
@Override
public void reset(String dbName, String tableName) throws IOException {
this.tableMetadataMap.remove(TableIdentifier.of(dbName, tableName));
}
private void submitSnapshotCommitEvent(Snapshot snapshot, TableMetadata tableMetadata, String dbName,
String tableName, Map<String, String> props, Long highWaterMark) {
GobblinEventBuilder gobblinTrackingEvent =
new GobblinEventBuilder(MetadataWriterKeys.ICEBERG_COMMIT_EVENT_NAME);
long currentSnapshotID = snapshot.snapshotId();
long endToEndLag = System.currentTimeMillis() - tableMetadata.lowestGMCEEmittedTime;
TableIdentifier tid = TableIdentifier.of(dbName, tableName);
String gmceTopicPartition = tableTopicPartitionMap.get(tid);
//Add information to automatically trigger repair jon when data loss happen
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_TOPIC_NAME, gmceTopicPartition.split("-")[0]);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_TOPIC_PARTITION, gmceTopicPartition.split("-")[1]);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_HIGH_WATERMARK, highWaterMark.toString());
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_LOW_WATERMARK,
tableMetadata.lowWatermark.get().toString());
//Add information for lag monitoring
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.LAG_KEY_NAME, Long.toString(endToEndLag));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.SNAPSHOT_KEY_NAME, Long.toString(currentSnapshotID));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.MANIFEST_LOCATION, snapshot.manifestListLocation());
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.SNAPSHOT_INFORMATION_KEY_NAME,
Joiner.on(",").withKeyValueSeparator("=").join(snapshot.summary()));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.ICEBERG_TABLE_KEY_NAME, tableName);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.ICEBERG_DATABASE_KEY_NAME, dbName);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.DATASET_HDFS_PATH, tableMetadata.datasetName);
for (Map.Entry<String, String> entry : props.entrySet()) {
if (entry.getKey().startsWith(OFFSET_RANGE_KEY_PREFIX)) {
gobblinTrackingEvent.addMetadata(entry.getKey(), entry.getValue());
}
}
if (tableMetadata.completenessEnabled) {
gobblinTrackingEvent.addMetadata(COMPLETION_WATERMARK_KEY, Long.toString(tableMetadata.completionWatermark));
if (tableMetadata.totalCountCompletenessEnabled) {
gobblinTrackingEvent.addMetadata(TOTAL_COUNT_COMPLETION_WATERMARK_KEY,
Long.toString(tableMetadata.totalCountCompletionWatermark));
}
}
eventSubmitter.submit(gobblinTrackingEvent);
}
private boolean setDatasetOffsetRange(TableMetadata tableMetadata, Map<String, String> props) {
if (tableMetadata.dataOffsetRange.isPresent() && !tableMetadata.dataOffsetRange.get().isEmpty()) {
for (Map.Entry<String, List<Range>> offsets : tableMetadata.dataOffsetRange.get().entrySet()) {
List<Range> ranges = offsets.getValue();
String rangeString = ranges.stream()
.map(r -> Joiner.on(ConfigurationKeys.RANGE_DELIMITER_KEY).join(r.lowerEndpoint(), r.upperEndpoint()))
.collect(Collectors.joining(ConfigurationKeys.LIST_DELIMITER_KEY));
props.put(String.format(OFFSET_RANGE_KEY_FORMAT, offsets.getKey()), rangeString);
}
return true;
}
return false;
}
private void updateSchema(TableMetadata tableMetadata, Map<String, String> props, String topicName) {
//Set default schema versions
props.put(SCHEMA_CREATION_TIME_KEY, tableMetadata.lastSchemaVersion.or(DEFAULT_CREATION_TIME));
// Update Schema
try {
if (tableMetadata.candidateSchemas.isPresent() && tableMetadata.candidateSchemas.get().size() > 0) {
Cache candidates = tableMetadata.candidateSchemas.get();
//Only have default schema, so either we calculate schema from event or the schema does not have creation time, directly update it
if (candidates.size() == 1 && candidates.getIfPresent(DEFAULT_CREATION_TIME) != null) {
updateSchemaHelper(DEFAULT_CREATION_TIME,
(Pair<Schema, String>) candidates.getIfPresent(DEFAULT_CREATION_TIME), props,
tableMetadata.table.get());
} else {
//update schema if candidates contains the schema that has the same creation time with the latest schema
org.apache.avro.Schema latestSchema =
(org.apache.avro.Schema) schemaRegistry.getLatestSchemaByTopic(topicName);
String creationTime = AvroUtils.getSchemaCreationTime(latestSchema);
if (creationTime == null) {
log.warn(
"Schema from schema registry does not contain creation time, check config for schema registry class");
} else if (candidates.getIfPresent(creationTime) != null) {
updateSchemaHelper(creationTime, (Pair<Schema, String>) candidates.getIfPresent(creationTime), props,
tableMetadata.table.get());
}
}
}
} catch (SchemaRegistryException e) {
log.error("Cannot get schema form schema registry, will not update this schema", e);
}
}
private void updateSchemaHelper(String schemaCreationTime, Pair<Schema, String> schema, Map<String, String> props, Table table) {
try {
table.updateSchema().unionByNameWith(schema.getLeft()).commit();
props.put(SCHEMA_CREATION_TIME_KEY, schemaCreationTime);
props.put(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), schema.getRight());
} catch (Exception e) {
log.error("Cannot update schema to " + schema.toString() + "for table " + table.location(), e);
}
}
  /**
   * Buffers the metadata carried by one {@link GobblinMetadataChangeEvent} into the in-memory
   * {@link TableMetadata} for the destination table, provided the table passes the
   * whitelist/blacklist and the GMCE offset is strictly greater than the table's current
   * watermark (stale or replayed events are skipped).
   *
   * Runs under the read lock so concurrent writes may interleave; presumably the flush path takes
   * the matching write lock to drain buffered state atomically — confirm in the flush implementation.
   *
   * @param recordEnvelope envelope carrying the GMCE record and its watermark
   * @param newSpecsMap pre-computed HiveSpecs for newly arrived files, keyed by path
   * @param oldSpecsMap pre-computed HiveSpecs for removed files/prefixes, keyed by path
   * @param tableSpec HiveSpec identifying the destination db/table
   */
  @Override
  public void writeEnvelope(RecordEnvelope<GenericRecord> recordEnvelope, Map<String, Collection<HiveSpec>> newSpecsMap,
      Map<String, Collection<HiveSpec>> oldSpecsMap, HiveSpec tableSpec) throws IOException {
    Lock readLock = readWriteLock.readLock();
    readLock.lock();
    try {
      GenericRecord genericRecord = recordEnvelope.getRecord();
      // Deep-copy using the record's own schema so GMCE schema evolution does not break decoding.
      GobblinMetadataChangeEvent gmce =
          (GobblinMetadataChangeEvent) SpecificData.get().deepCopy(genericRecord.getSchema(), genericRecord);
      String dbName = tableSpec.getTable().getDbName();
      String tableName = tableSpec.getTable().getTableName();
      if (whitelistBlacklist.acceptTable(dbName, tableName)) {
        TableIdentifier tid = TableIdentifier.of(dbName, tableName);
        // Remember which topic partition feeds this table; the first writer's source wins.
        String topicPartition = tableTopicPartitionMap.computeIfAbsent(tid,
            t -> recordEnvelope.getWatermark().getSource());
        Long currentWatermark = getAndPersistCurrentWatermark(tid, topicPartition);
        Long currentOffset = ((LongWatermark)recordEnvelope.getWatermark().getWatermark()).getValue();
        if (currentOffset > currentWatermark) {
          if (!tableMetadataMap.computeIfAbsent(tid, t -> new TableMetadata(this.conf)).lowWatermark.isPresent()) {
            //This means we haven't register this table or met some error before, we need to reset the low watermark
            tableMetadataMap.get(tid).lowWatermark = Optional.of(currentOffset - 1);
            tableMetadataMap.get(tid).setDatasetName(gmce.getDatasetIdentifier().getNativeName());
            // Feature flags are nested: completeness requires new-partition enablement, and
            // total-count completeness additionally requires completeness enablement.
            if (this.newPartitionEnabled && this.newPartitionTableWhitelistBlacklist.acceptTable(dbName, tableName)) {
              tableMetadataMap.get(tid).newPartitionColumnEnabled = true;
              if (this.completenessEnabled && this.completenessWhitelistBlacklist.acceptTable(dbName, tableName)) {
                tableMetadataMap.get(tid).completenessEnabled = true;
                if (this.totalCountCompletenessEnabled
                    && this.totalCountBasedCompletenessWhitelistBlacklist.acceptTable(dbName, tableName)) {
                  tableMetadataMap.get(tid).totalCountCompletenessEnabled = true;
                }
              }
            }
          }
          write(gmce, newSpecsMap, oldSpecsMap, tableSpec);
          // Advance the in-memory watermark only after a successful write.
          tableCurrentWatermarkMap.put(tid, currentOffset);
        } else {
          log.warn(String.format("Skip processing record for table: %s.%s, GMCE offset: %d, GMCE partition: %s since it has lower watermark",
              dbName, tableName, currentOffset, topicPartition));
        }
      } else {
        log.info(String.format("Skip table %s.%s since it's not selected", tableSpec.getTable().getDbName(),
            tableSpec.getTable().getTableName()));
      }
    } finally {
      readLock.unlock();
    }
  }
  /**
   * Method to send audit counts given a topic name and a list of serialized audit count maps. Called only when new files
   * are added. Default is no-op, must be implemented in a subclass.
   *
   * @param topicName topic whose audit counts are being reported
   * @param serializedAuditCountMaps serialized audit-count maps accumulated for that topic
   */
  public void sendAuditCounts(String topicName, Collection<String> serializedAuditCountMaps) {
  }
  /** Releases every resource previously registered with this writer's {@code closer}. */
  @Override
  public void close() throws IOException {
    this.closer.close();
  }
  /**
   * A collection of Iceberg metadata including {@link Table} itself as well as
   * a set of buffered objects (reflecting the table's {@link org.apache.iceberg.PendingUpdate}s) within the flush interval
   * that aggregates metadata like the locations of arriving / deleted files, the schema,
   * along with other table-level metadata like watermark, offset-range, etc.
   *
   * Also note the difference with {@link org.apache.iceberg.TableMetadata}.
   */
  public static class TableMetadata {
    // Handle to the Iceberg table; absent until the table is first resolved.
    Optional<Table> table = Optional.absent();
    /**
     * The {@link Transaction} object holds the reference of a {@link org.apache.iceberg.BaseTransaction.TransactionTableOperations}
     * that is shared by all individual operation (e.g. {@link AppendFiles}) to ensure atomicity even if commit method
     * is invoked from a individual operation.
     */
    Optional<Transaction> transaction = Optional.absent();
    // Pending append/delete operations, lazily created inside the shared transaction.
    private Optional<AppendFiles> appendFiles = Optional.absent();
    private Optional<DeleteFiles> deleteFiles = Optional.absent();
    // Properties to commit at the next flush vs. the properties applied by the previous flush.
    public Optional<Map<String, String>> newProperties = Optional.absent();
    Optional<Map<String, String>> lastProperties = Optional.absent();
    // Candidate schemas keyed by schema creation time: pairs of (schema, schema literal).
    Optional<Cache<String, Pair<Schema, String>>> candidateSchemas = Optional.absent();
    // Offset ranges of buffered data, keyed by topic partition.
    Optional<Map<String, List<Range>>> dataOffsetRange = Optional.absent();
    // Schema version recorded at the last flush (see reset()).
    Optional<String> lastSchemaVersion = Optional.absent();
    // Low watermark of the GMCE offsets aggregated since the last flush; absent means
    // the table is not yet registered (or a previous error cleared it).
    Optional<Long> lowWatermark = Optional.absent();
    long completionWatermark = DEFAULT_COMPLETION_WATERMARK;
    long totalCountCompletionWatermark = DEFAULT_COMPLETION_WATERMARK;
    // Date partitions observed in this flush interval, most recent first.
    SortedSet<ZonedDateTime> datePartitions = new TreeSet<>(Collections.reverseOrder());
    // Audit-count payloads buffered for sendAuditCounts.
    List<String> serializedAuditCountMaps = new ArrayList<>();
    @Setter
    String datasetName;
    boolean completenessEnabled;
    boolean totalCountCompletenessEnabled;
    boolean newPartitionColumnEnabled;
    Configuration conf;
    // Recently seen file paths; entries expire after ADDED_FILES_CACHE_EXPIRING_TIME hours.
    Cache<CharSequence, String> addedFiles;
    long lowestGMCEEmittedTime = Long.MAX_VALUE;
    /**
     * Always use this method to obtain {@link AppendFiles} object within flush interval
     * if clients want to have the {@link AppendFiles} committed along with other updates in a txn.
     */
    AppendFiles getOrInitAppendFiles() {
      ensureTxnInit();
      if (!this.appendFiles.isPresent()) {
        this.appendFiles = Optional.of(this.transaction.get().newAppend());
      }
      return this.appendFiles.get();
    }
    /** Same contract as {@link #getOrInitAppendFiles()}, for the delete operation. */
    DeleteFiles getOrInitDeleteFiles() {
      ensureTxnInit();
      if (!this.deleteFiles.isPresent()) {
        this.deleteFiles = Optional.of(this.transaction.get().newDelete());
      }
      return this.deleteFiles.get();
    }
    /**
     * Initializing {@link Transaction} object within {@link TableMetadata} when needed.
     */
    void ensureTxnInit() {
      if (!this.transaction.isPresent()) {
        this.transaction = Optional.of(table.get().newTransaction());
      }
    }
    /**
     * Clears all per-flush-interval buffers after a flush, keeping only the committed
     * properties/schema version and the new low watermark for the next interval.
     */
    void reset(Map<String, String> props, Long lowWaterMark) {
      this.lastProperties = Optional.of(props);
      // NOTE(review): Optional.of throws NPE on null — assumes props always contains
      // SCHEMA_CREATION_TIME_KEY (updateSchema sets a default); confirm all callers.
      this.lastSchemaVersion = Optional.of(props.get(SCHEMA_CREATION_TIME_KEY));
      this.transaction = Optional.absent();
      this.deleteFiles = Optional.absent();
      this.appendFiles = Optional.absent();
      // Clean cache and reset to eagerly release unreferenced objects.
      if (this.candidateSchemas.isPresent()) {
        this.candidateSchemas.get().cleanUp();
      }
      this.candidateSchemas = Optional.absent();
      this.dataOffsetRange = Optional.absent();
      this.newProperties = Optional.absent();
      this.lowestGMCEEmittedTime = Long.MAX_VALUE;
      this.lowWatermark = Optional.of(lowWaterMark);
      this.datePartitions.clear();
      this.serializedAuditCountMaps.clear();
    }
    TableMetadata(Configuration conf) {
      this.conf = conf;
      addedFiles = CacheBuilder.newBuilder()
          .expireAfterAccess(this.conf.getInt(ADDED_FILES_CACHE_EXPIRING_TIME, DEFAULT_ADDED_FILES_CACHE_EXPIRING_TIME),
              TimeUnit.HOURS)
          .build();
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.metadata.OperationType;
/**
 * Aggregates everything needed to report a failed metadata registration for one table:
 * the dataset path, the target db/table, the GMCE topic partition with the low/high watermark
 * range of the offsets whose registration failed, the writers that failed, and partition details.
 * Mutable fields ({@code highWatermark}, the partition-value sets) are extended in place while
 * the same operation type keeps failing, so one instance covers a contiguous offset range.
 */
public class GobblinMetadataException extends IOException {
  public String datasetPath;
  public String dbName;
  public String tableName;
  // Topic partition of the GMCE stream this failure belongs to.
  public String GMCETopicPartition;
  // Offset range of GMCEs covered by this failure; highWatermark is advanced by the caller
  // while the failure persists for the same operation type.
  public long highWatermark;
  public long lowWatermark;
  // Identifiers of the metadata writers that failed for this range.
  public List<String> failedWriters;
  public OperationType operationType;
  // Partition values added/dropped by the failed GMCEs; populated by the caller from
  // HiveMetadataWriterWithPartitionInfoException when available.
  public Set<String> addedPartitionValues;
  public Set<String> droppedPartitionValues;
  public List<HiveRegistrationUnit.Column> partitionKeys;
  GobblinMetadataException(String datasetPath, String dbName, String tableName, String GMCETopicPartition, long lowWatermark, long highWatermark,
      List<String> failedWriters, OperationType operationType, List<HiveRegistrationUnit.Column> partitionKeys, Exception exception) {
    super(String.format("failed to flush table %s, %s", dbName, tableName), exception);
    this.datasetPath = datasetPath;
    this.dbName = dbName;
    this.tableName = tableName;
    this.GMCETopicPartition = GMCETopicPartition;
    this.highWatermark = highWatermark;
    this.lowWatermark = lowWatermark;
    this.failedWriters = failedWriters;
    this.operationType = operationType;
    this.addedPartitionValues = new HashSet<>();
    this.droppedPartitionValues = new HashSet<>();
    this.partitionKeys = partitionKeys;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;
import org.apache.commons.collections.CollectionUtils;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.base.Throwables;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicy;
import org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase;
import org.apache.gobblin.hive.spec.HiveSpec;
import org.apache.gobblin.hive.writer.HiveMetadataWriterWithPartitionInfoException;
import org.apache.gobblin.hive.writer.MetadataWriterKeys;
import org.apache.gobblin.hive.writer.MetadataWriter;
import org.apache.gobblin.instrumented.Instrumented;
import org.apache.gobblin.metadata.DataFile;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.metadata.OperationType;
import org.apache.gobblin.metrics.ContextAwareTimer;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.metrics.event.EventSubmitter;
import org.apache.gobblin.metrics.event.GobblinEventBuilder;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.stream.RecordEnvelope;
import org.apache.gobblin.util.ClustersNames;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.ParallelRunner;
import org.apache.gobblin.util.reflection.GobblinConstructorUtils;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
* This is a wrapper of all the MetadataWriters. This writer will manage a list of {@Link MetadataWriter} which do the actual
* metadata registration for different metadata store.
* This writer is responsible for:
* 0. Consuming {@link GobblinMetadataChangeEvent} and execute metadata registration.
* 1. Managing a map of Iceberg tables that it is currently processing
* 2. Ensuring that the underlying metadata writers flush the metadata associated with each Iceberg table
* 3. Call flush method for a specific table on a change in operation type
* 4. Calculate {@Link HiveSpec}s and pass them to metadata writers to register metadata
*/
@SuppressWarnings("UnstableApiUsage")
@Slf4j
public class GobblinMCEWriter implements DataWriter<GenericRecord> {
public static final String GOBBLIN_MCE_WRITER_METRIC_NAMESPACE = GobblinMCEWriter.class.getCanonicalName();
public static final String DEFAULT_HIVE_REGISTRATION_POLICY_KEY = "default.hive.registration.policy";
public static final String FORCE_HIVE_DATABASE_NAME = "force.hive.database.name";
public static final String ACCEPTED_CLUSTER_NAMES = "accepted.cluster.names";
public static final String METADATA_REGISTRATION_THREADS = "metadata.registration.threads";
public static final String METADATA_PARALLEL_RUNNER_TIMEOUT_MILLS = "metadata.parallel.runner.timeout.mills";
public static final String HIVE_PARTITION_NAME = "hive.partition.name";
public static final String GMCE_METADATA_WRITER_CLASSES = "gmce.metadata.writer.classes";
public static final String GMCE_METADATA_WRITER_MAX_ERROR_DATASET = "gmce.metadata.writer.max.error.dataset";
public static final String TRANSIENT_EXCEPTION_MESSAGES_KEY = "gmce.metadata.writer.transient.exception.messages";
public static final String NON_TRANSIENT_EXCEPTION_MESSAGES_KEY = "gmce.metadata.writer.nonTransient.exception.messages";
public static final int DEFUALT_GMCE_METADATA_WRITER_MAX_ERROR_DATASET = 0;
public static final int DEFAULT_ICEBERG_PARALLEL_TIMEOUT_MILLS = 60000;
public static final String TABLE_NAME_DELIMITER = ".";
@Getter
List<MetadataWriter> metadataWriters;
Map<String, TableStatus> tableOperationTypeMap;
@Getter
Map<String, Map<String, List<GobblinMetadataException>>> datasetErrorMap;
Set<String> acceptedClusters;
protected State state;
private final ParallelRunner parallelRunner;
private int parallelRunnerTimeoutMills;
private Map<String, Cache<String, Collection<HiveSpec>>> oldSpecsMaps;
private Map<String, Cache<String, Collection<HiveSpec>>> newSpecsMaps;
private Map<String, List<HiveRegistrationUnit.Column>> partitionKeysMap;
private Closer closer = Closer.create();
protected final AtomicLong recordCount = new AtomicLong(0L);
private final Set<String> currentErrorDatasets = new HashSet<>();
@Setter
private int maxErrorDataset;
@VisibleForTesting
public final MetricContext metricContext;
protected EventSubmitter eventSubmitter;
private final Set<String> transientExceptionMessages;
private final Set<String> nonTransientExceptionMessages;
@VisibleForTesting
public final Map<String, ContextAwareTimer> metadataWriterWriteTimers = new HashMap<>();
@VisibleForTesting
public final Map<String, ContextAwareTimer> metadataWriterFlushTimers = new HashMap<>();
private final ContextAwareTimer hiveSpecComputationTimer;
private final Map<String, ContextAwareTimer> datasetTimers = new HashMap<>();
  /**
   * Aggregation status for one destination table: the operation type currently being aggregated
   * and the GMCE offset range buffered since the table's last flush.
   */
  @AllArgsConstructor
  static class TableStatus {
    // Operation type of the GMCEs in the current range; a change of type triggers a flush.
    OperationType operationType;
    // Native dataset path backing the table.
    String datasetPath;
    // Source (topic partition) of the GMCE watermarks.
    String gmceTopicPartition;
    // Offset range of aggregated GMCEs: low is exclusive (offset - 1 at creation), high inclusive.
    long gmceLowWatermark;
    long gmceHighWatermark;
  }
  /**
   * Creates the wrapper writer: instantiates every configured {@link MetadataWriter}, sets up
   * metrics (per-writer write/flush timers and a HiveSpec-computation timer), and a
   * {@link ParallelRunner} used to compute HiveSpecs in parallel.
   *
   * @param builder unused; present to satisfy the DataWriter construction convention
   * @param properties job state supplying the configuration keys declared on this class
   * @throws IOException if the FileSystem for the parallel runner cannot be obtained
   */
  public GobblinMCEWriter(DataWriterBuilder<Schema, GenericRecord> builder, State properties) throws IOException {
    newSpecsMaps = new HashMap<>();
    oldSpecsMaps = new HashMap<>();
    metadataWriters = new ArrayList<>();
    datasetErrorMap = new HashMap<>();
    partitionKeysMap = new HashMap<>();
    acceptedClusters = properties.getPropAsSet(ACCEPTED_CLUSTER_NAMES, ClustersNames.getInstance().getClusterName());
    state = properties;
    maxErrorDataset = state.getPropAsInt(GMCE_METADATA_WRITER_MAX_ERROR_DATASET, DEFUALT_GMCE_METADATA_WRITER_MAX_ERROR_DATASET);
    List<Tag<?>> tags = Lists.newArrayList();
    String clusterIdentifier = ClustersNames.getInstance().getClusterName();
    tags.add(new Tag<>(MetadataWriterKeys.CLUSTER_IDENTIFIER_KEY_NAME, clusterIdentifier));
    metricContext = Instrumented.getMetricContext(state, this.getClass(), tags);
    eventSubmitter = new EventSubmitter.Builder(metricContext, GOBBLIN_MCE_WRITER_METRIC_NAMESPACE).build();
    // Writers are registered with the closer so close() tears them down in reverse order.
    for (String className : state.getPropAsList(GMCE_METADATA_WRITER_CLASSES, IcebergMetadataWriter.class.getName())) {
      metadataWriters.add(closer.register(GobblinConstructorUtils.invokeConstructor(MetadataWriter.class, className, state)));
      metadataWriterWriteTimers.put(className, metricContext.contextAwareTimer(className + ".write", 1, TimeUnit.HOURS));
      metadataWriterFlushTimers.put(className, metricContext.contextAwareTimer(className + ".flush", 1, TimeUnit.HOURS));
    }
    hiveSpecComputationTimer = metricContext.contextAwareTimer("hiveSpec.computation", 1, TimeUnit.HOURS);
    tableOperationTypeMap = new HashMap<>();
    parallelRunner = closer.register(new ParallelRunner(state.getPropAsInt(METADATA_REGISTRATION_THREADS, 20),
        FileSystem.get(HadoopUtils.getConfFromState(properties))));
    parallelRunnerTimeoutMills =
        state.getPropAsInt(METADATA_PARALLEL_RUNNER_TIMEOUT_MILLS, DEFAULT_ICEBERG_PARALLEL_TIMEOUT_MILLS);
    transientExceptionMessages = new HashSet<>(properties.getPropAsList(TRANSIENT_EXCEPTION_MESSAGES_KEY, ""));
    nonTransientExceptionMessages = new HashSet<>(properties.getPropAsList(NON_TRANSIENT_EXCEPTION_MESSAGES_KEY, ""));
  }
  /** No-op: records are consumed via {@link #writeEnvelope(RecordEnvelope)}, which carries the watermark. */
  @Override
  public void write(GenericRecord record) throws IOException {
    //Do nothing
  }
/**
* This method is used to get the specs map for a list of files. It will firstly look up in cache
* to see if the specs has been calculated previously to reduce the computing time
* We have an assumption here: "for the same path and the same operation type, the specs should be the same"
* @param files List of leaf-level files' names
* @param specsMap The specs map for the files
* @param cache Cache that store the pre-calculated specs information
* @param registerState State used to compute the specs
* @param isPrefix If false, we get the parent file name to calculate the hiveSpec. This is to comply with
* hive's convention on partition which is the parent folder for leaf-level files.
* @throws IOException
*/
private void computeSpecMap(List<String> files, ConcurrentHashMap<String, Collection<HiveSpec>> specsMap,
Cache<String, Collection<HiveSpec>> cache, State registerState, boolean isPrefix) throws IOException {
try (Timer.Context context = hiveSpecComputationTimer.time()) {
HiveRegistrationPolicy policy = HiveRegistrationPolicyBase.getPolicy(registerState);
for (String file : files) {
parallelRunner.submitCallable(new Callable<Void>() {
@Override
public Void call() throws Exception {
try {
Path regPath = isPrefix ? new Path(file) : new Path(file).getParent();
//Use raw path to comply with HDFS federation setting.
Path rawPath = new Path(regPath.toUri().getRawPath());
specsMap.put(regPath.toString(), cache.get(regPath.toString(), () -> policy.getHiveSpecs(rawPath)));
} catch (Throwable e) {
//todo: Emit failed GMCE in the future to easily track the error gmce and investigate the reason for that.
log.warn("Cannot get Hive Spec for {} using policy {} due to:", file, policy.toString());
log.warn(e.getMessage());
}
return null;
}
}, file);
}
parallelRunner.waitForTasks(parallelRunnerTimeoutMills);
}
}
  /** Commits by flushing all buffered table metadata; see {@link #flush()}. */
  @Override
  public void commit() throws IOException {
    this.flush();
  }
  /** No-op: this writer produces no staging output that needs cleaning up. */
  @Override
  public void cleanup() throws IOException {
    //do nothing
  }
  /** Returns the number of GMCE records processed since the counter was last reset by {@link #flush()}. */
  @Override
  public long recordsWritten() {
    return recordCount.get();
  }
  /** Always 0: this writer registers metadata only and writes no data bytes. */
  @Override
  public long bytesWritten() throws IOException {
    return 0;
  }
  /** No data descriptor is exposed for metadata-only output. */
  @Override
  public Descriptor getDataDescriptor() {
    return null;
  }
@Override
public void writeEnvelope(RecordEnvelope<GenericRecord> recordEnvelope) throws IOException {
GenericRecord genericRecord = recordEnvelope.getRecord();
CheckpointableWatermark watermark = recordEnvelope.getWatermark();
Preconditions.checkNotNull(watermark);
//filter out the events that not emitted by accepted clusters
if (!acceptedClusters.contains(genericRecord.get("cluster"))) {
return;
}
// Use schema from record to avoid issue when schema evolution
GobblinMetadataChangeEvent gmce =
(GobblinMetadataChangeEvent) SpecificData.get().deepCopy(genericRecord.getSchema(), genericRecord);
String datasetName = gmce.getDatasetIdentifier().toString();
//remove the old hive spec cache after flush
//Here we assume that new hive spec for one path always be the same(ingestion flow register to same tables)
oldSpecsMaps.remove(datasetName);
// Mapping from URI of path of arrival files to the list of HiveSpec objects.
// We assume in one same operation interval, for same dataset, the table property will not change to reduce the time to compute hiveSpec.
ConcurrentHashMap<String, Collection<HiveSpec>> newSpecsMap = new ConcurrentHashMap<>();
ConcurrentHashMap<String, Collection<HiveSpec>> oldSpecsMap = new ConcurrentHashMap<>();
if (gmce.getNewFiles() != null) {
State registerState = setHiveRegProperties(state, gmce, true);
computeSpecMap(Lists.newArrayList(Iterables.transform(gmce.getNewFiles(), DataFile::getFilePath)),
newSpecsMap, newSpecsMaps.computeIfAbsent(datasetName, t -> CacheBuilder.newBuilder()
.expireAfterAccess(state.getPropAsInt(MetadataWriter.CACHE_EXPIRING_TIME,
MetadataWriter.DEFAULT_CACHE_EXPIRING_TIME), TimeUnit.HOURS)
.build()), registerState, false);
}
if (gmce.getOldFilePrefixes() != null) {
State registerState = setHiveRegProperties(state, gmce, false);
computeSpecMap(gmce.getOldFilePrefixes(), oldSpecsMap, oldSpecsMaps.computeIfAbsent(datasetName, t -> CacheBuilder
.newBuilder()
.expireAfterAccess(state.getPropAsInt(MetadataWriter.CACHE_EXPIRING_TIME,
MetadataWriter.DEFAULT_CACHE_EXPIRING_TIME), TimeUnit.HOURS)
.build()), registerState, true);
} else if (gmce.getOldFiles() != null) {
State registerState = setHiveRegProperties(state, gmce, false);
computeSpecMap(gmce.getOldFiles(), oldSpecsMap, oldSpecsMaps.computeIfAbsent(datasetName,
t -> CacheBuilder.newBuilder()
.expireAfterAccess(state.getPropAsInt(MetadataWriter.CACHE_EXPIRING_TIME,
MetadataWriter.DEFAULT_CACHE_EXPIRING_TIME), TimeUnit.HOURS)
.build()), registerState, false);
}
if (newSpecsMap.isEmpty() && oldSpecsMap.isEmpty()) {
return;
}
// Sample one entry among all "Path <--> List<HiveSpec>" pair is good enough, reasoning:
// 0. Objective here is to execute metadata registration for all target table destinations of a dataset,
// 1. GMCE guarantees all paths coming from single dataset (but not necessary single "partition" in Hive's layout),
// 2. HiveSpec of paths from a dataset should be targeting at the same set of table destinations,
// 3. therefore fetching one path's HiveSpec and iterate through it is good enough to cover all table destinations.
Collection<HiveSpec> specs =
newSpecsMap.isEmpty() ? oldSpecsMap.values().iterator().next() : newSpecsMap.values().iterator().next();
for (HiveSpec spec : specs) {
String dbName = spec.getTable().getDbName();
String tableName = spec.getTable().getTableName();
String tableString = Joiner.on(TABLE_NAME_DELIMITER).join(dbName, tableName);
partitionKeysMap.put(tableString, spec.getTable().getPartitionKeys());
if (!tableOperationTypeMap.containsKey(tableString)) {
tableOperationTypeMap.put(tableString, new TableStatus(gmce.getOperationType(),
gmce.getDatasetIdentifier().getNativeName(), watermark.getSource(),
((LongWatermark)watermark.getWatermark()).getValue()-1, ((LongWatermark)watermark.getWatermark()).getValue()));
} else if (tableOperationTypeMap.get(tableString).operationType != gmce.getOperationType() && gmce.getOperationType() != OperationType.change_property) {
flush(dbName, tableName);
tableOperationTypeMap.put(tableString, new TableStatus(gmce.getOperationType(),
gmce.getDatasetIdentifier().getNativeName(), watermark.getSource(),
((LongWatermark)watermark.getWatermark()).getValue()-1, ((LongWatermark)watermark.getWatermark()).getValue()));
}
tableOperationTypeMap.get(tableString).gmceHighWatermark = ((LongWatermark)watermark.getWatermark()).getValue();
List<MetadataWriter> allowedWriters = getAllowedMetadataWriters(gmce, metadataWriters);
writeWithMetadataWriters(recordEnvelope, allowedWriters, newSpecsMap, oldSpecsMap, spec);
}
this.recordCount.incrementAndGet();
}
  /**
   * Entry point for calling the allowed metadata writers specified in the GMCE.
   * Adds fault tolerance: once one writer fails, that writer and every subsequent writer are reset
   * for this table (discarding partially buffered state) and the error is recorded or rethrown via
   * {@code addOrThrowException}; a known transient exception instead fails the container so the
   * GMCE can be retried without advancing watermarks.
   * Visible for testing because the WriteEnvelope method has complicated hive logic.
   *
   * @param recordEnvelope envelope carrying the GMCE record
   * @param allowedWriters metadata writers that will be written to
   * @param newSpecsMap HiveSpecs for new files, keyed by path
   * @param oldSpecsMap HiveSpecs for old files, keyed by path
   * @param spec HiveSpec identifying the destination table
   * @throws IOException when max number of dataset errors is exceeded
   */
  @VisibleForTesting
  void writeWithMetadataWriters(
      RecordEnvelope<GenericRecord> recordEnvelope,
      List<MetadataWriter> allowedWriters,
      ConcurrentHashMap newSpecsMap,
      ConcurrentHashMap oldSpecsMap,
      HiveSpec spec
  ) throws IOException {
    boolean meetException = false;
    String dbName = spec.getTable().getDbName();
    String tableName = spec.getTable().getTableName();
    String tableString = Joiner.on(TABLE_NAME_DELIMITER).join(dbName, tableName);
    for (MetadataWriter writer : allowedWriters) {
      if (meetException) {
        // An earlier writer already failed: skip writing and drop this writer's buffered state.
        writer.reset(dbName, tableName);
      } else {
        try {
          Timer writeTimer = metadataWriterWriteTimers.get(writer.getClass().getName());
          Timer datasetTimer = datasetTimers.computeIfAbsent(tableName, k -> metricContext.contextAwareTimer(k, 1, TimeUnit.HOURS));
          try (Timer.Context writeContext = writeTimer.time();
              Timer.Context datasetContext = datasetTimer.time()) {
            writer.writeEnvelope(recordEnvelope, newSpecsMap, oldSpecsMap, spec);
          }
        } catch (Exception e) {
          // Transient failures must fail the container so the offset is not skipped.
          if (exceptionMatches(e, transientExceptionMessages)) {
            throw new RuntimeException("Failing container due to transient exception for db: " + dbName + " table: " + tableName, e);
          }
          meetException = true;
          writer.reset(dbName, tableName);
          addOrThrowException(e, tableString, dbName, tableName, getFailedWriterList(writer));
        }
      }
    }
  }
/**
* All metadata writers will be returned if no metadata writers are specified in gmce
* @param gmce
* @param metadataWriters
* @return The metadata writers allowed as specified by GMCE. Relative order of {@code metadataWriters} is maintained
*/
@VisibleForTesting
static List<MetadataWriter> getAllowedMetadataWriters(GobblinMetadataChangeEvent gmce, List<MetadataWriter> metadataWriters) {
if (CollectionUtils.isEmpty(gmce.getAllowedMetadataWriters())) {
return metadataWriters;
}
Set<String> allowSet = new HashSet<>(gmce.getAllowedMetadataWriters());
return metadataWriters.stream()
.filter(writer -> allowSet.contains(writer.getClass().getName()))
.collect(Collectors.toList());
}
  /**
   * Records {@code e} against the failing table's dataset. While the same operation type keeps
   * failing, the last recorded exception's high watermark is extended in place so one failure
   * event covers a contiguous offset range; otherwise a new {@link GobblinMetadataException}
   * entry is started.
   *
   * @throws IOException when the number of distinct failing datasets exceeds {@code maxErrorDataset}
   */
  private void addOrThrowException(Exception e, String tableString, String dbName, String tableName, List<String> failedWriters) throws IOException {
    TableStatus tableStatus = tableOperationTypeMap.get(tableString);
    Map<String, List<GobblinMetadataException>> tableErrorMap = this.datasetErrorMap.getOrDefault(tableStatus.datasetPath, new HashMap<>());
    GobblinMetadataException lastException = null;
    if (tableErrorMap.containsKey(tableString) && !tableErrorMap.get(tableString).isEmpty()) {
      lastException = tableErrorMap.get(tableString).get(tableErrorMap.get(tableString).size() - 1);
    } else {
      tableErrorMap.put(tableString, new ArrayList<>());
    }
    // If operationType has changed, add a new exception to the list so that each failure event represents an offset range all containing the same operation
    if (lastException != null && lastException.operationType.equals(tableStatus.operationType)) {
      lastException.highWatermark = tableStatus.gmceHighWatermark;
    } else {
      lastException = new GobblinMetadataException(tableStatus.datasetPath, dbName, tableName, tableStatus.gmceTopicPartition,
          tableStatus.gmceLowWatermark, tableStatus.gmceHighWatermark, failedWriters, tableStatus.operationType, partitionKeysMap.get(tableString), e);
      tableErrorMap.get(tableString).add(lastException);
    }
    // Carry partition-level details when available so the failure event can report exact partitions.
    if (e instanceof HiveMetadataWriterWithPartitionInfoException) {
      lastException.addedPartitionValues.addAll(((HiveMetadataWriterWithPartitionInfoException) e).addedPartitionValues);
      lastException.droppedPartitionValues.addAll(((HiveMetadataWriterWithPartitionInfoException) e).droppedPartitionValues);
    }
    this.datasetErrorMap.put(tableStatus.datasetPath, tableErrorMap);
    // Known non-transient failures are logged but do not count toward the failed-dataset budget.
    if (!exceptionMatches(e, this.nonTransientExceptionMessages)) {
      currentErrorDatasets.add(tableStatus.datasetPath);
      log.error(String.format("Meet exception when flush table %s", tableString), e);
    } else {
      log.error(String.format("Detected known non-transient failure for table %s", tableString), e);
    }
    if (currentErrorDatasets.size() > maxErrorDataset) {
      //Fail the job if the error size exceeds some number
      throw new IOException(String.format("Container fails to flush for more than %s dataset, last exception we met is: ", maxErrorDataset), e);
    }
  }
  /**
   * Flushes one table's buffered metadata through every metadata writer, with the same fault
   * tolerance as {@link #writeWithMetadataWriters}: after the first failing writer, the remaining
   * writers are reset instead of flushed, and the error is recorded (so a GTE can still be
   * emitted) or rethrown. On a fully successful flush, previously recorded errors for the table
   * are emitted as failure events — the table watermark is about to move — and then cleared.
   */
  private void flush(String dbName, String tableName) throws IOException {
    boolean meetException = false;
    String tableString = Joiner.on(TABLE_NAME_DELIMITER).join(dbName, tableName);
    if (tableOperationTypeMap.get(tableString).gmceLowWatermark == tableOperationTypeMap.get(tableString).gmceHighWatermark) {
      // No need to flush
      return;
    }
    for (MetadataWriter writer : metadataWriters) {
      if(meetException) {
        // An earlier writer already failed: drop this writer's buffered state for the table.
        writer.reset(dbName, tableName);
      } else {
        try {
          Timer flushTimer = metadataWriterFlushTimers.get(writer.getClass().getName());
          Timer datasetTimer = datasetTimers.computeIfAbsent(tableName, k -> metricContext.contextAwareTimer(k, 1, TimeUnit.HOURS));
          try (Timer.Context flushContext = flushTimer.time();
              Timer.Context datasetContext = datasetTimer.time()) {
            writer.flush(dbName, tableName);
          }
        } catch (IOException e) {
          // Transient failures must fail the container so the offset range is retried.
          if (exceptionMatches(e, transientExceptionMessages)) {
            throw new RuntimeException("Failing container due to transient exception for db: " + dbName + " table: " + tableName, e);
          }
          meetException = true;
          writer.reset(dbName, tableName);
          addOrThrowException(e, tableString, dbName, tableName, getFailedWriterList(writer));
        }
      }
    }
    if (!meetException) {
      String datasetPath = tableOperationTypeMap.get(tableString).datasetPath;
      if (datasetErrorMap.containsKey(datasetPath) && datasetErrorMap.get(datasetPath).containsKey(tableString)) {
        // We only want to emit GTE when the table watermark moves. There can be two scenario that watermark move, one is after one flush interval,
        // we commit new watermark to state store, anther is here, where during the flush interval, we flush table because table operation changes.
        // Under this condition, error map contains this dataset means we met error before this flush, but this time when flush succeed and
        // the watermark inside the table moves, so we want to emit GTE to indicate there is some data loss here
        submitFailureEvents(datasetErrorMap.get(datasetPath).get(tableString));
        this.datasetErrorMap.get(datasetPath).remove(tableString);
      }
    }
  }
/**
* Check if exception is contained within a known list of known exceptions. Transient exceptions should not be caught
* to avoid advancing watermarks and skipping GMCEs unnecessarily, while non-transient exceptions should not count
* towards the maximum number of failed datasets.
*/
public static boolean exceptionMatches(Exception e, Set<String> exceptionMessages) {
return exceptionMessages.stream().anyMatch(message -> Throwables.getRootCause(e).toString().contains(message));
}
  /**
   * Call the metadata writers to flush each table's metadata.
   * A metadata writer's flush is where the real metadata registration happens
   * (e.g. for Iceberg, this method will generate a snapshot).
   *
   * Note that this is one of the places where the materialization of aggregated metadata happens.
   * When there's a change of {@link OperationType}, it also interrupts metadata aggregation,
   * and triggers materialization of metadata.
   * @throws IOException if flushing a table fails in a way that cannot be tolerated
   */
  @Override
  public void flush() throws IOException {
    log.info(String.format("begin flushing %s records", String.valueOf(recordCount.get())));
    // Flush every table seen since the last flush; tableString encodes "<db><delimiter><table>".
    for (String tableString : tableOperationTypeMap.keySet()) {
      List<String> tid = Splitter.on(TABLE_NAME_DELIMITER).splitToList(tableString);
      flush(tid.get(0), tid.get(1));
    }
    tableOperationTypeMap.clear();
    recordCount.lazySet(0L);
    // Emit events for all current errors, since the GMCE watermark will be advanced
    for (Map.Entry<String, Map<String, List<GobblinMetadataException>>> entry : datasetErrorMap.entrySet()) {
      for (List<GobblinMetadataException> exceptionList : entry.getValue().values()) {
        submitFailureEvents(exceptionList);
      }
      entry.getValue().clear();
    }
    logTimers();
  }
  /**
   * Close every resource registered with this writer's {@code closer}.
   */
  @Override
  public void close() throws IOException {
    this.closer.close();
  }
/**
* Get a new state object with the properties used to calculate the {@Link HiveSpec} set
* First set the policy to compute HiveSpec, if the policy is not specified in record,
* then use the default policy defined in the pipeline
* Then set the {@Link ConfigurationKeys.HIVE_REGISTRATION_POLICY} and SCHEMA_LITERAL
*/
public static State setHiveRegProperties(State state, GobblinMetadataChangeEvent gmce, boolean forNew) {
Preconditions.checkArgument(state.contains(DEFAULT_HIVE_REGISTRATION_POLICY_KEY),
String.format("Missing required configuration %s", DEFAULT_HIVE_REGISTRATION_POLICY_KEY));
String defaultPolicy = state.getProp(DEFAULT_HIVE_REGISTRATION_POLICY_KEY);
State tmpState = new State(state);
String policyClass = forNew ? (gmce.getRegistrationPolicy() != null ? gmce.getRegistrationPolicy() : defaultPolicy)
: (gmce.getRegistrationPolicyForOldData() != null ? gmce.getRegistrationPolicyForOldData() : defaultPolicy);
tmpState.setProp(ConfigurationKeys.HIVE_REGISTRATION_POLICY, policyClass);
if (!forNew) {
//For old data, we don't use the old spec to update table, we set the flag to true to avoid listing operation
tmpState.setProp(HiveRegistrationPolicy.MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY, true);
}
if (gmce.getPartitionColumns() != null && !gmce.getPartitionColumns().isEmpty()) {
//We only support on partition column for now
//TODO: Support multi partition columns
tmpState.setProp(HIVE_PARTITION_NAME, String.join(",", gmce.getPartitionColumns()));
}
if (gmce.getRegistrationProperties() != null) {
for (Map.Entry<String, String> entry : gmce.getRegistrationProperties().entrySet()) {
tmpState.setProp(entry.getKey(), entry.getValue());
}
}
//This config will force all the GMCE to register into the FORCE_HIVE_DATABASE_NAME
//This is mainly used during verification period that we register all iceberg data under iceberg_test
if (state.contains(FORCE_HIVE_DATABASE_NAME)) {
tmpState.setProp(HiveRegistrationPolicyBase.HIVE_DATABASE_NAME, state.getProp(FORCE_HIVE_DATABASE_NAME));
}
//Set schema
if (gmce.getTableSchema() != null) {
tmpState.setProp(AvroSerdeUtils.AvroTableProperties.SCHEMA_LITERAL.getPropName(), gmce.getTableSchema());
}
return tmpState;
}
/**
* Submit events indicating that a specific set of GMCEs have been skipped, so there is a gap in the registration
*/
private void submitFailureEvents(List<GobblinMetadataException> exceptionList) {
if (exceptionList.isEmpty()) {
return;
}
log.warn(String.format("Sending GTEs to indicate table flush failure for %s.%s", exceptionList.get(0).dbName, exceptionList.get(0).tableName));
for (GobblinMetadataException exception : exceptionList) {
GobblinEventBuilder gobblinTrackingEvent = new GobblinEventBuilder(MetadataWriterKeys.METADATA_WRITER_FAILURE_EVENT);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.DATASET_HDFS_PATH, exception.datasetPath);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.DATABASE_NAME_KEY, exception.dbName);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.TABLE_NAME_KEY, exception.tableName);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_TOPIC_NAME, exception.GMCETopicPartition.split("-")[0]);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_TOPIC_PARTITION, exception.GMCETopicPartition.split("-")[1]);
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_HIGH_WATERMARK, Long.toString(exception.highWatermark));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.GMCE_LOW_WATERMARK, Long.toString(exception.lowWatermark));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.FAILED_WRITERS_KEY, Joiner.on(',').join(exception.failedWriters));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.OPERATION_TYPE_KEY, exception.operationType.toString());
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.FAILED_TO_ADD_PARTITION_VALUES_KEY, Joiner.on(',').join(exception.addedPartitionValues));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.FAILED_TO_DROP_PARTITION_VALUES_KEY, Joiner.on(',').join(exception.droppedPartitionValues));
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.PARTITION_KEYS, Joiner.on(',').join(exception.partitionKeys.stream()
.map(HiveRegistrationUnit.Column::getName).collect(Collectors.toList())));
String message = Throwables.getRootCause(exception).getMessage();
gobblinTrackingEvent.addMetadata(MetadataWriterKeys.EXCEPTION_MESSAGE_KEY_NAME, message);
eventSubmitter.submit(gobblinTrackingEvent);
}
}
private List<String> getFailedWriterList(MetadataWriter failedWriter) {
List<MetadataWriter> failedWriters = metadataWriters.subList(metadataWriters.indexOf(failedWriter), metadataWriters.size());
return failedWriters.stream().map(writer -> writer.getClass().getName()).collect(Collectors.toList());
}
private void logTimers() {
logTimer(hiveSpecComputationTimer);
metadataWriterWriteTimers.values().forEach(this::logTimer);
metadataWriterFlushTimers.values().forEach(this::logTimer);
datasetTimers.values().forEach(this::logTimer);
}
  /**
   * Log a timer's 1-hour mean and 99th-percentile durations, converted from nanoseconds to
   * milliseconds.
   */
  private void logTimer(ContextAwareTimer timer) {
    // NOTE(review): getSnapshot() is invoked twice, so the mean and the 99th percentile may come
    // from slightly different snapshots; consider caching one snapshot if strict consistency matters.
    log.info("Timer {} 1 hour mean duration: {} ms", timer.getName(), TimeUnit.NANOSECONDS.toMillis((long) timer.getSnapshot().getMean()));
    log.info("Timer {} 1 hour 99th percentile duration: {} ms", timer.getName(), TimeUnit.NANOSECONDS.toMillis((long) timer.getSnapshot().get99thPercentile()));
  }
}
| 1,764 |
0 | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg | Create_ds/gobblin/gobblin-iceberg/src/main/java/org/apache/gobblin/iceberg/writer/GobblinMCEWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.iceberg.writer;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.gobblin.writer.DataWriter;
import org.apache.gobblin.writer.DataWriterBuilder;
/**
 * A {@link DataWriterBuilder} that builds {@link GobblinMCEWriter} instances for writing Avro
 * {@link GenericRecord}s.
 */
public class GobblinMCEWriterBuilder extends DataWriterBuilder<Schema, GenericRecord> {

  /**
   * Build a {@link GobblinMCEWriter} configured with this builder and the destination's properties.
   *
   * @throws IOException if the writer cannot be created
   */
  @Override
  public DataWriter<GenericRecord> build() throws IOException {
    return new GobblinMCEWriter(this, this.destination.getProperties());
  }
}
| 1,765 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/verify/PinotAuditCountVerifierTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import lombok.Getter;
import lombok.Setter;
import org.apache.gobblin.compaction.audit.AuditCountClient;
import org.apache.gobblin.compaction.dataset.TimeBasedSubDirDatasetsFinder;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.dataset.SimpleFileSystemDataset;
import org.apache.gobblin.dataset.FileSystemDataset;
/**
* Class to test audit count verification logic
*/
public class PinotAuditCountVerifierTest {
public static final String PRODUCER_TIER = "producer";
public static final String ORIGIN_TIER = "origin";
public static final String GOBBLIN_TIER = "gobblin";
@Test
public void testTier() throws Exception {
final String topic = "randomTopic";
final String input = "/base/input";
final String output = "/base/output";
final String inputSub = "hourly";
final String outputSub = "hourly";
TestAuditCountClient client = new TestAuditCountClient();
FileSystemDataset dataset = new SimpleFileSystemDataset(
new Path(input + topic + inputSub + "/2017/04/03/10"));
State props = new State();
props.setProp (CompactionAuditCountVerifier.PRODUCER_TIER, PRODUCER_TIER);
props.setProp (CompactionAuditCountVerifier.ORIGIN_TIER, ORIGIN_TIER);
props.setProp (CompactionAuditCountVerifier.GOBBLIN_TIER, GOBBLIN_TIER);
props.setProp (MRCompactor.COMPACTION_INPUT_DIR, input);
props.setProp (MRCompactor.COMPACTION_INPUT_SUBDIR, inputSub);
props.setProp (MRCompactor.COMPACTION_DEST_DIR, output);
props.setProp (MRCompactor.COMPACTION_DEST_SUBDIR, outputSub);
props.setProp (MRCompactor.COMPACTION_TMP_DEST_DIR, "/tmp/compaction/verifier");
props.setProp (TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MAX_TIME_AGO, "3000d");
props.setProp (TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MIN_TIME_AGO, "1d");
CompactionAuditCountVerifier verifier = new CompactionAuditCountVerifier (props, client);
// All complete
client.setCounts(ImmutableMap.of(
PRODUCER_TIER, 1000L,
ORIGIN_TIER, 1000L,
GOBBLIN_TIER, 1000L
));
Assert.assertTrue (verifier.verify(dataset).isSuccessful);
// test true because GOBBLIN_TIER / PRODUCER_TIER is above threshold
client.setCounts(ImmutableMap.of(
PRODUCER_TIER, 1000L,
ORIGIN_TIER, 1100L,
GOBBLIN_TIER, 1000L
));
Assert.assertTrue (verifier.verify(dataset).isSuccessful);
// test false because GOBBLIN_TIER / (PRODUCER_TIER || ORIGIN_TIER) is below threshold
client.setCounts(ImmutableMap.of(
PRODUCER_TIER, 1100L,
ORIGIN_TIER, 1100L,
GOBBLIN_TIER, 1000L
));
Assert.assertFalse (verifier.verify(dataset).isSuccessful);
}
/**
* A helper client
*/
public class TestAuditCountClient implements AuditCountClient {
@Setter
@Getter
Map<String, Long> counts = Maps.newHashMap();
public Map<String, Long> fetch (String topic, long start, long end) {
return counts;
}
}
}
| 1,766 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/verify/CompactionWatermarkCheckerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.compaction.source.CompactionSource;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.dataset.SimpleFileSystemDataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.time.TimeIterator;
/**
 * Tests for {@link CompactionWatermarkChecker}: computing watermark boundary times per
 * {@link TimeIterator.Granularity} and publishing watermarks into the job state.
 */
public class CompactionWatermarkCheckerTest {
  // All expected epoch millis below are computed relative to this zone.
  private final ZoneId zone = ZoneId.of("America/Los_Angeles");

  @Test
  public void testGetWatermark() {
    // Reference time: 2019/12/01 18:16:00.000 (America/Los_Angeles)
    ZonedDateTime time = ZonedDateTime.of(2019, 12, 1, 18, 16, 0, 0, zone);
    // minute watermark is 2019/12/01 18:15:59.999
    Assert.assertEquals(CompactionWatermarkChecker.getWatermarkTimeMillis(time, TimeIterator.Granularity.MINUTE),
        1575252959999L);
    // hour watermark is 2019/12/01 17:59:59.999
    Assert.assertEquals(CompactionWatermarkChecker.getWatermarkTimeMillis(time, TimeIterator.Granularity.HOUR),
        1575251999999L);
    // day watermark is 2019/11/30 23:59:59.999
    Assert.assertEquals(CompactionWatermarkChecker.getWatermarkTimeMillis(time, TimeIterator.Granularity.DAY),
        1575187199999L);
    // month watermark is 2019/11/30 23:59:59.999
    Assert.assertEquals(CompactionWatermarkChecker.getWatermarkTimeMillis(time, TimeIterator.Granularity.MONTH),
        1575187199999L);
  }

  @Test
  public void testVerify() {
    ZonedDateTime time = ZonedDateTime.of(2019, 12, 1, 18, 16, 0, 0, zone);
    State state = new State();
    state.setProp(CompactionSource.COMPACTION_INIT_TIME, time.toInstant().toEpochMilli());
    state.setProp(CompactionAuditCountVerifier.COMPACTION_COMMPLETENESS_GRANULARITY, "DAY");
    state.setProp(CompactionWatermarkChecker.TIME_FORMAT, "yyyy/MM/dd");

    FileSystemDataset dataset1201 = new SimpleFileSystemDataset(new Path("/dataset/2019/12/01"));
    FileSystemDataset dataset1130 = new SimpleFileSystemDataset(new Path("/dataset/2019/11/30"));
    FileSystemDataset datasetDash = new SimpleFileSystemDataset(new Path("/dataset/datepartition=2019-11-30"));

    // CASE: completeness is disabled — only the plain compaction watermark is published
    state.setProp(CompactionAuditCountVerifier.COMPACTION_COMMPLETENESS_ENABLED, false);
    doVerifyDataset(new State(state), dataset1201, null, null);
    doVerifyDataset(new State(state), dataset1130, "1575187199999", null);
    doVerifyDataset(new State(state), datasetDash, null, null);

    // CASE: completeness is enabled — both watermarks are published for the completed day
    state.setProp(CompactionAuditCountVerifier.COMPACTION_COMMPLETENESS_ENABLED, true);
    doVerifyDataset(new State(state), dataset1201, null, null);
    doVerifyDataset(new State(state), dataset1130, "1575187199999", "1575187199999");
    doVerifyDataset(new State(state), datasetDash, null, null);
  }

  /** Run the checker on {@code dataset} and assert the watermarks it wrote into {@code state}. */
  private void doVerifyDataset(State state, FileSystemDataset dataset, String compactionWatermark, String completionAndCompactionWatermark) {
    CompactionWatermarkChecker checker = new CompactionWatermarkChecker(state);
    checker.verify(dataset);
    Assert.assertEquals(state.getProp(CompactionWatermarkChecker.COMPACTION_WATERMARK), compactionWatermark);
    Assert.assertEquals(state.getProp(CompactionWatermarkChecker.COMPLETION_COMPACTION_WATERMARK),
        completionAndCompactionWatermark);
  }
}
| 1,767 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/verify/CompactionTimeVerifierTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Tests for {@code CompactionTimeRangeVerifier.getMatchedLookbackTime}: per-dataset look-back
 * overrides, inline and supplied defaults, whitespace tolerance, and anchored pattern matching.
 */
public class CompactionTimeVerifierTest {

  /**
   * Assert that resolving {@code dataset} against {@code timeString}, with {@code defaultTime}
   * as the fallback, yields {@code expected}.
   */
  private static void assertLookback(String dataset, String timeString, String defaultTime, String expected) {
    Assert.assertEquals(CompactionTimeRangeVerifier.getMatchedLookbackTime(dataset, timeString, defaultTime), expected);
  }

  @Test
  public void testOneDatasetTime() {
    // A single per-dataset override; non-matching datasets fall back to the supplied default.
    String timeString = "Identity.MemberAccount:1d2h";
    assertLookback("Identity/MemberAccount", timeString, "2d", "1d2h");
    assertLookback("BizProfile/BizCompany", timeString, "2d", "2d");

    // Same override, preceded by an inline default entry.
    timeString = "2d;Identity.MemberAccount:1d2h";
    assertLookback("Identity/MemberAccount", timeString, "2d", "1d2h");
    assertLookback("BizProfile/BizCompany", timeString, "2d", "2d");
  }

  @Test
  public void testTwoDatasetTime() {
    // Two overrides; unmatched datasets use the supplied default.
    String timeString = "Identity.*:1d2h;BizProfile.BizCompany:3d";
    assertLookback("Identity/MemberAccount", timeString, "2d", "1d2h");
    assertLookback("BizProfile/BizCompany", timeString, "2d", "3d");
    assertLookback("ABC/Unknown", timeString, "2d", "2d");

    // Two overrides plus an inline default entry.
    timeString = "2d;Identity.MemberAccount:1d2h;BizProfile.BizCompany:3d";
    assertLookback("Identity/MemberAccount", timeString, "2d", "1d2h");
    assertLookback("BizProfile/BizCompany", timeString, "2d", "3d");
    assertLookback("ABC/Unknown", timeString, "2d", "2d");
  }

  @Test
  public void testDefaultDatasetTime() {
    // An un-keyed entry in the middle serves as the inline default for unmatched datasets.
    assertLookback("ABC/Unknown", "Identity.*:1d2h;3d2h;BizProfile.BizCompany:3d", "2d", "3d2h");
    // A bare value wins over the supplied default.
    assertLookback("ABC/Unknown", "3d2h", "2d", "3d2h");
  }

  @Test
  public void testEmptySpace() {
    // Whitespace around names, colons and semicolons is tolerated.
    String timeString = "Identity.* : 1d2h ; BizProfile.BizCompany : 3d";
    assertLookback("Identity/MemberAccount", timeString, "2d", "1d2h");
    assertLookback("BizProfile/BizCompany", timeString, "2d", "3d");
    assertLookback("ABC/Unknown", timeString, "2d", "2d");

    timeString = "2d;Identity.MemberAccount :1d2h; BizProfile.BizCompany:3d";
    assertLookback("Identity/MemberAccount", timeString, "2d", "1d2h");
    assertLookback("BizProfile/BizCompany", timeString, "2d", "3d");
    assertLookback("ABC/Unknown", timeString, "2d", "2d");
  }

  @Test
  public void testPartialMatchedNames() {
    // "Identity.Member$" is anchored, so the longer "Identity/MemberAccount" must not match it.
    String timeString = "Identity.Member$ : 1d2h";
    assertLookback("Identity/Member", timeString, "2d", "1d2h");
    assertLookback("Identity/MemberAccount", timeString, "2d", "2d");
  }
}
| 1,768 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/action/CompactionGMCEPublishingActionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import com.google.common.base.Optional;
import com.google.common.io.Files;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.gobblin.compaction.dataset.TimeBasedSubDirDatasetsFinder;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.source.CompactionSource;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.gobblin.iceberg.GobblinMCEProducer;
import org.apache.gobblin.metadata.GobblinMetadataChangeEvent;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * End-to-end test verifying that a compaction job configured with the GMCE-publishing complete
 * action emits a {@link GobblinMetadataChangeEvent} carrying exactly one new (compacted) file and
 * a non-empty set of old-file prefixes.
 */
public class CompactionGMCEPublishingActionTest {
  @Test
  public void testDedup() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();

    File jobDir = new File(basePath, "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
    Assert.assertTrue(jobDir.mkdirs());

    GenericRecord r1 = createRandomRecord();
    GenericRecord r2 = createRandomRecord();
    GenericRecord r3 = createEvolvedSchemaRecord();
    writeFileWithContent(jobDir, "file1", r1, 20);
    writeFileWithContent(jobDir, "file2", r2, 18);
    File newestFile = writeFileWithContent(jobDir, "file3", r3, 10, r3.getSchema());
    // Mark file3 as the most recently modified so its evolved schema is the one picked up.
    newestFile.setLastModified(Long.MAX_VALUE);

    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblin("dedup", basePath.getAbsolutePath());
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
  }

  /** Build a record whose schema adds an optional "oppo" field on top of the base key fields. */
  public GenericRecord createEvolvedSchemaRecord() {
    Schema evolvedSchema = SchemaBuilder.record("evolved")
        .fields()
        .requiredLong("partitionKey")
        .requiredString("environment")
        .requiredString("subKey")
        .optionalString("oppo")
        .endRecord();
    GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(evolvedSchema);
    // Long literal instead of the deprecated Long(long) constructor.
    keyRecordBuilder.set("partitionKey", 1L);
    keyRecordBuilder.set("environment", "test");
    keyRecordBuilder.set("subKey", "2");
    keyRecordBuilder.set("oppo", "poop");
    return keyRecordBuilder.build();
  }

  /**
   * Write {@code count} copies of {@code r} to an Avro file in {@code dir} using the default key
   * schema. Returns the file handle so callers can adjust its modification time.
   */
  private File writeFileWithContent(File dir, String fileName, GenericRecord r, int count) throws IOException {
    File file = new File(dir, fileName + "." + count + ".avro");
    Assert.assertTrue(file.createNewFile());
    this.createAvroFileWithRepeatingRecords(file, r, count, Optional.absent());
    return file;
  }

  /**
   * Write {@code count} copies of {@code r} to an Avro file in {@code dir} using the given schema.
   * Returns the file handle so callers can adjust its modification time.
   */
  private File writeFileWithContent(File dir, String fileName, GenericRecord r, int count, Schema schema)
      throws IOException {
    File file = new File(dir, fileName + "." + count + ".avro");
    Assert.assertTrue(file.createNewFile());
    this.createAvroFileWithRepeatingRecords(file, r, count, Optional.of(schema));
    return file;
  }

  /**
   * Write {@code count} copies of {@code r} to {@code file}, using {@code schema} when present
   * or the default key schema otherwise.
   */
  public void createAvroFileWithRepeatingRecords(File file, GenericRecord r, int count, Optional<Schema> schema)
      throws IOException {
    // try-with-resources guarantees the writer (and its underlying stream) is closed even when
    // create/append throws, avoiding a file-handle leak in the failure path.
    try (DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<GenericRecord>())) {
      writer.create(schema.isPresent() ? schema.get() : getSchema(), new FileOutputStream(file));
      for (int i = 0; i < count; ++i) {
        writer.append(r);
      }
    }
  }

  /** Build a record conforming to the default key schema. */
  public GenericRecord createRandomRecord() {
    GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(getSchema());
    // Long literal instead of the deprecated Long(long) constructor.
    keyRecordBuilder.set("partitionKey", 1L);
    keyRecordBuilder.set("environment", "test");
    keyRecordBuilder.set("subKey", "2");
    GenericRecord record = keyRecordBuilder.build();
    return record;
  }

  /** Parse the embedded key schema and return its nested "key" record schema. */
  public Schema getSchema() {
    final String KEY_SCHEMA =
        "{ \"type\" : \"record\", \"name\" : \"etl\",\"namespace\" : \"reducerTest\", \"fields\" : [ { \"name\" : "
            + "\"key\", \"type\" : {\"type\" : \"record\", \"name\" : \"key_name\", \"namespace\" : \"key_namespace\", "
            + "\"fields\" : [ {\"name\" : \"partitionKey\", \"type\" : \"long\", \"doc\" : \"\"}, { \"name\" : \"environment"
            + "\", \"type\" : \"string\",\"doc\" : \"\"}, {\"name\" : \"subKey\",\"type\" : \"string\", \"doc\" : \"\"} ]}, "
            + "\"doc\" : \"\", \"attributes_json\" : \"{\\\"delta\\\":false,\\\"pk\\\":true}\" }]}";
    Schema keySchema = new Schema.Parser().parse(KEY_SCHEMA);
    return keySchema.getField("key").schema();
  }

  /** Configure an embedded compaction job over minutely data rooted at {@code basePath}. */
  static EmbeddedGobblin createEmbeddedGobblin(String name, String basePath) {
    String pattern = new Path(basePath, "*/*/minutely/*/*/*/*").toString();

    return new EmbeddedGobblin(name).setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY,
        CompactionSource.class.getName())
        .setConfiguration(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY, pattern)
        .setConfiguration(MRCompactor.COMPACTION_INPUT_DIR, basePath.toString())
        .setConfiguration(MRCompactor.COMPACTION_INPUT_SUBDIR, "minutely")
        .setConfiguration(MRCompactor.COMPACTION_DEST_DIR, basePath.toString())
        .setConfiguration(MRCompactor.COMPACTION_DEST_SUBDIR, "hourly")
        .setConfiguration(MRCompactor.COMPACTION_TMP_DEST_DIR, "/tmp/compaction/" + name)
        .setConfiguration(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MAX_TIME_AGO, "3000d")
        .setConfiguration(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MIN_TIME_AGO, "1d")
        .setConfiguration(ConfigurationKeys.MAX_TASK_RETRIES_KEY, "0")
        .setConfiguration("compaction.suite.factory",
            "org.apache.gobblin.compaction.suite.CompactionSuiteBaseWithConfigurableCompleteActionFactory")
        .setConfiguration("compaction.complete.actions",
            "org.apache.gobblin.compaction.action.CompactionCompleteFileOperationAction, org.apache.gobblin.compaction.action.CompactionGMCEPublishingAction, org.apache.gobblin.compaction.action.CompactionMarkDirectoryAction")
        .setConfiguration("old.files.hive.registration.policy",
            "org.apache.gobblin.hive.policy.HiveRegistrationPolicyBase")
        .setConfiguration("writer.output.format", "AVRO")
        .setConfiguration(GobblinMCEProducer.GMCE_PRODUCER_CLASS, GobblinMCETestProducer.class.getName())
        .setConfiguration("hive.registration.policy", "com.linkedin.gobblin.hive.policy.LiHiveDailyRegistrationPolicy");
  }

  /** A GMCE producer stub that asserts on the emitted event instead of sending it anywhere. */
  public static class GobblinMCETestProducer extends GobblinMCEProducer {

    public GobblinMCETestProducer(State state) {
      super(state);
    }

    @Override
    public void underlyingSendGMCE(GobblinMetadataChangeEvent gmce) {
      System.out.println(gmce);
      // Compaction collapses the input into a single output file, and must report old-file prefixes.
      Assert.assertEquals(gmce.getNewFiles().size(), 1);
      Assert.assertNotEquals(gmce.getOldFilePrefixes().size(), 0);
    }
  }
}
| 1,769 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/action/CompactionWatermarkActionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.verify.CompactionWatermarkChecker;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.dataset.SimpleFileSystemDataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.hive.HivePartition;
import org.apache.gobblin.hive.HiveRegister;
import org.apache.gobblin.hive.HiveRegistrationUnit;
import org.apache.gobblin.hive.HiveTable;
import org.apache.gobblin.hive.spec.HiveSpec;
public class CompactionWatermarkActionTest {
  // Hive table property keys under which the watermarks are stored and asserted in these tests.
  private final String compactionWatermark = "compactionWatermark";
  private final String completionCompactionWatermark = "completionAndCompactionWatermark";

  /**
   * Verify watermark advancement both for a dataset named directly after its table
   * ("tracking/event1") and for one using a "db/table" path layout ("db1/table1").
   */
  @Test
  public void testUpdateWatermark()
      throws Exception {
    doTestUpdateWatermark("tracking","event1", "event1");
    doTestUpdateWatermark("db1","table1", "db1/table1");
  }
private void doTestUpdateWatermark(String db, String table, String dataset)
throws Exception {
State state = new State();
String defaultDb = "tracking";
state.setProp(CompactionWatermarkAction.DEFAULT_HIVE_DB, defaultDb);
String inputDir = "/data/tracking";
String inputSubDir = "hourly";
String destSubDir = "daily";
String datasetPath = String.format("%s/%s/%s/2019/12/20", inputDir, dataset, inputSubDir);
state.setProp(MRCompactor.COMPACTION_INPUT_DIR, inputDir);
state.setProp(MRCompactor.COMPACTION_DEST_DIR, inputDir);
state.setProp(MRCompactor.COMPACTION_INPUT_SUBDIR, inputSubDir);
state.setProp(MRCompactor.COMPACTION_DEST_SUBDIR, destSubDir);
state.setProp(HiveRegister.HIVE_REGISTER_TYPE, MockHiveRegister.class.getName());
state.setProp(CompactionWatermarkAction.GRANULARITY, "DAY");
State tableProps = new State();
// 2019-12-31 23:59:59.999
String existingWatermark = "1577836799999";
tableProps.setProp(compactionWatermark, existingWatermark);
tableProps.setProp(completionCompactionWatermark, existingWatermark);
HiveTable existingTable = new HiveTable.Builder().withDbName(db).withTableName(table)
.withProps(tableProps).build();
MockHiveRegister.existingTable = existingTable;
CompactionWatermarkAction action = new CompactionWatermarkAction(state);
FileSystemDataset fsDataset = new SimpleFileSystemDataset(new Path(datasetPath));
// Will not update if old watermarks are reported
String actualWatermark = "1577750399999"; // 2019-10-30 23:59:59.999
doWatermarkTest(action, fsDataset, state, actualWatermark, existingWatermark);
// Will throw a runtime exception if watermark is not continuous
Exception exception = null;
try {
actualWatermark = "1578009599999"; // 2020-01-01 23:59:59.999
doWatermarkTest(action, fsDataset, state, actualWatermark, actualWatermark);
} catch (Exception e) {
exception = e;
}
Assert.assertEquals(exception.getMessage(),
String.format("Fail to advance %s of dataset %s: expect 1577923199999 but got %s, "
+ "please manually fill the gap and rerun.",
compactionWatermark, fsDataset.datasetRoot(), actualWatermark));
// Will update if newer continuous watermarks are reported
actualWatermark = "1577923199999"; // 2020-01-01 23:59:59.999
doWatermarkTest(action, fsDataset, state, actualWatermark, actualWatermark);
}
@Test
public void testWatermarkWithDST() throws Exception {
// Test case 1
// Time zone: PST(America/Los_Angeles)
// Existing watermark millis: 1583654399999 (2020-03-07 23:59:59.999 PST)
// Actual watermark millis: 1583737199999 (2020-03-08 23:59:59.999 PST) with DST
testWatermarkWithDSTTimeZone("America/Los_Angeles", "1583654399999", "1583737199999");
// Test case 2
// Time zone: UTC
// Existing watermark millis: 1583625599999 (2020-03-07 23:59:59.999 UTC)
// Actual watermark millis: 1583711999999 (2020-03-08 23:59:59.999 UTC)
testWatermarkWithDSTTimeZone("UTC", "1583625599999", "1583711999999");
}
private void testWatermarkWithDSTTimeZone(String timeZone, String existingWatermark, String actualWatermark)
throws Exception {
String db = "db1";
String table = "table1";
String dataset = "db1/table1";
State state = new State();
String defaultDb = "tracking";
state.setProp(CompactionWatermarkAction.DEFAULT_HIVE_DB, defaultDb);
String inputDir = "/data/tracking";
String inputSubDir = "hourly";
String destSubDir = "daily";
String datasetPath = String.format("%s/%s/%s/2020/03/08", inputDir, dataset, inputSubDir);
state.setProp(MRCompactor.COMPACTION_INPUT_DIR, inputDir);
state.setProp(MRCompactor.COMPACTION_DEST_DIR, inputDir);
state.setProp(MRCompactor.COMPACTION_INPUT_SUBDIR, inputSubDir);
state.setProp(MRCompactor.COMPACTION_DEST_SUBDIR, destSubDir);
state.setProp(HiveRegister.HIVE_REGISTER_TYPE, MockHiveRegister.class.getName());
state.setProp(CompactionWatermarkAction.GRANULARITY, "DAY");
state.setProp(MRCompactor.COMPACTION_TIMEZONE, timeZone);
State tableProps = new State();
tableProps.setProp(compactionWatermark, existingWatermark);
tableProps.setProp(completionCompactionWatermark, existingWatermark);
HiveTable existingTable = new HiveTable.Builder().withDbName(db).withTableName(table)
.withProps(tableProps).build();
MockHiveRegister.existingTable = existingTable;
CompactionWatermarkAction action = new CompactionWatermarkAction(state);
FileSystemDataset fsDataset = new SimpleFileSystemDataset(new Path(datasetPath));
doWatermarkTest(action, fsDataset, state, actualWatermark, actualWatermark);
}
private void doWatermarkTest(CompactionWatermarkAction action, FileSystemDataset fsDataset,
State state, String actualWatermark, String expectedWatermark)
throws Exception {
state.setProp(CompactionWatermarkChecker.COMPACTION_WATERMARK, actualWatermark);
state.setProp(CompactionWatermarkChecker.COMPLETION_COMPACTION_WATERMARK, actualWatermark);
action.onCompactionJobComplete(fsDataset);
Assert.assertEquals(MockHiveRegister.existingTable.getProps().getProp(compactionWatermark),
expectedWatermark);
Assert.assertEquals(MockHiveRegister.existingTable.getProps().getProp(completionCompactionWatermark),
expectedWatermark);
}
public static class MockHiveRegister extends HiveRegister {
static HiveTable existingTable;
public MockHiveRegister(State state, Optional<String> uri) {
super(state);
}
@Override
protected void registerPath(HiveSpec spec)
throws IOException {
}
@Override
public boolean createDbIfNotExists(String dbName)
throws IOException {
return false;
}
@Override
public boolean createTableIfNotExists(HiveTable table)
throws IOException {
return false;
}
@Override
public boolean addPartitionIfNotExists(HiveTable table, HivePartition partition)
throws IOException {
return false;
}
@Override
public boolean existsTable(String dbName, String tableName)
throws IOException {
return false;
}
@Override
public boolean existsPartition(String dbName, String tableName, List<HiveRegistrationUnit.Column> partitionKeys,
List<String> partitionValues)
throws IOException {
return false;
}
@Override
public void dropTableIfExists(String dbName, String tableName)
throws IOException {
}
@Override
public void dropPartitionIfExists(String dbName, String tableName, List<HiveRegistrationUnit.Column> partitionKeys,
List<String> partitionValues)
throws IOException {
}
@Override
public Optional<HiveTable> getTable(String dbName, String tableName)
throws IOException {
if (dbName.equals(existingTable.getDbName())
&& tableName.equals(existingTable.getTableName())) {
return Optional.of(existingTable);
}
return Optional.absent();
}
@Override
public Optional<HivePartition> getPartition(String dbName, String tableName,
List<HiveRegistrationUnit.Column> partitionKeys, List<String> partitionValues)
throws IOException {
return null;
}
@Override
public void alterTable(HiveTable table)
throws IOException {
existingTable = table;
}
@Override
public void alterPartition(HiveTable table, HivePartition partition)
throws IOException {
}
}
}
| 1,770 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/action/CompactionHiveRegistrationActionTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.action;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import org.apache.gobblin.compaction.mapreduce.MRCompactionTask;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.data.management.dataset.SimpleFileSystemDataset;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.metrics.GobblinTrackingEvent;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.context.NameConflictException;
import org.apache.gobblin.metrics.event.CountEventBuilder;
import org.apache.gobblin.metrics.event.EventSubmitter;
public class CompactionHiveRegistrationActionTest {

  /**
   * Runs the hive-registration completion action against a synthetic dataset and checks
   * that exactly one file-count tracking event is emitted with the expected metadata.
   */
  @Test
  public void testEvents()
      throws Exception {
    String inputDir = "/data/tracking";
    String inputSubDir = "hourly";
    String destSubDir = "daily";
    String pathPattern = "%s/myTopic/%s/2019/12/20";

    WorkUnitState workUnitState = new WorkUnitState();
    workUnitState.setProp(MRCompactor.COMPACTION_INPUT_DIR, inputDir);
    workUnitState.setProp(MRCompactor.COMPACTION_DEST_DIR, inputDir);
    workUnitState.setProp(MRCompactor.COMPACTION_INPUT_SUBDIR, inputSubDir);
    workUnitState.setProp(MRCompactor.COMPACTION_DEST_SUBDIR, destSubDir);
    workUnitState.setProp(MRCompactionTask.FILE_COUNT, "10");
    workUnitState.setProp(MRCompactionTask.RECORD_COUNT, "100");

    CompactionHiveRegistrationAction action = new CompactionHiveRegistrationAction(workUnitState);
    MockMetricContext metricContext = new MockMetricContext(getClass().getName());
    String namespace = "compaction.tracking.events";
    action.addEventSubmitter(new EventSubmitter.Builder(metricContext, namespace).build());

    String datasetPath = String.format(pathPattern, inputDir, inputSubDir);
    FileSystemDataset dataset = new SimpleFileSystemDataset(new Path(datasetPath));
    action.onCompactionJobComplete(dataset);

    // Exactly one tracking event should have been captured by the mock context.
    Assert.assertEquals(metricContext.events.size(), 1);
    CountEventBuilder fileCountEvent = CountEventBuilder.fromEvent(metricContext.events.get(0));
    Assert.assertEquals(fileCountEvent.getNamespace(), namespace);
    Assert.assertEquals(fileCountEvent.getName(), CompactionHiveRegistrationAction.NUM_OUTPUT_FILES);
    Assert.assertEquals(fileCountEvent.getCount(), 10);

    // Event metadata points at the destination (daily) path and carries the counters.
    String destinationPath = String.format(pathPattern, inputDir, destSubDir);
    Map<String, String> metadata = fileCountEvent.getMetadata();
    Assert.assertEquals(metadata.get(CompactionHiveRegistrationAction.DATASET_URN), destinationPath);
    Assert.assertEquals(metadata.get(CompactionHiveRegistrationAction.RECORD_COUNT), "100");
    Assert.assertEquals(metadata.get(CompactionHiveRegistrationAction.BYTE_COUNT), "-1");
  }

  /** Metric context that records submitted events in memory instead of reporting them. */
  private class MockMetricContext extends MetricContext {
    List<GobblinTrackingEvent> events;

    MockMetricContext(String name)
        throws NameConflictException {
      super(name, null, Lists.newArrayList(), false);
      events = Lists.newArrayList();
    }

    @Override
    public void submitEvent(GobblinTrackingEvent nonReusableEvent) {
      events.add(nonReusableEvent);
    }
  }
}
| 1,771 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/suite/TestCompactionSuiteFactories.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.configuration.State;
public class TestCompactionSuiteFactories {
  // Dataset paths shared by compaction tests: DATASET_SUCCESS is expected to compact
  // cleanly, while DATASET_FAIL triggers the simulated hive-registration failure thrown
  // by TestCompactionSuites.HiveRegistrationCompactionSuite.
  public static final String DATASET_SUCCESS = "Identity/MemberAccount/minutely/2017/04/03/22";
  public static final String DATASET_FAIL= "Identity/MemberAccount/minutely/2017/04/03/23";
  /**
   * Factory (looked up via the "HiveRegistrationFailureFactory" alias) that produces a
   * compaction suite whose completion action fails for {@link #DATASET_FAIL}, used to
   * test hive registration failure handling.
   */
  @Alias("HiveRegistrationFailureFactory")
  public static class HiveRegistrationFailureFactory implements CompactionSuiteFactory {
    public TestCompactionSuites.HiveRegistrationCompactionSuite createSuite (State state) {
      return new TestCompactionSuites.HiveRegistrationCompactionSuite(state);
    }
  }
}
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/suite/TestCompactionSuites.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import java.util.ArrayList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.action.CompactionCompleteAction;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
@Slf4j
public class TestCompactionSuites {

  /**
   * Compaction suite whose only completion action simulates a hive registration failure:
   * it throws a RuntimeException for any dataset whose URN contains
   * {@link TestCompactionSuiteFactories#DATASET_FAIL}.
   */
  public static class HiveRegistrationCompactionSuite extends CompactionSuiteBase {

    public HiveRegistrationCompactionSuite(State state) {
      super(state);
    }

    public List<CompactionCompleteAction<FileSystemDataset>> getCompactionCompleteActions() {
      List<CompactionCompleteAction<FileSystemDataset>> completeActions = new ArrayList<>();
      completeActions.add(dataset -> {
        boolean shouldFail = dataset.datasetURN().contains(TestCompactionSuiteFactories.DATASET_FAIL);
        if (shouldFail) {
          throw new RuntimeException("test-hive-registration-failure");
        }
      });
      return completeActions;
    }
  }
}
| 1,773 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce/KeyDedupReducerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.avro.mapred.AvroKey;
import org.apache.avro.mapred.AvroValue;
import org.apache.gobblin.compaction.mapreduce.avro.AvroKeyDedupReducer;
import org.apache.gobblin.compaction.mapreduce.avro.FieldAttributeBasedDeltaFieldsProvider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.counters.GenericCounter;
import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Test class for {@link org.apache.gobblin.compaction.mapreduce.RecordKeyDedupReducerBase}.
* Will have test separately in both avro and orc.
*/
@Test(groups = {"gobblin.compaction"})
public class KeyDedupReducerTest {
  // Avro schema containing only the dedup key: a nested "key" record
  // (partitionKey, environment, subKey) flagged as pk via attributes_json.
  private static final String AVRO_KEY_SCHEMA =
      "{ \"type\" : \"record\", \"name\" : \"etl\",\"namespace\" : \"reducerTest\", \"fields\" : [ { \"name\" : "
          + "\"key\", \"type\" : {\"type\" : \"record\", \"name\" : \"key_name\", \"namespace\" : \"key_namespace\", "
          + "\"fields\" : [ {\"name\" : \"partitionKey\", \"type\" : \"long\", \"doc\" : \"\"}, { \"name\" : \"environment"
          + "\", \"type\" : \"string\",\"doc\" : \"\"}, {\"name\" : \"subKey\",\"type\" : \"string\", \"doc\" : \"\"} ]}, "
          + "\"doc\" : \"\", \"attributes_json\" : \"{\\\"delta\\\":false,\\\"pk\\\":true}\" }]}";
  // Full record schema: the pk "key" record plus "scn2" (delta=false) and "scn" (delta=true),
  // so only "scn" participates in delta comparison.
  private static final String AVRO_FULL_SCHEMA =
      "{ \"type\" : \"record\", \"name\" : \"etl\",\"namespace\" : \"reducerTest\", \"fields\" : [ { \"name\" : "
          + "\"key\", \"type\" : {\"type\" : \"record\", \"name\" : \"key_name\", \"namespace\" : \"key_namespace\", "
          + "\"fields\" : [ {\"name\" : \"partitionKey\", \"type\" : \"long\", \"doc\" : \"\"}, { \"name\" : \"environment"
          + "\", \"type\" : \"string\",\"doc\" : \"\"}, {\"name\" : \"subKey\",\"type\" : \"string\", \"doc\" : \"\"} ]}, "
          + "\"doc\" : \"\", \"attributes_json\" : \"{\\\"delta\\\":false,\\\"pk\\\":true}\" }"
          + ", {\"name\" : \"scn2\", \"type\": \"long\", \"doc\" : \"\", \"attributes_json\" : \"{\\\"nullable\\\":false,\\\"delta"
          + "\\\":false,\\\"pk\\\":false,\\\"type\\\":\\\"NUMBER\\\"}\"}"
          + " , {\"name\" : \"scn\", \"type\": \"long\", \"doc\" : \"\", \"attributes_json\" : \"{\\\"nullable\\\":false,\\\"delta"
          + "\\\":true,\\\"pk\\\":false,\\\"type\\\":\\\"NUMBER\\\"}\"}]}";
  // Like AVRO_FULL_SCHEMA but both "scn2" and "scn" are delta=true, forming a compound
  // delta key compared in schema field order (scn2 first, then scn).
  private static final String AVRO_FULL_SCHEMA_WITH_TWO_DELTA_FIELDS =
      "{ \"type\" : \"record\", \"name\" : \"etl\",\"namespace\" : \"reducerTest\", \"fields\" : [ { \"name\" : "
          + "\"key\", \"type\" : {\"type\" : \"record\", \"name\" : \"key_name\", \"namespace\" : \"key_namespace\", "
          + "\"fields\" : [ {\"name\" : \"partitionKey\", \"type\" : \"long\", \"doc\" : \"\"}, { \"name\" : \"environment"
          + "\", \"type\" : \"string\",\"doc\" : \"\"}, {\"name\" : \"subKey\",\"type\" : \"string\", \"doc\" : \"\"} ]}, "
          + "\"doc\" : \"\", \"attributes_json\" : \"{\\\"delta\\\":false,\\\"pk\\\":true}\" }"
          + ", {\"name\" : \"scn2\", \"type\": \"long\", \"doc\" : \"\", \"attributes_json\" : \"{\\\"nullable\\\":false,\\\"delta"
          + "\\\":true,\\\"pk\\\":false,\\\"type\\\":\\\"NUMBER\\\"}\"}"
          + " , {\"name\" : \"scn\", \"type\": \"long\", \"doc\" : \"\", \"attributes_json\" : \"{\\\"nullable\\\":false,\\\"delta"
          + "\\\":true,\\\"pk\\\":false,\\\"type\\\":\\\"NUMBER\\\"}\"}]}";

  /**
   * Feeds four records sharing one dedup key through the reducer under three
   * configurations and asserts which record survives:
   * single delta field "scn" (highest scn wins), no delta field (first record wins),
   * and a compound delta key (compared field by field).
   */
  @Test
  public void testAvroReduce()
      throws IOException, InterruptedException {
    // Build the shared dedup key record that all four values map to.
    Schema keySchema = new Schema.Parser().parse(AVRO_KEY_SCHEMA);
    GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(keySchema.getField("key").schema());
    keyRecordBuilder.set("partitionKey", 1);
    keyRecordBuilder.set("environment", "test");
    keyRecordBuilder.set("subKey", "2");
    GenericRecord record = keyRecordBuilder.build();
    keyRecordBuilder = new GenericRecordBuilder(keySchema);
    keyRecordBuilder.set("key", record);
    GenericRecord keyRecord = keyRecordBuilder.build();
    // Test reducer with delta field "scn": four records, identical key, differing scn/scn2.
    Schema fullSchema = new Schema.Parser().parse(AVRO_FULL_SCHEMA);
    AvroValue<GenericRecord> fullRecord1 = new AvroValue<>();
    AvroValue<GenericRecord> fullRecord2 = new AvroValue<>();
    AvroValue<GenericRecord> fullRecord3 = new AvroValue<>();
    AvroValue<GenericRecord> fullRecord4 = new AvroValue<>();
    GenericRecordBuilder fullRecordBuilder1 = new GenericRecordBuilder(fullSchema);
    fullRecordBuilder1.set("key", record);
    fullRecordBuilder1.set("scn", 123);
    fullRecordBuilder1.set("scn2", 100);
    fullRecord1.datum(fullRecordBuilder1.build());
    fullRecordBuilder1.set("scn", 125);
    fullRecordBuilder1.set("scn2", 1);
    fullRecord2.datum(fullRecordBuilder1.build());
    fullRecordBuilder1.set("scn", 124);
    fullRecordBuilder1.set("scn2", 10);
    fullRecord3.datum(fullRecordBuilder1.build());
    fullRecordBuilder1.set("scn", 122);
    fullRecordBuilder1.set("scn2", 1000);
    fullRecord4.datum(fullRecordBuilder1.build());
    // Mocked Hadoop configuration wires in the attribute-based delta-field provider.
    Configuration conf = mock(Configuration.class);
    when(conf.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER))
        .thenReturn(FieldAttributeBasedDeltaFieldsProvider.class.getName());
    when(conf.get(FieldAttributeBasedDeltaFieldsProvider.ATTRIBUTE_FIELD)).thenReturn("attributes_json");
    when(conf.get(FieldAttributeBasedDeltaFieldsProvider.DELTA_PROP_NAME,
        FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME))
        .thenReturn(FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME);
    RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>,
        AvroKey<GenericRecord>, NullWritable> reducer = new AvroKeyDedupReducer();
    WrappedReducer.Context reducerContext = mock(WrappedReducer.Context.class);
    when(reducerContext.getConfiguration()).thenReturn(conf);
    // The reducer increments these counters during reduce(); back them with real counters.
    Counter moreThan1Counter = new GenericCounter();
    when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.MORE_THAN_1)).thenReturn(moreThan1Counter);
    Counter dedupedCounter = new GenericCounter();
    when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.DEDUPED)).thenReturn(dedupedCounter);
    Counter recordCounter = new GenericCounter();
    when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT)).thenReturn(recordCounter);
    reducer.setup(reducerContext);
    doNothing().when(reducerContext).write(any(AvroKey.class), any(NullWritable.class));
    List<AvroValue<GenericRecord>> valueIterable =
        Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);
    AvroKey<GenericRecord> key = new AvroKey<>();
    key.datum(keyRecord);
    reducer.reduce(key, valueIterable, reducerContext);
    // fullRecord2 has the largest scn (125), so it must be the surviving record.
    Assert.assertEquals(reducer.getOutKey().datum(), fullRecord2.datum());
    // Test reducer without delta field: with no delta provider the first record wins.
    Configuration conf2 = mock(Configuration.class);
    when(conf2.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER)).thenReturn(null);
    when(reducerContext.getConfiguration()).thenReturn(conf2);
    RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>,
        AvroKey<GenericRecord>, NullWritable> reducer2 = new AvroKeyDedupReducer();
    reducer2.setup(reducerContext);
    reducer2.reduce(key, valueIterable, reducerContext);
    Assert.assertEquals(reducer2.getOutKey().datum(), fullRecord1.datum());
    // Test reducer with compound delta key: scn2 is compared before scn (schema field order),
    // so record3 (scn2=1000, scn=126) beats record2 (scn2=1000, scn=125) and record4 (scn2=100).
    Schema fullSchema2 = new Schema.Parser().parse(AVRO_FULL_SCHEMA_WITH_TWO_DELTA_FIELDS);
    GenericRecordBuilder fullRecordBuilder2 = new GenericRecordBuilder(fullSchema2);
    fullRecordBuilder2.set("key", record);
    fullRecordBuilder2.set("scn", 123);
    fullRecordBuilder2.set("scn2", 100);
    fullRecord1.datum(fullRecordBuilder2.build());
    fullRecordBuilder2.set("scn", 125);
    fullRecordBuilder2.set("scn2", 1000);
    fullRecord2.datum(fullRecordBuilder2.build());
    fullRecordBuilder2.set("scn", 126);
    fullRecordBuilder2.set("scn2", 1000);
    fullRecord3.datum(fullRecordBuilder2.build());
    fullRecordBuilder2.set("scn", 130);
    fullRecordBuilder2.set("scn2", 100);
    fullRecord4.datum(fullRecordBuilder2.build());
    List<AvroValue<GenericRecord>> valueIterable2 =
        Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);
    reducer.reduce(key, valueIterable2, reducerContext);
    Assert.assertEquals(reducer.getOutKey().datum(), fullRecord3.datum());
  }
}
| 1,774 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfiguratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.Schema;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.AvroUtils;
public class CompactionJobConfiguratorTest {

  /**
   * Fields named in the key-field blacklist (including dotted nested paths) must be
   * pruned from the dedup key schema derived from the topic schema.
   */
  @Test
  public void testKeyFieldBlacklist() throws IOException {
    State jobState = new State();
    jobState.setProp("compaction.job.key.fieldBlacklist", "auditHeader,value.lumos_dropdate,value.__ETL_SCN");
    jobState.setProp("compaction.job.dedup.key", "ALL");
    CompactionAvroJobConfigurator configurator = new CompactionAvroJobConfigurator(jobState);
    try (InputStream schemaStream = getClass().getClassLoader().getResourceAsStream("dedup-schema/key-schema.avsc")) {
      Schema topicSchema = new Schema.Parser().parse(schemaStream);
      Schema keySchema = configurator.getDedupKeySchema(topicSchema);
      // Top level keeps two fields after dropping auditHeader; nested "value" loses two of its own.
      Assert.assertEquals(keySchema.getFields().size(), 2);
      Assert.assertEquals(keySchema.getField("value").schema().getFields().size(), 57);
      Assert.assertFalse(AvroUtils.getFieldSchema(keySchema, "auditheader").isPresent());
      Assert.assertFalse(AvroUtils.getFieldSchema(keySchema, "value.lumos_dropdate").isPresent());
      Assert.assertFalse(AvroUtils.getFieldSchema(keySchema, "value.__ETL_SCN").isPresent());
    }
  }
}
| 1,775 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce/OrcCompactionTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.impl.ReaderImpl;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapreduce.OrcMapreduceRecordReader;
import org.apache.orc.mapreduce.OrcMapreduceRecordWriter;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Files;
import org.apache.gobblin.compaction.mapreduce.orc.OrcTestUtils;
import org.apache.gobblin.compaction.mapreduce.orc.OrcUtils;
import org.apache.gobblin.compaction.mapreduce.test.TestCompactionOrcJobConfigurator;
import org.apache.gobblin.compaction.mapreduce.test.TestCompactionTaskUtils;
import org.apache.gobblin.compaction.verify.InputRecordCountHelper;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import static org.apache.gobblin.compaction.mapreduce.CompactionCombineFileInputFormat.COMPACTION_JOB_MAPRED_MAX_SPLIT_SIZE;
import static org.apache.gobblin.compaction.mapreduce.CompactionCombineFileInputFormat.COMPACTION_JOB_MAPRED_MIN_SPLIT_SIZE;
import static org.apache.gobblin.compaction.mapreduce.CompactionOrcJobConfigurator.ORC_MAPPER_SHUFFLE_KEY_SCHEMA;
import static org.apache.gobblin.compaction.mapreduce.CompactorOutputCommitter.COMPACTION_OUTPUT_EXTENSION;
import static org.apache.gobblin.compaction.mapreduce.MRCompactor.COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET;
import static org.apache.gobblin.compaction.mapreduce.MRCompactor.COMPACTION_SHOULD_DEDUPLICATE;
import static org.apache.gobblin.compaction.mapreduce.test.TestCompactionTaskUtils.createEmbeddedGobblinCompactionJob;
@Test(groups = {"gobblin.compaction"})
public class OrcCompactionTaskTest {
final String extensionName = "orc";
/**
 * Writes two small ORC files (schema {@code struct<i:int,j:int>}) into {@code jobDir}.
 * Records (1,2) appear in both files so the pair forms exactly one duplicate; file names
 * follow the FILENAME.RECORDCOUNT.EXTENSION convention expected by the compaction job.
 */
private void createTestingData(File jobDir) throws Exception {
  TypeDescription schema = TypeDescription.fromString("struct<i:int,j:int>");

  // (i, j) pairs for the four test records; the first two are intentional duplicates.
  int[][] fieldValues = {{1, 2}, {1, 2}, {2, 3}, {4, 5}};
  OrcStruct[] records = new OrcStruct[fieldValues.length];
  for (int idx = 0; idx < fieldValues.length; idx++) {
    OrcStruct orcRecord = (OrcStruct) OrcStruct.createValue(schema);
    orcRecord.setFieldValue("i", new IntWritable(fieldValues[idx][0]));
    orcRecord.setFieldValue("j", new IntWritable(fieldValues[idx][1]));
    records[idx] = orcRecord;
  }

  // Following pattern: FILENAME.RECORDCOUNT.EXTENSION
  File file_0 = new File(jobDir, "file_0.2." + extensionName);
  File file_1 = new File(jobDir, "file_1.2." + extensionName);
  writeOrcRecordsInFile(new Path(file_0.getAbsolutePath()), schema, ImmutableList.of(records[0], records[2]));
  writeOrcRecordsInFile(new Path(file_1.getAbsolutePath()), schema, ImmutableList.of(records[1], records[3]));
}
/**
 * This test case covers the scenarios when the split size is smaller than an actual file size:
 * RecordReader should stop on the boundary when read records that will make the split beyond the max_split_size,
 * and the subsequent reader should just load the remaining records.
 */
@Test
public void testWithPartialFileInSplit() throws Exception {
  File basePath = Files.createTempDir();
  FileSystem fs = FileSystem.getLocal(new Configuration());
  basePath.deleteOnExit();
  String minutelyPath = "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20";
  String hourlyPath = "Identity/MemberAccount/hourly/2017/04/03/10/";
  File jobDir = new File(basePath, minutelyPath);
  Assert.assertTrue(jobDir.mkdirs());
  // Writing some basic ORC files
  // Testing data is schema'ed with "struct<i:int,j:int>"
  createTestingData(jobDir);
  // Sample the size of one generated file and use half of it as the split size,
  // forcing every file to straddle a split boundary.
  FileStatus[] statuses = fs.listStatus(new Path(jobDir.getAbsolutePath()));
  Assert.assertTrue(statuses.length > 0 );
  long splitSize = statuses[0].getLen() / 2 ;
  Assert.assertTrue(splitSize > 0);
  EmbeddedGobblin embeddedGobblin = TestCompactionTaskUtils.createEmbeddedGobblinCompactionJob("basic", basePath.getAbsolutePath())
      .setConfiguration(CompactionJobConfigurator.COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY,
          TestCompactionOrcJobConfigurator.Factory.class.getName())
      // Each file generated by the data-creation function is around 250 bytes in terms of length.
      // Setting the max split size to be half the size force a single file to be split.
      .setConfiguration(COMPACTION_JOB_MAPRED_MAX_SPLIT_SIZE, splitSize + "")
      .setConfiguration(COMPACTION_JOB_MAPRED_MIN_SPLIT_SIZE, splitSize + "")
      .setConfiguration(COMPACTION_OUTPUT_EXTENSION, extensionName);
  JobExecutionResult execution = embeddedGobblin.run();
  Assert.assertTrue(execution.isSuccessful());
  // Result verification: even with files split across readers, all 4 records must be
  // counted and the single duplicate pair detected (createTestingData writes (1,2) twice).
  File outputDir = new File(basePath, hourlyPath);
  State state = new State();
  state.setProp(COMPACTION_OUTPUT_EXTENSION, "orc");
  InputRecordCountHelper stateHelper = new InputRecordCountHelper(state);
  Assert.assertEquals(stateHelper.readRecordCount(new Path(outputDir.getAbsolutePath())), 4);
  Assert.assertEquals(stateHelper.readDuplicationCount(new Path(outputDir.getAbsolutePath())), 1);
}
/**
 * Verifies that specifying a shuffle-key schema that matches none of the record's fields
 * is harmless: the job still succeeds and dedup operates on the whole record.
 */
@Test
public void basicTestWithShuffleKeySpecified() throws Exception {
  File testRoot = Files.createTempDir();
  testRoot.deleteOnExit();
  File inputDir = new File(testRoot, "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
  Assert.assertTrue(inputDir.mkdirs());
  // Basic ORC input, schema'ed with "struct<i:int,j:int>"
  createTestingData(inputDir);
  EmbeddedGobblin compactionJob = TestCompactionTaskUtils.createEmbeddedGobblinCompactionJob("basic", testRoot.getAbsolutePath())
      .setConfiguration(CompactionJobConfigurator.COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY,
          TestCompactionOrcJobConfigurator.Factory.class.getName())
      .setConfiguration(COMPACTION_OUTPUT_EXTENSION, extensionName)
      // A shuffle key that shouldn't be taken.
      .setConfiguration(ORC_MAPPER_SHUFFLE_KEY_SCHEMA, "struct<k:int>");
  JobExecutionResult jobResult = compactionJob.run();
  Assert.assertTrue(jobResult.isSuccessful());
  // Inspect the compacted output: one file holding the three deduped records.
  File outputDir = new File(testRoot, "Identity/MemberAccount/hourly/2017/04/03/10/");
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  List<FileStatus> outputFiles = new ArrayList<>();
  reloadFolder(outputFiles, outputDir, localFs);
  Assert.assertEquals(outputFiles.size(), 1);
  List<OrcStruct> records = readOrcFile(outputFiles.get(0).getPath());
  Assert.assertEquals(records.size(), 3);
  // Expected (i, j) pairs, in order.
  int[][] expected = {{1, 2}, {2, 3}, {4, 5}};
  for (int row = 0; row < expected.length; row++) {
    Assert.assertEquals(records.get(row).getFieldValue("i"), new IntWritable(expected[row][0]));
    Assert.assertEquals(records.get(row).getFieldValue("j"), new IntWritable(expected[row][1]));
  }
}
/**
 * Recompaction with basic schema evolution: an extra input file written with an evolved
 * schema ("struct<i:int,j:int,k:int>") is made the newest file, so the compacted output
 * is expected to carry the evolved schema (older records get a null "k"). A late file is
 * then dropped into the input and a second run verifies that re-compaction picks it up.
 */
@Test
public void basicTestWithRecompactionAndBasicSchemaEvolution() throws Exception {
File basePath = Files.createTempDir();
basePath.deleteOnExit();
String minutelyPath = "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20";
String hourlyPath = "Identity/MemberAccount/hourly/2017/04/03/10/";
File jobDir = new File(basePath, minutelyPath);
Assert.assertTrue(jobDir.mkdirs());
// Writing some basic ORC files (schema "struct<i:int,j:int>").
createTestingData(jobDir);
// Writing an additional file with ** evolved schema **.
TypeDescription evolvedSchema = TypeDescription.fromString("struct<i:int,j:int,k:int>");
OrcStruct orcStruct_4 = (OrcStruct) OrcStruct.createValue(evolvedSchema);
orcStruct_4.setFieldValue("i", new IntWritable(5));
orcStruct_4.setFieldValue("j", new IntWritable(6));
orcStruct_4.setFieldValue("k", new IntWritable(7));
// Following pattern: FILENAME.RECORDCOUNT.EXTENSION
File file_2 = new File(jobDir, "file_2.1." + extensionName);
writeOrcRecordsInFile(new Path(file_2.getAbsolutePath()), evolvedSchema, ImmutableList.of(orcStruct_4));
// Make this the newest file so its (evolved) schema is chosen for the compacted output.
file_2.setLastModified(Long.MAX_VALUE);
// Verify execution
// Overwrite the job configurator factory key.
EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("basic", basePath.getAbsolutePath())
.setConfiguration(CompactionJobConfigurator.COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY,
TestCompactionOrcJobConfigurator.Factory.class.getName())
.setConfiguration(COMPACTION_OUTPUT_EXTENSION, extensionName)
.setConfiguration(COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET, "Identity.*:0.1");
JobExecutionResult execution = embeddedGobblin.run();
Assert.assertTrue(execution.isSuccessful());
// Result verification: a single output file with 4 records in the evolved schema.
File outputDir = new File(basePath, hourlyPath);
FileSystem fs = FileSystem.getLocal(new Configuration());
List<FileStatus> statuses = new ArrayList<>();
reloadFolder(statuses, outputDir, fs);
Assert.assertTrue(statuses.size() == 1);
List<OrcStruct> result = readOrcFile(statuses.get(0).getPath());
Assert.assertEquals(result.size(), 4);
// The three original records were up-converted: "k" was absent in their schema, so it is null.
Assert.assertEquals(result.get(0).getFieldValue("i"), new IntWritable(1));
Assert.assertEquals(result.get(0).getFieldValue("j"), new IntWritable(2));
Assert.assertNull(result.get(0).getFieldValue("k"));
Assert.assertEquals(result.get(1).getFieldValue("i"), new IntWritable(2));
Assert.assertEquals(result.get(1).getFieldValue("j"), new IntWritable(3));
Assert.assertNull(result.get(1).getFieldValue("k"));
Assert.assertEquals(result.get(2).getFieldValue("i"), new IntWritable(4));
Assert.assertEquals(result.get(2).getFieldValue("j"), new IntWritable(5));
Assert.assertNull(result.get(2).getFieldValue("k"));
// The evolved-schema record keeps its "k" value.
Assert.assertEquals(result.get(3).getFieldValue("i"), new IntWritable(5));
Assert.assertEquals(result.get(3).getFieldValue("j"), new IntWritable(6));
Assert.assertEquals(result.get(3).getFieldValue("k"), new IntWritable(7));
// Adding new .orc file into the directory and verify if re-compaction is triggered.
File file_late = new File(jobDir, "file_late.1." + extensionName);
OrcStruct orcStruct_5 = (OrcStruct) OrcUtils.createValueRecursively(evolvedSchema);
orcStruct_5.setFieldValue("i", new IntWritable(10));
orcStruct_5.setFieldValue("j", new IntWritable(11));
orcStruct_5.setFieldValue("k", new IntWritable(12));
writeOrcRecordsInFile(new Path(file_late.getAbsolutePath()), evolvedSchema, ImmutableList.of(orcStruct_5));
execution = embeddedGobblin.run();
Assert.assertTrue(execution.isSuccessful());
reloadFolder(statuses, outputDir, fs);
result = readOrcFile(statuses.get(0).getPath());
// Note previous execution's inspection gives 4 result; given re-compaction, this should give 1 late-record more.
Assert.assertEquals(result.size(), 4 + 1);
}
/**
 * Verifies reducer-side dedup with a custom shuffle-key schema: records that differ only in
 * columns excluded from the shuffle key ("b" and the nested "a.b") must NOT be collapsed,
 * while true duplicates under the key (struct_4 vs struct_5) must be deduped.
 */
@Test
public void testReducerSideDedup() throws Exception {
  File basePath = Files.createTempDir();
  basePath.deleteOnExit();
  String minutelyPath = "Identity/MemberAccount/minutely/2020/04/03/10/20_30/run_2020-04-03-10-20";
  String hourlyPath = "Identity/MemberAccount/hourly/2020/04/03/10/";
  File jobDir = new File(basePath, minutelyPath);
  Assert.assertTrue(jobDir.mkdirs());
  TypeDescription nestedSchema = TypeDescription.fromString("struct<a:struct<a:int,b:string,c:int>,b:string,c:uniontype<int,string>>");
  // Create three records with same value except "b" column in the top-level.
  OrcStruct nested_struct_1 = (OrcStruct) OrcUtils.createValueRecursively(nestedSchema);
  OrcTestUtils.fillOrcStructWithFixedValue(nested_struct_1, nestedSchema, 1, "test1", true);
  nested_struct_1.setFieldValue("b", new Text("uno"));
  OrcStruct nested_struct_2 = (OrcStruct) OrcUtils.createValueRecursively(nestedSchema);
  OrcTestUtils.fillOrcStructWithFixedValue(nested_struct_2, nestedSchema, 1, "test2", true);
  nested_struct_2.setFieldValue("b", new Text("dos"));
  OrcStruct nested_struct_3 = (OrcStruct) OrcUtils.createValueRecursively(nestedSchema);
  OrcTestUtils.fillOrcStructWithFixedValue(nested_struct_3, nestedSchema, 1, "test3", true);
  nested_struct_3.setFieldValue("b", new Text("tres"));
  // Create another two records with different value from the above three, and these two differ in column b as well.
  OrcStruct nested_struct_4 = (OrcStruct) OrcUtils.createValueRecursively(nestedSchema);
  OrcTestUtils.fillOrcStructWithFixedValue(nested_struct_4, nestedSchema, 2, "test2", false);
  nested_struct_4.setFieldValue("b", new Text("uno"));
  // This record will be considered as a duplication of nested_struct_4.
  OrcStruct nested_struct_5 = (OrcStruct) OrcUtils.createValueRecursively(nestedSchema);
  OrcTestUtils.fillOrcStructWithFixedValue(nested_struct_5, nestedSchema, 2, "test2", false);
  nested_struct_5.setFieldValue("b", new Text("uno"));
  // Following pattern: FILENAME.RECORDCOUNT.EXTENSION
  File file_0 = new File(jobDir, "file_0.5." + extensionName);
  writeOrcRecordsInFile(new Path(file_0.getAbsolutePath()), nestedSchema, ImmutableList.of(nested_struct_1,
      nested_struct_2, nested_struct_3, nested_struct_4, nested_struct_5));
  EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("basic", basePath.getAbsolutePath())
      .setConfiguration(CompactionJobConfigurator.COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY,
          TestCompactionOrcJobConfigurator.Factory.class.getName())
      .setConfiguration(COMPACTION_OUTPUT_EXTENSION, extensionName)
      // Shuffle key only covers a.a and a.c — "b" columns are deliberately excluded.
      .setConfiguration(ORC_MAPPER_SHUFFLE_KEY_SCHEMA, "struct<a:struct<a:int,c:int>>");
  JobExecutionResult execution = embeddedGobblin.run();
  Assert.assertTrue(execution.isSuccessful());
  // Verifying result: Reducer should catch all the false-duplicates.
  File outputDir = new File(basePath, hourlyPath);
  FileSystem fs = FileSystem.getLocal(new Configuration());
  List<FileStatus> statuses = new ArrayList<>();
  reloadFolder(statuses, outputDir, fs);
  Assert.assertEquals(statuses.size(), 1);
  List<OrcStruct> result = readOrcFile(statuses.get(0).getPath());
  // Should still contain original 3 records since they have different values in columns not included in shuffle key.
  Assert.assertEquals(result.size(), 4);
  Assert.assertTrue(result.contains(nested_struct_1));
  Assert.assertTrue(result.contains(nested_struct_2));
  Assert.assertTrue(result.contains(nested_struct_3));
  Assert.assertTrue(result.contains(nested_struct_4));
}
/**
 * Reloads {@code statuses} with the files under {@code outputDir} whose extension matches
 * {@code extensionName}, for compaction-result inspection. The list is cleared first so the
 * same list instance can be reused across multiple job runs.
 *
 * @param statuses list to (re)populate with matching output files; cleared before loading
 * @param outputDir directory holding the compaction output
 * @param fs file system used to list {@code outputDir}
 * @throws IOException if listing the directory fails
 */
private void reloadFolder(List<FileStatus> statuses, File outputDir, FileSystem fs) throws IOException {
  statuses.clear();
  // PathFilter is a single-method interface, so a lambda replaces the anonymous class.
  FileStatus[] matched = fs.listStatus(new Path(outputDir.getAbsolutePath()),
      path -> FilenameUtils.isExtension(path.getName(), extensionName));
  for (FileStatus status : matched) {
    statuses.add(status);
  }
}
/**
 * Runs compaction with deduplication disabled and verifies that all four input records,
 * including the duplicated (1, 2) pair, survive into the single compacted output file.
 */
@Test
public void testNonDedup() throws Exception {
  File testRoot = Files.createTempDir();
  testRoot.deleteOnExit();
  File inputDir = new File(testRoot, "Identity/MemberAccount_2/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
  Assert.assertTrue(inputDir.mkdirs());
  createTestingData(inputDir);
  EmbeddedGobblin nonDedupJob = createEmbeddedGobblinCompactionJob("basic", testRoot.getAbsolutePath())
      .setConfiguration(CompactionJobConfigurator.COMPACTION_JOB_CONFIGURATOR_FACTORY_CLASS_KEY,
          TestCompactionOrcJobConfigurator.Factory.class.getName())
      .setConfiguration(COMPACTION_OUTPUT_EXTENSION, "orc")
      .setConfiguration(COMPACTION_SHOULD_DEDUPLICATE, "false");
  JobExecutionResult jobResult = nonDedupJob.run();
  Assert.assertTrue(jobResult.isSuccessful());
  // Non-dedup result verification: collect every ".orc" file in the hourly output folder.
  File outputDir = new File(testRoot, "Identity/MemberAccount_2/hourly/2017/04/03/10/");
  FileSystem localFs = FileSystem.getLocal(new Configuration());
  PathFilter orcOnly = new PathFilter() {
    @Override
    public boolean accept(Path candidate) {
      return FilenameUtils.isExtension(candidate.getName(), "orc");
    }
  };
  List<FileStatus> outputFiles = new ArrayList<>();
  for (FileStatus status : localFs.listStatus(new Path(outputDir.getAbsolutePath()), orcOnly)) {
    outputFiles.add(status);
  }
  Assert.assertEquals(outputFiles.size(), 1);
  List<OrcStruct> records = readOrcFile(outputFiles.get(0).getPath());
  Assert.assertEquals(records.size(), 4);
  // Sort to get a deterministic order before comparing field values.
  records.sort(new Comparator<OrcStruct>() {
    @Override
    public int compare(OrcStruct left, OrcStruct right) {
      return left.compareTo(right);
    }
  });
  // The duplicate (1, 2) record must appear twice since dedup is off.
  int[][] expected = {{1, 2}, {1, 2}, {2, 3}, {4, 5}};
  for (int row = 0; row < expected.length; row++) {
    Assert.assertEquals(records.get(row).getFieldValue("i"), new IntWritable(expected[row][0]));
    Assert.assertEquals(records.get(row).getFieldValue("j"), new IntWritable(expected[row][1]));
  }
}
/**
 * Reads an output ORC compacted file fully into memory.
 * This only works if fields are int value.
 *
 * @param orcFilePath path of the ORC file to read
 * @return all records of the file, each copied into its own {@link OrcStruct}
 * @throws IOException if the file cannot be opened or read
 * @throws InterruptedException if the record reader is interrupted
 */
private List<OrcStruct> readOrcFile(Path orcFilePath)
    throws IOException, InterruptedException {
  ReaderImpl orcReader = new ReaderImpl(orcFilePath, new OrcFile.ReaderOptions(new Configuration()));
  Reader.Options options = new Reader.Options().schema(orcReader.getSchema());
  List<OrcStruct> result = new ArrayList<>();
  // try-with-resources: the record reader (and underlying file handle) was previously leaked.
  try (OrcMapreduceRecordReader recordReader = new OrcMapreduceRecordReader(orcReader, options)) {
    while (recordReader.nextKeyValue()) {
      // The reader reuses its value object, so up-convert each record into a fresh container.
      OrcStruct recordContainer = (OrcStruct) OrcUtils.createValueRecursively(orcReader.getSchema());
      OrcUtils.upConvertOrcStruct((OrcStruct) recordReader.getCurrentValue(), recordContainer, orcReader.getSchema());
      result.add(recordContainer);
    }
  }
  return result;
}
/**
 * Writes the given records into an ORC file at {@code path} using the supplied schema.
 *
 * @param path destination file path
 * @param schema ORC schema the records conform to
 * @param orcStructs records to write, in order
 * @throws Exception if writing or closing the file fails
 */
private void writeOrcRecordsInFile(Path path, TypeDescription schema, List<OrcStruct> orcStructs) throws Exception {
  Configuration configuration = new Configuration();
  OrcFile.WriterOptions options = OrcFile.writerOptions(configuration).setSchema(schema);
  Writer writer = OrcFile.createWriter(path, options);
  OrcMapreduceRecordWriter recordWriter = new OrcMapreduceRecordWriter(writer);
  try {
    for (OrcStruct orcRecord : orcStructs) {
      recordWriter.write(NullWritable.get(), orcRecord);
    }
  } finally {
    // Always close so the ORC footer is written and the handle released, even if a write fails.
    recordWriter.close(new TaskAttemptContextImpl(configuration, new TaskAttemptID()));
  }
}
}
| 1,776 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce/AvroCompactionTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.io.Files;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.audit.AuditCountClientFactory;
import org.apache.gobblin.compaction.dataset.TimeBasedSubDirDatasetsFinder;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.source.CompactionSource;
import org.apache.gobblin.compaction.suite.TestCompactionSuiteFactories;
import org.apache.gobblin.compaction.verify.CompactionAuditCountVerifier;
import org.apache.gobblin.compaction.verify.CompactionVerifier;
import org.apache.gobblin.compaction.verify.InputRecordCountHelper;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.data.management.dataset.DatasetUtils;
import org.apache.gobblin.data.management.dataset.SimpleDatasetHierarchicalPrioritizer;
import org.apache.gobblin.data.management.dataset.TimePartitionGlobFinder;
import org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder;
import org.apache.gobblin.runtime.api.JobExecutionResult;
import org.apache.gobblin.runtime.embedded.EmbeddedGobblin;
import static org.apache.gobblin.compaction.mapreduce.test.TestCompactionTaskUtils.createEmbeddedGobblinCompactionJob;
@Slf4j
@Test(groups = {"gobblin.compaction"})
public class AvroCompactionTaskTest {

  /** Returns a handle to the local {@link FileSystem} used by these tests. */
  protected FileSystem getFileSystem()
      throws IOException {
    String uri = ConfigurationKeys.LOCAL_FS_URI;
    return FileSystem.get(URI.create(uri), new Configuration());
  }

  /** Compaction with dedup over inputs that include a newest file carrying an evolved schema. */
  @Test
  public void testDedup() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();
    File jobDir = new File(basePath, "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
    Assert.assertTrue(jobDir.mkdirs());
    GenericRecord r1 = createRandomRecord();
    GenericRecord r2 = createRandomRecord();
    GenericRecord r3 = createEvolvedSchemaRecord();
    writeFileWithContent(jobDir, "file1", r1, 20);
    writeFileWithContent(jobDir, "file2", r2, 18);
    // Mark the evolved-schema file as newest (best-effort; some filesystems may clamp the mtime).
    File newestFile = writeFileWithContent(jobDir, "file3", r3, 10, r3.getSchema());
    newestFile.setLastModified(Long.MAX_VALUE);
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("dedup", basePath.getAbsolutePath());
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
  }

  /** Compaction with two distinct records and dedup left to the job's "non-dedup" template. */
  @Test
  public void testNonDedup() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();
    File jobDir = new File(basePath, "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
    Assert.assertTrue(jobDir.mkdirs());
    GenericRecord r1 = createRandomRecord();
    GenericRecord r2 = createRandomRecord();
    writeFileWithContent(jobDir, "file1", r1, 20);
    writeFileWithContent(jobDir, "file2", r2, 18);
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("non-dedup", basePath.getAbsolutePath());
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
  }

  /** Compaction over virtual (time-partition-glob) datasets; only verifies the job completes. */
  @Test
  public void testCompactVirtualDataset() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();
    File jobDir = new File(basePath, "PageViewEvent");
    Assert.assertTrue(jobDir.mkdirs());
    String pattern = new Path(basePath.getAbsolutePath(), "*").toString();
    String jobName = "compaction-virtual";
    EmbeddedGobblin embeddedGobblin = new EmbeddedGobblin(jobName)
        .setConfiguration(ConfigurationKeys.SOURCE_CLASS_KEY, CompactionSource.class.getName())
        .setConfiguration(ConfigurableGlobDatasetFinder.DATASET_FINDER_PATTERN_KEY, pattern)
        .setConfiguration(MRCompactor.COMPACTION_INPUT_DIR, basePath.toString())
        .setConfiguration(MRCompactor.COMPACTION_INPUT_SUBDIR, "hourly")
        .setConfiguration(MRCompactor.COMPACTION_DEST_DIR, basePath.toString())
        .setConfiguration(MRCompactor.COMPACTION_DEST_SUBDIR, "daily")
        .setConfiguration(MRCompactor.COMPACTION_TMP_DEST_DIR, "/tmp/compaction/" + jobName)
        .setConfiguration(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MAX_TIME_AGO, "3d")
        .setConfiguration(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MIN_TIME_AGO, "1d")
        .setConfiguration(ConfigurationKeys.MAX_TASK_RETRIES_KEY, "0")
        .setConfiguration(DatasetUtils.DATASET_PROFILE_CLASS_KEY,
            "org.apache.gobblin.data.management.dataset.TimePartitionGlobFinder")
        .setConfiguration(TimePartitionGlobFinder.PARTITION_PREFIX, "hourly/")
        .setConfiguration(TimePartitionGlobFinder.TIME_FORMAT, "yyyy/MM/dd")
        .setConfiguration(TimePartitionGlobFinder.GRANULARITY, "DAY")
        .setConfiguration(TimePartitionGlobFinder.LOOKBACK_SPEC, "P3D")
        .setConfiguration(TimePartitionGlobFinder.ENABLE_VIRTUAL_PARTITION, "true");
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
  }

  /** Verifies re-compaction updates the persisted record count when new input arrives. */
  @Test
  public void testAvroRecompaction() throws Exception {
    FileSystem fs = getFileSystem();
    String basePath = "/tmp/testRecompaction";
    fs.delete(new Path(basePath), true);
    File jobDir = new File(basePath, "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
    Assert.assertTrue(jobDir.mkdirs());
    GenericRecord r1 = createRandomRecord();
    writeFileWithContent(jobDir, "file1", r1, 20);
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("Recompaction-First", basePath);
    JobExecutionResult result = embeddedGobblin.run();
    long recordCount = InputRecordCountHelper.readRecordCount(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    Assert.assertTrue(result.isSuccessful());
    Assert.assertEquals(recordCount, 20);
    // Now write more avro files to input dir
    writeFileWithContent(jobDir, "file2", r1, 22);
    EmbeddedGobblin embeddedGobblin_2 = createEmbeddedGobblinCompactionJob("Recompaction-Second", basePath);
    // BUGFIX: capture the second run's result; previously the first run's stale result was re-asserted.
    result = embeddedGobblin_2.run();
    Assert.assertTrue(result.isSuccessful());
    // If recompaction succeeded, a new record count should be written.
    recordCount = InputRecordCountHelper.readRecordCount(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    Assert.assertEquals(recordCount, 42);
    Assert.assertTrue(fs.exists(new Path(basePath, "Identity/MemberAccount/hourly/2017/04/03/10")));
  }

  /** Same as {@link #testAvroRecompaction()} but each run writes into a new "compaction_N" folder. */
  @Test
  public void testAvroRecompactionWriteToNewPath() throws Exception {
    FileSystem fs = getFileSystem();
    String basePath = "/tmp/testRecompactionWriteToNewPath";
    fs.delete(new Path(basePath), true);
    File jobDir = new File(basePath, "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
    Assert.assertTrue(jobDir.mkdirs());
    GenericRecord r1 = createRandomRecord();
    writeFileWithContent(jobDir, "file1", r1, 20);
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("Recompaction-First", basePath);
    embeddedGobblin.setConfiguration(ConfigurationKeys.RECOMPACTION_WRITE_TO_NEW_FOLDER, "true");
    JobExecutionResult result = embeddedGobblin.run();
    long recordCount = InputRecordCountHelper.readRecordCount(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    Assert.assertTrue(result.isSuccessful());
    Assert.assertEquals(recordCount, 20);
    // Now write more avro files to input dir
    writeFileWithContent(jobDir, "file2", r1, 22);
    EmbeddedGobblin embeddedGobblin_2 = createEmbeddedGobblinCompactionJob("Recompaction-Second", basePath);
    embeddedGobblin_2.setConfiguration(ConfigurationKeys.RECOMPACTION_WRITE_TO_NEW_FOLDER, "true");
    // BUGFIX: capture the second run's result; previously the first run's stale result was re-asserted.
    result = embeddedGobblin_2.run();
    Assert.assertTrue(result.isSuccessful());
    // If recompaction succeeded, a new record count should be written.
    recordCount = InputRecordCountHelper.readRecordCount(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    Assert.assertEquals(recordCount, 42);
    // Assert both old output and new output exist
    Assert.assertTrue(fs.exists(new Path(basePath, "Identity/MemberAccount/hourly/2017/04/03/10/compaction_1")));
    Assert.assertTrue(fs.exists(new Path(basePath, "Identity/MemberAccount/hourly/2017/04/03/10/compaction_2")));
  }

  /** Verifies MIN_RECOMPACTION_DURATION gates re-compaction until enough time has elapsed. */
  @Test
  public void testAvroRecompactionWithLimitation() throws Exception {
    FileSystem fs = getFileSystem();
    String basePath = "/tmp/testRecompactionWithLimitation";
    fs.delete(new Path(basePath), true);
    File jobDir = new File(basePath, "Identity/MemberAccount/minutely/2017/04/03/10/20_30/run_2017-04-03-10-20");
    Assert.assertTrue(jobDir.mkdirs());
    GenericRecord r1 = createRandomRecord();
    writeFileWithContent(jobDir, "file1", r1, 20);
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("Recompaction-First", basePath);
    JobExecutionResult result = embeddedGobblin.run();
    long recordCount = InputRecordCountHelper.readRecordCount(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    Assert.assertTrue(result.isSuccessful());
    Assert.assertEquals(recordCount, 20);
    // Now write more avro files to input dir
    writeFileWithContent(jobDir, "file2", r1, 22);
    EmbeddedGobblin embeddedGobblin_2 = createEmbeddedGobblinCompactionJob("Recompaction-Second", basePath);
    embeddedGobblin_2.setConfiguration(TimeBasedSubDirDatasetsFinder.MIN_RECOMPACTION_DURATION, "8h");
    // BUGFIX: capture this run's result; previously the first run's stale result was re-asserted.
    result = embeddedGobblin_2.run();
    Assert.assertTrue(result.isSuccessful());
    // Because the 8h minimum duration has not elapsed, re-compaction should not have run.
    recordCount = InputRecordCountHelper.readRecordCount(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    Assert.assertEquals(recordCount, 20);
    // Backdate the last run's start time by 8 hours to satisfy the minimum-duration gate.
    State state = InputRecordCountHelper.loadState(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    state.setProp(CompactionSlaEventHelper.LAST_RUN_START_TIME,
        Long.toString(state.getPropAsLong(CompactionSlaEventHelper.LAST_RUN_START_TIME) - 8 * 60 * 60 * 1000));
    InputRecordCountHelper.saveState(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))), state);
    // BUGFIX: capture this run's result as well.
    result = embeddedGobblin_2.run();
    Assert.assertTrue(result.isSuccessful());
    // With the backdated start time, re-compaction can be triggered and a new record count written.
    recordCount = InputRecordCountHelper.readRecordCount(fs, (new Path(basePath, new Path("Identity/MemberAccount/hourly/2017/04/03/10"))));
    Assert.assertEquals(recordCount, 42);
    Assert.assertTrue(fs.exists(new Path(basePath, "Identity/MemberAccount/hourly/2017/04/03/10")));
  }

  /**
   * Writes an avro file named {@code FILENAME.COUNT.avro} holding {@code count} copies of {@code r}
   * using the default test schema. Returns the file handle so callers can tweak its mtime.
   */
  private File writeFileWithContent(File dir, String fileName, GenericRecord r, int count) throws IOException {
    File file = new File(dir, fileName + "." + count + ".avro");
    Assert.assertTrue(file.createNewFile());
    this.createAvroFileWithRepeatingRecords(file, r, count, Optional.absent());
    return file;
  }

  /** Same as the 4-arg overload but writes with an explicit (e.g. evolved) schema. */
  private File writeFileWithContent(File dir, String fileName, GenericRecord r, int count, Schema schema) throws IOException {
    File file = new File(dir, fileName + "." + count + ".avro");
    Assert.assertTrue(file.createNewFile());
    this.createAvroFileWithRepeatingRecords(file, r, count, Optional.of(schema));
    return file;
  }

  /** Extracts the "key" sub-record schema (partitionKey/environment/subKey) from a fixed ETL schema. */
  private Schema getSchema() {
    final String KEY_SCHEMA =
        "{ \"type\" : \"record\", \"name\" : \"etl\",\"namespace\" : \"reducerTest\", \"fields\" : [ { \"name\" : "
            + "\"key\", \"type\" : {\"type\" : \"record\", \"name\" : \"key_name\", \"namespace\" : \"key_namespace\", "
            + "\"fields\" : [ {\"name\" : \"partitionKey\", \"type\" : \"long\", \"doc\" : \"\"}, { \"name\" : \"environment"
            + "\", \"type\" : \"string\",\"doc\" : \"\"}, {\"name\" : \"subKey\",\"type\" : \"string\", \"doc\" : \"\"} ]}, "
            + "\"doc\" : \"\", \"attributes_json\" : \"{\\\"delta\\\":false,\\\"pk\\\":true}\" }]}";
    Schema keySchema = new Schema.Parser().parse(KEY_SCHEMA);
    return keySchema.getField("key").schema();
  }

  /** Builds a record with fixed field values against the default test schema. */
  private GenericRecord createRandomRecord() {
    GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(getSchema());
    // 1L autoboxes via Long.valueOf — replaces the deprecated new Long(1).
    keyRecordBuilder.set("partitionKey", 1L);
    keyRecordBuilder.set("environment", "test");
    keyRecordBuilder.set("subKey", "2");
    return keyRecordBuilder.build();
  }

  /** Builds a record against an evolved schema that adds an optional "oppo" string field. */
  private GenericRecord createEvolvedSchemaRecord() {
    Schema evolvedSchema =
        SchemaBuilder.record("evolved").fields()
            .requiredLong("partitionKey").requiredString("environment").requiredString("subKey").optionalString("oppo").endRecord();
    GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(evolvedSchema);
    keyRecordBuilder.set("partitionKey", 1L);
    keyRecordBuilder.set("environment", "test");
    keyRecordBuilder.set("subKey", "2");
    keyRecordBuilder.set("oppo", "poop");
    return keyRecordBuilder.build();
  }

  /** Writes {@code count} copies of {@code r} into {@code file}, using {@code schema} when present. */
  private void createAvroFileWithRepeatingRecords(File file, GenericRecord r, int count, Optional<Schema> schema) throws IOException {
    // try-with-resources guarantees the writer (and its underlying stream) is closed even if append fails.
    try (DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<GenericRecord>())) {
      writer.create(schema.isPresent() ? schema.get() : getSchema(), new FileOutputStream(file));
      for (int i = 0; i < count; ++i) {
        writer.append(r);
      }
    }
  }

  /** Job template whose audit-count verification is configured to always fail. */
  private EmbeddedGobblin createEmbeddedGobblinForAllFailures(String name, String basePath) {
    return createEmbeddedGobblinCompactionJob(name, basePath)
        .setConfiguration(AuditCountClientFactory.AUDIT_COUNT_CLIENT_FACTORY, "KafkaAuditCountHttpClientFactory")
        .setConfiguration(CompactionAuditCountVerifier.GOBBLIN_TIER, "dummy")
        .setConfiguration(CompactionAuditCountVerifier.ORIGIN_TIER, "dummy")
        .setConfiguration(CompactionAuditCountVerifier.PRODUCER_TIER, "dummy")
        .setConfiguration(CompactionVerifier.COMPACTION_VERIFICATION_ITERATION_COUNT_LIMIT, "2");
  }

  /** Job template wired to a suite factory that simulates hive-registration failures. */
  private EmbeddedGobblin createEmbeddedGobblinForHiveRegistrationFailure(String name, String basePath) {
    return createEmbeddedGobblinCompactionJob(name, basePath)
        .setConfiguration(ConfigurationKeys.COMPACTION_SUITE_FACTORY, "HiveRegistrationFailureFactory");
  }

  /** Job template with a tiered dataset prioritizer (Identity > EVG > BizProfile). */
  private EmbeddedGobblin createEmbeddedGobblinWithPriority(String name, String basePath) {
    return createEmbeddedGobblinCompactionJob(name, basePath)
        .setConfiguration(ConfigurationKeys.COMPACTION_PRIORITIZER_ALIAS, "TieredDatasets")
        .setConfiguration(SimpleDatasetHierarchicalPrioritizer.TIER_KEY + ".0", "Identity")
        .setConfiguration(SimpleDatasetHierarchicalPrioritizer.TIER_KEY + ".1", "EVG")
        .setConfiguration(SimpleDatasetHierarchicalPrioritizer.TIER_KEY + ".2", "BizProfile");
  }

  /** Work-unit-stream compaction over multiple hourly partitions; expects success. */
  @Test
  public void testWorkUnitStream() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();
    GenericRecord r1 = createRandomRecord();
    // Create a couple of hourly partitions to stream over.
    for (int i = 22; i < 24; ++i) {
      String path = "Identity/MemberAccount/minutely/2017/04/03/" + i + "/20_30/run_2017-04-03-10-20";
      File jobDir = new File(basePath, path);
      Assert.assertTrue(jobDir.mkdirs());
      writeFileWithContent(jobDir, "file_random", r1, 20);
    }
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinCompactionJob("workunit_stream", basePath.getAbsolutePath());
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
  }

  /** Work-unit-stream compaction where every dataset fails audit verification; expects failure. */
  @Test
  public void testWorkUnitStreamForAllFailures() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();
    GenericRecord r1 = createRandomRecord();
    // Populate 23 hourly partitions.
    for (int i = 1; i < 24; ++i) {
      String path = "Identity/MemberAccount/minutely/2017/04/03/" + i + "/20_30/run_2017-04-03-10-20";
      File jobDir = new File(basePath, path);
      Assert.assertTrue(jobDir.mkdirs());
      writeFileWithContent(jobDir, "file_random", r1, 20);
    }
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinForAllFailures("workunit_stream_all_failure", basePath.getAbsolutePath());
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertFalse(result.isSuccessful());
  }

  /** One dataset registers fine, the other fails hive registration — the job must fail overall. */
  @Test
  public void testHiveRegistrationFailure() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();
    GenericRecord r1 = createRandomRecord();
    // success dataset
    String path1 = TestCompactionSuiteFactories.DATASET_SUCCESS + "/20_30/run_2017-04-03-10-20";
    File jobDir1 = new File(basePath, path1);
    Assert.assertTrue(jobDir1.mkdirs());
    writeFileWithContent(jobDir1, "file_random", r1, 20);
    // failed dataset
    String path2 = TestCompactionSuiteFactories.DATASET_FAIL + "/20_30/run_2017-04-03-10-20";
    File jobDir2 = new File(basePath, path2);
    Assert.assertTrue(jobDir2.mkdirs());
    writeFileWithContent(jobDir2, "file_random", r1, 20);
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinForHiveRegistrationFailure("hive_registration_failure", basePath.getAbsolutePath());
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertFalse(result.isSuccessful());
  }

  /** Runs compaction with a tiered prioritizer over three dataset families; expects success. */
  @Test
  public void testPrioritization() throws Exception {
    File basePath = Files.createTempDir();
    basePath.deleteOnExit();
    GenericRecord r1 = createRandomRecord();
    for (int i = 1; i < 3; ++i) {
      String path = "Identity/MemberAccount/minutely/2017/04/03/" + i + "/20_30/run_2017-04-03-10-20";
      File jobDir = new File(basePath, path);
      Assert.assertTrue(jobDir.mkdirs());
      writeFileWithContent(jobDir, "file_random", r1, 20);
    }
    for (int i = 1; i < 3; ++i) {
      String path = "EVG/People/minutely/2017/04/03/" + i + "/20_30/run_2017-04-03-10-20";
      File jobDir = new File(basePath, path);
      Assert.assertTrue(jobDir.mkdirs());
      writeFileWithContent(jobDir, "file_random", r1, 20);
    }
    for (int i = 1; i < 3; ++i) {
      String path = "BizProfile/BizCompany/minutely/2017/04/03/" + i + "/20_30/run_2017-04-03-10-20";
      File jobDir = new File(basePath, path);
      Assert.assertTrue(jobDir.mkdirs());
      writeFileWithContent(jobDir, "file_random", r1, 20);
    }
    EmbeddedGobblin embeddedGobblin = createEmbeddedGobblinWithPriority("workunit_stream_priority", basePath.getAbsolutePath());
    JobExecutionResult result = embeddedGobblin.run();
    Assert.assertTrue(result.isSuccessful());
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.io.Files;
import org.apache.gobblin.util.RecordCountProvider;
import org.apache.gobblin.util.recordcount.IngestionRecordCountProvider;
import org.apache.gobblin.util.recordcount.LateFileRecordCountProvider;
/**
 * Tests for {@link MRCompactorJobRunner.FileNameFormat}.
 *
 * Verifies that {@link LateFileRecordCountProvider} (wrapping an
 * {@link IngestionRecordCountProvider}) preserves the record count embedded in
 * the file name across successive "late file" renames, appending a ".late[N]"
 * suffix before the extension each time the target name already exists.
 */
@Test(groups = { "gobblin.compaction.mapreduce" })
public class MRCompactorJobRunnerFilenameRecordCountProviderTest {
  @Test
  public void testFileNameRecordCountProvider() throws IOException {
    String originalFilename = "test.123.avro";
    // Each late-rename appends ".late" optionally followed by digits.
    String suffixPattern = Pattern.quote(".late") + "[\\d]*";
    Path testDir = new Path("/tmp/compactorFilenameRecordCountProviderTest");
    FileSystem fs = FileSystem.getLocal(new Configuration());
    try {
      if (fs.exists(testDir)) {
        fs.delete(testDir, true);
      }
      fs.mkdirs(testDir);
      RecordCountProvider originFileNameFormat = new IngestionRecordCountProvider();
      LateFileRecordCountProvider lateFileRecordCountProvider = new LateFileRecordCountProvider(originFileNameFormat);
      // No collision yet: the original file name is used as-is.
      Path firstOutput = lateFileRecordCountProvider.constructLateFilePath(originalFilename, fs, testDir);
      Assert.assertEquals(new Path(testDir, originalFilename), firstOutput);
      Assert.assertEquals(123, lateFileRecordCountProvider.getRecordCount(firstOutput));
      // Close the stream returned by create() so the file handle is not leaked.
      fs.create(firstOutput).close();
      // First collision: a single ".late[N]" suffix is appended before ".avro".
      Pattern pattern1 =
          Pattern.compile(Pattern.quote(Files.getNameWithoutExtension(originalFilename)) + suffixPattern + "\\.avro");
      Path secondOutput = lateFileRecordCountProvider.constructLateFilePath(firstOutput.getName(), fs, testDir);
      Assert.assertEquals(testDir, secondOutput.getParent());
      Assert.assertTrue(pattern1.matcher(secondOutput.getName()).matches());
      Assert.assertEquals(123, lateFileRecordCountProvider.getRecordCount(secondOutput));
      fs.create(secondOutput).close();
      // Second collision: a second ".late[N]" suffix is appended.
      Pattern pattern2 =
          Pattern.compile(Files.getNameWithoutExtension(originalFilename) + suffixPattern + suffixPattern + "\\.avro");
      Path thirdOutput = lateFileRecordCountProvider.constructLateFilePath(secondOutput.getName(), fs, testDir);
      Assert.assertEquals(testDir, thirdOutput.getParent());
      Assert.assertTrue(pattern2.matcher(thirdOutput.getName()).matches());
      Assert.assertEquals(123, lateFileRecordCountProvider.getRecordCount(thirdOutput));
    } finally {
      fs.delete(testDir, true);
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.compaction.dataset.Dataset;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Test for directory renaming strategy
 * {@link MRCompactor#getDeepestLevelRenamedDirsWithFileExistence(FileSystem, Set)}
 * {@link MRCompactor#getDeepestLevelUnrenamedDirsWithFileExistence(FileSystem, Set)}
 * {@link MRCompactor#renameSourceDirAsCompactionComplete(FileSystem, Dataset)}
 *
 * A directory counts as "renamed" when its name ends with "_COMPLETE"; each test
 * seeds a mix of renamed and un-renamed run directories (each holding a "dummy"
 * file) and checks how MRCompactor classifies or renames them.
 */
@Test(groups = { "gobblin.compaction.mapreduce" })
public class RenameSourceDirectoryTest {
  private FileSystem fs;
  // Root under which all test run directories are created; each test deletes it
  // at the start and end so tests are independent.
  private static final String RENAME_SRC_DIR = "/tmp/renaming-source-dir";
  // Un-renamed ("not yet compacted") run directories.
  private static final String RENAME_SRC_DIR_RUN1_DIR = RENAME_SRC_DIR + "/00_10/run1";
  private static final String RENAME_SRC_DIR_RUN2_DIR = RENAME_SRC_DIR + "/00_10/run2";
  private static final String RENAME_SRC_DIR_RUN3_DIR = RENAME_SRC_DIR + "/10_20/run1";
  private static final String RENAME_SRC_DIR_RUN4_DIR = RENAME_SRC_DIR + "/20_30/run1";
  private static final String RENAME_SRC_DIR_RUN5_DIR = RENAME_SRC_DIR + "/20_30/run2";
  // Already-renamed directories (suffix "_COMPLETE").
  // NOTE(review): these do not mirror RUN4/RUN5 above ("run2"/"run3" vs
  // "run1"/"run2"); the tests only need distinct "_COMPLETE" paths, but the
  // constant names are misleading — consider aligning them.
  private static final String RENAME_SRC_DIR_RUN4_DIR_COMPLETE = RENAME_SRC_DIR + "/20_30/run2_COMPLETE";
  private static final String RENAME_SRC_DIR_RUN5_DIR_COMPLETE = RENAME_SRC_DIR + "/20_30/run3_COMPLETE";
  // Marker files created inside each run directory so "with file existence"
  // checks can see a non-empty directory.
  private static final String RENAME_SRC_DIR_RUN1_FILE = RENAME_SRC_DIR_RUN1_DIR + "/dummy";
  private static final String RENAME_SRC_DIR_RUN2_FILE = RENAME_SRC_DIR_RUN2_DIR + "/dummy";
  private static final String RENAME_SRC_DIR_RUN3_FILE = RENAME_SRC_DIR_RUN3_DIR + "/dummy";
  private static final String RENAME_SRC_DIR_RUN4_FILE = RENAME_SRC_DIR_RUN4_DIR + "/dummy";
  private static final String RENAME_SRC_DIR_RUN5_FILE = RENAME_SRC_DIR_RUN5_DIR + "/dummy";
  private static final String RENAME_SRC_DIR_RUN4_COMPLETE_FILE = RENAME_SRC_DIR_RUN4_DIR_COMPLETE + "/dummy";
  private static final String RENAME_SRC_DIR_RUN5_COMPLETE_FILE = RENAME_SRC_DIR_RUN5_DIR_COMPLETE + "/dummy";
  @BeforeClass
  public void setUp() throws Exception {
    Configuration conf = new Configuration();
    fs = FileSystem.get(conf);
  }
  // Creates an empty file at the given local path, making parent directories
  // as needed (return values intentionally ignored; failures surface later as
  // assertion failures in the tests).
  private void createFile (String path) throws IOException {
    File f = new File(path);
    f.getParentFile().mkdirs();
    f.createNewFile();
  }
  // All five input dirs are un-renamed; removing one dir's file should drop it
  // from the "with file existence" result.
  @Test
  public void testUnrenamedDirs() throws Exception {
    fs.delete(new Path(RENAME_SRC_DIR), true);
    createFile(RENAME_SRC_DIR_RUN1_FILE);
    createFile(RENAME_SRC_DIR_RUN2_FILE);
    createFile(RENAME_SRC_DIR_RUN3_FILE);
    createFile(RENAME_SRC_DIR_RUN4_FILE);
    createFile(RENAME_SRC_DIR_RUN5_FILE);
    Set<Path> inputPaths = new HashSet<>();
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN1_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN2_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN3_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN4_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN5_DIR));
    Set<Path> unRenamed = MRCompactor.getDeepestLevelUnrenamedDirsWithFileExistence(fs, inputPaths);
    Assert.assertEquals(unRenamed.size(), 5);
    // Deleting RUN1's file leaves its directory empty, so it no longer counts.
    fs.delete(new Path(RENAME_SRC_DIR_RUN1_FILE), false);
    unRenamed = MRCompactor.getDeepestLevelUnrenamedDirsWithFileExistence(fs, inputPaths);
    Assert.assertEquals(unRenamed.size(), 4);
    fs.delete(new Path(RENAME_SRC_DIR), true);
  }
  // Only the two "_COMPLETE" dirs count as renamed; deleting a file from an
  // UN-renamed dir must not change the renamed count.
  @Test
  public void testRenamedDirs() throws Exception {
    fs.delete(new Path(RENAME_SRC_DIR), true);
    createFile(RENAME_SRC_DIR_RUN1_FILE);
    createFile(RENAME_SRC_DIR_RUN2_FILE);
    createFile(RENAME_SRC_DIR_RUN3_FILE);
    createFile(RENAME_SRC_DIR_RUN4_COMPLETE_FILE);
    createFile(RENAME_SRC_DIR_RUN5_COMPLETE_FILE);
    Set<Path> inputPaths = new HashSet<>();
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN1_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN2_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN3_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN4_DIR_COMPLETE));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN5_DIR_COMPLETE));
    Set<Path> renamed = MRCompactor.getDeepestLevelRenamedDirsWithFileExistence(fs, inputPaths);
    Assert.assertEquals(renamed.size(), 2);
    fs.delete(new Path(RENAME_SRC_DIR_RUN1_FILE), false);
    renamed = MRCompactor.getDeepestLevelRenamedDirsWithFileExistence(fs, inputPaths);
    Assert.assertEquals(renamed.size(), 2);
    fs.delete(new Path(RENAME_SRC_DIR), true);
  }
  // End-to-end: the three un-renamed dirs should be renamed to "<dir>_COMPLETE"
  // with their contents intact.
  @Test
  public void testRenamingProcedure() throws Exception {
    fs.delete(new Path(RENAME_SRC_DIR), true);
    createFile(RENAME_SRC_DIR_RUN1_FILE);
    createFile(RENAME_SRC_DIR_RUN2_FILE);
    createFile(RENAME_SRC_DIR_RUN3_FILE);
    createFile(RENAME_SRC_DIR_RUN4_COMPLETE_FILE);
    createFile(RENAME_SRC_DIR_RUN5_COMPLETE_FILE);
    Set<Path> inputPaths = new HashSet<>();
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN1_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN2_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN3_DIR));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN4_DIR_COMPLETE));
    inputPaths.add(new Path(RENAME_SRC_DIR_RUN5_DIR_COMPLETE));
    Dataset dataset = mock(Dataset.class);
    Set<Path> unrenamed = MRCompactor.getDeepestLevelUnrenamedDirsWithFileExistence(fs, inputPaths);
    Assert.assertEquals(unrenamed.size(), 3);
    when(dataset.getRenamePaths()).thenReturn(unrenamed);
    MRCompactor.renameSourceDirAsCompactionComplete(fs, dataset);
    Assert.assertEquals(fs.exists(new Path(RENAME_SRC_DIR_RUN1_DIR + "_COMPLETE/dummy")), true);
    Assert.assertEquals(fs.exists(new Path(RENAME_SRC_DIR_RUN2_DIR + "_COMPLETE/dummy")), true);
    Assert.assertEquals(fs.exists(new Path(RENAME_SRC_DIR_RUN3_DIR + "_COMPLETE/dummy")), true);
    fs.delete(new Path(RENAME_SRC_DIR), true);
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.AvroUtils;
@Test(groups = { "gobblin.compaction" })
public class MRCompactorAvroKeyDedupJobRunnerTest {
  private MRCompactorAvroKeyDedupJobRunner runner;
  private Job job;

  /** Builds a dedup-enabled job runner over a placeholder /tmp dataset. */
  @BeforeClass
  public void setUp() throws IOException {
    State jobState = new State();
    jobState.setProp(ConfigurationKeys.JOB_NAME_KEY, "MRCompactorAvroKeyDedupJobRunnerTest");
    jobState.setProp(MRCompactor.COMPACTION_SHOULD_DEDUPLICATE, "true");
    Dataset dataset = new Dataset.Builder().withInputPath(new Path("/tmp")).build();
    dataset.setJobProps(jobState);
    this.runner = new MRCompactorAvroKeyDedupJobRunner(dataset, FileSystem.get(new Configuration()));
    this.job = Job.getInstance();
  }

  /** A topic schema declaring a primary key should yield the pkey-based key schema. */
  @Test
  public void testGetKeySchemaWithPrimaryKey() throws IOException {
    ClassLoader loader = getClass().getClassLoader();
    try (InputStream topicSchemaStream = loader.getResourceAsStream("dedup-schema/dedup-schema-with-pkey.avsc");
        InputStream expectedKeyStream = loader.getResourceAsStream("dedup-schema/dedup-schema.avsc")) {
      Schema topicSchema = new Schema.Parser().parse(topicSchemaStream);
      Schema expectedKeySchema = new Schema.Parser().parse(expectedKeyStream);
      Assert.assertEquals(this.runner.getKeySchema(this.job, topicSchema), expectedKeySchema);
    }
  }

  /** Without a primary key, the key schema falls back to all comparable fields. */
  @Test
  public void testGetKeySchemaWithoutPrimaryKey() throws IOException {
    try (InputStream topicSchemaStream =
        getClass().getClassLoader().getResourceAsStream("dedup-schema/dedup-schema-without-pkey.avsc")) {
      Schema topicSchema = new Schema.Parser().parse(topicSchemaStream);
      Assert.assertEquals(this.runner.getKeySchema(this.job, topicSchema),
          AvroUtils.removeUncomparableFields(topicSchema).get());
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.util.List;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.conf.Configuration;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for {@link ConfBasedDeltaFieldProvider}: delta field names come from a
 * (possibly comma-separated) configuration entry, independent of the record.
 */
@Test(groups = {"gobblin.compaction"})
public class ConfBasedDeltaFieldProviderTest {
  @Test
  public void testGetDeltaFieldNamesForNewSchema() {
    Configuration config = mock(Configuration.class);
    GenericRecord record = mock(GenericRecord.class);

    // A single configured delta field is returned as-is.
    when(config.get(ConfBasedDeltaFieldProvider.DELTA_FIELDS_KEY)).thenReturn("scn");
    AvroDeltaFieldNameProvider singleFieldProvider = new ConfBasedDeltaFieldProvider(config);
    List<String> singleResult = singleFieldProvider.getDeltaFieldNames(record);
    Assert.assertEquals(singleResult.size(), 1);
    Assert.assertEquals(singleResult.get(0), "scn");

    // A comma-separated list (with whitespace) yields the fields in order.
    when(config.get(ConfBasedDeltaFieldProvider.DELTA_FIELDS_KEY)).thenReturn("scn, scn2");
    AvroDeltaFieldNameProvider multiFieldProvider = new ConfBasedDeltaFieldProvider(config);
    List<String> multiResult = multiFieldProvider.getDeltaFieldNames(record);
    Assert.assertEquals(multiResult.size(), 2);
    Assert.assertEquals(multiResult.get(0), "scn");
    Assert.assertEquals(multiResult.get(1), "scn2");
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.avro;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.conf.Configuration;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for {@link FieldAttributeBasedDeltaFieldsProvider}: fields whose
 * "attributes_json" metadata carries {@code "delta":true} are reported as the
 * record's delta fields, in schema order.
 */
@Test(groups = { "gobblin.compaction" })
public class FieldAttributeBasedDeltaFieldsProviderTest {
  // Schema with three fields: "key" (pk, not delta) plus "scn2" and "scn",
  // both flagged delta via their attributes_json metadata.
  private static final String FULL_SCHEMA_WITH_ATTRIBUTES =
      "{ \"type\" : \"record\", \"name\" : \"etl\",\"namespace\" : \"reducerTest\", \"fields\" : [ { \"name\" : "
          + "\"key\", \"type\" : {\"type\" : \"record\", \"name\" : \"key_name\", \"namespace\" : \"key_namespace\", "
          + "\"fields\" : [ {\"name\" : \"partitionKey\", \"type\" : \"long\", \"doc\" : \"\"}, { \"name\" : \"environment"
          + "\", \"type\" : \"string\",\"doc\" : \"\"}, {\"name\" : \"subKey\",\"type\" : \"string\", \"doc\" : \"\"} ]}, "
          + "\"doc\" : \"\", \"attributes_json\" : \"{\\\"delta\\\":false,\\\"pk\\\":true}\" }"
          + ", {\"name\" : \"scn2\", \"type\": \"long\", \"doc\" : \"\", \"attributes_json\" : \"{\\\"nullable\\\":false,\\\"delta"
          + "\\\":true,\\\"pk\\\":false,\\\"type\\\":\\\"NUMBER\\\"}\"}"
          + " , {\"name\" : \"scn\", \"type\": \"long\", \"doc\" : \"\", \"attributes_json\" : \"{\\\"nullable\\\":false,\\\"delta"
          + "\\\":true,\\\"pk\\\":false,\\\"type\\\":\\\"NUMBER\\\"}\"}]}";
  @Test
  public void testGetDeltaFieldNamesForNewSchema(){
    // Point the provider at the "attributes_json" field and use the default
    // delta property name.
    Configuration config = mock(Configuration.class);
    when(config.get(FieldAttributeBasedDeltaFieldsProvider.ATTRIBUTE_FIELD)).thenReturn("attributes_json");
    when(config.get(FieldAttributeBasedDeltaFieldsProvider.DELTA_PROP_NAME,
        FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME))
        .thenReturn(FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME);
    Schema schema = new Schema.Parser().parse(FULL_SCHEMA_WITH_ATTRIBUTES);
    GenericRecord mockRecord = mock(GenericRecord.class);
    when(mockRecord.getSchema()).thenReturn(schema);
    AvroDeltaFieldNameProvider deltaProvider = new FieldAttributeBasedDeltaFieldsProvider(config);
    Assert.assertEquals(deltaProvider.getDeltaFieldNames(mockRecord), Lists.newArrayList("scn2", "scn"));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.conditions;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import org.apache.gobblin.compaction.conditions.RecompactionCombineCondition;
import org.apache.gobblin.compaction.conditions.RecompactionCondition;
import org.apache.gobblin.compaction.conditions.RecompactionConditionBasedOnDuration;
import org.apache.gobblin.compaction.conditions.RecompactionConditionBasedOnFileCount;
import org.apache.gobblin.compaction.conditions.RecompactionConditionBasedOnRatio;
import org.apache.gobblin.compaction.conditions.RecompactionConditionFactory;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Test class for {@link org.apache.gobblin.compaction.conditions.RecompactionCondition}.
*/
@Test(groups = {"gobblin.compaction.mapreduce.conditions"})
public class RecompactionConditionTest {
private Path inputPath = new Path ("/tmp/input");
private Path inputLatePath = new Path ("/tmp/input_late");
private Path outputPath = new Path ("/tmp/output");
private Path outputLatePath = new Path ("/tmp/output_late");
private Path tmpPath = new Path ("/tmp/output_tmp");
private Dataset dataset;
private Logger LOG = LoggerFactory.getLogger(RecompactionConditionTest.class);
public DateTime getCurrentTime() {
DateTimeZone timeZone = DateTimeZone.forID(MRCompactor.DEFAULT_COMPACTION_TIMEZONE);
DateTime currentTime = new DateTime(timeZone);
return currentTime;
}
@BeforeClass
public void setUp() throws IOException {
dataset =
new Dataset.Builder().withPriority(1.0)
.withDatasetName("Identity/MemberAccount")
.withInputPath(inputPath)
.withInputLatePath(inputLatePath)
.withOutputPath(outputPath)
.withOutputLatePath(outputLatePath)
.withOutputTmpPath(tmpPath).build();
dataset.setJobProp(MRCompactor.COMPACTION_LATEDATA_THRESHOLD_DURATION, MRCompactor.DEFAULT_COMPACTION_LATEDATA_THRESHOLD_DURATION);
dataset.setJobProp(MRCompactor.COMPACTION_LATEDATA_THRESHOLD_FILE_NUM, 3);
dataset.setJobProp(MRCompactor.COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET, "Identity.*,B.*:0.2; C.*,D.*:0.3");
dataset.setJobProp(MRCompactor.COMPACTION_LATEDATA_THRESHOLD_DURATION, "12h");
}
@Test
public void testRecompactionConditionBasedOnFileCount() {
try {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
fs.delete(outputLatePath, true);
fs.mkdirs(outputLatePath);
RecompactionConditionFactory factory = new RecompactionConditionBasedOnFileCount.Factory();
RecompactionCondition conditionBasedOnFileCount= factory.createRecompactionCondition(dataset);
DatasetHelper helper = new DatasetHelper(dataset, fs, Lists.newArrayList("avro"));
fs.createNewFile(new Path(outputLatePath, new Path ("1.avro")));
fs.createNewFile(new Path(outputLatePath, new Path ("2.avro")));
Assert.assertEquals(conditionBasedOnFileCount.isRecompactionNeeded(helper), false);
fs.createNewFile(new Path(outputLatePath, new Path ("3.avro")));
Assert.assertEquals(conditionBasedOnFileCount.isRecompactionNeeded(helper), true);
fs.delete(outputLatePath, true);
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void testRecompactionConditionBasedOnRatio() {
RecompactionConditionFactory factory = new RecompactionConditionBasedOnRatio.Factory();
RecompactionCondition conditionBasedOnRatio = factory.createRecompactionCondition(dataset);
DatasetHelper helper = mock(DatasetHelper.class);
when(helper.getLateOutputRecordCount()).thenReturn(6L);
when(helper.getOutputRecordCount()).thenReturn(94L);
Assert.assertEquals(conditionBasedOnRatio.isRecompactionNeeded(helper), false);
when(helper.getLateOutputRecordCount()).thenReturn(21L);
when(helper.getOutputRecordCount()).thenReturn(79L);
Assert.assertEquals(conditionBasedOnRatio.isRecompactionNeeded(helper), true);
}
@Test
public void testRecompactionConditionBasedOnDuration() {
RecompactionConditionFactory factory = new RecompactionConditionBasedOnDuration.Factory();
RecompactionCondition conditionBasedOnDuration = factory.createRecompactionCondition(dataset);
DatasetHelper helper = mock (DatasetHelper.class);
when(helper.getDataset()).thenReturn(dataset);
PeriodFormatter periodFormatter = new PeriodFormatterBuilder().appendMonths().appendSuffix("m").appendDays().appendSuffix("d").appendHours()
.appendSuffix("h").appendMinutes().appendSuffix("min").toFormatter();
DateTime currentTime = getCurrentTime();
Period period_A = periodFormatter.parsePeriod("11h59min");
DateTime earliest_A = currentTime.minus(period_A);
when(helper.getEarliestLateFileModificationTime()).thenReturn(Optional.of(earliest_A));
when(helper.getCurrentTime()).thenReturn(currentTime);
Assert.assertEquals(conditionBasedOnDuration.isRecompactionNeeded(helper), false);
Period period_B = periodFormatter.parsePeriod("12h01min");
DateTime earliest_B = currentTime.minus(period_B);
when(helper.getEarliestLateFileModificationTime()).thenReturn(Optional.of(earliest_B));
when(helper.getCurrentTime()).thenReturn(currentTime);
Assert.assertEquals(conditionBasedOnDuration.isRecompactionNeeded(helper), true);
}
@Test
public void testRecompactionCombineCondition() {
DatasetHelper helper = mock (DatasetHelper.class);
RecompactionCondition cond1 = mock (RecompactionConditionBasedOnRatio.class);
RecompactionCondition cond2= mock (RecompactionConditionBasedOnFileCount.class);
RecompactionCondition cond3 = mock (RecompactionConditionBasedOnDuration.class);
RecompactionCombineCondition combineConditionOr = new RecompactionCombineCondition(Arrays.asList(cond1,cond2,cond3),
RecompactionCombineCondition.CombineOperation.OR);
when(cond1.isRecompactionNeeded(helper)).thenReturn(false);
when(cond2.isRecompactionNeeded(helper)).thenReturn(false);
when(cond3.isRecompactionNeeded(helper)).thenReturn(false);
Assert.assertEquals(combineConditionOr.isRecompactionNeeded(helper), false);
when(cond1.isRecompactionNeeded(helper)).thenReturn(false);
when(cond2.isRecompactionNeeded(helper)).thenReturn(true);
when(cond3.isRecompactionNeeded(helper)).thenReturn(false);
Assert.assertEquals(combineConditionOr.isRecompactionNeeded(helper), true);
RecompactionCombineCondition combineConditionAnd = new RecompactionCombineCondition(Arrays.asList(cond1,cond2,cond3),
RecompactionCombineCondition.CombineOperation.AND);
when(cond1.isRecompactionNeeded(helper)).thenReturn(true);
when(cond2.isRecompactionNeeded(helper)).thenReturn(true);
when(cond3.isRecompactionNeeded(helper)).thenReturn(false);
Assert.assertEquals(combineConditionAnd.isRecompactionNeeded(helper), false);
when(cond1.isRecompactionNeeded(helper)).thenReturn(true);
when(cond2.isRecompactionNeeded(helper)).thenReturn(true);
when(cond3.isRecompactionNeeded(helper)).thenReturn(true);
Assert.assertEquals(combineConditionAnd.isRecompactionNeeded(helper), true);
}
} | 1,783 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcList;
import org.apache.orc.mapred.OrcMap;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcUnion;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test(groups = {"gobblin.compaction"})
public class OrcUtilsTest {
final int intValue1 = 10;
final String stringValue1 = "testString1";
final int intValue2 = 20;
final String stringValue2 = "testString2";
final int intValue3 = 30;
final String stringValue3 = "testString3";
final boolean boolValue = true;
@Test
public void testRandomFillOrcStructWithAnySchema() {
// 1. Basic case
TypeDescription schema_1 = TypeDescription.fromString("struct<i:int,j:int,k:int>");
OrcStruct expectedStruct = (OrcStruct) OrcStruct.createValue(schema_1);
expectedStruct.setFieldValue("i", new IntWritable(3));
expectedStruct.setFieldValue("j", new IntWritable(3));
expectedStruct.setFieldValue("k", new IntWritable(3));
OrcStruct actualStruct = (OrcStruct) OrcStruct.createValue(schema_1);
OrcTestUtils.fillOrcStructWithFixedValue(actualStruct, schema_1, 3, "", false);
Assert.assertEquals(actualStruct, expectedStruct);
TypeDescription schema_2 = TypeDescription.fromString("struct<i:boolean,j:int,k:string>");
expectedStruct = (OrcStruct) OrcStruct.createValue(schema_2);
expectedStruct.setFieldValue("i", new BooleanWritable(false));
expectedStruct.setFieldValue("j", new IntWritable(3));
expectedStruct.setFieldValue("k", new Text(""));
actualStruct = (OrcStruct) OrcStruct.createValue(schema_2);
OrcTestUtils.fillOrcStructWithFixedValue(actualStruct, schema_2, 3, "", false);
Assert.assertEquals(actualStruct, expectedStruct);
// 2. Some simple nested cases: struct within struct
TypeDescription schema_3 = TypeDescription.fromString("struct<i:boolean,j:struct<i:boolean,j:int,k:string>>");
OrcStruct expectedStruct_nested_1 = (OrcStruct) OrcStruct.createValue(schema_3);
expectedStruct_nested_1.setFieldValue("i", new BooleanWritable(false));
expectedStruct_nested_1.setFieldValue("j", expectedStruct);
actualStruct = (OrcStruct) OrcStruct.createValue(schema_3);
OrcTestUtils.fillOrcStructWithFixedValue(actualStruct, schema_3, 3, "", false);
Assert.assertEquals(actualStruct, expectedStruct_nested_1);
// 3. array of struct within struct
TypeDescription schema_4 = TypeDescription.fromString("struct<i:boolean,j:array<struct<i:boolean,j:int,k:string>>>");
// Note that this will not create any elements in the array.
expectedStruct_nested_1 = (OrcStruct) OrcStruct.createValue(schema_4);
expectedStruct_nested_1.setFieldValue("i", new BooleanWritable(false));
OrcList list = new OrcList(schema_2, 1);
list.add(expectedStruct);
expectedStruct_nested_1.setFieldValue("j", list);
// Constructing actualStruct: make sure the list is non-Empty. There's any meaningful value within placeholder struct.
actualStruct = (OrcStruct) OrcStruct.createValue(schema_4);
OrcList placeHolderList = new OrcList(schema_2, 1);
OrcStruct placeHolderStruct = (OrcStruct) OrcStruct.createValue(schema_2);
placeHolderList.add(placeHolderStruct);
actualStruct.setFieldValue("j", placeHolderList);
OrcTestUtils.fillOrcStructWithFixedValue(actualStruct, schema_4, 3, "", false);
Assert.assertEquals(actualStruct, expectedStruct_nested_1);
// 4. union of struct within struct
TypeDescription schema_5 = TypeDescription.fromString("struct<i:boolean,j:uniontype<struct<i:boolean,j:int,k:string>>>");
expectedStruct_nested_1 = (OrcStruct) OrcStruct.createValue(schema_5);
expectedStruct_nested_1.setFieldValue("i", new BooleanWritable(false));
OrcUnion union = new OrcUnion(schema_2);
union.set(0, expectedStruct);
expectedStruct_nested_1.setFieldValue("j", union);
// Construct actualStruct: make sure there's a struct-placeholder within the union.
actualStruct = (OrcStruct) OrcStruct.createValue(schema_5);
OrcUnion placeHolderUnion = new OrcUnion(schema_2);
placeHolderUnion.set(0, placeHolderStruct);
actualStruct.setFieldValue("j", placeHolderUnion);
OrcTestUtils.fillOrcStructWithFixedValue(actualStruct, schema_5, 3, "", false);
Assert.assertEquals(actualStruct, expectedStruct_nested_1);
}
@Test
public void testUpConvertSimpleOrcStruct() {
// Basic case, all primitives, newly added value will be set to null
TypeDescription baseStructSchema = TypeDescription.fromString("struct<a:int,b:string>");
// This would be re-used in the following tests as the actual record using the schema.
OrcStruct baseStruct = (OrcStruct) OrcStruct.createValue(baseStructSchema);
// Fill in the baseStruct with specified value.
OrcTestUtils.fillOrcStructWithFixedValue(baseStruct, baseStructSchema, intValue1, stringValue1, boolValue);
TypeDescription evolved_baseStructSchema = TypeDescription.fromString("struct<a:int,b:string,c:int>");
OrcStruct evolvedStruct = (OrcStruct) OrcStruct.createValue(evolved_baseStructSchema);
// This should be equivalent to deserialize(baseStruct).serialize(evolvedStruct, evolvedSchema);
OrcUtils.upConvertOrcStruct(baseStruct, evolvedStruct, evolved_baseStructSchema);
// Check if all value in baseStruct is populated and newly created column in evolvedStruct is filled with null.
Assert.assertEquals(((IntWritable) evolvedStruct.getFieldValue("a")).get(), intValue1);
Assert.assertEquals(evolvedStruct.getFieldValue("b").toString(), stringValue1);
Assert.assertNull(evolvedStruct.getFieldValue("c"));
// Base case: Reverse direction, which is column projection on top-level columns.
OrcStruct baseStruct_shadow = (OrcStruct) OrcStruct.createValue(baseStructSchema);
OrcUtils.upConvertOrcStruct(evolvedStruct, baseStruct_shadow, baseStructSchema);
Assert.assertEquals(baseStruct, baseStruct_shadow);
}
@Test
public void testUpConvertOrcStructOfList() {
// Simple Nested: List within Struct.
// The element type of list contains a new field.
// Prepare two ListInStructs with different size ( the list field contains different number of members)
TypeDescription structOfListSchema = TypeDescription.fromString("struct<a:array<struct<a:int,b:string>>>");
OrcStruct structOfList = (OrcStruct) OrcUtils.createValueRecursively(structOfListSchema);
//Create an OrcList instance with two entries
TypeDescription innerStructSchema = TypeDescription.createStruct().addField("a", TypeDescription.createInt())
.addField("b", TypeDescription.createString());
OrcStruct innerStruct1 = new OrcStruct(innerStructSchema);
innerStruct1.setFieldValue("a", new IntWritable(intValue1));
innerStruct1.setFieldValue("b", new Text(stringValue1));
OrcStruct innerStruct2 = new OrcStruct(innerStructSchema);
innerStruct2.setFieldValue("a", new IntWritable(intValue2));
innerStruct2.setFieldValue("b", new Text(stringValue2));
TypeDescription listSchema = TypeDescription.createList(innerStructSchema);
OrcList orcList = new OrcList(listSchema);
orcList.add(innerStruct1);
orcList.add(innerStruct2);
structOfList.setFieldValue("a", orcList);
TypeDescription evolvedStructOfListSchema =
TypeDescription.fromString("struct<a:array<struct<a:int,b:string,c:int>>>");
OrcStruct evolvedStructOfList = (OrcStruct) OrcUtils.createValueRecursively(evolvedStructOfListSchema);
// Convert and verify contents.
OrcUtils.upConvertOrcStruct(structOfList, evolvedStructOfList, evolvedStructOfListSchema);
Assert.assertEquals(
((IntWritable) ((OrcStruct) ((OrcList) evolvedStructOfList.getFieldValue("a")).get(0)).getFieldValue("a"))
.get(), intValue1);
Assert.assertEquals(
((OrcStruct) ((OrcList) evolvedStructOfList.getFieldValue("a")).get(0)).getFieldValue("b").toString(),
stringValue1);
Assert.assertNull((((OrcStruct) ((OrcList) evolvedStructOfList.getFieldValue("a")).get(0)).getFieldValue("c")));
Assert.assertEquals(
((IntWritable) ((OrcStruct) ((OrcList) evolvedStructOfList.getFieldValue("a")).get(1)).getFieldValue("a"))
.get(), intValue2);
Assert.assertEquals(
((OrcStruct) ((OrcList) evolvedStructOfList.getFieldValue("a")).get(1)).getFieldValue("b").toString(),
stringValue2);
Assert.assertNull((((OrcStruct) ((OrcList) evolvedStructOfList.getFieldValue("a")).get(1)).getFieldValue("c")));
//Create a list in source OrcStruct with 3 elements
structOfList = (OrcStruct) OrcUtils.createValueRecursively(structOfListSchema, 3);
OrcTestUtils.fillOrcStructWithFixedValue(structOfList, structOfListSchema, intValue1, stringValue1, boolValue);
Assert.assertNotEquals(((OrcList) structOfList.getFieldValue("a")).size(),
((OrcList) evolvedStructOfList.getFieldValue("a")).size());
OrcUtils.upConvertOrcStruct(structOfList, evolvedStructOfList, evolvedStructOfListSchema);
Assert.assertEquals(((OrcList) evolvedStructOfList.getFieldValue("a")).size(), 3);
// Original has list.size()=0, target has list.size() = 1
((OrcList) structOfList.getFieldValue("a")).clear();
OrcUtils.upConvertOrcStruct(structOfList, evolvedStructOfList, evolvedStructOfListSchema);
Assert.assertEquals(((OrcList) evolvedStructOfList.getFieldValue("a")).size(), 0);
}
@Test
public void testUpConvertOrcStructOfMap() {
// Map within Struct, contains a type-widening in the map-value type.
TypeDescription structOfMapSchema = TypeDescription.fromString("struct<a:map<string,int>>");
OrcStruct structOfMap = (OrcStruct) OrcStruct.createValue(structOfMapSchema);
TypeDescription mapSchema = TypeDescription.createMap(TypeDescription.createString(), TypeDescription.createInt());
OrcMap testMap = new OrcMap(mapSchema);
//Add dummy entries to initialize the testMap. The actual keys and values will be set later.
testMap.put(new Text(stringValue1), new IntWritable(intValue1));
testMap.put(new Text(stringValue2), new IntWritable(intValue2));
structOfMap.setFieldValue("a", testMap);
// Create the target struct with evolved schema
TypeDescription evolvedStructOfMapSchema = TypeDescription.fromString("struct<a:map<string,bigint>>");
OrcStruct evolvedStructOfMap = (OrcStruct) OrcStruct.createValue(evolvedStructOfMapSchema);
OrcMap evolvedMap =
new OrcMap(TypeDescription.createMap(TypeDescription.createString(), TypeDescription.createInt()));
//Initialize a map
evolvedMap.put(new Text(""), new LongWritable());
evolvedStructOfMap.setFieldValue("a", evolvedMap);
// convert and verify: Type-widening is correct, and size of output file is correct.
OrcUtils.upConvertOrcStruct(structOfMap, evolvedStructOfMap, evolvedStructOfMapSchema);
Assert.assertEquals(((OrcMap) evolvedStructOfMap.getFieldValue("a")).get(new Text(stringValue1)),
new LongWritable(intValue1));
Assert.assertEquals(((OrcMap) evolvedStructOfMap.getFieldValue("a")).get(new Text(stringValue2)),
new LongWritable(intValue2));
Assert.assertEquals(((OrcMap) evolvedStructOfMap.getFieldValue("a")).size(), 2);
// re-use the same object but the source struct has fewer member in the map entry.
testMap.put(new Text(stringValue3), new IntWritable(intValue3));
// sanity check
Assert.assertEquals(((OrcMap) structOfMap.getFieldValue("a")).size(), 3);
OrcUtils.upConvertOrcStruct(structOfMap, evolvedStructOfMap, evolvedStructOfMapSchema);
Assert.assertEquals(((OrcMap) evolvedStructOfMap.getFieldValue("a")).size(), 3);
Assert.assertEquals(((OrcMap) evolvedStructOfMap.getFieldValue("a")).get(new Text(stringValue1)),
new LongWritable(intValue1));
Assert.assertEquals(((OrcMap) evolvedStructOfMap.getFieldValue("a")).get(new Text(stringValue2)),
new LongWritable(intValue2));
Assert.assertEquals(((OrcMap) evolvedStructOfMap.getFieldValue("a")).get(new Text(stringValue3)),
new LongWritable(intValue3));
}
@Test
public void testUpConvertOrcStructOfUnion() {
// Union in struct, type widening within the union's member field.
TypeDescription unionInStructSchema = TypeDescription.fromString("struct<a:uniontype<int,string>>");
OrcStruct unionInStruct = (OrcStruct) OrcStruct.createValue(unionInStructSchema);
OrcUnion placeHolderUnion = new OrcUnion(TypeDescription.fromString("uniontype<int,string>"));
placeHolderUnion.set(0, new IntWritable(1));
unionInStruct.setFieldValue("a", placeHolderUnion);
OrcTestUtils.fillOrcStructWithFixedValue(unionInStruct, unionInStructSchema, intValue1, stringValue1, boolValue);
// Create new structWithUnion
TypeDescription evolved_unionInStructSchema = TypeDescription.fromString("struct<a:uniontype<bigint,string>>");
OrcStruct evolvedUnionInStruct = (OrcStruct) OrcStruct.createValue(evolved_unionInStructSchema);
OrcUnion evolvedPlaceHolderUnion = new OrcUnion(TypeDescription.fromString("uniontype<bigint,string>"));
evolvedPlaceHolderUnion.set(0, new LongWritable(1L));
evolvedUnionInStruct.setFieldValue("a", evolvedPlaceHolderUnion);
OrcUtils.upConvertOrcStruct(unionInStruct, evolvedUnionInStruct, evolved_unionInStructSchema);
// Check in the tag 0(Default from value-filler) within evolvedUnionInStruct, the value is becoming type-widened with correct value.
Assert.assertEquals(((OrcUnion) evolvedUnionInStruct.getFieldValue("a")).getTag(), 0);
Assert.assertEquals(((OrcUnion) evolvedUnionInStruct.getFieldValue("a")).getObject(), new LongWritable(intValue1));
// Check the case when union field is created in different tag.
// Complex: List<Struct> within struct among others and evolution happens on multiple places, also type-widening in deeply nested level.
TypeDescription complexOrcSchema =
TypeDescription.fromString("struct<a:array<struct<a:string,b:int>>,b:struct<a:uniontype<int,string>>>");
OrcStruct complexOrcStruct = (OrcStruct) OrcUtils.createValueRecursively(complexOrcSchema);
OrcTestUtils.fillOrcStructWithFixedValue(complexOrcStruct, complexOrcSchema, intValue1, stringValue1, boolValue);
TypeDescription evolvedComplexOrcSchema = TypeDescription
.fromString("struct<a:array<struct<a:string,b:bigint,c:string>>,b:struct<a:uniontype<bigint,string>,b:int>>");
OrcStruct evolvedComplexStruct = (OrcStruct) OrcUtils.createValueRecursively(evolvedComplexOrcSchema);
OrcTestUtils
.fillOrcStructWithFixedValue(evolvedComplexStruct, evolvedComplexOrcSchema, intValue1, stringValue1, boolValue);
// Check if new columns are assigned with null value and type widening is working fine.
OrcUtils.upConvertOrcStruct(complexOrcStruct, evolvedComplexStruct, evolvedComplexOrcSchema);
Assert
.assertEquals(((OrcStruct)((OrcList)evolvedComplexStruct.getFieldValue("a")).get(0)).getFieldValue("b"), new LongWritable(intValue1));
Assert.assertNull(((OrcStruct)((OrcList)evolvedComplexStruct.getFieldValue("a")).get(0)).getFieldValue("c"));
Assert.assertEquals(((OrcUnion) ((OrcStruct)evolvedComplexStruct.getFieldValue("b")).getFieldValue("a")).getObject(), new LongWritable(intValue1));
Assert.assertNull(((OrcStruct)evolvedComplexStruct.getFieldValue("b")).getFieldValue("b"));
}
@Test
public void testNestedWithinUnionWithDiffTag() {
// Construct union type with different tag for the src object dest object, check if up-convert happens correctly.
TypeDescription structInUnionAsStruct = TypeDescription.fromString("struct<a:uniontype<struct<a:int,b:string>,int>>");
OrcStruct structInUnionAsStructObject = (OrcStruct) OrcUtils.createValueRecursively(structInUnionAsStruct);
OrcTestUtils
.fillOrcStructWithFixedValue(structInUnionAsStructObject, structInUnionAsStruct, 0, intValue1, stringValue1, boolValue);
Assert.assertEquals(((OrcStruct)((OrcUnion)structInUnionAsStructObject.getFieldValue("a")).getObject())
.getFieldValue("a"), new IntWritable(intValue1));
OrcStruct structInUnionAsStructObject_2 = (OrcStruct) OrcUtils.createValueRecursively(structInUnionAsStruct);
OrcTestUtils
.fillOrcStructWithFixedValue(structInUnionAsStructObject_2, structInUnionAsStruct, 1, intValue1, stringValue1, boolValue);
Assert.assertEquals(((OrcUnion)structInUnionAsStructObject_2.getFieldValue("a")).getObject(), new IntWritable(intValue1));
// Create a new record container, do up-convert twice and check if the value is propagated properly.
OrcStruct container = (OrcStruct) OrcUtils.createValueRecursively(structInUnionAsStruct);
OrcUtils.upConvertOrcStruct(structInUnionAsStructObject, container, structInUnionAsStruct);
Assert.assertEquals(structInUnionAsStructObject, container);
OrcUtils.upConvertOrcStruct(structInUnionAsStructObject_2, container, structInUnionAsStruct);
Assert.assertEquals(structInUnionAsStructObject_2, container);
}
/**
* This test mostly target at the following case:
* Schema: struct<a:array<struct<a:int,b:int>>>
* field a was set to null by one call of "upConvertOrcStruct", but the subsequent call should still have the nested
* field filled.
*/
public void testNestedFieldSequenceSet() {
TypeDescription schema = TypeDescription.fromString("struct<a:array<struct<a:int,b:int>>>");
OrcStruct struct = (OrcStruct) OrcUtils.createValueRecursively(schema);
OrcTestUtils.fillOrcStructWithFixedValue(struct, schema, 1, "test", true);
OrcStruct structWithEmptyArray = (OrcStruct) OrcUtils.createValueRecursively(schema);
OrcTestUtils.fillOrcStructWithFixedValue(structWithEmptyArray, schema, 1, "test", true);
structWithEmptyArray.setFieldValue("a", null);
OrcUtils.upConvertOrcStruct(structWithEmptyArray, struct, schema);
Assert.assertEquals(struct, structWithEmptyArray);
OrcStruct struct_2 = (OrcStruct) OrcUtils.createValueRecursively(schema);
OrcTestUtils.fillOrcStructWithFixedValue(struct_2, schema, 2, "test", true);
OrcUtils.upConvertOrcStruct(struct_2, struct, schema);
Assert.assertEquals(struct, struct_2);
}
/**
* Just a sanity test for column project, should be no difference from other cases when provided reader schema.
*/
@Test
public void testOrcStructProjection() {
TypeDescription originalSchema = TypeDescription.fromString("struct<a:struct<a:int,b:int>,b:struct<c:int,d:int>,c:int>");
OrcStruct originalStruct = (OrcStruct) OrcUtils.createValueRecursively(originalSchema);
OrcTestUtils.fillOrcStructWithFixedValue(originalStruct, originalSchema, intValue1, stringValue1, boolValue);
TypeDescription projectedSchema = TypeDescription.fromString("struct<a:struct<b:int>,b:struct<c:int>>");
OrcStruct projectedStructExpectedValue = (OrcStruct) OrcUtils.createValueRecursively(projectedSchema);
OrcTestUtils
.fillOrcStructWithFixedValue(projectedStructExpectedValue, projectedSchema, intValue1, stringValue1, boolValue);
OrcStruct projectColumnStruct = (OrcStruct) OrcUtils.createValueRecursively(projectedSchema);
OrcUtils.upConvertOrcStruct(originalStruct, projectColumnStruct, projectedSchema);
Assert.assertEquals(projectColumnStruct, projectedStructExpectedValue);
}
@Test
public void complexTypeEligibilityCheck() {
TypeDescription struct_array_0 = TypeDescription.fromString("struct<first:array<int>,second:int>");
TypeDescription struct_array_1 = TypeDescription.fromString("struct<first:array<int>,second:int>");
Assert.assertTrue(OrcUtils.eligibleForUpConvert(struct_array_0, struct_array_1));
TypeDescription struct_array_2 = TypeDescription.fromString("struct<first:array<string>,second:int>");
Assert.assertFalse(OrcUtils.eligibleForUpConvert(struct_array_0, struct_array_2));
TypeDescription struct_map_0 = TypeDescription.fromString("struct<first:map<string,string>,second:int>");
TypeDescription struct_map_1 = TypeDescription.fromString("struct<first:map<string,string>,second:int>");
TypeDescription struct_map_2 = TypeDescription.fromString("struct<first:map<string,int>,second:int>");
TypeDescription struct_map_3 = TypeDescription.fromString("struct<second:int>");
Assert.assertTrue(OrcUtils.eligibleForUpConvert(struct_map_0, struct_map_1));
Assert.assertFalse(OrcUtils.eligibleForUpConvert(struct_map_0, struct_map_2));
Assert.assertTrue(OrcUtils.eligibleForUpConvert(struct_map_0, struct_map_3));
}
public void testSchemaContains() {
// Simple case.
TypeDescription struct_0 = TypeDescription.fromString("struct<a:int,b:int>");
TypeDescription struct_1 = TypeDescription.fromString("struct<a:int>");
Assert.assertTrue(OrcUtils.eligibleForUpConvert(struct_0, struct_1));
// Nested schema case.
TypeDescription struct_2 = TypeDescription.fromString("struct<a:struct<a:int,b:int>,b:struct<c:int,d:int>,c:int>");
TypeDescription struct_3 = TypeDescription.fromString("struct<a:struct<a:int>,b:struct<c:int>,c:int>");
Assert.assertTrue(OrcUtils.eligibleForUpConvert(struct_2, struct_3));
// Negative case.
TypeDescription struct_4 = TypeDescription.fromString("struct<a:struct<a:int,b:int>,b:struct<c:int,d:int>,c:int>");
TypeDescription struct_5 = TypeDescription.fromString("struct<a:struct<a:int>,b:struct<c:int>,d:int>");
Assert.assertFalse(OrcUtils.eligibleForUpConvert(struct_4, struct_5));
TypeDescription struct_6 = TypeDescription.fromString("struct<a:struct<a:int>,b:struct<e:int>,c:int>");
Assert.assertFalse(OrcUtils.eligibleForUpConvert(struct_4, struct_6));
// Cases when target schema contains more
TypeDescription struct_7 = TypeDescription.fromString("struct<a:struct<a:int>,b:struct<e:int,f:int>,c:int>");
Assert.assertTrue(OrcUtils.eligibleForUpConvert(struct_6, struct_7));
// Negative case when target schema contains more but not all of the owning schema are there in the target schema.
// Note that struct_8 has a field "a.x".
TypeDescription struct_8 = TypeDescription.fromString("struct<a:struct<x:int>,b:struct<e:int>,c:int>");
TypeDescription struct_9 = TypeDescription.fromString("struct<a:struct<a:int>,b:struct<e:int,f:int>,c:int>");
Assert.assertFalse(OrcUtils.eligibleForUpConvert(struct_8, struct_9));
}
} | 1,784 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcList;
import org.apache.orc.mapred.OrcMap;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcUnion;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Mainly to test {@link OrcKeyComparator} is behaving as expected when it is comparing two {@link OrcStruct}.
* It covers basic(primitive) type of {@link OrcStruct} and those contain complex type (MAP, LIST, UNION, Struct)
*
* Reference: https://orc.apache.org/docs/types.html
*/
public class OrcKeyComparatorTest {
@Test
public void testSimpleComparator() throws Exception {
OrcKeyComparator comparator = new OrcKeyComparator();
Configuration conf = new Configuration();
String orcSchema = "struct<i:int,j:int>";
TypeDescription schema = TypeDescription.fromString(orcSchema);
conf.set(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute(), orcSchema);
Assert.assertEquals(conf.get(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute()), orcSchema);
comparator.setConf(conf);
OrcStruct record0 = createSimpleOrcStruct(schema, 1, 2);
OrcStruct record1 = createSimpleOrcStruct(schema, 3, 4);
OrcStruct record2 = createSimpleOrcStruct(schema, 3, 4);
OrcKey orcKey0 = new OrcKey();
orcKey0.key = record0;
OrcKey orcKey1 = new OrcKey();
orcKey1.key = record1;
OrcKey orcKey2 = new OrcKey();
orcKey2.key = record2;
Assert.assertTrue(comparator.compare(orcKey0, orcKey1) < 0);
Assert.assertTrue(comparator.compare(orcKey1, orcKey2) == 0);
Assert.assertTrue(comparator.compare(orcKey1, orcKey0) > 0);
}
@Test
public void testComplexRecordArray() throws Exception {
OrcKeyComparator comparator = new OrcKeyComparator();
Configuration conf = new Configuration();
TypeDescription listSchema = TypeDescription.createList(TypeDescription.createString());
TypeDescription schema =
TypeDescription.createStruct().addField("a", TypeDescription.createInt()).addField("b", listSchema);
conf.set(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute(), schema.toString());
Assert.assertEquals(conf.get(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute()), schema.toString());
comparator.setConf(conf);
// base record
OrcStruct record0 = (OrcStruct) OrcStruct.createValue(schema);
record0.setFieldValue("a", new IntWritable(1));
OrcList orcList0 = createOrcList(3, listSchema, 3);
record0.setFieldValue("b", orcList0);
// the same as base but different object, expecting equal to each other.
OrcStruct record1 = (OrcStruct) OrcStruct.createValue(schema);
record1.setFieldValue("a", new IntWritable(1));
OrcList orcList1 = createOrcList(3, listSchema, 3);
record1.setFieldValue("b", orcList1);
// Diff in int field
OrcStruct record2 = (OrcStruct) OrcStruct.createValue(schema);
record2.setFieldValue("a", new IntWritable(2));
OrcList orcList2 = createOrcList(3, listSchema, 3);
record2.setFieldValue("b", orcList2);
// Diff in array field: 1
OrcStruct record3 = (OrcStruct) OrcStruct.createValue(schema);
record3.setFieldValue("a", new IntWritable(1));
OrcList orcList3 = createOrcList(3, listSchema, 5);
record3.setFieldValue("b", orcList3);
// Diff in array field: 2
OrcStruct record4 = (OrcStruct) OrcStruct.createValue(schema);
record4.setFieldValue("a", new IntWritable(1));
OrcList orcList4 = createOrcList(4, listSchema, 3);
record4.setFieldValue("b", orcList4);
OrcKey orcKey0 = new OrcKey();
orcKey0.key = record0;
OrcKey orcKey1 = new OrcKey();
orcKey1.key = record1;
OrcKey orcKey2 = new OrcKey();
orcKey2.key = record2;
OrcKey orcKey3 = new OrcKey();
orcKey3.key = record3;
OrcKey orcKey4 = new OrcKey();
orcKey4.key = record4;
Assert.assertTrue(comparator.compare(orcKey0, orcKey1) == 0);
Assert.assertTrue(comparator.compare(orcKey1, orcKey2) < 0);
Assert.assertTrue(comparator.compare(orcKey1, orcKey3) < 0);
Assert.assertTrue(comparator.compare(orcKey1, orcKey4) < 0);
}
@Test
public void testComplexRecordMap() throws Exception {
OrcKeyComparator comparator = new OrcKeyComparator();
Configuration conf = new Configuration();
TypeDescription mapFieldSchema =
TypeDescription.createMap(TypeDescription.createString(), TypeDescription.createString());
TypeDescription schema =
TypeDescription.createStruct().addField("a", TypeDescription.createInt()).addField("b", mapFieldSchema);
conf.set(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute(), schema.toString());
Assert.assertEquals(conf.get(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute()), schema.toString());
comparator.setConf(conf);
// base record
OrcStruct record0 = (OrcStruct) OrcStruct.createValue(schema);
record0.setFieldValue("a", new IntWritable(1));
OrcMap orcMap = createSimpleOrcMap(new Text("key"), new Text("value"), mapFieldSchema);
record0.setFieldValue("b", orcMap);
// key value both differ
OrcStruct record1 = (OrcStruct) OrcStruct.createValue(schema);
record1.setFieldValue("a", new IntWritable(1));
OrcMap orcMap1 = createSimpleOrcMap(new Text("key_key"), new Text("value_value"), mapFieldSchema);
record1.setFieldValue("b", orcMap1);
// Key same, value differ
OrcStruct record2 = (OrcStruct) OrcStruct.createValue(schema);
record2.setFieldValue("a", new IntWritable(1));
OrcMap orcMap2 = createSimpleOrcMap(new Text("key"), new Text("value_value"), mapFieldSchema);
record2.setFieldValue("b", orcMap2);
// Same as base
OrcStruct record3 = (OrcStruct) OrcStruct.createValue(schema);
record3.setFieldValue("a", new IntWritable(1));
OrcMap orcMap3 = createSimpleOrcMap(new Text("key"), new Text("value"), mapFieldSchema);
record3.setFieldValue("b", orcMap3);
// Differ in other field.
OrcStruct record4 = (OrcStruct) OrcStruct.createValue(schema);
record4.setFieldValue("a", new IntWritable(2));
record4.setFieldValue("b", orcMap);
// Record with map containing multiple entries but inserted in different order.
OrcStruct record6 = (OrcStruct) OrcStruct.createValue(schema);
record6.setFieldValue("a", new IntWritable(1));
OrcMap orcMap6 = createSimpleOrcMap(new Text("key"), new Text("value"), mapFieldSchema);
orcMap6.put(new Text("keyLater"), new Text("valueLater"));
record6.setFieldValue("b", orcMap6);
OrcStruct record7 = (OrcStruct) OrcStruct.createValue(schema);
record7.setFieldValue("a", new IntWritable(1));
OrcMap orcMap7 = createSimpleOrcMap(new Text("keyLater"), new Text("valueLater"), mapFieldSchema);
orcMap7.put(new Text("key"), new Text("value"));
record7.setFieldValue("b", orcMap7);
OrcKey orcKey0 = new OrcKey();
orcKey0.key = record0;
OrcKey orcKey1 = new OrcKey();
orcKey1.key = record1;
OrcKey orcKey2 = new OrcKey();
orcKey2.key = record2;
OrcKey orcKey3 = new OrcKey();
orcKey3.key = record3;
OrcKey orcKey4 = new OrcKey();
orcKey4.key = record4;
OrcKey orcKey6 = new OrcKey();
orcKey6.key = record6;
OrcKey orcKey7 = new OrcKey();
orcKey7.key = record7;
Assert.assertTrue(comparator.compare(orcKey0, orcKey1) < 0);
Assert.assertTrue(comparator.compare(orcKey1, orcKey2) > 0);
Assert.assertTrue(comparator.compare(orcKey2, orcKey3) > 0);
Assert.assertTrue(comparator.compare(orcKey0, orcKey3) == 0);
Assert.assertTrue(comparator.compare(orcKey0, orcKey4) < 0);
Assert.assertTrue(comparator.compare(orcKey6, orcKey7) == 0);
}
// Test comparison for union containing complex types and nested record inside.
// Schema: struct<a:int,
// b:uniontype<int,
// array<string>,
// struct<x:int,y:int>
// >
// >
@Test
public void testComplexRecordUnion() throws Exception {
OrcKeyComparator comparator = new OrcKeyComparator();
Configuration conf = new Configuration();
TypeDescription listSchema = TypeDescription.createList(TypeDescription.createString());
TypeDescription nestedRecordSchema = TypeDescription.createStruct()
.addField("x", TypeDescription.createInt())
.addField("y", TypeDescription.createInt());
TypeDescription unionSchema = TypeDescription.createUnion()
.addUnionChild(TypeDescription.createInt())
.addUnionChild(listSchema)
.addUnionChild(nestedRecordSchema);
TypeDescription schema =
TypeDescription.createStruct()
.addField("a", TypeDescription.createInt())
.addField("b", unionSchema);
conf.set(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute(), schema.toString());
Assert.assertEquals(conf.get(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.getAttribute()), schema.toString());
comparator.setConf(conf);
// base record
OrcStruct record0 = (OrcStruct) OrcStruct.createValue(schema);
record0.setFieldValue("a", new IntWritable(1));
OrcStruct nestedRecord0 = createSimpleOrcStruct(nestedRecordSchema, 1, 2);
OrcUnion orcUnion0 = createOrcUnion(unionSchema, nestedRecord0);
record0.setFieldValue("b", orcUnion0);
// same content as base record in diff objects.
OrcStruct record1 = (OrcStruct) OrcStruct.createValue(schema);
record1.setFieldValue("a", new IntWritable(1));
OrcStruct nestedRecord1 = createSimpleOrcStruct(nestedRecordSchema, 1, 2);
OrcUnion orcUnion1 = createOrcUnion(unionSchema, nestedRecord1);
record1.setFieldValue("b", orcUnion1);
// diff records inside union, record0 == record1 < 2
OrcStruct record2 = (OrcStruct) OrcStruct.createValue(schema);
record2.setFieldValue("a", new IntWritable(1));
OrcStruct nestedRecord2 = createSimpleOrcStruct(nestedRecordSchema, 2, 2);
OrcUnion orcUnion2 = createOrcUnion(unionSchema, nestedRecord2);
record2.setFieldValue("b", orcUnion2);
// differ in list inside union, record3 < record4 == record5
OrcStruct record3 = (OrcStruct) OrcStruct.createValue(schema);
record3.setFieldValue("a", new IntWritable(1));
OrcList orcList3 = createOrcList(5, listSchema, 2);
OrcUnion orcUnion3 = createOrcUnion(unionSchema, orcList3);
record3.setFieldValue("b", orcUnion3);
OrcStruct record4 = (OrcStruct) OrcStruct.createValue(schema);
record4.setFieldValue("a", new IntWritable(1));
OrcList orcList4 = createOrcList(6, listSchema, 2);
OrcUnion orcUnion4 = createOrcUnion(unionSchema, orcList4);
record4.setFieldValue("b", orcUnion4);
OrcStruct record5 = (OrcStruct) OrcStruct.createValue(schema);
record5.setFieldValue("a", new IntWritable(1));
OrcList orcList5 = createOrcList(6, listSchema, 2);
OrcUnion orcUnion5 = createOrcUnion(unionSchema, orcList5);
record5.setFieldValue("b", orcUnion5);
OrcKey orcKey0 = new OrcKey();
orcKey0.key = record0;
OrcKey orcKey1 = new OrcKey();
orcKey1.key = record1;
OrcKey orcKey2 = new OrcKey();
orcKey2.key = record2;
OrcKey orcKey3 = new OrcKey();
orcKey3.key = record3;
OrcKey orcKey4 = new OrcKey();
orcKey4.key = record4;
OrcKey orcKey5 = new OrcKey();
orcKey5.key = record5;
Assert.assertEquals(orcUnion0, orcUnion1);
// Int value in orcKey2 is larger
Assert.assertTrue(comparator.compare(orcKey0, orcKey2) < 0);
Assert.assertTrue(comparator.compare(orcKey3, orcKey4) < 0 );
Assert.assertTrue(comparator.compare(orcKey3, orcKey5) < 0);
Assert.assertTrue(comparator.compare(orcKey4, orcKey5) == 0);
}
private OrcMap createSimpleOrcMap(Text key, Text value, TypeDescription schema) {
TreeMap map = new TreeMap<Text, Text>();
map.put(key, value);
OrcMap result = new OrcMap(schema);
result.putAll(map);
return result;
}
/**
* Create a {@link OrcList} repeating the given parameter inside the list for multiple times.
*/
private OrcList createOrcList(int element, TypeDescription schema, int num) {
OrcList result = new OrcList(schema);
for (int i = 0; i < num; i++) {
result.add(new IntWritable(element));
}
return result;
}
  /**
   * Create an {@link OrcUnion} whose active member is the first union branch (tag 0), holding the
   * given value.
   *
   * @param schema the ORC union schema backing the {@link OrcUnion}
   * @param value the value assigned to union member 0
   * @return an {@link OrcUnion} holding {@code value} at tag 0
   */
  private OrcUnion createOrcUnion(TypeDescription schema, WritableComparable value) {
    OrcUnion result = new OrcUnion(schema);
    // Tag is hard-coded to 0, i.e. the first member type of the union schema.
    result.set(0, value);
    return result;
  }
private OrcStruct createSimpleOrcStruct(TypeDescription structSchema, int value1, int value2) {
OrcStruct result = new OrcStruct(structSchema);
result.setFieldValue(0, new IntWritable(value1));
result.setFieldValue(1, new IntWritable(value2));
return result;
}
}
| 1,785 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce/orc/OrcTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import java.util.Map;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcList;
import org.apache.orc.mapred.OrcMap;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcUnion;
public class OrcTestUtils {
  /**
   * Fill in {@code w} with fixed values following the given schema, assuming {@code w} was created
   * with the same schema as {@code schema}. The {@code schema} argument is still necessary even
   * though {@code w} itself carries schema information, because the actual value type is only
   * available in {@link TypeDescription} but not {@link org.apache.orc.mapred.OrcValue}.
   *
   * For simplicity here are some assumptions:
   * - Only 3 primitive values (int, String, boolean) are given and used to construct compound
   *   values. To make it work for different types that can be widened or shrunk to each other,
   *   please use values within a small range.
   * - For List, Map or Union, make sure there's at least one entry within the record-container,
   *   otherwise the traversal of that container is skipped. You may want to try
   *   createValueRecursively(TypeDescription) instead of {@link OrcStruct#createValue(TypeDescription)}.
   *
   * @param w the writable value container to fill in place
   * @param schema the ORC schema describing {@code w}
   * @param unionTag for union members, the tag of the member type to populate
   * @param intValue the value assigned to all numeric fields
   * @param stringValue the value assigned to all string-like fields
   * @param booleanValue the value assigned to all boolean fields
   */
  public static void fillOrcStructWithFixedValue(WritableComparable w, TypeDescription schema, int unionTag,
      int intValue, String stringValue, boolean booleanValue) {
    switch (schema.getCategory()) {
      case BOOLEAN:
        ((BooleanWritable) w).set(booleanValue);
        break;
      case BYTE:
        ((ByteWritable) w).set((byte) intValue);
        break;
      case SHORT:
        ((ShortWritable) w).set((short) intValue);
        break;
      case INT:
        ((IntWritable) w).set(intValue);
        break;
      case LONG:
        ((LongWritable) w).set(intValue);
        break;
      case FLOAT:
        ((FloatWritable) w).set(intValue * 1.0f);
        break;
      case DOUBLE:
        ((DoubleWritable) w).set(intValue * 1.0);
        break;
      case STRING:
      case CHAR:
      case VARCHAR:
        ((Text) w).set(stringValue);
        break;
      case BINARY:
        throw new UnsupportedOperationException("Binary type is not supported in random orc data filler");
      case DECIMAL:
        throw new UnsupportedOperationException("Decimal type is not supported in random orc data filler");
      case DATE:
      case TIMESTAMP:
      case TIMESTAMP_INSTANT:
        throw new UnsupportedOperationException(
            "Timestamp and its derived types is not supported in random orc data filler");
      case LIST:
        OrcList castedList = (OrcList) w;
        // Here it is not trivial to create typed-object in element-type. So this method expect the value container
        // to at least contain one element, or the traversing within the list will be skipped.
        for (Object i : castedList) {
          fillOrcStructWithFixedValue((WritableComparable) i, schema.getChildren().get(0), unionTag, intValue,
              stringValue, booleanValue);
        }
        break;
      case MAP:
        OrcMap castedMap = (OrcMap) w;
        // Recurse into every existing entry: child 0 of the schema is the key type, child 1 the value type.
        for (Object entry : castedMap.entrySet()) {
          Map.Entry<WritableComparable, WritableComparable> castedEntry =
              (Map.Entry<WritableComparable, WritableComparable>) entry;
          fillOrcStructWithFixedValue(castedEntry.getKey(), schema.getChildren().get(0), unionTag, intValue,
              stringValue, booleanValue);
          fillOrcStructWithFixedValue(castedEntry.getValue(), schema.getChildren().get(1), unionTag, intValue,
              stringValue, booleanValue);
        }
        break;
      case STRUCT:
        OrcStruct castedStruct = (OrcStruct) w;
        int fieldIdx = 0;
        // Fill each field positionally; the schema children are in field declaration order.
        for (TypeDescription child : schema.getChildren()) {
          fillOrcStructWithFixedValue(castedStruct.getFieldValue(fieldIdx), child, unionTag, intValue, stringValue,
              booleanValue);
          fieldIdx += 1;
        }
        break;
      case UNION:
        OrcUnion castedUnion = (OrcUnion) w;
        // Replace the union's current value with a freshly-created container of the selected
        // member type, then fill that container recursively.
        TypeDescription targetMemberSchema = schema.getChildren().get(unionTag);
        castedUnion.set(unionTag, OrcUtils.createValueRecursively(targetMemberSchema));
        fillOrcStructWithFixedValue((WritableComparable) castedUnion.getObject(), targetMemberSchema, unionTag,
            intValue, stringValue, booleanValue);
        break;
      default:
        throw new IllegalArgumentException("Unknown type " + schema.toString());
    }
  }
  /**
   * The simple API: the union tag defaults to 0 (first member of any union encountered).
   *
   * @see #fillOrcStructWithFixedValue(WritableComparable, TypeDescription, int, int, String, boolean)
   */
  public static void fillOrcStructWithFixedValue(WritableComparable w, TypeDescription schema, int intValue,
      String stringValue, boolean booleanValue) {
    fillOrcStructWithFixedValue(w, schema, 0, intValue, stringValue, booleanValue);
  }
}
| 1,786 |
0 | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce | Create_ds/gobblin/gobblin-compaction/src/test/java/org/apache/gobblin/compaction/mapreduce/orc/OrcValueMapperTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.mapreduce.orc;
import org.apache.orc.TypeDescription;
import org.junit.Assert;
import org.testng.annotations.Test;
public class OrcValueMapperTest {
@Test
public void testIsEvolutionValid() {
TypeDescription schema_1 = TypeDescription.fromString("struct<i:int,j:int,k:int>");
TypeDescription schema_2 = TypeDescription.fromString("struct<i:int,j:int,k:bigint>");
TypeDescription schema_3 = TypeDescription.fromString("struct<i:int,j:int,k:tinyint>");
TypeDescription schema_4 = TypeDescription.fromString("struct<i:int,j:int>");
Assert.assertTrue(OrcUtils.isEvolutionValid(schema_1, schema_2));
Assert.assertTrue(OrcUtils.isEvolutionValid(schema_1, schema_3));
Assert.assertTrue(OrcUtils.isEvolutionValid(schema_1, schema_4));
Assert.assertTrue(OrcUtils.isEvolutionValid(schema_4, schema_1));
}
} | 1,787 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/CliOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction;
import java.io.IOException;
import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.gobblin.util.JobConfigurationUtils;
/**
 * Utility class for parsing command line options for Gobblin compaction jobs.
 *
 * @author Lorand Bendig
 *
 */
public class CliOptions {
  // --jobconfig <file>: path to the Gobblin compaction job configuration file (required).
  private final static Option JOB_CONFIG_OPTION = Option.builder().argName("job configuration file")
      .desc("Gobblin compaction job configuration file").hasArgs().longOpt("jobconfig").build();
  // -h / --help: print usage and exit.
  private final static Option HELP_OPTION =
      Option.builder("h").argName("help").desc("Display usage information").longOpt("help").build();
  /**
   * Parse command line arguments and return a {@link java.util.Properties} object for the Gobblin job found.
   * Note: calls {@link System#exit} when --help is requested (exit 0) or --jobconfig is missing (exit 1).
   * @param caller Class of the calling main method. Used for error logs.
   * @param args Command line arguments.
   * @param conf Hadoop configuration object; may be {@code null}, in which case only the job config
   *             file is read.
   * @return Instance of {@link Properties} for the Gobblin job to run.
   * @throws IOException if the arguments or the job config file cannot be parsed
   */
  public static Properties parseArgs(Class<?> caller, String[] args, Configuration conf) throws IOException {
    try {
      // Parse command-line options
      if (conf != null) {
        // Let Hadoop's GenericOptionsParser consume generic options (-D, -fs, ...) first,
        // leaving only the application-specific arguments.
        args = new GenericOptionsParser(conf, args).getCommandLine().getArgs();
      }
      CommandLine cmd = new DefaultParser().parse(options(), args);
      if (cmd.hasOption(HELP_OPTION.getOpt())) {
        printUsage(caller);
        System.exit(0);
      }
      String jobConfigLocation = JOB_CONFIG_OPTION.getLongOpt();
      if (!cmd.hasOption(jobConfigLocation)) {
        printUsage(caller);
        System.exit(1);
      }
      // Load job configuration properties
      Properties jobConfig;
      if (conf == null) {
        jobConfig = JobConfigurationUtils.fileToProperties(cmd.getOptionValue(jobConfigLocation));
      } else {
        jobConfig = JobConfigurationUtils.fileToProperties(cmd.getOptionValue(jobConfigLocation), conf);
        // Job config takes precedence over Hadoop config: remove from conf any key the job file
        // also defines, then fold the remaining Hadoop settings into the job properties.
        for (String configKey : jobConfig.stringPropertyNames()) {
          if (conf.get(configKey) != null) {
            conf.unset(configKey);
          }
        }
        JobConfigurationUtils.putConfigurationIntoProperties(conf, jobConfig);
      }
      return jobConfig;
    } catch (ParseException | ConfigurationException e) {
      throw new IOException(e);
    }
  }
  /**
   * Convenience overload of {@link #parseArgs(Class, String[], Configuration)} with no Hadoop
   * configuration.
   */
  public static Properties parseArgs(Class<?> caller, String[] args) throws IOException {
    return parseArgs(caller, args, null);
  }
  /**
   * Prints the usage of cli.
   * @param caller Class of the main method called. Used in printing the usage message.
   */
  public static void printUsage(Class<?> caller) {
    new HelpFormatter().printHelp(caller.getSimpleName(), options());
  }
  // Builds the supported option set (job config + help) for parsing and usage printing.
  private static Options options() {
    Options options = new Options();
    options.addOption(JOB_CONFIG_OPTION);
    options.addOption(HELP_OPTION);
    return options;
  }
}
| 1,788 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/CompactorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction;
import java.util.List;
import java.util.Properties;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.compaction.listeners.CompactorListener;
import org.apache.gobblin.metrics.Tag;
/**
 * A factory responsible for creating {@link Compactor}s.
 *
 * @deprecated Please use {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask}
 * and {@link org.apache.gobblin.compaction.source.CompactionSource} to launch MR instead.
 * The new way enjoys simpler logic to trigger the compaction flow and more reliable verification
 * criteria, instead of relying on timestamps only as before.
 */
@Alpha
@Deprecated
public interface CompactorFactory {
  /**
   * Creates a {@link Compactor}.
   *
   * @param properties a {@link Properties} object used to create the {@link Compactor}
   * @param tags a {@link List} of {@link Tag}s used to create the {@link Compactor}.
   * @param compactorListener a {@link CompactorListener} used to create the {@link Compactor}.
   *
   * @return a {@link Compactor}
   *
   * @throws CompactorCreationException if there is a problem creating the {@link Compactor}
   */
  public Compactor createCompactor(Properties properties, List<Tag<String>> tags,
      Optional<CompactorListener> compactorListener) throws CompactorCreationException;
}
| 1,789 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/CompactorCreationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction;
/**
 * Thrown by {@link CompactorFactory} if there is a problem creating a {@link Compactor}.
 */
public class CompactorCreationException extends Exception {
  private static final long serialVersionUID = 1L;
  /**
   * @param message a human-readable description of the creation failure
   * @param cause the underlying exception that prevented the {@link Compactor} from being created
   */
  public CompactorCreationException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 1,790 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/Compactor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction;
import java.io.IOException;
/**
 * Compactor interface. Implementations perform data compaction (e.g. merging a snapshot table
 * with delta tables) and support cancellation of an in-flight compaction.
 */
public interface Compactor {
  /**
   * A method for data compaction, e.g., merge a snapshot table and a delta table.
   * @throws IOException if the compaction fails
   */
  public void compact() throws IOException;
  /**
   * Cancel the compaction
   * @throws IOException if cancellation fails
   */
  public void cancel() throws IOException;
}
| 1,791 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/ReflectionCompactorFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction;
import java.util.List;
import java.util.Properties;
import org.apache.commons.lang3.reflect.ConstructorUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import org.apache.gobblin.compaction.listeners.CompactorListener;
import org.apache.gobblin.metrics.Tag;
/**
 * Implementation of {@link CompactorFactory} that creates a {@link Compactor} using reflection.
 *
 * @deprecated Please use {@link org.apache.gobblin.compaction.mapreduce.MRCompactionTask}
 * and {@link org.apache.gobblin.compaction.source.CompactionSource} to launch MR instead.
 * The new way enjoys simpler logic to trigger the compaction flow and more reliable verification criteria,
 * instead of using timestamp only before.
 */
@Deprecated
public class ReflectionCompactorFactory implements CompactorFactory {
  // Property key naming the Compactor implementation class to instantiate.
  @VisibleForTesting
  static final String COMPACTION_COMPACTOR_CLASS = "compaction.compactor.class";
  private static final String DEFAULT_COMPACTION_COMPACTOR_CLASS = "org.apache.gobblin.compaction.mapreduce.MRCompactor";
  @Override
  public Compactor createCompactor(Properties properties, List<Tag<String>> tags,
      Optional<CompactorListener> compactorListener) throws CompactorCreationException {
    String compactorClassName = properties.getProperty(COMPACTION_COMPACTOR_CLASS, DEFAULT_COMPACTION_COMPACTOR_CLASS);
    try {
      // The target class is expected to expose a (Properties, List, Optional) constructor;
      // ConstructorUtils resolves the best-matching accessible constructor reflectively.
      return (Compactor) ConstructorUtils
          .invokeConstructor(Class.forName(compactorClassName), properties, tags, compactorListener);
    } catch (ReflectiveOperationException e) {
      throw new CompactorCreationException(String
          .format("Unable to create Compactor from key \"%s\" with value \"%s\"", COMPACTION_COMPACTOR_CLASS,
              compactorClassName), e);
    }
  }
}
| 1,792 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/CompactionThresholdVerifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.gobblin.compaction.action.CompactionGMCEPublishingAction;
import org.apache.gobblin.compaction.conditions.RecompactionConditionBasedOnRatio;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.hadoop.fs.Path;
/**
 * Compare the source and destination file records' count and determine if a compaction is needed.
 * The verification passes when enough new records (relative to a configurable ratio threshold)
 * have arrived since the last compaction.
 */
@Slf4j
public class CompactionThresholdVerifier implements CompactionVerifier<FileSystemDataset> {
  private final State state;
  /**
   * Constructor
   *
   * @param state job-level state carrying the per-dataset threshold and GMCE configuration
   */
  public CompactionThresholdVerifier(State state) {
    this.state = state;
  }
  /**
   * There are two record count we are comparing here
   * 1) The new record count in the input folder
   * 2) The record count we compacted previously from last run
   * Calculate two numbers difference and compare with a predefined threshold.
   *
   * (Alternatively we can save the previous record count to a state store. However each input
   * folder is a dataset. We may end up with loading too many redundant job level state for each
   * dataset. To avoid scalability issue, we choose a stateless approach where each dataset tracks
   * record count by themselves and persist it in the file system)
   *
   * @return true if the difference exceeds the threshold or this is the first time compaction or
   * GMCE is enabled but last run there is something wrong when emitting GMCE
   */
  public Result verify(FileSystemDataset dataset) {
    // Per-dataset thresholds are configured as a regex-to-ratio map; the dataset name is matched
    // against the regexes to pick the applicable ratio.
    Map<String, Double> thresholdMap = RecompactionConditionBasedOnRatio.
        getDatasetRegexAndRecompactThreshold(
            state.getProp(MRCompactor.COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET, StringUtils.EMPTY));
    CompactionPathParser.CompactionParserResult result = new CompactionPathParser(state).parse(dataset);
    double threshold =
        RecompactionConditionBasedOnRatio.getRatioThresholdByDatasetName(result.getDatasetName(), thresholdMap);
    log.debug("Threshold is {} for dataset {}", threshold, result.getDatasetName());
    InputRecordCountHelper helper = new InputRecordCountHelper(state);
    try {
      double newRecords = 0;
      if (!dataset.isVirtual()) {
        newRecords = helper.calculateRecordCount(Lists.newArrayList(new Path(dataset.datasetURN())));
      }
      double oldRecords = helper.readRecordCount(new Path(result.getDstAbsoluteDir()));
      State datasetState = helper.loadState(new Path(result.getDstAbsoluteDir()));
      // No previous record count means this is the first compaction for the dataset.
      if (oldRecords == 0) {
        return new Result(true, "");
      }
      // Re-compact when GMCE emission is enabled but the previous run failed to emit it.
      if (state.getPropAsBoolean(ConfigurationKeys.GOBBLIN_METADATA_CHANGE_EVENT_ENABLED, false)
          && !datasetState.getPropAsBoolean(CompactionGMCEPublishingAction.GMCE_EMITTED_KEY, true)) {
        return new Result(true, "GMCE has not sent, need re-compact");
      }
      if (newRecords < oldRecords) {
        // The input should only ever grow; a shrinking count indicates an inconsistent state.
        return new Result(false, String.format(
            "Illegal state: current record count (%f) should not be smaller than the previously compacted count (%f)",
            newRecords, oldRecords));
      }
      if ((newRecords - oldRecords) / oldRecords > threshold) {
        log.debug("Dataset {} records exceeded the threshold {}", dataset.datasetURN(), threshold);
        return new Result(true, "");
      }
      return new Result(false,
          String.format("%s is failed for dataset %s. Prev=%f, Cur=%f, not reaching to threshold %f", this.getName(),
              result.getDatasetName(), oldRecords, newRecords, threshold));
    } catch (IOException e) {
      return new Result(false, ExceptionUtils.getFullStackTrace(e));
    }
  }
  /**
   * Get compaction threshold verifier name
   */
  public String getName() {
    return this.getClass().getName();
  }
  /** Threshold verification is deterministic for a given input, so retrying is pointless. */
  public boolean isRetriable() {
    return false;
  }
}
| 1,793 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/CompactionAuditCountVerifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import java.io.IOException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import org.apache.commons.lang.exception.ExceptionUtils;
import com.google.common.base.Splitter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.audit.AuditCountClient;
import org.apache.gobblin.compaction.audit.AuditCountClientFactory;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.time.TimeIterator;
import org.apache.gobblin.util.ClassAliasResolver;
/**
* Use {@link AuditCountClient} to retrieve all record count across different tiers
* Compare one specific tier (gobblin-tier) with all other refernce tiers and determine
* if verification should be passed based on a pre-defined threshold.
* @TODO: 8/31/21 "Use @{@link org.apache.gobblin.completeness.verifier.KafkaAuditCountVerifier}"
*/
@Slf4j
public class CompactionAuditCountVerifier implements CompactionVerifier<FileSystemDataset> {
public static final String COMPACTION_COMPLETENESS_THRESHOLD = MRCompactor.COMPACTION_PREFIX + "completeness.threshold";
public static final String COMPACTION_COMMPLETENESS_ENABLED = MRCompactor.COMPACTION_PREFIX + "completeness.enabled";
public static final String COMPACTION_COMMPLETENESS_GRANULARITY = MRCompactor.COMPACTION_PREFIX + "completeness.granularity";
public static final double DEFAULT_COMPACTION_COMPLETENESS_THRESHOLD = 0.99;
public static final String PRODUCER_TIER = "producer.tier";
public static final String ORIGIN_TIER = "origin.tier";
public static final String GOBBLIN_TIER = "gobblin.tier";
private Collection<String> referenceTiers;
private Collection<String> originTiers;
private String producerTier;
private String gobblinTier;
private double threshold;
protected final State state;
private final AuditCountClient auditCountClient;
protected final boolean enabled;
protected final TimeIterator.Granularity granularity;
protected final ZoneId zone;
/**
* Constructor with default audit count client
*/
public CompactionAuditCountVerifier (State state) {
this (state, getClientFactory (state).createAuditCountClient(state));
}
/**
* Constructor with user specified audit count client
*/
public CompactionAuditCountVerifier (State state, AuditCountClient client) {
this.auditCountClient = client;
this.state = state;
this.enabled = state.getPropAsBoolean(COMPACTION_COMMPLETENESS_ENABLED, true);
this.granularity = TimeIterator.Granularity.valueOf(
state.getProp(COMPACTION_COMMPLETENESS_GRANULARITY, "HOUR"));
this.zone = ZoneId.of(state.getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
// retrieve all tiers information
if (client != null) {
this.threshold =
state.getPropAsDouble(COMPACTION_COMPLETENESS_THRESHOLD, DEFAULT_COMPACTION_COMPLETENESS_THRESHOLD);
this.producerTier = state.getProp(PRODUCER_TIER);
this.gobblinTier = state.getProp(GOBBLIN_TIER);
this.originTiers = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(state.getProp(ORIGIN_TIER));
this.referenceTiers = new HashSet<>(originTiers);
this.referenceTiers.add(producerTier);
}
}
/**
* Obtain a client factory
* @param state job state
* @return a factory which creates {@link AuditCountClient}.
* If no factory is set or an error occurred, a {@link EmptyAuditCountClientFactory} is
* returned which creates a <code>null</code> {@link AuditCountClient}
*/
private static AuditCountClientFactory getClientFactory (State state) {
if (!state.contains(AuditCountClientFactory.AUDIT_COUNT_CLIENT_FACTORY)) {
return new EmptyAuditCountClientFactory ();
}
try {
String factoryName = state.getProp(AuditCountClientFactory.AUDIT_COUNT_CLIENT_FACTORY);
ClassAliasResolver<AuditCountClientFactory> conditionClassAliasResolver = new ClassAliasResolver<>(AuditCountClientFactory.class);
AuditCountClientFactory factory = conditionClassAliasResolver.resolveClass(factoryName).newInstance();
return factory;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Verify a specific dataset by following below steps
* 1) Retrieve a tier-to-count mapping
* 2) Read count from {@link CompactionAuditCountVerifier#gobblinTier}
* 3) Read count from all other {@link CompactionAuditCountVerifier#referenceTiers}
* 4) Compare count retrieved from steps 2) and 3), if any of (gobblin/refenence) >= threshold, return true, else return false
* @param dataset Dataset needs to be verified
* @return If verification is succeeded
*/
public Result verify (FileSystemDataset dataset) {
if (!enabled) {
return new Result(true, "");
}
if (auditCountClient == null) {
log.debug("No audit count client specified, skipped");
return new Result(true, "");
}
CompactionPathParser.CompactionParserResult result = new CompactionPathParser(state).parse(dataset);
ZonedDateTime startTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(result.getTime().getMillis()), zone);
ZonedDateTime endTime = TimeIterator.inc(startTime, granularity, 1);
String datasetName = result.getDatasetName();
try {
Map<String, Long> countsByTier = auditCountClient.fetch(datasetName,
startTime.toInstant().toEpochMilli(), endTime.toInstant().toEpochMilli());
for (String tier: referenceTiers) {
Result rst = passed (datasetName, countsByTier, tier);
if (rst.isSuccessful()) {
return new Result(true, "");
}
}
} catch (IOException e) {
return new Result(false, ExceptionUtils.getFullStackTrace(e));
}
return new Result(false, String.format("%s data is not complete between %s and %s", datasetName, startTime, endTime));
}
/**
* Compare record count between {@link CompactionAuditCountVerifier#gobblinTier} and {@link CompactionAuditCountVerifier#referenceTiers}.
* @param datasetName the name of dataset
* @param countsByTier the tier-to-count mapping retrieved by {@link AuditCountClient#fetch(String, long, long)}
* @param referenceTier the tiers we wants to compare against
* @return If any of (gobblin/refenence) >= threshold, return true, else return false
*/
private Result passed (String datasetName, Map<String, Long> countsByTier, String referenceTier) {
if (!countsByTier.containsKey(this.gobblinTier)) {
log.info("Missing entry for dataset: " + datasetName + " in gobblin tier: " + this.gobblinTier + "; setting count to 0.");
}
if (!countsByTier.containsKey(referenceTier)) {
log.info("Missing entry for dataset: " + datasetName + " in reference tier: " + referenceTier + "; setting count to 0.");
}
long refCount = countsByTier.getOrDefault(referenceTier, 0L);
long gobblinCount = countsByTier.getOrDefault(this.gobblinTier, 0L);
if (refCount == 0) {
return new Result(true, "");
}
if ((double) gobblinCount / (double) refCount < this.threshold) {
return new Result (false, String.format("%s failed for %s : gobblin count = %d, %s count = %d (%f < threshold %f)",
this.getName(), datasetName, gobblinCount, referenceTier, refCount, (double) gobblinCount / (double) refCount, this.threshold));
}
return new Result(true, "");
}
/**
 * @return the runtime class name of this verifier; note this uses {@code this.getClass()},
 *         so subclasses (e.g. watermark checkers extending this verifier) report their own name
 */
public String getName() {
  return this.getClass().getName();
}
/**
 * A no-op {@link AuditCountClientFactory} whose {@code createAuditCountClient} always
 * returns {@code null}, i.e. no audit count source is available.
 */
private static class EmptyAuditCountClientFactory implements AuditCountClientFactory {
  public AuditCountClient createAuditCountClient (State state) {
    // Intentionally returns null: callers must handle the absence of a client.
    return null;
  }
}
/**
 * Audit counts may simply not have arrived yet when verification runs, so a failed
 * verification is worth retrying later.
 */
public boolean isRetriable () {
  return true;
}
}
| 1,794 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/CompactionWatermarkChecker.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import java.time.Instant;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoField;
import java.time.temporal.ChronoUnit;
import com.google.common.annotations.VisibleForTesting;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.source.CompactionSource;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.gobblin.time.TimeIterator;
/**
* A {@link CompactionAuditCountVerifier} to report compaction watermarks based on verification
* result
*
* <p> A {@code watermarkTime} is the previous time of {@value CompactionSource#COMPACTION_INIT_TIME}. It
* can be computed in different {@link org.apache.gobblin.time.TimeIterator.Granularity}. For example, if
* compaction init time is 2019/12/01 18:16:00.000, its compaction watermark in minute granularity is the
* last millis of previous minute, 2019/12/01 18:15:59.999, and watermark in day granularity is the last
* millis of previous day, 2019/11/30 23:59:59.999.
*
 * <p> The checker will report {@code watermarkTime} in epoch millis as {@value COMPACTION_WATERMARK}
 * regardless of audit counts. If audit counts match, it will also report the time in epoch millis
 * as {@value COMPLETION_COMPACTION_WATERMARK}
*/
@Slf4j
public class CompactionWatermarkChecker extends CompactionAuditCountVerifier {

  public static final String TIME_FORMAT = "compactionWatermarkChecker.timeFormat";
  public static final String COMPACTION_WATERMARK = "compactionWatermark";
  public static final String COMPLETION_COMPACTION_WATERMARK = "completionAndCompactionWatermark";

  // Last millisecond of the granularity period that precedes compaction init time
  private final long watermarkTime;
  // Formatted partition name of the period preceding compaction init time
  private final String precedingTimeDatasetPartitionName;

  public CompactionWatermarkChecker(State state) {
    super(state);
    Instant initInstant = Instant.ofEpochMilli(state.getPropAsLong(CompactionSource.COMPACTION_INIT_TIME));
    ZonedDateTime initTime = ZonedDateTime.ofInstant(initInstant, zone);
    // Only the partition one granularity unit back is eligible for a watermark report
    ZonedDateTime precedingTime = TimeIterator.dec(initTime, granularity, 1);
    DateTimeFormatter partitionFormatter = DateTimeFormatter.ofPattern(state.getProp(TIME_FORMAT));
    precedingTimeDatasetPartitionName = partitionFormatter.format(precedingTime);
    watermarkTime = getWatermarkTimeMillis(initTime, granularity);
  }

  /**
   * Compute the watermark: the last millisecond before the start of the granularity
   * period that contains {@code compactionTime}.
   */
  @VisibleForTesting
  static long getWatermarkTimeMillis(ZonedDateTime compactionTime, TimeIterator.Granularity granularity) {
    ZonedDateTime periodStart = compactionTime.withSecond(0).with(ChronoField.MILLI_OF_SECOND, 0);
    if (granularity == TimeIterator.Granularity.HOUR) {
      periodStart = periodStart.withMinute(0);
    } else if (granularity == TimeIterator.Granularity.DAY) {
      periodStart = periodStart.withHour(0).withMinute(0);
    } else if (granularity == TimeIterator.Granularity.MONTH) {
      periodStart = periodStart.withDayOfMonth(1).withHour(0).withMinute(0);
    }
    // MINUTE (or any other granularity) keeps the start of the minute.
    // One millis back from the period start is the last millis of the previous period.
    return periodStart.minus(1, ChronoUnit.MILLIS).toInstant().toEpochMilli();
  }

  @Override
  public Result verify(FileSystemDataset dataset) {
    Result result = super.verify(dataset);
    // Watermarks are only reported for the partition immediately preceding init time
    if (!dataset.datasetRoot().toString().contains(precedingTimeDatasetPartitionName)) {
      return result;
    }
    // The compaction watermark is always published, regardless of audit counts
    this.state.setProp(COMPACTION_WATERMARK, watermarkTime);
    if (enabled && result.isSuccessful()) {
      log.info("Set dataset {} complete and compaction watermark {}", dataset.datasetRoot(), watermarkTime);
      // Audit counts matched, so the completion watermark advances as well
      this.state.setProp(COMPLETION_COMPACTION_WATERMARK, watermarkTime);
    } else {
      log.info("Set dataset {} compaction watermark {}", dataset.datasetRoot(), watermarkTime);
    }
    return result;
  }
}
| 1,795 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/DataCompletenessVerifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import org.apache.gobblin.compaction.dataset.Dataset;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.executors.ScalingThreadPoolExecutor;
/**
* A class for verifying data completeness of a {@link Dataset}.
*
* To verify data completeness, one should extend {@link AbstractRunner} and implement {@link AbstractRunner#call()}
* which returns a {@link Results} object. The (fully qualified) name of the class that extends {@link AbstractRunner}
* should be associated with property {@link ConfigurationKeys#COMPACTION_DATA_COMPLETENESS_VERIFICATION_CLASS}.
*
* @author Ziyang Liu
*/
public class DataCompletenessVerifier implements Closeable {

  private static final Logger LOG = LoggerFactory.getLogger(DataCompletenessVerifier.class);

  // Configuration properties related to data completeness verification.
  private static final String COMPACTION_COMPLETENESS_VERIFICATION_PREFIX = "compaction.completeness.verification.";
  private static final String COMPACTION_COMPLETENESS_VERIFICATION_CLASS =
      COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "class";
  private static final String COMPACTION_COMPLETENESS_VERIFICATION_THREAD_POOL_SIZE =
      COMPACTION_COMPLETENESS_VERIFICATION_PREFIX + "thread.pool.size";
  private static final int DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_THREAD_POOL_SIZE = 20;

  private final State props;
  private final int threadPoolSize;
  private final ListeningExecutorService exeSvc;
  private final Class<? extends AbstractRunner> runnerClass;

  /**
   * The given {@link State} object must specify property
   * {@link #COMPACTION_COMPLETENESS_VERIFICATION_CLASS}, and may optionally specify
   * {@link #COMPACTION_COMPLETENESS_VERIFICATION_THREAD_POOL_SIZE}.
   */
  public DataCompletenessVerifier(State props) {
    this.props = props;
    this.threadPoolSize = getDataCompletenessVerificationThreadPoolSize();
    this.exeSvc = getExecutorService();
    this.runnerClass = getRunnerClass();
  }

  /** Builds a scaling executor (0..threadPoolSize threads, 10s keep-alive) with logging decoration. */
  private ListeningExecutorService getExecutorService() {
    return ExecutorsUtils.loggingDecorator(
        ScalingThreadPoolExecutor.newScalingThreadPool(0, this.threadPoolSize, TimeUnit.SECONDS.toMillis(10)));
  }

  private int getDataCompletenessVerificationThreadPoolSize() {
    return this.props.getPropAsInt(COMPACTION_COMPLETENESS_VERIFICATION_THREAD_POOL_SIZE,
        DEFAULT_COMPACTION_COMPLETENESS_VERIFICATION_THREAD_POOL_SIZE);
  }

  /**
   * Loads the configured {@link AbstractRunner} subclass by name.
   * @throws IllegalArgumentException if the required class property is missing
   */
  @SuppressWarnings("unchecked")
  private Class<? extends AbstractRunner> getRunnerClass() {
    Preconditions.checkArgument(this.props.contains(COMPACTION_COMPLETENESS_VERIFICATION_CLASS),
        "Missing required property " + COMPACTION_COMPLETENESS_VERIFICATION_CLASS);
    try {
      return (Class<? extends AbstractRunner>) Class
          .forName(this.props.getProp(COMPACTION_COMPLETENESS_VERIFICATION_CLASS));
    } catch (Throwable t) {
      LOG.error("Failed to get data completeness verification class", t);
      // Throwables.propagate is deprecated: rethrow unchecked throwables as-is, wrap the rest.
      Throwables.throwIfUnchecked(t);
      throw new RuntimeException(t);
    }
  }

  /**
   * Verify data completeness for a set of {@link Dataset}s.
   *
   * @param datasets {@link Dataset}s to be verified.
   * @return A {@link ListenableFuture} of {@link Results} that contains the result of the verification.
   *         Callers can add listeners or callbacks to it.
   */
  public ListenableFuture<Results> verify(Iterable<Dataset> datasets) {
    return this.exeSvc.submit(getRunner(datasets));
  }

  /** Instantiates the configured runner via its (Iterable, State) constructor. */
  private AbstractRunner getRunner(Iterable<Dataset> datasets) {
    try {
      return this.runnerClass.getDeclaredConstructor(Iterable.class, State.class).newInstance(datasets, this.props);
    } catch (Throwable t) {
      LOG.error("Failed to instantiate data completeness verification class", t);
      // Throwables.propagate is deprecated: rethrow unchecked throwables as-is, wrap the rest.
      Throwables.throwIfUnchecked(t);
      throw new RuntimeException(t);
    }
  }

  /** Gracefully shuts down the executor, waiting for in-flight verifications. */
  @Override
  public void close() throws IOException {
    ExecutorsUtils.shutdownExecutorService(this.exeSvc, Optional.of(LOG));
  }

  /** Shuts down the executor immediately, without waiting for in-flight verifications. */
  public void closeNow() {
    ExecutorsUtils.shutdownExecutorService(this.exeSvc, Optional.of(LOG), 0, TimeUnit.NANOSECONDS);
  }

  /**
   * Results of data completeness verification for a set of datasets.
   */
  public static class Results implements Iterable<Results.Result> {

    private final Iterable<Result> results;

    public Results(Iterable<Result> results) {
      this.results = results;
    }

    @Override
    public Iterator<Results.Result> iterator() {
      return this.results.iterator();
    }

    /** Verification outcome for a single {@link Dataset}. */
    public static class Result {

      public enum Status {
        PASSED,
        FAILED;
      }

      private final Dataset dataset;
      private final Status status;

      /**
       * Data used to compute this result. A verification context is used to communicate to the caller how this {@link #status()}
       * for data completeness was derived.
       */
      private final Map<String, Object> verificationContext;

      public Result(Dataset dataset, Status status) {
        this.dataset = dataset;
        this.status = status;
        this.verificationContext = ImmutableMap.of();
      }

      public Result(Dataset dataset, Status status, Map<String, Object> verificationContext) {
        this.dataset = dataset;
        this.status = status;
        this.verificationContext = verificationContext;
      }

      public Dataset dataset() {
        return this.dataset;
      }

      public Status status() {
        return this.status;
      }

      public Map<String, Object> verificationContext() {
        return this.verificationContext;
      }
    }
  }

  /**
   * Runner class for data completeness verification. Subclasses should implement {@link AbstractRunner#call()}
   * which should contain the logic of data completeness verification and returns a {@link Results} object.
   */
  public static abstract class AbstractRunner implements Callable<Results> {

    protected final Iterable<Dataset> datasets;
    protected final State props;

    public AbstractRunner(Iterable<Dataset> datasets, State props) {
      this.datasets = datasets;
      this.props = props;
    }
  }
}
| 1,796 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/InputRecordCountHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.Collection;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.dataset.DatasetHelper;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.RecordCountProvider;
import org.apache.gobblin.util.recordcount.IngestionRecordCountProvider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import static org.apache.gobblin.compaction.event.CompactionSlaEventHelper.DUPLICATE_COUNT_TOTAL;
import static org.apache.gobblin.compaction.event.CompactionSlaEventHelper.EXEC_COUNT_TOTAL;
import static org.apache.gobblin.compaction.mapreduce.CompactorOutputCommitter.*;
/**
 * A class that helps to calculate, serialize and deserialize record counts. This works for Avro and ORC formats.
 *
 * By using {@link IngestionRecordCountProvider}, the default input file name should be in the format
 * {file_name}.{record_count}.{extension}. For example, given a file path: "/a/b/c/file.123.avro",
 * the record count will be 123.
 */
@Slf4j
public class InputRecordCountHelper {

  @Getter
  private final FileSystem fs;
  private final State state;
  // Parses record counts out of input file names, e.g. "file.123.avro" -> 123
  private final RecordCountProvider inputRecordCountProvider;
  private final String extensionName;

  @Deprecated
  public final static String RECORD_COUNT_FILE = "_record_count";
  public final static String STATE_FILE = "_state_file";

  /**
   * Constructor
   * @throws RuntimeException if the file system or the configured
   *         {@link RecordCountProvider} cannot be instantiated
   */
  public InputRecordCountHelper(State state) {
    try {
      this.fs = getSourceFileSystem(state);
      this.state = state;
      this.extensionName = state.getProp(COMPACTION_OUTPUT_EXTENSION, DEFAULT_COMPACTION_OUTPUT_EXTENSION);
      // Class.newInstance() is deprecated (it propagates undeclared checked exceptions);
      // invoke the no-arg constructor explicitly instead.
      this.inputRecordCountProvider = (RecordCountProvider) Class.forName(
          state.getProp(MRCompactor.COMPACTION_INPUT_RECORD_COUNT_PROVIDER,
              MRCompactor.DEFAULT_COMPACTION_INPUT_RECORD_COUNT_PROVIDER)).getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      throw new RuntimeException("Failed to instantiate " + InputRecordCountHelper.class.getName(), e);
    }
  }

  /**
   * Calculate record count at given paths
   * @param paths all paths where the record count are calculated
   * @return record count after parsing all files under given paths
   */
  public long calculateRecordCount(Collection<Path> paths) throws IOException {
    long sum = 0;
    for (Path path : paths) {
      // Only files with the configured output extension contribute to the count
      sum += inputRecordCountProvider.getRecordCount(
          DatasetHelper.getApplicableFilePaths(this.fs, path, Lists.newArrayList(extensionName)));
    }
    return sum;
  }

  /**
   * Load compaction state file
   */
  public State loadState(Path dir) throws IOException {
    return loadState(this.fs, dir);
  }

  /**
   * Load the compaction state file under {@code dir}; returns an empty {@link State}
   * when no state file exists yet.
   */
  @VisibleForTesting
  public static State loadState(FileSystem fs, Path dir) throws IOException {
    State state = new State();
    if (fs.exists(new Path(dir, STATE_FILE))) {
      try (FSDataInputStream inputStream = fs.open(new Path(dir, STATE_FILE))) {
        state.readFields(inputStream);
      }
    }
    return state;
  }

  /**
   * Save compaction state file
   */
  public void saveState(Path dir, State state) throws IOException {
    saveState(this.fs, dir, state);
  }

  /**
   * Save the compaction state file under {@code dir}, writing to a temp file first and
   * renaming over the final file to minimize the window of a partially-written state.
   */
  @VisibleForTesting
  public static void saveState(FileSystem fs, Path dir, State state) throws IOException {
    Path tmpFile = new Path(dir, STATE_FILE + ".tmp");
    Path newFile = new Path(dir, STATE_FILE);
    fs.delete(tmpFile, false);
    try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(tmpFile))) {
      state.write(dataOutputStream);
    }
    // Caution: We are deleting right before renaming because rename doesn't support atomic overwrite options from FileSystem API.
    fs.delete(newFile, false);
    fs.rename(tmpFile, newFile);
  }

  /**
   * Read record count from a specific directory.
   * File name is {@link InputRecordCountHelper#STATE_FILE}
   * @param dir directory where a state file is located
   * @return record count
   */
  public long readRecordCount(Path dir) throws IOException {
    return readRecordCount(this.fs, dir);
  }

  /**
   * Read record count from a specific directory.
   * File name is {@link InputRecordCountHelper#STATE_FILE}, with a fallback to the
   * legacy {@link InputRecordCountHelper#RECORD_COUNT_FILE} when the state file has no count.
   * @param fs file system in use
   * @param dir directory where a state file is located
   * @return record count, or 0 when neither file carries a count
   */
  @Deprecated
  public static long readRecordCount(FileSystem fs, Path dir) throws IOException {
    State state = loadState(fs, dir);
    if (!state.contains(CompactionSlaEventHelper.RECORD_COUNT_TOTAL)) {
      // Fall back to the legacy single-line record count file
      if (fs.exists(new Path(dir, RECORD_COUNT_FILE))) {
        try (BufferedReader br = new BufferedReader(
            new InputStreamReader(fs.open(new Path(dir, RECORD_COUNT_FILE)), Charsets.UTF_8))) {
          long count = Long.parseLong(br.readLine());
          return count;
        }
      } else {
        return 0;
      }
    } else {
      return Long.parseLong(state.getProp(CompactionSlaEventHelper.RECORD_COUNT_TOTAL));
    }
  }

  /**
   * Read execution count from a specific directory.
   * File name is {@link InputRecordCountHelper#STATE_FILE}
   * @param dir directory where a state file is located
   * @return execution count, 0 when absent
   */
  public long readExecutionCount(Path dir) throws IOException {
    return readCountHelper(dir, EXEC_COUNT_TOTAL);
  }

  /**
   * Read duplicate-record count from the state file under {@code dir}; 0 when absent.
   */
  public long readDuplicationCount(Path dir) throws IOException {
    return readCountHelper(dir, DUPLICATE_COUNT_TOTAL);
  }

  private long readCountHelper(Path dir, String countKeyName) throws IOException {
    State state = loadState(fs, dir);
    return Long.parseLong(state.getProp(countKeyName, "0"));
  }

  /**
   * Write record count to a specific directory.
   * File name is {@link InputRecordCountHelper#RECORD_COUNT_FILE}
   * @param fs file system in use
   * @param dir directory where a record file is located
   */
  @Deprecated
  public static void writeRecordCount(FileSystem fs, Path dir, long count) throws IOException {
    State state = loadState(fs, dir);
    state.setProp(CompactionSlaEventHelper.RECORD_COUNT_TOTAL, count);
    saveState(fs, dir, state);
  }

  /**
   * Resolve the (optionally throttled) source file system from the job state.
   */
  protected FileSystem getSourceFileSystem(State state) throws IOException {
    Configuration conf = HadoopUtils.getConfFromState(state);
    String uri = state.getProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI);
    return HadoopUtils.getOptionallyThrottledFileSystem(FileSystem.get(URI.create(uri), conf), state);
  }
}
| 1,797 |
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/CompactionVerifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.gobblin.dataset.Dataset;
/**
* An interface which represents a generic verifier for compaction
*/
public interface CompactionVerifier<D extends Dataset> {

  /**
   * Outcome of a verification: a success flag plus a human-readable reason when it failed.
   */
  @Getter
  @AllArgsConstructor
  class Result {
    boolean isSuccessful;  // true when the dataset passed this verification
    String failureReason;  // why verification failed; empty string on success
  }

  // Common configuration keys shared by all verifier implementations
  String COMPACTION_VERIFIER_PREFIX = "compaction-verifier-";
  String COMPACTION_VERIFICATION_TIMEOUT_MINUTES = "compaction.verification.timeoutMinutes";
  String COMPACTION_VERIFICATION_ITERATION_COUNT_LIMIT = "compaction.verification.iteration.countLimit";
  String COMPACTION_VERIFICATION_THREADS = "compaction.verification.threads";
  String COMPACTION_VERIFICATION_FAIL_REASON = "compaction.verification.fail.reason";

  /**
   * Verify the given dataset.
   * @param dataset dataset to verify
   * @return the verification {@link Result}
   */
  Result verify (D dataset);

  /**
   * @return true if a failed verification may be retried later
   */
  boolean isRetriable ();

  /**
   * @return a name identifying this verifier, used in configuration and reporting
   */
  String getName ();
}
0 | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction | Create_ds/gobblin/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/CompactionTimeRangeVerifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.verify;
import com.google.common.base.Splitter;
import java.util.List;
import java.util.regex.Pattern;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.gobblin.compaction.dataset.TimeBasedSubDirDatasetsFinder;
import org.apache.gobblin.compaction.event.CompactionSlaEventHelper;
import org.apache.gobblin.compaction.mapreduce.MRCompactor;
import org.apache.gobblin.compaction.parser.CompactionPathParser;
import org.apache.gobblin.compaction.source.CompactionSource;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
import org.apache.hadoop.fs.Path;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;
/**
 * A simple class which verifies that the current dataset belongs to a specific time range. Compaction
 * is skipped if the dataset is not in the correct time range.
 */
@Slf4j
@AllArgsConstructor
public class CompactionTimeRangeVerifier implements CompactionVerifier<FileSystemDataset> {

  public final static String COMPACTION_VERIFIER_TIME_RANGE = COMPACTION_VERIFIER_PREFIX + "time-range";

  protected State state;

  /**
   * Verify that the dataset's time partition falls inside the compactable window
   * (compactionStartTime - maxTimeAgo, compactionStartTime - minTimeAgo), and, when
   * configured, that enough time has elapsed since the dataset's last compaction.
   *
   * @param dataset the time-partitioned dataset to check
   * @return a successful {@link Result} when the dataset is eligible; a failed one with the reason otherwise
   */
  public Result verify(FileSystemDataset dataset) {
    final DateTime earliest;
    final DateTime latest;
    try {
      // Extract the time partition (folder time) and dataset name from the dataset path
      CompactionPathParser.CompactionParserResult result = new CompactionPathParser(state).parse(dataset);
      DateTime folderTime = result.getTime();
      DateTimeZone timeZone = DateTimeZone.forID(
          this.state.getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
      DateTime compactionStartTime =
          new DateTime(this.state.getPropAsLong(CompactionSource.COMPACTION_INIT_TIME), timeZone);
      // Lookback periods are written like "1m2d3h"; note 'm' denotes months here, not minutes
      PeriodFormatter formatter = new PeriodFormatterBuilder().appendMonths()
          .appendSuffix("m")
          .appendDays()
          .appendSuffix("d")
          .appendHours()
          .appendSuffix("h")
          .toFormatter();
      // Dataset name is like 'Identity/MemberAccount' or 'PageViewEvent'
      String datasetName = result.getDatasetName();

      // get earliest time: compaction start minus the (per-dataset) max lookback
      String maxTimeAgoStrList = this.state.getProp(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MAX_TIME_AGO,
          TimeBasedSubDirDatasetsFinder.DEFAULT_COMPACTION_TIMEBASED_MAX_TIME_AGO);
      String maxTimeAgoStr = getMatchedLookbackTime(datasetName, maxTimeAgoStrList,
          TimeBasedSubDirDatasetsFinder.DEFAULT_COMPACTION_TIMEBASED_MAX_TIME_AGO);
      Period maxTimeAgo = formatter.parsePeriod(maxTimeAgoStr);
      earliest = compactionStartTime.minus(maxTimeAgo);

      // get latest time: compaction start minus the (per-dataset) min lookback
      String minTimeAgoStrList = this.state.getProp(TimeBasedSubDirDatasetsFinder.COMPACTION_TIMEBASED_MIN_TIME_AGO,
          TimeBasedSubDirDatasetsFinder.DEFAULT_COMPACTION_TIMEBASED_MIN_TIME_AGO);
      String minTimeAgoStr = getMatchedLookbackTime(datasetName, minTimeAgoStrList,
          TimeBasedSubDirDatasetsFinder.DEFAULT_COMPACTION_TIMEBASED_MIN_TIME_AGO);
      Period minTimeAgo = formatter.parsePeriod(minTimeAgoStr);
      latest = compactionStartTime.minus(minTimeAgo);

      // get latest last run start time, we want to limit the duration between two compaction for the same dataset
      if (state.contains(TimeBasedSubDirDatasetsFinder.MIN_RECOMPACTION_DURATION)) {
        String minDurationStrList = this.state.getProp(TimeBasedSubDirDatasetsFinder.MIN_RECOMPACTION_DURATION);
        String minDurationStr = getMatchedLookbackTime(datasetName, minDurationStrList,
            TimeBasedSubDirDatasetsFinder.DEFAULT_MIN_RECOMPACTION_DURATION);
        Period minDurationTime = formatter.parsePeriod(minDurationStr);
        DateTime latestEligibleCompactTime = compactionStartTime.minus(minDurationTime);
        // The previous compaction's start time is persisted in the dataset's state file
        InputRecordCountHelper helper = new InputRecordCountHelper(state);
        State compactState = helper.loadState(new Path(result.getDstAbsoluteDir()));
        if (compactState.contains(CompactionSlaEventHelper.LAST_RUN_START_TIME)
            && compactState.getPropAsLong(CompactionSlaEventHelper.LAST_RUN_START_TIME)
            > latestEligibleCompactTime.getMillis()) {
          log.warn("Last compaction for {} is {}, which is not before latestEligibleCompactTime={}",
              dataset.datasetRoot(),
              new DateTime(compactState.getPropAsLong(CompactionSlaEventHelper.LAST_RUN_START_TIME), timeZone),
              latestEligibleCompactTime);
          return new Result(false,
              "Last compaction for " + dataset.datasetRoot() + " is not before" + latestEligibleCompactTime);
        }
      }

      // Eligible only when the folder time lies strictly inside (earliest, latest)
      if (earliest.isBefore(folderTime) && latest.isAfter(folderTime)) {
        log.debug("{} falls in the user defined time range", dataset.datasetRoot());
        return new Result(true, "");
      }
    } catch (Exception e) {
      log.error("{} cannot be verified because of {}", dataset.datasetRoot(), ExceptionUtils.getFullStackTrace(e));
      return new Result(false, e.toString());
    }
    return new Result(false, dataset.datasetRoot() + " is not in between " + earliest + " and " + latest);
  }

  /**
   * @return the configuration-facing name of this verifier
   */
  public String getName() {
    return COMPACTION_VERIFIER_TIME_RANGE;
  }

  /**
   * Time-range eligibility will not change by retrying within the same run.
   */
  public boolean isRetriable() {
    return false;
  }

  /**
   * Find the correct lookback time for a given dataset.
   *
   * @param datasetsAndLookBacks Lookback string for multiple datasets. Each dataset is represented by a Regex pattern.
   * Multiple 'dataset and lookback' pairs are joined by semi-colon. A default
   * lookback time can be given without any Regex prefix. If nothing is found, we will use
   * {@param sysDefaultLookback}.
   *
   * Example Format: [Regex1]:[T1];[Regex2]:[T2];[DEFAULT_T];[Regex3]:[T3]
   * Ex. Identity.*:1d2h;22h;BizProfile.BizCompany:3h (22h is default lookback time)
   *
   * @param sysDefaultLookback If the user doesn't specify any lookback time for {@param datasetName}, and there is no default
   * lookback time inside {@param datasetsAndLookBacks}, this system default lookback time is returned.
   *
   * @param datasetName A description of the dataset without time partition information. Example 'Identity/MemberAccount' or 'PageViewEvent'
   * @return The lookback time matched with the given dataset.
   */
  public static String getMatchedLookbackTime(String datasetName, String datasetsAndLookBacks,
      String sysDefaultLookback) {
    String defaultLookback = sysDefaultLookback;
    for (String entry : Splitter.on(";").trimResults().omitEmptyStrings().splitToList(datasetsAndLookBacks)) {
      List<String> datasetAndLookbackTime = Splitter.on(":").trimResults().omitEmptyStrings().splitToList(entry);
      if (datasetAndLookbackTime.size() == 1) {
        // Bare entry (no regex): overrides the system default lookback
        defaultLookback = datasetAndLookbackTime.get(0);
      } else if (datasetAndLookbackTime.size() == 2) {
        String regex = datasetAndLookbackTime.get(0);
        // First regex entry matching the dataset name wins
        if (Pattern.compile(regex).matcher(datasetName).find()) {
          return datasetAndLookbackTime.get(1);
        }
      } else {
        log.error("Invalid format in {}, {} cannot find its lookback time", datasetsAndLookBacks, datasetName);
      }
    }
    return defaultLookback;
  }
}
| 1,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.