index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime/api/SpecConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.Future;
import org.apache.commons.lang3.tuple.Pair;
/**
 * The receiving side of the communication channel between the GaaS Orchestrator and a
 * {@link SpecExecutor}: each executor uses a SpecConsumer to receive the {@link Spec}s it
 * should act on. Implementations choose the concrete transport (e.g. Kafka, REST, etc.).
 *
 * @param <V> the payload type carried alongside each {@link SpecExecutor.Verb}
 */
public interface SpecConsumer<V> {
/**
 * List of newly changed {@link Spec}s for execution on {@link SpecExecutor}.
 * Each entry pairs the requested action ({@link SpecExecutor.Verb}) with its payload.
 */
Future<? extends List<Pair<SpecExecutor.Verb, V>>> changedSpecs();
/**
 * A commit hook allowing the consumer to checkpoint state after successful consumption of
 * a {@link Spec}. The default implementation is a no-op.
 *
 * @param spec the spec whose consumption is being acknowledged
 * @throws IOException if checkpointing fails
 */
default void commit(Spec spec) throws IOException {
return;
}
} | 2,100 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime/api/TaskEventMetadataGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.util.Map;
import org.apache.gobblin.configuration.State;
/**
 * For generating additional event metadata to associate with Task Events.
 */
public interface TaskEventMetadataGenerator {
/**
 * Generate a map of additional metadata for the specified event name.
 *
 * @param taskState the task {@link State} from which the metadata is derived
 * @param eventName the event name used to determine which additional metadata should be emitted
 * @return {@link Map} with the additional metadata
 */
Map<String, String> getMetadata(State taskState, String eventName);
}
| 2,101 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime/api/SpecExecutor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.net.URI;
import java.util.Map;
import java.util.concurrent.Future;
import com.typesafe.config.Config;
/**
 * Defines a representation of a JobSpec executor in GaaS.
 * A triplet of {@code <technology, location, communication mechanism>} uniquely defines a SpecExecutor,
 * e.g. {@code <Lumos, Holdem, Rest>} represents an executor that moves data via Lumos, runs on Holdem
 * and can be reached over REST.
 */
public interface SpecExecutor {
/** A URI uniquely identifying the SpecExecutor. */
URI getUri();
/** Human-readable description of the SpecExecutor. */
Future<String> getDescription();
/** SpecExecutor config as a typesafe config object. */
Future<Config> getConfig();
/**
 * SpecExecutor attributes: the location of the SpecExecutor and its type (the technology used for
 * data movement, e.g. gobblin-standalone/gobblin-cluster).
 * SpecExecutor attributes are supposed to be read-only once instantiated.
 */
Config getAttrs();
/** Health of the SpecExecutor. */
Future<String> getHealth();
/** Source : Destination processing capabilities of the SpecExecutor. */
Future<? extends Map<ServiceNode, ServiceNode>> getCapabilities();
/** A communication socket for sending specs to the assigned physical executors, paired with
 * a {@link SpecConsumer} on the physical executor side. */
Future<? extends SpecProducer<Spec>> getProducer();
/** Property key for a {@link Verb} — presumably used when a spec is transported; confirm at call sites. */
String VERB_KEY = "Verb";
/** The action being requested for a spec. */
public static enum Verb {
ADD(1, "add"),
UPDATE(2, "update"),
DELETE(3, "delete"),
UNKNOWN(4, "unknown"),
CANCEL(5, "cancel");
// Stable numeric id of the verb.
private int _id;
// Lower-case name of the verb.
private String _verb;
Verb(int id, String verb) {
_id = id;
_verb = verb;
}
public int getId() {
return _id;
}
public String getVerb() {
return _verb;
}
}
} | 2,102 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime/api/ServiceNode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import com.typesafe.config.Config;
/**
 * Abstraction of a node in a {@link SpecExecutor}.
 * 'Service' here refers to 'Service' in GaaS; it is not necessarily related to a Service interface.
 */
public interface ServiceNode {
/**
 * @return The name of the node.
 * It should be the unique identifier of a {@link ServiceNode}.
 */
String getNodeName();
/**
 * @return The attributes of a {@link ServiceNode}, as a typesafe {@link Config}.
 */
Config getNodeProps();
/**
 * @return whether the node is valid to use.
 */
boolean isNodeEnabled();
} | 2,103 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/runtime/api/Spec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.api;
import java.io.Serializable;
import java.net.URI;
/**
 * A basic interface for an object with a {@link URI}, version, and description.
 * Specs are {@link Serializable} so they can be shipped between services.
 */
public interface Spec extends Serializable {
/** @return the URI uniquely identifying this spec */
URI getUri();
/** @return the version of this spec */
String getVersion();
/** @return a human-readable description of this spec */
String getDescription();
}
| 2,104 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/annotation/Alias.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.gobblin.util.ClassAliasResolver;
/**
 * Denotes that a class has an alias.
 * Using {@link ClassAliasResolver#resolve(String)}, an alias can be resolved to the canonical name
 * of the annotated class.
 */
@Documented @Retention(value=RetentionPolicy.RUNTIME) @Target(value=ElementType.TYPE)
public @interface Alias {
/**
 * Alias for the annotated class.
 */
public String value();
/**
 * Optional human-readable description of the alias.
 */
public String description() default "";
}
| 2,105 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/annotation/Alpha.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Denotes an API that is still under active development and can undergo significant changes.
 * Retained only in class files (not at runtime) since it is purely informational.
 */
@Documented @Retention(value=RetentionPolicy.CLASS) @Target(value=ElementType.TYPE)
public @interface Alpha {
}
| 2,106 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/annotation/Beta.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Denotes an API that is still under development and can change, but the changes should be
 * relatively minor. Retained only in class files (not at runtime) since it is purely informational.
 */
@Documented @Retention(value=RetentionPolicy.CLASS) @Target(value=ElementType.TYPE)
public @interface Beta {
}
| 2,107 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/annotation/Stable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.annotation;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Denotes an API that is stable and should not change.
 * Retained only in class files (not at runtime) since it is purely informational.
 */
@Documented @Retention(value=RetentionPolicy.CLASS) @Target(value=ElementType.TYPE)
public @interface Stable {
}
| 2,108 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/testing/AssertWithBackoff.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.testing;
import java.util.concurrent.TimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
/**
 * A helper class to perform a check for a condition with a back-off. Primary use of this is when a
 * test needs to wait for something asynchronous to happen, say a service to start.
 *
 * <p>Instances are configured fluently, e.g.
 * {@code AssertWithBackoff.create().timeoutMs(5000).assertTrue(cond, "service up")}.</p>
 **/
public class AssertWithBackoff {
  /** The max time in milliseconds to wait for the condition to become true. */
  private long timeoutMs = 30 * 1000;
  /** A logger to use for logging waiting, results, etc. */
  private Logger log = LoggerFactory.getLogger(AssertWithBackoff.class);
  /** The number to multiply the sleep by after each condition failure. */
  private Optional<Double> backoffFactor = Optional.<Double> absent();
  /** The max time to sleep between condition failures. */
  private Optional<Long> maxSleepMs = Optional.<Long> absent();

  /**
   * A {@link Predicate} adapter that compares the value produced by a function against an
   * expected value, logging each observed value at debug level.
   */
  public class EqualsCheck<T> implements Predicate<Void> {
    private final Predicate<T> eqToExpected;
    private final String message;
    private final Function<Void, T> actual;

    /**
     * @param actual function that computes the current value
     * @param expected the value the computation is expected to eventually produce
     * @param message a debugging message describing what is being checked
     */
    public EqualsCheck(Function<Void, T> actual, T expected, String message) {
      this.eqToExpected = Predicates.equalTo(expected);
      this.message = message;
      this.actual = actual;
    }

    @Override
    public boolean apply(Void input) {
      T currentValue = this.actual.apply(input);
      getLogger().debug("checking '" + this.message + "': " + currentValue);
      return this.eqToExpected.apply(currentValue);
    }
  }

  /** Creates a new instance. */
  public static AssertWithBackoff create() {
    return new AssertWithBackoff();
  }

  /**
   * Set the max time in milliseconds to wait for the condition to become true.
   * Also pins maxSleepMs and backoffFactor to auto-derived values based on this timeout, unless
   * they were already set explicitly.
   */
  public AssertWithBackoff timeoutMs(long assertTimeoutMs) {
    this.timeoutMs = assertTimeoutMs;
    if (!this.maxSleepMs.isPresent()) {
      this.maxSleepMs = Optional.of(getAutoMaxSleep());
    }
    if (!this.backoffFactor.isPresent()) {
      this.backoffFactor = Optional.of(getAutoBackoffFactor());
    }
    return this;
  }

  /** The max time in milliseconds to wait for the condition to become true. */
  public long getTimeoutMs() {
    return this.timeoutMs;
  }

  /** Set the max time to sleep between condition failures. */
  public AssertWithBackoff maxSleepMs(long maxSleepMs) {
    this.maxSleepMs = Optional.of(maxSleepMs);
    return this;
  }

  /** The max time to sleep between condition failures. */
  public long getMaxSleepMs() {
    return this.maxSleepMs.or(getAutoMaxSleep());
  }

  /** Set the number to multiply the sleep by after each condition failure. */
  public AssertWithBackoff backoffFactor(double backoffFactor) {
    this.backoffFactor = Optional.of(backoffFactor);
    return this;
  }

  /** The number to multiply the sleep by after each condition failure. */
  public double getBackoffFactor() {
    return this.backoffFactor.or(getAutoBackoffFactor());
  }

  /** Set the logger to use for logging waiting, results, etc. */
  public AssertWithBackoff logger(Logger log) {
    this.log = log;
    return this;
  }

  /** The logger to use for logging waiting, results, etc. */
  public Logger getLogger() {
    return this.log;
  }

  /** Default max sleep: a third of the configured timeout. */
  private long getAutoMaxSleep() {
    return this.timeoutMs / 3;
  }

  /** Default backoff factor: log base 5 of the max sleep. */
  private double getAutoBackoffFactor() {
    return Math.log(getMaxSleepMs()) / Math.log(5);
  }

  /**
   * Performs a check for a condition with a back-off, using this instance's settings.
   *
   * @param condition the condition to wait for
   * @param message a message to print while waiting for the condition
   * @throws TimeoutException if the condition did not become true in the configured time budget
   * @throws InterruptedException if the thread gets interrupted while waiting
   */
  public void assertTrue(Predicate<Void> condition, String message) throws TimeoutException, InterruptedException {
    AssertWithBackoff.assertTrue(condition, getTimeoutMs(), message, getLogger(), getBackoffFactor(), getMaxSleepMs());
  }

  /**
   * A convenience method for {@link #assertTrue(Predicate, String)} to keep checking a
   * certain value until it becomes equal to an expected value.
   *
   * @param actual a function that computes the actual value
   * @param expected the expected value
   * @param message a debugging message
   **/
  public <T> void assertEquals(Function<Void, T> actual, T expected, String message)
      throws TimeoutException, InterruptedException {
    assertTrue(new EqualsCheck<>(actual, expected, message), message);
  }

  /**
   * Performs a check for a condition with a back-off. Primary use of this is when a test
   * needs to wait for something asynchronous to happen, say a service to start.
   *
   * @param condition the condition to wait for
   * @param assertTimeoutMs the max time in milliseconds to wait for the condition to become true
   * @param message the message to print while waiting for the condition
   * @param log the logger to use for logging waiting, results
   * @param backoffFactor the number to multiply the sleep by after each condition failure
   * @param maxSleepMs the max time to sleep between condition failures
   * @throws TimeoutException if the condition did not become true in the specified time budget
   * @throws InterruptedException if the assert gets interrupted while waiting for the condition
   *         to become true
   */
  public static void assertTrue(Predicate<Void> condition, long assertTimeoutMs, String message, Logger log,
      double backoffFactor, long maxSleepMs) throws TimeoutException, InterruptedException {
    long startTimeMs = System.currentTimeMillis();
    long endTimeMs = startTimeMs + assertTimeoutMs;
    long currentSleepMs = 0;
    boolean done = false;
    try {
      while (!done && System.currentTimeMillis() < endTimeMs) {
        done = condition.apply(null);
        if (!done) {
          currentSleepMs = computeRetrySleep(currentSleepMs, backoffFactor, maxSleepMs, endTimeMs);
          log.debug("Condition check for '" + message + "' failed; sleeping for " + currentSleepMs + "ms");
          Thread.sleep(currentSleepMs);
        }
      }
      // one last try
      if (!done && !condition.apply(null)) {
        throw new TimeoutException("Timeout waiting for condition '" + message + "'.");
      }
    } catch (TimeoutException | InterruptedException e) {
      // pass through
      throw e;
    } catch (RuntimeException e) {
      throw new RuntimeException("Exception checking condition '" + message + "':" + e, e);
    }
  }

  /**
   * Computes a new sleep for a retry of a condition check.
   *
   * @param currentSleepMs the last sleep duration in milliseconds
   * @param backoffFactor the factor to multiply currentSleepMs by
   * @param maxSleepMs the maximum allowed sleep in milliseconds
   * @param endTimeMs the end time based on timeout for the condition to become true
   * @return the new sleep time, which will not exceed maxSleepMs, will not overshoot endTimeMs,
   *         and is never negative (a negative value would make {@link Thread#sleep(long)} throw)
   */
  public static long computeRetrySleep(long currentSleepMs, double backoffFactor, long maxSleepMs, long endTimeMs) {
    long newSleepMs = Math.round(currentSleepMs * backoffFactor);
    if (newSleepMs <= currentSleepMs) {
      // Prevent infinite loops when the factor is <= 1 or rounding leaves the value unchanged.
      newSleepMs = currentSleepMs + 1;
    }
    long currentTimeMs = System.currentTimeMillis();
    newSleepMs = Math.min(Math.min(newSleepMs, maxSleepMs), endTimeMs - currentTimeMs);
    // Clamp at 0: endTimeMs may already have passed between the caller's deadline check and now,
    // and Thread.sleep() rejects negative arguments with IllegalArgumentException. Sleeping 0ms
    // lets the caller's loop observe the expired deadline and raise TimeoutException as intended.
    return Math.max(newSleepMs, 0);
  }
}
| 2,109 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/WriterWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
/**
 * An interface that should be used by all {@link DataWriter}s that don't actually write records but instead wrap a
 * different {@link DataWriter}. Mostly useful to find all such wrappers when needed and to enforce changes in contracts.
 */
public abstract class WriterWrapper<D> implements DataWriter<D> {
// write(record) is deliberately final and always throws: wrappers are presumably expected to
// handle records via writeEnvelope(...) instead (DataWriter's default writeEnvelope delegates
// to write, so subclasses must override writeEnvelope) — NOTE(review): confirm intent.
@Override
public final void write(D record) throws IOException {
throw new UnsupportedOperationException();
}
}
| 2,110 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/FsWriterMetrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Collection;
import org.codehaus.jackson.map.ObjectMapper;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * Metrics that can be stored in workUnitState by filesystem based writers.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Getter
@Slf4j
public class FsWriterMetrics {
  // Shared, reusable mapper: constructing an ObjectMapper per call is expensive, and this
  // instance is used only with its default (unmodified) configuration, which is thread-safe.
  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

  // Note: all of these classes are @NoArgsConstructor because Jackson requires
  // an empty object to construct from JSON
  @Data
  @NoArgsConstructor
  @AllArgsConstructor
  @Getter
  public static class FileInfo {
    // Name of the output file this entry describes.
    String fileName;
    // Number of records written to that file.
    long numRecords;
  }

  // Id of the writer that produced these metrics.
  String writerId;
  // Identifies the partition the writer was assigned.
  PartitionIdentifier partitionInfo;
  // Per-file record counts produced by the writer.
  Collection<FileInfo> fileInfos;

  /**
   * Serialize this object to its JSON representation.
   *
   * @return the JSON string, or "{}" if serialization fails (best effort; the error is logged)
   */
  public String toJson() {
    try {
      return OBJECT_MAPPER.writeValueAsString(this);
    } catch (IOException e) {
      log.error("IOException serializing FsWriterMetrics as JSON! Returning no metrics", e);
      return "{}";
    }
  }

  /**
   * Instantiate an object of this class from its JSON representation.
   *
   * @param in JSON, typically produced by {@link #toJson()}
   * @throws IOException if the input cannot be parsed
   */
  public static FsWriterMetrics fromJson(String in) throws IOException {
    return OBJECT_MAPPER.readValue(in, FsWriterMetrics.class);
  }
}
| 2,111 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/Destination.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.gobblin.configuration.State;
/**
 * Describes where a writer should deliver its output.
 * HDFS, Kafka, MySQL, Teradata and Postgres destinations are currently modeled.
 *
 * @author Yinan Li
 */
public class Destination {

  /**
   * The set of destination technologies a writer can target.
   */
  public static enum DestinationType {
    HDFS,
    KAFKA,
    MYSQL,
    TERADATA,
    POSTGRES
  }

  /** Which destination technology this instance describes. */
  private final DestinationType destinationType;

  /** Configuration properties associated with the destination. */
  private final State destinationProperties;

  // Instances are created only through the static factory below.
  private Destination(DestinationType type, State properties) {
    this.destinationType = type;
    this.destinationProperties = properties;
  }

  /**
   * Create a new {@link Destination} instance.
   *
   * @param type destination type
   * @param properties destination properties
   * @return newly created {@link Destination} instance
   */
  public static Destination of(DestinationType type, State properties) {
    return new Destination(type, properties);
  }

  /**
   * Get the destination type.
   *
   * @return destination type
   */
  public DestinationType getType() {
    return this.destinationType;
  }

  /**
   * Get configuration properties for the destination type.
   *
   * @return configuration properties
   */
  public State getProperties() {
    return this.destinationProperties;
  }
}
| 2,112 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/DataWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.Closeable;
import java.io.Flushable;
import java.io.IOException;
import org.apache.gobblin.dataset.Descriptor;
import org.apache.gobblin.records.ControlMessageHandler;
import org.apache.gobblin.records.FlushControlMessageHandler;
import org.apache.gobblin.stream.RecordEnvelope;
/**
 * An interface for data writers.
 *
 * <p>
 * Generally, one work unit has a dedicated {@link DataWriter} instance, which processes only one dataset.
 * Implementations must override either {@link #write(Object)} or {@link #writeEnvelope(RecordEnvelope)};
 * both defaults below assume the other is provided.
 * </p>
 *
 * @param <D> data record type
 *
 * @author Yinan Li
 */
public interface DataWriter<D> extends Closeable, Flushable {
/**
 * Write a data record.
 *
 * <p>The default implementation throws {@link UnsupportedOperationException}; implementations
 * that do not override this must override {@link #writeEnvelope(RecordEnvelope)} instead.</p>
 *
 * @param record data record to write
 * @throws IOException if there is anything wrong writing the record
 */
default void write(D record) throws IOException {
throw new UnsupportedOperationException();
}
/**
 * Commit the data written.
 * This method is expected to be called at most once during the lifetime of a writer.
 *
 * @throws IOException if there is anything wrong committing the output
 */
public void commit()
throws IOException;
/**
 * Cleanup context/resources.
 *
 * @throws IOException if there is anything wrong doing cleanup.
 */
public void cleanup()
throws IOException;
/**
 * Get the number of records written.
 *
 * @return number of records written
 */
public long recordsWritten();
/**
 * Get the number of bytes written.
 *
 * @return number of bytes written
 */
public long bytesWritten()
throws IOException;
/**
 * The method should return a {@link Descriptor} that represents what the writer is writing.
 *
 * <p>
 * Note that this information might be useless and discarded by a
 * {@link org.apache.gobblin.publisher.DataPublisher}, which determines the final form of dataset or partition.
 * </p>
 *
 * @return a {@link org.apache.gobblin.dataset.DatasetDescriptor} if it writes files of a dataset or
 *         a {@link org.apache.gobblin.dataset.PartitionDescriptor} if it writes files of a dataset partition or
 *         {@code null} if it is useless
 */
default Descriptor getDataDescriptor() {
return null;
}
/**
 * Write the input {@link RecordEnvelope}. By default, just call {@link #write(Object)} and then ack.
 * DataWriters that override this method must acknowledge the recordEnvelope once the write has been
 * acknowledged by the destination system.
 */
default void writeEnvelope(RecordEnvelope<D> recordEnvelope) throws IOException {
write(recordEnvelope.getRecord());
recordEnvelope.ack();
}
/**
 * Default handler calls flush on this object when a {@link org.apache.gobblin.stream.FlushControlMessage} is received.
 *
 * @return a {@link ControlMessageHandler}
 */
default ControlMessageHandler getMessageHandler() {
return new FlushControlMessageHandler(this);
}
/**
 * Flush data written by the writer. By default, does nothing.
 * This method is expected to be called multiple times during the lifetime of a writer.
 *
 * @throws IOException if flushing fails
 */
default void flush() throws IOException {
}
}
| 2,113 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/DataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import org.apache.gobblin.writer.initializer.NoopWriterInitializer;
import org.apache.gobblin.writer.initializer.WriterInitializer;
/**
 * A fluent builder used to construct a {@link DataWriter}.
 *
 * <p>Callers configure the destination, output format, writer id / attempt id, schema and
 * fork-branch information through the chained setters below, then call {@link #build()}.
 * Lombok's {@code @Getter} generates read accessors for all fields; {@code @Slf4j} provides
 * the {@code log} used by the debug tracing in each setter.
 *
 * @param <S> schema type
 * @param <D> data record type
 *
 * @author Yinan Li
 */
@Getter
@Slf4j
public abstract class DataWriterBuilder<S, D> {
  // Fields are protected (not private) so concrete builder subclasses can read them in build().
  protected Destination destination;
  protected String writerId;
  protected WriterOutputFormat format;
  protected S schema;
  protected int branches;
  protected int branch;
  protected String writerAttemptId;
  /**
   * Tell the writer the destination to write to.
   *
   * @param destination destination to write to
   * @return this {@link DataWriterBuilder} instance
   */
  public DataWriterBuilder<S, D> writeTo(Destination destination) {
    this.destination = destination;
    log.debug("For destination: {}", destination);
    return this;
  }
  /**
   * Tell the writer the output format of type {@link WriterOutputFormat}.
   *
   * @param format output format of the writer
   * @return this {@link DataWriterBuilder} instance
   */
  public DataWriterBuilder<S, D> writeInFormat(WriterOutputFormat format) {
    this.format = format;
    log.debug("writeInFormat : {}", this.format);
    return this;
  }
  /**
   * Give the writer a unique ID.
   *
   * @param writerId unique writer ID
   * @return this {@link DataWriterBuilder} instance
   */
  public DataWriterBuilder<S, D> withWriterId(String writerId) {
    this.writerId = writerId;
    log.debug("withWriterId : {}", this.writerId);
    return this;
  }
  /**
   * Tell the writer the data schema.
   *
   * @param schema data schema
   * @return this {@link DataWriterBuilder} instance
   */
  public DataWriterBuilder<S, D> withSchema(S schema) {
    this.schema = schema;
    log.debug("withSchema : {}", this.schema);
    return this;
  }
  /**
   * Tell the writer how many fork branches are being used.
   *
   * @param branches the number of branches
   * @return this {@link DataWriterBuilder} instance
   */
  public DataWriterBuilder<S, D> withBranches(int branches) {
    this.branches = branches;
    log.debug("With branches: {}", this.branches);
    return this;
  }
  /**
   * Tell the writer which fork branch it is associated with.
   *
   * @param branch branch index
   * @return this {@link DataWriterBuilder} instance
   */
  public DataWriterBuilder<S, D> forBranch(int branch) {
    this.branch = branch;
    log.debug("For branch: {}", this.branch);
    return this;
  }
  /**
   * Attempt Id for this writer. There could be two duplicate writers with the same
   * {@link #writerId}; their writerAttemptId should be different.
   *
   * @param attemptId attempt id distinguishing duplicate writers
   * @return this {@link DataWriterBuilder} instance
   */
  public DataWriterBuilder<S, D> withAttemptId(String attemptId) {
    this.writerAttemptId = attemptId;
    log.debug("With writerAttemptId: {}", this.writerAttemptId);
    return this;
  }
  /**
   * Returns the {@link WriterInitializer} to run before writing; defaults to a shared no-op.
   * Subclasses may override to perform destination-specific setup.
   */
  public WriterInitializer getInitializer(State state, WorkUnitStream workUnits, int branches, int branchId) {
    return NoopWriterInitializer.INSTANCE;
  }
  /**
   * Build a {@link DataWriter} from the configured state.
   *
   * @throws IOException if there is anything wrong building the writer
   * @return the built {@link DataWriter}
   */
  public abstract DataWriter<D> build() throws IOException;
}
| 2,114 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/FluentDataWriterBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
/**
 * A helper class to help create fluent {@link DataWriterBuilder}s. To make the Java generics
 * magic work, classes should declare their builders as
 * {@code MyDataWriterBuilder<S, D, MyDataWriterBuilder>} and use "return {@link #typedSelf()}"
 * instead of "return this" in their setters.
 *
 * @param <S> schema type
 * @param <D> data record type
 * @param <B> the concrete (self) builder type, so setters can return it for chaining
 */
public abstract class FluentDataWriterBuilder<S, D, B extends FluentDataWriterBuilder<S, D, B>>
    extends DataWriterBuilder<S, D> {

  /**
   * Returns this builder as its concrete self type {@code B}.
   * The cast is safe as long as subclasses follow the self-type declaration pattern above.
   */
  @SuppressWarnings("unchecked")
  protected B typedSelf() {
    return (B) this;
  }

  @Override
  public B writeTo(Destination destination) {
    super.writeTo(destination);
    return typedSelf();
  }

  @Override
  public B writeInFormat(WriterOutputFormat format) {
    super.writeInFormat(format);
    return typedSelf();
  }

  // Bug fix: the four overrides below previously declared DataWriterBuilder<S, D> as their
  // return type, which broke fluent chaining (callers lost the B self-type after the first
  // call to e.g. withWriterId). Narrowing the return type to B is a covariant, backward
  // compatible change for all existing callers.
  @Override
  public B withWriterId(String writerId) {
    super.withWriterId(writerId);
    return typedSelf();
  }

  @Override
  public B withSchema(S schema) {
    super.withSchema(schema);
    return typedSelf();
  }

  @Override
  public B withBranches(int branches) {
    super.withBranches(branches);
    return typedSelf();
  }

  @Override
  public B forBranch(int branch) {
    super.forBranch(branch);
    return typedSelf();
  }
}
| 2,115 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/PartitionIdentifier.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Value object identifying one writer partition: a partition key together with the fork
 * branch it belongs to. Lombok generates accessors, equals/hashCode/toString and both an
 * all-args and a no-args constructor.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class PartitionIdentifier {
  String partitionKey; // opaque partition key; exact semantics are set by the caller — not visible here
  int branchId;        // index of the fork branch this partition belongs to
}
| 2,116 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/WatermarkStorage.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import java.io.IOException;
import java.util.Map;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.source.extractor.CheckpointableWatermark;
/**
 * Storage for {@link CheckpointableWatermark}s. Used in streaming execution to persist
 * per-source-partition progress and to recover it later.
 */
@Alpha
public interface WatermarkStorage {
  /**
   * Commits a batch of watermarks to storage.
   *
   * @param watermarks the watermarks to persist
   * @throws IOException if the commit fails
   */
  void commitWatermarks(Iterable<CheckpointableWatermark> watermarks) throws IOException;
  /**
   * Retrieves previously committed watermarks.
   *
   * @param watermarkClass the specific class that corresponds to the watermark expected
   * @param sourcePartitions a list of source partitions for whom we're retrieving committed watermarks
   * @return a map of String -&gt; CheckpointableWatermark; the key corresponds to the source field
   *         in the CheckpointableWatermark and belongs to the list of source partitions passed in
   * @throws IOException if the lookup fails
   */
  Map<String, CheckpointableWatermark> getCommittedWatermarks(Class<? extends CheckpointableWatermark> watermarkClass,
      Iterable<String> sourcePartitions) throws IOException;
}
| 2,117 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/WriterOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer;
import org.apache.commons.lang3.StringUtils;
/**
 * The output formats a writer can produce, each carrying its file-name extension
 * ({@link #OTHER} carries an empty extension).
 *
 * @author Yinan Li
 */
public enum WriterOutputFormat {
  AVRO("avro"),
  PARQUET("parquet"),
  PROTOBUF("protobuf"),
  JSON("json"),
  ORC("orc"),
  CSV("csv"),
  TXT("txt"),
  OTHER("");

  // File-name extension associated with this format; empty for OTHER.
  private final String extension;

  WriterOutputFormat(String extension) {
    this.extension = extension;
  }

  /**
   * Returns the file-name extension for this enum constant.
   *
   * @return the extension, without a leading dot; the empty string for {@link #OTHER}
   */
  public String getExtension() {
    return extension;
  }
}
| 2,118 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/initializer/NoopWriterInitializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.initializer;
import lombok.ToString;
import org.apache.gobblin.initializer.Initializer;
import org.apache.gobblin.initializer.NoopInitializer;
/**
 * Singleton {@link WriterInitializer} that delegates to the shared {@link NoopInitializer}
 * instance; used when a writer needs no initialization work.
 */
@ToString
public class NoopWriterInitializer implements WriterInitializer {
  // Stateless shared singleton; the private constructor below enforces single-instance use.
  public static final NoopWriterInitializer INSTANCE = new NoopWriterInitializer();
  private final Initializer initializer = NoopInitializer.INSTANCE;
  private NoopWriterInitializer() {}
  @Override
  public void initialize() {
    this.initializer.initialize(); // delegate (NoopInitializer — presumably a no-op; confirm in its source)
  }
  @Override
  public void close() {
    this.initializer.close(); // delegate
  }
}
| 2,119 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/writer/initializer/WriterInitializer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.writer.initializer;
import org.apache.gobblin.initializer.Initializer;
public interface WriterInitializer extends Initializer {
} | 2,120 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/recordaccess/RecordAccessorException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
/**
 * Root of the exception hierarchy thrown by RecordAccessors. These are unchecked rather
 * than checked exceptions because they resemble {@link IndexOutOfBoundsException} or
 * {@link ClassCastException} in spirit.
 */
public class RecordAccessorException extends RuntimeException {

  /** @param message description of the accessor failure */
  public RecordAccessorException(String message) {
    super(message);
  }

  /**
   * @param message description of the accessor failure
   * @param cause underlying error that triggered this exception
   */
  public RecordAccessorException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 2,121 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/recordaccess/RecordAccessorProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
/**
 * A ServiceProvider that can construct a {@link RecordAccessor} based on an object's type.
 */
public interface RecordAccessorProvider {
  /**
   * Builds and returns a RecordAccessor that can manipulate the object passed in.
   *
   * @param obj the record object to wrap
   * @return a suitable accessor, or {@code null} if this provider cannot satisfy the request
   */
  RecordAccessor recordAccessorForObject(Object obj);
}
| 2,122 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/recordaccess/IncorrectTypeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
/**
 * Thrown when a get() or set() call on an Accessor touches a record field whose
 * underlying type does not match the requested one.
 */
public class IncorrectTypeException extends RecordAccessorException {

  /** @param message description of the type mismatch */
  public IncorrectTypeException(String message) {
    super(message);
  }

  /**
   * @param message description of the type mismatch
   * @param cause underlying error (e.g. a failed cast)
   */
  public IncorrectTypeException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 2,123 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/recordaccess/RecordAccessor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
import java.util.List;
import java.util.Map;
import org.apache.gobblin.annotation.Alpha;
/**
 * Interface for an object that can introspect and manipulate a record. Ideal use case
 * is for converter logic; converters can specify the operation to do while the
 * RecordAccessor actually carries it out.
 *
 * In general, nested records should be accessible with a '.' separating record name:
 * eg given
 *  "foo": {
 *    "bar: 1
 *  }
 *
 * "foo.bar" should refer to integer 1.
 *
 * This interface will likely grow over time with more accessors/setters as well as
 * schema manipulation (for example: rename field or delete field operations).
 */
@Alpha
public interface RecordAccessor {
  /*
   * Access a particular field of a record.
   *
   * If a record is nested, the "." may be used to access
   * pieces of the nested record.
   *
   * If a record contains an array, ".0", ".1", etc may be used to index into the particular
   * element of the array.
   *
   * In the getMulti() variants, a ".*" may be used to access all elements of an array or map.
   * A map of key -> value is returned.
   *
   * For example, given the following JSON record:
   * {
   *   "nested": {
   *     "key": "val"
   *   },
   *   "nestedArr": [
   *     { "key": "val0" },
   *     { "key": "val1" }
   *   ]
   * }
   *
   * getAsString("nested.key") should return "val".
   * getAsString("nestedArr.1.key") should return "val1".
   * getAsString("nestedArr.*.key") will throw an exception since the '*' refers to multiple values.
   * getMultiAsString("nestedArr.*.key") should return the map (nestedArr.0.key->val0, nestedArr.1.key->val1).
   *
   * Getters should return null if the field does not exist; may throw
   * IncorrectTypeException if the underlying types do not match. Getters should not
   * try to do any type coercion -- for example, getAsInt for a value that is the string "1"
   * should throw an IncorrectTypeException.
   *
   * The get*Generic functions should return the following object types:
   * String, Integer, Long, or a List of them.
   */
  Map<String, String> getMultiAsString(String fieldName);
  Map<String, Integer> getMultiAsInt(String fieldName);
  Map<String, Long> getMultiAsLong(String fieldName);
  Map<String, Object> getMultiGeneric(String fieldName);
  String getAsString(String fieldName);
  Integer getAsInt(String fieldName);
  Long getAsLong(String fieldName);
  Object getGeneric(String fieldName);
  /*
   * Set new values for an object. Should throw a FieldDoesNotExistException runtime exception if fieldName
   * is not present in the object's schema or an IncorrectTypeException if the underlying type does not match.
   */
  void set(String fieldName, String value);
  void set(String fieldName, Integer value);
  void set(String fieldName, Long value);
  void setStringArray(String fieldName, List<String> value);
  void setToNull(String fieldName);
}
| 2,124 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/recordaccess/FieldDoesNotExistException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.recordaccess;
/**
 * Thrown when an accessor references a field name that is not present in the
 * record's schema.
 */
public class FieldDoesNotExistException extends RecordAccessorException {

  /** @param message description of the missing field */
  public FieldDoesNotExistException(String message) {
    super(message);
  }

  /**
   * @param message description of the missing field
   * @param cause underlying error that revealed the missing field
   */
  public FieldDoesNotExistException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 2,125 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/service/ServiceConfigKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.service;
import java.time.Duration;
import org.apache.gobblin.annotation.Alpha;
@Alpha
public class ServiceConfigKeys {
public static final String GOBBLIN_SERVICE_PREFIX = "gobblin.service.";
public static final String GOBBLIN_SERVICE_JOB_SCHEDULER_LISTENER_CLASS = "org.apache.gobblin.service.modules.scheduler.GobblinServiceJobScheduler";
public static final String GOBBLIN_ORCHESTRATOR_LISTENER_CLASS = "org.apache.gobblin.service.modules.orchestration.Orchestrator";
// Gobblin Service Manager Keys
public static final String GOBBLIN_SERVICE_TOPOLOGY_CATALOG_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "topologyCatalog.enabled";
public static final String GOBBLIN_SERVICE_FLOW_CATALOG_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "flowCatalog.enabled";
public static final String GOBBLIN_SERVICE_SCHEDULER_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "scheduler.enabled";
public static final String GOBBLIN_SERVICE_INSTANCE_NAME = GOBBLIN_SERVICE_PREFIX + "instance.name";
public static final String GOBBLIN_SERVICE_RESTLI_SERVER_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "restliServer.enabled";
public static final String GOBBLIN_SERVICE_TOPOLOGY_SPEC_FACTORY_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "topologySpecFactory.enabled";
public static final String GOBBLIN_SERVICE_GIT_CONFIG_MONITOR_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "gitConfigMonitor.enabled";
public static final String GOBBLIN_SERVICE_DAG_MANAGER_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "dagManager.enabled";
public static final boolean DEFAULT_GOBBLIN_SERVICE_DAG_MANAGER_ENABLED = false;
public static final String GOBBLIN_SERVICE_JOB_STATUS_MONITOR_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "jobStatusMonitor.enabled";
public static final String GOBBLIN_SERVICE_WARM_STANDBY_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "warmStandby.enabled";
public static final String GOBBLIN_SERVICE_MULTI_ACTIVE_SCHEDULER_ENABLED_KEY = GOBBLIN_SERVICE_PREFIX + "multiActiveScheduler.enabled";
// If true, will mark up/down d2 servers on leadership so that all requests will be routed to the leader node
public static final String GOBBLIN_SERVICE_D2_ONLY_ANNOUNCE_LEADER = GOBBLIN_SERVICE_PREFIX + "d2.onlyAnnounceLeader";
// Helix / ServiceScheduler Keys
public static final String HELIX_CLUSTER_NAME_KEY = GOBBLIN_SERVICE_PREFIX + "helix.cluster.name";
public static final String ZK_CONNECTION_STRING_KEY = GOBBLIN_SERVICE_PREFIX + "zk.connection.string";
public static final String HELIX_INSTANCE_NAME_OPTION_NAME = "helix_instance_name";
public static final String HELIX_INSTANCE_NAME_KEY = GOBBLIN_SERVICE_PREFIX + "helixInstanceName";
public static final String GOBBLIN_SERVICE_FLOWSPEC = GOBBLIN_SERVICE_PREFIX + "flowSpec";
public static final String GOBBLIN_SERVICE_FLOWGRAPH_CLASS_KEY = GOBBLIN_SERVICE_PREFIX + "flowGraph.class";
public static final String GOBBLIN_SERVICE_FLOWGRAPH_HELPER_KEY = GOBBLIN_SERVICE_PREFIX + "flowGraphHelper.class";
// Helix message sub types for FlowSpec
public static final String HELIX_FLOWSPEC_ADD = "FLOWSPEC_ADD";
public static final String HELIX_FLOWSPEC_REMOVE = "FLOWSPEC_REMOVE";
public static final String HELIX_FLOWSPEC_UPDATE = "FLOWSPEC_UPDATE";
// Flow Compiler Keys
public static final String GOBBLIN_SERVICE_FLOWCOMPILER_CLASS_KEY = GOBBLIN_SERVICE_PREFIX + "flowCompiler.class";
public static final String COMPILATION_SUCCESSFUL = "compilation.successful";
public static final String COMPILATION_RESPONSE = "compilation.response";
// Flow Catalog Keys
public static final String GOBBLIN_SERVICE_FLOW_CATALOG_LOCAL_COMMIT = GOBBLIN_SERVICE_PREFIX + "flowCatalog.localCommit";
public static final boolean DEFAULT_GOBBLIN_SERVICE_FLOW_CATALOG_LOCAL_COMMIT = true;
// Job Level Keys
public static final String WORK_UNIT_SIZE = GOBBLIN_SERVICE_PREFIX + "work.unit.size";
public static final String TOTAL_WORK_UNIT_SIZE = GOBBLIN_SERVICE_PREFIX + "total.work.unit.size";
public static final String TOTAL_WORK_UNIT_COUNT = GOBBLIN_SERVICE_PREFIX + "total.work.unit.count";
/**
* Directly use canonical class name here to avoid introducing additional dependency here.
*/
public static final String DEFAULT_GOBBLIN_SERVICE_FLOWCOMPILER_CLASS =
"org.apache.gobblin.service.modules.flow.IdentityFlowToJobSpecCompiler";
// Flow specific Keys
public static final String FLOW_SOURCE_IDENTIFIER_KEY = "gobblin.flow.sourceIdentifier";
public static final String FLOW_DESTINATION_IDENTIFIER_KEY = "gobblin.flow.destinationIdentifier";
// Topology Factory Keys (for overall factory)
public static final String TOPOLOGY_FACTORY_PREFIX = "topologySpecFactory.";
public static final String DEFAULT_TOPOLOGY_SPEC_FACTORY =
"org.apache.gobblin.service.modules.topology.ConfigBasedTopologySpecFactory";
public static final String TOPOLOGYSPEC_FACTORY_KEY = TOPOLOGY_FACTORY_PREFIX + "class";
public static final String TOPOLOGY_FACTORY_TOPOLOGY_NAMES_KEY = TOPOLOGY_FACTORY_PREFIX + "topologyNames";
// Topology Factory Keys (for individual topologies)
public static final String TOPOLOGYSPEC_DESCRIPTION_KEY = "description";
public static final String TOPOLOGYSPEC_VERSION_KEY = "version";
public static final String TOPOLOGYSPEC_URI_KEY = "uri";
public static final String DEFAULT_SPEC_EXECUTOR =
"org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor";
public static final String SPEC_EXECUTOR_KEY = "specExecutorInstance.class";
public static final String EDGE_SECURITY_KEY = "edge.secured";
public static final String DATA_MOVEMENT_AUTHORIZER_CLASS = "dataMovementAuthorizer.class";
// Template Catalog Keys
public static final String TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY = GOBBLIN_SERVICE_PREFIX + "templateCatalogs.fullyQualifiedPath";
public static final String TEMPLATE_CATALOGS_CLASS_KEY = GOBBLIN_SERVICE_PREFIX + "templateCatalogs.class";
// Keys related to user-specified policy on route selection.
// Undesired connection to form an executable JobSpec.
// Formatted as a String list, each entry contains a string in the format of "Source1:Sink1:URI",
// which indicates that data movement from source1 to sink1 with specific URI of specExecutor should be avoided.
public static final String POLICY_BASED_BLOCKED_CONNECTION = GOBBLIN_SERVICE_PREFIX + "blockedConnections";
// Comma separated list of nodes that is blacklisted. Names put here will become the nodeName which is the ID of a serviceNode.
public static final String POLICY_BASED_BLOCKED_NODES = GOBBLIN_SERVICE_PREFIX + "blockedNodes";
// Complete path of how the data movement is executed from source to sink.
// Formatted as a String, each hop separated by comma, from source to sink in order.
public static final String POLICY_BASED_DATA_MOVEMENT_PATH = GOBBLIN_SERVICE_PREFIX + "fullDataPath";
public static final String ATTRS_PATH_IN_CONFIG = "executorAttrs";
// Gobblin Service Graph Representation Topology related Keys
public static final String NODE_SECURITY_KEY = "node.secured";
// True means node is by default secure.
public static final String DEFAULT_NODE_SECURITY = "true";
// Policy related configuration Keys
public static final String DEFAULT_SERVICE_POLICY = "static";
public static final String SERVICE_POLICY_NAME = GOBBLIN_SERVICE_PREFIX + "servicePolicy";
// Logging
public static final String GOBBLIN_SERVICE_LOG4J_CONFIGURATION_FILE = "log4j-service.properties";
// GaaS listening port
public static final String SERVICE_PORT = GOBBLIN_SERVICE_PREFIX + "port";
// Service identity and externally visible URL prefix
public static final String SERVICE_NAME = GOBBLIN_SERVICE_PREFIX + "serviceName";
public static final String SERVICE_URL_PREFIX = GOBBLIN_SERVICE_PREFIX + "serviceUrlPrefix";
// Prefix for config to ServiceBasedAppLauncher that will only be used by GaaS and not orchestrated jobs
public static final String GOBBLIN_SERVICE_APP_LAUNCHER_PREFIX = "gobblinServiceAppLauncher";
// Flow concurrency config key to control default service behavior.
public static final String FLOW_CONCURRENCY_ALLOWED = GOBBLIN_SERVICE_PREFIX + "flowConcurrencyAllowed";
public static final Boolean DEFAULT_FLOW_CONCURRENCY_ALLOWED = true;
// Leader-related configuration (key names suggest leader election/forwarding -- confirm against launcher usage)
public static final String LEADER_URL = "leaderUrl";
public static final String FORCE_LEADER = GOBBLIN_SERVICE_PREFIX + "forceLeader";
public static final boolean DEFAULT_FORCE_LEADER = false;
// Quota manager implementation class and its backing store tables
public static final String QUOTA_MANAGER_CLASS = GOBBLIN_SERVICE_PREFIX + "quotaManager.class";
public static final String DEFAULT_QUOTA_MANAGER = "org.apache.gobblin.service.modules.orchestration.InMemoryUserQuotaManager";
public static final String QUOTA_STORE_DB_TABLE_KEY = "quota.store.db.table";
public static final String DEFAULT_QUOTA_STORE_DB_TABLE = "quota_table";
public static final String RUNNING_DAG_IDS_DB_TABLE_KEY = "running.dag.ids.store.db.table";
public static final String DEFAULT_RUNNING_DAG_IDS_DB_TABLE = "running_dag_ids";
// Group Membership authentication service
public static final String GROUP_OWNERSHIP_SERVICE_CLASS = GOBBLIN_SERVICE_PREFIX + "groupOwnershipService.class";
public static final String DEFAULT_GROUP_OWNERSHIP_SERVICE = "org.apache.gobblin.service.NoopGroupOwnershipService";
// Maximum lengths for flow/job identifier components
public static final int MAX_FLOW_NAME_LENGTH = 128; // defined in FlowId.pdl
public static final int MAX_FLOW_GROUP_LENGTH = 128; // defined in FlowId.pdl
public static final int MAX_FLOW_EXECUTION_ID_LENGTH = 13; // length of flowExecutionId which is epoch timestamp
public static final int MAX_JOB_NAME_LENGTH = 374;
public static final int MAX_JOB_GROUP_LENGTH = 374;
// State-store / dag-store naming: table suffix and key separator characters
public static final String STATE_STORE_TABLE_SUFFIX = "gst";
public static final String STATE_STORE_KEY_SEPARATION_CHARACTER = ".";
public static final String DAG_STORE_KEY_SEPARATION_CHARACTER = "_";
// Service database connection
public static final String SERVICE_DB_URL_KEY = GOBBLIN_SERVICE_PREFIX + "db.url";
public static final String SERVICE_DB_USERNAME = GOBBLIN_SERVICE_PREFIX + "db.username";
public static final String SERVICE_DB_PASSWORD = GOBBLIN_SERVICE_PREFIX + "db.password";
public static final String SERVICE_DB_MAX_CONNECTIONS = GOBBLIN_SERVICE_PREFIX + "db.maxConnections";
public static final String SERVICE_DB_MAX_CONNECTION_LIFETIME = GOBBLIN_SERVICE_PREFIX + "db.maxConnectionLifetime";
// Mysql-based issues repository
public static final String MYSQL_ISSUE_REPO_PREFIX = GOBBLIN_SERVICE_PREFIX + "issueRepo.mysql.";
public static final String MYSQL_ISSUE_REPO_CLEANUP_INTERVAL = MYSQL_ISSUE_REPO_PREFIX + "cleanupInterval";
public static final Duration DEFAULT_MYSQL_ISSUE_REPO_CLEANUP_INTERVAL = Duration.ofHours(1);
public static final String MYSQL_ISSUE_REPO_MAX_ISSUES_TO_KEEP = MYSQL_ISSUE_REPO_PREFIX + "maxIssuesToKeep";
// 10 million issues retained by default
public static final long DEFAULT_MYSQL_ISSUE_REPO_MAX_ISSUES_TO_KEEP = 10 * 1000 * 1000;
public static final String MYSQL_ISSUE_REPO_DELETE_ISSUES_OLDER_THAN =
MYSQL_ISSUE_REPO_PREFIX + "deleteIssuesOlderThan";
public static final Duration DEFAULT_MYSQL_ISSUE_REPO_DELETE_ISSUES_OLDER_THAN = Duration.ofDays(30);
// In-memory issue repository
public static final String MEMORY_ISSUE_REPO_PREFIX = GOBBLIN_SERVICE_PREFIX + "issueRepo.memory.";
public static final String MEMORY_ISSUE_REPO_MAX_CONTEXT_COUNT = MEMORY_ISSUE_REPO_PREFIX + "maxContextCount";
public static final int DEFAULT_MEMORY_ISSUE_REPO_MAX_CONTEXT_COUNT = 100;
public static final String MEMORY_ISSUE_REPO_MAX_ISSUE_PER_CONTEXT = MEMORY_ISSUE_REPO_PREFIX + "maxIssuesPerContext";
public static final int DEFAULT_MEMORY_ISSUE_REPO_MAX_ISSUE_PER_CONTEXT= 20;
// Issue repository implementation class
public static final String ISSUE_REPO_CLASS = GOBBLIN_SERVICE_PREFIX + "issueRepo.class";
}
| 2,126 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/metadata/GlobalMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metadata;
import org.apache.gobblin.fork.CopyHelper;
import org.apache.gobblin.fork.CopyNotSupportedException;
import org.apache.gobblin.fork.Copyable;
import com.google.common.base.Optional;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
* Global metadata
* @param <S> schema type
*/
@AllArgsConstructor(access=AccessLevel.PRIVATE)
@EqualsAndHashCode
@Builder
public class GlobalMetadata<S> implements Copyable<GlobalMetadata<S>> {
@Getter
private S schema;
@Override
public GlobalMetadata<S> copy() throws CopyNotSupportedException {
if (CopyHelper.isCopyable(schema)) {
return new GlobalMetadata((S)CopyHelper.copy(schema));
}
throw new CopyNotSupportedException("Type is not copyable: " + schema.getClass().getName());
}
/**
* Builder that takes in an input {@GlobalMetadata} to use as a base.
* @param inputMetadata input metadata
* @param outputSchema output schema to set in the builder
* @param <SI> input schema type
* @param <SO> output schema type
* @return builder
*/
public static <SI, SO> GlobalMetadataBuilder<SO> builderWithInput(GlobalMetadata<SI> inputMetadata, Optional<SO> outputSchema) {
GlobalMetadataBuilder<SO> builder = (GlobalMetadataBuilder<SO>) builder();
if (outputSchema.isPresent()) {
builder.schema(outputSchema.get());
}
return builder;
}
}
| 2,127 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/metadata/MetadataMerger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.metadata;
import org.apache.gobblin.writer.FsWriterMetrics;
/**
 * Interface for an object that can merge metadata from several work units together.
 * @param <T> Type of the metadata record that will be merged
 */
public interface MetadataMerger<T> {
/**
 * Process a metadata record, merging it with all previously processed records.
 * @param metadata Record to process
 */
void update(T metadata);
/**
 * Process a metrics record, merging it with all previously processed records.
 * @param metrics writer metrics to fold into the merged result
 */
void update(FsWriterMetrics metrics);
/**
 * Get a metadata record that is a representation of all records passed into update().
 * @return the merged metadata record
 */
T getMergedMetadata();
}
| 2,128 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/ack/HierarchicalAckable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.ack;
import java.io.Closeable;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.collect.ImmutableList;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
/**
 * An {@link Ackable} used to ack other {@link Ackable}s when a set of children ackables are all acknowledged. This is
 * useful when forking a record, as we don't want to ack the original record until all children have been acked.
 *
 * Usage:
 * HierarchicalAckable ackable = new HierarchicalAckable(list_of_parent_ackables);
 * Ackable childAckable = ackable.newChildAckable();
 * ackable.close(); // Must close HierarchicalAckable to indicate no more children will be created.
 * childAckable.ack(); // When acking all children, parents will be acked.
 */
@RequiredArgsConstructor
public class HierarchicalAckable implements Closeable {

  /** Ackables to notify exactly once after close() and resolution of every child. */
  private final List<Ackable> parentAckables;
  /** Number of children created but not yet acked/nacked. */
  private final AtomicInteger remainingCallbacks = new AtomicInteger();
  /** Failure causes collected from nacked children. */
  private final ConcurrentLinkedQueue<Throwable> throwables = new ConcurrentLinkedQueue<>();
  /** Set by close(); no further children may be created afterwards. */
  private volatile boolean closed = false;
  // Guarded by "this": ensures the parents are notified at most once even if maybeAck()
  // is invoked multiple times after the terminal condition becomes true (e.g. when the
  // last two children resolve concurrently and each calls maybeAck()).
  private boolean done = false;

  /**
   * @return A new child {@link Ackable} that must be acked before parents are acked.
   * @throws IllegalStateException if this object has already been closed
   */
  // Synchronized to close a race with close(): without mutual exclusion, close() could run
  // between the closed-check and the counter increment, acking the parents while this child
  // is still outstanding (and notifying them a second time once the child resolves).
  public synchronized Ackable newChildAckable() {
    if (this.closed) {
      throw new IllegalStateException(HierarchicalAckable.class.getSimpleName() + " is already closed.");
    }
    this.remainingCallbacks.incrementAndGet();
    return new ChildAckable();
  }

  /**
   * Indicates no more children will be created.
   */
  @Override
  public synchronized void close() {
    this.closed = true;
    maybeAck();
  }

  // Notifies the parents (nack if any child failed, ack otherwise) once this object is
  // closed and every child has resolved. The "done" flag makes the notification once-only.
  private synchronized void maybeAck() {
    if (this.done || this.remainingCallbacks.get() != 0 || !this.closed) {
      return;
    }
    this.done = true;
    if (!this.throwables.isEmpty()) {
      ChildrenFailedException exc = new ChildrenFailedException(ImmutableList.copyOf(this.throwables));
      for (Ackable ackable : this.parentAckables) {
        ackable.nack(exc);
      }
    } else {
      for (Ackable ackable : this.parentAckables) {
        ackable.ack();
      }
    }
  }

  /** Child handle; acking or nacking it decrements the outstanding-children count exactly once. */
  private class ChildAckable implements Ackable {
    private volatile boolean acked = false;

    @Override
    public synchronized void ack() {
      if (this.acked) {
        return;
      }
      this.acked = true;
      HierarchicalAckable.this.remainingCallbacks.decrementAndGet();
      maybeAck();
    }

    @Override
    public synchronized void nack(Throwable error) {
      if (this.acked) {
        return;
      }
      this.acked = true;
      // Record the failure before decrementing so a concurrent terminal maybeAck()
      // (triggered by another child) cannot observe an empty throwable queue.
      HierarchicalAckable.this.throwables.add(error);
      HierarchicalAckable.this.remainingCallbacks.decrementAndGet();
      maybeAck();
    }
  }

  /**
   * Indicates that at least one of the children {@link Ackable}s was nacked.
   */
  public static class ChildrenFailedException extends Exception {
    /** Causes gathered from every nacked child. */
    @Getter
    private final ImmutableList<Throwable> failureCauses;

    private ChildrenFailedException(ImmutableList<Throwable> failureCauses) {
      super("Some child ackables failed.");
      this.failureCauses = failureCauses;
    }
  }
}
| 2,129 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/ack/Ackable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.ack;
/**
* An interface for entities that can be acked
*/
public interface Ackable {
/**
* Acknowledge this entity as a success.
*/
void ack();
/**
* Mark this entity as failed to process.
*/
default void nack(Throwable error) {
// do nothing by default
}
Ackable NoopAckable = new Ackable() {
@Override
public void ack() {}
public void nack(Throwable error) {}
};
}
| 2,130 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/org/apache/gobblin/ack/BasicAckableForTesting.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.ack;
/**
 * A trivial {@link Ackable} for tests: counts ack/nack invocations and remembers the most
 * recent failure cause passed to {@link #nack(Throwable)}.
 */
public class BasicAckableForTesting implements Ackable {
  // Number of times ack() has been invoked.
  public int acked = 0;
  // Number of times nack(Throwable) has been invoked.
  public int nacked = 0;
  // The error passed to the most recent nack() call, if any.
  public Throwable throwable;

  @Override
  public void ack() {
    this.acked += 1;
  }

  @Override
  public void nack(Throwable error) {
    this.throwable = error;
    this.nacked += 1;
  }
}
| 2,131 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/configuration/ImmutableWorkUnitState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.configuration;
/***
 * Shim layer for org.apache.gobblin.configuration.ImmutableWorkUnitState
 * Preserves the pre-Apache "gobblin" package name for backwards compatibility.
 */
public class ImmutableWorkUnitState extends org.apache.gobblin.configuration.ImmutableWorkUnitState {
/**
 * Delegates to the relocated parent class.
 * @param workUnitState state to wrap
 */
public ImmutableWorkUnitState(WorkUnitState workUnitState) {
super(workUnitState);
}
}
| 2,132 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/configuration/WorkUnitState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.configuration;
import org.apache.gobblin.source.workunit.WorkUnit;
/***
 * Shim layer for org.apache.gobblin.configuration.WorkUnitState
 * Preserves the pre-Apache "gobblin" package name; every constructor delegates to the relocated parent class.
 */
public class WorkUnitState extends org.apache.gobblin.configuration.WorkUnitState {
/**
 * Default constructor used for deserialization.
 */
public WorkUnitState() {
super();
}
/**
 * Delegates to the relocated parent class.
 * @param workUnit work unit this state belongs to
 */
@Deprecated
public WorkUnitState(WorkUnit workUnit) {
super(workUnit);
}
/**
 * Delegates to the relocated parent class.
 * @param workUnit work unit this state belongs to
 * @param jobState job-level state
 */
public WorkUnitState(WorkUnit workUnit, State jobState) {
super(workUnit, jobState);
}
}
| 2,133 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/configuration/State.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.configuration;
import java.util.Properties;
/***
 * Shim layer for org.apache.gobblin.configuration.State
 * Preserves the pre-Apache "gobblin" package name; every constructor delegates to the relocated parent class.
 */
public class State extends org.apache.gobblin.configuration.State {
/** Creates an empty state. */
public State() {
super();
}
/**
 * Delegates to the relocated parent class.
 * @param properties initial properties
 */
public State(Properties properties) {
super(properties);
}
/**
 * Delegates to the relocated parent class.
 * @param otherState state used to initialize this one
 */
public State(State otherState) {
super(otherState);
}
}
| 2,134 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/configuration/SourceState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.configuration;
import java.util.Map;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
/***
 * Shim layer for org.apache.gobblin.configuration.SourceState
 * Preserves the pre-Apache "gobblin" package name; constructors delegate to the relocated parent class.
 */
public class SourceState extends org.apache.gobblin.configuration.SourceState {

  /** Identity function that widens the shim {@link WorkUnitState} to its relocated supertype. */
  private static final Function<WorkUnitState, org.apache.gobblin.configuration.WorkUnitState> IDENTITY_WIDENING =
      new Function<WorkUnitState, org.apache.gobblin.configuration.WorkUnitState>() {
        @Override
        public org.apache.gobblin.configuration.WorkUnitState apply(WorkUnitState input) {
          return input;
        }
      };

  /**
   * Default constructor.
   */
  public SourceState() {
    super();
  }

  /** Delegates to the relocated parent class, widening each previous work-unit state. */
  public SourceState(State properties, Iterable<WorkUnitState> prevWorkUnitStates) {
    super(properties, adaptWorkUnitStates(prevWorkUnitStates));
  }

  /** Delegates to the relocated parent class, widening each previous work-unit state. */
  public SourceState(State properties, Map<String, ? extends SourceState> previousDatasetStatesByUrns,
      Iterable<WorkUnitState> previousWorkUnitStates) {
    super(properties, previousDatasetStatesByUrns, adaptWorkUnitStates(previousWorkUnitStates));
  }

  /** Views each shim {@link WorkUnitState} as its org.apache supertype via Guava's lazy transform. */
  private static Iterable<org.apache.gobblin.configuration.WorkUnitState> adaptWorkUnitStates(Iterable<WorkUnitState> prevWorkUnitStates) {
    return Iterables.transform(prevWorkUnitStates, IDENTITY_WIDENING);
  }
}
| 2,135 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/workunit/Extract.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.workunit;
import gobblin.configuration.SourceState;
/***
 * Shim layer for org.apache.gobblin.source.workunit.Extract
 * Preserves the pre-Apache "gobblin" package name; constructors delegate to the relocated parent class.
 */
public class Extract extends org.apache.gobblin.source.workunit.Extract {

  /** Mirror of {@link org.apache.gobblin.source.workunit.Extract.TableType}, kept for backwards compatibility. */
  public enum TableType {
    SNAPSHOT_ONLY,
    SNAPSHOT_APPEND,
    APPEND_ONLY
  }

  /** Delegates to the relocated parent class. */
  @Deprecated
  public Extract(SourceState state, TableType type, String namespace, String table) {
    super(state, adaptTableType(type), namespace, table);
  }

  /** Delegates to the relocated parent class. */
  public Extract(TableType type, String namespace, String table) {
    super(adaptTableType(type), namespace, table);
  }

  /** Copy constructor delegating to the relocated parent class. */
  public Extract(Extract extract) {
    super(extract);
  }

  /**
   * Maps the shim {@link TableType} to the same-named org.apache enum constant.
   * Mapping by name keeps the two enums in lock-step: a constant added to the shim without a
   * same-named counterpart fails fast instead of being silently coerced to APPEND_ONLY, as the
   * previous switch/default did. Behavior is unchanged for the three existing constants
   * (a null argument still throws NullPointerException).
   */
  private static org.apache.gobblin.source.workunit.Extract.TableType adaptTableType(TableType type) {
    return org.apache.gobblin.source.workunit.Extract.TableType.valueOf(type.name());
  }
}
| 2,136 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/workunit/ImmutableWorkUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.workunit;
/***
 * Shim layer for org.apache.gobblin.source.workunit.ImmutableWorkUnit
 * Preserves the pre-Apache "gobblin" package name for backwards compatibility.
 */
public class ImmutableWorkUnit extends org.apache.gobblin.source.workunit.ImmutableWorkUnit {
/**
 * Delegates to the relocated parent class.
 * @param workUnit work unit to wrap
 */
public ImmutableWorkUnit(WorkUnit workUnit) {
super(workUnit);
}
}
| 2,137 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/workunit/MultiWorkUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.workunit;
/***
 * Shim layer for org.apache.gobblin.source.workunit.MultiWorkUnit
 * Preserves the pre-Apache "gobblin" package name for backwards compatibility.
 */
public class MultiWorkUnit extends org.apache.gobblin.source.workunit.MultiWorkUnit {
/** Deprecated no-arg constructor; delegates to the relocated parent class. */
@Deprecated
public MultiWorkUnit() {
super();
}
}
| 2,138 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/workunit/WorkUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.workunit;
import gobblin.configuration.SourceState;
import gobblin.source.extractor.WatermarkInterval;
/***
 * Shim layer for org.apache.gobblin.source.workunit.WorkUnit
 * Preserves the pre-Apache "gobblin" package name; every constructor delegates to the relocated parent class.
 */
public class WorkUnit extends org.apache.gobblin.source.workunit.WorkUnit {
/** Deprecated no-arg constructor; delegates to the relocated parent class. */
@Deprecated
public WorkUnit() {
super();
}
/**
 * Delegates to the relocated parent class.
 * @param state source state
 * @param extract extract this work unit belongs to
 */
@Deprecated
public WorkUnit(SourceState state, Extract extract) {
super(state, extract);
}
/**
 * Delegates to the relocated parent class.
 * @param state source state
 * @param extract extract this work unit belongs to
 * @param watermarkInterval low/high watermark interval for this work unit
 */
@Deprecated
public WorkUnit(SourceState state, Extract extract, WatermarkInterval watermarkInterval) {
super(state, extract, watermarkInterval);
}
/**
 * Delegates to the relocated parent class.
 * @param extract extract this work unit belongs to
 */
public WorkUnit(Extract extract) {
super(extract);
}
/**
 * Copy constructor delegating to the relocated parent class.
 * @param other work unit to copy
 */
@Deprecated
public WorkUnit(WorkUnit other) {
super(other);
}
}
| 2,139 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/workunit/ImmutableExtract.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.workunit;
import gobblin.configuration.SourceState;
/***
 * Shim layer for org.apache.gobblin.source.workunit.ImmutableExtract
 * Preserves the pre-Apache "gobblin" package name; constructors delegate to the relocated parent class.
 */
public class ImmutableExtract extends org.apache.gobblin.source.workunit.ImmutableExtract {

  /** Delegates to the relocated parent class, translating the shim table-type enum. */
  public ImmutableExtract(SourceState state, gobblin.source.workunit.Extract.TableType type, String namespace, String table) {
    super(state, adaptTableType(type), namespace, table);
  }

  /** Delegates to the relocated parent class. */
  public ImmutableExtract(Extract extract) {
    super(extract);
  }

  /**
   * Maps the shim enum to the same-named org.apache enum constant.
   * Mapping by name keeps the two enums in lock-step: a constant added to the shim without a
   * same-named counterpart fails fast instead of being silently coerced to APPEND_ONLY, as the
   * previous switch/default did. Behavior is unchanged for the three existing constants
   * (a null argument still throws NullPointerException).
   */
  private static org.apache.gobblin.source.workunit.Extract.TableType adaptTableType(Extract.TableType type) {
    return org.apache.gobblin.source.workunit.Extract.TableType.valueOf(type.name());
  }
}
| 2,140 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/extractor/Watermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.extractor;
/***
 * Shim layer for org.apache.gobblin.source.extractor.Watermark
 * Extends the relocated interface so code written against the pre-Apache package still compiles.
 */
public interface Watermark extends org.apache.gobblin.source.extractor.Watermark {
}
| 2,141 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/extractor/WatermarkInterval.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.extractor;
/***
 * Shim layer for org.apache.gobblin.source.extractor.WatermarkInterval
 * Preserves the pre-Apache "gobblin" package name for backwards compatibility.
 */
public class WatermarkInterval extends org.apache.gobblin.source.extractor.WatermarkInterval {
/**
 * Delegates to the relocated parent class.
 * @param lowWatermark low watermark of the interval
 * @param expectedHighWatermark expected high watermark of the interval
 */
public WatermarkInterval(Watermark lowWatermark, Watermark expectedHighWatermark) {
super(lowWatermark, expectedHighWatermark);
}
}
| 2,142 |
0 | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source | Create_ds/gobblin/gobblin-api/src/main/java/gobblin/source/extractor/CheckpointableWatermark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gobblin.source.extractor;
/***
 * Shim layer for org.apache.gobblin.source.extractor.CheckpointableWatermark
 * Extends the relocated interface so code written against the pre-Apache package still compiles.
 */
public interface CheckpointableWatermark extends org.apache.gobblin.source.extractor.CheckpointableWatermark {
}
| 2,143 |
0 | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin/aws/GobblinAWSClusterLauncherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.IOException;
import java.net.URL;
import java.util.List;
import java.util.concurrent.TimeoutException;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.test.TestingServer;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.model.Message;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.Tag;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.amazonaws.services.ec2.model.AvailabilityZone;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinHelixConstants;
import org.apache.gobblin.cluster.HelixMessageSubTypes;
import org.apache.gobblin.cluster.HelixMessageTestBase;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.cluster.TestHelper;
import org.apache.gobblin.cluster.TestShutdownMessageHandlerFactory;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* Unit tests for {@link GobblinAWSClusterLauncher}.
*
* @author Abhishek Tiwari
*/
@Test(groups = { "gobblin.aws" })
public class GobblinAWSClusterLauncherTest implements HelixMessageTestBase {
  public final static Logger LOG = LoggerFactory.getLogger(GobblinAWSClusterLauncherTest.class);

  private CuratorFramework curatorFramework;
  private Config config;

  private GobblinAWSClusterLauncher gobblinAwsClusterLauncher;

  private HelixManager helixManager;
  private String gobblinClusterName = "testCluster";
  private String helixClusterName;
  private String clusterId;

  // ASG tag fixtures mirroring what GobblinAWSClusterLauncher attaches to the
  // auto-scaling groups it creates. clusterIdTag starts with a placeholder value
  // and is updated once the real cluster id is known (testSetupAndSubmitApplication).
  private TagDescription clusterNameTag = new TagDescription()
      .withKey(GobblinAWSClusterLauncher.CLUSTER_NAME_ASG_TAG).withValue(gobblinClusterName);
  private TagDescription clusterIdTag = new TagDescription()
      .withKey(GobblinAWSClusterLauncher.CLUSTER_ID_ASG_TAG).withValue("dummy");
  private TagDescription masterTypeTag = new TagDescription()
      .withKey(GobblinAWSClusterLauncher.ASG_TYPE_ASG_TAG).withValue(GobblinAWSClusterLauncher.ASG_TYPE_MASTER);
  private TagDescription workerTypeTag = new TagDescription()
      .withKey(GobblinAWSClusterLauncher.ASG_TYPE_ASG_TAG).withValue(GobblinAWSClusterLauncher.ASG_TYPE_WORKERS);

  // Fake master/worker ASGs returned by the mocked AWS SDK client.
  private AutoScalingGroup masterASG = new AutoScalingGroup()
      .withAutoScalingGroupName("AutoScalingGroup_master")
      .withLaunchConfigurationName("LaunchConfiguration_master")
      .withTags(clusterNameTag, clusterIdTag, masterTypeTag);
  private AutoScalingGroup workerASG = new AutoScalingGroup()
      .withAutoScalingGroupName("AutoScalingGroup_worker")
      .withLaunchConfigurationName("LaunchConfiguration_worker")
      .withTags(clusterNameTag, clusterIdTag, workerTypeTag);

  private AvailabilityZone availabilityZone = new AvailabilityZone().withZoneName("A");
  private Instance instance = new Instance().withPublicIpAddress("0.0.0.0");

  // Collects closeable test resources (ZK server, Curator client) for tearDown.
  private final Closer closer = Closer.create();

  @Mock
  private AWSSdkClient awsSdkClient;

  /**
   * Stubs every AWS SDK interaction, starts an in-process ZooKeeper, loads the
   * test cluster configuration, and builds the launcher under test.
   */
  @BeforeClass
  public void setUp() throws Exception {
    // Mock AWS SDK calls
    // NOTE(review): openMocks(this) returns an AutoCloseable that is never closed;
    // harmless for a single-class test run but worth closing if mocks are reused.
    MockitoAnnotations.openMocks(this);
    Mockito.doNothing()
        .when(awsSdkClient)
        .createSecurityGroup(Mockito.anyString(), Mockito.anyString());
    Mockito.doReturn(Lists.<AvailabilityZone>newArrayList(availabilityZone))
        .when(awsSdkClient)
        .getAvailabilityZones();
    Mockito.doReturn("dummy")
        .when(awsSdkClient)
        .createKeyValuePair(Mockito.anyString());
    Mockito.doReturn(Lists.<AutoScalingGroup>newArrayList(masterASG, workerASG))
        .when(awsSdkClient)
        .getAutoScalingGroupsWithTag(Mockito.any(Tag.class));
    Mockito.doReturn(Lists.<Instance>newArrayList(instance))
        .when(awsSdkClient)
        .getInstancesForGroup(Mockito.anyString(), Mockito.anyString());
    Mockito.doReturn(Lists.<S3ObjectSummary>newArrayList())
        .when(awsSdkClient)
        .listS3Bucket(Mockito.anyString(), Mockito.anyString());
    Mockito.doNothing()
        .when(awsSdkClient)
        .addPermissionsToSecurityGroup(Mockito.any(String.class), Mockito.any(String.class), Mockito.any(String.class),
            Mockito.any(Integer.class), Mockito.any(Integer.class));
    Mockito.doNothing()
        .when(awsSdkClient)
        .createAutoScalingGroup(Mockito.any(String.class), Mockito.any(String.class), Mockito.any(Integer.class),
            Mockito.any(Integer.class), Mockito.any(Integer.class), Mockito.any(Optional.class),
            Mockito.any(Optional.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
            Mockito.any(Optional.class), Mockito.any(Optional.class), Mockito.any(List.class));
    Mockito.doNothing()
        .when(awsSdkClient)
        .createLaunchConfig(Mockito.any(String.class), Mockito.any(String.class), Mockito.any(String.class),
            Mockito.any(String.class), Mockito.any(String.class), Mockito.any(Optional.class),
            Mockito.any(Optional.class), Mockito.any(Optional.class), Mockito.any(Optional.class),
            Mockito.any(Optional.class), Mockito.any(String.class));
    Mockito
        .doNothing()
        .when(awsSdkClient)
        .deleteAutoScalingGroup(Mockito.any(String.class), Mockito.any(boolean.class));
    Mockito
        .doNothing()
        .when(awsSdkClient)
        .deleteLaunchConfiguration(Mockito.any(String.class));
    Mockito.doNothing()
        .when(awsSdkClient)
        .addPermissionsToSecurityGroup(Mockito.any(String.class), Mockito.any(String.class), Mockito.any(String.class),
            Mockito.any(Integer.class), Mockito.any(Integer.class));

    // Local test Zookeeper
    final TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());
    this.curatorFramework = TestHelper.createZkClient(testingZKServer, this.closer);

    // Load configuration, pointing the ZK connection string at the test server.
    // Fix: report the resource NAME on failure — the original message concatenated
    // `url`, which is exactly null when this assertion fires.
    final String resourceName = GobblinAWSClusterLauncherTest.class.getSimpleName() + ".conf";
    final URL url = GobblinAWSClusterLauncherTest.class.getClassLoader().getResource(resourceName);
    Assert.assertNotNull(url, "Could not find resource " + resourceName);
    this.config = ConfigFactory.parseURL(url)
        .withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
        .resolve();
    this.helixClusterName = this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);

    final String zkConnectionString = this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    this.helixManager = HelixManagerFactory
        .getZKHelixManager(this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.CONTROLLER, zkConnectionString);

    // Gobblin AWS Cluster launcher to test, wired to the mocked AWS SDK client.
    this.gobblinAwsClusterLauncher = new TestGobblinAWSClusterLauncher(this.config);
  }

  /** Creates the Helix cluster and verifies the expected ZK paths exist. */
  @Test
  public void testCreateHelixCluster() throws Exception {
    // This is tested here instead of in HelixUtilsTest to avoid setting up yet another testing ZooKeeper server.
    HelixUtils
        .createGobblinHelixCluster(this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY),
            this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));

    // Version 0 on the freshly-created znodes confirms there was no pre-existing cluster.
    Assert.assertEquals(this.curatorFramework.checkExists().forPath(String.format("/%s",
        this.helixClusterName)).getVersion(), 0);
    Assert.assertEquals(this.curatorFramework.checkExists().forPath(String.format("/%s/CONTROLLER",
        this.helixClusterName)).getVersion(), 0);
  }

  /** Sets up the (mocked) AWS cluster and records its id for later tests. */
  @Test(dependsOnMethods = "testCreateHelixCluster")
  public void testSetupAndSubmitApplication() throws Exception {
    // Setup new cluster
    this.clusterId = this.gobblinAwsClusterLauncher.setupGobblinCluster();
    // Keep the ASG tag fixture consistent with the generated cluster id so that
    // reconnection (next test) resolves the same cluster.
    this.clusterIdTag.setValue(this.clusterId);
  }

  /** Verifies the launcher can rediscover the cluster it just created. */
  @Test(dependsOnMethods = "testSetupAndSubmitApplication")
  public void testGetReconnectableApplicationId() throws Exception {
    // Assert to check if cluster was created correctly by trying to reconnect to it
    Assert.assertEquals(this.gobblinAwsClusterLauncher.getReconnectableClusterId().get(), this.clusterId);
  }

  /**
   * Sends a shutdown request through Helix messaging and asserts the controller
   * message is first queued and then fully processed.
   */
  @Test(dependsOnMethods = "testGetReconnectableApplicationId")
  public void testSendShutdownRequest() throws Exception {
    // Connect to Helix as Controller and register a shutdown request handler
    this.helixManager.connect();
    this.helixManager.getMessagingService().registerMessageHandlerFactory(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE,
        new TestShutdownMessageHandlerFactory(this));

    // Make Gobblin AWS Cluster launcher start a shutdown
    this.gobblinAwsClusterLauncher.connectHelixManager();
    this.gobblinAwsClusterLauncher.sendShutdownRequest();

    Assert.assertEquals(this.curatorFramework.checkExists()
        .forPath(String.format("/%s/CONTROLLER/MESSAGES", this.helixClusterName)).getVersion(), 0);
    GetControllerMessageNumFunc getCtrlMessageNum =
        new GetControllerMessageNumFunc(this.helixClusterName, this.curatorFramework);

    // Assert to check if shutdown message was issued
    AssertWithBackoff assertWithBackoff =
        AssertWithBackoff.create().logger(LoggerFactory.getLogger("testSendShutdownRequest")).timeoutMs(20000);
    assertWithBackoff.assertEquals(getCtrlMessageNum, 1, "1 controller message queued");

    // Assert to check if shutdown message was processed
    // Give Helix sometime to handle the message
    assertWithBackoff.assertEquals(getCtrlMessageNum, 0, "all controller messages processed");
  }

  /** Stops the launcher and Helix, then closes ZK resources via the Closer. */
  @AfterClass
  public void tearDown() throws IOException, TimeoutException {
    try {
      this.gobblinAwsClusterLauncher.stop();

      if (this.helixManager.isConnected()) {
        this.helixManager.disconnect();
      }

      this.gobblinAwsClusterLauncher.disconnectHelixManager();
    } finally {
      this.closer.close();
    }
  }

  /**
   * Callback from {@link TestShutdownMessageHandlerFactory}: checks the received
   * message is the expected shutdown message. Not a TestNG test itself.
   */
  @Test(enabled = false)
  @Override
  public void assertMessageReception(Message message) {
    Assert.assertEquals(message.getMsgType(), GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE);
    Assert.assertEquals(message.getMsgSubType(), HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString());
  }

  /** Launcher subclass that substitutes the mocked AWS SDK client. */
  class TestGobblinAWSClusterLauncher extends GobblinAWSClusterLauncher {
    public TestGobblinAWSClusterLauncher(Config config) throws IOException {
      super(config);
    }

    @Override
    protected AWSSdkClient createAWSSdkClient() {
      return awsSdkClient;
    }
  }

  /** Polls ZK for the number of pending Helix controller messages. */
  static class GetControllerMessageNumFunc implements Function<Void, Integer> {
    private final CuratorFramework curatorFramework;
    private final String testName;

    public GetControllerMessageNumFunc(String testName, CuratorFramework curatorFramework) {
      this.curatorFramework = curatorFramework;
      this.testName = testName;
    }

    @Override
    public Integer apply(Void input) {
      try {
        return this.curatorFramework.getChildren().forPath(String.format("/%s/CONTROLLER/MESSAGES",
            this.testName)).size();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }
}
| 2,144 |
0 | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin/aws/BaseAWSJobConfigurationManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import com.google.common.collect.Lists;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
/**
* Unit tests for {@link AWSJobConfigurationManager}.
*
* @author Abhishek Tiwari
*/
@Test(groups = { "gobblin.aws" })
public abstract class BaseAWSJobConfigurationManagerTest {
  // Expected number of job-config files per zip; sizes both countdown latches.
  private static final int NUM_JOB_CONFIG_FILES = 1;
  private static final String JOB_NAME_KEY = "job.name";
  private static final String JOB_FIRST_NAME = "PullFromWikipedia1";
  private static final String JOB_FIRST_ZIP = "wikipedia1.zip";
  private static final String JOB_SECOND_NAME = "PullFromWikipedia2";
  private static final String JOB_SECOND_ZIP = "wikipedia2.zip";
  // Fixed destination name the test copies each source zip to; the manager under
  // test watches this single URI, so swapping the file simulates a config update.
  private static final String URI_ZIP_NAME = "wikipedia.zip";
  private static final String JOB_CONFIG_DIR_NAME = BaseAWSJobConfigurationManagerTest.class.getSimpleName();

  // Timestamp suffix keeps concurrent/repeated runs from colliding on the same dir.
  private final File jobConfigFileDir = new File(JOB_CONFIG_DIR_NAME + "_" + System.currentTimeMillis());

  private final EventBus eventBus = new EventBus();
  private AWSJobConfigurationManager jobConfigurationManager;

  // Job configs observed via handleNewJobConfigArrival, in arrival order.
  private final List<Properties> receivedJobConfigs = Lists.newLinkedList();
  // Released when the initial (boot-up) config arrives / when the updated config arrives.
  private final CountDownLatch countDownLatchBootUp = new CountDownLatch(NUM_JOB_CONFIG_FILES);
  private final CountDownLatch countDownLatchUpdate = new CountDownLatch(NUM_JOB_CONFIG_FILES);

  /**
   * Stages the first job-conf zip at the watched URI, prepares a clean download
   * directory, then starts an {@link AWSJobConfigurationManager} configured via
   * the subclass hook {@link #getConfig(String)}.
   */
  @BeforeClass
  public void setUp() throws Exception {
    this.eventBus.register(this);

    // Prepare the test url to download the job conf from
    final URL url = GobblinAWSClusterLauncherTest.class.getClassLoader().getResource(JOB_FIRST_ZIP);
    final String jobConfZipUri = getJobConfigZipUri(new File(url.toURI()));

    // Prepare the test dir to download the job conf to
    if (this.jobConfigFileDir.exists()) {
      FileUtils.deleteDirectory(this.jobConfigFileDir);
    }
    Assert.assertTrue(this.jobConfigFileDir.mkdirs(), "Failed to create " + this.jobConfigFileDir);

    // Short refresh interval so the update test observes the swapped zip quickly.
    final Config config = getConfig(jobConfZipUri)
        .withValue(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY, ConfigValueFactory.fromAnyRef(this.jobConfigFileDir.toString()))
        .withValue(GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL, ConfigValueFactory.fromAnyRef("10s"));

    this.jobConfigurationManager = new AWSJobConfigurationManager(this.eventBus, config);
    this.jobConfigurationManager.startAsync().awaitRunning();
  }

  /**
   * Subclass hook: build the base config that tells the manager where the
   * job-conf zip lives (legacy S3 key vs. generic source-file keys).
   */
  protected abstract Config getConfig(String jobConfZipUri);

  /**
   * Copies {@code source} to the fixed watched location ({@link #URI_ZIP_NAME})
   * next to it and returns that file's URI. Annotated {@code @Test(enabled = false)}
   * only to make explicit to TestNG that this helper is not a test method.
   */
  @Test(enabled = false)
  private String getJobConfigZipUri(File source) throws IOException {
    final File destination = new File(StringUtils.substringBeforeLast(source.toString(), File.separator) + File.separator
        + URI_ZIP_NAME);

    if (destination.exists()) {
      if (!destination.delete()) {
        throw new IOException("Cannot clean destination job conf zip file: " + destination);
      }
    }
    FileUtils.copyFile(source, destination);
    return destination.toURI().toString();
  }

  /** Verifies the single boot-up job config is delivered on the event bus. */
  @Test
  public void testBootUpNewJobConfigs() throws Exception {
    // Wait for all job configs to be received
    this.countDownLatchBootUp.await();

    // Wikipedia1.zip has only 1 conf file, so we should only receive that
    Assert.assertEquals(this.receivedJobConfigs.size(), 1);
    Assert.assertEquals(this.receivedJobConfigs.get(0).getProperty(JOB_NAME_KEY), JOB_FIRST_NAME);
  }

  /** Swaps in the second zip and verifies only the new/changed config is delivered. */
  @Test(dependsOnMethods = "testBootUpNewJobConfigs")
  public void testUpdatedNewJobConfigs() throws Exception {
    // Change zip file in the Uri that JobConfigManager is watching
    final URL url = GobblinAWSClusterLauncherTest.class.getClassLoader().getResource(JOB_SECOND_ZIP);
    final String jobConfZipUri = getJobConfigZipUri(new File(url.toURI()));

    // Wait for all job configs to be received (after scheduled execution of 1 minute)
    this.countDownLatchUpdate.await();

    // Wikipedia2.zip has only 2 conf files:
    // 1. The original job conf that is not changed
    // 2. A new job conf that has been added
    // So, we should only receive one new / updated job conf (ie. total number of configs = 2)
    Assert.assertEquals(this.receivedJobConfigs.size(), 2);
    Assert.assertEquals(this.receivedJobConfigs.get(1).getProperty(JOB_NAME_KEY), JOB_SECOND_NAME);
  }

  /** Stops the manager and removes the download directory. */
  @AfterClass
  public void tearDown() throws IOException {
    this.jobConfigurationManager.stopAsync().awaitTerminated();
    if (this.jobConfigFileDir.exists()) {
      FileUtils.deleteDirectory(this.jobConfigFileDir);
    }
  }

  /**
   * EventBus subscriber: records each arriving job config and releases the
   * matching latch (boot-up for the first job name, update for anything else).
   * {@code @Test(enabled = false)} marks it as a non-test method for TestNG.
   */
  @Test(enabled = false)
  @Subscribe
  public void handleNewJobConfigArrival(NewJobConfigArrivalEvent newJobConfigArrivalEvent) {
    Properties jobConfig = newJobConfigArrivalEvent.getJobConfig();
    this.receivedJobConfigs.add(jobConfig);
    if (jobConfig.getProperty(JOB_NAME_KEY).equalsIgnoreCase(JOB_FIRST_NAME)) {
      this.countDownLatchBootUp.countDown();
    } else {
      this.countDownLatchUpdate.countDown();
    }
  }
}
| 2,145 |
0 | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin/aws/AWSJobConfigurationManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
/**
* Unit tests for {@link AWSJobConfigurationManager}.
*
* @author Abhishek Tiwari
*/
@Test(groups = { "gobblin.aws" })
public class AWSJobConfigurationManagerTest extends BaseAWSJobConfigurationManagerTest {

  /**
   * Points the manager at the job-conf zip through the generic source-file
   * keys: a local-filesystem FS URI plus the zip's path.
   */
  @Override
  protected Config getConfig(String jobConfZipUri) {
    Config withFsUri = ConfigFactory.empty().withValue(
        GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_FS_URI_KEY,
        ConfigValueFactory.fromAnyRef("file:///"));
    return withFsUri.withValue(
        GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_PATH_KEY,
        ConfigValueFactory.fromAnyRef(jobConfZipUri));
  }
}
| 2,146 |
0 | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin/aws/CloudInitScriptBuilderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.List;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.IOUtils;
import org.junit.BeforeClass;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.testng.util.Strings;
import com.google.common.base.Optional;
/**
* Unit tests for {@link CloudInitScriptBuilder}.
*
* @author Abhishek Tiwari
*/
@Test(groups = { "gobblin.aws" })
public class CloudInitScriptBuilderTest {
private static final String MASTER_CLOUD_INIT_SCRIPT = "masterCloudInit.sh";
private static final String WORKER_CLOUD_INIT_SCRIPT = "workerCloudInit.sh";
private String clusterName = "cluster";
private String nfsParentDir = "/home/ec2-user/";
private String awsConfDir = nfsParentDir + clusterName + "/cluster-conf/";
private String appWorkDir = nfsParentDir + clusterName + "/work-dir/";
private String sinkLogRootDir = nfsParentDir + clusterName + "/log-dir/";
private String masterS3ConfUri = "https://s3-us-west-2.amazonaws.com/some-bucket/cluster-conf/";
private String masterS3ConfFiles = "application.conf,log4j-aws.properties,quartz.properties";
private String masterS3JarsUri = "https://s3-us-west-2.amazonaws.com/some-bucket/gobblin-jars/";
private String masterS3JarFiles = "myjar1.jar,myjar2.jar,myjar3.jar,myjar4-\"${vr}\".jar";
private String masterJarsDir = nfsParentDir + clusterName + "/gobblin-jars/";
private String masterJvmMemory = "-Xms1G";
private String masterPublicIp = "0.0.0.0";
private String workerS3ConfUri = "https://s3-us-west-2.amazonaws.com/some-bucket/cluster-conf/";
private String workerS3ConfFiles = "application.conf,log4j-aws.properties,quartz.properties";
private String workerS3JarsUri = "https://s3-us-west-2.amazonaws.com/some-bucket/gobblin-jars/";
private String workerS3JarFiles = "myjar1.jar,myjar2.jar,myjar3.jar,myjar4-\"${vr}\".jar";
private String workerJarsDir = nfsParentDir + clusterName + "/gobblin-jars/";
private String workerJvmMemory = "-Xms1G";
private String expectedMasterCloudInitScript;
private String expectedWorkerCloudInitScript;
private Optional<String> gobblinVersion = Optional.of("0.7.1");
@BeforeClass
public void setup() throws Exception {
this.expectedMasterCloudInitScript = loadFile(MASTER_CLOUD_INIT_SCRIPT);
this.expectedWorkerCloudInitScript = loadFile(WORKER_CLOUD_INIT_SCRIPT);
}
@Test
public void testBuildClusterMasterCommand() {
final String script = CloudInitScriptBuilder.buildClusterMasterCommand(this.clusterName, this.nfsParentDir,
this.sinkLogRootDir, this.awsConfDir, this.appWorkDir, this.masterS3ConfUri, this.masterS3ConfFiles,
this.masterS3JarsUri, this.masterS3JarFiles, this.masterJarsDir, this.masterJvmMemory,
Optional.<String>absent(), gobblinVersion);
final String decodedScript = new String(Base64.decodeBase64(script));
Assert.assertEquals(decodedScript, this.expectedMasterCloudInitScript,
"Master launcher cloud-init script not built as expected");
}
@Test
public void testBuildClusterWorkerCommand() {
final String script = CloudInitScriptBuilder.buildClusterWorkerCommand(this.clusterName, this.nfsParentDir,
this.sinkLogRootDir, this.awsConfDir, this.appWorkDir, this.masterPublicIp, this.workerS3ConfUri,
this.workerS3ConfFiles, this.workerS3JarsUri, this.workerS3JarFiles, this.workerJarsDir, this.workerJvmMemory,
Optional.<String>absent(), gobblinVersion);
final String decodedScript = new String(Base64.decodeBase64(script));
Assert.assertEquals(decodedScript, this.expectedWorkerCloudInitScript,
"Worker launcher cloud-init script not built as expected");
}
/**
* loads the given file into a string, ignoring the comments, but considering "#!/bin/bash"
* @param file file to read
* @return file content as a string
* @throws IOException
*/
private String loadFile(String file) throws IOException {
StringBuilder sb = new StringBuilder();
List<String> lines = IOUtils
.readLines(new InputStreamReader(GobblinAWSClusterLauncherTest.class.getClassLoader().getResourceAsStream(file), "UTF-8"));
for (String line : lines) {
if (line.equals(CloudInitScriptBuilder.BASH) || (!line.startsWith("#") && !Strings.isNullOrEmpty(line))) {
sb.append(line).append("\n");
}
}
return sb.toString();
}
}
| 2,147 |
0 | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/test/java/org/apache/gobblin/aws/LegacyAWSJobConfigurationManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
/**
* Unit tests for {@link AWSJobConfigurationManager}.
*
* @author Abhishek Tiwari
*/
@Test(groups = { "gobblin.aws" })
public class LegacyAWSJobConfigurationManagerTest extends BaseAWSJobConfigurationManagerTest {

  /**
   * Points the manager at the job-conf zip through the legacy S3-URI key only.
   */
  @Override
  protected Config getConfig(String jobConfZipUri) {
    Config base = ConfigFactory.empty();
    return base.withValue(
        GobblinAWSConfigurationKeys.JOB_CONF_S3_URI_KEY,
        ConfigValueFactory.fromAnyRef(jobConfZipUri));
  }
}
| 2,148 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/GobblinAWSConfigurationKeys.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import org.apache.gobblin.annotation.Alpha;
/**
* A central place for configuration related constants of Gobblin on AWS.
*
* @author Abhishek Tiwari
*/
@Alpha
public class GobblinAWSConfigurationKeys {
public static final String GOBBLIN_AWS_PREFIX = "gobblin.aws.";
// General Gobblin AWS application configuration properties.
public static final String CLUSTER_NAME_KEY = GOBBLIN_AWS_PREFIX + "cluster.name";
public static final String EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY =
GOBBLIN_AWS_PREFIX + "email.notification.on.shutdown";
// Gobblin AWS cluster configuration properties.
public static final String AWS_REGION_KEY = GOBBLIN_AWS_PREFIX + "region";
public static final String AWS_CONF_DIR = GOBBLIN_AWS_PREFIX + "conf.dir";
// Gobblin AWS NFS configuration properties.
public static final String NFS_PARENT_DIR_KEY = GOBBLIN_AWS_PREFIX + "nfs.parent.dir";
// Gobblin AWS master configuration properties.
public static final String MASTER_AMI_ID_KEY = GOBBLIN_AWS_PREFIX + "master.ami.id";
public static final String MASTER_INSTANCE_TYPE_KEY = GOBBLIN_AWS_PREFIX + "master.instance.type";
public static final String MASTER_JVM_MEMORY_KEY = GOBBLIN_AWS_PREFIX + "master.jvm.memory";
public static final String MASTER_JVM_ARGS_KEY = GOBBLIN_AWS_PREFIX + "master.jvm.args";
public static final String MASTER_JARS_KEY = GOBBLIN_AWS_PREFIX + "master.jars.dir";
public static final String MASTER_S3_CONF_URI_KEY = GOBBLIN_AWS_PREFIX + "master.s3.conf.uri";
public static final String MASTER_S3_CONF_FILES_KEY = GOBBLIN_AWS_PREFIX + "master.s3.conf.files";
public static final String MASTER_S3_JARS_URI_KEY = GOBBLIN_AWS_PREFIX + "master.s3.jars.uri";
public static final String MASTER_S3_JARS_FILES_KEY = GOBBLIN_AWS_PREFIX + "master.s3.jars.files";
// Gobblin AWS worker configuration properties.
public static final String WORKER_AMI_ID_KEY = GOBBLIN_AWS_PREFIX + "worker.ami.id";
public static final String WORKER_INSTANCE_TYPE_KEY = GOBBLIN_AWS_PREFIX + "worker.instance.type";
public static final String WORKER_JVM_MEMORY_KEY = GOBBLIN_AWS_PREFIX + "worker.jvm.memory";
public static final String WORKER_JVM_ARGS_KEY = GOBBLIN_AWS_PREFIX + "worker.jvm.args";
public static final String MIN_WORKERS_KEY = GOBBLIN_AWS_PREFIX + "min.workers";
public static final String MAX_WORKERS_KEY = GOBBLIN_AWS_PREFIX + "max.workers";
public static final String DESIRED_WORKERS_KEY = GOBBLIN_AWS_PREFIX + "desired.workers";
public static final String WORKER_JARS_KEY = GOBBLIN_AWS_PREFIX + "worker.jars.dir";
public static final String WORKER_S3_CONF_URI_KEY = GOBBLIN_AWS_PREFIX + "worker.s3.conf.uri";
public static final String WORKER_S3_CONF_FILES_KEY = GOBBLIN_AWS_PREFIX + "worker.s3.conf.files";
public static final String WORKER_S3_JARS_URI_KEY = GOBBLIN_AWS_PREFIX + "worker.s3.jars.uri";
public static final String WORKER_S3_JARS_FILES_KEY = GOBBLIN_AWS_PREFIX + "worker.s3.jars.files";
// Security and authentication configuration properties.
public static final String CREDENTIALS_REFRESH_INTERVAL = GOBBLIN_AWS_PREFIX + "credentials.refresh.interval";
public static final String SERVICE_ACCESS_KEY = GOBBLIN_AWS_PREFIX + "service.access";
public static final String SERVICE_SECRET_KEY = GOBBLIN_AWS_PREFIX + "service.secret";
public static final String CLIENT_ASSUME_ROLE_KEY = GOBBLIN_AWS_PREFIX + "client.assume.role";
public static final String CLIENT_ROLE_ARN_KEY = GOBBLIN_AWS_PREFIX + "client.role.arn";
public static final String CLIENT_EXTERNAL_ID_KEY = GOBBLIN_AWS_PREFIX + "client.external.id";
public static final String CLIENT_SESSION_ID_KEY = GOBBLIN_AWS_PREFIX + "client.session.id";
// Resource/dependencies configuration properties.
public static final String LOGS_SINK_ROOT_DIR_KEY = GOBBLIN_AWS_PREFIX + "logs.sink.root.dir";
// Log4j properties.
public static final String GOBBLIN_AWS_LOG4J_CONFIGURATION_FILE = "log4j-aws.properties";
// Job conf properties.
public static final String JOB_CONF_S3_URI_KEY = GOBBLIN_AWS_PREFIX + "job.conf.s3.uri";
public static final String JOB_CONF_SOURCE_FILE_FS_URI_KEY = GOBBLIN_AWS_PREFIX + "job.conf.source.file.fs.uri";
public static final String JOB_CONF_SOURCE_FILE_PATH_KEY = GOBBLIN_AWS_PREFIX + "job.conf.source.file.path";
public static final String JOB_CONF_REFRESH_INTERVAL = GOBBLIN_AWS_PREFIX + "job.conf.refresh.interval";
// Work environment properties.
public static final String APP_WORK_DIR = GOBBLIN_AWS_PREFIX + "work.dir";
public static final String GOBBLIN_VERSION = GOBBLIN_AWS_PREFIX + "version";
// DEFAULT VALUES
// General Gobblin AWS application configuration properties.
public static final String DEFAULT_CLUSTER_NAME = "gobblinApplication";
public static final String DEFAULT_GOBBLIN_VERSION = "0.6.2-701-g7c07fd5";
public static final boolean DEFAULT_EMAIL_NOTIFICATION_ON_SHUTDOWN = false;
// Gobblin AWS cluster configuration properties.
public static final String DEFAULT_AWS_REGION = "us-west-2";
public static final String DEFAULT_AWS_CONF_DIR_POSTFIX = "cluster-conf";
// Gobblin AWS NFS configuration properties.
public static final String DEFAULT_NFS_PARENT_DIR = "/home/ec2-user/gobblinApplication/";
// Gobblin AWS master configuration properties.
public static final String DEFAULT_MASTER_AMI_ID = "ami-f303fb93";
public static final String DEFAULT_MASTER_INSTANCE_TYPE = "m3-medium";
public static final String DEFAULT_MASTER_JVM_MEMORY = "3G";
public static final String DEFAULT_MASTER_JARS_POSTFIX = "gobblin-lib";
public static final String DEFAULT_MASTER_S3_CONF_URI = "https://s3-region.amazonaws.com/s3bucket/gobblin-confs/cluster-conf/";
public static final String DEFAULT_MASTER_S3_CONF_FILES = "application.conf,log4j-aws.properties,quartz.properties";
public static final String DEFAULT_MASTER_S3_JARS_URI = "https://s3-us-west-2.amazonaws.com/gobblin-libs/latest-jars/";
// DEFAULT_MASTER_S3_JARS_FILES is deliberately a blank final initialized in a static block
// rather than with an inline constant expression. A String field initialized inline with a
// constant expression is a compile-time constant, and the Java language requires references
// to such constants from other classes to be inlined into each referencing class file --
// duplicating this very large literal everywhere it is used.
// Refer: http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6447475
// Findbugs flags this as:
//   "Huge string constants is duplicated across multiple class files"
// See JDK bug 6447475 for a description of an occurrence of this issue in the JDK and how
// resolving it reduced the size of the JDK by 1 megabyte.
public static final String DEFAULT_MASTER_S3_JARS_FILES;
static {
DEFAULT_MASTER_S3_JARS_FILES = "ST4-4.0.4.jar,activation-1.1.1.jar,annotations-2.0.1.jar,ant-1.9.1.jar,ant-launcher-1.9.1.jar,antlr-runtime-3.5.2.jar,aopalliance-1.0.jar,apache-log4j-extras-1.2.17.jar,asm-3.1.jar,asm-commons-3.1.jar,asm-tree-3.1.jar,avro-1.7.7.jar,avro-ipc-1.7.7-tests.jar,avro-ipc-1.7.7.jar,avro-mapred-1.7.7-hadoop2.jar,aws-java-sdk-applicationautoscaling-1.11.8.jar,aws-java-sdk-autoscaling-1.11.8.jar,aws-java-sdk-core-1.11.8.jar,aws-java-sdk-ec2-1.11.8.jar,aws-java-sdk-iam-1.11.8.jar,aws-java-sdk-kms-1.11.8.jar,aws-java-sdk-s3-1.11.8.jar,aws-java-sdk-sts-1.11.8.jar,azkaban-2.5.0.jar,bcpg-jdk15on-1.52.jar,bcprov-jdk15on-1.52.jar,bonecp-0.8.0.RELEASE.jar,bsh-2.0b4.jar,c3p0-0.9.1.1.jar,calcite-avatica-1.2.0-incubating.jar,calcite-core-1.2.0-incubating.jar,calcite-linq4j-1.2.0-incubating.jar,cglib-nodep-2.2.jar,codemodel-2.2.jar,commons-cli-1.3.1.jar,commons-codec-1.10.jar,commons-collections-3.2.1.jar,commons-compiler-2.7.6.jar,commons-compress-1.10.jar,commons-configuration-1.10.jar,commons-daemon-1.0.13.jar,commons-dbcp-1.4.jar,commons-el-1.0.jar,commons-email-1.4.jar,commons-httpclient-3.1.jar,commons-io-2.5.jar,commons-lang-2.6.jar,commons-lang3-3.4.jar,commons-logging-1.2.jar,commons-math3-3.5.jar,commons-net-3.1.jar,commons-pool-1.5.4.jar,commons-pool2-2.4.2.jar,commons-vfs2-2.0.jar,config-1.2.1.jar,curator-client-2.12.0.jar,curator-framework-2.10.0.jar,curator-recipes-2.10.0.jar,d2-1.15.9.jar,data-1.15.9.jar,data-transform-1.15.9.jar,datanucleus-api-jdo-3.2.6.jar,datanucleus-core-3.2.10.jar,datanucleus-rdbms-3.2.9.jar,degrader-1.15.9.jar,derby-10.12.1.1.jar,eigenbase-properties-1.1.5.jar,flyway-core-3.2.1.jar,generator-1.15.9.jar,geronimo-annotation_1.0_spec-1.1.1.jar,geronimo-jaspic_1.0_spec-1.0.jar,geronimo-jpa_3.0_spec-1.0.jar,geronimo-jta_1.1_spec-1.1.1.jar,gobblin-admin-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-api-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-aws-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-azkaban-" + 
DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-cluster-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-compaction-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-config-client-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-config-core-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-core-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-data-management-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-example-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-hive-registration-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-kafka-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-metastore-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-metrics-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-rest-api-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-rest-api-data-template-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-rest-api-rest-client-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-rest-client-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-rest-server-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-runtime-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-test-harness-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-tunnel-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-utility-" + DEFAULT_GOBBLIN_VERSION + ".jar,gobblin-yarn-" + DEFAULT_GOBBLIN_VERSION + 
".jar,groovy-all-2.1.6.jar,gson-2.6.2.jar,guava-21.0.jar,guava-retrying-2.0.0.jar,guice-4.0.jar,guice-servlet-3.0.jar,hadoop-annotations-2.3.0.jar,hadoop-auth-2.3.0.jar,hadoop-common-2.3.0.jar,hadoop-hdfs-2.3.0.jar,hadoop-mapreduce-client-common-2.3.0.jar,hadoop-mapreduce-client-core-2.3.0.jar,hadoop-yarn-api-2.3.0.jar,hadoop-yarn-client-2.3.0.jar,hadoop-yarn-common-2.3.0.jar,hadoop-yarn-server-common-2.3.0.jar,hamcrest-core-1.1.jar,helix-core-0.6.6-SNAPSHOT.jar,hive-ant-1.0.1.jar,hive-common-1.0.1.jar,hive-exec-1.0.1-core.jar,hive-jdbc-1.0.1.jar,hive-metastore-1.0.1.jar,hive-serde-1.0.1.jar,hive-service-1.0.1.jar,hive-shims-0.20-1.0.1.jar,hive-shims-0.20S-1.0.1.jar,hive-shims-0.23-1.0.1.jar,hive-shims-1.0.1.jar,hive-shims-common-1.0.1.jar,hive-shims-common-secure-1.0.1.jar,httpclient-4.5.2.jar,httpcore-4.4.4.jar,influxdb-java-2.1.jar,jackson-annotations-2.6.0.jar,jackson-core-2.6.6.jar,jackson-core-asl-1.9.13.jar,jackson-databind-2.6.6.jar,jackson-dataformat-cbor-2.6.6.jar,jackson-jaxrs-1.8.3.jar,jackson-mapper-asl-1.9.13.jar,jackson-xc-1.8.3.jar,janino-2.7.6.jar,jansi-1.11.jar,jasper-compiler-5.5.23.jar,jasper-runtime-5.5.23.jar,jasypt-1.9.2.jar,java-xmlbuilder-0.4.jar,javassist-3.18.2-GA.jar,javax.inject-1.jar,javax.mail-1.5.2.jar,javax.servlet-api-3.1.0.jar,jaxb-api-2.2.2.jar,jaxb-impl-2.2.3-1.jar,jcommander-1.48.jar,jdo-api-3.0.1.jar,jdo2-api-2.1.jar,jersey-core-1.9.jar,jersey-guice-1.9.jar,jersey-json-1.9.jar,jersey-server-1.9.jar,jets3t-0.9.0.jar,jettison-1.1.jar,jetty-6.1.26.jar,jetty-all-7.6.0.v20120127.jar,jetty-http-9.2.14.v20151106.jar,jetty-io-9.2.14.v20151106.jar,jetty-security-9.2.14.v20151106.jar,jetty-server-9.2.14.v20151106.jar,jetty-servlet-9.2.14.v20151106.jar,jetty-util-6.1.26.jar,jetty-util-9.2.14.v20151106.jar,jline-0.9.94.jar,joda-time-2.9.3.jar,jopt-simple-3.2.jar,jpam-1.1.jar,jsch-0.1.53.jar,json-20070829.jar,jsp-api-2.1.jar,jsr305-3.0.0.jar,jta-1.1.jar,junit-3.8.1.jar,kafka-clients-0.8.2.2.jar,kafka_2.11-0.8.2.2.jar,li-jersey-uri-1.15.9.ja
r,libfb303-0.9.0.jar,libthrift-0.9.3.jar,log4j-1.2.17.jar,lombok-1.16.8.jar,lz4-1.2.0.jar,mail-1.4.1.jar,maven-scm-api-1.4.jar,maven-scm-provider-svn-commons-1.4.jar,maven-scm-provider-svnexe-1.4.jar,metrics-core-2.2.0.jar,metrics-core-3.1.0.jar,metrics-graphite-3.1.0.jar,metrics-jvm-3.1.0.jar,mina-core-1.1.7.jar,mockito-core-1.10.19.jar,mysql-connector-java-5.1.38.jar,netty-3.2.3.Final.jar,netty-3.7.0.Final.jar,objenesis-2.1.jar,okhttp-2.4.0.jar,okio-1.4.0.jar,opencsv-2.3.jar,paranamer-2.3.jar,parseq-1.3.6.jar,pegasus-common-1.15.9.jar,pentaho-aggdesigner-algorithm-5.1.5-jhyde.jar,plexus-utils-1.5.6.jar,protobuf-java-2.5.0.jar,quartz-2.2.3.jar,r2-1.15.9.jar,reflections-0.9.10.jar,regexp-1.3.jar,restli-client-1.15.9.jar,restli-common-1.15.9.jar,restli-docgen-1.15.9.jar,restli-netty-standalone-1.15.9.jar,restli-server-1.15.9.jar,restli-tools-1.15.9.jar,retrofit-1.9.0.jar,scala-library-2.11.8.jar,scala-parser-combinators_2.11-1.0.2.jar,scala-xml_2.11-1.0.2.jar,servlet-api-2.5-20081211.jar,servlet-api-2.5.jar,slf4j-api-1.7.21.jar,slf4j-log4j12-1.7.21.jar,snappy-0.3.jar,snappy-java-1.1.1.7.jar,stax-api-1.0-2.jar,stax-api-1.0.1.jar,testng-6.9.10.jar,transaction-api-1.1.jar,velocity-1.7.jar,xmlenc-0.52.jar,zkclient-0.5.jar,zookeeper-3.4.6.jar";
}
// Gobblin AWS worker configuration properties.
public static final String DEFAULT_WORKER_AMI_ID = "ami-f303fb93";
// EC2 instance type identifiers are dot-separated (e.g. "m3.medium"); the previous default
// "m3-medium" is not a valid instance type and would be rejected by the EC2 API when the
// worker launch configuration is created.
public static final String DEFAULT_WORKER_INSTANCE_TYPE = "m3.medium";
public static final String DEFAULT_WORKER_JVM_MEMORY = "3G";
// Worker Auto Scaling Group sizing defaults.
public static final int DEFAULT_MIN_WORKERS = 2;
public static final int DEFAULT_MAX_WORKERS = 4;
public static final int DEFAULT_DESIRED_WORKERS = 2;
// Workers reuse the master's jar / conf locations by default.
public static final String DEFAULT_WORKER_JARS_POSTFIX = DEFAULT_MASTER_JARS_POSTFIX;
public static final String DEFAULT_WORKER_S3_CONF_URI = DEFAULT_MASTER_S3_CONF_URI;
public static final String DEFAULT_WORKER_S3_CONF_FILES = DEFAULT_MASTER_S3_CONF_FILES;
public static final String DEFAULT_WORKER_S3_JARS_URI = DEFAULT_MASTER_S3_JARS_URI;
public static final String DEFAULT_WORKER_S3_JARS_FILES = DEFAULT_MASTER_S3_JARS_FILES;

// Resource/dependencies configuration properties.
public static final String DEFAULT_LOGS_SINK_ROOT_DIR_POSTFIX = "logs";

// Work environment properties.
public static final String DEFAULT_APP_WORK_DIR_POSTFIX = "work.dir";
}
| 2,149 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/GobblinAWSClusterLauncher.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.mail.EmailException;
import org.apache.helix.Criteria;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.messaging.AsyncCallback;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.BlockDeviceMapping;
import com.amazonaws.services.autoscaling.model.InstanceMonitoring;
import com.amazonaws.services.autoscaling.model.Tag;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.amazonaws.services.ec2.model.AvailabilityZone;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.GobblinHelixConstants;
import org.apache.gobblin.cluster.HelixMessageSubTypes;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.EmailUtils;
import static org.apache.gobblin.aws.GobblinAWSUtils.*;
import static org.apache.gobblin.aws.GobblinAWSConfigurationKeys.*;
import static org.apache.gobblin.cluster.GobblinClusterConfigurationKeys.*;
/**
* A client driver to launch Gobblin as an AWS Cluster.
*
* <p>
* This class upon starting, will check if there is an AWS Cluster that is already running and
 * it is able to reconnect to. More specifically, it checks if a cluster with the same cluster name
* exists and can be reconnected to i.e. if the cluster has not completed yet. If so, it simply starts
* monitoring that cluster.
* </p>
*
* <p>
 * On the other hand, if there is no such reconnectable AWS cluster, this class will launch a new AWS
* cluster and start the {@link GobblinAWSClusterManager}. It also persists the new cluster details so it
* is able to reconnect to the AWS cluster if it is restarted for some reason.
* </p>
*
* <p>
* If a shutdown signal is received, it sends a Helix
* {@link org.apache.helix.model.Message.MessageType#SCHEDULER_MSG} to the {@link GobblinAWSClusterManager}
* asking it to shutdown. It also sends an email notification for the shutdown if
* {@link GobblinAWSConfigurationKeys#EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY} is {@code true}.
* </p>
*
* @author Abhishek Tiwari
*/
@Alpha
public class GobblinAWSClusterLauncher {
  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinAWSClusterLauncher.class);

  // ASG tag keys/values used both to label the cluster's Auto Scaling Groups at launch time and
  // to rediscover them when reconnecting to an already-running cluster.
  public static final String CLUSTER_NAME_ASG_TAG = "ClusterName";
  public static final String CLUSTER_ID_ASG_TAG = "ClusterId";
  public static final String ASG_TYPE_ASG_TAG = "AsgType";
  public static final String ASG_TYPE_MASTER = "master";
  public static final String ASG_TYPE_WORKERS = "workers";

  // Name prefixes for the launch configurations and ASGs created by this launcher; a per-cluster
  // UUID is appended to make each name unique.
  public static final String MASTER_ASG_NAME_PREFIX = "GobblinMasterASG_";
  public static final String MASTER_LAUNCH_CONFIG_NAME_PREFIX = "GobblinMasterLaunchConfig_";
  public static final String WORKERS_ASG_NAME_PREFIX = "GobblinWorkerASG_";
  public static final String WORKERS_LAUNCH_CONFIG_PREFIX = "GobblinWorkerLaunchConfig_";

  private final Config config;

  // Helix / ZooKeeper coordination. This launcher connects as a SPECTATOR; the actual cluster
  // manager runs on the AWS master instance.
  private final String zkConnectionString;
  private final String helixClusterName;
  private final HelixManager helixManager;
  private final EventBus eventBus = new EventBus(GobblinAWSClusterLauncher.class.getSimpleName());

  // Keeps launch() blocked until stop() is invoked (e.g. via the shutdown hook in main()).
  private final CountDownLatch countDownLatch = new CountDownLatch(1);

  private AWSClusterSecurityManager awsClusterSecurityManager;
  private AWSSdkClient awsSdkClient;
  private final Closer closer = Closer.create();

  // AWS cluster meta
  private final String clusterName;
  private volatile Optional<String> clusterId = Optional.absent();
  private volatile boolean stopped = false;
  private final boolean emailNotificationOnShutdown;

  // AWS Gobblin cluster common config
  private final String awsRegion;
  private final String awsConfDir;

  // AWS Gobblin Master Instance config
  private final String masterAmiId;
  private final String masterInstanceType;
  private final String masterJvmMemory;

  // AWS Gobblin Worker Instance config
  private final String workerAmiId;
  private final String workerInstanceType;
  private final String workerJvmMemory;
  private final Integer minWorkers;
  private final Integer maxWorkers;
  private final Integer desiredWorkers;

  private final Optional<String> masterJvmArgs;
  private final Optional<String> workerJvmArgs;

  // Public IP of the cluster master instance; assigned once the master has booted. It is only
  // logged and passed to worker cloud-init scripts; master restarts are handled via Helix.
  private String masterPublicIp;

  // NFS-backed directories and S3 locations used by the cloud-init scripts.
  private final String nfsParentDir;
  private final String masterJarsDir;
  private final String masterS3ConfUri;
  private final String masterS3ConfFiles;
  private final String masterS3JarsUri;
  private final String masterS3JarsFiles;
  private final String workerJarsDir;
  private final String workerS3ConfUri;
  private final String workerS3ConfFiles;
  private final String workerS3JarsUri;
  private final String workerS3JarsFiles;
  private final String sinkLogRootDir;
  private final String appWorkDir;

  // Names of the AWS launch configs and ASGs; either created by this launcher or recovered from
  // an existing cluster's ASG tags in getReconnectableClusterId().
  private String masterLaunchConfigName;
  private String masterAutoScalingGroupName;
  private String workerLaunchConfigName;
  private String workerAutoScalingGroupName;

  private final Optional<String> gobblinVersion;

  /**
   * Builds a launcher from the given {@link Config}, resolving all optional settings to their
   * defaults and creating (but not yet connecting) the Helix spectator used to communicate with
   * the cluster manager.
   *
   * @param config application configuration; must contain the ZooKeeper connection string
   * @throws IOException declared for subclass/client compatibility of the construction path
   */
  public GobblinAWSClusterLauncher(Config config) throws IOException {
    this.config = config;

    // Mandatory configs
    this.zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    LOGGER.info("Using ZooKeeper connection string: " + this.zkConnectionString);

    // Configs with default values
    this.clusterName = ConfigUtils.getString(config, CLUSTER_NAME_KEY, DEFAULT_CLUSTER_NAME);
    this.helixClusterName = ConfigUtils.getString(config, HELIX_CLUSTER_NAME_KEY, this.clusterName);
    this.nfsParentDir = appendSlash(ConfigUtils.getString(config, NFS_PARENT_DIR_KEY, DEFAULT_NFS_PARENT_DIR));
    this.awsRegion = ConfigUtils.getString(config, AWS_REGION_KEY, DEFAULT_AWS_REGION);
    this.awsConfDir =
        appendSlash(ConfigUtils.getString(config, AWS_CONF_DIR, nfsParentDir + DEFAULT_AWS_CONF_DIR_POSTFIX));
    this.masterAmiId = ConfigUtils.getString(config, MASTER_AMI_ID_KEY, DEFAULT_MASTER_AMI_ID);
    this.masterInstanceType = ConfigUtils.getString(config, MASTER_INSTANCE_TYPE_KEY, DEFAULT_MASTER_INSTANCE_TYPE);
    this.masterJvmMemory = ConfigUtils.getString(config, MASTER_JVM_MEMORY_KEY, DEFAULT_MASTER_JVM_MEMORY);
    this.workerAmiId = ConfigUtils.getString(config, WORKER_AMI_ID_KEY, DEFAULT_WORKER_AMI_ID);
    this.workerInstanceType = ConfigUtils.getString(config, WORKER_INSTANCE_TYPE_KEY, DEFAULT_WORKER_INSTANCE_TYPE);
    this.workerJvmMemory = ConfigUtils.getString(config, WORKER_JVM_MEMORY_KEY, DEFAULT_WORKER_JVM_MEMORY);
    this.minWorkers = ConfigUtils.getInt(config, MIN_WORKERS_KEY, DEFAULT_MIN_WORKERS);
    this.maxWorkers = ConfigUtils.getInt(config, MAX_WORKERS_KEY, DEFAULT_MAX_WORKERS);
    this.desiredWorkers = ConfigUtils.getInt(config, DESIRED_WORKERS_KEY, DEFAULT_DESIRED_WORKERS);

    this.masterJvmArgs = config.hasPath(GobblinAWSConfigurationKeys.MASTER_JVM_ARGS_KEY) ?
        Optional.of(config.getString(GobblinAWSConfigurationKeys.MASTER_JVM_ARGS_KEY)) :
        Optional.<String>absent();
    this.workerJvmArgs = config.hasPath(GobblinAWSConfigurationKeys.WORKER_JVM_ARGS_KEY) ?
        Optional.of(config.getString(GobblinAWSConfigurationKeys.WORKER_JVM_ARGS_KEY)) :
        Optional.<String>absent();

    this.masterJarsDir = appendSlash(
        ConfigUtils.getString(config, MASTER_JARS_KEY, nfsParentDir + DEFAULT_MASTER_JARS_POSTFIX));
    this.masterS3ConfUri = appendSlash(
        ConfigUtils.getString(config, MASTER_S3_CONF_URI_KEY, DEFAULT_MASTER_S3_CONF_URI));
    this.masterS3ConfFiles = ConfigUtils.getString(config, MASTER_S3_CONF_FILES_KEY, DEFAULT_MASTER_S3_CONF_FILES);
    this.masterS3JarsUri = ConfigUtils.getString(config, MASTER_S3_JARS_URI_KEY, DEFAULT_MASTER_S3_JARS_URI);
    this.masterS3JarsFiles = ConfigUtils.getString(config, MASTER_S3_JARS_FILES_KEY, DEFAULT_MASTER_S3_JARS_FILES);
    this.workerJarsDir = appendSlash(ConfigUtils.getString(config, WORKER_JARS_KEY,
        nfsParentDir + DEFAULT_WORKER_JARS_POSTFIX));
    this.workerS3ConfUri = appendSlash(
        ConfigUtils.getString(config, WORKER_S3_CONF_URI_KEY, DEFAULT_WORKER_S3_CONF_URI));
    this.workerS3ConfFiles = ConfigUtils.getString(config, WORKER_S3_CONF_FILES_KEY, DEFAULT_WORKER_S3_CONF_FILES);
    this.workerS3JarsUri = ConfigUtils.getString(config, WORKER_S3_JARS_URI_KEY, DEFAULT_WORKER_S3_JARS_URI);
    this.workerS3JarsFiles = ConfigUtils.getString(config, WORKER_S3_JARS_FILES_KEY, DEFAULT_WORKER_S3_JARS_FILES);
    this.sinkLogRootDir = appendSlash(ConfigUtils.getString(config, LOGS_SINK_ROOT_DIR_KEY,
        nfsParentDir + DEFAULT_LOGS_SINK_ROOT_DIR_POSTFIX));
    this.appWorkDir = appendSlash(ConfigUtils.getString(config, APP_WORK_DIR,
        nfsParentDir + DEFAULT_APP_WORK_DIR_POSTFIX));

    this.emailNotificationOnShutdown = ConfigUtils
        .getBoolean(config, EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY, DEFAULT_EMAIL_NOTIFICATION_ON_SHUTDOWN);

    this.awsClusterSecurityManager = new AWSClusterSecurityManager(this.config);
    this.awsSdkClient = createAWSSdkClient();

    if (config.hasPath(GobblinAWSConfigurationKeys.GOBBLIN_VERSION)) {
      this.gobblinVersion = Optional.of(config.getString(GobblinAWSConfigurationKeys.GOBBLIN_VERSION));
    } else {
      this.gobblinVersion = Optional.<String>absent();
    }

    this.helixManager = HelixManagerFactory.getZKHelixManager(this.helixClusterName, GobblinClusterUtils.getHostname(),
        InstanceType.SPECTATOR, this.zkConnectionString);
  }

  /**
   * Launch a new Gobblin cluster on AWS, or reconnect to an existing one with the same cluster
   * name. Blocks until {@link #stop()} is called.
   *
   * @throws IOException If there's something wrong launching the cluster
   * @throws InterruptedException if the calling thread is interrupted while waiting for shutdown
   */
  public void launch() throws IOException, InterruptedException {
    this.eventBus.register(this);

    // Create Helix cluster and connect to it
    HelixUtils.createGobblinHelixCluster(this.zkConnectionString, this.helixClusterName, false);
    LOGGER.info("Created Helix cluster " + this.helixClusterName);
    connectHelixManager();

    // Core logic to launch cluster (reconnects to an existing one if possible)
    this.clusterId = getClusterId();

    // TODO: Add cluster monitoring
    countDownLatch.await();
  }

  /**
   * Stop this {@link GobblinAWSClusterLauncher} instance: send a shutdown request to the cluster
   * master (if a cluster was launched), disconnect from Helix, and clean up the working directory.
   * Idempotent; subsequent calls are no-ops.
   *
   * @throws IOException If this {@link GobblinAWSClusterLauncher} instance fails to clean up its working directory.
   * @throws TimeoutException declared for API compatibility with callers that handle timeouts
   */
  public synchronized void stop() throws IOException, TimeoutException {
    if (this.stopped) {
      return;
    }

    LOGGER.info("Stopping the " + GobblinAWSClusterLauncher.class.getSimpleName());

    try {
      if (this.clusterId.isPresent()) {
        sendShutdownRequest();
      }
      disconnectHelixManager();
    } finally {
      try {
        if (this.clusterId.isPresent()) {
          cleanUpClusterWorkDirectory(this.clusterId.get());
        }
      } finally {
        this.closer.close();
      }
    }

    // Release launch() and mark this instance as stopped.
    this.countDownLatch.countDown();
    this.stopped = true;
  }

  /** Connects the Helix spectator; any connection failure is rethrown as unchecked. */
  @VisibleForTesting
  void connectHelixManager() {
    try {
      this.helixManager.connect();
    } catch (Exception e) {
      LOGGER.error("HelixManager failed to connect", e);
      throw Throwables.propagate(e);
    }
  }

  /** Disconnects the Helix spectator if it is currently connected. */
  @VisibleForTesting
  void disconnectHelixManager() {
    if (this.helixManager.isConnected()) {
      this.helixManager.disconnect();
    }
  }

  /** Creates the AWS SDK client for the configured region; overridable for testing. */
  @VisibleForTesting
  protected AWSSdkClient createAWSSdkClient() {
    return new AWSSdkClient(this.awsClusterSecurityManager,
        Region.getRegion(Regions.fromName(this.awsRegion)));
  }

  /**
   * Returns the id of a reconnectable cluster if one exists; otherwise sets up a brand new
   * cluster and returns its freshly generated id.
   */
  private Optional<String> getClusterId() throws IOException {
    final Optional<String> reconnectableClusterId = getReconnectableClusterId();
    if (reconnectableClusterId.isPresent()) {
      LOGGER.info("Found reconnectable cluster with cluster ID: " + reconnectableClusterId.get());
      return reconnectableClusterId;
    }

    LOGGER.info("No reconnectable cluster found so creating a cluster");
    return Optional.of(setupGobblinCluster());
  }

  /**
   * Looks for an existing cluster with this launcher's cluster name by querying ASGs tagged with
   * {@link #CLUSTER_NAME_ASG_TAG}. On success, recovers the cluster id, the master/worker ASG and
   * launch-config names, and the master's public IP from the tagged ASGs.
   *
   * @return the existing cluster's id, or absent if no cluster with this name is running
   * @throws IOException if the tagged ASGs are present but inconsistent (wrong count, missing tags)
   */
  @VisibleForTesting
  Optional<String> getReconnectableClusterId() throws IOException {
    // List ASGs with Tag of cluster name
    final Tag clusterNameTag = new Tag()
        .withKey(CLUSTER_NAME_ASG_TAG)
        .withValue(this.clusterName);
    final List<AutoScalingGroup> autoScalingGroups = this.awsSdkClient.getAutoScalingGroupsWithTag(clusterNameTag);

    // If no auto scaling group is found, we don't have an existing cluster to connect to
    if (autoScalingGroups.size() == 0) {
      return Optional.absent();
    }

    // If more than 0 auto scaling groups are found, validate the setup
    if (autoScalingGroups.size() != 2) {
      throw new IOException("Expected 2 auto scaling groups (1 each for master and workers) but found: " +
          autoScalingGroups.size());
    }

    // Retrieve cluster information from ASGs
    Optional<String> clusterId = Optional.absent();
    Optional<AutoScalingGroup> masterAsg = Optional.absent();
    Optional<AutoScalingGroup> workersAsg = Optional.absent();

    // The first ASG's AsgType tag tells us which of the two groups is the master's.
    for (TagDescription tagDescription : autoScalingGroups.get(0).getTags()) {
      LOGGER.info("Found tag: " + tagDescription);
      if (tagDescription.getKey().equalsIgnoreCase(CLUSTER_ID_ASG_TAG)) {
        clusterId = Optional.of(tagDescription.getValue());
      }
      if (tagDescription.getKey().equalsIgnoreCase(ASG_TYPE_ASG_TAG)) {
        if (tagDescription.getValue().equalsIgnoreCase(ASG_TYPE_MASTER)) {
          masterAsg = Optional.of(autoScalingGroups.get(0));
          workersAsg = Optional.of(autoScalingGroups.get(1));
        } else {
          masterAsg = Optional.of(autoScalingGroups.get(1));
          workersAsg = Optional.of(autoScalingGroups.get(0));
        }
      }
    }

    if (!clusterId.isPresent()) {
      throw new IOException("Found 2 auto scaling group names for: " + this.clusterName +
          " but tags seem to be corrupted, hence could not determine cluster id");
    }

    if (!masterAsg.isPresent() || !workersAsg.isPresent()) {
      throw new IOException("Found 2 auto scaling group names for: " + this.clusterName +
          " but tags seem to be corrupted, hence could not determine master and workers ASG");
    }

    // Get Master and Workers launch config name and auto scaling group name
    this.masterAutoScalingGroupName = masterAsg.get().getAutoScalingGroupName();
    this.masterLaunchConfigName = masterAsg.get().getLaunchConfigurationName();
    this.workerAutoScalingGroupName = workersAsg.get().getAutoScalingGroupName();
    this.workerLaunchConfigName = workersAsg.get().getLaunchConfigurationName();

    LOGGER.info("Trying to find cluster master public ip");
    this.masterPublicIp = getMasterPublicIp();
    LOGGER.info("Master public ip: "+ this.masterPublicIp);

    return clusterId;
  }

  /**
   * Setup the Gobblin AWS cluster: create a security group and key pair, then launch the master
   * and worker Auto Scaling Groups.
   *
   * @return the newly generated cluster id (a random UUID)
   * @throws IOException If there's anything wrong setting up the AWS cluster
   */
  @VisibleForTesting
  String setupGobblinCluster() throws IOException {

    final String uuid = UUID.randomUUID().toString();

    // Create security group
    // TODO: Make security group restrictive
    final String securityGroupName = "GobblinSecurityGroup_" + uuid;
    this.awsSdkClient.createSecurityGroup(securityGroupName, "Gobblin cluster security group");
    this.awsSdkClient.addPermissionsToSecurityGroup(securityGroupName,
        "0.0.0.0/0",
        "tcp",
        0,
        65535);

    // Create key value pair
    final String keyName = "GobblinKey_" + uuid;
    final String material = this.awsSdkClient.createKeyValuePair(keyName);
    LOGGER.debug("Material is: " + material);
    // NOTE(review): writes the private key to the working directory with default permissions
    // and the platform default charset -- consider tightening file permissions.
    FileUtils.writeStringToFile(new File(keyName + ".pem"), material);

    // Get all availability zones in the region. Currently, we will only use first
    final List<AvailabilityZone> availabilityZones = this.awsSdkClient.getAvailabilityZones();

    // Launch Cluster Master
    final String clusterId = launchClusterMaster(uuid, keyName, securityGroupName, availabilityZones.get(0));

    // Launch WorkUnit runners
    launchWorkUnitRunners(uuid, keyName, securityGroupName, availabilityZones.get(0));

    return clusterId;
  }

  /**
   * Launches the single-instance master ASG via a cloud-init script and waits for the master to
   * come up, recording its public IP.
   *
   * @return the cluster id (the same uuid passed in)
   */
  private String launchClusterMaster(String uuid, String keyName, String securityGroups,
      AvailabilityZone availabilityZone) {

    // Get cloud-init script to launch cluster master
    final String userData = CloudInitScriptBuilder.buildClusterMasterCommand(this.clusterName,
        this.nfsParentDir,
        this.sinkLogRootDir,
        this.awsConfDir,
        this.appWorkDir,
        this.masterS3ConfUri,
        this.masterS3ConfFiles,
        this.masterS3JarsUri,
        this.masterS3JarsFiles,
        this.masterJarsDir,
        this.masterJvmMemory,
        this.masterJvmArgs,
        this.gobblinVersion);

    // Create launch config for Cluster master
    this.masterLaunchConfigName = MASTER_LAUNCH_CONFIG_NAME_PREFIX + uuid;
    this.awsSdkClient.createLaunchConfig(this.masterLaunchConfigName,
        this.masterAmiId,
        this.masterInstanceType,
        keyName,
        securityGroups,
        Optional.<String>absent(),
        Optional.<String>absent(),
        Optional.<BlockDeviceMapping>absent(),
        Optional.<String>absent(),
        Optional.<InstanceMonitoring>absent(),
        userData);

    // Create ASG for Cluster master
    // TODO: Make size configurable when we have support multi-master
    this.masterAutoScalingGroupName = MASTER_ASG_NAME_PREFIX + uuid;
    final int minNumMasters = 1;
    final int maxNumMasters = 1;
    final int desiredNumMasters = 1;
    final Tag clusterNameTag = new Tag().withKey(CLUSTER_NAME_ASG_TAG).withValue(this.clusterName);
    final Tag clusterUuidTag = new Tag().withKey(CLUSTER_ID_ASG_TAG).withValue(uuid);
    final Tag asgTypeTag = new Tag().withKey(ASG_TYPE_ASG_TAG).withValue(ASG_TYPE_MASTER);
    this.awsSdkClient.createAutoScalingGroup(this.masterAutoScalingGroupName,
        this.masterLaunchConfigName,
        minNumMasters,
        maxNumMasters,
        desiredNumMasters,
        Optional.of(availabilityZone.getZoneName()),
        Optional.<Integer>absent(),
        Optional.<Integer>absent(),
        Optional.<String>absent(),
        Optional.<String>absent(),
        Optional.<String>absent(), Lists.newArrayList(clusterNameTag, clusterUuidTag, asgTypeTag));

    LOGGER.info("Waiting for cluster master to launch");
    this.masterPublicIp = getMasterPublicIp();
    LOGGER.info("Master public ip: "+ this.masterPublicIp);

    return uuid;
  }

  /**
   * Polls the master ASG every 5 seconds (up to 10 minutes) until a running instance appears, and
   * returns that instance's public IP.
   *
   * @throws RuntimeException if the master does not come up within the timeout or the wait is
   *         interrupted
   */
  private String getMasterPublicIp() {
    final long startTime = System.currentTimeMillis();
    final long launchTimeout = TimeUnit.MINUTES.toMillis(10);
    boolean isMasterLaunched = false;
    List<Instance> instanceIds = Collections.emptyList();
    while (!isMasterLaunched && (System.currentTimeMillis() - startTime) < launchTimeout) {
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
        // Restore the interrupt status before rethrowing so callers further up the stack can
        // still observe the interruption (previously it was silently swallowed).
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted while waiting for cluster master to boot up", e);
      }
      instanceIds = this.awsSdkClient.getInstancesForGroup(this.masterAutoScalingGroupName, "running");
      isMasterLaunched = instanceIds.size() > 0;
    }

    if (!isMasterLaunched) {
      throw new RuntimeException("Timed out while waiting for cluster master. "
          + "Check for issue manually for ASG: " + this.masterAutoScalingGroupName);
    }

    // This will change if cluster master restarts, but that will be handled by Helix events
    // TODO: Add listener to Helix / Zookeeper for master restart and update master public ip
    // .. although we do not use master public ip for anything
    return instanceIds.get(0).getPublicIpAddress();
  }

  /**
   * Launches the worker ASG (sized by min/max/desired workers) via a cloud-init script that
   * points the workers at the master's public IP.
   */
  private void launchWorkUnitRunners(String uuid, String keyName,
      String securityGroups,
      AvailabilityZone availabilityZone) {

    // Get cloud-init script to launch cluster worker
    final String userData = CloudInitScriptBuilder.buildClusterWorkerCommand(this.clusterName,
        this.nfsParentDir,
        this.sinkLogRootDir,
        this.awsConfDir,
        this.appWorkDir,
        this.masterPublicIp,
        this.workerS3ConfUri,
        this.workerS3ConfFiles,
        this.workerS3JarsUri,
        this.workerS3JarsFiles,
        this.workerJarsDir,
        this.workerJvmMemory,
        this.workerJvmArgs,
        this.gobblinVersion);

    // Create launch config for Cluster worker
    this.workerLaunchConfigName = WORKERS_LAUNCH_CONFIG_PREFIX + uuid;
    this.awsSdkClient.createLaunchConfig(this.workerLaunchConfigName,
        this.workerAmiId,
        this.workerInstanceType,
        keyName,
        securityGroups,
        Optional.<String>absent(),
        Optional.<String>absent(),
        Optional.<BlockDeviceMapping>absent(),
        Optional.<String>absent(),
        Optional.<InstanceMonitoring>absent(),
        userData);

    // Create ASG for Cluster workers
    this.workerAutoScalingGroupName = WORKERS_ASG_NAME_PREFIX + uuid;
    final Tag clusterNameTag = new Tag().withKey(CLUSTER_NAME_ASG_TAG).withValue(this.clusterName);
    final Tag clusterUuidTag = new Tag().withKey(CLUSTER_ID_ASG_TAG).withValue(uuid);
    final Tag asgTypeTag = new Tag().withKey(ASG_TYPE_ASG_TAG).withValue(ASG_TYPE_WORKERS);
    this.awsSdkClient.createAutoScalingGroup(this.workerAutoScalingGroupName,
        this.workerLaunchConfigName,
        this.minWorkers,
        this.maxWorkers,
        this.desiredWorkers,
        Optional.of(availabilityZone.getZoneName()),
        Optional.<Integer>absent(),
        Optional.<Integer>absent(),
        Optional.<String>absent(),
        Optional.<String>absent(),
        Optional.<String>absent(),
        Lists.newArrayList(clusterNameTag, clusterUuidTag, asgTypeTag));
  }

  /**
   * Sends a Helix scheduler message asking the cluster master (controller) to shut down. The
   * master shuts down the workers first, then itself, and the {@link #shutdownASG()} callback
   * deletes the AWS launch configs and ASGs.
   */
  @VisibleForTesting
  void sendShutdownRequest() {
    final Criteria criteria = new Criteria();
    criteria.setInstanceName("%");
    criteria.setResource("%");
    criteria.setPartition("%");
    criteria.setPartitionState("%");
    criteria.setRecipientInstanceType(InstanceType.CONTROLLER);
    criteria.setSessionSpecific(true);

    final Message shutdownRequest = new Message(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE,
        HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString().toLowerCase() + UUID.randomUUID().toString());
    shutdownRequest.setMsgSubType(HelixMessageSubTypes.APPLICATION_MASTER_SHUTDOWN.toString());
    shutdownRequest.setMsgState(Message.MessageState.NEW);
    shutdownRequest.setTgtSessionId("*");

    // Wait for 5 minutes
    final int timeout = 300000;

    // Send shutdown request to Cluster master, which will send shutdown request to workers
    // Upon receiving shutdown response from workers, master will shut itself down and call back shutdownASG()
    final int messagesSent = this.helixManager.getMessagingService().send(criteria, shutdownRequest,
        shutdownASG(), timeout);
    if (messagesSent == 0) {
      LOGGER.error(String.format("Failed to send the %s message to the controller", shutdownRequest.getMsgSubType()));
    }
  }

  /***
   * Callback method that deletes {@link AutoScalingGroup}s
   * @return Callback method that deletes {@link AutoScalingGroup}s
   */
  private AsyncCallback shutdownASG() {
    Optional<List<String>> optionalLaunchConfigurationNames = Optional
        .of(Arrays.asList(this.masterLaunchConfigName, this.workerLaunchConfigName));
    Optional<List<String>> optionalAutoScalingGroupNames = Optional
        .of(Arrays.asList(this.masterAutoScalingGroupName, this.workerAutoScalingGroupName));

    return new AWSShutdownHandler(this.awsSdkClient,
        optionalLaunchConfigurationNames,
        optionalAutoScalingGroupNames);
  }

  /** Deletes the local application working directory for the given cluster id, if it exists. */
  private void cleanUpClusterWorkDirectory(String clusterId) throws IOException {
    final File appWorkDir = new File(GobblinClusterUtils.getAppWorkDirPath(this.clusterName, clusterId));

    if (appWorkDir.exists() && appWorkDir.isDirectory()) {
      LOGGER.info("Deleting application working directory " + appWorkDir);
      FileUtils.deleteDirectory(appWorkDir);
    }
  }

  /** Sends a best-effort shutdown notification email; failures are logged, not propagated. */
  private void sendEmailOnShutdown(Optional<String> report) {
    final String subject = String.format("Gobblin AWS cluster %s completed", this.clusterName);

    final StringBuilder messageBuilder = new StringBuilder("Gobblin AWS cluster was shutdown at: " + new Date());
    if (report.isPresent()) {
      messageBuilder.append(' ').append(report.get());
    }

    try {
      EmailUtils.sendEmail(ConfigUtils.configToState(this.config), subject, messageBuilder.toString());
    } catch (EmailException ee) {
      LOGGER.error("Failed to send email notification on shutdown", ee);
    }
  }

  /**
   * Entry point: builds a launcher from the default {@link ConfigFactory#load()} configuration,
   * registers a JVM shutdown hook that stops the cluster (and optionally emails a notification),
   * then launches and blocks until shutdown.
   */
  public static void main(String[] args) throws Exception {
    final GobblinAWSClusterLauncher gobblinAWSClusterLauncher =
        new GobblinAWSClusterLauncher(ConfigFactory.load());
    Runtime.getRuntime().addShutdownHook(new Thread() {

      @Override
      public void run() {
        try {
          gobblinAWSClusterLauncher.stop();
        } catch (IOException ioe) {
          LOGGER.error("Failed to shutdown the " + GobblinAWSClusterLauncher.class.getSimpleName(), ioe);
        } catch (TimeoutException te) {
          LOGGER.error("Timeout in stopping the service manager", te);
        } finally {
          if (gobblinAWSClusterLauncher.emailNotificationOnShutdown) {
            gobblinAWSClusterLauncher.sendEmailOnShutdown(Optional.<String>absent());
          }
        }
      }
    });

    gobblinAWSClusterLauncher.launch();
  }
}
| 2,150 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/AWSClusterSecurityManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.util.concurrent.TimeUnit;
import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSCredentialsProviderChain;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.util.StringUtils;
import com.typesafe.config.Config;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.password.PasswordManager;
import org.apache.gobblin.util.ConfigUtils;
/**
 * Class for managing AWS login and credentials resolution.
 *
 * <p>
 * Credentials are resolved through a provider chain (environment variables, system properties,
 * Gobblin configuration, AWS profile file, EC2 instance profile). When the client is configured
 * to assume an IAM role, the resolved long-lived credentials are exchanged for temporary STS
 * session credentials via {@link STSAssumeRoleSessionCredentialsProvider}, which refreshes them
 * automatically.
 * </p>
 *
 * @author Abhishek Tiwari
 */
@Alpha
public class AWSClusterSecurityManager {
  private final Config config;

  public AWSClusterSecurityManager(Config config) {
    this.config = config;
  }

  /**
   * Builds the {@link AWSCredentialsProvider} to use for AWS service calls.
   *
   * @return The default provider chain, optionally wrapped in an STS assume-role provider
   */
  public AWSCredentialsProvider getCredentialsProvider() {
    final AWSCredentialsProvider longLivedProvider = new DefaultAWSCredentialsProviderChain(this.config);

    final boolean shouldAssumeRole = this.config.hasPath(GobblinAWSConfigurationKeys.CLIENT_ASSUME_ROLE_KEY)
        && this.config.getBoolean(GobblinAWSConfigurationKeys.CLIENT_ASSUME_ROLE_KEY);
    if (!shouldAssumeRole) {
      return longLivedProvider;
    }

    final STSAssumeRoleSessionCredentialsProvider.Builder builder =
        new STSAssumeRoleSessionCredentialsProvider.Builder(
            this.config.getString(GobblinAWSConfigurationKeys.CLIENT_ROLE_ARN_KEY),
            this.config.getString(GobblinAWSConfigurationKeys.CLIENT_SESSION_ID_KEY))
            .withLongLivedCredentialsProvider(longLivedProvider);
    if (this.config.hasPath(GobblinAWSConfigurationKeys.CLIENT_EXTERNAL_ID_KEY)) {
      builder.withExternalId(this.config.getString(GobblinAWSConfigurationKeys.CLIENT_EXTERNAL_ID_KEY));
    }
    if (this.config.hasPath(GobblinAWSConfigurationKeys.CREDENTIALS_REFRESH_INTERVAL)) {
      // Refresh interval is configured in minutes; the STS builder expects seconds
      builder.withRoleSessionDurationSeconds((int) TimeUnit.MINUTES.toSeconds(
          this.config.getLong(GobblinAWSConfigurationKeys.CREDENTIALS_REFRESH_INTERVAL)));
    }
    return builder.build();
  }

  /**
   * Standard AWS provider chain augmented with a Gobblin-configuration-backed provider.
   */
  private static class DefaultAWSCredentialsProviderChain extends AWSCredentialsProviderChain {
    DefaultAWSCredentialsProviderChain(Config config) {
      // Order matters: earlier providers win when multiple sources define credentials
      super(new EnvironmentVariableCredentialsProvider(),
          new SystemPropertiesCredentialsProvider(),
          new ConfigurationCredentialsProvider(config),
          new ProfileCredentialsProvider(),
          new InstanceProfileCredentialsProvider());
    }
  }

  /**
   * Reads the access key and (possibly password-manager encrypted) secret key from Gobblin config.
   */
  private static class ConfigurationCredentialsProvider implements AWSCredentialsProvider {
    private final Config config;

    ConfigurationCredentialsProvider(Config config) {
      this.config = config;
    }

    @Override
    public AWSCredentials getCredentials() {
      String accessKey = this.config.hasPath(GobblinAWSConfigurationKeys.SERVICE_ACCESS_KEY)
          ? this.config.getString(GobblinAWSConfigurationKeys.SERVICE_ACCESS_KEY)
          : null;
      String secretKey = this.config.hasPath(GobblinAWSConfigurationKeys.SERVICE_SECRET_KEY)
          ? PasswordManager.getInstance(ConfigUtils.configToState(this.config))
              .readPassword(this.config.getString(GobblinAWSConfigurationKeys.SERVICE_SECRET_KEY))
          : null;

      // StringUtils.trim is null-safe, so an absent key stays null and fails the check below
      accessKey = StringUtils.trim(accessKey);
      secretKey = StringUtils.trim(secretKey);
      if (StringUtils.isNullOrEmpty(accessKey) || StringUtils.isNullOrEmpty(secretKey)) {
        throw new AmazonClientException(String.format("Unable to load AWS credentials from config (%s and %s)",
            GobblinAWSConfigurationKeys.SERVICE_ACCESS_KEY, GobblinAWSConfigurationKeys.SERVICE_SECRET_KEY));
      }
      return new BasicAWSCredentials(accessKey, secretKey);
    }

    @Override
    public void refresh() {
      // Credentials are re-read from config on every getCredentials() call; nothing to refresh
    }

    @Override
    public String toString() {
      return getClass().getSimpleName();
    }
  }
}
| 2,151 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/AWSJobConfigurationManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URL;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.apache.gobblin.runtime.job_spec.JobSpecResolver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinHelixJobScheduler;
import org.apache.gobblin.cluster.JobConfigurationManager;
import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.ExecutorsUtils;
import org.apache.gobblin.util.HadoopUtils;
import org.apache.gobblin.util.SchedulerUtils;
import static org.apache.gobblin.aws.GobblinAWSUtils.appendSlash;
import lombok.Value;
/**
 * Class for managing AWS Gobblin job configurations.
 *
 * <p>
 * This class reads all the job configuration at startup from S3
 * and schedules a refresh to poll from S3 for any new job configurations.
 * The jobs read are scheduled by the {@link GobblinHelixJobScheduler} by posting a
 * {@link NewJobConfigArrivalEvent} for each job configuration file.
 * </p>
 *
 * @author Abhishek Tiwari
 */
@Alpha
public class AWSJobConfigurationManager extends JobConfigurationManager {
  private static final Logger LOGGER = LoggerFactory.getLogger(AWSJobConfigurationManager.class);

  // Poll interval (seconds) used when GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL is absent
  private static final long DEFAULT_JOB_CONF_REFRESH_INTERVAL = 60;

  // Retriever for downloading the zipped job configs; re-resolved from config on every poll
  private Optional<JobArchiveRetriever> jobArchiveRetriever;
  // Last-seen job config, keyed by job config file path, used to detect new / updated jobs
  private Map<String, Properties> jobConfFiles;

  private final long refreshIntervalInSeconds;
  private final ScheduledExecutorService fetchJobConfExecutor;
  private final JobSpecResolver jobSpecResolver;

  public AWSJobConfigurationManager(EventBus eventBus, Config config) {
    super(eventBus, config);
    this.jobConfFiles = Maps.newHashMap();
    if (config.hasPath(GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL)) {
      this.refreshIntervalInSeconds = config.getDuration(GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL,
          TimeUnit.SECONDS);
    } else {
      this.refreshIntervalInSeconds = DEFAULT_JOB_CONF_REFRESH_INTERVAL;
    }

    this.fetchJobConfExecutor = Executors.newSingleThreadScheduledExecutor(
        ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("FetchJobConfExecutor")));
    try {
      this.jobSpecResolver = JobSpecResolver.builder(config).build();
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }

  /** Re-reads the job config download settings, so they may change between polls. */
  private void fetchJobConfSettings() {
    this.jobConfDirPath =
        config.hasPath(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY) ? Optional
            .of(config.getString(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY)) : Optional.<String>absent();
    this.jobArchiveRetriever = this.getJobArchiveRetriever(config);
  }

  @Override
  protected void startUp() throws Exception {
    LOGGER.info("Starting the " + AWSJobConfigurationManager.class.getSimpleName());

    LOGGER.info(String.format("Scheduling the job configuration refresh task with an interval of %d second(s)",
        this.refreshIntervalInSeconds));

    // Schedule the job config fetch task
    this.fetchJobConfExecutor.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        try {
          fetchJobConf();
        } catch (Exception e) {
          // Log and swallow: per ScheduledExecutorService.scheduleAtFixedRate, a thrown exception
          // silently cancels all subsequent executions of the task, so rethrowing here would
          // permanently stop job configuration refresh after a single (possibly transient) failure.
          LOGGER.error("Failed to fetch job configurations", e);
        }
      }
    }, 0, this.refreshIntervalInSeconds, TimeUnit.SECONDS);
  }

  /**
   * Downloads the job config archive, extracts it, loads all job configs found, and posts a
   * {@link NewJobConfigArrivalEvent} for every config that is new or changed since the last poll.
   *
   * @throws IOException If the archive cannot be downloaded or extracted
   * @throws ConfigurationException If a job config file cannot be parsed
   */
  private void fetchJobConf()
      throws IOException, ConfigurationException {
    // Refresh job config pull details from config
    fetchJobConfSettings();

    // TODO: Eventually when config store supports job files as well
    // .. we can replace this logic with config store
    if (this.jobArchiveRetriever.isPresent() && this.jobConfDirPath.isPresent()) {
      // Download the zip file
      final String zipFile = this.jobArchiveRetriever.get().retrieve(this.config, this.jobConfDirPath.get());
      final String extractedPullFilesPath = appendSlash(this.jobConfDirPath.get()) + "files";

      // Extract the zip file
      LOGGER.debug("Extracting to directory: " + extractedPullFilesPath + " from zip: " + zipFile);
      unzipArchive(zipFile, new File(extractedPullFilesPath));

      // Load all new job configurations
      // TODO: Currently new and updated jobs are handled, we should un-schedule deleted jobs as well
      final File jobConfigDir = new File(extractedPullFilesPath);
      if (jobConfigDir.exists()) {
        LOGGER.info("Loading job configurations from " + jobConfigDir);
        final Properties properties = ConfigUtils.configToProperties(this.config);
        properties.setProperty(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY, jobConfigDir.getAbsolutePath());

        final List<Properties> jobConfigs = SchedulerUtils.loadGenericJobConfigs(properties, this.jobSpecResolver);
        LOGGER.info("Loaded " + jobConfigs.size() + " job configuration(s)");
        for (Properties config : jobConfigs) {
          LOGGER.debug("Config value: " + config);

          // If new config or existing config got updated, then post new job config arrival event
          final String jobConfigPathIdentifier = config.getProperty(ConfigurationKeys.JOB_CONFIG_FILE_PATH_KEY);
          if (!jobConfFiles.containsKey(jobConfigPathIdentifier)) {
            jobConfFiles.put(jobConfigPathIdentifier, config);

            postNewJobConfigArrival(config.getProperty(ConfigurationKeys.JOB_NAME_KEY), config);
            LOGGER.info("New config arrived for job: " + jobConfigPathIdentifier);
          } else if (!config.equals(jobConfFiles.get(jobConfigPathIdentifier))) {
            jobConfFiles.put(jobConfigPathIdentifier, config);

            postNewJobConfigArrival(config.getProperty(ConfigurationKeys.JOB_NAME_KEY), config);
            LOGGER.info("Config updated for job: " + jobConfigPathIdentifier);
          } else {
            LOGGER.info("Config not changed for job: " + jobConfigPathIdentifier);
          }
        }
      } else {
        LOGGER.warn("Job configuration directory " + jobConfigDir + " not found");
      }
    }
  }

  /***
   * Unzip a zip archive
   * @param file Zip file to unarchive
   * @param outputDir Output directory for the unarchived file
   * @throws IOException If any issue occurs in unzipping the file
   */
  public void unzipArchive(String file, File outputDir)
      throws IOException {
    try (ZipFile zipFile = new ZipFile(file)) {
      final Enumeration<? extends ZipEntry> entries = zipFile.entries();
      while (entries.hasMoreElements()) {
        final ZipEntry entry = entries.nextElement();
        final File entryDestination = new File(outputDir, entry.getName());
        // Guard against "zip slip": reject entries that resolve outside the output directory
        if (!org.apache.gobblin.util.FileUtils.isSubPath(outputDir, entryDestination)) {
          throw new IOException(String.format("Extracted file: %s is trying to write outside of output directory: %s",
              entryDestination, outputDir));
        }

        if (entry.isDirectory()) {
          // If entry is directory, create directory
          if (!entryDestination.mkdirs() && !entryDestination.exists()) {
            throw new IOException("Could not create directory: " + entryDestination
                + " while un-archiving zip: " + file);
          }
        } else {
          // Create parent dirs if required
          if (!entryDestination.getParentFile().mkdirs() && !entryDestination.getParentFile().exists()) {
            throw new IOException("Could not create parent directory for: " + entryDestination
                + " while un-archiving zip: " + file);
          }

          // Extract and save the conf file; try-with-resources guarantees both streams are
          // closed and, unlike the previous closeQuietly approach, surfaces close/flush failures
          try (InputStream in = zipFile.getInputStream(entry);
              OutputStream out = new FileOutputStream(entryDestination)) {
            IOUtils.copy(in, out);
          }
        }
      }
    }
  }

  @Override
  protected void shutDown() throws Exception {
    GobblinAWSUtils.shutdownExecutorService(this.getClass(), this.fetchJobConfExecutor, LOGGER);
  }

  /**
   * Resolves the archive retriever from config: the Hadoop-based retriever takes precedence,
   * then the deprecated direct-URI retriever; absent if neither is configured.
   */
  private Optional<JobArchiveRetriever> getJobArchiveRetriever(Config config) {
    if (config.hasPath(GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_FS_URI_KEY) &&
        config.hasPath(GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_PATH_KEY)) {
      return Optional.of(new HadoopJobArchiveRetriever(config.getString(GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_FS_URI_KEY),
          config.getString(GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_PATH_KEY)));
    }
    if (config.hasPath(GobblinAWSConfigurationKeys.JOB_CONF_S3_URI_KEY)) {
      LOGGER.warn("GobblinAWSConfigurationKeys.JOB_CONF_S3_URI_KEY is deprecated. " +
          "Switch to GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_FS_URI_KEY and " +
          "GobblinAWSConfigurationKeys.JOB_CONF_SOURCE_FILE_PATH_KEY.");
      return Optional.of(new LegacyJobArchiveRetriever(config.getString(GobblinAWSConfigurationKeys.JOB_CONF_S3_URI_KEY)));
    }
    return Optional.absent();
  }

  /** Strategy for downloading the zipped job configuration archive to a local directory. */
  private interface JobArchiveRetriever {
    /** Downloads the archive into {@code targetDir} and returns the local zip file path. */
    String retrieve(Config config, String targetDir) throws IOException;
  }

  /** Downloads the archive from a plain URL (deprecated configuration path). */
  @Value
  private static class LegacyJobArchiveRetriever implements JobArchiveRetriever {
    String uri;

    @Override
    public String retrieve(Config config, String targetDir) throws IOException {
      final String zipFile = appendSlash(targetDir) +
          StringUtils.substringAfterLast(this.uri, File.separator);
      LOGGER.debug("Downloading to zip: " + zipFile + " from uri: " + uri);
      FileUtils.copyURLToFile(new URL(this.uri), new File(zipFile));
      return zipFile;
    }
  }

  /** Downloads the archive through a Hadoop {@link FileSystem} (e.g. s3a://). */
  @Value
  private static class HadoopJobArchiveRetriever implements JobArchiveRetriever {
    String fsUri;
    String path;

    @Override
    public String retrieve(Config config, String targetDir) throws IOException {
      URI uri = URI.create(this.fsUri);
      FileSystem fs = FileSystem.get(uri, HadoopUtils.getConfFromState(ConfigUtils.configToState(config)));

      final Path sourceFile = new Path(path);
      final String zipFile = appendSlash(targetDir) +
          StringUtils.substringAfterLast(this.path, File.separator);
      LOGGER.debug("Downloading to zip: " + zipFile + " from uri: " + sourceFile);
      fs.copyToLocalFile(sourceFile, new Path(zipFile));
      return zipFile;
    }
  }
}
| 2,152 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/AWSSdkClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.regions.Region;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.autoscaling.model.AutoScalingGroup;
import com.amazonaws.services.autoscaling.model.BlockDeviceMapping;
import com.amazonaws.services.autoscaling.model.CreateAutoScalingGroupRequest;
import com.amazonaws.services.autoscaling.model.CreateLaunchConfigurationRequest;
import com.amazonaws.services.autoscaling.model.DeleteAutoScalingGroupRequest;
import com.amazonaws.services.autoscaling.model.DeleteLaunchConfigurationRequest;
import com.amazonaws.services.autoscaling.model.DescribeAutoScalingGroupsRequest;
import com.amazonaws.services.autoscaling.model.InstanceMonitoring;
import com.amazonaws.services.autoscaling.model.Tag;
import com.amazonaws.services.autoscaling.model.TagDescription;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest;
import com.amazonaws.services.ec2.model.AvailabilityZone;
import com.amazonaws.services.ec2.model.CreateKeyPairRequest;
import com.amazonaws.services.ec2.model.CreateKeyPairResult;
import com.amazonaws.services.ec2.model.CreateSecurityGroupRequest;
import com.amazonaws.services.ec2.model.DescribeAvailabilityZonesResult;
import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
import com.amazonaws.services.ec2.model.DescribeInstancesResult;
import com.amazonaws.services.ec2.model.Filter;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.IpPermission;
import com.amazonaws.services.ec2.model.KeyPair;
import com.amazonaws.services.ec2.model.Reservation;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.Lists;
import org.apache.gobblin.annotation.Alpha;
/**
* Class responsible for all AWS API calls.
*
* <p>
* This class makes use of AWS SDK API and provides clients for various Amazon AWS services
* such as: EC2, S3, AutoScaling; as well as this class provides various helper methods to
* perform AWS service API calls.
* </p>
*
* @author Abhishek Tiwari
*/
@Alpha
public class AWSSdkClient {
private static final Logger LOGGER = LoggerFactory.getLogger(GobblinAWSClusterLauncher.class);
private static final Splitter SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults();
private Supplier<AmazonEC2> amazonEC2Supplier;
private Supplier<AmazonS3> amazonS3Supplier;
private Supplier<AmazonAutoScaling> amazonAutoScalingSupplier;
/***
* Initialize the AWS SDK Client
*
* @param awsClusterSecurityManager The {@link AWSClusterSecurityManager} to fetch AWS credentials
* @param region The Amazon AWS {@link Region}
*/
public AWSSdkClient(final AWSClusterSecurityManager awsClusterSecurityManager, final Region region) {
this.amazonEC2Supplier = Suppliers.memoize(new Supplier<AmazonEC2>() {
@Override
public AmazonEC2 get() {
AmazonEC2Client amazonEC2 = new AmazonEC2Client(awsClusterSecurityManager.getCredentialsProvider());
amazonEC2.setRegion(region);
return amazonEC2;
}
});
this.amazonS3Supplier = Suppliers.memoize(new Supplier<AmazonS3>() {
@Override
public AmazonS3 get() {
AmazonS3Client amazonS3 = new AmazonS3Client(awsClusterSecurityManager.getCredentialsProvider());
amazonS3.setRegion(region);
return amazonS3;
}
});
this.amazonAutoScalingSupplier = Suppliers.memoize(new Supplier<AmazonAutoScaling>() {
@Override
public AmazonAutoScaling get() {
AmazonAutoScalingClient amazonAutoScaling =
new AmazonAutoScalingClient(awsClusterSecurityManager.getCredentialsProvider());
amazonAutoScaling.setRegion(region);
return amazonAutoScaling;
}
});
}
/***
* Create an Amazon AWS security group
*
* @param groupName Security group name
* @param description Security group description
*/
public void createSecurityGroup(String groupName,
String description) {
AmazonEC2 amazonEC2 = getEc2Client();
try {
final CreateSecurityGroupRequest securityGroupRequest = new CreateSecurityGroupRequest()
.withGroupName(groupName)
.withDescription(description);
amazonEC2.createSecurityGroup(securityGroupRequest);
LOGGER.info("Created Security Group: " + groupName);
} catch (AmazonServiceException ase) {
// This might mean that security group is already created, hence ignore
LOGGER.warn("Issue in creating security group", ase);
}
}
/***
* Open firewall for a security group
*
* @param groupName Open firewall for this security group
* @param ipRanges Open firewall for this IP range
* @param ipProtocol Open firewall for this protocol type (eg. tcp, udp)
* @param fromPort Open firewall for port range starting at this port
* @param toPort Open firewall for port range ending at this port
*/
public void addPermissionsToSecurityGroup(String groupName,
String ipRanges,
String ipProtocol,
Integer fromPort,
Integer toPort) {
final AmazonEC2 amazonEC2 = getEc2Client();
final IpPermission ipPermission = new IpPermission()
.withIpRanges(ipRanges)
.withIpProtocol(ipProtocol)
.withFromPort(fromPort)
.withToPort(toPort);
final AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest =
new AuthorizeSecurityGroupIngressRequest()
.withGroupName(groupName)
.withIpPermissions(ipPermission);
amazonEC2.authorizeSecurityGroupIngress(authorizeSecurityGroupIngressRequest);
LOGGER.info("Added permissions: " + ipPermission + " to security group: " + groupName);
}
/***
* Creates a 2048-bit RSA key pair with the specified name
*
* @param keyName Key name to use
* @return Unencrypted PEM encoded PKCS#8 private key
*/
public String createKeyValuePair(String keyName) {
final AmazonEC2 amazonEC2 = getEc2Client();
final CreateKeyPairRequest createKeyPairRequest = new CreateKeyPairRequest().withKeyName(keyName);
final CreateKeyPairResult createKeyPairResult = amazonEC2.createKeyPair(createKeyPairRequest);
final KeyPair keyPair = createKeyPairResult.getKeyPair();
final String material = keyPair.getKeyMaterial();
LOGGER.info("Created key: " + keyName);
LOGGER.debug("Created material: " + material);
return material;
}
  /***
   * Create a launch configuration that can be later used to create {@link AmazonAutoScaling} groups
   *
   * @param launchConfigName Desired launch config name
   * @param imageId AMI image id to use
   * @param instanceType EC2 instance type to use
   * @param keyName Key name
   * @param securityGroups Security groups to apply (comma separated list)
   * @param kernelId Optional kernel id
   * @param ramdiskId Optional ram disk id
   * @param blockDeviceMapping Optional EBS device mapping
   * @param iamInstanceProfile Optional IAM instance profile
   * @param instanceMonitoring Optional instance monitoring
   * @param userData User data (eg. shell script to execute at instance boot under this launch config)
   */
  public void createLaunchConfig(String launchConfigName,
      String imageId,
      String instanceType,
      String keyName,
      String securityGroups,
      Optional<String> kernelId,
      Optional<String> ramdiskId,
      Optional<BlockDeviceMapping> blockDeviceMapping,
      Optional<String> iamInstanceProfile,
      Optional<InstanceMonitoring> instanceMonitoring,
      String userData) {
    final AmazonAutoScaling autoScaling = getAmazonAutoScalingClient();

    // Mandatory launch configuration settings
    CreateLaunchConfigurationRequest createLaunchConfigurationRequest = new CreateLaunchConfigurationRequest()
        .withLaunchConfigurationName(launchConfigName)
        .withImageId(imageId)
        .withInstanceType(instanceType)
        .withSecurityGroups(SPLITTER.splitToList(securityGroups))
        .withKeyName(keyName)
        .withUserData(userData);
    // Optional settings are applied only when present; each withX returns the updated request
    if (kernelId.isPresent()) {
      createLaunchConfigurationRequest = createLaunchConfigurationRequest
          .withKernelId(kernelId.get());
    }
    if (ramdiskId.isPresent()) {
      createLaunchConfigurationRequest = createLaunchConfigurationRequest
          .withRamdiskId(ramdiskId.get());
    }
    if (blockDeviceMapping.isPresent()) {
      createLaunchConfigurationRequest = createLaunchConfigurationRequest
          .withBlockDeviceMappings(blockDeviceMapping.get());
    }
    if (iamInstanceProfile.isPresent()) {
      createLaunchConfigurationRequest = createLaunchConfigurationRequest
          .withIamInstanceProfile(iamInstanceProfile.get());
    }
    if (instanceMonitoring.isPresent()) {
      createLaunchConfigurationRequest = createLaunchConfigurationRequest
          .withInstanceMonitoring(instanceMonitoring.get());
    }
    autoScaling.createLaunchConfiguration(createLaunchConfigurationRequest);

    LOGGER.info("Created Launch Configuration: " + launchConfigName);
  }
/***
* Delete a launch configuration by its name
*
* @param launchConfigName Name of launch config to delete
*/
public void deleteLaunchConfiguration(String launchConfigName) {
final AmazonAutoScaling autoScaling = getAmazonAutoScalingClient();
final DeleteLaunchConfigurationRequest deleteLaunchConfigurationRequest = new DeleteLaunchConfigurationRequest()
.withLaunchConfigurationName(launchConfigName);
autoScaling.deleteLaunchConfiguration(deleteLaunchConfigurationRequest);
LOGGER.info("Deleted Launch Configuration: " + launchConfigName);
}
  /***
   * Create and launch an {@link AmazonAutoScaling} group
   *
   * @param groupName Auto scaling group name
   * @param launchConfig Launch configuration string
   * @param minSize Minimum number of instances to maintain in auto scaling group
   * @param maxSize Maximum number of instances to scale up-to for load
   * @param desiredCapacity Desired number of instances to maintain in auto scaling group
   * @param availabilityZones Optional availability zones to make use of (comma separated list)
   * @param cooldown Optional cooldown period before any scaling event (default is 300 secs)
   * @param healthCheckGracePeriod Optional grace period till which no health check is performed after bootup (default is 300 secs)
   * @param healthCheckType Optional health check type (default is EC2 instance check)
   * @param loadBalancer Optional load balancer to use (comma separated list)
   * @param terminationPolicy Optional termination policies (comma separated list)
   * @param tags Optional tags to set on auto scaling group (they are set to propagate to EC2 instances implicitly)
   */
  public void createAutoScalingGroup(String groupName,
      String launchConfig,
      Integer minSize, Integer maxSize, Integer desiredCapacity,
      Optional<String> availabilityZones,
      Optional<Integer> cooldown,
      Optional<Integer> healthCheckGracePeriod,
      Optional<String> healthCheckType,
      Optional<String> loadBalancer,
      Optional<String> terminationPolicy,
      List<Tag> tags) {
    AmazonAutoScaling autoScaling = getAmazonAutoScalingClient();

    // Propagate ASG tags to EC2 instances launched under the ASG by default
    // (we want to ensure this, hence not configurable)
    final List<Tag> tagsWithPropagationSet = Lists.newArrayList();
    for (Tag tag : tags) {
      tagsWithPropagationSet.add(tag.withPropagateAtLaunch(true));
    }

    // Mandatory group settings
    CreateAutoScalingGroupRequest createAutoScalingGroupRequest = new CreateAutoScalingGroupRequest()
        .withAutoScalingGroupName(groupName)
        .withLaunchConfigurationName(launchConfig)
        .withMinSize(minSize)
        .withMaxSize(maxSize)
        .withDesiredCapacity(desiredCapacity)
        .withTags(tagsWithPropagationSet);
    // Optional settings are applied only when present; comma-separated values are split into lists
    if (availabilityZones.isPresent()) {
      createAutoScalingGroupRequest = createAutoScalingGroupRequest
          .withAvailabilityZones(SPLITTER.splitToList(availabilityZones.get()));
    }
    if (cooldown.isPresent()) {
      createAutoScalingGroupRequest = createAutoScalingGroupRequest
          .withDefaultCooldown(cooldown.get());
    }
    if (healthCheckGracePeriod.isPresent()) {
      createAutoScalingGroupRequest = createAutoScalingGroupRequest
          .withHealthCheckGracePeriod(healthCheckGracePeriod.get());
    }
    if (healthCheckType.isPresent()) {
      createAutoScalingGroupRequest = createAutoScalingGroupRequest
          .withHealthCheckType(healthCheckType.get());
    }
    if (loadBalancer.isPresent()) {
      createAutoScalingGroupRequest = createAutoScalingGroupRequest
          .withLoadBalancerNames(SPLITTER.splitToList(loadBalancer.get()));
    }
    if (terminationPolicy.isPresent()) {
      createAutoScalingGroupRequest = createAutoScalingGroupRequest
          .withTerminationPolicies(SPLITTER.splitToList(terminationPolicy.get()));
    }
    autoScaling.createAutoScalingGroup(createAutoScalingGroupRequest);

    LOGGER.info("Created AutoScalingGroup: " + groupName);
  }
/***
* Delete an auto scaling group by its name
*
* @param autoScalingGroupName Name of auto scaling group to delete
* @param shouldForceDelete If the AutoScalingGroup should be deleted without waiting for instances to terminate
*/
public void deleteAutoScalingGroup(String autoScalingGroupName,
boolean shouldForceDelete) {
final AmazonAutoScaling autoScaling = getAmazonAutoScalingClient();
final DeleteAutoScalingGroupRequest deleteLaunchConfigurationRequest = new DeleteAutoScalingGroupRequest()
.withAutoScalingGroupName(autoScalingGroupName)
.withForceDelete(shouldForceDelete);
autoScaling.deleteAutoScalingGroup(deleteLaunchConfigurationRequest);
LOGGER.info("Deleted AutoScalingGroup: " + autoScalingGroupName);
}
/***
 * Get list of {@link AutoScalingGroup}s for a given tag.
 *
 * <p>NOTE(review): only a single describeAutoScalingGroups call is made and the
 * response's next token is not followed, so results beyond the first page are
 * missed for accounts with many groups — TODO confirm and paginate if needed.</p>
 *
 * @param tag Tag to filter the auto scaling groups (key and value compared case-insensitively)
 * @return List of {@link AutoScalingGroup}s qualifying the filter tag, each listed at most once
 */
public List<AutoScalingGroup> getAutoScalingGroupsWithTag(Tag tag) {
  final AmazonAutoScaling autoScaling = getAmazonAutoScalingClient();
  final DescribeAutoScalingGroupsRequest describeAutoScalingGroupsRequest = new DescribeAutoScalingGroupsRequest();
  final List<AutoScalingGroup> allAutoScalingGroups = autoScaling
      .describeAutoScalingGroups(describeAutoScalingGroupsRequest)
      .getAutoScalingGroups();

  final List<AutoScalingGroup> filteredAutoScalingGroups = Lists.newArrayList();
  for (AutoScalingGroup autoScalingGroup : allAutoScalingGroups) {
    for (TagDescription tagDescription : autoScalingGroup.getTags()) {
      if (tagDescription.getKey().equalsIgnoreCase(tag.getKey()) &&
          tagDescription.getValue().equalsIgnoreCase(tag.getValue())) {
        filteredAutoScalingGroups.add(autoScalingGroup);
        // Bug fix: a group carrying several matching tag descriptions was
        // previously added once per match; stop after the first match so the
        // same group never appears twice in the result.
        break;
      }
    }
  }
  return filteredAutoScalingGroups;
}
/***
 * Get list of EC2 {@link Instance}s belonging to an auto scaling group.
 *
 * @param groupName Auto scaling group name (matched via the "aws:autoscaling:groupName" tag)
 * @param status Instance status to filter by (eg. running); a null status, or an
 *               instance with no reported state, is treated as a match
 * @return List of EC2 instances found for the input auto scaling group
 */
public List<Instance> getInstancesForGroup(String groupName,
    String status) {
  final AmazonEC2 amazonEC2 = getEc2Client();
  final DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest()
      .withFilters(new Filter().withName("tag:aws:autoscaling:groupName").withValues(groupName));
  final DescribeInstancesResult instancesResult = amazonEC2.describeInstances(describeInstancesRequest);

  final List<Instance> instances = new ArrayList<>();
  for (Reservation reservation : instancesResult.getReservations()) {
    for (Instance instance : reservation.getInstances()) {
      final boolean qualifies = (null == status) || (null == instance.getState())
          || status.equals(instance.getState().getName());
      if (qualifies) {
        instances.add(instance);
        LOGGER.info("Found instance: " + instance + " which qualified filter: " + status);
      } else {
        LOGGER.info("Found instance: " + instance + " but did not qualify for filter: " + status);
      }
    }
  }
  return instances;
}
/***
 * Get availability zones in an Amazon AWS region.
 *
 * @return List of availability zones
 */
public List<AvailabilityZone> getAvailabilityZones() {
  final List<AvailabilityZone> availabilityZones =
      getEc2Client().describeAvailabilityZones().getAvailabilityZones();
  LOGGER.info("Found: " + availabilityZones.size() + " availability zone");
  return availabilityZones;
}
/***
 * Download a S3 object to a local directory.
 *
 * <p>The local file name is the object key appended to the target directory.</p>
 *
 * @param s3ObjectSummary S3 object summary for the object to download
 * @param targetDirectory Local target directory to download the object to
 * @throws IOException If any errors were encountered in downloading the object
 */
public void downloadS3Object(S3ObjectSummary s3ObjectSummary,
    String targetDirectory)
    throws IOException {
  final AmazonS3 amazonS3 = getS3Client();
  final GetObjectRequest getObjectRequest = new GetObjectRequest(
      s3ObjectSummary.getBucketName(),
      s3ObjectSummary.getKey());
  // Bug fix: S3Object wraps an open HTTP connection and was never closed,
  // leaking the connection on every download. try-with-resources releases it
  // on all paths, including copy failures.
  try (S3Object s3Object = amazonS3.getObject(getObjectRequest)) {
    final String targetFile =
        StringUtils.removeEnd(targetDirectory, File.separator) + File.separator + s3Object.getKey();
    FileUtils.copyInputStreamToFile(s3Object.getObjectContent(), new File(targetFile));
    LOGGER.info("S3 object downloaded to file: " + targetFile);
  }
}
/***
 * Get list of S3 objects within a S3 bucket qualified by a prefix path.
 *
 * @param bucketName S3 bucket name
 * @param prefix S3 prefix to object
 * @return List of {@link S3ObjectSummary} objects within the bucket qualified by the prefix path
 */
public List<S3ObjectSummary> listS3Bucket(String bucketName,
    String prefix) {
  final ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
      .withBucketName(bucketName)
      .withPrefix(prefix);
  final ObjectListing objectListing = getS3Client().listObjects(listObjectsRequest);
  LOGGER.info("S3 bucket listing for bucket: " + bucketName + " with prefix: " + prefix + " is: " + objectListing);
  return objectListing.getObjectSummaries();
}
/***
 * Creates a new Amazon EC2 client to invoke service methods on Amazon EC2
 *
 * @return Amazon EC2 client to invoke service methods on Amazon EC2
 */
public AmazonEC2 getEc2Client() {
  // NOTE(review): whether this returns a fresh or cached client depends on the
  // supplier's memoization, which is not visible here — confirm before relying
  // on per-call client instances.
  return amazonEC2Supplier.get();
}
/***
 * Creates a new Amazon AutoScaling client to invoke service methods on Amazon AutoScaling
 *
 * @return Amazon AutoScaling client to invoke service methods on Amazon AutoScaling
 */
public AmazonAutoScaling getAmazonAutoScalingClient() {
  // NOTE(review): actual caching behavior is determined by the supplier
  // implementation, which is not visible in this chunk — confirm.
  return amazonAutoScalingSupplier.get();
}
/***
 * Creates a new Amazon S3 client to invoke service methods on Amazon S3
 *
 * @return Amazon S3 client to invoke service methods on Amazon S3
 */
public AmazonS3 getS3Client() {
  // NOTE(review): actual caching behavior is determined by the supplier
  // implementation, which is not visible in this chunk — confirm.
  return amazonS3Supplier.get();
}
}
| 2,153 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/AWSShutdownHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.util.List;
import org.apache.helix.messaging.AsyncCallback;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import org.apache.gobblin.annotation.Alpha;
/**
 * Class that handles the Helix shutdown response and consequently tears down the
 * Amazon AutoScaling resources (launch configurations first, then the groups).
 *
 * <p>Every deletion is best-effort: failures are logged and the handler moves on
 * so that as many resources as possible are cleaned up.</p>
 *
 * @author Abhishek Tiwari
 */
@Alpha
public class AWSShutdownHandler extends AsyncCallback {
  private static final Logger LOGGER = LoggerFactory.getLogger(AWSShutdownHandler.class);

  private final AWSSdkClient awsSdkClient;
  private final Optional<List<String>> optionalLaunchConfigurationNames;
  private final Optional<List<String>> optionalAutoScalingGroupNames;

  /**
   * @param awsSdkClient client used to issue the AWS teardown calls
   * @param optionalLaunchConfigurationNames launch configurations to delete, if any
   * @param optionalAutoScalingGroupNames auto scaling groups to delete, if any
   */
  public AWSShutdownHandler(AWSSdkClient awsSdkClient,
      Optional<List<String>> optionalLaunchConfigurationNames,
      Optional<List<String>> optionalAutoScalingGroupNames) {
    this.awsSdkClient = awsSdkClient;
    this.optionalLaunchConfigurationNames = optionalLaunchConfigurationNames;
    this.optionalAutoScalingGroupNames = optionalAutoScalingGroupNames;
  }

  @Override
  public void onTimeOut() {
    LOGGER.warn("Timeout while waiting for Helix controller and participants shutdown. "
        + "Moving ahead with forced shutdown of Amazon AutoScaling group");
    shutdownASG();
  }

  @Override
  public void onReplyMessage(Message message) {
    LOGGER.info("Successfully shutdown Helix controller and participants shutdown. "
        + "Moving ahead with graceful shutdown of Amazon AutoScaling group");
    shutdownASG();
  }

  /** Tears down launch configurations first, then the auto scaling groups. */
  private void shutdownASG() {
    deleteLaunchConfigurations();
    deleteAutoScalingGroups();
  }

  /** Best-effort deletion of every configured launch configuration. */
  private void deleteLaunchConfigurations() {
    if (!this.optionalLaunchConfigurationNames.isPresent()) {
      return;
    }
    for (String launchConfigurationName : this.optionalLaunchConfigurationNames.get()) {
      try {
        this.awsSdkClient.deleteLaunchConfiguration(launchConfigurationName);
      } catch (Exception e) {
        // Ignore and continue, so that we clean up as many resources as possible
        LOGGER.warn("Issue in deleting launch configuration, please delete manually: " + launchConfigurationName +
            " Continuing to cleanup AutoScalingGroups", e);
      }
    }
  }

  /** Best-effort deletion of every configured auto scaling group. */
  private void deleteAutoScalingGroups() {
    if (!this.optionalAutoScalingGroupNames.isPresent()) {
      return;
    }
    for (String autoScalingGroupName : this.optionalAutoScalingGroupNames.get()) {
      deleteAutoScalingGroup(autoScalingGroupName);
    }
  }

  /** Attempts a graceful delete of one group, falling back to a forced delete. */
  private void deleteAutoScalingGroup(String autoScalingGroupName) {
    try {
      this.awsSdkClient.deleteAutoScalingGroup(autoScalingGroupName, false);
    } catch (Exception e1) {
      LOGGER.warn("Issue in deleting auto scaling group (in graceful mode): " + autoScalingGroupName
          + " Going to try forceful cleanup.", e1);
      try {
        // Delete forcefully
        this.awsSdkClient.deleteAutoScalingGroup(autoScalingGroupName, true);
      } catch (Exception e2) {
        // Ignore and continue, so that we clean up as many resources as possible
        LOGGER.warn("Issue in deleting auto scaling group (in forced mode), please delete manually: " +
            autoScalingGroupName + " Continuing to cleanup other resources", e2);
      }
    }
  }
}
| 2,154 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/Log4jConfigHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import com.google.common.io.Closer;
import org.apache.gobblin.annotation.Alpha;
/**
 * A helper class for programmatically configuring log4j.
 *
 * @author Abhishek Tiwari
 */
@Alpha
public class Log4jConfigHelper {

  /**
   * Update the log4j configuration from a classpath resource.
   *
   * @param targetClass the target class used to get the original log4j configuration file as a resource
   * @param log4jFileName the custom log4j configuration properties file name
   * @throws IOException if the resource is missing or cannot be read
   */
  public static void updateLog4jConfiguration(Class<?> targetClass, String log4jFileName)
      throws IOException {
    // try-with-resources replaces the previous Guava Closer plumbing and
    // guarantees the stream is closed on all paths.
    try (InputStream inputStream = targetClass.getResourceAsStream("/" + log4jFileName)) {
      if (inputStream == null) {
        // Bug fix: a missing resource previously surfaced as an obscure NPE
        // from Properties.load; fail with a descriptive IOException instead.
        throw new IOException("Could not find log4j configuration resource: /" + log4jFileName);
      }
      final Properties originalProperties = new Properties();
      originalProperties.load(inputStream);
      LogManager.resetConfiguration();
      PropertyConfigurator.configure(originalProperties);
    }
  }
}
| 2,155 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/GobblinAWSClusterManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.util.Collections;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterManager;
import org.apache.gobblin.cluster.GobblinHelixJobScheduler;
import org.apache.gobblin.cluster.HelixMessageSubTypes;
import org.apache.gobblin.cluster.JobConfigurationManager;
import org.apache.gobblin.util.JvmUtils;
/**
 * The AWS Cluster master class for Gobblin.
 *
 * <p>
 * This class makes use of super class {@link GobblinClusterManager} to run:
 * 1. {@link GobblinHelixJobScheduler} for scheduling and running Gobblin jobs.
 * 2. {@link HelixManager} to work with Helix and act as Helix controller.
 * 3. {@link JobConfigurationManager} to discover new job configurations and updates to
 * existing job configurations.
 *
 * More AWS specific services can be added in future to this class that are required to be
 * run on Gobblin cluster master.
 * </p>
 *
 * <p>
 * Note: Shutdown initiated by {@link GobblinAWSClusterLauncher} via a Helix message of subtype
 * {@link HelixMessageSubTypes#APPLICATION_MASTER_SHUTDOWN} is handled by super class {@link GobblinClusterManager}
 * </p>
 *
 * @author Abhishek Tiwari
 */
@Alpha
public class GobblinAWSClusterManager extends GobblinClusterManager {
  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinAWSClusterManager.class);

  /**
   * @param clusterName name of the Gobblin cluster this master serves
   * @param applicationId application id required by the super class (fixed to "1" by main(); has no AWS meaning)
   * @param config application configuration
   * @param appWorkDirOptional optional application work directory
   */
  public GobblinAWSClusterManager(String clusterName, String applicationId, Config config,
      Optional<Path> appWorkDirOptional)
      throws Exception {
    super(clusterName, applicationId, config, appWorkDirOptional);
    // Note: JobConfigurationManager and HelixJobScheduler are initialized in {@link GobblinClusterManager}
  }

  /**
   * A custom {@link MultiTypeMessageHandlerFactory} for {@link ControllerUserDefinedMessageHandler}s that
   * handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}.
   */
  private static class ControllerUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
    @Override
    public MessageHandler createHandler(Message message, NotificationContext context) {
      return new ControllerUserDefinedMessageHandler(message, context);
    }

    @Override
    public String getMessageType() {
      return Message.MessageType.USER_DEFINE_MSG.toString();
    }

    // NOTE(review): unlike the methods above, this one lacks @Override — confirm
    // whether it is intended to override MultiTypeMessageHandlerFactory#getMessageTypes
    // and annotate it if so.
    public List<String> getMessageTypes() {
      return Collections.singletonList(getMessageType());
    }

    // No state to reset: this factory is stateless.
    @Override
    public void reset() {
    }

    /**
     * A custom {@link MessageHandler} for handling user-defined messages to the controller.
     *
     * <p>
     * Currently does not handle any user-defined messages. If this class is passed a custom message, it will simply
     * print out a warning and return successfully.
     * </p>
     */
    private static class ControllerUserDefinedMessageHandler extends MessageHandler {
      public ControllerUserDefinedMessageHandler(Message message, NotificationContext context) {
        super(message, context);
      }

      // Logs a warning and reports success so unrecognized user-defined
      // messages never fail the Helix task.
      @Override
      public HelixTaskResult handleMessage() {
        LOGGER.warn(String
            .format("No handling setup for %s message of subtype: %s", Message.MessageType.USER_DEFINE_MSG.toString(),
                this._message.getMsgSubType()));
        final HelixTaskResult helixTaskResult = new HelixTaskResult();
        helixTaskResult.setSuccess(true);
        return helixTaskResult;
      }

      @Override
      public void onError(Exception e, ErrorCode code, ErrorType type) {
        LOGGER.error(
            String.format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type));
      }
    }
  }

  // Builds the CLI options accepted by main(): application name and work dir.
  private static Options buildOptions() {
    final Options options = new Options();
    options.addOption("a", GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME, true, "AWS application name");
    options.addOption("d", GobblinAWSConfigurationKeys.APP_WORK_DIR, true, "Application work directory");
    return options;
  }

  private static void printUsage(Options options) {
    final HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp(GobblinAWSClusterManager.class.getSimpleName(), options);
  }

  /**
   * Entry point for the cluster master process. Parses the CLI options, sets up
   * log4j (unless configured externally via -Dlog4j.configuration), then starts
   * the master inside try-with-resources so it is closed on exit.
   */
  public static void main(String[] args) throws Exception {
    final Options options = buildOptions();
    try {
      final CommandLine cmd = new DefaultParser().parse(options, args);
      // Both options are mandatory; print usage and exit non-zero otherwise.
      if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) ||
          !cmd.hasOption(GobblinAWSConfigurationKeys.APP_WORK_DIR)) {
        printUsage(options);
        System.exit(1);
      }

      // Only install the bundled log4j config when none was supplied externally.
      if (System.getProperty("log4j.configuration") == null) {
        Log4jConfigHelper.updateLog4jConfiguration(GobblinAWSClusterManager.class,
            GobblinAWSConfigurationKeys.GOBBLIN_AWS_LOG4J_CONFIGURATION_FILE);
      }

      LOGGER.info(JvmUtils.getJvmInputArguments());

      // Note: Application id is required param for {@link GobblinClusterManager} super class
      // .. but has not meaning in AWS cluster context, so defaulting to a fixed value
      final String applicationId = "1";
      final String appWorkDir = cmd.getOptionValue(GobblinAWSConfigurationKeys.APP_WORK_DIR);

      try (GobblinAWSClusterManager clusterMaster = new GobblinAWSClusterManager(
          cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME), applicationId,
          ConfigFactory.load(), Optional.of(new Path(appWorkDir)))) {

        clusterMaster.start();
      }
    } catch (ParseException pe) {
      printUsage(options);
      System.exit(1);
    }
  }
}
| 2,156 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/GobblinAWSTaskRunner.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.util.Collections;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.util.EC2MetadataUtils;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterManager;
import org.apache.gobblin.cluster.GobblinHelixTask;
import org.apache.gobblin.cluster.GobblinHelixTaskFactory;
import org.apache.gobblin.cluster.GobblinTaskRunner;
import org.apache.gobblin.cluster.HelixMessageSubTypes;
import org.apache.gobblin.util.JvmUtils;
/**
 * Class running on worker nodes managing services for executing Gobblin
 * {@link org.apache.gobblin.source.workunit.WorkUnit}s.
 *
 * <p>
 * This class makes use of super class {@link GobblinTaskRunner} to run:
 * 1. {@link GobblinHelixTaskFactory} for creating {@link GobblinHelixTask}s that Helix manages
 * to run Gobblin data ingestion tasks.
 * 2. {@link HelixManager} to work with Helix and act as Helix participant to execute tasks.
 *
 * More AWS specific services can be added in future to this class that are required to be
 * run on Gobblin cluster worker.
 * </p>
 *
 * <p>
 * Note: Shutdown initiated by {@link GobblinClusterManager} via a Helix message of subtype
 * {@link HelixMessageSubTypes#WORK_UNIT_RUNNER_SHUTDOWN} is handled by super class {@link GobblinTaskRunner}
 * </p>
 *
 * @author Abhishek Tiwari
 */
@Alpha
public class GobblinAWSTaskRunner extends GobblinTaskRunner {
  // Bug fix: previously initialized with GobblinTaskRunner.class, which
  // mis-attributed every log line from this class to the parent class's logger.
  private static final Logger LOGGER = LoggerFactory.getLogger(GobblinAWSTaskRunner.class);

  /**
   * @param applicationName Gobblin application (cluster) name
   * @param helixInstanceName Helix instance name identifying this participant
   * @param config application configuration
   * @param appWorkDirOptional optional application work directory
   */
  public GobblinAWSTaskRunner(String applicationName, String helixInstanceName, Config config,
      Optional<Path> appWorkDirOptional)
      throws Exception {
    super(applicationName, helixInstanceName, getApplicationId(), getTaskRunnerId(), config,
        appWorkDirOptional);
  }

  @Override
  public List<Service> getServices() {
    // No AWS-specific services yet; delegate to the base task runner's services.
    return super.getServices();
  }

  @Override
  public MultiTypeMessageHandlerFactory getUserDefinedMessageHandlerFactory() {
    return new ParticipantUserDefinedMessageHandlerFactory();
  }

  /**
   * A custom {@link MultiTypeMessageHandlerFactory} for {@link ParticipantUserDefinedMessageHandler}s that
   * handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}.
   */
  private static class ParticipantUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
    @Override
    public MessageHandler createHandler(Message message, NotificationContext context) {
      return new ParticipantUserDefinedMessageHandler(message, context);
    }

    @Override
    public String getMessageType() {
      return Message.MessageType.USER_DEFINE_MSG.toString();
    }

    // NOTE(review): lacks @Override unlike the methods above — confirm whether it
    // overrides MultiTypeMessageHandlerFactory#getMessageTypes and annotate if so.
    public List<String> getMessageTypes() {
      return Collections.singletonList(getMessageType());
    }

    // No state to reset: this factory is stateless.
    @Override
    public void reset() {
    }

    /**
     * A custom {@link MessageHandler} for handling user-defined messages to the controller.
     *
     * <p>
     * Currently does not handle any user-defined messages. If this class is passed a custom message, it will simply
     * print out a warning and return successfully.
     * </p>
     */
    private static class ParticipantUserDefinedMessageHandler extends MessageHandler {
      public ParticipantUserDefinedMessageHandler(Message message, NotificationContext context) {
        super(message, context);
      }

      // Logs a warning and reports success so unrecognized user-defined
      // messages never fail the Helix task.
      @Override
      public HelixTaskResult handleMessage() throws InterruptedException {
        LOGGER.warn(String
            .format("No handling setup for %s message of subtype: %s", Message.MessageType.USER_DEFINE_MSG.toString(),
                this._message.getMsgSubType()));
        final HelixTaskResult helixTaskResult = new HelixTaskResult();
        helixTaskResult.setSuccess(true);
        return helixTaskResult;
      }

      @Override
      public void onError(Exception e, ErrorCode code, ErrorType type) {
        LOGGER.error(
            String.format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type));
      }
    }
  }

  // Application id has no meaning in the AWS context; fixed placeholder to
  // satisfy the {@link GobblinTaskRunner} super constructor.
  private static String getApplicationId() {
    return "1";
  }

  // NOTE(review): assumes the instance's first network interface carries a
  // public IPv4 address; instances in private subnets would fail here — TODO confirm.
  private static String getTaskRunnerId() {
    return EC2MetadataUtils.getNetworkInterfaces().get(0).getPublicIPv4s().get(0);
  }

  /** Builds the CLI options accepted by main(). */
  public static Options buildOptions() {
    final Options options = new Options();
    options.addOption("a", GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME, true, "Application name");
    options.addOption("i", GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME, true, "Helix instance name");
    options.addOption("d", GobblinAWSConfigurationKeys.APP_WORK_DIR, true, "Application work directory");
    return options;
  }

  /**
   * Entry point for the worker process. Parses the CLI options, sets up log4j
   * (unless configured externally via -Dlog4j.configuration), then starts the
   * task runner.
   */
  public static void main(String[] args) throws Exception {
    final Options options = buildOptions();
    try {
      final CommandLine cmd = new DefaultParser().parse(options, args);
      // All three options are mandatory; print usage and exit non-zero otherwise.
      if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) ||
          !cmd.hasOption(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME) ||
          !cmd.hasOption(GobblinAWSConfigurationKeys.APP_WORK_DIR)) {
        printUsage(options);
        System.exit(1);
      }

      if (System.getProperty("log4j.configuration") == null) {
        // The resource path is absolute ("/<file>"), so which class loads it
        // does not matter; kept as GobblinTaskRunner.class as in the original.
        Log4jConfigHelper.updateLog4jConfiguration(GobblinTaskRunner.class,
            GobblinAWSConfigurationKeys.GOBBLIN_AWS_LOG4J_CONFIGURATION_FILE);
      }

      LOGGER.info(JvmUtils.getJvmInputArguments());

      final String applicationName = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME);
      final String helixInstanceName = cmd.getOptionValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME);
      final String appWorkDir = cmd.getOptionValue(GobblinAWSConfigurationKeys.APP_WORK_DIR);

      final GobblinTaskRunner gobblinTaskRunner =
          new GobblinAWSTaskRunner(applicationName, helixInstanceName, ConfigFactory.load(),
              Optional.of(new Path(appWorkDir)));
      gobblinTaskRunner.start();
    } catch (ParseException pe) {
      printUsage(options);
      System.exit(1);
    }
  }
}
| 2,157 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/CloudInitScriptBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.File;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Splitter;
import org.apache.gobblin.annotation.Alpha;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.util.JvmUtils;
import static org.apache.gobblin.aws.GobblinAWSUtils.encodeBase64;
/**
* Class to generate script for launching Gobblin cluster master and workers via cloud-init
* on EC2 instance boot up.
*
* @author Abhishek Tiwari
*/
@Alpha
public class CloudInitScriptBuilder {
private static final Logger LOGGER = LoggerFactory.getLogger(CloudInitScriptBuilder.class);
private static final Splitter SPLITTER = Splitter.on(",").trimResults().omitEmptyStrings();
private static final String STDOUT = "stdout";
private static final String STDERR = "stderr";
private static final String NFS_SHARE_ALL_IPS = "*";
private static final String NFS_SHARE_DEFAULT_OPTS = "rw,sync,no_subtree_check,fsid=1,no_root_squash";
private static final String NFS_CONF_FILE = "/etc/exports";
private static final String NFS_SERVER_INSTALL_CMD = "yum install nfs-utils nfs-utils-lib";
private static final String NFS_SERVER_START_CMD = "/etc/init.d/nfs start";
private static final String NFS_EXPORT_FS_CMD = "exportfs -a";
private static final String NFS_TYPE_4 = "nfs4";
public static final String BASH = "#!/bin/bash";
/***
 * This method generates the script that would be executed by cloud-init module in EC2 instance
 * upon boot up for {@link GobblinAWSClusterManager}.
 *
 * This will generate cloud init shell script that does the following:
 * 1. Mount NFS Server (TODO: To be replaced with EFS soon)
 * 2. Create all prerequisite directories
 * 3. Download cluster configuration from S3
 * 4. Download Gobblin application jars from S3 (TODO: To be replaced via baked in jars in custom Gobblin AMI)
 * 5. Download Gobblin custom jars from S3
 * 6. Launch {@link GobblinAWSClusterManager} java application
 * 7. TODO: Add cron that watches the {@link GobblinAWSClusterManager} application and restarts it if it dies
 *
 * NOTE(review): directory parameters (nfsParentDir, sinkLogRootDir, masterJarsDir)
 * are concatenated without inserting a path separator, so they are presumably
 * expected to end with one — confirm against the callers.
 *
 * @param clusterName Name of the cluster
 * @param nfsParentDir Directory within which NFS directory should be created and mounted
 * @param sinkLogRootDir Log sink root directory
 * @param awsConfDir Directory to save downloaded Gobblin cluster configuration files
 * @param appWorkDir Gobblin application work directory
 * @param masterS3ConfUri S3 URI to download cluster configuration files from
 * @param masterS3ConfFiles Comma separated list of configuration files to download from masterS3ConfUri
 * @param masterS3JarsUri S3 URI to download Gobblin jar files from
 * @param masterS3JarsFiles Comma separated list of jar files to download from masterS3JarUri
 * @param masterJarsDir Directory to save downloaded Gobblin jar files
 * @param masterJvmMemory Xmx memory setting for Gobblin master java application
 * @param masterJvmArgs JVM arguments for Gobblin master application
 * @param gobblinVersion Optional Gobblin version
 * @return Cloud-init script to launch {@link GobblinAWSClusterManager}, Base64-encoded
 */
public static String buildClusterMasterCommand(String clusterName, String nfsParentDir, String sinkLogRootDir,
    String awsConfDir, String appWorkDir,
    String masterS3ConfUri, String masterS3ConfFiles,
    String masterS3JarsUri, String masterS3JarsFiles, String masterJarsDir,
    String masterJvmMemory, Optional<String> masterJvmArgs, Optional<String> gobblinVersion) {
  final StringBuilder cloudInitCmds = new StringBuilder().append(BASH).append("\n");

  final String clusterMasterClassName = GobblinAWSClusterManager.class.getSimpleName();

  // Create NFS server
  // TODO: Replace with EFS (it went into GA on 6/30/2016)
  // Note: Until EFS availability, ClusterMaster is SPOF because we loose NFS when it's relaunched / replaced
  // .. this can be worked around, but would be an un-necessary work
  final String nfsDir = nfsParentDir + clusterName;

  // Export nfsDir to all hosts with the default share options.
  final String nfsShareDirCmd = String.format("echo '%s %s(%s)' | tee --append %s",
      nfsDir, NFS_SHARE_ALL_IPS, NFS_SHARE_DEFAULT_OPTS, NFS_CONF_FILE);
  cloudInitCmds.append("mkdir -p ").append(nfsDir).append(File.separator).append("1").append("\n");
  cloudInitCmds.append(NFS_SERVER_INSTALL_CMD).append("\n");
  cloudInitCmds.append(nfsShareDirCmd).append("\n");
  cloudInitCmds.append(NFS_SERVER_START_CMD).append("\n");
  cloudInitCmds.append(NFS_EXPORT_FS_CMD).append("\n");

  // Create various directories
  cloudInitCmds.append("mkdir -p ").append(sinkLogRootDir).append("\n");
  cloudInitCmds.append("chown -R ec2-user:ec2-user /home/ec2-user/*").append("\n");

  // Setup short variables to save cloud-init script space
  if (gobblinVersion.isPresent()) {
    cloudInitCmds.append("vr=").append(gobblinVersion.get()).append("\n");
  }
  cloudInitCmds.append("cgS3=").append(masterS3ConfUri).append("\n");
  cloudInitCmds.append("cg=").append(awsConfDir).append("\n");
  cloudInitCmds.append("jrS3=").append(masterS3JarsUri).append("\n");
  cloudInitCmds.append("jr=").append(masterJarsDir).append("\n");

  // Download configurations from S3
  final StringBuilder classpath = new StringBuilder();
  final List<String> awsConfs = SPLITTER.splitToList(masterS3ConfFiles);
  for (String awsConf : awsConfs) {
    cloudInitCmds.append(String.format("wget -P \"${cg}\" \"${cgS3}\"%s", awsConf)).append("\n");
  }
  classpath.append(awsConfDir);

  // Download jars from S3
  // TODO: Eventually limit only custom user jars to pulled from S3, load rest from AMI
  final List<String> awsJars = SPLITTER.splitToList(masterS3JarsFiles);
  for (String awsJar : awsJars) {
    cloudInitCmds.append(String.format("wget -P \"${jr}\" \"${jrS3}\"%s", awsJar)).append("\n");
  }
  // NOTE(review): produces "<masterJarsDir>*" — relies on masterJarsDir ending
  // with a separator for the wildcard to expand inside the directory; confirm.
  classpath.append(":").append(masterJarsDir).append("*");

  // TODO: Add cron that brings back master if it dies

  // Launch Gobblin Cluster Master
  // Stdout / stderr are redirected to files under sinkLogRootDir (which is
  // likewise expected to end with a separator).
  final StringBuilder launchGobblinClusterMasterCmd = new StringBuilder()
      .append("java")
      .append(" -cp ").append(classpath)
      .append(" -Xmx").append(masterJvmMemory)
      .append(" ").append(JvmUtils.formatJvmArguments(masterJvmArgs))
      .append(" ").append(GobblinAWSClusterManager.class.getName())
      .append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)
      .append(" ").append(clusterName)
      .append(" --").append(GobblinAWSConfigurationKeys.APP_WORK_DIR)
      .append(" ").append(appWorkDir)
      .append(" 1>").append(sinkLogRootDir)
      .append(clusterMasterClassName).append(".")
      .append("master").append(".")
      .append(CloudInitScriptBuilder.STDOUT)
      .append(" 2>").append(sinkLogRootDir)
      .append(clusterMasterClassName).append(".")
      .append("master").append(".")
      .append(CloudInitScriptBuilder.STDERR);
  cloudInitCmds.append(launchGobblinClusterMasterCmd).append("\n");

  final String cloudInitScript = cloudInitCmds.toString();
  LOGGER.info("Cloud-init script for master node: " + cloudInitScript);
  return encodeBase64(cloudInitScript);
}
/***
* This method generates the script that would be executed by cloud-init module in EC2 instance
* upon boot up for {@link GobblinAWSTaskRunner}.
*
* This will generate cloud init shell script that does the following:
* 1. Mount NFS volume (TODO: To be replaced with EFS soon)
* 2. Create all prerequisite directories
* 3. Download cluster configuration from S3
* 4. Download Gobblin application jars from S3 (TODO: To be replaced via baked in jars in custom Gobblin AMI)
* 5. Download Gobblin custom jars from S3
* 6. Launch {@link GobblinAWSTaskRunner} java application
* 7. TODO: Add cron that watches the {@link GobblinAWSTaskRunner} application and restarts it if it dies
*
* @param clusterName Name of the cluster
* @param nfsParentDir Directory within which NFS directory should be created and mounted
* @param sinkLogRootDir Log sink root directory
* @param awsConfDir Directory to save downloaded Gobblin cluster configuration files
* @param appWorkDir Gobblin application work directory
* @param masterPublicIp IP of Gobblin cluster worker
* @param workerS3ConfUri S3 URI to download cluster configuration files from
* @param workerS3ConfFiles Comma separated list of configuration files to download from workerS3ConfUri
* @param workerS3JarsUri S3 URI to download Gobblin jar files from
* @param workerS3JarsFiles Comma separated list of jar files to download from workerS3JarUri
* @param workerJarsDir Directory to save downloaded Gobblin jar files
* @param workerJvmMemory Xmx memory setting for Gobblin worker java application
* @param workerJvmArgs JVM arguments for Gobblin worker application
* @param gobblinVersion Optional Gobblin version
* @return Cloud-init script to launch {@link GobblinAWSTaskRunner}
*/
  public static String buildClusterWorkerCommand(String clusterName, String nfsParentDir, String sinkLogRootDir,
      String awsConfDir, String appWorkDir, String masterPublicIp,
      String workerS3ConfUri, String workerS3ConfFiles,
      String workerS3JarsUri, String workerS3JarsFiles, String workerJarsDir,
      String workerJvmMemory, Optional<String> workerJvmArgs, Optional<String> gobblinVersion) {
    // NOTE(review): this method emits an exact shell script that is base64-encoded and executed
    // verbatim by cloud-init on worker boot. The statement order below (NFS mount -> directory
    // creation -> S3 downloads -> JVM launch) is significant and must be preserved.
    final StringBuilder cloudInitCmds = new StringBuilder().append(BASH).append("\n");
    final String clusterWorkerClassName = GobblinAWSTaskRunner.class.getSimpleName();
    // Connect to NFS server exported by the cluster master (assumes the master's NFS share
    // for this cluster is already up -- see buildClusterMasterCommand()).
    // TODO: Replace with EFS (it went into GA on 6/30/2016)
    final String nfsDir = nfsParentDir + clusterName;
    final String nfsMountCmd = String.format("mount -t %s %s:%s %s", NFS_TYPE_4, masterPublicIp, nfsDir,
        nfsDir);
    cloudInitCmds.append("mkdir -p ").append(nfsDir).append("\n");
    cloudInitCmds.append(nfsMountCmd).append("\n");
    // Create various other directories (log sink) and hand ownership to the default EC2 user.
    cloudInitCmds.append("mkdir -p ").append(sinkLogRootDir).append("\n");
    cloudInitCmds.append("chown -R ec2-user:ec2-user /home/ec2-user/*").append("\n");
    // Setup short shell variables to save cloud-init script space (user-data has a size limit).
    if (gobblinVersion.isPresent()) {
      cloudInitCmds.append("vr=").append(gobblinVersion.get()).append("\n");
    }
    cloudInitCmds.append("cg0=").append(workerS3ConfUri).append("\n");
    cloudInitCmds.append("cg=").append(awsConfDir).append("\n");
    cloudInitCmds.append("jr0=").append(workerS3JarsUri).append("\n");
    cloudInitCmds.append("jr=").append(workerJarsDir).append("\n");
    // Download configuration files from S3 into the conf dir; the conf dir itself goes on the classpath.
    final StringBuilder classpath = new StringBuilder();
    final List<String> awsConfs = SPLITTER.splitToList(workerS3ConfFiles);
    for (String awsConf : awsConfs) {
      cloudInitCmds.append(String.format("wget -P \"${cg}\" \"${cg0}\"%s", awsConf)).append("\n");
    }
    classpath.append(awsConfDir);
    // Download jars from S3
    // TODO: Limit only custom user jars to pulled from S3, load rest from AMI
    final List<String> awsJars = SPLITTER.splitToList(workerS3JarsFiles);
    for (String awsJar : awsJars) {
      cloudInitCmds.append(String.format("wget -P \"${jr}\" \"${jr0}\"%s", awsJar)).append("\n");
    }
    classpath.append(":").append(workerJarsDir).append("*");
    // Use this instance's private IPv4 (queried from the EC2 instance metadata service)
    // as the Helix instance name, so each worker gets a unique, stable identifier.
    cloudInitCmds.append("pi=`curl http://169.254.169.254/latest/meta-data/local-ipv4`").append("\n");
    // TODO: Add cron that brings back worker if it dies
    // Launch the Gobblin worker JVM; stdout/stderr are redirected into per-instance files
    // under the log sink directory ("<class>.<ip>.stdout|stderr").
    final StringBuilder launchGobblinClusterWorkerCmd = new StringBuilder()
        .append("java")
        .append(" -cp ").append(classpath)
        .append(" -Xmx").append(workerJvmMemory)
        .append(" ").append(JvmUtils.formatJvmArguments(workerJvmArgs))
        .append(" ").append(GobblinAWSTaskRunner.class.getName())
        .append(" --").append(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME)
        .append(" ").append(clusterName)
        .append(" --").append(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME)
        .append(" ").append("$pi")
        .append(" --").append(GobblinAWSConfigurationKeys.APP_WORK_DIR)
        .append(" ").append(appWorkDir)
        .append(" 1>").append(sinkLogRootDir)
        .append(clusterWorkerClassName).append(".")
        .append("$pi").append(".")
        .append(CloudInitScriptBuilder.STDOUT)
        .append(" 2>").append(sinkLogRootDir)
        .append(clusterWorkerClassName).append(".")
        .append("$pi").append(".")
        .append(CloudInitScriptBuilder.STDERR);
    cloudInitCmds.append(launchGobblinClusterWorkerCmd).append("\n");
    final String cloudInitScript = cloudInitCmds.toString();
    LOGGER.info("Cloud-init script for worker node: " + cloudInitScript);
    // The script is delivered to EC2 as base64-encoded user-data.
    return encodeBase64(cloudInitScript);
  }
}
| 2,158 |
0 | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin | Create_ds/gobblin/gobblin-aws/src/main/java/org/apache/gobblin/aws/GobblinAWSUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.aws;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.filefilter.FileFileFilter;
import org.apache.commons.lang.StringUtils;
import org.quartz.utils.FindbugsSuppressWarnings;
import org.slf4j.Logger;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import org.apache.gobblin.annotation.Alpha;
/**
 * An util class for Gobblin on AWS misc functionality.
 *
 * <p>All members are static; instances are never created.</p>
 *
 * @author Abhishek Tiwari
 */
@Alpha
public class GobblinAWSUtils {
  // How long to wait for a graceful ExecutorService shutdown before forcing it.
  private static final long DEFAULT_EXECUTOR_SERVICE_SHUTDOWN_TIME_IN_MINUTES = 2;

  /***
   * Append a slash ie / at the end of input string.
   *
   * @param inputString Input string to append a slash to; must not be null
   * @return String with slash appended (unchanged if it already ends with a slash)
   */
  public static String appendSlash(String inputString) {
    Preconditions.checkNotNull(inputString);
    if (inputString.endsWith("/")) {
      return inputString;
    }
    return inputString + "/";
  }

  /***
   * List and generate classpath string from paths.
   *
   * Note: This is currently unused, and will be brought in to use with custom Gobblin AMI
   *
   * @param paths Paths to list; must not be null
   * @return Classpath string with entries separated by ':'
   */
  public static String getClasspathFromPaths(File... paths) {
    Preconditions.checkNotNull(paths);
    final StringBuilder classpath = new StringBuilder();
    boolean isFirst = true;
    for (File path : paths) {
      if (!isFirst) {
        classpath.append(":");
      }
      final String subClasspath = getClasspathFromPath(path);
      if (subClasspath.length() > 0) {
        classpath.append(subClasspath);
        isFirst = false;
      }
    }
    return classpath.toString();
  }

  /***
   * Build the classpath contribution of a single path: the path itself if it is a regular
   * file, otherwise the ':'-joined absolute paths of the plain files directly inside it.
   *
   * @param path File or directory to turn into classpath entries; may be null
   * @return Classpath fragment, or the empty string if {@code path} is null or unreadable
   */
  private static String getClasspathFromPath(File path) {
    if (null == path) {
      return StringUtils.EMPTY;
    }
    if (!path.isDirectory()) {
      return path.getAbsolutePath();
    }
    // Bug fix: File.list(FilenameFilter) returns bare file names, so the previous code produced
    // classpath entries without their directory prefix. Join absolute paths instead.
    final File[] files = path.listFiles((java.io.FileFilter) FileFileFilter.FILE);
    if (null == files) {
      // listFiles() returns null on an I/O error or when the directory is not readable.
      return StringUtils.EMPTY;
    }
    final List<String> entries = new ArrayList<>(files.length);
    for (File file : files) {
      entries.add(file.getAbsolutePath());
    }
    return Joiner.on(":").skipNulls().join(entries);
  }

  /***
   * Encodes String data using the base64 algorithm and does not chunk the output.
   *
   * @param data String to be encoded.
   * @return Encoded String.
   */
  public static String encodeBase64(String data) {
    // Bug fix: use explicit charsets rather than the platform default (the previous behavior,
    // which required a Findbugs suppression) so the encoding is stable across hosts.
    // Base64 output is pure ASCII, so decoding the encoded bytes as US-ASCII is always safe.
    final byte[] encodedBytes = Base64.encodeBase64(data.getBytes(StandardCharsets.UTF_8));
    return new String(encodedBytes, StandardCharsets.US_ASCII);
  }

  /***
   * Initiates an orderly shutdown in which previously submitted
   * tasks are executed, but no new tasks are accepted.
   * Invocation has no additional effect if already shut down.
   *
   * This also blocks until all tasks have completed execution
   * request, or the timeout occurs, or the current thread is
   * interrupted, whichever happens first.
   * @param clazz {@link Class} that invokes shutdown on the {@link ExecutorService}; used for logging only.
   * @param executorService {@link ExecutorService} to shutdown.
   * @param logger {@link Logger} to log shutdown for invoking class.
   * @throws InterruptedException if shutdown is interrupted.
   */
  public static void shutdownExecutorService(Class<?> clazz,
      ExecutorService executorService, Logger logger) throws InterruptedException {
    executorService.shutdown();
    if (!executorService.awaitTermination(DEFAULT_EXECUTOR_SERVICE_SHUTDOWN_TIME_IN_MINUTES, TimeUnit.MINUTES)) {
      logger.warn("Executor service shutdown timed out.");
      // Graceful shutdown timed out: force it and report the tasks that never ran.
      List<Runnable> pendingTasks = executorService.shutdownNow();
      logger.warn(String
          .format("%s was shutdown instantly. %s tasks were not executed: %s", clazz.getName(), pendingTasks.size(),
              StringUtils.join(pendingTasks, ",")));
    }
  }
}
| 2,159 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/GobblinClusterUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.util.PathUtils;
import static org.apache.gobblin.cluster.GobblinClusterUtils.JAVA_TMP_DIR_KEY;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertEquals;
public class GobblinClusterUtilsTest {
  // Fixture values: application name/id plus the configured and fallback (home) work dirs.
  private static final String TEST_APP_NAME = "appName";
  private static final String TEST_APP_ID = "appId";
  private static final String TEST_WORK_DIR = "file:///foo/bar";
  private static final String DEFAULT_HOME_DIR = "file:///home";
  /**
   * Verifies that {@link GobblinClusterUtils#getAppWorkDirPathFromConfig} uses the configured
   * {@code gobblin.cluster.workDir} when the key is present, and falls back to the
   * filesystem's home directory when it is absent.
   */
  @Test
  public void testGetAppWorkDirPathFromConfig() throws IOException {
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    // Mock FS whose home directory is DEFAULT_HOME_DIR, used to exercise the fallback path.
    FileSystem mockFs = mock(FileSystem.class);
    when(mockFs.getHomeDirectory()).thenReturn(new Path(DEFAULT_HOME_DIR));
    when(mockFs.getUri()).thenReturn(localFs.getUri());
    //Set gobblin.cluster.workDir config
    Config config = ConfigFactory.empty().withValue(GobblinClusterConfigurationKeys.CLUSTER_WORK_DIR,
        ConfigValueFactory.fromAnyRef(TEST_WORK_DIR));
    Path workDirPath = GobblinClusterUtils.getAppWorkDirPathFromConfig(config, localFs, TEST_APP_NAME, TEST_APP_ID);
    assertEquals(PathUtils.combinePaths(TEST_WORK_DIR, TEST_APP_NAME, TEST_APP_ID), workDirPath);
    //Get workdir when gobblin.cluster.workDir is not specified; expect <home>/<appName>/<appId>
    workDirPath = GobblinClusterUtils
        .getAppWorkDirPathFromConfig(ConfigFactory.empty(), mockFs, TEST_APP_NAME, TEST_APP_ID);
    assertEquals(PathUtils.combinePaths(DEFAULT_HOME_DIR, TEST_APP_NAME, TEST_APP_ID), workDirPath);
  }
  /**
   * Verifies that {@link GobblinClusterUtils#setSystemProperties} copies prefixed config entries
   * into JVM system properties without clobbering pre-existing ones, and that values naming a
   * {@code JVM_ARG_VALUE_RESOLVER} are resolved only for keys whitelisted via
   * {@code gobblin.cluster.systemPropertiesList.*} (plus the built-in java tmp dir key).
   *
   * <p>NOTE: the assertions depend on sequential mutation of global JVM system properties,
   * so statement order within this method is significant.</p>
   */
  @Test
  public void testSetSystemProperties() {
    //Set a dummy property before calling GobblinClusterUtils#setSystemProperties() and assert that this property and value
    //exists even after the call to the setSystemProperties() method.
    System.setProperty("prop1", "val1");
    Config config = ConfigFactory.empty().withValue(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX + ".prop2",
        ConfigValueFactory.fromAnyRef("val2"))
        .withValue(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX + ".prop3", ConfigValueFactory.fromAnyRef("val3"));
    GobblinClusterUtils.setSystemProperties(config);
    Assert.assertEquals(System.getProperty("prop1"), "val1");
    Assert.assertEquals(System.getProperty("prop2"), "val2");
    Assert.assertEquals(System.getProperty("prop3"), "val3");
    // Test specifically for key resolution using YARN_CACHE as the example.
    config = config.withValue(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX + "." +
        JAVA_TMP_DIR_KEY, ConfigValueFactory.fromAnyRef(GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.name()))
        .withValue(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX + ".randomKey1",
            ConfigValueFactory.fromAnyRef(GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.name()))
        .withValue(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX + ".randomKey2",
            ConfigValueFactory.fromAnyRef(GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.name()))
        .withValue(GobblinClusterConfigurationKeys.GOBBLIN_CLUSTER_SYSTEM_PROPERTY_PREFIX + ".rejectedKey",
            ConfigValueFactory.fromAnyRef(GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.name()))
        .withValue("gobblin.cluster.systemPropertiesList.YARN_CACHE", ConfigValueFactory.fromAnyRef("randomKey1,randomKey2"));
    GobblinClusterUtils.setSystemProperties(config);
    Assert.assertEquals(System.getProperty(JAVA_TMP_DIR_KEY), GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.getResolution());
    Assert.assertEquals(System.getProperty("randomKey1"), GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.getResolution());
    Assert.assertEquals(System.getProperty("randomKey2"), GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.getResolution());
    // For keys not being added in the list of `gobblin.cluster.systemPropertiesList.YARN_CACHE`, the value wont'
    // be resolved.
    Assert.assertEquals(System.getProperty("rejectedKey"), GobblinClusterUtils.JVM_ARG_VALUE_RESOLVER.YARN_CACHE.name());
  }
}
| 2,160 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/TestHelper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.test.TestingServer;
import org.testng.Assert;
import com.google.common.io.Closer;
import com.google.common.io.Files;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.JobLauncherUtils;
/**
 * Shared constants and utility methods for Gobblin Cluster tests: canned Avro schema and JSON
 * documents, record assertions over written Avro output, and a Curator/ZK client factory.
 *
 * @author Yinan Li
 */
public class TestHelper {
  public static final String TEST_APPLICATION_NAME = "TestApplication";
  public static final String TEST_APPLICATION_ID = "1";
  public static final String TEST_HELIX_INSTANCE_NAME = HelixUtils.getHelixInstanceName("TestInstance", 0);
  public static final String TEST_TASK_RUNNER_ID = "1";
  public static final String TEST_JOB_NAME = "TestJob";
  public static final String TEST_JOB_ID = JobLauncherUtils.newJobId(TEST_JOB_NAME);
  public static final int TEST_TASK_KEY = 0;
  public static final String TEST_TASK_ID = JobLauncherUtils.newTaskId(TEST_JOB_ID, TEST_TASK_KEY);
  public static final String SOURCE_SCHEMA =
      "{\"namespace\":\"example.avro\", \"type\":\"record\", \"name\":\"User\", "
          + "\"fields\":[{\"name\":\"name\", \"type\":\"string\"}, {\"name\":\"favorite_number\", "
          + "\"type\":\"int\"}, {\"name\":\"favorite_color\", \"type\":\"string\"}]}\n";
  public static final String SOURCE_JSON_DOCS =
      "{\"name\": \"Alyssa\", \"favorite_number\": 256, \"favorite_color\": \"yellow\"}\n"
          + "{\"name\": \"Ben\", \"favorite_number\": 7, \"favorite_color\": \"red\"}\n"
          + "{\"name\": \"Charlie\", \"favorite_number\": 68, \"favorite_color\": \"blue\"}";
  public static final String REL_WRITER_FILE_PATH = "avro";
  public static final String WRITER_FILE_NAME = "foo.avro";

  /**
   * Writes the canned {@link #SOURCE_JSON_DOCS} fixture into the given file, creating any
   * missing parent directories first.
   *
   * @param sourceJsonFile destination file for the JSON documents
   * @throws IOException if the directories cannot be created or the write fails
   */
  public static void createSourceJsonFile(File sourceJsonFile) throws IOException {
    Files.createParentDirs(sourceJsonFile);
    Files.write(SOURCE_JSON_DOCS, sourceJsonFile, ConfigurationKeys.DEFAULT_CHARSET_ENCODING);
  }

  /**
   * Asserts that the given Avro file contains exactly the three fixture records
   * (Alyssa, Ben, Charlie) in order.
   *
   * @param outputAvroFile Avro data file produced by the job under test
   * @param schema reader schema for decoding the records
   * @throws IOException if the file cannot be read
   */
  public static void assertGenericRecords(File outputAvroFile, Schema schema) throws IOException {
    try (DataFileReader<GenericRecord> avroReader =
        new DataFileReader<>(outputAvroFile, new GenericDatumReader<GenericRecord>(schema))) {
      Iterator<GenericRecord> records = avroReader.iterator();
      // Expect exactly these names, in insertion order, then end-of-file.
      for (String expectedName : new String[] {"Alyssa", "Ben", "Charlie"}) {
        Assert.assertEquals(records.next().get("name").toString(), expectedName);
      }
      Assert.assertFalse(records.hasNext());
    }
  }

  /**
   * Creates and starts a Curator client against the given in-process ZooKeeper test server,
   * registering it with the supplied {@link Closer} for cleanup.
   *
   * @param testingZKServer in-process ZooKeeper server to connect to
   * @param closer closer that will shut the client down
   * @return a started, connected {@link CuratorFramework}
   * @throws InterruptedException if interrupted while waiting for the connection
   */
  public static CuratorFramework createZkClient(TestingServer testingZKServer, Closer closer)
      throws InterruptedException {
    CuratorFramework zkClient = closer.register(
        CuratorFrameworkFactory.newClient(testingZKServer.getConnectString(), new RetryOneTime(2000)));
    zkClient.start();
    boolean connected = zkClient.blockUntilConnected(60, TimeUnit.SECONDS);
    if (!connected) {
      throw new RuntimeException("Time out waiting to connect to ZK!");
    }
    return zkClient;
  }
}
| 2,161 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/FsJobConfigurationManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Files;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.cluster.event.DeleteJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.UpdateJobConfigArrivalEvent;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.FsSpecConsumer;
import org.apache.gobblin.runtime.api.FsSpecProducer;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobSpecNotFoundException;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecProducer;
import org.apache.gobblin.runtime.job_catalog.NonObservingFSJobCatalog;
import org.apache.gobblin.util.filters.HiddenFilter;
@Slf4j
/**
 * Tests for FsJobConfigurationManager: produces JobSpecs through a filesystem-backed
 * {@code FsSpecProducer}, lets the manager consume them, and verifies that the right
 * job-config events are posted to the (mocked) EventBus and that the backing JobCatalog
 * is updated accordingly.
 */
public class FsJobConfigurationManagerTest {
  private MutableJobCatalog _jobCatalog;
  private FsJobConfigurationManager jobConfigurationManager;
  // Local directories backing the JobCatalog and the FsSpecConsumer, respectively.
  private String jobConfDir = "/tmp/" + this.getClass().getSimpleName() + "/jobCatalog";
  private String fsSpecConsumerPathString = "/tmp/fsJobConfigManagerTest";
  private String jobSpecUriString = "testJobSpec";
  private FileSystem fs;
  private SpecProducer _specProducer;
  // Counters incremented by the mocked EventBus; NOTE: they accumulate across the whole
  // test class (setUp runs once), so assertions below depend on execution order.
  private int newJobConfigArrivalEventCount = 0;
  private int updateJobConfigArrivalEventCount = 0;
  private int deleteJobConfigArrivalEventCount = 0;
  // An EventBus used for communications between services running in the ApplicationMaster
  private EventBus eventBus;
  @BeforeClass
  public void setUp() throws IOException {
    // Mock the EventBus so that post() only tallies the event type instead of dispatching.
    this.eventBus = Mockito.mock(EventBus.class);
    Mockito.doAnswer(invocationOnMock -> {
      Object argument = invocationOnMock.getArguments()[0];
      if (argument instanceof NewJobConfigArrivalEvent) {
        newJobConfigArrivalEventCount++;
      } else if (argument instanceof DeleteJobConfigArrivalEvent) {
        deleteJobConfigArrivalEventCount++;
      } else if (argument instanceof UpdateJobConfigArrivalEvent) {
        updateJobConfigArrivalEventCount++;
      } else {
        throw new IOException("Unexpected event type");
      }
      return null;
    }).when(this.eventBus).post(Mockito.any());
    this.fs = FileSystem.getLocal(new Configuration(false));
    Path jobConfDirPath = new Path(jobConfDir);
    if (!this.fs.exists(jobConfDirPath)) {
      this.fs.mkdirs(jobConfDirPath);
    }
    // Configure catalog dir, spec consumer dir, and a 1-second spec refresh interval.
    Config config = ConfigFactory.empty()
        .withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY, ConfigValueFactory.fromAnyRef(jobConfDir))
        .withValue(FsSpecConsumer.SPEC_PATH_KEY, ConfigValueFactory.fromAnyRef(fsSpecConsumerPathString))
        .withValue(GobblinClusterConfigurationKeys.JOB_SPEC_REFRESH_INTERVAL, ConfigValueFactory.fromAnyRef(1));
    this._jobCatalog = new NonObservingFSJobCatalog(config);
    ((NonObservingFSJobCatalog) this._jobCatalog).startAsync().awaitRunning();
    jobConfigurationManager = new FsJobConfigurationManager(eventBus, config, this._jobCatalog, this.fs);
    _specProducer = new FsSpecProducer(this.fs, config);
  }
  /**
   * Publishes a JobSpec with the given name/version through the spec producer using the
   * requested verb (ADD/UPDATE/DELETE).
   */
  private void addJobSpec(String jobSpecName, String version, String verb)
      throws URISyntaxException, IOException {
    JobSpec jobSpec =
        JobSpec.builder(new URI(Files.getNameWithoutExtension(jobSpecName)))
            .withConfig(ConfigFactory.empty())
            .withTemplate(new URI("FS:///"))
            .withVersion(version)
            .withDescription("test")
            .build();
    SpecExecutor.Verb enumVerb = SpecExecutor.Verb.valueOf(verb);
    switch (enumVerb) {
      case ADD:
        _specProducer.addSpec(jobSpec);
        break;
      case DELETE:
        _specProducer.deleteSpec(jobSpec.getUri());
        break;
      case UPDATE:
        _specProducer.updateSpec(jobSpec);
        break;
      default:
        throw new IOException("Unknown Spec Verb: " + verb);
    }
  }
  /**
   * End-to-end ADD -> UPDATE -> DELETE cycle. After each fetchJobSpecs() call the consumed
   * spec file must be removed from the consumer dir and exactly one matching event posted.
   * The final getJobSpec() lookup is expected to throw JobSpecNotFoundException, proving
   * the DELETE removed the spec from the catalog.
   */
  @Test (expectedExceptions = {JobSpecNotFoundException.class})
  public void testFetchJobSpecs() throws ExecutionException, InterruptedException, URISyntaxException, JobSpecNotFoundException, IOException {
    //Ensure JobSpec is added to JobCatalog
    String verb1 = SpecExecutor.Verb.ADD.name();
    String version1 = "1";
    addJobSpec(jobSpecUriString, version1, verb1);
    this.jobConfigurationManager.fetchJobSpecs();
    JobSpec jobSpec = this._jobCatalog.getJobSpec(new URI(jobSpecUriString));
    Assert.assertTrue(jobSpec != null);
    Assert.assertTrue(jobSpec.getVersion().equals(version1));
    Assert.assertTrue(jobSpec.getUri().getPath().equals(jobSpecUriString));
    //Ensure the JobSpec is deleted from the FsSpecConsumer path.
    Path fsSpecConsumerPath = new Path(fsSpecConsumerPathString);
    Assert.assertEquals(this.fs.listStatus(fsSpecConsumerPath, new HiddenFilter()).length, 0);
    //Ensure NewJobConfigArrivalEvent is posted to EventBus
    Assert.assertEquals(newJobConfigArrivalEventCount, 1);
    Assert.assertEquals(updateJobConfigArrivalEventCount, 0);
    Assert.assertEquals(deleteJobConfigArrivalEventCount, 0);
    //Test that the updated JobSpec has been added to the JobCatalog.
    String verb2 = SpecExecutor.Verb.UPDATE.name();
    String version2 = "2";
    addJobSpec(jobSpecUriString, version2, verb2);
    this.jobConfigurationManager.fetchJobSpecs();
    jobSpec = this._jobCatalog.getJobSpec(new URI(jobSpecUriString));
    Assert.assertTrue(jobSpec != null);
    Assert.assertTrue(jobSpec.getVersion().equals(version2));
    //Ensure the updated JobSpec is deleted from the FsSpecConsumer path.
    Assert.assertEquals(this.fs.listStatus(fsSpecConsumerPath, new HiddenFilter()).length, 0);
    //Ensure UpdateJobConfigArrivalEvent is posted to EventBus
    Assert.assertEquals(newJobConfigArrivalEventCount, 1);
    Assert.assertEquals(updateJobConfigArrivalEventCount, 1);
    Assert.assertEquals(deleteJobConfigArrivalEventCount, 0);
    //Test that the JobSpec has been deleted from the JobCatalog.
    String verb3 = SpecExecutor.Verb.DELETE.name();
    addJobSpec(jobSpecUriString, version2, verb3);
    this.jobConfigurationManager.fetchJobSpecs();
    //Ensure the JobSpec is deleted from the FsSpecConsumer path.
    Assert.assertEquals(this.fs.listStatus(fsSpecConsumerPath, new HiddenFilter()).length, 0);
    //This lookup must throw JobSpecNotFoundException (declared via expectedExceptions above).
    this._jobCatalog.getJobSpec(new URI(jobSpecUriString));
    //Ensure DeleteJobConfigArrivalEvent is posted to EventBus
    Assert.assertEquals(newJobConfigArrivalEventCount, 1);
    Assert.assertEquals(updateJobConfigArrivalEventCount, 1);
    Assert.assertEquals(deleteJobConfigArrivalEventCount, 1);
  }
  /**
   * Verifies that a failure inside fetchJobSpecs() does not kill the scheduled polling:
   * the manager keeps invoking fetchJobSpecs() and its service stays out of the
   * FAILED/TERMINATED states.
   */
  @Test
  public void testException()
      throws Exception {
    FsJobConfigurationManager jobConfigurationManager = Mockito.spy(this.jobConfigurationManager);
    Mockito.doThrow(new ExecutionException(new IOException("Test exception"))).when(jobConfigurationManager).fetchJobSpecs();
    jobConfigurationManager.startUp();
    //Add wait to ensure that fetchJobSpecExecutor thread is scheduled at least once.
    Thread.sleep(2000);
    int numInvocations = Mockito.mockingDetails(jobConfigurationManager).getInvocations().size();
    Mockito.verify(jobConfigurationManager, Mockito.atLeast(1)).fetchJobSpecs();
    Thread.sleep(2000);
    //Verify that there new invocations of fetchJobSpecs()
    Mockito.verify(jobConfigurationManager, Mockito.atLeast(numInvocations + 1)).fetchJobSpecs();
    //Ensure that the JobConfigurationManager Service is running.
    Assert.assertTrue(!jobConfigurationManager.state().equals(Service.State.FAILED) && !jobConfigurationManager.state().equals(Service.State.TERMINATED));
  }
  // Remove the on-disk consumer and catalog directories created by the tests.
  @AfterClass
  public void tearDown() throws IOException {
    Path fsSpecConsumerPath = new Path(fsSpecConsumerPathString);
    if (fs.exists(fsSpecConsumerPath)) {
      fs.delete(fsSpecConsumerPath, true);
    }
    Path jobCatalogPath = new Path(jobConfDir);
    if (fs.exists(jobCatalogPath)) {
      fs.delete(jobCatalogPath, true);
    }
  }
}
| 2,162 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/HelixMessageTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import org.apache.helix.model.Message;
/**
 * An interface for test classes that involve sending and receiving Helix messages.
 *
 * @author Yinan Li
 */
public interface HelixMessageTestBase {

  /**
   * Assert the reception of a message.
   *
   * @param message the message to assert reception for
   */
  // Interface methods are implicitly public; the redundant modifier is dropped per Java convention.
  void assertMessageReception(Message message);
}
| 2,163 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/HelixAssignedParticipantCheckTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.IOException;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Joiner;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.suite.IntegrationBasicSuite;
import org.apache.gobblin.commit.CommitStepException;
import org.apache.gobblin.testing.AssertWithBackoff;
public class HelixAssignedParticipantCheckTest {
private static final String JOB_ID = "job_testJob_345";
private static final String TASK_STATE_FILE = "/tmp/" + HelixAssignedParticipantCheckTest.class.getSimpleName() + "/taskState/_RUNNING";
private IntegrationBasicSuite suite;
private HelixManager helixManager;
private Config helixConfig;
@BeforeClass
public void setUp()
throws Exception {
Config jobConfigOverrides = ClusterIntegrationTestUtils.buildSleepingJob(JOB_ID, TASK_STATE_FILE);
//Set up a Gobblin Helix cluster integration job
suite = new IntegrationBasicSuite(jobConfigOverrides);
helixConfig = suite.getManagerConfig();
}
@Test (groups = {"disabledOnCI"})
//Test disabled on Travis because cluster integration tests are generally flaky on Travis.
public void testExecute() throws Exception {
suite.startCluster();
String clusterName = helixConfig.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
String zkConnectString = helixConfig.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
helixManager = HelixManagerFactory.getZKHelixManager(clusterName, "TestManager",
InstanceType.SPECTATOR, zkConnectString);
//Connect to the previously started Helix cluster
helixManager.connect();
//Ensure that Helix has created a workflow
AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).
assertTrue(ClusterIntegrationTest.isTaskStarted(helixManager, JOB_ID), "Waiting for the job to start...");
//Instantiate config for HelixAssignedParticipantCheck
String helixJobId = Joiner.on("_").join(JOB_ID, JOB_ID);
helixConfig = helixConfig.withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
ConfigValueFactory.fromAnyRef(IntegrationBasicSuite.WORKER_INSTANCE_0))
.withValue(GobblinClusterConfigurationKeys.HELIX_JOB_ID_KEY, ConfigValueFactory.fromAnyRef(helixJobId))
.withValue(GobblinClusterConfigurationKeys.HELIX_PARTITION_ID_KEY, ConfigValueFactory.fromAnyRef(0));
HelixAssignedParticipantCheck check = new HelixAssignedParticipantCheck(helixConfig);
//Ensure that the SleepingTask is running
AssertWithBackoff.create().maxSleepMs(100).timeoutMs(2000).backoffFactor(1).
assertTrue(ClusterIntegrationTest.isTaskRunning(TASK_STATE_FILE),"Waiting for the task to enter running state");
//Run the check. Ensure that the configured Helix instance is indeed the assigned participant
// (i.e. no exceptions thrown).
check.execute();
//Disconnect the helixmanager used to check the assigned participant to force an Exception on the first attempt.
//The test should succeed on the following attempt.
HelixManager helixManagerOriginal = HelixAssignedParticipantCheck.getHelixManager();
helixManagerOriginal.disconnect();
check.execute();
//Ensure that a new HelixManager instance is created.
Assert.assertTrue(HelixAssignedParticipantCheck.getHelixManager() != helixManagerOriginal);
//Create Helix config with invalid partition num. Ensure HelixAssignedParticipantCheck fails.
helixConfig = helixConfig.withValue(GobblinClusterConfigurationKeys.HELIX_PARTITION_ID_KEY, ConfigValueFactory.fromAnyRef(1));
check = new HelixAssignedParticipantCheck(helixConfig);
try {
check.execute();
Assert.fail("Expected to throw CommitStepException");
} catch (CommitStepException e) {
//Expected to throw CommitStepException
Assert.assertTrue(e.getClass().equals(CommitStepException.class));
}
}
public void tearDown() throws IOException, InterruptedException {
//Shutdown cluster
suite.shutdownCluster();
if (helixManager.isConnected()) {
helixManager.disconnect();
}
}
} | 2,164 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.IOException;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.runtime.JobShutdownException;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.AbstractSource;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* A source implementation that does nothing.
*/
public class DummySource extends AbstractSource<String, Integer> {
private static final int NUM_RECORDS_TO_EXTRACT_PER_EXTRACTOR = 10;
private static final int NUM_WORK_UNITS = 1;
@Override
public List<WorkUnit> getWorkunits(SourceState sourceState) {
return Lists.newArrayList();
}
@Override
public Extractor<String, Integer> getExtractor(WorkUnitState state)
throws IOException {
return new DummyExtractor(state);
}
@Override
public void shutdown(SourceState state) {
// Nothing to do
}
/**
* A dummy implementation of {@link Extractor}.
*/
private static class DummyExtractor implements Extractor<String, Integer> {
private final WorkUnitState workUnitState;
private int current;
DummyExtractor(WorkUnitState workUnitState) {
this.workUnitState = workUnitState;
workUnitState.setProp("FOO", "BAR");
this.current = 0;
}
@Override
public String getSchema() {
return "";
}
@Override
public Integer readRecord(Integer reuse)
throws DataRecordException, IOException {
// Simply just get some records and stopped
if (this.current > 10) {
return null;
}
return this.current++;
}
@Override
public long getExpectedRecordCount() {
return DummySource.NUM_RECORDS_TO_EXTRACT_PER_EXTRACTOR;
}
@Override
public long getHighWatermark() {
return this.workUnitState.getHighWaterMark();
}
@Override
public void close()
throws IOException {
// Nothing to do
}
@Override
public void shutdown()
throws JobShutdownException {
// Nothing to do but overwrite unnecessary checking in the base interface.
}
}
} | 2,165 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.util.Properties;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
public class GobblinHelixJobMappingTest {
@Test
void testMapJobNameWithFlowExecutionId() {
Properties props = new Properties();
props.setProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY, "1234");
props.setProperty(ConfigurationKeys.JOB_NAME_KEY, "job1");
String planningJobId = HelixJobsMapping.createPlanningJobId(props);
String actualJobId = HelixJobsMapping.createActualJobId(props);
// The jobID contains the system timestamp that we need to parse out
Assert.assertEquals(planningJobId.substring(0, planningJobId.lastIndexOf("_")), "job_PlanningJobjob1_1234");
Assert.assertEquals(actualJobId.substring(0, actualJobId.lastIndexOf("_")), "job_ActualJobjob1_1234");
}
@Test
void testMapJobNameWithoutFlowExecutionId() {
Properties props = new Properties();
props.setProperty(ConfigurationKeys.JOB_NAME_KEY, "job1");
String planningJobId = HelixJobsMapping.createPlanningJobId(props);
String actualJobId = HelixJobsMapping.createActualJobId(props);
// The jobID contains the system timestamp that we need to parse out
Assert.assertEquals(planningJobId.substring(0, planningJobId.lastIndexOf("_")), "job_PlanningJobjob1");
Assert.assertEquals(actualJobId.substring(0, actualJobId.lastIndexOf("_")), "job_ActualJobjob1");
}
}
| 2,166 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.util.Map;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.event.EventName;
import org.apache.gobblin.runtime.EventMetadataUtils;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskState;
/**
* Unit tests for {@link ClusterEventMetadataGenerator}.
*/
@Test(groups = { "gobblin.cluster" })
public class ClusterEventMetadataGeneratorTest {
public final static Logger LOG = LoggerFactory.getLogger(ClusterEventMetadataGeneratorTest.class);
@Test
public void testProcessedCount() throws Exception {
JobContext jobContext = Mockito.mock(JobContext.class);
JobState jobState = new JobState("jobName", "1234");
TaskState taskState1 = new TaskState();
TaskState taskState2 = new TaskState();
taskState1.setTaskId("1");
taskState1.setProp(ConfigurationKeys.WRITER_RECORDS_WRITTEN, "1");
taskState2.setTaskId("2");
taskState2.setProp(ConfigurationKeys.WRITER_RECORDS_WRITTEN, "22");
jobState.addTaskState(taskState1);
jobState.addTaskState(taskState2);
Mockito.when(jobContext.getJobState()).thenReturn(jobState);
ClusterEventMetadataGenerator metadataGenerator = new ClusterEventMetadataGenerator();
Map<String, String> metadata;
// processed count is not in job cancel event
metadata = metadataGenerator.getMetadata(jobContext, EventName.JOB_CANCEL);
Assert.assertEquals(metadata.get("processedCount"), null);
// processed count is in job complete event
metadata = metadataGenerator.getMetadata(jobContext, EventName.getEnumFromEventId("JobCompleteTimer"));
Assert.assertEquals(metadata.get("processedCount"), "23");
}
@Test
public void testErrorMessage() throws Exception {
JobContext jobContext = Mockito.mock(JobContext.class);
JobState jobState = new JobState("jobName", "1234");
TaskState taskState1 = new TaskState();
TaskState taskState2 = new TaskState();
taskState1.setTaskId("1");
taskState1.setProp(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY, "exception1");
taskState2.setTaskId("2");
taskState2.setProp(ConfigurationKeys.TASK_FAILURE_EXCEPTION_KEY, "exception2");
taskState2.setProp(EventMetadataUtils.TASK_FAILURE_MESSAGE_KEY, "failureMessage2");
jobState.addTaskState(taskState1);
jobState.addTaskState(taskState2);
Mockito.when(jobContext.getJobState()).thenReturn(jobState);
ClusterEventMetadataGenerator metadataGenerator = new ClusterEventMetadataGenerator();
Map<String, String> metadata;
// error message is not in job commit event
metadata = metadataGenerator.getMetadata(jobContext, EventName.JOB_COMMIT);
Assert.assertEquals(metadata.get("message"), null);
// error message is in job failed event
metadata = metadataGenerator.getMetadata(jobContext, EventName.JOB_FAILED);
Assert.assertTrue(metadata.get("message").startsWith("failureMessage"));
Assert.assertTrue(metadata.get("message").contains("exception1"));
Assert.assertTrue(metadata.get("message").contains("exception2"));
}
}
| 2,167 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.util.Collections;
import java.util.List;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.model.Message;
import org.testng.Assert;
/**
* A test implementation of {@link MultiTypeMessageHandlerFactory}.
*
* @author Yinan Li
*/
public class TestShutdownMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
private final HelixMessageTestBase helixMessageTestBase;
public TestShutdownMessageHandlerFactory(HelixMessageTestBase helixMessageTestBase) {
this.helixMessageTestBase = helixMessageTestBase;
}
@Override
public MessageHandler createHandler(Message message, NotificationContext notificationContext) {
return new TestShutdownMessageHandler(message, notificationContext, this.helixMessageTestBase);
}
@Override
public String getMessageType() {
return GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE;
}
public List<String> getMessageTypes() {
return Collections.singletonList(getMessageType());
}
@Override
public void reset() {
}
private static class TestShutdownMessageHandler extends MessageHandler {
private final HelixMessageTestBase helixMessageTestBase;
public TestShutdownMessageHandler(Message message, NotificationContext context,
HelixMessageTestBase helixMessageTestBase) {
super(message, context);
this.helixMessageTestBase = helixMessageTestBase;
}
@Override
public HelixTaskResult handleMessage()
throws InterruptedException {
// Delay handling the message so the ZooKeeper client sees the message
Thread.sleep(1000);
this.helixMessageTestBase.assertMessageReception(_message);
HelixTaskResult result = new HelixTaskResult();
result.setSuccess(true);
return result;
}
@Override
public void onError(Exception e, ErrorCode errorCode, ErrorType errorType) {
Assert.fail();
}
}
}
| 2,168 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.IOException;
import java.net.URL;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.eventbus.EventBus;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.cluster.suite.IntegrationBasicSuite;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.event.ContainerHealthCheckFailureEvent;
import org.apache.gobblin.util.eventbus.EventBusFactory;
/**
* Unit tests for {@link GobblinTaskRunner}.
*
* <p>
* This class uses a {@link TestingServer} as an embedded ZooKeeper server for testing. A
* {@link GobblinClusterManager} instance is used to send the test shutdown request message.
* </p>
*
* @author Yinan Li
*/
@Test(groups = { "gobblin.cluster" })
public class GobblinTaskRunnerTest {
public final static Logger LOG = LoggerFactory.getLogger(GobblinTaskRunnerTest.class);
private static final String JOB_ID = "job_taskRunnerTestJob_" + System.currentTimeMillis();
private static final String TASK_STATE_FILE = "/tmp/" + GobblinTaskRunnerTest.class.getSimpleName() + "/taskState/_RUNNING";
public static final String HADOOP_OVERRIDE_PROPERTY_NAME = "prop";
private TestingServer testingZKServer;
private GobblinTaskRunner gobblinTaskRunner;
private GobblinTaskRunner gobblinTaskRunnerHealthCheck;
private GobblinTaskRunner corruptGobblinTaskRunner;
private GobblinTaskRunner gobblinTaskRunnerFailedReporter;
private GobblinClusterManager gobblinClusterManager;
private String clusterName;
private String corruptHelixInstance;
private TaskAssignmentAfterConnectionRetry suite;
@BeforeClass
public void setUp() throws Exception {
this.testingZKServer = new TestingServer(-1);
LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());
URL url = GobblinTaskRunnerTest.class.getClassLoader().getResource(
GobblinTaskRunnerTest.class.getSimpleName() + ".conf");
Assert.assertNotNull(url, "Could not find resource " + url);
Config config = ConfigFactory.parseURL(url)
.withValue("gobblin.cluster.zk.connection.string",
ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
.withValue(GobblinClusterConfigurationKeys.HADOOP_CONFIG_OVERRIDES_PREFIX + "." + HADOOP_OVERRIDE_PROPERTY_NAME,
ConfigValueFactory.fromAnyRef("value"))
.withValue(GobblinClusterConfigurationKeys.HADOOP_CONFIG_OVERRIDES_PREFIX + "." + "fs.file.impl.disable.cache",
ConfigValueFactory.fromAnyRef("true"))
.resolve();
String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
this.clusterName = config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
HelixUtils.createGobblinHelixCluster(zkConnectionString, this.clusterName);
// Participant
this.gobblinTaskRunner =
new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME, TestHelper.TEST_HELIX_INSTANCE_NAME,
TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID, config, Optional.<Path>absent());
// Participant
String healthCheckInstance = HelixUtils.getHelixInstanceName("HealthCheckHelixInstance", 0);
this.gobblinTaskRunnerHealthCheck =
new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME, healthCheckInstance,
TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID,
config.withValue(GobblinClusterConfigurationKeys.CONTAINER_EXIT_ON_HEALTH_CHECK_FAILURE_ENABLED, ConfigValueFactory.fromAnyRef(true))
, Optional.<Path>absent());
// Participant that fails to start due to metric reporter failures
String instanceName = HelixUtils.getHelixInstanceName("MetricReporterFailureInstance", 0);
Config metricConfig = config.withValue(ConfigurationKeys.METRICS_ENABLED_KEY, ConfigValueFactory.fromAnyRef(true))
.withValue(ConfigurationKeys.METRICS_REPORTING_KAFKA_ENABLED_KEY, ConfigValueFactory.fromAnyRef(true))
.withValue(ConfigurationKeys.METRICS_KAFKA_TOPIC_METRICS, ConfigValueFactory.fromAnyRef("metricTopic"))
.withValue(ConfigurationKeys.GOBBLIN_TASK_METRIC_REPORTING_FAILURE_FATAL, ConfigValueFactory.fromAnyRef(true));
this.gobblinTaskRunnerFailedReporter =
new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME, instanceName,
TestHelper.TEST_APPLICATION_ID, "2", metricConfig, Optional.<Path>absent());
// Participant with a partial Instance set up on Helix/ZK
this.corruptHelixInstance = HelixUtils.getHelixInstanceName("CorruptHelixInstance", 0);
this.corruptGobblinTaskRunner =
new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME, corruptHelixInstance,
TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID, config, Optional.<Path>absent());
// Controller
this.gobblinClusterManager =
new GobblinClusterManager(TestHelper.TEST_APPLICATION_NAME, TestHelper.TEST_APPLICATION_ID, config,
Optional.<Path>absent());
this.gobblinClusterManager.connectHelixManager();
}
@Test
public void testSendReceiveShutdownMessage() throws Exception {
this.gobblinTaskRunner.connectHelixManager();
ExecutorService service = Executors.newSingleThreadExecutor();
service.submit(() -> GobblinTaskRunnerTest.this.gobblinTaskRunner.start());
Logger log = LoggerFactory.getLogger("testSendReceiveShutdownMessage");
// Give Helix some time to start the task runner
AssertWithBackoff.create().logger(log).timeoutMs(20000)
.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
return GobblinTaskRunnerTest.this.gobblinTaskRunner.isStarted();
}
}, "gobblinTaskRunner started");
this.gobblinClusterManager.sendShutdownRequest();
// Give Helix some time to handle the message
AssertWithBackoff.create().logger(log).timeoutMs(20000)
.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
return GobblinTaskRunnerTest.this.gobblinTaskRunner.isStopped();
}
}, "gobblinTaskRunner stopped");
}
@Test (expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*Could not create one or more reporters.*")
public void testStartUpFailsDueToMetricReporterFailure() {
GobblinTaskRunnerTest.this.gobblinTaskRunnerFailedReporter.start();
}
@Test
public void testBuildFileSystemConfig() {
FileSystem fileSystem = this.gobblinTaskRunner.getFs();
Assert.assertEquals(fileSystem.getConf().get(HADOOP_OVERRIDE_PROPERTY_NAME), "value");
}
@Test
public void testConnectHelixManagerWithRetry() {
HelixManager instanceManager = HelixManagerFactory.getZKHelixManager(
clusterName, corruptHelixInstance, InstanceType.PARTICIPANT, testingZKServer.getConnectString());
ClusterIntegrationTestUtils.createPartialInstanceStructure(instanceManager, testingZKServer.getConnectString());
//Ensure that the connecting to Helix without retry will throw a HelixException
try {
corruptGobblinTaskRunner.connectHelixManager();
Assert.fail("Unexpected success in connecting to HelixManager");
} catch (Exception e) {
//Assert that a HelixException is thrown.
Assert.assertTrue(e.getClass().equals(HelixException.class));
}
//Ensure that connect with retry succeeds
corruptGobblinTaskRunner.connectHelixManagerWithRetry();
Assert.assertTrue(true);
}
@Test (groups = {"disabledOnCI"})
public void testTaskAssignmentAfterHelixConnectionRetry()
throws Exception {
Config jobConfigOverrides = ClusterIntegrationTestUtils.buildSleepingJob(JOB_ID, TASK_STATE_FILE);
this.suite = new TaskAssignmentAfterConnectionRetry(jobConfigOverrides);
suite.startCluster();
String zkConnectString = suite.getManagerConfig().getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
String clusterName = suite.getManagerConfig().getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
//A test manager instance for observing the state of the cluster
HelixManager helixManager = HelixManagerFactory.getZKHelixManager(clusterName, "TestManager", InstanceType.SPECTATOR, zkConnectString);
helixManager.connect();
//Ensure that Helix has created a workflow
AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).
assertTrue(ClusterIntegrationTest.isTaskStarted(helixManager, JOB_ID), "Waiting for the job to start...");
//Ensure that the SleepingTask is running
AssertWithBackoff.create().maxSleepMs(100).timeoutMs(2000).backoffFactor(1).
assertTrue(ClusterIntegrationTest.isTaskRunning(TASK_STATE_FILE),"Waiting for the task to enter running state");
helixManager.disconnect();
}
@Test (groups = {"disabledOnCI"}, dependsOnMethods = "testSendReceiveShutdownMessage", expectedExceptions = ExecutionException.class, expectedExceptionsMessageRegExp = ".*ContainerHealthCheckException.*")
public void testShutdownOnHealthCheckFailure() throws Exception {
this.gobblinTaskRunnerHealthCheck.connectHelixManager();
ExecutorService service = Executors.newSingleThreadExecutor();
Future future = service.submit(() -> GobblinTaskRunnerTest.this.gobblinTaskRunnerHealthCheck.start());
Logger log = LoggerFactory.getLogger("testHandleContainerHealthCheckFailure");
// Give Helix some time to start the task runner
AssertWithBackoff.create().logger(log).timeoutMs(20000)
.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
return GobblinTaskRunnerTest.this.gobblinTaskRunnerHealthCheck.isStarted();
}
}, "gobblinTaskRunner started");
EventBus eventBus = EventBusFactory.get(ContainerHealthCheckFailureEvent.CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME,
SharedResourcesBrokerFactory.getImplicitBroker());
eventBus.post(new ContainerHealthCheckFailureEvent(ConfigFactory.empty(), getClass().getName()));
// Give some time to allow GobblinTaskRunner to handle the ContainerHealthCheckFailureEvent
AssertWithBackoff.create().logger(log).timeoutMs(30000)
.assertTrue(new Predicate<Void>() {
@Override public boolean apply(Void input) {
return GobblinTaskRunnerTest.this.gobblinTaskRunnerHealthCheck.isStopped();
}
}, "gobblinTaskRunner stopped");
//Call Future#get() to check and ensure that ContainerHealthCheckException is thrown
future.get();
}
public static class TaskAssignmentAfterConnectionRetry extends IntegrationBasicSuite {
TaskAssignmentAfterConnectionRetry(Config jobConfigOverrides) {
super(jobConfigOverrides);
}
@Override
protected void createHelixCluster() throws Exception {
super.createHelixCluster();
String clusterName = super.getManagerConfig().getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
String zkConnectString = super.getManagerConfig().getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
HelixManager helixManager = HelixManagerFactory
.getZKHelixManager(clusterName, IntegrationBasicSuite.WORKER_INSTANCE_0, InstanceType.PARTICIPANT, zkConnectString);
//Create a partial instance setup
ClusterIntegrationTestUtils.createPartialInstanceStructure(helixManager, zkConnectString);
}
}
@AfterClass
public void tearDown()
throws IOException, InterruptedException {
try {
this.gobblinClusterManager.disconnectHelixManager();
this.gobblinTaskRunner.disconnectHelixManager();
this.corruptGobblinTaskRunner.disconnectHelixManager();
this.gobblinTaskRunnerFailedReporter.disconnectHelixManager();
this.gobblinTaskRunnerHealthCheck.disconnectHelixManager();
if (this.suite != null) {
this.suite.shutdownCluster();
}
} finally {
this.testingZKServer.close();
}
}
}
| 2,169 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.avro.Schema;
import org.apache.gobblin.broker.SharedResourcesBrokerFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.example.simplejson.SimpleJsonConverter;
import org.apache.gobblin.example.simplejson.SimpleJsonSource;
import org.apache.gobblin.metastore.FsStateStore;
import org.apache.gobblin.runtime.AbstractJobLauncher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskExecutor;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.Id;
import org.apache.gobblin.util.SerializationUtils;
import org.apache.gobblin.util.event.ContainerHealthCheckFailureEvent;
import org.apache.gobblin.util.eventbus.EventBusFactory;
import org.apache.gobblin.util.retry.RetryerFactory;
import org.apache.gobblin.writer.AvroDataWriterBuilder;
import org.apache.gobblin.writer.Destination;
import org.apache.gobblin.writer.WriterOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskResult;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import static org.apache.gobblin.cluster.GobblinHelixTaskStateTracker.IS_TASK_METRICS_SCHEDULING_FAILURE_FATAL;
import static org.apache.gobblin.util.retry.RetryerFactory.RETRY_TIMES;
import static org.apache.gobblin.util.retry.RetryerFactory.RETRY_TYPE;
import static org.mockito.Mockito.when;
/**
* Unit tests for {@link GobblinHelixTask}.
*
* <p>
* This class uses a mocked {@link HelixManager} to control the behavior of certain method for testing.
* A {@link TaskExecutor} is used to run the test task and a {@link GobblinHelixTaskStateTracker} is
* also used as being required by {@link GobblinHelixTaskFactory}. The test task writes everything to
* the local file system as returned by {@link FileSystem#getLocal(Configuration)}.
* </p>
*
* @author Yinan Li
*/
@Test(groups = { "gobblin.cluster" })
public class GobblinHelixTaskTest {

  // Executor that would run Gobblin tasks; stopped in tearDown().
  private TaskExecutor taskExecutor;
  // State tracker required by GobblinHelixTaskFactory; configured so metrics-scheduling failures are fatal.
  private GobblinHelixTaskStateTracker taskStateTracker;
  // Task created in testPrepareTask() and executed in testRun().
  private GobblinHelixTask gobblinHelixTask;
  // Separate task instance reserved for testCancel() so testRun()'s task is not reused.
  private GobblinHelixTask gobblinHelixTaskForCancel;
  private HelixManager helixManager;
  private FileSystem localFs;
  // Per-test-class work directory on the local FS; deleted in tearDown().
  private Path appWorkDir;
  private Path taskOutputDir;
  // Counted down by handleContainerHealthCheckFailureEvent() when a failure event is
  // posted on the container-health-check event bus.
  // NOTE(review): the latch is counted down by the handler but never awaited in any
  // test method — confirm whether an await was intended in testPrepareTask().
  private CountDownLatch countDownLatchForFailInTaskCreation;

  /** Sets up a single-thread task executor, a mocked Helix manager, the state tracker and local-FS paths. */
  @BeforeClass
  public void setUp() throws IOException {
    Configuration configuration = new Configuration();
    configuration.setInt(ConfigurationKeys.TASK_EXECUTOR_THREADPOOL_SIZE_KEY, 1);
    this.taskExecutor = new TaskExecutor(configuration);
    this.helixManager = Mockito.mock(HelixManager.class);
    when(this.helixManager.getInstanceName()).thenReturn(GobblinHelixTaskTest.class.getSimpleName());
    Properties stateTrackerProp = new Properties();
    stateTrackerProp.setProperty(IS_TASK_METRICS_SCHEDULING_FAILURE_FATAL, "true");
    this.taskStateTracker = new GobblinHelixTaskStateTracker(stateTrackerProp);
    this.localFs = FileSystem.getLocal(configuration);
    this.appWorkDir = new Path(GobblinHelixTaskTest.class.getSimpleName());
    this.taskOutputDir = new Path(this.appWorkDir, "output");
  }

  /**
   * Serializes a JobState and a WorkUnit to the app work dir, then creates two tasks via
   * {@link GobblinHelixTaskFactory} (stored for the later testRun()/testCancel() methods).
   * Finally, re-creates the factory with a builder whose getFs() throws, and expects task
   * creation/run to fail with a Throwable.
   */
  @Test
  public void testPrepareTask()
      throws IOException, InterruptedException {
    EventBus eventBus = EventBusFactory.get(ContainerHealthCheckFailureEvent.CONTAINER_HEALTH_CHECK_EVENT_BUS_NAME,
        SharedResourcesBrokerFactory.getImplicitBroker());
    eventBus.register(this);
    countDownLatchForFailInTaskCreation = new CountDownLatch(1);
    // Serialize the JobState that will be read later in GobblinHelixTask
    Path jobStateFilePath =
        new Path(appWorkDir, TestHelper.TEST_JOB_ID + "." + AbstractJobLauncher.JOB_STATE_FILE_NAME);
    JobState jobState = new JobState();
    jobState.setJobName(TestHelper.TEST_JOB_NAME);
    jobState.setJobId(TestHelper.TEST_JOB_ID);
    SerializationUtils.serializeState(this.localFs, jobStateFilePath, jobState);
    // Prepare the WorkUnit
    WorkUnit workUnit = WorkUnit.createEmpty();
    prepareWorkUnit(workUnit);
    // Prepare the source Json file
    File sourceJsonFile = new File(this.appWorkDir.toString(), TestHelper.TEST_JOB_NAME + ".json");
    TestHelper.createSourceJsonFile(sourceJsonFile);
    workUnit.setProp(SimpleJsonSource.SOURCE_FILE_KEY, sourceJsonFile.getAbsolutePath());
    // Serialize the WorkUnit into a file
    // expected path is appWorkDir/_workunits/job_id/job_id.wu
    Path workUnitDirPath = new Path(this.appWorkDir, GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME);
    FsStateStore<WorkUnit> wuStateStore = new FsStateStore<>(this.localFs, workUnitDirPath.toString(), WorkUnit.class);
    Path workUnitFilePath = new Path(new Path(workUnitDirPath, TestHelper.TEST_JOB_ID),
        TestHelper.TEST_JOB_NAME + ".wu");
    wuStateStore.put(TestHelper.TEST_JOB_ID, TestHelper.TEST_JOB_NAME + ".wu", workUnit);
    Assert.assertTrue(this.localFs.exists(workUnitFilePath));
    // Prepare the GobblinHelixTask
    Map<String, String> taskConfigMap = Maps.newHashMap();
    taskConfigMap.put(GobblinClusterConfigurationKeys.WORK_UNIT_FILE_PATH, workUnitFilePath.toString());
    taskConfigMap.put(ConfigurationKeys.JOB_NAME_KEY, TestHelper.TEST_JOB_NAME);
    taskConfigMap.put(ConfigurationKeys.JOB_ID_KEY, TestHelper.TEST_JOB_ID);
    taskConfigMap.put(ConfigurationKeys.TASK_KEY_KEY, Long.toString(Id.parse(TestHelper.TEST_JOB_ID).getSequence()));
    TaskConfig taskConfig = new TaskConfig("", taskConfigMap, true);
    TaskCallbackContext taskCallbackContext = Mockito.mock(TaskCallbackContext.class);
    when(taskCallbackContext.getTaskConfig()).thenReturn(taskConfig);
    when(taskCallbackContext.getManager()).thenReturn(this.helixManager);
    TaskDriver taskDriver = createTaskDriverWithMockedAttributes(taskCallbackContext, taskConfig);
    // Fixed-attempt retry config so the failing-builder scenario below terminates quickly.
    TaskRunnerSuiteBase.Builder builder = new TaskRunnerSuiteBase.Builder(ConfigFactory.empty()
        .withValue(RETRY_TYPE, ConfigValueFactory.fromAnyRef(RetryerFactory.RetryType.FIXED_ATTEMPT.name()))
        .withValue(RETRY_TIMES, ConfigValueFactory.fromAnyRef(2))
    );
    TaskRunnerSuiteBase sb = builder.setInstanceName("TestInstance")
        .setApplicationName("TestApplication")
        .setAppWorkPath(appWorkDir)
        .setContainerMetrics(Optional.absent())
        .setFileSystem(localFs)
        .setJobHelixManager(this.helixManager)
        .setApplicationId("TestApplication-1")
        .build();
    GobblinHelixTaskFactory gobblinHelixTaskFactory =
        new GobblinHelixTaskFactory(builder,
            sb.metricContext,
            this.taskStateTracker,
            ConfigFactory.empty(),
            Optional.of(taskDriver));
    // Expect to go through.
    this.gobblinHelixTask = (GobblinHelixTask) gobblinHelixTaskFactory.createNewTask(taskCallbackContext);
    this.gobblinHelixTaskForCancel = (GobblinHelixTask) gobblinHelixTaskFactory.createNewTask(taskCallbackContext);
    // Mock the method getFs() which get called in SingleTask constructor, so that SingleTask could fail and trigger retry,
    // which would also fail eventually with timeout.
    TaskRunnerSuiteBase.Builder builderSpy = Mockito.spy(builder);
    when(builderSpy.getFs()).thenThrow(new RuntimeException("failure on purpose"));
    gobblinHelixTaskFactory =
        new GobblinHelixTaskFactory(builderSpy,
            sb.metricContext,
            this.taskStateTracker,
            ConfigFactory.empty(),
            Optional.of(taskDriver));
    // Expecting the eventBus containing the failure signal when run is called
    try {
      gobblinHelixTaskFactory.createNewTask(taskCallbackContext).run();
    } catch (Throwable t){
      // Any throwable here means the intended failure path was exercised.
      return;
    }
    Assert.fail();
  }

  @Subscribe
  @Test(enabled = false)
  // When a class has "@Test" annotation, TestNG will run all public methods as tests.
  // This specific method is public because eventBus is calling it. To prevent running it as a test, we mark it
  // as "disabled" test.
  public void handleContainerHealthCheckFailureEvent(ContainerHealthCheckFailureEvent event) {
    this.countDownLatchForFailInTaskCreation.countDown();
  }

  /**
   * To test against org.apache.gobblin.cluster.GobblinHelixTask#getPartitionForHelixTask(org.apache.helix.task.TaskDriver)
   * we need to assign the right partition id for each helix task, which would be queried from taskDriver.
   * This method encapsulate all mocking steps for taskDriver object to return expected value.
   */
  private TaskDriver createTaskDriverWithMockedAttributes(TaskCallbackContext taskCallbackContext,
      TaskConfig taskConfig) {
    String helixJobId = Joiner.on("_").join(TestHelper.TEST_JOB_ID, TestHelper.TEST_JOB_ID);
    JobConfig jobConfig = Mockito.mock(JobConfig.class);
    when(jobConfig.getJobId()).thenReturn(helixJobId);
    when(taskCallbackContext.getJobConfig()).thenReturn(jobConfig);
    JobContext mockJobContext = Mockito.mock(JobContext.class);
    // Map this task's id to partition 0 so the partition lookup succeeds.
    Map<String, Integer> taskIdPartitionMap = ImmutableMap.of(taskConfig.getId(), 0);
    when(mockJobContext.getTaskIdPartitionMap()).thenReturn(taskIdPartitionMap);
    TaskDriver taskDriver = Mockito.mock(TaskDriver.class);
    when(taskDriver.getJobContext(Mockito.anyString())).thenReturn(mockJobContext);
    return taskDriver;
  }

  /**
   * Runs the task prepared in testPrepareTask() and verifies it reports COMPLETED and
   * that the expected Avro output file exists with the expected records.
   */
  @Test(dependsOnMethods = "testPrepareTask")
  public void testRun() throws IOException {
    TaskResult taskResult = this.gobblinHelixTask.run();
    System.out.println(taskResult.getInfo());
    Assert.assertEquals(taskResult.getStatus(), TaskResult.Status.COMPLETED);
    File outputAvroFile = new File(this.taskOutputDir.toString(),
        TestHelper.REL_WRITER_FILE_PATH + File.separator + TestHelper.WRITER_FILE_NAME);
    Assert.assertTrue(outputAvroFile.exists());
    Schema schema = new Schema.Parser().parse(TestHelper.SOURCE_SCHEMA);
    TestHelper.assertGenericRecords(outputAvroFile, schema);
  }

  /**
   * Runs the second prepared task on a background thread, cancels it shortly after start,
   * and verifies that the task does not report COMPLETED.
   */
  @Test(dependsOnMethods = "testRun")
  public void testCancel() throws IOException, InterruptedException {
    final TaskResult[] taskResult = new TaskResult[1];
    Thread thread = new Thread(){
      @Override
      public void run() {
        taskResult[0] = gobblinHelixTaskForCancel.run();
      }
    };
    thread.start();
    // NOTE(review): this sleeps only 3 ms before cancelling — presumably meant to give
    // the task a chance to start; confirm whether a longer pause (e.g. 3000 ms) was intended.
    Thread.sleep(3);
    gobblinHelixTaskForCancel.cancel();
    thread.join();
    System.out.println(taskResult[0].getInfo());
    //We can see task failure or task cancelled as task status
    Assert.assertNotEquals(taskResult[0].getStatus(), TaskResult.Status.COMPLETED);
  }

  /** Removes the app work directory and shuts down the executor and state tracker. */
  @AfterClass
  public void tearDown() throws IOException {
    try {
      if (this.localFs.exists(this.appWorkDir)) {
        this.localFs.delete(this.appWorkDir, true);
      }
    } finally {
      this.taskExecutor.stopAsync().awaitTerminated();
      this.taskStateTracker.stopAsync().awaitTerminated();
    }
  }

  /**
   * Populates the given {@link WorkUnit} with everything the task needs:
   * task ids, the SimpleJsonSource/SimpleJsonConverter classes, Avro writer settings,
   * staging/output directories and the source schema.
   */
  private void prepareWorkUnit(WorkUnit workUnit) {
    workUnit.setProp(ConfigurationKeys.TASK_ID_KEY, TestHelper.TEST_TASK_ID);
    workUnit.setProp(ConfigurationKeys.TASK_KEY_KEY, Long.toString(Id.parse(TestHelper.TEST_TASK_ID).getSequence()));
    workUnit.setProp(ConfigurationKeys.SOURCE_CLASS_KEY, SimpleJsonSource.class.getName());
    workUnit.setProp(ConfigurationKeys.CONVERTER_CLASSES_KEY, SimpleJsonConverter.class.getName());
    workUnit.setProp(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, WriterOutputFormat.AVRO.toString());
    workUnit.setProp(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, Destination.DestinationType.HDFS.toString());
    workUnit.setProp(ConfigurationKeys.WRITER_STAGING_DIR, this.appWorkDir.toString() + Path.SEPARATOR + "staging");
    workUnit.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, this.taskOutputDir.toString());
    workUnit.setProp(ConfigurationKeys.WRITER_FILE_NAME, TestHelper.WRITER_FILE_NAME);
    workUnit.setProp(ConfigurationKeys.WRITER_FILE_PATH, TestHelper.REL_WRITER_FILE_PATH);
    workUnit.setProp(ConfigurationKeys.WRITER_BUILDER_CLASS, AvroDataWriterBuilder.class.getName());
    workUnit.setProp(ConfigurationKeys.SOURCE_SCHEMA, TestHelper.SOURCE_SCHEMA);
  }
}
| 2,170 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/JobConfigurationManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import com.google.common.io.Closer;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
* Unit tests for {@link JobConfigurationManager}.
*
* @author Yinan Li
*/
@Test(groups = { "gobblin.cluster" })
public class JobConfigurationManagerTest {

  private static final int NUM_JOB_CONFIG_FILES = 3;
  private static final String JOB_CONFIG_DIR_NAME = JobConfigurationManagerTest.class.getSimpleName();

  // Directory the generated test .job files are written to; named with the cluster's
  // tar.gz suffix constant so the manager resolves it the same way it would a packed config dir.
  private final File jobConfigFileDir = new File(JOB_CONFIG_DIR_NAME + GobblinClusterConfigurationKeys.TAR_GZ_FILE_SUFFIX);
  private final EventBus eventBus = new EventBus();
  private JobConfigurationManager jobConfigurationManager;

  // Job configs received via handleNewJobConfigArrival(); the latch below reaches zero
  // once all NUM_JOB_CONFIG_FILES configs have arrived.
  // NOTE(review): populated from the EventBus dispatch thread and read from the test
  // thread; the latch await in verifyJobConfigs() provides the ordering relied on here.
  private final List<Properties> receivedJobConfigs = Lists.newArrayList();
  private final CountDownLatch countDownLatch = new CountDownLatch(NUM_JOB_CONFIG_FILES);

  /**
   * Writes {@code NUM_JOB_CONFIG_FILES} job config files (each with a distinct
   * {@code foo=bar<i>} property) and starts a {@link JobConfigurationManager}
   * pointed at that directory.
   */
  @BeforeClass
  public void setUp() throws IOException {
    this.eventBus.register(this);

    if (this.jobConfigFileDir.exists()) {
      FileUtils.deleteDirectory(this.jobConfigFileDir);
    }

    // Prepare the test job configuration files
    Assert.assertTrue(this.jobConfigFileDir.mkdirs(), "Failed to create " + this.jobConfigFileDir);
    Closer closer = Closer.create();
    try {
      for (int i = 0; i < NUM_JOB_CONFIG_FILES; i++) {
        File jobConfigFile = new File(this.jobConfigFileDir, "test" + i + ".job");
        Assert.assertTrue(jobConfigFile.createNewFile());
        Properties properties = new Properties();
        properties.setProperty("foo", "bar" + i);
        properties.store(closer.register(Files.newWriter(jobConfigFile, ConfigurationKeys.DEFAULT_CHARSET_ENCODING)),
            "");
      }
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }

    Config config = ConfigFactory.empty().withValue(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY,
        ConfigValueFactory.fromAnyRef(JOB_CONFIG_DIR_NAME));
    this.jobConfigurationManager = new JobConfigurationManager(this.eventBus, config);
    this.jobConfigurationManager.startAsync().awaitRunning();
  }

  /**
   * Waits until every generated job config has been delivered over the event bus and
   * verifies the received {@code foo} property values match what was written in setUp().
   */
  @Test
  public void verifyJobConfigs() throws InterruptedException {
    // Wait for all job configs to be received
    this.countDownLatch.await();

    Set<String> actual = Sets.newHashSet();
    Set<String> expected = Sets.newHashSet();

    // Compare against the shared constant (was a hard-coded 3) so this assertion stays
    // in sync if NUM_JOB_CONFIG_FILES ever changes.
    Assert.assertEquals(this.receivedJobConfigs.size(), NUM_JOB_CONFIG_FILES);
    for (int i = 0; i < NUM_JOB_CONFIG_FILES; i++) {
      actual.add(this.receivedJobConfigs.get(i).getProperty("foo"));
      expected.add("bar" + i);
    }
    Assert.assertEquals(actual, expected);
  }

  /**
   * Verifies {@link JobConfigurationManager#shouldRun} accepts job names matching the
   * given pattern and rejects those that do not.
   */
  @Test
  public void testShouldRun() {
    Pattern pattern = Pattern.compile("testJob1|testJob2");
    Properties jobConfig = new Properties();
    jobConfig.setProperty(ConfigurationKeys.JOB_NAME_KEY, "testJob1");
    Assert.assertTrue(JobConfigurationManager.shouldRun(pattern, jobConfig));
    jobConfig.setProperty(ConfigurationKeys.JOB_NAME_KEY, "testJob2");
    Assert.assertTrue(JobConfigurationManager.shouldRun(pattern, jobConfig));
    jobConfig.setProperty(ConfigurationKeys.JOB_NAME_KEY, "job1");
    Assert.assertFalse(JobConfigurationManager.shouldRun(pattern, jobConfig));
  }

  /** Stops the manager and removes the generated config directory. */
  @AfterClass
  public void tearDown() throws IOException {
    this.jobConfigurationManager.stopAsync().awaitTerminated();
    if (this.jobConfigFileDir.exists()) {
      FileUtils.deleteDirectory(this.jobConfigFileDir);
    }
  }

  // EventBus handler, not a test: public methods in a @Test-annotated class would
  // otherwise be picked up by TestNG, hence the explicit enabled=false.
  @Test(enabled = false)
  @Subscribe
  public void handleNewJobConfigArrival(NewJobConfigArrivalEvent newJobConfigArrivalEvent) {
    this.receivedJobConfigs.add(newJobConfigArrivalEvent.getJobConfig());
    this.countDownLatch.countDown();
  }
}
| 2,171 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/TaskRunnerSuiteForJobFactoryTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.util.Map;
import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskFactory;
import org.testng.Assert;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.cluster.suite.IntegrationJobFactorySuite;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.metrics.MetricContext;
/**
 * A {@link TaskRunnerSuiteThreadModel} variant that swaps in a test job factory so
 * integration tests can intercept Helix job-task creation and distributed-launcher results.
 */
public class TaskRunnerSuiteForJobFactoryTest extends TaskRunnerSuiteThreadModel {

  // Registered under GOBBLIN_JOB_FACTORY_NAME in getTaskFactoryMap() below.
  private TaskFactory testJobFactory;

  public TaskRunnerSuiteForJobFactoryTest(IntegrationJobFactorySuite.TestJobFactorySuiteBuilder builder) {
    super(builder);
    this.testJobFactory = new TestJobFactory(builder, this.metricContext);
  }

  /**
   * Returns the factories Helix should use: the inherited task factory for regular tasks
   * and this class's {@link TestJobFactory} for job-level tasks.
   */
  @Override
  protected Map<String, TaskFactory> getTaskFactoryMap() {
    Map<String, TaskFactory> taskFactoryMap = Maps.newHashMap();
    taskFactoryMap.put(GobblinTaskRunner.GOBBLIN_TASK_FACTORY_NAME, taskFactory);
    taskFactoryMap.put(GobblinTaskRunner.GOBBLIN_JOB_FACTORY_NAME, testJobFactory);
    return taskFactoryMap;
  }

  /** A {@link GobblinHelixJobFactory} that produces {@link TestHelixJobTask} instances. */
  public class TestJobFactory extends GobblinHelixJobFactory {
    public TestJobFactory(IntegrationJobFactorySuite.TestJobFactorySuiteBuilder builder, MetricContext metricContext) {
      super (builder, metricContext);
      this.builder = builder;
    }

    @Override
    public Task createNewTask(TaskCallbackContext context) {
      // Fresh metrics objects per task; the "5" argument is passed straight through to
      // the metrics constructors (presumably a reporting window — confirm in their javadoc).
      return new TestHelixJobTask(context,
          jobsMapping,
          builder,
          new GobblinHelixJobLauncherMetrics("launcherInJobFactory", metricContext, 5),
          new GobblinHelixJobTask.GobblinHelixJobTaskMetrics(metricContext, 5),
          new GobblinHelixMetrics("helixMetricsInJobFactory", metricContext, 5));
    }
  }

  /** Thin subclass of {@link GobblinHelixJobTask}; adds no behavior, exists as a test hook. */
  public class TestHelixJobTask extends GobblinHelixJobTask {
    public TestHelixJobTask(TaskCallbackContext context,
        HelixJobsMapping jobsMapping,
        TaskRunnerSuiteBase.Builder builder,
        GobblinHelixJobLauncherMetrics launcherMetrics,
        GobblinHelixJobTaskMetrics jobTaskMetrics,
        GobblinHelixMetrics helixMetrics) {
      super(context,
          jobsMapping,
          builder,
          launcherMetrics,
          jobTaskMetrics,
          helixMetrics);
    }
  }

  /**
   * Distributed-execution launcher used by the test suite: after the base class extracts
   * the job result, it asserts the planning-job mapping was cleaned up and flags the
   * suite's completion latch.
   */
  @Slf4j
  public static class TestDistributedExecutionLauncher extends GobblinHelixDistributeJobExecutionLauncher {
    public TestDistributedExecutionLauncher(GobblinHelixDistributeJobExecutionLauncher.Builder builder) throws Exception {
      super(builder);
    }

    protected DistributeJobResult getResultFromUserContent() {
      DistributeJobResult rst = super.getResultFromUserContent();
      String jobName = this.jobPlanningProps.getProperty(ConfigurationKeys.JOB_NAME_KEY);
      try {
        // The planning-job mapping should have been removed once the job finished.
        Assert.assertFalse(this.jobsMapping.getPlanningJobId(jobName).isPresent());
      } catch (Exception e) {
        Assert.fail(e.toString());
      }
      // Signal the integration suite that the distributed path ran to completion.
      IntegrationJobFactorySuite.completed.set(true);
      return rst;
    }

    /** Builder alias referenced by configuration to construct the test launcher. */
    @Alias("TestDistributedExecutionLauncherBuilder")
    public static class Builder extends GobblinHelixDistributeJobExecutionLauncher.Builder {
      public TestDistributedExecutionLauncher build() throws Exception {
        return new TestDistributedExecutionLauncher(this);
      }
    }
  }
}
| 2,172 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/SleepingTaskFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import org.apache.gobblin.publisher.DataPublisher;
import org.apache.gobblin.publisher.NoopPublisher;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskContext;
import org.apache.gobblin.runtime.task.TaskFactory;
import org.apache.gobblin.runtime.task.TaskIFace;
/**
 * A {@link TaskFactory} producing {@link SleepingTask} instances, used by cluster tests
 * that need a long-running task.
 */
public class SleepingTaskFactory implements TaskFactory {

  /** Builds a new {@link SleepingTask} bound to the supplied task context. */
  @Override
  public TaskIFace createTask(TaskContext taskContext) {
    final SleepingTask sleepingTask = new SleepingTask(taskContext);
    return sleepingTask;
  }

  /** Sleeping tasks emit no data, so publishing is a no-op. */
  @Override
  public DataPublisher createDataPublisher(JobState.DatasetState datasetState) {
    return new NoopPublisher(datasetState);
  }
}
| 2,173 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/TaskRunnerSuiteForJobTagTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.util.List;
import java.util.Map;
import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskFactory;
import org.testng.Assert;
import com.google.common.collect.Maps;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.cluster.suite.IntegrationJobTagSuite;
import org.apache.gobblin.configuration.ConfigurationKeys;
/**
* A special {@link TaskRunnerSuiteBase} which can verify if the worker gets the correct jobs based on the tag association.
*/
@Slf4j
public class TaskRunnerSuiteForJobTagTest extends TaskRunnerSuiteThreadModel {

  // Wraps the inherited task factory so job names can be validated before delegation.
  private TaskFactory jobTagTestFactory;
  // This worker's instance name; keys into IntegrationJobTagSuite.EXPECTED_JOB_NAMES.
  private String instanceName;

  public TaskRunnerSuiteForJobTagTest(IntegrationJobTagSuite.JobTagTaskRunnerSuiteBuilder builder) {
    super(builder);
    this.instanceName = builder.getInstanceName();
    this.jobTagTestFactory = new JobTagTestFactory(this.taskFactory);
  }

  /** Registers the validating wrapper factory in place of the plain Gobblin task factory. */
  @Override
  protected Map<String, TaskFactory> getTaskFactoryMap() {
    Map<String, TaskFactory> taskFactoryMap = Maps.newHashMap();
    taskFactoryMap.put(GobblinTaskRunner.GOBBLIN_TASK_FACTORY_NAME, jobTagTestFactory);
    return taskFactoryMap;
  }

  /**
   * Delegating {@link TaskFactory} that fails the test if this worker instance receives
   * a task for a job name it is not tagged for.
   */
  public class JobTagTestFactory implements TaskFactory {
    // The real factory every accepted task is delegated to.
    private TaskFactory factory;

    public JobTagTestFactory(TaskFactory factory) {
      this.factory = factory;
    }

    @Override
    public Task createNewTask(TaskCallbackContext context) {
      Map<String, String> configMap = context.getTaskConfig().getConfigMap();
      String jobName = configMap.get(ConfigurationKeys.JOB_NAME_KEY);
      // Only job names listed for this instance in the suite's expectation map are allowed.
      List<String> allowedJobNames = IntegrationJobTagSuite.EXPECTED_JOB_NAMES.get(TaskRunnerSuiteForJobTagTest.this.instanceName);
      if (allowedJobNames.contains(jobName)) {
        log.info("{} has job name {}", instanceName, jobName);
      } else {
        Assert.fail(instanceName + " should not receive " + jobName);
      }
      return this.factory.createNewTask(context);
    }
  }
}
| 2,174 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/HelixUtilsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.IOException;
import java.net.URL;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobDag;
import org.apache.helix.task.TargetState;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.WorkflowConfig;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import static org.testng.Assert.*;
/**
* Unit tests for {@link HelixUtils}.
*
* @author Yinan Li
*/
@Test(groups = { "gobblin.cluster" })
public class HelixUtilsTest {

  private Configuration configuration;
  private FileSystem fileSystem;
  // NOTE(review): the token fixture below is only created in setUp() and deleted in
  // tearDown(); no current test exercises it — confirm whether it can be removed.
  private Path tokenFilePath;
  private Token<?> token;

  @BeforeClass
  public void setUp() throws IOException {
    this.configuration = new Configuration();
    this.fileSystem = FileSystem.getLocal(this.configuration);
    this.tokenFilePath = new Path(HelixUtilsTest.class.getSimpleName(), "token");
    this.token = new Token<>();
    this.token.setKind(new Text("test"));
    this.token.setService(new Text("test"));
  }

  /**
   * Verifies that a HOCON resource resolves substitutions correctly and that
   * {@link ConfigUtils#configToProperties} preserves every key as a string value.
   */
  @Test
  public void testConfigToProperties() {
    URL url = HelixUtilsTest.class.getClassLoader().getResource(HelixUtilsTest.class.getSimpleName() + ".conf");
    // Uses the statically imported TestNG assertions consistently (the class previously
    // mixed Assert.assertX with the static imports).
    assertNotNull(url, "Could not find resource " + url);

    Config config = ConfigFactory.parseURL(url).resolve();
    assertEquals(config.getString("k1"), "v1");
    assertEquals(config.getString("k2"), "v1");
    assertEquals(config.getInt("k3"), 1000);
    assertTrue(config.getBoolean("k4"));
    assertEquals(config.getLong("k5"), 10000);

    Properties properties = ConfigUtils.configToProperties(config);
    assertEquals(properties.getProperty("k1"), "v1");
    assertEquals(properties.getProperty("k2"), "v1");
    assertEquals(properties.getProperty("k3"), "1000");
    assertEquals(properties.getProperty("k4"), "true");
    assertEquals(properties.getProperty("k5"), "10000");
  }

  /**
   * Verifies {@code HelixUtils.getWorkflowIdsFromJobNames} maps a Gobblin job name to the
   * Helix workflow containing it, using a fully mocked {@link TaskDriver}.
   */
  @Test
  public void testGetWorkunitIdForJobNames() throws GobblinHelixUnexpectedStateException {
    final String HELIX_JOB = "job";
    final String GOBBLIN_JOB_NAME = "gobblin-job-name";

    TaskDriver driver = Mockito.mock(TaskDriver.class);
    WorkflowConfig workflowCfg = Mockito.mock(WorkflowConfig.class);
    JobDag dag = Mockito.mock(JobDag.class);
    JobConfig jobCfg = Mockito.mock(JobConfig.class);
    TaskConfig taskCfg = Mockito.mock(TaskConfig.class);

    /**
     * Mocks for setting up the workflow, job dag, job names, etc.
     *
     * Example of task cfg
     * "mapFields" : {
     *   "006d6d2b-4b8b-4c1b-877b-b7fb51d9295c" : {
     *     "TASK_SUCCESS_OPTIONAL" : "true",
     *     "job.id" : "job_KafkaHdfsStreamingTracking_1668738617409",
     *     "job.name" : "KafkaHdfsStreamingTracking",
     *     "task.id" : "task_KafkaHdfsStreamingTracking_1668738617409_179",
     *     "gobblin.cluster.work.unit.file.path" : "<SOME PATH>",
     *     "TASK_ID" : "006d6d2b-4b8b-4c1b-877b-b7fb51d9295c"
     *   },
     */
    Mockito.when(driver.getWorkflows()).thenReturn(ImmutableMap.of(
        "workflow-1", workflowCfg
    ));
    Mockito.when(workflowCfg.getTargetState()).thenReturn(TargetState.START);
    Mockito.when(workflowCfg.getJobDag()).thenReturn(dag);
    Mockito.when(dag.getAllNodes()).thenReturn(new HashSet<>(Arrays.asList(HELIX_JOB)));
    Mockito.when(driver.getJobConfig(HELIX_JOB)).thenReturn(jobCfg);
    Mockito.when(jobCfg.getTaskConfigMap()).thenReturn(ImmutableMap.of("stub-guid", taskCfg));
    Mockito.when(taskCfg.getConfigMap()).thenReturn(ImmutableMap.of(ConfigurationKeys.JOB_NAME_KEY, GOBBLIN_JOB_NAME));

    assertEquals(
        HelixUtils.getWorkflowIdsFromJobNames(driver, Arrays.asList(GOBBLIN_JOB_NAME)),
        ImmutableMap.of(GOBBLIN_JOB_NAME, "workflow-1"));
  }

  /**
   * Verifies that a {@code null} workflow config in the driver's workflow map surfaces as
   * {@link GobblinHelixUnexpectedStateException}.
   */
  @Test(expectedExceptions = GobblinHelixUnexpectedStateException.class)
  public void testGetWorkunitIdForJobNamesWithInvalidHelixState() throws GobblinHelixUnexpectedStateException {
    final String GOBBLIN_JOB_NAME = "gobblin-job-name";
    TaskDriver driver = Mockito.mock(TaskDriver.class);

    Map<String, WorkflowConfig> workflowConfigMap = new HashMap<>();
    workflowConfigMap.put("null-workflow-to-throw-exception", null);
    Mockito.when(driver.getWorkflows()).thenReturn(workflowConfigMap);

    try {
      HelixUtils.getWorkflowIdsFromJobNames(driver, Arrays.asList(GOBBLIN_JOB_NAME));
    } catch (GobblinHelixUnexpectedStateException e) {
      // Print for diagnostics, then rethrow so TestNG sees the expected exception.
      e.printStackTrace();
      throw e;
    }
  }

  /** Cleans up the (currently unused) token fixture directory. */
  @AfterClass
  public void tearDown() throws IOException {
    if (this.fileSystem.exists(this.tokenFilePath.getParent())) {
      this.fileSystem.delete(this.tokenFilePath.getParent(), true);
    }
  }
}
| 2,175 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/SleepingCustomTaskSource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.util.List;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.runtime.task.TaskUtils;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.util.test.HelloWorldSource;
/**
 * A {@link HelloWorldSource} whose work units are tagged to run through
 * {@link SleepingTaskFactory}, giving cluster tests a long-running custom task.
 */
public class SleepingCustomTaskSource extends HelloWorldSource {

  @Override
  public List<WorkUnit> getWorkunits(SourceState state) {
    // Let the parent build the work units, then attach the sleeping-task factory to each.
    final List<WorkUnit> workUnits = super.getWorkunits(state);
    workUnits.forEach(wu -> TaskUtils.setTaskFactoryClass(wu, SleepingTaskFactory.class));
    return workUnits;
  }
}
| 2,176 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/SingleTaskRunnerMainTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import org.testng.annotations.Test;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainArgumentsDataProvider.TEST_CLUSTER_CONF;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainArgumentsDataProvider.TEST_JOB_ID;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainArgumentsDataProvider.TEST_WORKUNIT;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
/**
 * Verifies that {@link SingleTaskRunnerMain#run} parses CLI arguments into the
 * builder's setters and executes the task runner the builder produces.
 */
public class SingleTaskRunnerMainTest {
  @Test
  public void testRun()
      throws Exception {
    // Arrange: spy the builder so its setters can be verified, and stub out the
    // runner it creates so no real task is executed.
    final SingleTaskRunnerBuilder runnerBuilder = spy(SingleTaskRunnerBuilder.class);
    final SingleTaskRunner stubbedRunner = mock(SingleTaskRunner.class);
    doReturn(stubbedRunner).when(runnerBuilder).createSingleTaskRunner();

    // Act: drive the main entry point with the canonical argument vector.
    final SingleTaskRunnerMain runnerMain = new SingleTaskRunnerMain(runnerBuilder);
    runnerMain.run(SingleTaskRunnerMainArgumentsDataProvider.getArgs());

    // Assert: every CLI option reached its builder setter and the runner ran.
    verify(runnerBuilder).setClusterConfigFilePath(TEST_CLUSTER_CONF);
    verify(runnerBuilder).setJobId(TEST_JOB_ID);
    verify(runnerBuilder).setWorkUnitFilePath(TEST_WORKUNIT);
    verify(stubbedRunner).run();
  }
}
| 2,177 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/HelixRetriggeringJobCallableTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.util.Optional;
import java.util.Properties;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.assertj.core.util.Lists;
import org.junit.Assert;
import org.mockito.Mockito;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.api.MutableJobCatalog;
import org.apache.gobblin.runtime.job_catalog.NonObservingFSJobCatalog;
import org.apache.gobblin.scheduler.SchedulerService;
/**
 * Unit tests for {@link HelixRetriggeringJobCallable}.
 */
public class HelixRetriggeringJobCallableTest {
  // Scratch directory used as both the FS job-catalog root and the cluster app work dir.
  public static final String TMP_DIR = "/tmp/" + HelixRetriggeringJobCallable.class.getSimpleName();
  @BeforeClass
  public void setUp() {
    File tmpDir = new File(TMP_DIR);
    if (!tmpDir.exists()) {
      tmpDir.mkdirs();
    }
    // NOTE(review): deleteOnExit() only removes empty directories, so any files written
    // under TMP_DIR during the run will keep it around after the JVM exits.
    tmpDir.deleteOnExit();
  }
  /**
   * Builds a {@link GobblinHelixJobLauncher} via the scheduler's centralized-mode path and
   * checks that the generated job id exists and does not carry the actual-job-name prefix.
   */
  @Test
  public void testBuildJobLauncher()
      throws Exception {
    // Point the FS job catalog at the scratch directory created in setUp().
    Config config = ConfigFactory.empty().withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
        ConfigValueFactory.fromAnyRef(TMP_DIR));
    MutableJobCatalog jobCatalog = new NonObservingFSJobCatalog(config);
    SchedulerService schedulerService = new SchedulerService(new Properties());
    Path appWorkDir = new Path(TMP_DIR);
    // The scheduler is constructed with a mocked Helix manager; only the pieces the
    // launcher-building path touches need to be real.
    GobblinHelixJobScheduler jobScheduler = new GobblinHelixJobScheduler(ConfigFactory.empty(), getMockHelixManager(), Optional.empty(), null,
        appWorkDir, Lists.emptyList(), schedulerService, jobCatalog);
    GobblinHelixJobLauncher jobLauncher = HelixRetriggeringJobCallable.buildJobLauncherForCentralizedMode(jobScheduler, getDummyJob());
    String jobId = jobLauncher.getJobId();
    Assert.assertNotNull(jobId);
    Assert.assertFalse(jobId.contains(GobblinClusterConfigurationKeys.ACTUAL_JOB_NAME_PREFIX));
  }
  /** Minimal job properties: a dummy source with job lock and state store disabled. */
  private Properties getDummyJob() {
    Properties jobProps = new Properties();
    jobProps.setProperty(ConfigurationKeys.JOB_NAME_KEY, "dummyJob");
    jobProps.setProperty(ConfigurationKeys.JOB_LOCK_ENABLED_KEY, "false");
    jobProps.setProperty(ConfigurationKeys.STATE_STORE_ENABLED, "false");
    jobProps.setProperty(ConfigurationKeys.SOURCE_CLASS_KEY, DummySource.class.getName());
    return jobProps;
  }
  /**
   * A Helix manager whose accessors are all stubbed to return null — presumably the
   * launcher-building path never dereferences them; verify if NPEs ever surface here.
   */
  private HelixManager getMockHelixManager() {
    HelixManager helixManager = Mockito.mock(HelixManager.class);
    Mockito.when(helixManager.getClusterManagmentTool()).thenReturn(null);
    Mockito.when(helixManager.getClusterName()).thenReturn(null);
    Mockito.when(helixManager.getHelixDataAccessor()).thenReturn(null);
    Mockito.when(helixManager.getHelixPropertyStore()).thenReturn(null);
    return helixManager;
  }
}
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/ClusterIntegrationTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.manager.zk.ZkClient;
import org.testng.Assert;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.configuration.ConfigurationKeys;
public class ClusterIntegrationTestUtils {
  /**
   * A utility method to build a job that uses the {@link SleepingCustomTaskSource} with the provided config overrides,
   * using a default Helix job timeout of 10 seconds.
   * @param jobId job id to set on the config
   * @param taskStateFile file path the sleeping task uses to record its state
   * @return job config with overrides
   */
  public static Config buildSleepingJob(String jobId, String taskStateFile) {
    return buildSleepingJob(jobId, taskStateFile, 10L);
  }

  /**
   * A utility method to build a job that uses the {@link SleepingCustomTaskSource} with the provided config overrides.
   * @param jobId job id to set on the config
   * @param taskStateFile file path the sleeping task uses to record its state
   * @param helixJobTimeoutSecs Helix job timeout, in seconds
   * @return job config with overrides
   */
  public static Config buildSleepingJob(String jobId, String taskStateFile, Long helixJobTimeoutSecs) {
    Config jobConfig = ConfigFactory.empty().withValue(SleepingTask.TASK_STATE_FILE_KEY, ConfigValueFactory.fromAnyRef(taskStateFile))
        .withValue(ConfigurationKeys.JOB_ID_KEY, ConfigValueFactory.fromAnyRef(jobId))
        .withValue(ConfigurationKeys.SOURCE_CLASS_KEY, ConfigValueFactory.fromAnyRef(SleepingCustomTaskSource.class.getName()))
        .withValue(GobblinClusterConfigurationKeys.HELIX_JOB_TIMEOUT_ENABLED_KEY, ConfigValueFactory.fromAnyRef(Boolean.TRUE))
        .withValue(GobblinClusterConfigurationKeys.HELIX_JOB_TIMEOUT_SECONDS, ConfigValueFactory.fromAnyRef(helixJobTimeoutSecs));
    return jobConfig;
  }

  /**
   * A utility method that creates a partial instance structure in ZK: a Helix instance
   * is set up by a connect/disconnect cycle, then some of its znodes are deleted to
   * simulate an incomplete instance registration.
   */
  public static void createPartialInstanceStructure(HelixManager helixManager, String zkConnectString) {
    //Connect and disconnect the helixManager to create a Helix Instance set up.
    try {
      helixManager.connect();
      helixManager.disconnect();
    } catch (Exception e) {
      Assert.fail("Failed to connect to ZK");
    }
    //Delete ERRORS/HISTORY/STATUSUPDATES znodes under INSTANCES to simulate partial instance set up.
    ZkClient zkClient = new ZkClient(zkConnectString);
    try {
      zkClient.delete(PropertyPathBuilder.instanceError(helixManager.getClusterName(), helixManager.getInstanceName()));
      zkClient.delete(PropertyPathBuilder.instanceHistory(helixManager.getClusterName(), helixManager.getInstanceName()));
      zkClient.delete(PropertyPathBuilder.instanceStatusUpdate(helixManager.getClusterName(), helixManager.getInstanceName()));
    } finally {
      // Fix: the ZkClient was previously never closed, leaking the ZooKeeper session
      // and its event threads for the remainder of the test JVM.
      zkClient.close();
    }
  }
}
| 2,179 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/ContainerHealthMetricsServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
/**
 * Tests {@link ContainerHealthMetricsService#runOneIteration()}: GC stats must be
 * rolled over on each iteration and all reported counters must be sane.
 */
public class ContainerHealthMetricsServiceTest {
  @Test
  public void testRunOneIteration() throws Exception {
    Config config = ConfigFactory.empty();
    ContainerHealthMetricsService service = new ContainerHealthMetricsService(config);
    service.runOneIteration();
    //Ensure lastGcStats updated after each iteration
    Assert.assertTrue(service.getCurrentGcStats() == service.getLastGcStats());
    ContainerHealthMetricsService.GcStats previousLastGcStats = service.getLastGcStats();
    assertGcMetricsNonNegative(service);
    double processCpuTime1 = service.processCpuTime.get();
    Thread.sleep(10);
    service.runOneIteration();
    // The second iteration must roll over to a fresh GcStats object.
    Assert.assertTrue(service.getCurrentGcStats() == service.getLastGcStats());
    Assert.assertTrue(service.getLastGcStats() != previousLastGcStats);
    double processCpuTime2 = service.processCpuTime.get();
    // CPU time is cumulative, so it can only grow between iterations.
    Assert.assertTrue( processCpuTime1 <= processCpuTime2);
    assertGcMetricsNonNegative(service);
  }

  /**
   * Asserts that every GC count/duration gauge is non-negative.
   * Fix: the original asserted {@code minorGcDuration} twice and never checked
   * {@code majorGcDuration} (copy-paste error); the major-GC duration is now covered.
   */
  private static void assertGcMetricsNonNegative(ContainerHealthMetricsService service) {
    Assert.assertTrue( service.minorGcCount.get() >= 0);
    Assert.assertTrue( service.minorGcDuration.get() >= 0);
    Assert.assertTrue( service.majorGcCount.get() >= 0);
    Assert.assertTrue( service.majorGcDuration.get() >= 0);
    Assert.assertTrue( service.unknownGcCount.get() >= 0);
    Assert.assertTrue( service.unknownGcDuration.get() >= 0);
  }
}
| 2,180 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/SingleTaskLauncherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.testng.annotations.Test;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.util.GobblinProcessBuilder;
import org.apache.gobblin.util.SystemPropertiesWrapper;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Verifies that {@link SingleTaskLauncher#launch} spawns a child JVM with the
 * expected java binary, classpath and {@link SingleTaskRunnerMain} CLI arguments.
 */
public class SingleTaskLauncherTest {

  private static final String JOB_ID = "1";
  private static final String JAVAHOME = "/javahome";
  private static final String TEST_CLASS_PATH = "foo.jar:bar.jar";
  private static final String WORK_UNIT_PATH = "workUnit.wu";
  private static final String CLUSTER_CONFIG_CONF_PATH = "clusterConfig.conf";

  @Test
  public void testLaunch()
      throws Exception {
    // Stub the JVM environment so the generated command line is fully deterministic.
    final SystemPropertiesWrapper javaProps = mock(SystemPropertiesWrapper.class);
    when(javaProps.getJavaHome()).thenReturn(JAVAHOME);
    when(javaProps.getJavaClassPath()).thenReturn(TEST_CLASS_PATH);

    // Mock the process builder so no real subprocess is ever started.
    final GobblinProcessBuilder builder = mock(GobblinProcessBuilder.class);
    final Process stubProcess = mock(Process.class);
    when(builder.start(any())).thenReturn(stubProcess);

    final SingleTaskLauncher launcher = new SingleTaskLauncher(
        builder, javaProps, Paths.get(CLUSTER_CONFIG_CONF_PATH), ConfigFactory.empty());

    final Process launched = launcher.launch(JOB_ID, Paths.get(WORK_UNIT_PATH));

    // Full command line expected for the child task-runner JVM.
    final List<String> expectedCommand = new ArrayList<>(Arrays.asList(
        "/javahome/bin/java", "-cp", TEST_CLASS_PATH,
        "org.apache.gobblin.cluster.SingleTaskRunnerMain", "--cluster_config_file_path",
        CLUSTER_CONFIG_CONF_PATH, "--job_id", JOB_ID, "--work_unit_file_path", WORK_UNIT_PATH));
    verify(builder).start(expectedCommand);
    assertThat(launched).isEqualTo(stubProcess);
  }
}
| 2,181 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/SingleTaskRunnerMainArgumentsDataProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
/**
 * Supplies the canonical CLI argument vector used by {@link SingleTaskRunnerMain} tests.
 */
class SingleTaskRunnerMainArgumentsDataProvider {

  static final String TEST_JOB_ID = "1";
  static final String TEST_WORKUNIT = "/_workunits/store/workunit.wu";
  static final String TEST_CLUSTER_CONF = "/cluster.conf";

  /**
   * @return the complete argument vector: job id, work-unit file path and
   *         cluster-config file path, each preceded by its option flag
   */
  static String[] getArgs() {
    return new String[]{
        "--job_id", TEST_JOB_ID,
        "--work_unit_file_path", TEST_WORKUNIT,
        "--cluster_config_file_path", TEST_CLUSTER_CONF};
  }
}
| 2,182 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/GobblinClusterManagerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.net.URL;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.event.ClusterManagerShutdownRequest;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* Unit tests for {@link GobblinClusterManager}.
*
* <p>
* This class uses a {@link TestingServer} as an embedded ZooKeeper server for testing. The Curator
* framework is used to provide a ZooKeeper client. This class also uses the {@link HelixManager} to
* act as a testing Helix participant to receive the container (running the {@link GobblinTaskRunner})
* shutdown request message.
* </p>
*
* @author Yinan Li
*/
@Test(groups = { "gobblin.cluster" })
public class GobblinClusterManagerTest implements HelixMessageTestBase {
  public final static Logger LOG = LoggerFactory.getLogger(GobblinClusterManagerTest.class);
  // Hadoop property injected through the cluster's hadoop-override config prefix;
  // its presence on the manager's FileSystem is checked in testBuildFileSystemConfig.
  public static final String HADOOP_OVERRIDE_PROPERTY_NAME = "prop";
  // Embedded ZooKeeper backing the test Helix cluster.
  private TestingServer testingZKServer;
  // Helix PARTICIPANT standing in for a task runner, so it can receive shutdown messages.
  private HelixManager helixManager;
  private GobblinClusterManager gobblinClusterManager;
  @BeforeClass
  public void setUp() throws Exception {
    // Use a random ZK port
    this.testingZKServer = new TestingServer(-1);
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());
    URL url = GobblinClusterManagerTest.class.getClassLoader().getResource(
        GobblinClusterManager.class.getSimpleName() + ".conf");
    // NOTE(review): if url is null the message prints "null" — consider a resource-name literal instead.
    Assert.assertNotNull(url, "Could not find resource " + url);
    // Base config from the classpath resource, overridden with the embedded ZK address,
    // Helix task quotas, and two Hadoop config overrides (one custom, one disabling the FS cache).
    Config config = ConfigFactory.parseURL(url)
        .withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
        .withValue(GobblinClusterConfigurationKeys.HELIX_TASK_QUOTA_CONFIG_KEY,
            ConfigValueFactory.fromAnyRef("DEFAULT:1,OTHER:10"))
        .withValue(GobblinClusterConfigurationKeys.HADOOP_CONFIG_OVERRIDES_PREFIX + "." + HADOOP_OVERRIDE_PROPERTY_NAME,
            ConfigValueFactory.fromAnyRef("value"))
        .withValue(GobblinClusterConfigurationKeys.HADOOP_CONFIG_OVERRIDES_PREFIX + "." + "fs.file.impl.disable.cache",
            ConfigValueFactory.fromAnyRef("true"))
        .resolve();
    String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    HelixUtils.createGobblinHelixCluster(zkConnectionString,
        config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));
    // Register a participant that handles SHUTDOWN messages so sendShutdownRequest()
    // has a live recipient to deliver to.
    this.helixManager = HelixManagerFactory
        .getZKHelixManager(config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.PARTICIPANT, zkConnectionString);
    this.helixManager.connect();
    this.helixManager.getMessagingService().registerMessageHandlerFactory(GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE,
        new TestShutdownMessageHandlerFactory(this));
    this.gobblinClusterManager =
        new GobblinClusterManager(GobblinClusterManagerTest.class.getSimpleName(), TestHelper.TEST_APPLICATION_ID, config,
            Optional.<Path>absent());
    // The manager listens for shutdown-request events on its own event bus.
    this.gobblinClusterManager.getEventBus().register(this.gobblinClusterManager);
    this.gobblinClusterManager.connectHelixManager();
  }
  /** Reads the number of pending Helix messages for the test instance straight from ZK. */
  static class GetInstanceMessageNumFunc implements Function<Void, Integer> {
    private final CuratorFramework curatorFramework;
    private final String testName;
    public GetInstanceMessageNumFunc(String testName, CuratorFramework curatorFramework) {
      this.curatorFramework = curatorFramework;
      this.testName = testName;
    }
    @Override
    public Integer apply(Void input) {
      try {
        // Count children of the instance's MESSAGES znode; each child is one queued message.
        return this.curatorFramework.getChildren().forPath(String
            .format("/%s/INSTANCES/%s/MESSAGES", this.testName,
                TestHelper.TEST_HELIX_INSTANCE_NAME)).size();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }
  /** Verifies the quota config string is split into per-type ratios on the Helix ClusterConfig. */
  @Test
  public void testQuotaConfig() throws Exception {
    this.gobblinClusterManager.configureHelixQuotaBasedTaskScheduling();
    ClusterConfig clusterConfig =
        this.gobblinClusterManager.multiManager.getJobClusterHelixManager().getConfigAccessor()
            .getClusterConfig(GobblinClusterManagerTest.class.getSimpleName());
    Assert.assertEquals(clusterConfig.getTaskQuotaRatio("DEFAULT"), "1");
    Assert.assertEquals(clusterConfig.getTaskQuotaRatio("OTHER"), "10");
  }
  /**
   * Sends a shutdown request and watches the participant's ZK message queue:
   * the message must first appear (queued) and then disappear (processed).
   */
  @Test
  public void testSendShutdownRequest() throws Exception {
    Logger log = LoggerFactory.getLogger("testSendShutdownRequest");
    Closer closer = Closer.create();
    try {
      CuratorFramework curatorFramework = TestHelper.createZkClient(this.testingZKServer, closer);
      final GetInstanceMessageNumFunc getMessageNumFunc =
          new GetInstanceMessageNumFunc(GobblinClusterManagerTest.class.getSimpleName(),
              curatorFramework);
      AssertWithBackoff assertWithBackoff =
          AssertWithBackoff.create().logger(log).timeoutMs(30000);
      this.gobblinClusterManager.sendShutdownRequest();
      Assert.assertEquals(curatorFramework.checkExists().forPath(String
          .format("/%s/INSTANCES/%s/MESSAGES", GobblinClusterManagerTest.class.getSimpleName(),
              TestHelper.TEST_HELIX_INSTANCE_NAME)).getVersion(), 0);
      assertWithBackoff.assertEquals(getMessageNumFunc, 1, "1 message queued");
      // Give Helix sometime to handle the message
      assertWithBackoff.assertEquals(getMessageNumFunc, 0, "all messages processed");
    } finally {
      closer.close();
    }
  }
  /**
   * Posting a shutdown-request event on the event bus must eventually disconnect
   * the manager from Helix. Runs after testSendShutdownRequest, which needs a live manager.
   */
  @Test(dependsOnMethods = "testSendShutdownRequest")
  public void testHandleClusterManagerShutdownRequest() throws Exception {
    Logger log = LoggerFactory.getLogger("testHandleClusterManagerShutdownRequest");
    this.gobblinClusterManager.getEventBus().post(new ClusterManagerShutdownRequest());
    AssertWithBackoff.create().logger(log).timeoutMs(20000)
        .assertTrue(new Predicate<Void>() {
          @Override public boolean apply(Void input) {
            return !GobblinClusterManagerTest.this.gobblinClusterManager.isHelixManagerConnected();
          }
        }, "Cluster Manager shutdown");
  }
  /** The hadoop-override property from setUp() must be present on the manager's FileSystem conf. */
  @Test
  public void testBuildFileSystemConfig() {
    FileSystem fileSystem = this.gobblinClusterManager.getFs();
    Assert.assertEquals(fileSystem.getConf().get(HADOOP_OVERRIDE_PROPERTY_NAME), "value");
  }
  @AfterClass
  public void tearDown() throws Exception {
    try {
      if (this.helixManager.isConnected()) {
        this.helixManager.disconnect();
      }
      this.gobblinClusterManager.disconnectHelixManager();
    } catch (Throwable t) {
      Assert.fail();
    } finally {
      // Stop the embedded ZK server last, after all Helix connections are torn down.
      this.testingZKServer.close();
    }
  }
  // Callback invoked by TestShutdownMessageHandlerFactory when the participant
  // receives a message; enabled=false keeps TestNG from treating it as a test.
  @Test(enabled = false)
  @Override
  public void assertMessageReception(Message message) {
    Assert.assertEquals(message.getMsgType(), GobblinHelixConstants.SHUTDOWN_MESSAGE_TYPE);
    Assert.assertEquals(message.getMsgSubType(), HelixMessageSubTypes.WORK_UNIT_RUNNER_SHUTDOWN.toString());
  }
}
| 2,183 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/GobblinHelixJobLauncherTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.avro.Schema;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.WorkflowConfig;
import org.apache.helix.task.WorkflowContext;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import lombok.Getter;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.FsDatasetStateStore;
import org.apache.gobblin.runtime.JobContext;
import org.apache.gobblin.runtime.JobException;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.listeners.AbstractJobListener;
import org.apache.gobblin.runtime.listeners.JobListener;
import org.apache.gobblin.util.ClassAliasResolver;
import org.apache.gobblin.util.ConfigUtils;
/**
* Unit tests for {@link GobblinHelixJobLauncher}.
*
* <p>
* This class uses a {@link TestingServer} as an embedded ZooKeeper server for testing. This class
* also uses the {@link HelixManager} to act as a testing Helix controller to be passed into the
* {@link GobblinHelixJobLauncher} instance. A {@link GobblinTaskRunner} is also used to run
* the single task of the test job. A {@link FsDatasetStateStore} is used to check the state store
* after the job is done. The test job writes everything to the local file system as returned by
* {@link FileSystem#getLocal(Configuration)}.
* </p>
*
* @author Yinan Li
*/
@Test(groups = { "gobblin.cluster" })
public class GobblinHelixJobLauncherTest {
public final static Logger LOG = LoggerFactory.getLogger(GobblinHelixJobLauncherTest.class);
private HelixManager helixManager;
private FileSystem localFs;
private Path appWorkDir;
private GobblinTaskRunner gobblinTaskRunner;
private DatasetStateStore datasetStateStore;
private Thread thread;
private final Closer closer = Closer.create();
private Config baseConfig;
  @BeforeClass
  public void setUp() throws Exception {
    // Embedded ZK on a random port backs the test Helix cluster; the closer tears it down.
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());
    URL url = GobblinHelixJobLauncherTest.class.getClassLoader().getResource(
        GobblinHelixJobLauncherTest.class.getSimpleName() + ".conf");
    // NOTE(review): if url is null the message prints "null" — a resource-name literal would be clearer.
    Assert.assertNotNull(url, "Could not find resource " + url);
    this.appWorkDir = new Path(GobblinHelixJobLauncherTest.class.getSimpleName());
    // Prepare the source Json file
    File sourceJsonFile = new File(this.appWorkDir.toString(), TestHelper.TEST_JOB_NAME + ".json");
    TestHelper.createSourceJsonFile(sourceJsonFile);
    // Base config: the classpath resource, overridden with the embedded ZK address,
    // the generated source file, and job-state persistence in the state store.
    baseConfig = ConfigFactory.parseURL(url)
        .withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
        .withValue(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL,
            ConfigValueFactory.fromAnyRef(sourceJsonFile.getAbsolutePath()))
        .withValue(ConfigurationKeys.JOB_STATE_IN_STATE_STORE, ConfigValueFactory.fromAnyRef("true"))
        .resolve();
    String zkConnectingString = baseConfig.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    String helixClusterName = baseConfig.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
    HelixUtils.createGobblinHelixCluster(zkConnectingString, helixClusterName);
    // This manager acts as the Helix CONTROLLER handed to the job launchers under test.
    this.helixManager = HelixManagerFactory
        .getZKHelixManager(helixClusterName, TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.CONTROLLER,
            zkConnectingString);
    // Disconnect Helix on teardown via the closer.
    this.closer.register(new Closeable() {
      @Override
      public void close() throws IOException {
        helixManager.disconnect();
      }
    });
    this.helixManager.connect();
    this.localFs = FileSystem.getLocal(new Configuration());
    // Remove the app work dir on teardown via the closer.
    this.closer.register(new Closeable() {
      @Override
      public void close() throws IOException {
        if (localFs.exists(appWorkDir)) {
          localFs.delete(appWorkDir, true);
        }
      }
    });
    // The task runner executes the test job's single task; it is started on a
    // background thread because start() blocks.
    this.gobblinTaskRunner =
        new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME, TestHelper.TEST_HELIX_INSTANCE_NAME,
            TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID, baseConfig, Optional.of(appWorkDir));
    // State store used by the tests to inspect persisted dataset states after a job run.
    String stateStoreType = ConfigUtils.getString(baseConfig, ConfigurationKeys.STATE_STORE_TYPE_KEY,
        ConfigurationKeys.DEFAULT_STATE_STORE_TYPE);
    ClassAliasResolver<DatasetStateStore.Factory> resolver =
        new ClassAliasResolver<>(DatasetStateStore.Factory.class);
    DatasetStateStore.Factory stateStoreFactory =
        resolver.resolveClass(stateStoreType).newInstance();
    this.datasetStateStore = stateStoreFactory.createStateStore(baseConfig);
    this.thread = new Thread(new Runnable() {
      @Override
      public void run() {
        gobblinTaskRunner.start();
      }
    });
    this.thread.start();
  }
static Properties generateJobProperties(Config baseConfig, String jobNameSuffix, String jobIdSuffix) {
Properties properties = ConfigUtils.configToProperties(baseConfig);
String jobName = properties.getProperty(ConfigurationKeys.JOB_NAME_KEY) + jobNameSuffix;
properties.setProperty(ConfigurationKeys.JOB_NAME_KEY, jobName);
properties.setProperty(ConfigurationKeys.JOB_ID_KEY, "job_" + jobName + jobIdSuffix);
properties.setProperty(ConfigurationKeys.WRITER_FILE_PATH, jobName);
// expiry time should be more than the time needed for the job to complete
// otherwise JobContext will become null. This is how Helix work flow works.
properties.setProperty(GobblinClusterConfigurationKeys.HELIX_WORKFLOW_EXPIRY_TIME_SECONDS, "5");
return properties;
}
private File getJobOutputFile(Properties properties) {
return new File(properties.getProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR),
properties.getProperty(ConfigurationKeys.WRITER_FILE_PATH) + File.separator + properties
.getProperty(ConfigurationKeys.WRITER_FILE_NAME));
}
  /**
   * Launches a single job end-to-end through {@link GobblinHelixJobLauncher} and checks
   * the written output records plus the committed dataset state in the state store.
   */
  public void testLaunchJob() throws Exception {
    final ConcurrentHashMap<String, Boolean> runningMap = new ConcurrentHashMap<>();
    // Normal job launcher
    final Properties properties = generateJobProperties(this.baseConfig, "1", "_1504201348470");
    final GobblinHelixJobLauncher gobblinHelixJobLauncher = this.closer.register(
        new GobblinHelixJobLauncher(properties, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
            java.util.Optional.empty()));
    gobblinHelixJobLauncher.launchJob(null);
    // The job must have produced an Avro output file matching the source schema.
    final File jobOutputFile = getJobOutputFile(properties);
    Assert.assertTrue(jobOutputFile.exists());
    Schema schema = new Schema.Parser().parse(TestHelper.SOURCE_SCHEMA);
    TestHelper.assertGenericRecords(jobOutputFile, schema);
    // The state store must contain exactly one COMMITTED dataset state with one committed task.
    List<JobState.DatasetState> datasetStates = this.datasetStateStore.getAll(properties.getProperty(ConfigurationKeys.JOB_NAME_KEY),
        FsDatasetStateStore.CURRENT_DATASET_STATE_FILE_SUFFIX + FsDatasetStateStore.DATASET_STATE_STORE_TABLE_SUFFIX);
    Assert.assertEquals(datasetStates.size(), 1);
    JobState.DatasetState datasetState = datasetStates.get(0);
    Assert.assertEquals(datasetState.getCompletedTasks(), 1);
    Assert.assertEquals(datasetState.getState(), JobState.RunningState.COMMITTED);
    Assert.assertEquals(datasetState.getTaskStates().size(), 1);
    Assert.assertEquals(datasetState.getTaskStates().get(0).getWorkingState(), WorkUnitState.WorkingState.COMMITTED);
  }
private static class SuspendJobListener extends AbstractJobListener {
@Getter
private AtomicInteger completes = new AtomicInteger();
private CountDownLatch stg1;
private CountDownLatch stg2;
public SuspendJobListener (CountDownLatch stg1, CountDownLatch stg2) {
this.stg1 = stg1;
this.stg2 = stg2;
}
@Override
public void onJobStart (JobContext jobContext) throws Exception {
stg1.countDown();
stg2.await();
}
@Override
public void onJobCompletion(JobContext jobContext) throws Exception {
completes.addAndGet(1);
}
}
public void testLaunchMultipleJobs() throws Exception {
final ConcurrentHashMap<String, Boolean> runningMap = new ConcurrentHashMap<>();
// Job launcher(1) to test parallel job running
final Properties properties1 = generateJobProperties(this.baseConfig, "2", "_1504201348471");
final GobblinHelixJobLauncher gobblinHelixJobLauncher1 = this.closer.register(
new GobblinHelixJobLauncher(properties1, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
java.util.Optional.empty()));
// Job launcher(2) to test parallel job running
final Properties properties2 = generateJobProperties(this.baseConfig, "2", "_1504201348472");
final GobblinHelixJobLauncher gobblinHelixJobLauncher2 = this.closer.register(
new GobblinHelixJobLauncher(properties2, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
java.util.Optional.empty()));
CountDownLatch stg1 = new CountDownLatch(1);
CountDownLatch stg2 = new CountDownLatch(1);
CountDownLatch stg3 = new CountDownLatch(1);
SuspendJobListener testListener = new SuspendJobListener(stg1, stg2);
(new Thread(() -> {
try {
gobblinHelixJobLauncher1.launchJob(testListener);
stg3.countDown();
} catch (JobException e) {
}
})).start();
// Wait for the first job to start
stg1.await();
// When first job is in the middle of running, launch the second job (which should do NOOP because previous job is still running)
gobblinHelixJobLauncher2.launchJob(testListener);
stg2.countDown();
// Wait for the first job to finish
stg3.await();
Assert.assertEquals(testListener.getCompletes().get() == 1, true);
}
public void testTimeout() throws Exception {
final ConcurrentHashMap<String, Boolean> runningMap = new ConcurrentHashMap<>();
final Properties props = generateJobProperties(this.baseConfig, "testTimeoutTest", "_12345");
props.setProperty(GobblinClusterConfigurationKeys.HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS, "0");
final GobblinHelixJobLauncher gobblinHelixJobLauncher = this.closer.register(
new GobblinHelixJobLauncher(props, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
java.util.Optional.empty()));
// using launchJobImpl because we do not want to swallow the exception
Assert.assertThrows(JobException.class, () -> gobblinHelixJobLauncher.launchJobImpl(null));
}
public void testCancelJobOnFailureDuringLaunch() throws Exception {
final ConcurrentHashMap<String, Boolean> runningMap = new ConcurrentHashMap<>();
final Properties props = generateJobProperties(this.baseConfig, "testDoesCancelOnFailure", "_12345");
props.setProperty(GobblinClusterConfigurationKeys.HELIX_WORKFLOW_SUBMISSION_TIMEOUT_SECONDS, "0");
final GobblinHelixJobLauncher gobblinHelixJobLauncher = this.closer.register(
new GobblinHelixJobLauncher(props, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
java.util.Optional.empty()));
// The launchJob will throw an exception (see testTimeout test) and we expect the launcher to swallow the exception,
// then call still properly call cancel. We use the listener to confirm the cancel hook was correctly called once
JobListener mockListener = Mockito.mock(JobListener.class);
gobblinHelixJobLauncher.launchJob(mockListener);
Mockito.verify(mockListener).onJobCancellation(Mockito.any(JobContext.class));
}
public void testNoCancelWhenJobCompletesSuccessfully() throws Exception {
final ConcurrentHashMap<String, Boolean> runningMap = new ConcurrentHashMap<>();
final Properties props = generateJobProperties(this.baseConfig, "testDoesNotCancelOnSuccess", "_12345");
final GobblinHelixJobLauncher gobblinHelixJobLauncher = this.closer.register(
new GobblinHelixJobLauncher(props, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
java.util.Optional.empty()));
// When the job finishes successfully, the cancellation hook should not be invoked
JobListener mockListener = Mockito.mock(JobListener.class);
gobblinHelixJobLauncher.launchJob(mockListener);
Mockito.verify(mockListener, Mockito.never()).onJobCancellation(Mockito.any(JobContext.class));
}
  /**
   * Verifies that closing a launcher asynchronously deletes its Helix workflow, job context,
   * work-unit/task-state directories and job.state file — and that a second workflow whose name
   * shares a prefix with the first is NOT deleted by the first launcher's cleanup.
   */
  @Test(enabled = false, dependsOnMethods = {"testLaunchJob", "testLaunchMultipleJobs"})
  public void testJobCleanup() throws Exception {
    final ConcurrentHashMap<String, Boolean> runningMap = new ConcurrentHashMap<>();
    final Properties properties = generateJobProperties(this.baseConfig, "3", "_1504201348473");
    final GobblinHelixJobLauncher gobblinHelixJobLauncher =
        new GobblinHelixJobLauncher(properties, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
            java.util.Optional.empty());
    // Job id "33" shares the "3" prefix with the first job on purpose (prefix-cleanup check below).
    final Properties properties2 = generateJobProperties(this.baseConfig, "33", "_1504201348474");
    final GobblinHelixJobLauncher gobblinHelixJobLauncher2 =
        new GobblinHelixJobLauncher(properties2, this.helixManager, this.appWorkDir, ImmutableList.<Tag<?>>of(), runningMap,
            java.util.Optional.empty());
    gobblinHelixJobLauncher.launchJob(null);
    gobblinHelixJobLauncher2.launchJob(null);
    final TaskDriver taskDriver = new TaskDriver(this.helixManager);
    final String jobIdKey1 = properties.getProperty(ConfigurationKeys.JOB_ID_KEY);
    final String jobIdKey2 = properties2.getProperty(ConfigurationKeys.JOB_ID_KEY);
    org.apache.helix.task.JobContext jobContext1 = taskDriver.getJobContext(jobIdKey1);
    org.apache.helix.task.JobContext jobContext2 = taskDriver.getJobContext(jobIdKey2);
    waitForWorkFlowStartup(taskDriver, jobIdKey1);
    waitForWorkFlowStartup(taskDriver, jobIdKey2);
    // job context should be present until close
    Assert.assertNotNull(jobContext1);
    Assert.assertNotNull(jobContext2);
    gobblinHelixJobLauncher.close();
    // workflow deleted asynchronously after close
    waitForWorkFlowCleanup(taskDriver, jobIdKey1);
    jobContext1 = taskDriver.getJobContext(jobIdKey1);
    // job context should have been deleted
    Assert.assertNull(jobContext1);
    // workflow should have been deleted
    WorkflowConfig workflowConfig = taskDriver.getWorkflowConfig(jobIdKey1);
    Assert.assertNull(workflowConfig);
    WorkflowContext workflowContext = taskDriver.getWorkflowContext(jobIdKey1);
    Assert.assertNull(workflowContext);
    // second workflow with shared prefix should not be deleted when the first workflow is cleaned up
    workflowConfig = taskDriver.getWorkflowConfig(jobIdKey2);
    Assert.assertNotNull(workflowConfig);
    gobblinHelixJobLauncher2.close();
    // workflow deleted asynchronously after close
    waitForWorkFlowCleanup(taskDriver, jobIdKey2);
    workflowConfig = taskDriver.getWorkflowConfig(jobIdKey2);
    Assert.assertNull(workflowConfig);
    // check that workunit and taskstate directory for the job are cleaned up
    final File workunitsDir =
        new File(this.appWorkDir + File.separator + GobblinClusterConfigurationKeys.INPUT_WORK_UNIT_DIR_NAME
            + File.separator + jobIdKey1);
    final File taskstatesDir =
        new File(this.appWorkDir + File.separator + GobblinClusterConfigurationKeys.OUTPUT_TASK_STATE_DIR_NAME
            + File.separator + jobIdKey1);
    Assert.assertFalse(workunitsDir.exists());
    Assert.assertFalse(taskstatesDir.exists());
    // check that job.state file is cleaned up
    final File jobStateFile = new File(GobblinClusterUtils.getJobStateFilePath(true, this.appWorkDir, jobIdKey1).toString());
    Assert.assertFalse(jobStateFile.exists());
  }
  /**
   * Stops the task runner and joins its thread after the suite finishes, then releases all
   * resources (launchers etc.) registered with the closer.
   */
  @AfterClass
  public void tearDown() throws IOException {
    try {
      this.gobblinTaskRunner.stop();
      this.thread.join();
    } catch (InterruptedException ie) {
      // Restore the interrupt status so callers further up the stack can observe it.
      Thread.currentThread().interrupt();
    } finally {
      this.closer.close();
    }
  }
private void waitForWorkFlowCleanup(TaskDriver taskDriver, String queueName) {
for (int i = 0; i < 60; i++) {
WorkflowConfig workflowConfig = taskDriver.getWorkflowConfig(queueName);
if (workflowConfig == null) {
break;
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
}
private void waitForWorkFlowStartup(TaskDriver taskDriver, String workflow) {
for (int i = 0; i < 5; i++) {
WorkflowConfig workflowConfig = taskDriver.getWorkflowConfig(workflow);
if (workflowConfig != null) {
break;
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
}
}
| 2,184 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/SingleHelixTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.helix.task.TaskResult;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.collect.ImmutableMap;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link SingleHelixTask}, using a mocked {@link SingleTaskLauncher} and
 * {@link Process} to exercise the mapping from process exit behavior to Helix
 * {@link TaskResult} statuses.
 */
public class SingleHelixTaskTest {
  private static final String WORK_UNIT_FILE_PATH = "work-unit.wu";
  private static final String JOB_ID = "1";

  // Mocked child process handed back by the launcher.
  private Process processMock;
  // Mocked launcher used by the task to spawn the process.
  private SingleTaskLauncher launcherMock;
  // Task under test; created by createAndRunTask().
  private SingleHelixTask task;

  @BeforeMethod
  public void setUp() {
    this.launcherMock = mock(SingleTaskLauncher.class);
    this.processMock = mock(Process.class);
  }

  @Test
  public void successTaskProcessShouldResultInCompletedStatus()
      throws IOException, InterruptedException {
    // Exit code 0 from the process signals success.
    when(this.processMock.waitFor()).thenReturn(0);

    final TaskResult result = createAndRunTask();

    assertThat(result.getStatus()).isEqualTo(TaskResult.Status.COMPLETED);
    // The launcher must receive the configured job id and work-unit path,
    // and the task must block on the spawned process.
    final Path expectedWorkUnitPath = Paths.get(WORK_UNIT_FILE_PATH);
    verify(this.launcherMock).launch(JOB_ID, expectedWorkUnitPath);
    verify(this.processMock).waitFor();
  }

  @Test
  public void failedTaskProcessShouldResultInFailedStatus()
      throws IOException, InterruptedException {
    // A non-zero exit code must map to a fatal failure.
    when(this.processMock.waitFor()).thenReturn(1);

    final TaskResult result = createAndRunTask();

    assertThat(result.getStatus()).isEqualTo(TaskResult.Status.FATAL_FAILED);
  }

  @Test
  public void NonInterruptedExceptionShouldResultInFailedStatus()
      throws IOException, InterruptedException {
    // Any unexpected runtime failure while waiting maps to a plain FAILED status.
    when(this.processMock.waitFor()).thenThrow(new RuntimeException());

    final TaskResult result = createAndRunTask();

    assertThat(result.getStatus()).isEqualTo(TaskResult.Status.FAILED);
  }

  @Test
  public void testCancel()
      throws IOException {
    createAndRunTask();
    this.task.cancel();

    // Cancellation must forcibly kill the underlying process.
    verify(this.processMock).destroyForcibly();
  }

  /** Wires the mocks together, builds a minimal task config, and runs the task once. */
  private TaskResult createAndRunTask()
      throws IOException {
    when(this.launcherMock.launch(any(), any())).thenReturn(this.processMock);
    final ImmutableMap<String, String> configMap = ImmutableMap.<String, String>builder()
        .put("job.name", "testJob")
        .put("job.id", JOB_ID)
        .put("gobblin.cluster.work.unit.file.path", WORK_UNIT_FILE_PATH)
        .build();
    this.task = new SingleHelixTask(this.launcherMock, configMap);
    return this.task.run();
  }
}
| 2,185 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/TestSingleTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.ConfigUtils;
import org.apache.gobblin.util.FileUtils;
import static org.apache.gobblin.cluster.SingleTask.MAX_RETRY_WAITING_FOR_INIT_KEY;
/**
* Notes & Usage:
* 0. This test could be used to reproduce task-execution issue in Gobblin-Cluster, within each container.
* 1. The workunit is being created in {@link InMemoryWuFailedSingleTask}.
* 2. When needed to reproduce certain errors, replace org.apache.gobblin.cluster.DummySource.DummyExtractor or
* {@link DummySource} to plug in required logic.
* 3. Some of this tests simulate the Helix scenarios where run() and cancel() coule be assigned to run in different threads.
*/
@Slf4j
public class TestSingleTask {
  /**
   * Builds an {@link InMemorySingleTaskRunner} rooted at a fresh temp directory, writing a
   * cluster config file and pointing at a work-unit path laid out the way a Gobblin-Cluster
   * container expects.
   */
  private InMemorySingleTaskRunner createInMemoryTaskRunner()
      throws IOException {
    final File clusterWorkDirPath = Files.createTempDir();
    Path clusterConfigPath = Paths.get(clusterWorkDirPath.getAbsolutePath(), "clusterConf");
    Config config = ConfigFactory.empty().withValue(GobblinTaskRunner.CLUSTER_APP_WORK_DIR, ConfigValueFactory.fromAnyRef(clusterWorkDirPath.toString()));
    ConfigUtils configUtils = new ConfigUtils(new FileUtils());
    configUtils.saveConfigToFile(config, clusterConfigPath);
    final Path wuPath = Paths.get(clusterWorkDirPath.getAbsolutePath(), "_workunits/store/workunit.wu");
    InMemorySingleTaskRunner inMemorySingleTaskRunner =
        new InMemorySingleTaskRunner(clusterConfigPath.toString(), "testJob", wuPath.toString());
    return inMemorySingleTaskRunner;
  }
  /**
   * An in-memory {@link SingleTask} runner used to simulate how it works in Gobblin-Cluster.
   * This test fails the first execution on purpose (by omitting certain configuration),
   * catches the exception, and then re-runs successfully.
   */
  @Test
  public void testSingleTaskRerunAfterFailure()
      throws Exception {
    SingleTaskRunner inMemorySingleTaskRunner = createInMemoryTaskRunner();
    try {
      inMemorySingleTaskRunner.run(true);
    } catch (Exception e) {
      // The first run(true) is expected to fail; the retry below must succeed.
      inMemorySingleTaskRunner.run();
    }
    Assert.assertTrue(true);
  }
  @Test
  public void testTaskCancelBeforeRunFailure() throws Exception {
    InMemorySingleTaskRunner inMemorySingleTaskRunner = createInMemoryTaskRunner();
    inMemorySingleTaskRunner.initClusterSingleTask(false);
    // Directly calling cancel without initializing taskAttempt, it will timeout until reaching illegal state defined
    // in SingleTask.
    try {
      inMemorySingleTaskRunner.task.cancel();
    } catch (Exception e) {
      Assert.assertTrue(e instanceof IllegalStateException);
      Assert.assertTrue(e.getMessage().contains("Failed to initialize"));
    }
  }
  // Normal sequence means, run is executed before cancel method.
  @Test
  public void testNormalSequence() throws Exception {
    InMemorySingleTaskRunner inMemorySingleTaskRunner = createInMemoryTaskRunner();
    inMemorySingleTaskRunner.startServices();
    inMemorySingleTaskRunner.initClusterSingleTask(false);
    final SingleTask task = inMemorySingleTaskRunner.task;
    ExecutorService executor = Executors.newFixedThreadPool(2);
    // Wrap cancel() so it can be submitted as a FutureTask with a sentinel result.
    Runnable cancelRunnable = () -> {
      try {
        task.cancel();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
    final FutureTask<String> cancelTask = new FutureTask<String>(cancelRunnable, "cancelled");
    Runnable runRunnable = () -> {
      try {
        task.run();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
    FutureTask<String> runTask = new FutureTask<String>(runRunnable, "completed");
    executor.submit(runTask);
    // Wait until run() has created the taskAttempt before issuing the cancel.
    AssertWithBackoff.create().timeoutMs(2000).backoffFactor(1).assertTrue(new Predicate<Void>() {
      @Override
      public boolean apply(@Nullable Void input) {
        return task._taskAttempt != null;
      }
    }, "wait until task attempt available");
    // Simulate the process that signal happened first.
    executor.submit(cancelTask);
    AssertWithBackoff.create().timeoutMs(2000).backoffFactor(1).assertTrue(new Predicate<Void>() {
      @Override
      public boolean apply(@Nullable Void input) {
        return cancelTask.isDone();
      }
    }, "wait until task attempt available");
    Assert.assertEquals(cancelTask.get(), "cancelled");
  }
  @Test
  public void testTaskCancelBeforeRun()
      throws Exception {
    final InMemorySingleTaskRunner inMemorySingleTaskRunner = createInMemoryTaskRunner();
    // Place cancellation into infinite wait while having another thread initialize the taskAttempt.
    // Reset task and set the retry to be infinite large.
    inMemorySingleTaskRunner
        .setInjectedConfig(ConfigFactory.parseMap(ImmutableMap.of(MAX_RETRY_WAITING_FOR_INIT_KEY, Integer.MAX_VALUE)));
    inMemorySingleTaskRunner.startServices();
    inMemorySingleTaskRunner.initClusterSingleTask(false);
    final SingleTask task = inMemorySingleTaskRunner.task;
    // The task.cancel() method has the logic to block on taskAttempt object to be initialized before calling
    // taskAttempt.shutdownTasks(). Here there has to be at least 2 threads running concurrently, the run() method
    // is meant to create the taskAttempt object so that the waiting thread (cancel thread) got unblocked after that.
    ExecutorService executor = Executors.newFixedThreadPool(2);
    Runnable cancelRunnable = () -> {
      try {
        task.cancel();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
    FutureTask<String> cancelTask = new FutureTask<String>(cancelRunnable, "cancelled");
    // Submit cancel FIRST: it must block until run() initializes the taskAttempt.
    executor.submit(cancelTask);
    Runnable runRunnable = () -> {
      try {
        task.run();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
    FutureTask<String> runTask = new FutureTask<String>(runRunnable, "completed");
    executor.submit(runTask);
    AssertWithBackoff assertWithBackoff = AssertWithBackoff.create().backoffFactor(1).maxSleepMs(1000).timeoutMs(500000);
    assertWithBackoff.assertTrue(new Predicate<Void>() {
      @Override
      public boolean apply(@Nullable Void input) {
        return runTask.isDone();
      }
    }, "waiting for future to complete");
    Assert.assertEquals(runTask.get(), "completed");
    // Both futures must have finished: cancel unblocked once run() created the taskAttempt.
    Assert.assertTrue(cancelTask.isDone());
    Assert.assertEquals(cancelTask.get(), "cancelled");
  }
}
| 2,186 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/ClusterIntegrationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordStreamingSerializer;
import org.apache.helix.manager.zk.ZkClient;
import org.apache.helix.task.TargetState;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.zookeeper.datamodel.serializer.ChainedPathZkSerializer;
import org.apache.helix.zookeeper.zkclient.serialize.PathBasedZkSerializer;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
import com.google.common.base.Predicate;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.cluster.suite.IntegrationBasicSuite;
import org.apache.gobblin.cluster.suite.IntegrationDedicatedManagerClusterSuite;
import org.apache.gobblin.cluster.suite.IntegrationDedicatedTaskDriverClusterSuite;
import org.apache.gobblin.cluster.suite.IntegrationJobCancelSuite;
import org.apache.gobblin.cluster.suite.IntegrationJobFactorySuite;
import org.apache.gobblin.cluster.suite.IntegrationJobRestartViaSpecSuite;
import org.apache.gobblin.cluster.suite.IntegrationJobTagSuite;
import org.apache.gobblin.cluster.suite.IntegrationSeparateProcessSuite;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.testing.AssertWithBackoff;
import org.apache.gobblin.util.ConfigUtils;
@Slf4j
@Test
public class ClusterIntegrationTest {
  // Test suite under execution; set by each test method, cleaned up in tearDown().
  private IntegrationBasicSuite suite;
  // ZooKeeper connect string, captured from the manager config in getHelixManager().
  private String zkConnectString;
  @Test
  public void testJobShouldComplete()
      throws Exception {
    this.suite = new IntegrationBasicSuite();
    runAndVerify();
  }
  /**
   * Builds a Helix CONTROLLER manager from the suite's manager config. Also records the
   * ZK connect string as a side effect (used later by testJobRestartViaSpec).
   */
  private HelixManager getHelixManager() {
    Config helixConfig = this.suite.getManagerConfig();
    String clusterName = helixConfig.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
    String instanceName = ConfigUtils.getString(helixConfig, GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
        GobblinClusterManager.class.getSimpleName());
    this.zkConnectString = helixConfig.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    HelixManager helixManager = HelixManagerFactory.getZKHelixManager(clusterName, instanceName, InstanceType.CONTROLLER, zkConnectString);
    return helixManager;
  }
  @Test
  void testJobShouldGetCancelled() throws Exception {
    // Cancellation usually needs long time to successfully be executed, therefore setting the sleeping time to 100.
    Config jobConfigOverrides = ClusterIntegrationTestUtils.buildSleepingJob(IntegrationJobCancelSuite.JOB_ID,
        IntegrationJobCancelSuite.TASK_STATE_FILE)
        .withValue(SleepingTask.SLEEP_TIME_IN_SECONDS, ConfigValueFactory.fromAnyRef(100));
    this.suite = new IntegrationJobCancelSuite(jobConfigOverrides);
    suite.startCluster();
    HelixManager helixManager = getHelixManager();
    helixManager.connect();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Runnable cancelAfterTaskInit = () -> {
      try {
        TaskDriver taskDriver = new TaskDriver(helixManager);
        // The actual cancellation needs to be executed in separated thread to make the cancel of helix is not blocked by
        // SleepingTask's thread in its own thread.
        // Issue the cancel after ensuring the workflow is created and the SleepingTask is running
        AssertWithBackoff.create().maxSleepMs(1000).backoffFactor(1).
            assertTrue(isTaskStarted(helixManager, IntegrationJobCancelSuite.JOB_ID), "Waiting for the job to start...");
        AssertWithBackoff.create().maxSleepMs(100).timeoutMs(2000).backoffFactor(1).
            assertTrue(isTaskRunning(IntegrationJobCancelSuite.TASK_STATE_FILE),
                "Waiting for the task to enter running state");
        log.info("Stopping the job");
        taskDriver.stop(IntegrationJobCancelSuite.JOB_ID);
        suite.shutdownCluster();
      } catch (Exception e) {
        throw new RuntimeException("Failure in canceling tasks");
      }
    };
    FutureTask<String> futureTask = new FutureTask<String>( cancelAfterTaskInit, "cancelled");
    executor.submit(futureTask);
    AssertWithBackoff assertWithBackoff = AssertWithBackoff.create().backoffFactor(1).maxSleepMs(1000).timeoutMs(500000);
    assertWithBackoff.assertTrue(new Predicate<Void>() {
      @Override
      public boolean apply(Void input) {
        return futureTask.isDone();
      }
    }, "waiting for future to complete");
    Assert.assertEquals(futureTask.get(), "cancelled");
    suite.waitForAndVerifyOutputFiles();
  }
  /**
   * An integration test for restarting a Helix workflow via a JobSpec. This test case starts a Helix cluster with
   * a {@link FsJobConfigurationManager}. The test case does the following:
   * <ul>
   *   <li> add a {@link org.apache.gobblin.runtime.api.JobSpec} that uses a {@link org.apache.gobblin.cluster.SleepingCustomTaskSource})
   *   to {@link IntegrationJobRestartViaSpecSuite#FS_SPEC_CONSUMER_DIR}. which is picked by the JobConfigurationManager. </li>
   *   <li> the JobConfigurationManager sends a notification to the GobblinHelixJobScheduler which schedules the job for execution. The JobSpec is
   *   also added to the JobCatalog for persistence. Helix starts a Workflow for this JobSpec. </li>
   *   <li> We then add a {@link org.apache.gobblin.runtime.api.JobSpec} with UPDATE Verb to {@link IntegrationJobRestartViaSpecSuite#FS_SPEC_CONSUMER_DIR}.
   *   This signals GobblinHelixJobScheduler (and, Helix) to first cancel the running job (i.e., Helix Workflow) started in the previous step.
   *   <li> We inspect the state of the zNode corresponding to the Workflow resource in Zookeeper to ensure that its {@link org.apache.helix.task.TargetState}
   *   is STOP. </li>
   *   <li> Once the cancelled job from the previous steps is completed, the job will be re-launched for execution by the GobblinHelixJobScheduler.
   *   We confirm the execution by again inspecting the zNode and ensuring its TargetState is START. </li>
   * </ul>
   */
  @Test (enabled = false, dependsOnMethods = { "testJobShouldGetCancelled" }, groups = {"disabledOnCI"})
  public void testJobRestartViaSpec() throws Exception {
    Config jobConfigOverrides = ClusterIntegrationTestUtils.buildSleepingJob(IntegrationJobCancelSuite.JOB_ID,
        IntegrationJobCancelSuite.TASK_STATE_FILE);
    this.suite = new IntegrationJobRestartViaSpecSuite(jobConfigOverrides);
    HelixManager helixManager = getHelixManager();
    IntegrationJobRestartViaSpecSuite restartViaSpecSuite = (IntegrationJobRestartViaSpecSuite) this.suite;
    //Start the cluster
    restartViaSpecSuite.startCluster();
    helixManager.connect();
    AssertWithBackoff.create().timeoutMs(30000).maxSleepMs(1000).backoffFactor(1).
        assertTrue(isTaskStarted(helixManager, IntegrationJobRestartViaSpecSuite.JOB_ID), "Waiting for the job to start...");
    AssertWithBackoff.create().maxSleepMs(100).timeoutMs(2000).backoffFactor(1).
        assertTrue(isTaskRunning(IntegrationJobRestartViaSpecSuite.TASK_STATE_FILE), "Waiting for the task to enter running state");
    // Read the workflow's zNode directly to observe its Helix TargetState transitions.
    ZkClient zkClient = new ZkClient(this.zkConnectString);
    PathBasedZkSerializer zkSerializer = ChainedPathZkSerializer.builder(new ZNRecordStreamingSerializer()).build();
    zkClient.setZkSerializer(zkSerializer);
    String clusterName = getHelixManager().getClusterName();
    String zNodePath = Paths.get("/", clusterName, "CONFIGS", "RESOURCE", IntegrationJobCancelSuite.JOB_ID).toString();
    //Ensure that the Workflow is started
    ZNRecord record = zkClient.readData(zNodePath);
    String targetState = record.getSimpleField("TargetState");
    Assert.assertEquals(targetState, TargetState.START.name());
    //Add a JobSpec with UPDATE verb signalling the Helix cluster to restart the workflow
    restartViaSpecSuite.addJobSpec(IntegrationJobRestartViaSpecSuite.JOB_NAME, SpecExecutor.Verb.UPDATE.name());
    AssertWithBackoff.create().maxSleepMs(1000).timeoutMs(12000).backoffFactor(1).assertTrue(input -> {
      //Inspect the zNode at the path corresponding to the Workflow resource. Ensure the target state of the resource is in
      // the STOP state or that the zNode has been deleted.
      ZNRecord recordNew = zkClient.readData(zNodePath, true);
      String targetStateNew = null;
      if (recordNew != null) {
        targetStateNew = recordNew.getSimpleField("TargetState");
      }
      return recordNew == null || targetStateNew.equals(TargetState.STOP.name());
    }, "Waiting for Workflow TargetState to be STOP");
    //Ensure that the SleepingTask did not terminate normally i.e. it was interrupted. We check this by ensuring
    // that the line "Hello World!" is not present in the logged output.
    suite.waitForAndVerifyOutputFiles();
    AssertWithBackoff.create().maxSleepMs(1000).timeoutMs(120000).backoffFactor(1).assertTrue(input -> {
      //Inspect the zNode at the path corresponding to the Workflow resource. Ensure the target state of the resource is in
      // the START state.
      ZNRecord recordNew = zkClient.readData(zNodePath, true);
      String targetStateNew = null;
      if (recordNew != null) {
        targetStateNew = recordNew.getSimpleField("TargetState");
        return targetStateNew.equals(TargetState.START.name());
      }
      return false;
    }, "Waiting for Workflow TargetState to be START");
  }
  /** Predicate: the Helix workflow context for {@code jobId} exists, i.e. the job has started. */
  public static Predicate<Void> isTaskStarted(HelixManager helixManager, String jobId) {
    return input -> TaskDriver.getWorkflowContext(helixManager, jobId) != null;
  }
  /** Predicate: the task-state marker file exists, i.e. the task body is executing. */
  public static Predicate<Void> isTaskRunning(String taskStateFileName) {
    return input -> {
      File taskStateFile = new File(taskStateFileName);
      return taskStateFile.exists();
    };
  }
  @Test
  public void testSeparateProcessMode()
      throws Exception {
    this.suite = new IntegrationSeparateProcessSuite();
    runAndVerify();
  }
  @Test
  public void testDedicatedManagerCluster()
      throws Exception {
    this.suite = new IntegrationDedicatedManagerClusterSuite();
    runAndVerify();
  }
  @Test(enabled = false)
  public void testDedicatedTaskDriverCluster()
      throws Exception {
    this.suite = new IntegrationDedicatedTaskDriverClusterSuite();
    runAndVerify();
  }
  @Test(enabled = false)
  public void testJobWithTag()
      throws Exception {
    this.suite = new IntegrationJobTagSuite();
    runAndVerify();
  }
  @Test
  public void testPlanningJobFactory()
      throws Exception {
    this.suite = new IntegrationJobFactorySuite();
    runAndVerify();
  }
  /** Common happy-path driver: start the suite's cluster, verify output, then shut down. */
  private void runAndVerify()
      throws Exception {
    suite.startCluster();
    suite.waitForAndVerifyOutputFiles();
    ensureJobLauncherFinished();
    suite.verifyMetricsCleaned();
    suite.shutdownCluster();
  }
  private void ensureJobLauncherFinished() throws Exception {
    AssertWithBackoff asserter = AssertWithBackoff.create().logger(log).timeoutMs(120_000)
        .maxSleepMs(100).backoffFactor(1.5);
    asserter.assertTrue(this::isJobLauncherFinished, "Waiting for job launcher completion");
  }
  /**
   * Returns true when no live thread's stack mentions HelixRetriggeringJobCallable,
   * i.e. the job launcher has fully unwound.
   */
  protected boolean isJobLauncherFinished(Void input) {
    Map<Thread, StackTraceElement[]> map = Thread.getAllStackTraces();
    for (Map.Entry<Thread, StackTraceElement[]> entry: map.entrySet()) {
      for (StackTraceElement ste: entry.getValue()) {
        if (ste.toString().contains(HelixRetriggeringJobCallable.class.getSimpleName())) {
          return false;
        }
      }
    }
    return true;
  }
  @AfterClass
  public void tearDown() throws IOException {
    this.suite.deleteWorkDir();
  }
}
| 2,187 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/HelixTaskEventMetadataGeneratorTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.util.Map;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.runtime.api.TaskEventMetadataGenerator;
import org.apache.gobblin.util.TaskEventMetadataUtils;
@Test
public class HelixTaskEventMetadataGeneratorTest {
public void testGetMetadata() {
State state = new State();
state.setProp(ConfigurationKeys.TASK_EVENT_METADATA_GENERATOR_CLASS_KEY, "helixtask");
state.setProp(GobblinClusterConfigurationKeys.CONTAINER_ID_KEY, "container-1");
state.setProp(GobblinClusterConfigurationKeys.HELIX_TASK_ID_KEY, "task-1");
state.setProp(GobblinClusterConfigurationKeys.HELIX_JOB_ID_KEY, "job-1");
TaskEventMetadataGenerator metadataGenerator = TaskEventMetadataUtils.getTaskEventMetadataGenerator(state);
//Ensure instantiation is done correctly
Assert.assertTrue(metadataGenerator != null);
//Ensure metadata map is correctly populated
Map<String, String> metadataMap = metadataGenerator.getMetadata(state, "testEventName");
Assert.assertEquals(metadataMap.size(), 5);
Assert.assertEquals(metadataMap.get(HelixTaskEventMetadataGenerator.HELIX_INSTANCE_KEY), "");
Assert.assertEquals(metadataMap.get(HelixTaskEventMetadataGenerator.CONTAINER_ID_KEY), "container-1");
Assert.assertEquals(metadataMap.get(HelixTaskEventMetadataGenerator.HOST_NAME_KEY), "");
Assert.assertEquals(metadataMap.get(HelixTaskEventMetadataGenerator.HELIX_TASK_ID_KEY), "task-1");
Assert.assertEquals(metadataMap.get(HelixTaskEventMetadataGenerator.HELIX_JOB_ID_KEY), "job-1");
}
} | 2,188 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.PrintWriter;
import java.io.StringWriter;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainArgumentsDataProvider.TEST_CLUSTER_CONF;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainArgumentsDataProvider.TEST_JOB_ID;
import static org.apache.gobblin.cluster.SingleTaskRunnerMainArgumentsDataProvider.TEST_WORKUNIT;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/** Tests for {@link SingleTaskRunnerMainOptions} command-line parsing. */
public class SingleTaskRunnerMainOptionsTest {
  private PrintWriter writer;
  private StringWriter stringWriter;

  /** A fresh writer pair per test so each test only sees its own usage output. */
  @BeforeMethod
  public void setUp() {
    final StringWriter buffer = new StringWriter();
    this.stringWriter = buffer;
    this.writer = new PrintWriter(buffer, true);
  }

  /** A fully specified command line exposes every option value. */
  @Test
  public void correctCmdLineShouldReturnAllValues() {
    final SingleTaskRunnerMainOptions options =
        new SingleTaskRunnerMainOptions(SingleTaskRunnerMainArgumentsDataProvider.getArgs(), this.writer);

    assertThat(options.getJobId()).isEqualTo(TEST_JOB_ID);
    assertThat(options.getWorkUnitFilePath()).isEqualTo(TEST_WORKUNIT);
    assertThat(options.getClusterConfigFilePath()).isEqualTo(TEST_CLUSTER_CONF);
  }

  /** An empty command line fails fast and prints a usage message. */
  @Test
  public void missingOptionShouldThrow() {
    final String[] noArgs = {};

    assertThatThrownBy(() -> new SingleTaskRunnerMainOptions(noArgs, this.writer))
        .isInstanceOf(GobblinClusterException.class);

    assertThat(this.stringWriter.toString()).contains("usage: SingleTaskRunnerMain");
  }
}
| 2,189 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.nio.file.Files;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.apache.helix.InstanceType;
import org.assertj.core.util.Lists;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.eventbus.EventBus;
import com.google.common.io.Closer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.event.NewJobConfigArrivalEvent;
import org.apache.gobblin.cluster.event.UpdateJobConfigArrivalEvent;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.runtime.job_catalog.NonObservingFSJobCatalog;
import org.apache.gobblin.scheduler.SchedulerService;
import static org.mockito.Mockito.when;
/**
* Unit tests for {@link org.apache.gobblin.cluster.GobblinHelixJobScheduler}.
*
* In all test cases, we use GobblinHelixManagerFactory instead of
* HelixManagerFactory, and instantiate a local HelixManager per test to
* provide isolation and prevent errors caused by the ZKClient being shared
* (e.g. ZKClient is not connected exceptions).
*/
@Test(groups = {"gobblin.cluster"}, singleThreaded = true)
public class GobblinHelixJobSchedulerTest {
  public final static Logger LOG = LoggerFactory.getLogger(GobblinHelixJobSchedulerTest.class);

  private FileSystem localFs;
  private Path appWorkDir;
  private final Closer closer = Closer.create();
  private Config baseConfig;

  private GobblinTaskRunner gobblinTaskRunner;
  private Thread thread;

  // Two distinct workflow-id suffixes used to tell "updated" from "not updated" workflows.
  private final String workflowIdSuffix1 = "_1504201348471";
  private final String workflowIdSuffix2 = "_1504201348472";

  // Mocked-clock step sizes: one inside the scheduling-throttle window, one beyond it.
  private final Instant beginTime = Instant.EPOCH;
  private final Duration withinThrottlePeriod = Duration.of(1, ChronoUnit.SECONDS);
  private final Duration exceedsThrottlePeriod = Duration.of(
      GobblinClusterConfigurationKeys.DEFAULT_HELIX_JOB_SCHEDULING_THROTTLE_TIMEOUT_SECONDS_KEY + 1, ChronoUnit.SECONDS);

  private String zkConnectingString;
  private String helixClusterName;

  @BeforeClass
  public void setUp()
      throws Exception {
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    String resourceName = GobblinHelixJobSchedulerTest.class.getSimpleName() + ".conf";
    URL url = GobblinHelixJobSchedulerTest.class.getClassLoader().getResource(resourceName);
    // Report the resource name on failure (the message previously concatenated the null URL).
    Assert.assertNotNull(url, "Could not find resource " + resourceName);

    this.appWorkDir = new Path(GobblinHelixJobSchedulerTest.class.getSimpleName());

    // Prepare the source Json file consumed by the generated test jobs.
    File sourceJsonFile = new File(this.appWorkDir.toString(), TestHelper.TEST_JOB_NAME + ".json");
    TestHelper.createSourceJsonFile(sourceJsonFile);

    baseConfig = ConfigFactory.parseURL(url).withValue("gobblin.cluster.zk.connection.string",
        ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString()))
        .withValue(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL,
            ConfigValueFactory.fromAnyRef(sourceJsonFile.getAbsolutePath()))
        .withValue(ConfigurationKeys.JOB_STATE_IN_STATE_STORE, ConfigValueFactory.fromAnyRef("true")).resolve();

    this.zkConnectingString = baseConfig.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    this.helixClusterName = baseConfig.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);

    HelixUtils.createGobblinHelixCluster(zkConnectingString, helixClusterName);

    this.localFs = FileSystem.getLocal(new Configuration());

    // Register the work-dir cleanup exactly once; it was previously registered twice,
    // causing a redundant delete attempt when the closer runs.
    this.closer.register(() -> {
      if (localFs.exists(appWorkDir)) {
        localFs.delete(appWorkDir, true);
      }
    });

    this.gobblinTaskRunner =
        new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME, TestHelper.TEST_HELIX_INSTANCE_NAME,
            TestHelper.TEST_APPLICATION_ID, TestHelper.TEST_TASK_RUNNER_ID, baseConfig, Optional.of(appWorkDir));

    this.thread = new Thread(() -> gobblinTaskRunner.start());
    this.thread.start();
  }

  /***
   * Time span exceeds throttle timeout, within same workflow, throttle is enabled
   * Job will be updated
   * @throws Exception
   */
  @Test
  public void testUpdateSameWorkflowLongPeriodThrottle()
      throws Exception {
    runWorkflowTest(exceedsThrottlePeriod, "UpdateSameWorkflowLongPeriodThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix2,
        true, true);
  }

  /***
   * Time span is within throttle timeout, within same workflow, throttle is enabled
   * Job will not be updated
   * @throws Exception
   */
  @Test
  public void testUpdateSameWorkflowShortPeriodThrottle()
      throws Exception {
    runWorkflowTest(withinThrottlePeriod, "UpdateSameWorkflowShortPeriodThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix1,
        true, true);
  }

  /***
   * Time span exceeds throttle timeout, within same workflow, throttle is not enabled
   * Job will be updated
   * @throws Exception
   */
  @Test
  public void testUpdateSameWorkflowLongPeriodNoThrottle()
      throws Exception {
    runWorkflowTest(exceedsThrottlePeriod, "UpdateSameWorkflowLongPeriodNoThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix2,
        false, true);
  }

  /***
   * Time span is within throttle timeout, within same workflow, throttle is not enabled
   * Job will be updated
   * @throws Exception
   */
  @Test
  public void testUpdateSameWorkflowShortPeriodNoThrottle()
      throws Exception {
    runWorkflowTest(withinThrottlePeriod, "UpdateSameWorkflowShortPeriodNoThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix2,
        false, true);
  }

  /***
   * Time span is within throttle timeout, within different workflow, throttle is enabled
   * Job will be updated
   * @throws Exception
   */
  @Test  // added for consistency with the sibling scenarios (class-level @Test already covered it)
  public void testUpdateDiffWorkflowShortPeriodThrottle()
      throws Exception {
    runWorkflowTest(withinThrottlePeriod, "UpdateDiffWorkflowShortPeriodThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix2,
        true, false);
  }

  /***
   * Time span is within throttle timeout, within different workflow, throttle is not enabled
   * Job will be updated
   * @throws Exception
   */
  @Test
  public void testUpdateDiffWorkflowShortPeriodNoThrottle()
      throws Exception {
    runWorkflowTest(withinThrottlePeriod, "UpdateDiffWorkflowShortPeriodNoThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix2,
        false, false);
  }

  /***
   * Time span exceeds throttle timeout, within different workflow, throttle is enabled
   * Job will be updated
   * @throws Exception
   */
  @Test
  public void testUpdateDiffWorkflowLongPeriodThrottle()
      throws Exception {
    runWorkflowTest(exceedsThrottlePeriod, "UpdateDiffWorkflowLongPeriodThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix2,
        true, false);
  }

  /***
   * Time span exceeds throttle timeout, within different workflow, throttle is not enabled
   * Job will be updated
   * @throws Exception
   */
  @Test
  public void testUpdateDiffWorkflowLongPeriodNoThrottle()
      throws Exception {
    runWorkflowTest(exceedsThrottlePeriod, "UpdateDiffWorkflowLongPeriodNoThrottle",
        workflowIdSuffix1, workflowIdSuffix2, workflowIdSuffix2,
        false, false);
  }

  /**
   * Builds a scheduler over a temp job-conf directory, backed by the given Helix manager,
   * with throttling toggled by {@code isThrottleEnabled} and time driven by {@code clock}.
   */
  private GobblinHelixJobScheduler createJobScheduler(HelixManager helixManager, boolean isThrottleEnabled, Clock clock) throws Exception {
    java.nio.file.Path p = Files.createTempDirectory(GobblinHelixJobScheduler.class.getSimpleName());
    Config config = ConfigFactory.empty().withValue(ConfigurationKeys.JOB_CONFIG_FILE_GENERAL_PATH_KEY,
        ConfigValueFactory.fromAnyRef(p.toString()));
    SchedulerService schedulerService = new SchedulerService(new Properties());
    NonObservingFSJobCatalog jobCatalog = new NonObservingFSJobCatalog(config);
    jobCatalog.startAsync();
    Config helixJobSchedulerConfig = ConfigFactory.empty().withValue(GobblinClusterConfigurationKeys.HELIX_JOB_SCHEDULING_THROTTLE_ENABLED_KEY,
        ConfigValueFactory.fromAnyRef(isThrottleEnabled));
    GobblinHelixJobScheduler gobblinHelixJobScheduler = new GobblinHelixJobScheduler(helixJobSchedulerConfig, helixManager, java.util.Optional.empty(),
        new EventBus(), appWorkDir, Lists.emptyList(), schedulerService, jobCatalog, clock);
    return gobblinHelixJobScheduler;
  }

  /** Wraps the job properties in a new-job event, cancelling any running job on delete. */
  private NewJobConfigArrivalEvent createJobConfigArrivalEvent(Properties properties) {
    properties.setProperty(GobblinClusterConfigurationKeys.CANCEL_RUNNING_JOB_ON_DELETE, "true");
    NewJobConfigArrivalEvent newJobConfigArrivalEvent =
        new NewJobConfigArrivalEvent(properties.getProperty(ConfigurationKeys.JOB_NAME_KEY), properties);
    return newJobConfigArrivalEvent;
  }

  /** Connects the manager and asserts the job's workflow id ends with the expected suffix. */
  private void connectAndAssertWorkflowId(String expectedSuffix, String jobName, HelixManager helixManager) throws Exception {
    helixManager.connect();
    String workFlowId = getWorkflowID(jobName, helixManager);
    Assert.assertNotNull(workFlowId);
    Assert.assertTrue(workFlowId.endsWith(expectedSuffix));
  }

  /**
   * Polls Helix for up to 30 seconds for a workflow whose job name matches, returning its
   * workflow id, or null if none appears before the deadline.
   */
  private String getWorkflowID(String jobName, HelixManager helixManager)
      throws Exception {
    long endTime = System.currentTimeMillis() + 30000;
    while (System.currentTimeMillis() < endTime) {
      Map<String, String> workflowIdMap;
      try {
        workflowIdMap = HelixUtils.getWorkflowIdsFromJobNames(helixManager,
            Collections.singletonList(jobName));
      } catch (GobblinHelixUnexpectedStateException e) {
        // Helix state not readable yet; back off briefly instead of busy-spinning
        // (the previous `continue` skipped the sleep entirely).
        Thread.sleep(100);
        continue;
      }
      if (workflowIdMap.containsKey(jobName)) {
        return workflowIdMap.get(jobName);
      }
      Thread.sleep(100);
    }
    return null;
  }

  /**
   * Drives one scheduler scenario: submit a new job, then submit an update (either to the
   * same workflow or as a second job), and assert which workflow-id suffix survives.
   *
   * @param mockStepAmountTime amount the mocked clock advances per {@code instant()} call
   * @param jobSuffix unique suffix for the generated job name
   * @param newJobWorkflowIdSuffix workflow-id suffix of the initial job
   * @param updateWorkflowIdSuffix workflow-id suffix carried by the update
   * @param assertUpdateWorkflowIdSuffix suffix expected after the update is processed
   * @param isThrottleEnabled whether scheduler-side throttling is turned on
   * @param isSameWorkflow true to update the same job; false to submit a different job
   */
  private void runWorkflowTest(Duration mockStepAmountTime, String jobSuffix,
      String newJobWorkflowIdSuffix, String updateWorkflowIdSuffix,
      String assertUpdateWorkflowIdSuffix, boolean isThrottleEnabled, boolean isSameWorkflow) throws Exception {
    // Each call to clock.instant() advances the mocked time by mockStepAmountTime.
    Clock mockClock = Mockito.mock(Clock.class);
    AtomicReference<Instant> nextInstant = new AtomicReference<>(beginTime);
    when(mockClock.instant()).thenAnswer(invocation -> nextInstant.getAndAccumulate(null, (currentInstant, x) -> currentInstant.plus(mockStepAmountTime)));

    // Use GobblinHelixManagerFactory instead of HelixManagerFactory to avoid the connection error
    // helixManager is set to local variable to avoid the HelixManager (ZkClient) is not connected error across tests
    HelixManager helixManager = GobblinHelixManagerFactory
        .getZKHelixManager(helixClusterName, TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.CONTROLLER,
            zkConnectingString);
    GobblinHelixJobScheduler jobScheduler = createJobScheduler(helixManager, isThrottleEnabled, mockClock);

    final Properties properties = GobblinHelixJobLauncherTest.generateJobProperties(this.baseConfig, jobSuffix, newJobWorkflowIdSuffix);
    NewJobConfigArrivalEvent newJobConfigArrivalEvent = createJobConfigArrivalEvent(properties);
    jobScheduler.handleNewJobConfigArrival(newJobConfigArrivalEvent);
    connectAndAssertWorkflowId(newJobWorkflowIdSuffix, newJobConfigArrivalEvent.getJobName(), helixManager);

    if (isSameWorkflow) {
      properties.setProperty(ConfigurationKeys.JOB_ID_KEY,
          "job_" + properties.getProperty(ConfigurationKeys.JOB_NAME_KEY) + updateWorkflowIdSuffix);
      jobScheduler.handleUpdateJobConfigArrival(
          new UpdateJobConfigArrivalEvent(properties.getProperty(ConfigurationKeys.JOB_NAME_KEY), properties));
      connectAndAssertWorkflowId(assertUpdateWorkflowIdSuffix, newJobConfigArrivalEvent.getJobName(), helixManager);
    } else {
      final Properties properties2 =
          GobblinHelixJobLauncherTest.generateJobProperties(
              this.baseConfig, jobSuffix + '2', updateWorkflowIdSuffix);
      NewJobConfigArrivalEvent newJobConfigArrivalEvent2 =
          new NewJobConfigArrivalEvent(properties2.getProperty(ConfigurationKeys.JOB_NAME_KEY), properties2);
      jobScheduler.handleUpdateJobConfigArrival(
          new UpdateJobConfigArrivalEvent(properties2.getProperty(ConfigurationKeys.JOB_NAME_KEY), properties2));
      connectAndAssertWorkflowId(assertUpdateWorkflowIdSuffix, newJobConfigArrivalEvent2.getJobName(), helixManager);
    }
  }

  /** Stops the task runner, joins its thread, and closes all registered resources. */
  @AfterClass
  public void tearDown()
      throws IOException {
    try {
      this.gobblinTaskRunner.stop();
      this.thread.join();
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    } finally {
      this.closer.close();
    }
  }
}
| 2,190 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.concurrent.TimeoutException;
import org.apache.commons.io.FileUtils;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.fs.Path;
import org.apache.helix.HelixManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* Unit tests for killing {@link GobblinClusterManager}s and {@link GobblinTaskRunner}s
*
* <p>
* This class uses a {@link TestingServer} as an embedded ZooKeeper server for testing. The Curator
* framework is used to provide a ZooKeeper client. This class also uses the {@link HelixManager} to
* act as a testing Helix participant to receive the container (running the {@link GobblinTaskRunner})
* shutdown request message.
* </p>
*/
// The kill tests are unreliable on Travis
// Disabled GobblinClusterKillTest until reliability improves
@Test(enabled=false, groups = {"disabledOnCI"}, singleThreaded = true)
public class GobblinClusterKillTest {
  public final static Logger LOG = LoggerFactory.getLogger(GobblinClusterKillTest.class);
  public static final String CLASS_NAME_BASED_PATH = "org/apache/gobblin/util/test/HelloWorldSource";

  private TestingServer _testingZKServer;

  private final static int ASSERT_TIMEOUT = 60000;
  private final static int ASSERT_MAX_SLEEP = 2000;
  private final static int NUM_MANAGERS = 2;
  private final static int NUM_WORKERS = 2;
  // Output-file thresholds the backoff assertions wait for.
  private final static int MIN_WRITER_OUTPUT_FILES = 25;
  private final static int MIN_JOB_OUTPUT_FILES = 100;

  private GobblinClusterManager[] _clusterManagers;
  private GobblinTaskRunner[] _clusterWorkers;
  private Thread[] _workerStartThreads;
  private String _testDirPath;
  private String _jobDirPath;

  Config _config;

  /**
   * clean up and set up test directory
   */
  private void setupTestDir() throws IOException {
    _testDirPath = _config.getString("gobblin.cluster.work.dir");
    _jobDirPath = _config.getString(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY);

    // clean up test directory and create job dir
    File testDir = new File(_testDirPath);
    File jobDir = new File(_jobDirPath);

    if (testDir.exists()) {
      FileUtils.deleteDirectory(testDir);
    }
    jobDir.mkdirs();

    // copy job file from resource
    String jobFileName = GobblinClusterKillTest.class.getSimpleName() + "Job.conf";
    try (InputStream resourceStream = this.getClass().getClassLoader().getResourceAsStream(jobFileName)) {
      if (resourceStream == null) {
        throw new RuntimeException("Could not find job resource " + jobFileName);
      }
      File targetFile = new File(_jobDirPath + "/" + jobFileName);
      FileUtils.copyInputStreamToFile(resourceStream, targetFile);
    } catch (IOException e) {
      throw new RuntimeException("Unable to load job resource " + jobFileName, e);
    }
  }

  /**
   * Create and start a cluster manager
   * @param id - array offset
   * @throws Exception
   */
  private void setupManager(int id) throws Exception {
    _clusterManagers[id] =
        new GobblinClusterManager(TestHelper.TEST_APPLICATION_NAME, TestHelper.TEST_APPLICATION_ID,
            _config.withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
                ConfigValueFactory.fromAnyRef("Manager_" + id)),
            Optional.of(new Path(_config.getString("gobblin.cluster.work.dir"))));
    _clusterManagers[id].start();
  }

  /**
   * Create and start a cluster worker
   * @param id - array offset
   * @throws Exception
   */
  private void setupWorker(int id) throws Exception {
    final GobblinTaskRunner fworker =
        new GobblinTaskRunner(TestHelper.TEST_APPLICATION_NAME, "Worker_" + id, TestHelper.TEST_APPLICATION_ID, "1",
            _config, Optional.of(new Path(_config.getString("gobblin.cluster.work.dir"))));
    _clusterWorkers[id] = fworker;
    _workerStartThreads[id] = new Thread(new Runnable() {
      @Override
      public void run() {
        fworker.start();
      }
    });
    _workerStartThreads[id].start();
  }

  @BeforeMethod
  public void setUp() throws Exception {
    // Use a random ZK port
    _testingZKServer = new TestingServer(-1);
    LOG.info("Testing ZK Server listening on: " + _testingZKServer.getConnectString());

    String resourceName = GobblinClusterKillTest.class.getSimpleName() + ".conf";
    URL url = GobblinClusterKillTest.class.getClassLoader().getResource(resourceName);
    // Report the resource name on failure (the message previously concatenated the null URL).
    Assert.assertNotNull(url, "Could not find resource " + resourceName);

    _config = ConfigFactory.parseURL(url)
        .withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(_testingZKServer.getConnectString()))
        .withValue("gobblin.cluster.jobconf.fullyQualifiedPath",
            ConfigValueFactory.fromAnyRef("/tmp/gobblinClusterKillTest/job-conf"))
        .resolve();

    String zkConnectionString = _config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    HelixUtils.createGobblinHelixCluster(zkConnectionString,
        _config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY));

    setupTestDir();

    _clusterManagers = new GobblinClusterManager[NUM_MANAGERS];
    _clusterWorkers = new GobblinTaskRunner[NUM_WORKERS];
    _workerStartThreads = new Thread[NUM_WORKERS];

    for (int i = 0; i < NUM_MANAGERS; i++) {
      setupManager(i);
    }
    for (int i = 0; i < NUM_WORKERS; i++) {
      setupWorker(i);
    }
  }

  /**
   * Waits (with exponential backoff) until the selected output directory exists and holds
   * at least {@code minCount} ".txt" files. Replaces six near-identical inline predicates.
   *
   * @param useJobOutputDir true to watch the job output dir, false for the writer output dir
   * @param minCount minimum number of ".txt" files to wait for
   * @param description message used by the backoff assertion on timeout
   */
  private void awaitTxtFileCount(final boolean useJobOutputDir, final int minCount, String description)
      throws IOException, InterruptedException, TimeoutException {
    AssertWithBackoff.create().logger(LOG).timeoutMs(ASSERT_TIMEOUT).maxSleepMs(ASSERT_MAX_SLEEP).backoffFactor(1.5)
        .assertTrue(new Predicate<Void>() {
          @Override
          public boolean apply(Void input) {
            File outputDir = useJobOutputDir ? getJobOutputDir() : getWriterOutputDir();
            return outputDir != null && outputDir.exists()
                && FileUtils.listFiles(outputDir, new String[]{"txt"}, true).size() >= minCount;
          }
        }, description);
  }

  public void testKillWorker() throws Exception {
    final File testJobFile = new File(_jobDirPath + "/GobblinClusterKillTestJob.conf");

    // Job file should exist
    Assert.assertTrue(testJobFile.exists());

    awaitTxtFileCount(false, MIN_WRITER_OUTPUT_FILES, "Waiting for writer output");

    LOG.info("{} matches found before disconnecting worker",
        FileUtils.listFiles(getWriterOutputDir(), new String[]{"txt"}, true).size());

    // Kill one worker; the job should still complete on the remaining worker.
    _clusterWorkers[0].disconnectHelixManager();

    awaitTxtFileCount(true, MIN_JOB_OUTPUT_FILES, "Waiting for job-completion");

    // Job file should have been deleted
    Thread.sleep(5000);
    Assert.assertFalse(testJobFile.exists());
  }

  // The kill tests are unreliable on Travis
  // Disabled GobblinClusterKillTest until reliability improves
  // @Test(groups = { "disabledOnCI" }, dependsOnMethods = "testKillWorker")
  public void testKillManager() throws IOException, TimeoutException, InterruptedException {
    // kill a manager to cause leader election. New leader will schedule a new job.
    _clusterManagers[0].disconnectHelixManager();

    awaitTxtFileCount(false, MIN_WRITER_OUTPUT_FILES, "Waiting for writer output");
    awaitTxtFileCount(true, MIN_JOB_OUTPUT_FILES, "Waiting for job-completion");

    // Job file should have been deleted
    Thread.sleep(5000);
    final File testJobFile = new File(_jobDirPath + "/GobblinClusterKillTestJob.conf");
    Assert.assertFalse(testJobFile.exists());
  }

  // The kill tests are unreliable on Travis
  // Disabled GobblinClusterKillTest until reliability improves
  // @Test(groups = { "disabledOnCI" }, enabled=true, dependsOnMethods = "testKillManager")
  public void testRestartManager() throws IOException, TimeoutException, InterruptedException {
    _clusterManagers[0].disconnectHelixManager();
    // At this point there is one connected manager. Disconnect it and reconnect the other one to confirm that a manager
    // can continue to function after regaining leadership.
    _clusterManagers[1].disconnectHelixManager();

    // Should function after regaining leadership
    // need to reinitialize the heap manager and call handleLeadershipChange to shut down services in the test
    // since the leadership change is simulated
    _clusterManagers[0].initializeHelixManager();
    _clusterManagers[0].multiManager.handleLeadershipChange(null);

    // reconnect to get leadership role
    _clusterManagers[0].connectHelixManager();

    awaitTxtFileCount(false, MIN_WRITER_OUTPUT_FILES, "Waiting for writer output");
    awaitTxtFileCount(true, MIN_JOB_OUTPUT_FILES, "Waiting for job-completion");
  }

  @AfterMethod
  public void tearDown() throws IOException, InterruptedException {
    // Reconnect any managers disconnected by the tests so stop() can clean up properly.
    for (int i = 0; i < NUM_MANAGERS; i++) {
      _clusterManagers[i].multiManager.connect();
      if (!_clusterManagers[i].isHelixManagerConnected()) {
        _clusterManagers[i].connectHelixManager();
      }
      _clusterManagers[i].stop();
    }
    for (int i = 0; i < NUM_WORKERS; i++) {
      _clusterWorkers[i].stop();
    }
    for (int i = 0; i < NUM_WORKERS; i++) {
      _workerStartThreads[i].join();
    }
    _testingZKServer.close();
  }

  /**
   * Get a file that matches the glob pattern in the base directory
   * @param base directory to check
   * @param glob the glob pattern to match
   * @return a {@link File} if found, otherwise null
   */
  private File getFileFromGlob(String base, String glob) {
    try (DirectoryStream<java.nio.file.Path> dirStream = Files.newDirectoryStream(Paths.get(base), glob)) {
      Iterator<java.nio.file.Path> iter = dirStream.iterator();
      if (iter.hasNext()) {
        java.nio.file.Path path = iter.next();
        return path.toFile();
      } else {
        return null;
      }
    } catch (IOException e) {
      return null;
    }
  }

  /**
   * Find the writer output directory
   * @return a {@link File} if directory found, otherwise null
   */
  private File getWriterOutputDir() {
    File writerOutputJobDir = getFileFromGlob(
        _testDirPath + "/writer-output/GobblinClusterKillTestJob", "job*");
    File writerOutputDir = null;

    if (writerOutputJobDir != null) {
      writerOutputDir = new File(writerOutputJobDir,
          CLASS_NAME_BASED_PATH);
    }
    return writerOutputDir;
  }

  /**
   * Find the job output directory
   * @return a {@link File} if directory found, otherwise null
   */
  private File getJobOutputDir() {
    return getFileFromGlob(
        _testDirPath + "/job-output/" + CLASS_NAME_BASED_PATH,
        "*_append");
  }
}
| 2,191 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.testng.collections.Lists;
import org.testng.collections.Maps;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.cluster.ClusterIntegrationTest;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.TaskRunnerSuiteBase;
import org.apache.gobblin.cluster.TaskRunnerSuiteForJobTagTest;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* A test suite used for {@link ClusterIntegrationTest#testJobWithTag()}
*
* Each worker instance will have the tags it can accept.
* Each job is associated with a specific tag.
* Each job will always go to certain workers as expected due to the tag association.
*/
@Slf4j
public class IntegrationJobTagSuite extends IntegrationBasicSuite {
  private static final String WORKER_INSTANCE_1 = "WorkerInstance_1";
  private static final String WORKER_INSTANCE_2 = "WorkerInstance_2";
  private static final String WORKER_INSTANCE_3 = "WorkerInstance_3";

  /** Helix tags each worker instance is configured to accept. */
  private static final Map<String, List<String>> WORKER_TAG_ASSOCIATION = ImmutableMap.of(
      WORKER_INSTANCE_1, ImmutableList.of("T2", "T7", "T8"),
      WORKER_INSTANCE_2, ImmutableList.of("T4", "T5", "T6"),
      WORKER_INSTANCE_3, ImmutableList.of("T1", "T3"));

  /** Tag attached to each test job; determines which worker it routes to. */
  private static final Map<String, String> JOB_TAG_ASSOCIATION = ImmutableMap.<String, String>builder()
      .put("jobHello_1", "T2")
      .put("jobHello_2", "T4")
      .put("jobHello_3", "T5")
      .put("jobHello_4", "T6")
      .put("jobHello_5", "T7")
      .put("jobHello_6", "T8")
      .put("jobHello_7", "T1")
      .put("jobHello_8", "T3")
      .build();

  /** Jobs each worker is expected to run, derived from the two associations above. */
  public static final Map<String, List<String>> EXPECTED_JOB_NAMES = ImmutableMap.of(
      WORKER_INSTANCE_1, ImmutableList.of("jobHello_1", "jobHello_5", "jobHello_6"),
      WORKER_INSTANCE_2, ImmutableList.of("jobHello_2", "jobHello_3", "jobHello_4"),
      WORKER_INSTANCE_3, ImmutableList.of("jobHello_7", "jobHello_8"));

  /**
   * Attach Helix instance tags and a test instance name to a worker config.
   * When {@code tags} is null or empty the config is returned with no tag keys added.
   *
   * @param workerConfig base worker config to layer on top of
   * @param instanceName test instance name recorded under {@link IntegrationBasicSuite#TEST_INSTANCE_NAME_KEY}
   * @param tags Helix tags this instance accepts (joined with ',')
   * @return the worker config with tag/name overrides applied
   */
  private Config addInstanceTags(Config workerConfig, String instanceName, List<String> tags) {
    Map<String, String> configMap = new HashMap<>();
    // Idiomatic emptiness check (was: tags.size() > 0).
    if (tags != null && !tags.isEmpty()) {
      configMap.put(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY, Joiner.on(',').join(tags));
      configMap.put(IntegrationBasicSuite.TEST_INSTANCE_NAME_KEY, instanceName);
    }
    return ConfigFactory.parseMap(configMap).withFallback(workerConfig);
  }

  /** Three workers, each with its own tag set and the job-tag-aware suite builder. */
  @Override
  public Collection<Config> getWorkerConfigs() {
    Config parent = super.getWorkerConfigs().iterator().next();
    Config worker_1 = addInstanceTags(parent, WORKER_INSTANCE_1, WORKER_TAG_ASSOCIATION.get(WORKER_INSTANCE_1));
    Config worker_2 = addInstanceTags(parent, WORKER_INSTANCE_2, WORKER_TAG_ASSOCIATION.get(WORKER_INSTANCE_2));
    Config worker_3 = addInstanceTags(parent, WORKER_INSTANCE_3, WORKER_TAG_ASSOCIATION.get(WORKER_INSTANCE_3));
    worker_1 = addTaskRunnerSuiteBuilder(worker_1);
    worker_2 = addTaskRunnerSuiteBuilder(worker_2);
    worker_3 = addTaskRunnerSuiteBuilder(worker_3);
    return Lists.newArrayList(worker_1, worker_2, worker_3);
  }

  /** Point the worker at the {@link JobTagTaskRunnerSuiteBuilder} declared below. */
  private Config addTaskRunnerSuiteBuilder(Config workerConfig) {
    return ConfigFactory.parseMap(ImmutableMap.of(GobblinClusterConfigurationKeys.TASK_RUNNER_SUITE_BUILDER, "JobTagTaskRunnerSuiteBuilder")).withFallback(workerConfig);
  }

  /**
   * Create one job config per entry in {@link #JOB_TAG_ASSOCIATION}, each carrying its tag.
   */
  @Override
  protected Map<String, Config> overrideJobConfigs(Config rawJobConfig) {
    Map<String, Config> jobConfigs = Maps.newHashMap();
    for (Map.Entry<String, String> assoc : JOB_TAG_ASSOCIATION.entrySet()) {
      Config newConfig = getConfigOverride(rawJobConfig, assoc.getKey(), assoc.getValue());
      jobConfigs.put(assoc.getKey(), newConfig);
    }
    return jobConfigs;
  }

  /** Layer job name, tag, and a per-job output directory over the raw job config. */
  private Config getConfigOverride(Config config, String jobName, String jobTag) {
    Config newConfig = ConfigFactory.parseMap(ImmutableMap.of(
        GobblinClusterConfigurationKeys.HELIX_JOB_TAG_KEY, jobTag,
        ConfigurationKeys.JOB_NAME_KEY, jobName,
        ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, this.jobOutputBasePath + "/" + jobName))
        .withFallback(config);
    return newConfig;
  }

  /** Wait (up to 60s, backing off) until every tagged job has produced output. */
  @Override
  public void waitForAndVerifyOutputFiles() throws Exception {
    AssertWithBackoff asserter = AssertWithBackoff.create().logger(log).timeoutMs(60_000)
        .maxSleepMs(100).backoffFactor(1.5);
    asserter.assertTrue(this::hasExpectedFilesBeenCreated, "Waiting for job-completion");
  }

  /** One output file per job is expected. */
  @Override
  protected boolean hasExpectedFilesBeenCreated(Void input) {
    int numOfFiles = getNumOfOutputFiles(this.jobOutputBasePath);
    return numOfFiles == JOB_TAG_ASSOCIATION.size();
  }

  /** Suite builder that records the test instance name for tag-based verification. */
  @Alias("JobTagTaskRunnerSuiteBuilder")
  public static class JobTagTaskRunnerSuiteBuilder extends TaskRunnerSuiteBase.Builder {
    @Getter
    private String instanceName;

    public JobTagTaskRunnerSuiteBuilder(Config config) {
      super(config);
      this.instanceName = config.getString(IntegrationJobTagSuite.TEST_INSTANCE_NAME_KEY);
    }

    @Override
    public TaskRunnerSuiteBase build() {
      return new TaskRunnerSuiteForJobTagTest(this);
    }
  }
}
| 2,192 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/suite/IntegrationDedicatedManagerClusterSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import java.util.HashMap;
import java.util.Map;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.cluster.ClusterIntegrationTest;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.HelixUtils;
/**
* A test suite used for {@link ClusterIntegrationTest#testDedicatedManagerCluster()}
*/
public class IntegrationDedicatedManagerClusterSuite extends IntegrationBasicSuite {

  /** Create the base job cluster, then additionally create the dedicated manager cluster. */
  @Override
  public void createHelixCluster() throws Exception {
    super.createHelixCluster();
    String zkConnectionString = managerConfig
        .getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    String managerClusterName = managerConfig
        .getString(GobblinClusterConfigurationKeys.MANAGER_CLUSTER_NAME_KEY);
    HelixUtils.createGobblinHelixCluster(zkConnectionString, managerClusterName);
  }

  /** Enable the dedicated-manager-cluster mode and give the manager cluster a fixed name. */
  @Override
  public Config getManagerConfig() {
    Map<String, String> overrides = new HashMap<>();
    overrides.put(GobblinClusterConfigurationKeys.DEDICATED_MANAGER_CLUSTER_ENABLED, "true");
    overrides.put(GobblinClusterConfigurationKeys.MANAGER_CLUSTER_NAME_KEY, "ManagerCluster");
    return ConfigFactory.parseMap(overrides).withFallback(super.getManagerConfig()).resolve();
  }
}
| 2,193 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/suite/IntegrationSeparateProcessSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.Lists;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.cluster.ClusterIntegrationTest;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
/**
* A test suite used for {@link ClusterIntegrationTest#testSeparateProcessMode()}
*/
public class IntegrationSeparateProcessSuite extends IntegrationBasicSuite {

  /** Run each task in a separate JVM process by overriding the single base worker config. */
  @Override
  protected Collection<Config> getWorkerConfigs() {
    Map<String, String> overrides = new HashMap<>();
    overrides.put(GobblinClusterConfigurationKeys.ENABLE_TASK_IN_SEPARATE_PROCESS, "true");
    Config base = super.getWorkerConfigs().iterator().next();
    return Lists.newArrayList(ConfigFactory.parseMap(overrides).withFallback(base).resolve());
  }
}
| 2,194 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/suite/IntegrationJobRestartViaSpecSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigParseOptions;
import com.typesafe.config.ConfigSyntax;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.ClusterIntegrationTestUtils;
import org.apache.gobblin.cluster.FsJobConfigurationManager;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.SleepingTask;
import org.apache.gobblin.runtime.api.FsSpecConsumer;
import org.apache.gobblin.runtime.api.FsSpecProducer;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.SpecExecutor;
import org.apache.gobblin.runtime.api.SpecProducer;
public class IntegrationJobRestartViaSpecSuite extends IntegrationJobCancelSuite {
public static final String JOB_NAME = "HelloWorldTestJob";
public static final String FS_SPEC_CONSUMER_DIR = "/tmp/IntegrationJobCancelViaSpecSuite/jobSpecs";
private final SpecProducer _specProducer;
public IntegrationJobRestartViaSpecSuite(Config jobConfigOverrides) throws IOException {
super(jobConfigOverrides);
FileSystem fs = FileSystem.getLocal(new Configuration());
this._specProducer = new FsSpecProducer(fs, ConfigFactory.empty().withValue(FsSpecConsumer.SPEC_PATH_KEY, ConfigValueFactory.fromAnyRef(FS_SPEC_CONSUMER_DIR)));
}
private Config getJobConfig() throws IOException {
try (InputStream resourceStream = Resources.getResource(JOB_CONF_NAME).openStream()) {
Reader reader = new InputStreamReader(resourceStream);
Config rawJobConfig =
ConfigFactory.parseReader(reader, ConfigParseOptions.defaults().setSyntax(ConfigSyntax.CONF));
rawJobConfig = rawJobConfig.withFallback(getClusterConfig());
Config newConfig = ClusterIntegrationTestUtils.buildSleepingJob(JOB_ID, TASK_STATE_FILE, 100L);
newConfig = newConfig.withValue(SleepingTask.TASK_STATE_FILE_KEY, ConfigValueFactory.fromAnyRef(TASK_STATE_FILE));
newConfig = newConfig.withFallback(rawJobConfig);
return newConfig;
}
}
@Override
public Config getManagerConfig() {
Config managerConfig = super.getManagerConfig();
managerConfig = managerConfig.withValue(GobblinClusterConfigurationKeys.JOB_CONFIGURATION_MANAGER_KEY,
ConfigValueFactory.fromAnyRef(FsJobConfigurationManager.class.getName()))
.withValue(GobblinClusterConfigurationKeys.JOB_SPEC_REFRESH_INTERVAL, ConfigValueFactory.fromAnyRef(1L))
.withValue(FsSpecConsumer.SPEC_PATH_KEY, ConfigValueFactory.fromAnyRef(FS_SPEC_CONSUMER_DIR));
return managerConfig;
}
public void addJobSpec(String jobSpecName, String verb) throws IOException, URISyntaxException {
Config jobConfig = ConfigFactory.empty();
if (SpecExecutor.Verb.ADD.name().equals(verb)) {
jobConfig = getJobConfig();
} else if (SpecExecutor.Verb.DELETE.name().equals(verb)) {
jobConfig = jobConfig.withValue(GobblinClusterConfigurationKeys.CANCEL_RUNNING_JOB_ON_DELETE, ConfigValueFactory.fromAnyRef("true"));
} else if (SpecExecutor.Verb.UPDATE.name().equals(verb)) {
jobConfig = getJobConfig().withValue(GobblinClusterConfigurationKeys.CANCEL_RUNNING_JOB_ON_DELETE, ConfigValueFactory.fromAnyRef("true"));
}
JobSpec jobSpec = JobSpec.builder(Files.getNameWithoutExtension(jobSpecName))
.withConfig(jobConfig)
.withTemplate(new URI("FS:///"))
.withDescription("HelloWorldTestJob")
.withVersion("1")
.build();
SpecExecutor.Verb enumVerb = SpecExecutor.Verb.valueOf(verb);
switch (enumVerb) {
case ADD:
_specProducer.addSpec(jobSpec);
break;
case DELETE:
_specProducer.deleteSpec(jobSpec.getUri());
break;
case UPDATE:
_specProducer.updateSpec(jobSpec);
break;
default:
throw new IOException("Unknown Spec Verb: " + verb);
}
}
}
| 2,195 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/suite/IntegrationJobCancelSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import org.junit.Assert;
import com.typesafe.config.Config;
/**
 * Suite for job-cancellation tests: the job is expected to be cancelled before it
 * can write its output, so verification asserts the absence of output messages.
 */
public class IntegrationJobCancelSuite extends IntegrationBasicSuite {
public static final String JOB_ID = "job_HelloWorldTestJob_1234";
// Marker file whose presence signals that the sleeping task has started running.
public static final String TASK_STATE_FILE = "/tmp/IntegrationJobCancelSuite/taskState/_RUNNING";
// NOTE(review): assigned by the int constructor below but never read anywhere in this
// class — confirm whether it is still needed before removing.
private int sleepingTime = 10;
public IntegrationJobCancelSuite() {
// for backward compatible.
}
public IntegrationJobCancelSuite(int sleepingTime) {
this.sleepingTime = sleepingTime;
}
public IntegrationJobCancelSuite(Config jobConfigOverrides) {
super(jobConfigOverrides);
}
// Success means the cancelled job never produced its output or an NPE in the log.
@Override
public void waitForAndVerifyOutputFiles() throws Exception {
// If the job is cancelled, it should not have been able to write 'Hello World!'
Assert.assertFalse(verifyFileForMessage(this.jobLogOutputFile, "Hello World!"));
Assert.assertFalse(verifyFileForMessage(this.jobLogOutputFile, "java.lang.NullPointerException"));
}
}
| 2,196 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/suite/IntegrationDedicatedTaskDriverClusterSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import java.net.URL;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Resources;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.cluster.ClusterIntegrationTest;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.HelixUtils;
/**
* <p> A test suite used for {@link ClusterIntegrationTest#testDedicatedTaskDriverCluster()} ()}
*
* <p> We will have two separate clusters, one for planning job, one for actual job.
*
* <p> Each planning job is submitted by manager instance and reaches to task driver
* instance via 'task driver cluster (or planning job cluster)'.
*
* <p> Each actual job is submitted by task driver instance and reaches to the worker
* instance via 'job cluster'.
*/
public class IntegrationDedicatedTaskDriverClusterSuite extends IntegrationBasicSuite {

  /** Besides the base job cluster, also create the manager cluster and the task-driver cluster. */
  @Override
  public void createHelixCluster() throws Exception {
    super.createHelixCluster();
    String zkConnectionString = managerConfig
        .getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    HelixUtils.createGobblinHelixCluster(zkConnectionString,
        managerConfig.getString(GobblinClusterConfigurationKeys.MANAGER_CLUSTER_NAME_KEY));
    HelixUtils.createGobblinHelixCluster(zkConnectionString,
        taskDriverConfigs.iterator().next()
            .getString(GobblinClusterConfigurationKeys.TASK_DRIVER_CLUSTER_NAME_KEY));
  }

  /** Enable dedicated-manager mode with a fixed manager cluster name. */
  @Override
  public Config getManagerConfig() {
    Config overrides = ConfigFactory.parseMap(ImmutableMap.of(
        GobblinClusterConfigurationKeys.DEDICATED_MANAGER_CLUSTER_ENABLED, "true",
        GobblinClusterConfigurationKeys.MANAGER_CLUSTER_NAME_KEY, "ManagerCluster"));
    return overrides.withFallback(super.getManagerConfig()).resolve();
  }

  /** The single test job runs through the distributed job launcher. */
  @Override
  protected Map<String, Config> overrideJobConfigs(Config rawJobConfig) {
    Config jobConfig = ConfigFactory.parseMap(ImmutableMap.of(
        GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_ENABLED, true))
        .withFallback(rawJobConfig);
    return ImmutableMap.of(JOB_NAME, jobConfig);
  }

  /** One task driver, built from the TaskDriver resource over the shared cluster config. */
  @Override
  protected Collection<Config> getTaskDriverConfigs() {
    Config taskDriverBase = ConfigFactory.parseURL(Resources.getResource("BasicTaskDriver.conf"))
        .withFallback(getClusterConfig());
    return ImmutableList.of(addInstanceName(taskDriverBase, "TaskDriver1"));
  }

  /** Enable the dedicated task-driver cluster with a fixed name. */
  @Override
  protected Config getClusterConfig() {
    Config overrides = ConfigFactory.parseMap(ImmutableMap.of(
        GobblinClusterConfigurationKeys.DEDICATED_TASK_DRIVER_CLUSTER_ENABLED, "true",
        GobblinClusterConfigurationKeys.TASK_DRIVER_CLUSTER_NAME_KEY, "TaskDriverCluster"));
    return overrides.withFallback(super.getClusterConfig()).resolve();
  }

  /** A single named worker layered over the base worker config. */
  @Override
  protected Collection<Config> getWorkerConfigs() {
    return ImmutableList.of(
        addInstanceName(super.getWorkerConfigs().iterator().next(), "Worker1"));
  }
}
| 2,197 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/suite/IntegrationJobFactorySuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.testng.collections.Lists;
import com.google.common.collect.ImmutableMap;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.annotation.Alias;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.TaskRunnerSuiteBase;
import org.apache.gobblin.cluster.TaskRunnerSuiteForJobFactoryTest;
@Slf4j
public class IntegrationJobFactorySuite extends IntegrationBasicSuite {
  // Flipped to true by the test launcher when the distributed job completes; polled below.
  public static AtomicBoolean completed = new AtomicBoolean(false);

  /** Route the single test job through the distributed launcher with a test builder. */
  @Override
  protected Map<String, Config> overrideJobConfigs(Config rawJobConfig) {
    Config newConfig = ConfigFactory.parseMap(ImmutableMap.of(
        GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_ENABLED, true,
        GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_BUILDER, "TestDistributedExecutionLauncherBuilder"))
        .withFallback(rawJobConfig);
    return ImmutableMap.of(JOB_NAME, newConfig);
  }

  /** Point the worker at the {@link TestJobFactorySuiteBuilder} declared below. */
  @Override
  public Collection<Config> getWorkerConfigs() {
    Config rawConfig = super.getWorkerConfigs().iterator().next();
    Config workerConfig = ConfigFactory.parseMap(ImmutableMap.of(GobblinClusterConfigurationKeys.TASK_RUNNER_SUITE_BUILDER, "TestJobFactorySuiteBuilder"))
        .withFallback(rawConfig);
    return Lists.newArrayList(workerConfig);
  }

  /**
   * Poll the {@link #completed} flag until the job finishes. Was missing {@code @Override}
   * (it overrides {@link IntegrationBasicSuite#waitForAndVerifyOutputFiles()}); also checks
   * the flag before sleeping so an already-finished job returns immediately.
   * NOTE(review): there is still no timeout here — a hung job will spin forever.
   */
  @Override
  public void waitForAndVerifyOutputFiles() throws Exception {
    while (!completed.get()) {
      log.info("Waiting for job to be finished");
      Thread.sleep(1000);
    }
  }

  /** Builder used by the worker to create the job-factory test suite. */
  @Alias("TestJobFactorySuiteBuilder")
  public static class TestJobFactorySuiteBuilder extends TaskRunnerSuiteBase.Builder {
    public TestJobFactorySuiteBuilder(Config config) {
      super(config);
    }

    @Override
    public TaskRunnerSuiteBase build() {
      return new TaskRunnerSuiteForJobFactoryTest(this);
    }
  }
}
| 2,198 |
0 | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster | Create_ds/gobblin/gobblin-cluster/src/test/java/org/apache/gobblin/cluster/suite/IntegrationBasicSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster.suite;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.curator.test.TestingServer;
import org.testng.Assert;
import com.google.common.base.Charsets;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Resources;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigParseOptions;
import com.typesafe.config.ConfigRenderOptions;
import com.typesafe.config.ConfigSyntax;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.cluster.ClusterIntegrationTest;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterManager;
import org.apache.gobblin.cluster.GobblinTaskRunner;
import org.apache.gobblin.cluster.HelixUtils;
import org.apache.gobblin.cluster.TestHelper;
import org.apache.gobblin.metrics.GobblinMetrics;
import org.apache.gobblin.metrics.GobblinMetricsRegistry;
import org.apache.gobblin.testing.AssertWithBackoff;
/**
* A test suite used for {@link ClusterIntegrationTest#testJobShouldComplete()}
*
* This basic suite class provides utilities to launch one manager and multiple workers (participants).
* User can override {@link IntegrationBasicSuite#getWorkerConfigs()} for worker customization.
* User can also override {@link IntegrationBasicSuite#waitForAndVerifyOutputFiles()} to check different successful condition.
*/
@Slf4j
public class IntegrationBasicSuite {
public static final String JOB_NAME = "HelloWorldTestJob";
public static final String JOB_CONF_NAME = "HelloWorldJob.conf";
public static final String WORKER_INSTANCE_0 = "WorkerInstance_0";
public static final String TEST_INSTANCE_NAME_KEY = "worker.instance.name";
// Per-suite overrides layered on top of the job config loaded from resources.
protected final Config jobConfigOverrides;
// manager and workers
protected Config managerConfig;
protected Collection<Config> taskDriverConfigs = Lists.newArrayList();
protected Collection<Config> workerConfigs = Lists.newArrayList();
protected Collection<GobblinTaskRunner> workers = Lists.newArrayList();
protected Collection<GobblinTaskRunner> taskDrivers = Lists.newArrayList();
protected GobblinClusterManager manager;
// This filename should match the log file specified in log4j.xml
// (removed a stray duplicate semicolon from the end of this declaration)
public static Path jobLogOutputFile = Paths.get("gobblin-integration-test-log-dir/gobblin-cluster-test.log");
protected Path workPath;
protected Path jobConfigPath;
protected Path jobOutputBasePath;
protected URL jobConfResourceUrl;
protected TestingServer testingZKServer;
/** Create a suite with no job-config overrides. */
public IntegrationBasicSuite() {
this(ConfigFactory.empty());
}
/**
 * Create a suite, preparing the work/output directories, ZK server, configs, and job confs.
 * Initialization order matters: the work dir must exist before the output dir under it;
 * ZooKeeper must be created before the configs (which embed its connect string); and the
 * manager config must exist before the job-conf dir (whose path it provides).
 *
 * @param jobConfigOverrides config values layered over the job config loaded from resources
 */
public IntegrationBasicSuite(Config jobConfigOverrides) {
this.jobConfigOverrides = jobConfigOverrides;
try {
initWorkDir();
initJobOutputDir();
initZooKeeper();
initConfig();
initJobConfDir();
} catch (Exception e) {
// Surface any setup failure as an unchecked error to fail the test immediately.
throw new RuntimeException(e);
}
}
/**
 * Materialize the manager, task-driver, and worker configs. Requires the ZK testing
 * server to exist already, since the cluster config embeds its connect string.
 */
private void initConfig() {
this.managerConfig = this.getManagerConfig();
this.taskDriverConfigs = this.getTaskDriverConfigs();
this.workerConfigs = this.getWorkerConfigs();
}
/**
 * Create an in-process ZooKeeper testing server. It is NOT started here (the
 * {@code false} argument); {@code startCluster()} starts it later.
 */
private void initZooKeeper() throws Exception {
this.testingZKServer = new TestingServer(false);
log.info(
"Created testing ZK Server. Connection string : " + testingZKServer.getConnectString());
}
/**
 * Create the job-config directory (path taken from the manager config) and copy
 * the job conf resource into it. Must run after {@code initConfig()}.
 */
private void initJobConfDir() throws IOException {
String jobConfigDir = this.managerConfig.getString(GobblinClusterConfigurationKeys.JOB_CONF_PATH_KEY);
this.jobConfigPath = Paths.get(jobConfigDir);
Files.createDirectories(this.jobConfigPath);
this.jobConfResourceUrl = Resources.getResource(JOB_CONF_NAME);
copyJobConfFromResource();
}
/** Create the job output directory under the suite work directory. */
private void initJobOutputDir() throws IOException {
this.jobOutputBasePath = Paths.get(this.workPath + "/job-output");
Files.createDirectory(this.jobOutputBasePath);
}
/**
 * Resolve the work directory from the BasicCluster.conf resource, wipe any
 * leftovers from a previous run, and create it fresh.
 */
private void initWorkDir() throws IOException {
  Config clusterResource = ConfigFactory.parseURL(Resources.getResource("BasicCluster.conf"));
  String workDir = clusterResource.getString(GobblinClusterConfigurationKeys.CLUSTER_WORK_DIR);
  this.workPath = Paths.get(workDir);
  log.info("Created a new work directory: " + this.workPath.toAbsolutePath());
  // Delete the working directory in case the previous test fails to delete the directory
  // e.g. when the test was killed forcefully under a debugger.
  deleteWorkDir();
  Files.createDirectory(this.workPath);
}
/** Delete the suite work directory, recursively. A no-op when it was never created. */
public void deleteWorkDir() throws IOException {
  if (this.workPath == null || !Files.exists(this.workPath)) {
    return;
  }
  FileUtils.deleteDirectory(this.workPath.toFile());
}
/**
 * Parse the job conf resource, apply per-suite overrides, and write one .conf
 * file per job into the job-config directory. A failure to write one job's
 * config is logged and does not abort the others.
 */
private void copyJobConfFromResource() throws IOException {
  try (InputStream resourceStream = this.jobConfResourceUrl.openStream()) {
    Reader reader = new InputStreamReader(resourceStream);
    Config rawJobConfig = ConfigFactory.parseReader(reader, ConfigParseOptions.defaults().setSyntax(ConfigSyntax.CONF));
    for (Map.Entry<String, Config> entry : overrideJobConfigs(rawJobConfig).entrySet()) {
      String jobName = entry.getKey();
      try {
        writeJobConf(jobName, entry.getValue());
      } catch (IOException e) {
        log.error("Job " + jobName + " config cannot be written.");
      }
    }
  }
}
/**
 * Map of job name to its final config. Default: the single hello-world job with the
 * suite's overrides layered on top. Subclasses override to define multiple/custom jobs.
 */
protected Map<String, Config> overrideJobConfigs(Config rawJobConfig) {
return ImmutableMap.of(JOB_NAME, this.jobConfigOverrides.withFallback(rawJobConfig));
}
/**
 * Render the given job config to HOCON and write it as UTF-8 to
 * {@code <jobConfigPath>/<jobName>.conf}.
 */
private void writeJobConf(String jobName, Config jobConfig) throws IOException {
  String targetPath = this.jobConfigPath + "/" + jobName + ".conf";
  String renderedConfig = jobConfig.root().render(ConfigRenderOptions.defaults());
  // The former DataOutputStream wrapper added nothing; write through a UTF-8 writer directly.
  try (Writer writer = new OutputStreamWriter(new FileOutputStream(targetPath), Charsets.UTF_8)) {
    writer.write(renderedConfig);
  }
}
/**
 * Base cluster config: BasicCluster.conf with the live ZK connect string and the
 * suite work directory injected on top.
 */
protected Config getClusterConfig() {
  Map<String, String> overrides = new HashMap<>();
  overrides.put(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY,
      this.testingZKServer.getConnectString());
  overrides.put(GobblinTaskRunner.CLUSTER_APP_WORK_DIR, this.workPath.toString());
  Config base = ConfigFactory.parseURL(Resources.getResource("BasicCluster.conf"));
  return ConfigFactory.parseMap(overrides).withFallback(base);
}
/** Manager config: BasicManager.conf layered over the cluster config, resolved. */
public Config getManagerConfig() {
  Config managerResource = ConfigFactory.parseURL(Resources.getResource("BasicManager.conf"));
  return managerResource.withFallback(getClusterConfig()).resolve();
}
/** Task-driver configs; none by default. Suites with dedicated task drivers override this. */
protected Collection<Config> getTaskDriverConfigs() {
  return Lists.newArrayList();
}
/** Worker configs: a single worker built from BasicWorker.conf over the cluster config. */
protected Collection<Config> getWorkerConfigs() {
  Config workerResource = ConfigFactory.parseURL(Resources.getResource("BasicWorker.conf"));
  return Lists.newArrayList(workerResource.withFallback(getClusterConfig()).resolve());
}
/** Return a resolved copy of {@code baseConfig} with the test instance name set. */
protected Config addInstanceName(Config baseConfig, String instanceName) {
  Config nameConfig = ConfigFactory.parseMap(
      ImmutableMap.of(IntegrationBasicSuite.TEST_INSTANCE_NAME_KEY, instanceName));
  return nameConfig.withFallback(baseConfig).resolve();
}
public void waitForAndVerifyOutputFiles() throws Exception {
  // Poll with exponential backoff (up to 2 minutes) until the job output appears.
  AssertWithBackoff.create()
      .logger(log)
      .timeoutMs(120_000)
      .maxSleepMs(100)
      .backoffFactor(1.5)
      .assertTrue(this::hasExpectedFilesBeenCreated, "Waiting for job-completion");
}
/**
 * Checks whether the given file contains the provided message.
 *
 * @param logFile file to search
 * @param message string to look for
 * @return true if the file's content contains the message
 * @throws IOException if the file cannot be read
 */
static boolean verifyFileForMessage(Path logFile, String message) throws IOException {
  // Decode explicitly as UTF-8 rather than the platform default charset, matching
  // the UTF-8 encoding used elsewhere in this suite when files are written.
  String content = new String(Files.readAllBytes(logFile), java.nio.charset.StandardCharsets.UTF_8);
  return content.contains(message);
}
protected boolean hasExpectedFilesBeenCreated(Void input) {
  // The basic suite expects the job to produce exactly one output file.
  return getNumOfOutputFiles(this.jobOutputBasePath) == 1;
}
protected int getNumOfOutputFiles(Path jobOutputDir) {
  // Recursively count every *.txt file under the job output directory.
  return FileUtils.listFiles(jobOutputDir.toFile(), new String[]{"txt"}, true).size();
}
public void startCluster() throws Exception {
  // Bring-up order matters: ZooKeeper must be running before the Helix cluster can
  // be created, and the participants (workers, task drivers) are started before the
  // manager so they can register as the manager comes up.
  this.testingZKServer.start();
  createHelixCluster();
  startWorker();
  startTaskDriver();
  startManager();
}
private void startManager() throws Exception {
  // The manager runs in-process for the integration test.
  GobblinClusterManager clusterManager = new GobblinClusterManager(
      TestHelper.TEST_APPLICATION_NAME, TestHelper.TEST_APPLICATION_ID,
      this.managerConfig, Optional.absent());
  this.manager = clusterManager;
  clusterManager.start();
}
private void startTaskDriver() throws Exception {
  // Launch one task-driver runner per config, each named from its own config.
  for (Config config : this.taskDriverConfigs) {
    GobblinTaskRunner taskDriver = new GobblinTaskRunner(
        TestHelper.TEST_APPLICATION_NAME,
        config.getString(TEST_INSTANCE_NAME_KEY),
        TestHelper.TEST_APPLICATION_ID, "1",
        config, Optional.absent());
    this.taskDrivers.add(taskDriver);
    // GobblinTaskRunner.start() blocks until stop() is invoked, so run it on a
    // dedicated thread.
    new Thread(taskDriver::start).start();
  }
}
private void startWorker() throws Exception {
  // A lone worker keeps the fixed default instance name; with multiple workers,
  // each instance is named from its own config. Both cases share the same launch path.
  boolean singleWorker = this.workerConfigs.size() == 1;
  for (Config workerConfig : this.workerConfigs) {
    String instanceName = singleWorker
        ? WORKER_INSTANCE_0
        : workerConfig.getString(TEST_INSTANCE_NAME_KEY);
    GobblinTaskRunner runner = new GobblinTaskRunner(
        TestHelper.TEST_APPLICATION_NAME, instanceName,
        TestHelper.TEST_APPLICATION_ID, "1",
        workerConfig, Optional.absent());
    this.workers.add(runner);
    // GobblinTaskRunner.start() blocks until stop() is invoked, so run it on a
    // dedicated thread.
    new Thread(runner::start).start();
  }
}
public void verifyMetricsCleaned() {
  // After shutdown, no job-scoped metrics should remain in the registry.
  String jobMetricPattern = ".*" + JOB_NAME + ".*";
  Collection<GobblinMetrics> remaining =
      GobblinMetricsRegistry.getInstance().getMetricsByPattern(jobMetricPattern);
  Assert.assertEquals(remaining.size(), 0);
}
public void shutdownCluster() throws InterruptedException, IOException {
  // Stop the participants first, then the manager, and finally tear down ZooKeeper.
  this.workers.forEach(GobblinTaskRunner::stop);
  this.taskDrivers.forEach(GobblinTaskRunner::stop);
  this.manager.stop();
  this.testingZKServer.close();
}
protected void createHelixCluster() throws Exception {
  // Both the ZK address and the Helix cluster name come from the manager config.
  String zkConnect =
      this.managerConfig.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
  String clusterName =
      this.managerConfig.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);
  HelixUtils.createGobblinHelixCluster(zkConnect, clusterName);
}
}
| 2,199 |