| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
@Ignore
@Test
public void testMultipleWatchers() throws InterruptedException {
final BlockingStatus status = new BlockingStatus(1, "test", Status.QUEUED);
final WatchingThread thread1 = new WatchingThread(status);
thread1.start();
Thread.sleep(2000);
final WatchingThread thread2 = new WatchingThread(status);
thread2.start();
Thread.sleep(2000);
status.changeStatus(Status.FAILED);
thread2.join(1000);
thread1.join(1000);
System.out.println("Diff thread 1 " + thread1.getDiff());
System.out.println("Diff thread 2 " + thread2.getDiff());
Assert.assertTrue(thread1.getDiff() >= 4000 && thread1.getDiff() < 4200);
Assert.assertTrue(thread2.getDiff() >= 2000 && thread2.getDiff() < 2200);
}
|
TODO: This test is ignored for now because Travis CI on GitHub cannot reliably pass it. We will
modify the code below so that Travis passes in the future.
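For reference, below is a minimal sketch of the WatchingThread helper the test relies on, assembled from the run() and getDiff() rows that follow; the Thread superclass and field names are assumptions.
// Sketch of the helper thread (assumed structure; run() and getDiff() match the rows below).
final class WatchingThread extends Thread {
private final BlockingStatus status;
private volatile long diff;
WatchingThread(final BlockingStatus status) {
this.status = status;
}
@Override
public void run() {
final long startTime = System.currentTimeMillis();
this.status.blockOnFinishedStatus(); // blocks until the status reaches a finished state
this.diff = System.currentTimeMillis() - startTime;
}
public long getDiff() {
return this.diff;
}
}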
|
testMultipleWatchers
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
Apache-2.0
|
@Override
public void run() {
final long startTime = System.currentTimeMillis();
this.status.blockOnFinishedStatus();
this.diff = System.currentTimeMillis() - startTime;
}
|
TODO: This test is ignored for now because Travis CI on GitHub cannot reliably pass it. We will
modify the code below so that Travis passes in the future.
|
run
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
Apache-2.0
|
public long getDiff() {
return this.diff;
}
|
TODO: This test is ignored for now because Travis CI on GitHub cannot reliably pass it. We will
modify the code below so that Travis passes in the future.
|
getDiff
|
java
|
azkaban/azkaban
|
azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-exec-server/src/test/java/azkaban/execapp/event/BlockingStatusTest.java
|
Apache-2.0
|
private void disableFSCache() {
this.conf.setBoolean(FS_HDFS_IMPL_DISABLE_CACHE, true);
this.conf.setBoolean(FS_FAILOVER_IMPL_DISABLE_CACHE, true);
this.conf.setBoolean(FS_LOCAL_IMPL_DISABLE_CACHE, true);
// Get the default scheme
final String defaultFS = conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
if (defaultFS == null) {
return;
}
final String scheme = new Path(defaultFS).toUri().getScheme();
if (scheme == null) {
return;
}
// Construct the property name
final String FS_DEFAULT_IMPL_DISABLE_CACHE =
"fs." + scheme + IMPL_DISABLE_CACHE_SUFFIX;
this.conf.setBoolean(FS_DEFAULT_IMPL_DISABLE_CACHE, true);
logger.info("Disable cache for scheme " + FS_DEFAULT_IMPL_DISABLE_CACHE);
}
|
This class is used as the abstract base class for all the HadoopSecurityManager versions.
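For example, disableFSCache() above derives the scheme from fs.defaultFS and disables that filesystem's cache. A minimal standalone sketch of the derivation, assuming IMPL_DISABLE_CACHE_SUFFIX resolves to ".impl.disable.cache" (Hadoop's convention):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
public class DisableCacheExample {
public static void main(final String[] args) {
final Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://namenode:8020");
final String scheme = new Path(conf.get("fs.defaultFS")).toUri().getScheme();
// FileSystem.get() consults fs.<scheme>.impl.disable.cache before caching instances
conf.setBoolean("fs." + scheme + ".impl.disable.cache", true);
System.out.println("fs." + scheme + ".impl.disable.cache = "
+ conf.getBoolean("fs." + scheme + ".impl.disable.cache", false));
}
}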
|
disableFSCache
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public UserGroupInformation getProxiedUser(final Props userProp)
throws HadoopSecurityManagerException {
final String userToProxy = verifySecureProperty(userProp, JobProperties.USER_TO_PROXY);
final UserGroupInformation user = getProxiedUser(userToProxy);
if (user == null) {
throw new HadoopSecurityManagerException(
"Unable to proxy as " + userToProxy);
}
return user;
}
|
Create a proxied user, taking all parameters, including which user to proxy, from the provided
Properties.
|
getProxiedUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
private String verifySecureProperty(final Props props, final String s)
throws HadoopSecurityManagerException {
final String value = props.getString(s);
if (value == null) {
throw new HadoopSecurityManagerException(s + " not set in properties.");
}
return value;
}
|
This method is used to get a property from the props object. It throws an exception when the
property doesn't exist.
@param props
@param s
@return
@throws HadoopSecurityManagerException
|
verifySecureProperty
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public synchronized UserGroupInformation getProxiedUser(final String userToProxy)
throws HadoopSecurityManagerException {
return getProxiedUser(userToProxy, userToProxy);
}
|
Create a proxied user based on the explicit user name, taking other parameters necessary from
the properties file.
|
getProxiedUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public synchronized UserGroupInformation getProxiedUser(final String realIdentity, final String userToProxy)
throws HadoopSecurityManagerException {
if (userToProxy == null) {
throw new HadoopSecurityManagerException("userToProxy can't be null");
}
UserGroupInformation ugi = this.userUgiMap.get(userToProxy);
if (ugi == null) {
logger.info("Proxy user " + userToProxy
+ " does not exist. Creating new proxy user");
if (this.shouldProxy) {
try {
ugi =
UserGroupInformation.createProxyUser(userToProxy,
UserGroupInformation.getLoginUser());
} catch (final IOException e) {
throw new HadoopSecurityManagerException(
"Failed to create proxy user", e);
}
} else {
ugi = UserGroupInformation.createRemoteUser(userToProxy);
}
this.userUgiMap.putIfAbsent(userToProxy, ugi);
}
return ugi;
}
|
Create a proxied user based on the explicit user name, taking other parameters necessary from
the properties file. It also takes realIdentity for audit purposes.
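A hedged usage sketch; securityManager, conf, and the user name are hypothetical, and the doAs pattern mirrors the getFSAsUser implementation further down:
// Obtain a cached proxy UGI for "alice" and run a filesystem action as her.
final UserGroupInformation ugi = securityManager.getProxiedUser("alice", "alice");
final FileSystem fs = ugi.doAs((PrivilegedAction<FileSystem>) () -> {
try {
return FileSystem.get(conf);
} catch (final IOException e) {
throw new RuntimeException(e);
}
});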
|
getProxiedUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public FileSystem getFSAsUser(final String user)
throws HadoopSecurityManagerException {
return getFSAsUser(user, user);
}
|
Get the file system as the user passed in the parameter.
@param user
@return
@throws HadoopSecurityManagerException
|
getFSAsUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public FileSystem getFSAsUser(final String realIdentity, final String proxyUser)
throws HadoopSecurityManagerException {
final FileSystem fs;
try {
logger.info("Getting file system as " + proxyUser + " on behalf of " + realIdentity);
final UserGroupInformation ugi = getProxiedUser(realIdentity, proxyUser);
if (ugi != null) {
fs = ugi.doAs(new PrivilegedAction<FileSystem>() {
@Override
public FileSystem run() {
try {
return FileSystem.get(AbstractHadoopSecurityManager.this.conf);
} catch (final IOException e) {
throw new RuntimeException(e);
}
}
});
} else {
fs = FileSystem.get(this.conf);
}
} catch (final Exception e) {
logger.error("Failed to get FileSystem.", e);
throw new HadoopSecurityManagerException("Failed to get FileSystem. ", e);
}
return fs;
}
|
Get the file system as the user passed in the parameter. It also passes realIdentity for audit purposes.
@param realIdentity
@param proxyUser
@return
@throws HadoopSecurityManagerException
|
getFSAsUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public FileSystem run() {
try {
return FileSystem.get(AbstractHadoopSecurityManager.this.conf);
} catch (final IOException e) {
throw new RuntimeException(e);
}
}
|
Returns the FileSystem for the current configuration; executed as the proxied user via doAs inside getFSAsUser.
|
run
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
public boolean shouldProxy() {
return this.shouldProxy;
}
|
This method verifies whether proxying is allowed.
@return
|
shouldProxy
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected CredentialProvider getCustomCredentialProvider(final Props props,
final Credentials hadoopCred,
final Logger jobLogger, final String customCredentialProviderName) {
String credentialClassName = "unknown class";
try {
credentialClassName = props.getString(customCredentialProviderName);
logger.info("custom credential class name: " + credentialClassName);
final Class credentialClass = Class.forName(credentialClassName);
// The credential class must have a constructor accepting 3 parameters, Credentials,
// Props, and Logger in order.
final Constructor constructor = credentialClass
.getConstructor(Credentials.class, Props.class, org.apache.log4j.Logger.class);
final CredentialProvider customCredential = (CredentialProvider) constructor
.newInstance(hadoopCred, props, jobLogger);
return customCredential;
} catch (final Exception e) {
logger.error("Encountered error while loading and instantiating "
+ credentialClassName, e);
throw new IllegalStateException("Encountered error while loading and instantiating "
+ credentialClassName, e);
}
}
|
This method is used to get a custom credential provider.
@param props
@param hadoopCred
@param jobLogger
@param customCredentialProviderName
@return
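A minimal sketch of a provider class that satisfies this reflective contract; the class name and body are hypothetical, while the (Credentials, Props, Logger) constructor and the register(String) method are taken from the code above and the registerCustomCredential call site:
import azkaban.utils.Props;
import org.apache.hadoop.security.Credentials;
import org.apache.log4j.Logger;
public class MyCredentialProvider implements CredentialProvider {
private final Credentials hadoopCred;
private final Props props;
private final Logger jobLogger;
// The constructor must accept exactly (Credentials, Props, Logger), in this order.
public MyCredentialProvider(final Credentials hadoopCred, final Props props,
final Logger jobLogger) {
this.hadoopCred = hadoopCred;
this.props = props;
this.jobLogger = jobLogger;
}
@Override
public void register(final String userToProxy) {
jobLogger.info("Registering custom credentials for " + userToProxy);
// populate hadoopCred with the secrets this provider supplies
}
}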
|
getCustomCredentialProvider
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected void registerCustomCredential(final Props props, final Credentials hadoopCred,
final String userToProxy, final org.apache.log4j.Logger jobLogger,
final String customCredentialProviderName) {
final CredentialProvider customCredential = getCustomCredentialProvider(
props, hadoopCred, jobLogger, customCredentialProviderName);
final KeyStore keyStore = KeyStoreManager.getInstance().getKeyStore();
if (keyStore != null) {
// KeyStore is prepopulated to be used by the Credential Provider.
// This KeyStore is expected especially in containerized execution, where it is preferable to
// keep it in the Azkaban user's memory rather than on the container's file system. This ensures
// that the user can't access it.
try {
((CredentialProviderWithKeyStore) customCredential).setKeyStore(keyStore);
} catch (ClassCastException e) {
logger.error("Encountered error while casting to CredentialProviderWithKeyStore", e);
throw new IllegalStateException(
"Encountered error while casting to CredentialProviderWithKeyStore", e);
} catch (final Exception e) {
logger.error("Unknown error occurred while setting keyStore", e);
throw new IllegalStateException("Unknown error occurred while setting keyStore", e);
}
}
customCredential.register(userToProxy);
}
|
This method is used to register custom credentials, which will be used when the doPrefetch
method is called.
@param props
@param hadoopCred
@param userToProxy
@param jobLogger
@param customCredentialProviderName
|
registerCustomCredential
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public KeyStore getKeyStore(final Props props) {
logger.info("Prefetching KeyStore for the flow");
final Credentials cred = new Credentials();
final CredentialProviderWithKeyStore customCredential = (CredentialProviderWithKeyStore)
getCustomCredentialProvider(props, cred, logger,
Constants.ConfigurationKeys.CUSTOM_CREDENTIAL_NAME);
final KeyStore keyStore = customCredential.getKeyStore();
KeyStoreManager.getInstance().setKeyStore(keyStore);
return keyStore;
}
|
Fetches the Azkaban KeyStore to be placed in-memory for reuse by all the jobs within a flow in
containerized execution. The KeyStore object acquired is placed in KeyStoreManager for future
use.
@param props Azkaban Props containing CredentialProvider info.
@return KeyStore object.
|
getKeyStore
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public Map<String,KeyStore> getKeyStoreMap(final Props props) {
logger.info("Prefetching KeyStore for the flow");
final Credentials cred = new Credentials();
final CredentialProviderWithKeyStoreMap customCredential = (CredentialProviderWithKeyStoreMap)
getCustomCredentialProvider(props, cred, logger,
Constants.ConfigurationKeys.CUSTOM_CREDENTIAL_NAME);
final Map<String,KeyStore> keyStoreMap = customCredential.getKeyStoreMap();
KeyStoreManager.getInstance().setKeyStoreMap(keyStoreMap);
return keyStoreMap;
}
|
Fetches the Azkaban KeyStores to be placed in-memory for reuse by all the jobs within a flow in
containerized execution. The acquired KeyStore objects are placed in KeyStoreManager, keyed by
name, for future use.
@param props Azkaban Props containing CredentialProvider info.
@return Map of KeyStore objects.
|
getKeyStoreMap
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public boolean isHadoopSecurityEnabled() {
return this.securityEnabled;
}
|
This method is used to verify whether Hadoop security is enabled or not.
@return
|
isHadoopSecurityEnabled
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public void prefetchToken(final File tokenFile, final Props props, final Logger logger)
throws HadoopSecurityManagerException {
final String userToProxy = props.getString(JobProperties.USER_TO_PROXY);
doPrefetch(tokenFile, props, logger, userToProxy);
}
|
This method prefetches hadoop tokens for the user to proxy, as read from the job properties, by delegating to doPrefetch.
@throws HadoopSecurityManagerException
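A hedged usage sketch; the token file path is made up and the literal "user.to.proxy" key is an assumption standing in for JobProperties.USER_TO_PROXY:
final File tokenFile = new File("/tmp/az-tokens/exec-123.token"); // hypothetical location
final Props props = new Props();
props.put("user.to.proxy", "alice"); // key assumed; JobProperties.USER_TO_PROXY names it
securityManager.prefetchToken(tokenFile, props, logger);
// tokenFile now holds the serialized Credentials, owned by alice and the azkaban group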
|
prefetchToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected void doPrefetch(final File tokenFile, final Props props, final Logger logger,
final String userToProxy) throws HadoopSecurityManagerException {
// Create suffix to be added to kerberos principal
final String suffix = getFQNSuffix(props);
final String userToProxyFQN = userToProxy + suffix;
logger.info(tokenFile.toString() + props.toAllProperties().toString());
logger.info("Getting hadoop tokens based on props for " + userToProxyFQN);
final Credentials cred = new Credentials();
try {
// cred is being populated
fetchAllHadoopTokens(userToProxyFQN, userToProxy, props, logger, cred);
getProxiedUser(userToProxyFQN).doAs((PrivilegedExceptionAction<Void>) () -> {
registerAllCustomCredentials(userToProxy, props, cred, logger);
return null;
});
logger.info("fetched cred = " + cred);
cred.getAllTokens().forEach(t -> {
logger.info(String.format("Token = %s, %s, %s ", t.getKind(), t.getService(),
Arrays.toString(t.getIdentifier())));
});
logger.info("cred end");
logger.info("Preparing token file " + tokenFile.getAbsolutePath());
// make userToProxy the owner of the token file, not the FQN user
prepareTokenFile(userToProxy, cred, tokenFile, logger,
props.getString(Constants.ConfigurationKeys.SECURITY_USER_GROUP, "azkaban"));
// stash them to cancel after use.
logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
} catch (final Exception e) {
throw new HadoopSecurityManagerException("Failed to get hadoop tokens! "
+ e.getMessage() + e.getCause(), e);
} catch (final Throwable t) {
throw new HadoopSecurityManagerException("Failed to get hadoop tokens! "
+ t.getMessage() + t.getCause(), t);
}
}
|
This method is used to prefetch all required tokens for a job.
@param tokenFile
@param props
@param logger
@param userToProxy
@throws HadoopSecurityManagerException
|
doPrefetch
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected String getFQNSuffix(Props props) {
return (null != props.getString(HadoopSecurityManager.DOMAIN_NAME, null)) ?
FQN_SUFFIX_DELIMITER + kerberosSuffix(props) : "";
}
|
This method is used to get the FQN suffix that will be appended to the proxy user.
@param props
@return
|
getFQNSuffix
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
private void prepareTokenFile(final String user,
final Credentials credentials,
final File tokenFile,
final Logger logger,
final String group) throws IOException {
writeCredentialsToFile(credentials, tokenFile, logger);
try {
assignPermissions(user, tokenFile, group);
} catch (final IOException e) {
// On any error managing the token file, delete the file
tokenFile.delete();
throw e;
}
}
|
Prepare the token file. Writes credentials to a token file and sets appropriate permissions to
keep the file secure.
@param user user to be proxied
@param credentials Credentials to be written to file
@param tokenFile file to be written
@param logger logger to use
@param group user group to own the token file
@throws IOException If there are issues in reading / updating the token file
|
prepareTokenFile
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
private void writeCredentialsToFile(final Credentials credentials, final File tokenFile,
final Logger logger)
throws IOException {
FileOutputStream fos = null;
DataOutputStream dos = null;
try {
fos = new FileOutputStream(tokenFile);
dos = new DataOutputStream(fos);
credentials.writeTokenStorageToStream(dos);
} finally {
if (dos != null) {
try {
dos.close();
} catch (final Throwable t) {
// best effort
logger.error("encountered exception while closing DataOutputStream of the tokenFile", t);
}
}
if (fos != null) {
fos.close();
}
}
}
|
This method is used to write all the credentials into a file so that the file can be shared
with the user's job process.
@param credentials
@param tokenFile
@param logger
@throws IOException
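An equivalent sketch using try-with-resources; behavior matches the method above, including stream cleanup:
private void writeCredentialsToFile(final Credentials credentials, final File tokenFile)
throws IOException {
// closing the DataOutputStream also closes the wrapped FileOutputStream
try (final DataOutputStream dos = new DataOutputStream(new FileOutputStream(tokenFile))) {
credentials.writeTokenStorageToStream(dos);
}
}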
|
writeCredentialsToFile
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
private void assignPermissions(final String user, final File tokenFile, final String group)
throws IOException {
final List<String> changePermissionsCommand = Arrays.asList(
CHMOD, TOKEN_FILE_PERMISSIONS, tokenFile.getAbsolutePath()
);
int result = this.executeAsUser
.execute(System.getProperty("user.name"), changePermissionsCommand);
if (result != 0) {
throw new IOException("Unable to modify permissions. User: " + user);
}
final List<String> changeOwnershipCommand = Arrays.asList(
CHOWN, user + ":" + group, tokenFile.getAbsolutePath()
);
result = this.executeAsUser.execute("root", changeOwnershipCommand);
if (result != 0) {
throw new IOException("Unable to set ownership. User: " + user);
}
}
|
Uses the execute-as-user binary to reassign file permissions so that the token file is readable only by the intended user and the azkaban group.
<p>
Step 1. Set file permissions to 460 (readable by the owner, readable/writable by the azkaban group).
Step 2. Set the user as the owner of the file.
@param user user to be proxied
@param tokenFile file to be written
@param group user group to own the token file
|
assignPermissions
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected String kerberosSuffix(final Props props) {
// AZKABAN_SERVER_HOST_NAME is not set in Props here, get it from another instance of Props.
final String host = ServiceProvider.SERVICE_PROVIDER.getInstance(Props.class)
.getString(AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME, "unknown");
final StringBuilder builder = new StringBuilder("az_");
builder.append(host);
builder.append("_");
builder.append(props.getString(FlowProperties.AZKABAN_FLOW_EXEC_ID));
builder.append(props.getString(HadoopSecurityManager.DOMAIN_NAME));
return builder.toString();
}
|
Builds the Kerberos suffix of the form az_<webserver host>_<execution id><domain name>, which is
appended to the proxy user to qualify it per execution for auditing.
@param props flow properties supplying the execution id and domain name
@return the suffix string
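An illustration of the string the method builds; the host, execution id, and domain values are made up:
// Mirrors the StringBuilder logic above with hypothetical values.
final String host = "webserver01"; // AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME
final String execId = "12345"; // FlowProperties.AZKABAN_FLOW_EXEC_ID
final String domain = "@GRID"; // HadoopSecurityManager.DOMAIN_NAME
final String suffix = "az_" + host + "_" + execId + domain; // "az_webserver01_12345@GRID"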
|
kerberosSuffix
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected void registerAllCustomCredentials(String userToProxy, Props props, Credentials cred,
Logger logger) {
// Register user secrets by custom credential Object
if (props.getBoolean(JobProperties.ENABLE_JOB_SSL, false)) {
registerCustomCredential(props, cred, userToProxy, logger,
Constants.ConfigurationKeys.CUSTOM_CREDENTIAL_NAME);
}
// Register oauth tokens by custom oauth credential provider
if (props.getBoolean(JobProperties.ENABLE_OAUTH, false)) {
registerCustomCredential(props, cred, userToProxy, logger,
Constants.ConfigurationKeys.OAUTH_CREDENTIAL_NAME);
}
}
|
This method is used to register all custom credentials for all versions of Hadoop Security
Manager that extend AbstractHadoopSecurityManager.
@param userToProxy
@param props
@param cred
@param logger
|
registerAllCustomCredentials
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
private void cancelHiveToken(final Token<? extends TokenIdentifier> t,
final String userToProxy) throws HadoopSecurityManagerException {
try {
final HiveConf hiveConf = new HiveConf();
final IMetaStoreClient hiveClient = createRetryingMetaStoreClient(hiveConf);
hiveClient.cancelDelegationToken(t.encodeToUrlString());
} catch (final Exception e) {
throw new HadoopSecurityManagerException("Failed to cancel Token. "
+ e.getMessage() + e.getCause(), e);
}
}
|
This method is used to cancel a Hive delegation token.
@param t
@param userToProxy
@throws HadoopSecurityManagerException
|
cancelHiveToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public void cancelTokens(final File tokenFile, final String userToProxy, final Logger logger)
throws HadoopSecurityManagerException {
try {
final Credentials cred = Credentials
.readTokenStorageFile(new Path(tokenFile.toURI()), this.conf);
for (final Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
try {
logger.info("Got token.");
logger.info("Token kind: " + t.getKind());
logger.info("Token service: " + t.getService());
if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
logger.info("Cancelling hive token.");
cancelHiveToken(t, userToProxy);
} else if (t.getKind().equals(new Text("RM_DELEGATION_TOKEN"))) {
logger.info("Ignore cancelling mr job tracker token request.");
} else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
logger.info("Ignore cancelling namenode token request.");
} else if (t.getKind().equals(new Text("MR_DELEGATION_TOKEN"))) {
logger.info("Ignore cancelling jobhistoryserver mr token request.");
} else {
logger.info("unknown token type " + t.getKind());
}
} catch (final Exception e) {
logger.warn("Failed to cancel token", e);
}
}
} catch (final Exception e) {
throw new HadoopSecurityManagerException("Failed to cancel tokens", e);
}
}
|
This method is used to cancel the tokens stored in the token file.
@param tokenFile
@param userToProxy
@param logger
@throws HadoopSecurityManagerException
|
cancelTokens
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public Credentials getTokens(File tokenFile, Logger logger)
throws HadoopSecurityManagerException {
try {
return Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), this.conf);
} catch (final Exception e) {
throw new HadoopSecurityManagerException("Failed to get tokens from file", e);
}
}
|
This method is used to read tokens back from the token file.
@param tokenFile
@param logger
@return Credentials read from the token file
@throws HadoopSecurityManagerException
|
getTokens
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected IMetaStoreClient createRetryingMetaStoreClient(final HiveConf hiveConf)
throws MetaException {
// Custom hook-loader to return a HiveMetaHook if the table is configured with a custom storage handler
final HiveMetaHookLoader hookLoader = new HiveMetaHookLoader() {
@Override
public HiveMetaHook getHook(final Table tbl) throws MetaException {
if (tbl == null) {
return null;
}
try {
final HiveStorageHandler storageHandler =
HiveUtils.getStorageHandler(hiveConf, tbl.getParameters().get(META_TABLE_STORAGE));
return storageHandler == null ? null : storageHandler.getMetaHook();
} catch (final HiveException e) {
AbstractHadoopSecurityManager.logger.error(e.toString());
throw new MetaException("Failed to get storage handler: " + e);
}
}
};
logger.info(hiveConf.getAllProperties() + hookLoader.toString() + HiveMetaStoreClient.class.getName());
return RetryingMetaStoreClient
.getProxy(hiveConf, hookLoader, HiveMetaStoreClient.class.getName());
}
|
Method to create a metastore client that retries on failures
|
createRetryingMetaStoreClient
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
@Override
public HiveMetaHook getHook(final Table tbl) throws MetaException {
if (tbl == null) {
return null;
}
try {
final HiveStorageHandler storageHandler =
HiveUtils.getStorageHandler(hiveConf, tbl.getParameters().get(META_TABLE_STORAGE));
return storageHandler == null ? null : storageHandler.getMetaHook();
} catch (final HiveException e) {
AbstractHadoopSecurityManager.logger.error(e.toString());
throw new MetaException("Failed to get storage handler: " + e);
}
}
|
Returns a HiveMetaHook if the table is configured with a custom storage handler; otherwise returns null.
|
getHook
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
protected Optional<String[]> getOtherNameNodes(final Props props) {
// getting additional name nodes tokens
final String otherNameNodes = props.get(OTHER_NAMENODES_TO_GET_TOKEN);
if ((otherNameNodes != null) && (otherNameNodes.length() > 0)) {
logger.info("Fetching token(s) for other namenode(s): " + otherNameNodes);
final String[] nameNodeArr = otherNameNodes.split(",");
return Optional.of(nameNodeArr);
}
return Optional.empty();
}
|
This method is used to fetch the other NameNodes to get tokens for, if any are configured.
@param props
@return
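A usage sketch; the URIs are made up and OTHER_NAMENODES_TO_GET_TOKEN names the property key:
final Props props = new Props();
props.put(OTHER_NAMENODES_TO_GET_TOKEN, "hdfs://nn-a:8020,hdfs://nn-b:8020");
final Optional<String[]> others = getOtherNameNodes(props);
// others.get() yields {"hdfs://nn-a:8020", "hdfs://nn-b:8020"}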
|
getOtherNameNodes
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/AbstractHadoopSecurityManager.java
|
Apache-2.0
|
private Token<DelegationTokenIdentifier> fetchHcatToken(final String userToProxy,
final HiveConf hiveConf, final String tokenSignatureOverwrite, final Logger logger)
throws IOException, MetaException, TException {
logger.info(HiveConf.ConfVars.METASTOREURIS.varname + ": "
+ hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
logger.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": "
+ hiveConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
logger.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": "
+ hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));
final IMetaStoreClient hiveClient = createRetryingMetaStoreClient(hiveConf);
final String hcatTokenStr =
Failsafe.with(retryPolicy)
.get(() -> hiveClient.getDelegationToken(userToProxy, UserGroupInformation
.getLoginUser().getShortUserName()));
final Token<DelegationTokenIdentifier> hcatToken =
new Token<>();
hcatToken.decodeFromUrlString(hcatTokenStr);
// overwrite the value of the service property of the token if the signature
// override is specified.
// If the service field is set, do not overwrite that
if (hcatToken.getService().getLength() <= 0 && tokenSignatureOverwrite != null
&& tokenSignatureOverwrite.trim().length() > 0) {
hcatToken.setService(new Text(tokenSignatureOverwrite.trim()
.toLowerCase()));
logger.info(HIVE_TOKEN_SIGNATURE_KEY + ":" + tokenSignatureOverwrite);
}
logger.info("Created hive metastore token.");
logger.info("Token kind: " + hcatToken.getKind());
logger.info("Token service: " + hcatToken.getService());
return hcatToken;
}
|
This method fetches an hcat token per the specified hive configuration and then stores the
token in the specified credential store.
@param userToProxy String value indicating the name of the user the token will be
fetched for.
@param hiveConf the configuration based off which the hive client will be
initialized.
@param tokenSignatureOverwrite service name to write into the token when its service field is empty
@param logger the logger instance which writes the logging content to the job
logs.
|
fetchHcatToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
private void fetchJobTrackerToken(final String userToProxyFQN,
final String userToProxy, final Props props,
final Logger logger, final Credentials cred)
throws IOException, InterruptedException, HadoopSecurityManagerException {
if (props.getBoolean(HadoopSecurityManager.OBTAIN_JOBTRACKER_TOKEN, false)) {
final JobConf jobConf = new JobConf();
final JobClient jobClient = new JobClient(jobConf);
logger.info("Pre-fetching JT token from JobTracker");
Token<DelegationTokenIdentifier> mrDelegationToken = null;
try {
mrDelegationToken = Failsafe.with(retryPolicy)
.get(() -> jobClient.getDelegationToken(getMRTokenRenewerInternal(jobConf)));
} catch (final Exception e) {
logger.error("Failed to get delegation token " + e.getMessage());
} finally {
jobClient.close();
}
if (mrDelegationToken == null) {
logger.error("Failed to fetch JT token");
throw new HadoopSecurityManagerException(
"Failed to fetch JT token for " + userToProxyFQN);
}
logger.info(String.format("JT token pre-fetched, token kind: %s, token service: %s",
mrDelegationToken.getKind(), mrDelegationToken.getService()));
cred.addToken(mrDelegationToken.getService(), mrDelegationToken);
}
}
|
This method is used to fetch the delegation token for the JobTracker and add it to the cred object.
@param userToProxyFQN
@param userToProxy
@param props
@param logger
@param cred
@throws IOException
@throws InterruptedException
@throws HadoopSecurityManagerException
|
fetchJobTrackerToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
@Override
protected void fetchAllHadoopTokens(final String userToProxyFQN,
final String userToProxy, final Props props,
final Logger logger,
final Credentials cred) throws IOException, InterruptedException,
HadoopSecurityManagerException {
logger.info("Fetching all hadoop tokens.");
fetchMetaStoreToken(userToProxyFQN, userToProxy, props, logger, cred);
fetchJHSToken(userToProxyFQN, userToProxy, props, logger, cred);
getProxiedUser(userToProxyFQN).doAs((PrivilegedExceptionAction<Void>) () -> {
fetchNameNodeToken(userToProxyFQN, userToProxy, props, logger, cred);
fetchJobTrackerToken(userToProxyFQN, userToProxy, props, logger, cred);
return null;
});
}
|
This method is used to fetch all hadoop tokens (metastore, JobHistory Server, NameNode, and JobTracker) and add them to the cred object.
@param userToProxyFQN
@param userToProxy
@param props
@param logger
@param cred
@throws IOException
@throws InterruptedException
@throws HadoopSecurityManagerException
|
fetchAllHadoopTokens
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
private void fetchNameNodeToken(final String userToProxyFQN,
final String userToProxy, final Props props,
final Logger logger,
final Credentials cred) throws IOException, HadoopSecurityManagerException {
logger.info("Here is the props for " + HadoopSecurityManager.OBTAIN_NAMENODE_TOKEN +
": " + props.getBoolean(HadoopSecurityManager.OBTAIN_NAMENODE_TOKEN));
if (props.getBoolean(HadoopSecurityManager.OBTAIN_NAMENODE_TOKEN, false)) {
final String renewer = getMRTokenRenewerInternal(new JobConf()).toString();
logger.info("Renewer is " + renewer);
// Get the tokens name node
fetchNameNodeTokenInternal(renewer, cred, userToProxyFQN, null);
Optional<String[]> otherNameNodes = getOtherNameNodes(props);
if (otherNameNodes.isPresent()) {
String[] nameNodeArr = otherNameNodes.get();
for (String nameNode : nameNodeArr) {
fetchNameNodeTokenInternal(renewer, cred, userToProxyFQN,
new Path(nameNode.trim()).toUri());
logger.info("Successfully fetched tokens for: " + nameNode);
}
}
} else {
logger.info(
HadoopSecurityManager.OBTAIN_NAMENODE_TOKEN + " was not configured");
}
}
|
This method is used to fetch the delegation token for the NameNode(s) and add it to the cred object.
@param userToProxyFQN
@param userToProxy
@param props
@param logger
@param cred
@throws IOException
@throws HadoopSecurityManagerException
|
fetchNameNodeToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
private void fetchNameNodeTokenInternal(final String renewer, final Credentials cred,
final String userToProxyFQN, final URI uri)
throws IOException, HadoopSecurityManagerException {
FileSystem fs = null;
try {
// Use FileSystem.get() instead of newInstance() so the disabled-cache setting is honored:
// get() checks whether the cache is enabled, newInstance() does not.
if (uri == null) {
fs = Failsafe.with(retryPolicy).get(() -> FileSystem.get(conf));
} else {
fs = Failsafe.with(retryPolicy).get(() -> FileSystem.get(uri, conf));
}
// check if we get the correct FS, and most importantly, the conf
logger.info("Getting DFS token from " + fs.getUri());
try {
FileSystem finalFs = fs;
final Token<?>[] fsTokens = Failsafe.with(retryPolicy)
.get(()-> finalFs.addDelegationTokens(renewer, cred));
for (int i = 0; i < fsTokens.length; i++) {
final Token<?> fsToken = fsTokens[i];
logger.info(String.format(
"DFS token from namenode pre-fetched, token kind: %s, token service: %s",
fsToken.getKind(), fsToken.getService()));
}
} catch (Exception e) {
// Adding logging of configuration on when exception is encountered.
logger.info("Hadoop Configuration Values used:\n");
conf.forEach(s -> {
logger.info("key:" + s.getKey() + " value:" + s.getValue());
});
logger.error("Failed to fetch DFS token for " + userToProxyFQN + "because of " +e + e.getMessage());
throw new HadoopSecurityManagerException(
"Failed to fetch DFS token for " + userToProxyFQN);
}
} finally {
if (fs != null) {
fs.close();
}
}
}
|
fetchNameNodeTokenInternal - with a modified UGI of the format
<userToProxy>/az_<host name>_<exec_id>
Due to this change, the FileSystem cache creates an entry per execution instead of an entry per
proxy user. This could blow up the cache very quickly on a busy Executor and cause an OOM. To make
it worse, the cached entry is never reused, as it is specific to an execution. To avoid this,
the FileSystem cache should be disabled before calling this method.
@param renewer
@param cred
@param userToProxyFQN
@param uri
@throws IOException
@throws HadoopSecurityManagerException
|
fetchNameNodeTokenInternal
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
private void fetchJHSToken(final String userToProxyFQN,
final String userToProxy, final Props props, final Logger logger, final Credentials cred)
throws HadoopSecurityManagerException, IOException {
if (props.getBoolean(OBTAIN_JOBHISTORYSERVER_TOKEN, false)) {
logger.info("Pre-fetching JH token from job history server");
final YarnRPC rpc = YarnRPC.create(this.conf);
final String serviceAddr = this.conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
logger.info("Connecting to HistoryServer at: " + serviceAddr);
final HSClientProtocol hsProxy =
(HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
NetUtils.createSocketAddr(serviceAddr), this.conf);
Token<?> jhsdt = null;
try {
jhsdt = getDelegationTokenFromHS(hsProxy);
} catch (final Exception e) {
logger.error("Failed to fetch JH token" + e.getMessage());
throw new HadoopSecurityManagerException(
"Failed to fetch JH token for " + userToProxyFQN);
}
if (hsProxy instanceof Closeable) {
// HSClientProtocol is not closable, but its only implementation, HSClientProtocolPBClientImpl, is
((Closeable) hsProxy).close();
}
if (jhsdt == null) {
logger.error("getDelegationTokenFromHS() returned null");
throw new HadoopSecurityManagerException(
"Unable to fetch JH token for " + userToProxyFQN);
}
logger.info(String
.format("JH token from job history server pre-fetched, token Kind: %s, token service: %s",
jhsdt.getKind(), jhsdt.getService()));
cred.addToken(jhsdt.getService(), jhsdt);
}
}
|
This method is used to fetch the delegation token for the JobHistory Server and add it to the cred object.
@param userToProxyFQN
@param userToProxy
@param props
@param logger
@param cred
@throws HadoopSecurityManagerException
@throws IOException
|
fetchJHSToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
private void fetchMetaStoreToken(final String userToProxyFQN,
final String userToProxy, final Props props, final Logger logger,
final Credentials cred)
throws HadoopSecurityManagerException {
if (props.getBoolean(HadoopSecurityManager.OBTAIN_HCAT_TOKEN, false)) {
try {
// first we fetch and save the default hcat token.
logger.info("Pre-fetching default hive metastore token from hive");
HiveConf hiveConf = new HiveConf();
Token<DelegationTokenIdentifier> hcatToken =
fetchHcatToken(userToProxyFQN, hiveConf, null, logger);
cred.addToken(hcatToken.getService(), hcatToken);
// Added support for extra_hcat_clusters
final List<String> extraHcatClusters = props.getStringListFromCluster(EXTRA_HCAT_CLUSTERS);
int extraHcatTokenCount = 0;
if (Collections.EMPTY_LIST != extraHcatClusters) {
logger.info("Need to pre-fetch extra metastore tokens from extra hive clusters.");
// start to process the user inputs.
for (final String thriftUrls : extraHcatClusters) {
logger.info("Pre-fetching metastore token from cluster : " + thriftUrls);
hiveConf = new HiveConf();
hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrls);
logger.info(hiveConf.getAllProperties() + " " + userToProxyFQN );
try {
hcatToken = fetchHcatToken(userToProxyFQN, hiveConf, thriftUrls, logger);
cred.addToken(hcatToken.getService(), hcatToken);
++extraHcatTokenCount;
} catch (Exception e) {
logger.error("Failed to fetch extra metastore tokens from : " + thriftUrls
+ e.getMessage());
}
}
if (0 == extraHcatTokenCount) {
throw new HadoopSecurityManagerException("No extra metastore token could be fetched.");
}
} else {
// Only if EXTRA_HCAT_CLUSTERS was not provided, fall back to EXTRA_HCAT_LOCATION.
logger.info("Extra hcat clusters provided: " + extraHcatClusters);
final List<String> extraHcatLocations =
props.getStringList(EXTRA_HCAT_LOCATION);
if (Collections.EMPTY_LIST != extraHcatLocations) {
logger.info("Need to pre-fetch extra metastore tokens from hive.");
// start to process the user inputs.
for (final String thriftUrl : extraHcatLocations) {
logger.info("Pre-fetching metastore token from : " + thriftUrl);
hiveConf = new HiveConf();
hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl);
try {
hcatToken =
fetchHcatToken(userToProxyFQN, hiveConf, thriftUrl, logger);
cred.addToken(hcatToken.getService(), hcatToken);
++extraHcatTokenCount;
} catch (Exception e) {
logger.error("Failed to fetch extra metastore tokens from : " + thriftUrl, e);
}
}
if (0 == extraHcatTokenCount) {
throw new HadoopSecurityManagerException("No extra metastore token could be fetched.");
}
}
}
logger.info("Hive metastore token(s) prefetched");
} catch (final Throwable t) {
final String message =
"Failed to get hive metastore token." + t.getMessage()
+ t.getCause();
logger.error(message, t);
throw new HadoopSecurityManagerException(message);
}
}
}
|
This method is used to fetch the delegation token for the MetaStore and add it to the cred object.
@param userToProxyFQN
@param userToProxy
@param props
@param logger
@param cred
@throws HadoopSecurityManagerException
|
fetchMetaStoreToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
private Text getMRTokenRenewerInternal(final JobConf jobConf) throws IOException {
// Taken from Oozie
//
// Getting renewer correctly for JT principal also though JT in hadoop
// 1.x does not have
// support for renewing/cancelling tokens
final String servicePrincipal =
jobConf.get(RM_PRINCIPAL, jobConf.get(JT_PRINCIPAL));
final Text renewer;
if (servicePrincipal != null) {
String target =
jobConf.get(HADOOP_YARN_RM, jobConf.get(HADOOP_JOB_TRACKER_2));
if (target == null) {
target = jobConf.get(HADOOP_JOB_TRACKER);
}
final String addr = NetUtils.createSocketAddr(target).getHostName();
renewer =
new Text(SecurityUtil.getServerPrincipal(servicePrincipal, addr));
} else {
// No security
renewer = DEFAULT_RENEWER;
}
return renewer;
}
|
This method determines the renewer principal to use when requesting an MR delegation token, derived from the RM/JT principal and address in the job configuration.
@param jobConf
@return the renewer as Text
@throws IOException
|
getMRTokenRenewerInternal
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
private Token<?> getDelegationTokenFromHS(final HSClientProtocol hsProxy)
throws IOException, InterruptedException {
logger.info(hsProxy.toString());
final GetDelegationTokenRequest request =
this.recordFactory.newRecordInstance(GetDelegationTokenRequest.class);
request.setRenewer(Master.getMasterPrincipal(this.conf));
logger.info(request.getRenewer() + Master.getMasterPrincipal(this.conf));
final org.apache.hadoop.yarn.api.records.Token mrDelegationToken;
mrDelegationToken =
Failsafe.with(retryPolicy)
.get(()->hsProxy.getDelegationToken(request).getDelegationToken());
return ConverterUtils.convertFromYarn(mrDelegationToken,
hsProxy.getConnectAddress());
}
|
This method is used to fetch a delegation token from the JobHistory Server.
@param hsProxy
@return the fetched token
@throws IOException
@throws InterruptedException
|
getDelegationTokenFromHS
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/HadoopSecurityManager_H_2_0.java
|
Apache-2.0
|
public static synchronized UserGroupInformation getProxiedUser(
final String toProxy, final Properties prop, final Logger log, final Configuration conf)
throws IOException {
if (conf == null) {
throw new IllegalArgumentException("conf can't be null");
}
UserGroupInformation.setConfiguration(conf);
if (toProxy == null) {
throw new IllegalArgumentException("toProxy can't be null");
}
if (loginUser == null) {
log.info("No login user. Creating login user");
final String keytab = verifySecureProperty(prop, PROXY_KEYTAB_LOCATION, log);
final String proxyUser = verifySecureProperty(prop, PROXY_USER, log);
UserGroupInformation.loginUserFromKeytab(proxyUser, keytab);
loginUser = UserGroupInformation.getLoginUser();
log.info("Logged in with user " + loginUser);
} else {
log.info("loginUser (" + loginUser + ") already created, refreshing tgt.");
loginUser.checkTGTAndReloginFromKeytab();
}
return UserGroupInformation.createProxyUser(toProxy, loginUser);
}
|
Create a proxied user based on the explicit user name, taking other parameters necessary from
properties file.
|
getProxiedUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
Apache-2.0
|
public static UserGroupInformation getProxiedUser(final Properties prop,
final Logger log, final Configuration conf) throws IOException {
final String toProxy = verifySecureProperty(prop, JobProperties.USER_TO_PROXY, log);
final UserGroupInformation user = getProxiedUser(toProxy, prop, log, conf);
if (user == null) {
throw new IOException(
"Proxy as any user in unsecured grid is not supported!"
+ prop.toString());
}
log.info("created proxy user for " + user.getUserName() + user.toString());
return user;
}
|
Create a proxied user, taking all parameters, including which user to proxy, from the provided
Properties.
|
getProxiedUser
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
Apache-2.0
|
public static String verifySecureProperty(final Properties properties, final String s,
final Logger l) throws IOException {
final String value = properties.getProperty(s);
if (value == null) {
throw new IOException(s
+ " not set in properties. Cannot use secure proxy");
}
l.info("Secure proxy configuration: Property " + s + " = " + value);
return value;
}
|
Verifies that the given property is set and returns its value, throwing an IOException when it
is missing, since the secure proxy cannot be used without it.
|
verifySecureProperty
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
Apache-2.0
|
public static boolean shouldProxy(final Properties prop) {
final String shouldProxy = prop.getProperty(ENABLE_PROXYING);
return shouldProxy != null && shouldProxy.equals("true");
}
|
Checks whether proxying is enabled, i.e., the ENABLE_PROXYING property is set to "true" in the
provided Properties.
|
shouldProxy
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
Apache-2.0
|
public static synchronized void prefetchToken(final File tokenFile,
final Props p, final Logger logger) throws InterruptedException,
IOException {
final Configuration conf = new Configuration();
logger.info("Getting proxy user for " + p.getString(JobProperties.USER_TO_PROXY));
logger.info("Getting proxy user for " + p.toString());
getProxiedUser(p.toProperties(), logger, conf).doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
getToken(p);
return null;
}
private void getToken(final Props p) throws InterruptedException,
IOException {
final String shouldPrefetch = p.getString(OBTAIN_BINARY_TOKEN);
if (shouldPrefetch != null && shouldPrefetch.equals("true")) {
logger.info("Pre-fetching token");
logger.info("Pre-fetching fs token");
final FileSystem fs = FileSystem.get(conf);
final Token<?> fsToken =
fs.getDelegationToken(p.getString(JobProperties.USER_TO_PROXY));
logger.info("Created token: " + fsToken.toString());
final Job job =
new Job(conf, "totally phony, extremely fake, not real job");
final JobConf jc = new JobConf(conf);
final JobClient jobClient = new JobClient(jc);
logger.info("Pre-fetching job token: Got new JobClient: " + jc);
final Token<DelegationTokenIdentifier> mrdt =
jobClient.getDelegationToken(new Text("hi"));
logger.info("Created token: " + mrdt.toString());
job.getCredentials().addToken(new Text("howdy"), mrdt);
job.getCredentials().addToken(fsToken.getService(), fsToken);
FileOutputStream fos = null;
DataOutputStream dos = null;
try {
fos = new FileOutputStream(tokenFile);
dos = new DataOutputStream(fos);
job.getCredentials().writeTokenStorageToStream(dos);
} finally {
if (dos != null) {
dos.close();
}
if (fos != null) {
fos.close();
}
}
logger.info("Loading hadoop tokens into "
+ tokenFile.getAbsolutePath());
p.put("HadoopTokenFileLoc", tokenFile.getAbsolutePath());
} else {
logger.info("Not pre-fetching token");
}
}
});
}
|
Prefetches the filesystem and job tracker delegation tokens as the proxied user and writes them
to the token file.
|
prefetchToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
Apache-2.0
|
@Override
public Void run() throws Exception {
getToken(p);
return null;
}
|
Runs getToken(p) inside the proxied user's doAs context.
|
run
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
Apache-2.0
|
private void getToken(final Props p) throws InterruptedException,
IOException {
final String shouldPrefetch = p.getString(OBTAIN_BINARY_TOKEN);
if (shouldPrefetch != null && shouldPrefetch.equals("true")) {
logger.info("Pre-fetching token");
logger.info("Pre-fetching fs token");
final FileSystem fs = FileSystem.get(conf);
final Token<?> fsToken =
fs.getDelegationToken(p.getString(JobProperties.USER_TO_PROXY));
logger.info("Created token: " + fsToken.toString());
final Job job =
new Job(conf, "totally phony, extremely fake, not real job");
final JobConf jc = new JobConf(conf);
final JobClient jobClient = new JobClient(jc);
logger.info("Pre-fetching job token: Got new JobClient: " + jc);
final Token<DelegationTokenIdentifier> mrdt =
jobClient.getDelegationToken(new Text("hi"));
logger.info("Created token: " + mrdt.toString());
job.getCredentials().addToken(new Text("howdy"), mrdt);
job.getCredentials().addToken(fsToken.getService(), fsToken);
FileOutputStream fos = null;
DataOutputStream dos = null;
try {
fos = new FileOutputStream(tokenFile);
dos = new DataOutputStream(fos);
job.getCredentials().writeTokenStorageToStream(dos);
} finally {
if (dos != null) {
dos.close();
}
if (fos != null) {
fos.close();
}
}
logger.info("Loading hadoop tokens into "
+ tokenFile.getAbsolutePath());
p.put("HadoopTokenFileLoc", tokenFile.getAbsolutePath());
} else {
logger.info("Not pre-fetching token");
}
}
|
Fetches the filesystem and job tracker delegation tokens and writes them to the token file when
OBTAIN_BINARY_TOKEN is set to "true".
|
getToken
|
java
|
azkaban/azkaban
|
azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-hadoop-security-plugin/src/main/java/azkaban/security/commons/SecurityUtils.java
|
Apache-2.0
|
private static String[] prepareDefaultConf() throws IOException {
final File templateFolder = new File("test/local-conf-templates");
final File localConfFolder = new File("local/conf");
if (!localConfFolder.exists()) {
FileUtils.copyDirectory(templateFolder, localConfFolder.getParentFile());
log.info("Copied local conf templates from " + templateFolder.getAbsolutePath());
}
log.info("Using conf at " + localConfFolder.getAbsolutePath());
return new String[]{"-conf", "local/conf"};
}
|
To enable "run out of the box for testing".
|
prepareDefaultConf
|
java
|
azkaban/azkaban
|
azkaban-solo-server/src/main/java/azkaban/soloserver/AzkabanSingleServer.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-solo-server/src/main/java/azkaban/soloserver/AzkabanSingleServer.java
|
Apache-2.0
|
private void launch() throws Exception {
// exec server first so that it's ready to accept calls by web server when web initializes
AzkabanExecutorServer.launch(this.executor);
log.info("Azkaban Exec Server started...");
this.executor.getFlowRunnerManager()
.setExecutorActive(true, this.executor.getHost(), this.executor.getPort());
log.info("Azkaban Exec Server activated...");
AzkabanWebServer.launch(this.webServer);
log.info("Azkaban Web Server started...");
}
|
To enable "run out of the box for testing".
|
launch
|
java
|
azkaban/azkaban
|
azkaban-solo-server/src/main/java/azkaban/soloserver/AzkabanSingleServer.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-solo-server/src/main/java/azkaban/soloserver/AzkabanSingleServer.java
|
Apache-2.0
|
public Dependency copy() {
try {
return new Dependency(getFileName(), getDestination(), getType(), getIvyCoordinates(),
getSHA1());
} catch (final InvalidHashException e) {
// This should never happen because we already validated the hash when creating this dependency
throw new RuntimeException("InvalidHashException when copying dependency.");
}
}
|
Make a copy of this dependency
@return a copy of this dependency
|
copy
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
public DependencyFile makeDependencyFile(final File file) {
try {
return new DependencyFile(file, getFileName(), getDestination(), getType(),
getIvyCoordinates(), getSHA1());
} catch (final InvalidHashException e) {
// This should never happen because we already validated the hash when creating this dependency
throw new RuntimeException("InvalidHashException when copying dependency.");
}
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
makeDependencyFile
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
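For orientation, a minimal usage sketch of the Dependency API shown in the records above. It assumes the five-argument constructor implied by copy() (fileName, destination, type, ivyCoordinates, sha1); the DependencySketch class and all values are illustrative assumptions, and the SHA-1 is a dummy 40-hex-char string (the constructor validates it, so a malformed hash would throw InvalidHashException).

import java.io.File;
import azkaban.spi.Dependency;
import azkaban.spi.DependencyFile;

public class DependencySketch {
  public static void main(final String[] args) throws Exception {
    // All values below are made up for illustration.
    final Dependency dep = new Dependency(
        "guava-24.1-jre.jar",                        // fileName
        "lib",                                       // destination (assumed)
        "jar",                                       // type (assumed)
        "com.google.guava:guava:24.1-jre",           // ivyCoordinates
        "0123456789abcdef0123456789abcdef01234567"); // dummy 40-hex-char SHA-1
    final Dependency copy = dep.copy();              // copy of the metadata
    final DependencyFile depFile =
        dep.makeDependencyFile(new File("/tmp/guava-24.1-jre.jar")); // attach a local file
    System.out.println(copy + " -> " + depFile.getFileName());
  }
}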
@JsonProperty("file")
public String getFileName() {
return this.fileName;
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
getFileName
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
public String getDestination() {
return this.destination;
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
getDestination
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
public String getType() {
return this.type;
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
getType
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
public String getIvyCoordinates() {
return this.ivyCoordinates;
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
getIvyCoordinates
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
public String getSHA1() {
return this.sha1;
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
getSHA1
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final Dependency that = (Dependency) o;
return this.fileName.equals(that.fileName) &&
this.destination.equals(that.destination) &&
this.type.equals(that.type) &&
this.ivyCoordinates.equals(that.ivyCoordinates) &&
this.sha1.equals(that.sha1);
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
equals
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
@Override
public String toString() {
return this.fileName + "/" + this.destination;
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
toString
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
@Override
public int hashCode() {
return Objects.hash(this.sha1);
}
|
Make a new DependencyFile with the same details as this dependency
@param file for DependencyFile
@return the new DependencyFile
|
hashCode
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/Dependency.java
|
Apache-2.0
|
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
DependencyFile that = (DependencyFile) o;
return Objects.equals(file, that.file);
}
|
Representation of startup dependency with an associated local file. Usually a DependencyFile will never be
directly instantiated (except maybe in tests), but rather will be generated from an instance of a Dependency
using Dependency::makeDependencyFile(File f)
|
equals
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/DependencyFile.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/DependencyFile.java
|
Apache-2.0
|
public boolean isFlowEventType() {
return this == FLOW_STARTED || this == FLOW_FINISHED || this == FLOW_STATUS_CHANGED;
}
|
Enum class defining the list of supported event types.
|
isFlowEventType
|
java
|
azkaban/azkaban
|
azkaban-spi/src/main/java/azkaban/spi/EventType.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-spi/src/main/java/azkaban/spi/EventType.java
|
Apache-2.0
|
public void processStatusUpdate(final DependencyInstance depInst) {
    // this is a blocking call; might offload it to another thread if necessary
    this.flowTriggerInstanceLoader.updateDependencyExecutionStatus(depInst);
  }
|
Process status update of dependency instance
|
processStatusUpdate
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/DependencyInstanceProcessor.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/DependencyInstanceProcessor.java
|
Apache-2.0
|
public void start() {
this.scheduler.scheduleAtFixedRate(() -> {
FlowTriggerExecutionCleaner.this.flowTriggerInstanceLoader
.deleteTriggerExecutionsFinishingOlderThan(System
.currentTimeMillis() - RETENTION_PERIOD.toMillis());
}, 0, CLEAN_INTERVAL.getSeconds(), TimeUnit.SECONDS);
}
|
This is to purge old flow trigger execution records from the db table.
Otherwise the table will keep growing indefinitely as triggers are executed, leading to
excessive query times on the table.
The cleanup policy is to remove trigger instances that finished more than 30 days ago.
|
start
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerExecutionCleaner.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerExecutionCleaner.java
|
Apache-2.0
|
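The cleaner above is a standard scheduled-retention pattern. Below is a self-contained sketch of the same idea, assuming a hypothetical TriggerInstanceDao with a deleteFinishedBefore method; the 30-day retention comes from the docstring, while the hourly interval is an assumption (the real CLEAN_INTERVAL constant is not shown in this record).

import java.time.Duration;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class RetentionCleaner {
  private static final Duration RETENTION_PERIOD = Duration.ofDays(30); // per the docstring
  private static final Duration CLEAN_INTERVAL = Duration.ofHours(1);   // assumed interval
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();
  private final TriggerInstanceDao dao; // hypothetical DAO

  public RetentionCleaner(final TriggerInstanceDao dao) {
    this.dao = dao;
  }

  public void start() {
    // Every CLEAN_INTERVAL, delete rows whose finish time predates the retention window.
    this.scheduler.scheduleAtFixedRate(
        () -> this.dao.deleteFinishedBefore(
            System.currentTimeMillis() - RETENTION_PERIOD.toMillis()),
        0, CLEAN_INTERVAL.getSeconds(), TimeUnit.SECONDS);
  }

  public void shutdown() {
    this.scheduler.shutdownNow();
  }

  public interface TriggerInstanceDao {
    void deleteFinishedBefore(long cutoffMillis); // hypothetical method
  }
}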
public void shutdown() {
this.scheduler.shutdown();
this.scheduler.shutdownNow();
}
|
This is to purge old flow trigger execution records from the db table.
Otherwise the table will keep growing indefinitely as triggers are executed, leading to
excessive query times on the table.
The cleanup policy is to remove trigger instances that finished more than 30 days ago.
|
shutdown
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerExecutionCleaner.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerExecutionCleaner.java
|
Apache-2.0
|
public void start() throws FlowTriggerDependencyPluginException {
this.triggerPluginManager.loadAllPlugins();
this.recoverIncompleteTriggerInstances();
this.cleaner.start();
}
|
FlowTriggerService is a singleton class in the AZ web server that
processes all trigger-related operations. Externally it provides the following
operations -
1. Create a trigger instance based on a trigger definition.
2. Cancel a trigger instance.
3. Query running and historic trigger instances.
4. Recover incomplete trigger instances.
Internally, it
1. maintains the list of running trigger instances in memory.
2. updates the status and start/end time of trigger instances.
3. persists trigger instances to the DB.
FlowTriggerService will be leveraged by the Quartz scheduler, our new AZ scheduler, to schedule
triggers.
After construction, call {@link #start()} to start the service.
|
start
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
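A rough lifecycle sketch for FlowTriggerService, using only methods visible in these records. In Azkaban the service is injected rather than constructed by hand; the flow id, version, user, and import paths below are assumptions for illustration.

import azkaban.flowtrigger.CancellationCause;
import azkaban.flowtrigger.FlowTriggerService;
import azkaban.flowtrigger.TriggerInstance;
import azkaban.project.FlowTrigger;
import azkaban.project.Project;

public class FlowTriggerServiceUsageSketch {
  public static void run(final FlowTriggerService service, final FlowTrigger flowTrigger,
      final Project project) throws Exception {
    service.start();                                   // load plugins, recover, start cleaner
    service.startTrigger(flowTrigger, "myFlow", 1,     // flow id/version/user are assumed values
        "someUser", project);
    for (final TriggerInstance inst : service.getRunningTriggers()) {
      service.cancelTriggerInstance(inst, CancellationCause.MANUAL); // example: manual cancel
    }
  }
}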
private DependencyInstanceContext createDepContext(final FlowTriggerDependency dep, final long
startTimeInMills, final String triggerInstId) throws Exception {
final DependencyCheck dependencyCheck = this.triggerPluginManager
.getDependencyCheck(dep.getType());
final DependencyInstanceCallback callback = new DependencyInstanceCallbackImpl(this);
final Map<String, String> depInstConfig = new HashMap<>();
depInstConfig.putAll(dep.getProps());
depInstConfig.put(FlowTriggerProps.DEP_NAME, dep.getName());
final DependencyInstanceConfigImpl config = new DependencyInstanceConfigImpl(depInstConfig);
final DependencyInstanceRuntimeProps runtimeProps = new DependencyInstanceRuntimePropsImpl
(ImmutableMap
.of(FlowTriggerProps.START_TIME, String.valueOf(startTimeInMills), FlowTriggerProps
.TRIGGER_INSTANCE_ID, triggerInstId));
return dependencyCheck.run(config, runtimeProps, callback);
}
|
FlowTriggerService is a singleton class in the AZ web server that
processes all trigger-related operations. Externally it provides the following
operations -
1. Create a trigger instance based on a trigger definition.
2. Cancel a trigger instance.
3. Query running and historic trigger instances.
4. Recover incomplete trigger instances.
Internally, it
1. maintains the list of running trigger instances in memory.
2. updates the status and start/end time of trigger instances.
3. persists trigger instances to the DB.
FlowTriggerService will be leveraged by the Quartz scheduler, our new AZ scheduler, to schedule
triggers.
After construction, call {@link #start()} to start the service.
|
createDepContext
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private TriggerInstance createTriggerInstance(final FlowTrigger flowTrigger, final String flowId,
final int flowVersion, final String submitUser, final Project project) {
final String triggerInstId = generateId();
final long startTime = System.currentTimeMillis();
// create a list of dependency instances
final List<DependencyInstance> depInstList = new ArrayList<>();
for (final FlowTriggerDependency dep : flowTrigger.getDependencies()) {
final String depName = dep.getName();
DependencyInstanceContext context = null;
try {
context = createDepContext(dep, startTime, triggerInstId);
} catch (final Exception ex) {
logger.error("unable to create dependency context for trigger instance[id = {}]",
triggerInstId, ex);
}
// if dependency instance context fails to be created, then its status is cancelled and
// cause is failure
final Status status = context == null ? Status.CANCELLED : Status.RUNNING;
final CancellationCause cause =
context == null ? CancellationCause.FAILURE : CancellationCause.NONE;
final long endTime = context == null ? System.currentTimeMillis() : 0;
final DependencyInstance depInst = new DependencyInstance(depName, startTime, endTime,
context, status, cause);
depInstList.add(depInst);
}
final TriggerInstance triggerInstance = new TriggerInstance(triggerInstId, flowTrigger,
flowId, flowVersion, submitUser, depInstList, Constants.UNASSIGNED_EXEC_ID, project);
return triggerInstance;
}
|
FlowTriggerService is a singleton class in the AZ web server that
processes all trigger-related operations. Externally it provides the following
operations -
1. Create a trigger instance based on a trigger definition.
2. Cancel a trigger instance.
3. Query running and historic trigger instances.
4. Recover incomplete trigger instances.
Internally, it
1. maintains the list of running trigger instances in memory.
2. updates the status and start/end time of trigger instances.
3. persists trigger instances to the DB.
FlowTriggerService will be leveraged by the Quartz scheduler, our new AZ scheduler, to schedule
triggers.
After construction, call {@link #start()} to start the service.
|
createTriggerInstance
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private String generateId() {
return UUID.randomUUID().toString();
}
|
FlowTriggerService is a singleton class in the AZ web server that
processes all trigger-related operations. Externally it provides the following
operations -
1. Create a trigger instance based on a trigger definition.
2. Cancel a trigger instance.
3. Query running and historic trigger instances.
4. Recover incomplete trigger instances.
Internally, it
1. maintains the list of running trigger instances in memory.
2. updates the status and start/end time of trigger instances.
3. persists trigger instances to the DB.
FlowTriggerService will be leveraged by the Quartz scheduler, our new AZ scheduler, to schedule
triggers.
After construction, call {@link #start()} to start the service.
|
generateId
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void scheduleKill(final TriggerInstance triggerInst, final Duration duration, final
CancellationCause cause) {
logger
.debug("cancel trigger instance {} in {} secs", triggerInst.getId(), duration
.getSeconds());
this.timeoutService.schedule(() -> {
cancelTriggerInstance(triggerInst, cause);
}, duration.toMillis(), TimeUnit.MILLISECONDS);
}
|
FlowTriggerService is a singleton class in the AZ web server that
processes all trigger-related operations. Externally it provides the following
operations -
1. Create a trigger instance based on a trigger definition.
2. Cancel a trigger instance.
3. Query running and historic trigger instances.
4. Recover incomplete trigger instances.
Internally, it
1. maintains the list of running trigger instances in memory.
2. updates the status and start/end time of trigger instances.
3. persists trigger instances to the DB.
FlowTriggerService will be leveraged by the Quartz scheduler, our new AZ scheduler, to schedule
triggers.
After construction, call {@link #start()} to start the service.
|
scheduleKill
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public Collection<TriggerInstance> getRunningTriggers() {
return this.flowTriggerInstanceLoader.getRunning();
}
|
@return the list of running trigger instances
|
getRunningTriggers
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public Collection<TriggerInstance> getRecentlyFinished() {
return this.flowTriggerInstanceLoader.getRecentlyFinished(RECENTLY_FINISHED_TRIGGER_LIMIT);
}
|
@return the list of running trigger instances
|
getRecentlyFinished
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public TriggerInstance findTriggerInstanceById(final String triggerInstanceId) {
return this.flowTriggerInstanceLoader.getTriggerInstanceById(triggerInstanceId);
}
|
@return the list of running trigger instances
|
findTriggerInstanceById
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public TriggerInstance findTriggerInstanceByExecId(final int flowExecId) {
return this.flowTriggerInstanceLoader.getTriggerInstanceByFlowExecId(flowExecId);
}
|
@return the list of running trigger instances
|
findTriggerInstanceByExecId
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private boolean isDoneButFlowNotExecuted(final TriggerInstance triggerInstance) {
return triggerInstance.getStatus() == Status.SUCCEEDED && triggerInstance.getFlowExecId() ==
Constants.UNASSIGNED_EXEC_ID;
}
|
@return the list of running trigger instances
|
isDoneButFlowNotExecuted
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void recoverRunningOrCancelling(final TriggerInstance triggerInstance) {
final FlowTrigger flowTrigger = triggerInstance.getFlowTrigger();
for (final DependencyInstance depInst : triggerInstance.getDepInstances()) {
if (depInst.getStatus() == Status.RUNNING || depInst.getStatus() == Status.CANCELLING) {
final FlowTriggerDependency dependency = flowTrigger
.getDependencyByName(depInst.getDepName());
DependencyInstanceContext context = null;
try {
//recreate dependency instance context
context = createDepContext(dependency, depInst.getStartTime(), depInst
.getTriggerInstance().getId());
} catch (final Exception ex) {
logger
.error(
"unable to create dependency context for trigger instance[id ="
+ " {}]", triggerInstance.getId(), ex);
}
depInst.setDependencyInstanceContext(context);
if (context == null) {
depInst.setStatus(Status.CANCELLED);
depInst.setCancellationCause(CancellationCause.FAILURE);
}
}
}
if (triggerInstance.getStatus() == Status.CANCELLING) {
addToRunningListAndCancel(triggerInstance);
} else if (triggerInstance.getStatus() == Status.RUNNING) {
final long remainingTime = remainingTimeBeforeTimeout(triggerInstance);
addToRunningListAndScheduleKill(triggerInstance, Duration.ofMillis(remainingTime).plus
(CANCELLING_GRACE_PERIOD_AFTER_RESTART), CancellationCause.TIMEOUT);
}
}
|
@return the list of running trigger instances
|
recoverRunningOrCancelling
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void recoverTriggerInstance(final TriggerInstance triggerInstance) {
this.flowTriggerExecutorService.submit(() -> recover(triggerInstance));
}
|
@return the list of running trigger instances
|
recoverTriggerInstance
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void recover(final TriggerInstance triggerInstance) {
logger.info("recovering pending trigger instance {}", triggerInstance.getId());
if (isDoneButFlowNotExecuted(triggerInstance)) {
// if trigger instance succeeds but the associated flow hasn't been started yet, then start
// the flow
this.triggerProcessor.processSucceed(triggerInstance);
} else {
recoverRunningOrCancelling(triggerInstance);
}
}
|
@return the list of running trigger instances
|
recover
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public void recoverIncompleteTriggerInstances() {
final Collection<TriggerInstance> unfinishedTriggerInstances = this.flowTriggerInstanceLoader
.getIncompleteTriggerInstances();
for (final TriggerInstance triggerInstance : unfinishedTriggerInstances) {
if (triggerInstance.getFlowTrigger() != null) {
recoverTriggerInstance(triggerInstance);
} else {
logger.error("cannot recover the trigger instance {}, flow trigger is null,"
+ " cancelling it ", triggerInstance.getId());
//finalize unrecoverable trigger instances
// the following situation would cause trigger instances unrecoverable:
// 1. project A with flow A associated with flow trigger A is uploaded
// 2. flow trigger A starts to run
// 3. project A with flow B without any flow trigger is uploaded
// 4. web server restarts
// in this case, flow trigger instance of flow trigger A will be equipped with latest
// project, thus failing to find the flow trigger since new project doesn't contain flow
// trigger at all
if (isDoneButFlowNotExecuted(triggerInstance)) {
triggerInstance.setFlowExecId(Constants.FAILED_EXEC_ID);
this.flowTriggerInstanceLoader.updateAssociatedFlowExecId(triggerInstance);
} else {
for (final DependencyInstance depInst : triggerInstance.getDepInstances()) {
if (!Status.isDone(depInst.getStatus())) {
processStatusAndCancelCauseUpdate(depInst, Status.CANCELLED,
CancellationCause.FAILURE);
this.triggerProcessor.processTermination(depInst.getTriggerInstance());
}
}
}
}
}
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
recoverIncompleteTriggerInstances
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void addToRunningListAndScheduleKill(final TriggerInstance triggerInst, final
Duration durationBeforeKill, final CancellationCause cause) {
// if trigger instance is already done
if (!Status.isDone(triggerInst.getStatus())) {
this.runningTriggers.add(triggerInst);
scheduleKill(triggerInst, durationBeforeKill, cause);
}
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
addToRunningListAndScheduleKill
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private CancellationCause getCancelleationCause(final TriggerInstance triggerInst) {
final Set<CancellationCause> causes = triggerInst.getDepInstances().stream()
.map(DependencyInstance::getCancellationCause).collect(Collectors.toSet());
if (causes.contains(CancellationCause.FAILURE) || causes
.contains(CancellationCause.CASCADING)) {
return CancellationCause.CASCADING;
} else if (causes.contains(CancellationCause.TIMEOUT)) {
return CancellationCause.TIMEOUT;
} else if (causes.contains(CancellationCause.MANUAL)) {
return CancellationCause.MANUAL;
} else {
return CancellationCause.NONE;
}
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
getCancelleationCause
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
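The cause resolution above applies a fixed precedence: FAILURE or CASCADING wins (reported as CASCADING), then TIMEOUT, then MANUAL, then NONE. A standalone sketch of the same rule, with a hypothetical resolve helper and a local copy of the enum:

import java.util.EnumSet;
import java.util.Set;

public class CancellationCausePrecedenceSketch {
  enum CancellationCause { NONE, FAILURE, CASCADING, TIMEOUT, MANUAL } // mirrors the enum above

  // Hypothetical helper replicating the precedence in getCancelleationCause.
  static CancellationCause resolve(final Set<CancellationCause> causes) {
    if (causes.contains(CancellationCause.FAILURE)
        || causes.contains(CancellationCause.CASCADING)) {
      return CancellationCause.CASCADING;
    } else if (causes.contains(CancellationCause.TIMEOUT)) {
      return CancellationCause.TIMEOUT;
    } else if (causes.contains(CancellationCause.MANUAL)) {
      return CancellationCause.MANUAL;
    }
    return CancellationCause.NONE;
  }

  public static void main(final String[] args) {
    // FAILURE outranks TIMEOUT and MANUAL, and is reported as CASCADING:
    System.out.println(resolve(EnumSet.of(
        CancellationCause.MANUAL, CancellationCause.TIMEOUT, CancellationCause.FAILURE)));
    // prints CASCADING
  }
}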
private void cancelTriggerInstance(final TriggerInstance triggerInst) {
    logger.debug("cancelling trigger instance with id " + triggerInst.getId());
    final CancellationCause cause = getCancelleationCause(triggerInst);
    for (final DependencyInstance depInst : triggerInst.getDepInstances()) {
      if (depInst.getStatus() == Status.CANCELLING) {
        cancelContextAsync(depInst.getContext());
      } else if (depInst.getStatus() == Status.RUNNING) {
        // dependency instances of a trigger instance in cancelling status can still be running,
        // e.g. dep inst1: failed, dep inst2: running -> trigger inst is cancelling
        this.processStatusAndCancelCauseUpdate(depInst, Status.CANCELLING, cause);
        cancelContextAsync(depInst.getContext());
      }
    }
  }
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
cancelTriggerInstance
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void addToRunningListAndCancel(final TriggerInstance triggerInst) {
this.runningTriggers.add(triggerInst);
cancelTriggerInstance(triggerInst);
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
addToRunningListAndCancel
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void updateDepInstStatus(final DependencyInstance depInst, final Status newStatus) {
depInst.setStatus(newStatus);
if (Status.isDone(depInst.getStatus())) {
depInst.setEndTime(System.currentTimeMillis());
}
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
updateDepInstStatus
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void processStatusUpdate(final DependencyInstance depInst, final Status newStatus) {
logger.debug("process status update for " + depInst);
updateDepInstStatus(depInst, newStatus);
this.dependencyProcessor.processStatusUpdate(depInst);
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
processStatusUpdate
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void processStatusAndCancelCauseUpdate(final DependencyInstance depInst, final Status
newStatus, final CancellationCause cause) {
depInst.setCancellationCause(cause);
updateDepInstStatus(depInst, newStatus);
this.dependencyProcessor.processStatusUpdate(depInst);
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
processStatusAndCancelCauseUpdate
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private long remainingTimeBeforeTimeout(final TriggerInstance triggerInst) {
final long now = System.currentTimeMillis();
return Math.max(0,
triggerInst.getFlowTrigger().getMaxWaitDuration().get().toMillis() - (now - triggerInst
.getStartTime()));
}
|
Resume executions of all incomplete trigger instances by recovering the state from db.
|
remainingTimeBeforeTimeout
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
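The computation above is simply remaining = max(0, maxWait - elapsed). A worked example under assumed values (the RemainingTimeSketch class is illustrative):

public class RemainingTimeSketch {
  public static void main(final String[] args) {
    // Assumed values: maxWait = 10 minutes, trigger started 7 minutes ago.
    final long maxWaitMillis = 10 * 60 * 1000L;
    final long startTime = System.currentTimeMillis() - 7 * 60 * 1000L;
    final long remaining =
        Math.max(0, maxWaitMillis - (System.currentTimeMillis() - startTime));
    System.out.println(remaining); // ~180000 ms; a trigger past its deadline clamps to 0
  }
}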
public void startTrigger(final FlowTrigger flowTrigger, final String flowId,
final int flowVersion, final String submitUser, final Project project) {
final TriggerInstance triggerInst = createTriggerInstance(flowTrigger, flowId, flowVersion,
submitUser, project);
this.flowTriggerExecutorService.submit(() -> {
logger.info("Starting the flow trigger [trigger instance id: {}] by {}",
triggerInst.getId(), submitUser);
start(triggerInst);
});
}
|
Start the trigger. The method will be scheduled for invocation by the Azkaban scheduler.
|
startTrigger
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void start(final TriggerInstance triggerInst) {
this.triggerProcessor.processNewInstance(triggerInst);
if (triggerInst.getStatus() == Status.CANCELLED) {
// all dependency instances failed
logger.info(
"Trigger instance[id: {}] is cancelled since all dependency instances fail to be created",
triggerInst.getId());
this.triggerProcessor.processTermination(triggerInst);
} else if (triggerInst.getStatus() == Status.CANCELLING) {
// some of the dependency instances failed
logger.info(
"Trigger instance[id: {}] is being cancelled since some dependency instances fail to be created",
triggerInst.getId());
addToRunningListAndCancel(triggerInst);
} else if (triggerInst.getStatus() == Status.SUCCEEDED) {
this.triggerProcessor.processSucceed(triggerInst);
} else {
// todo chengren311: it's possible web server restarts before the db update, then
// new instance will not be recoverable from db.
addToRunningListAndScheduleKill(triggerInst, triggerInst.getFlowTrigger()
.getMaxWaitDuration().get(), CancellationCause.TIMEOUT);
}
}
|
Start the trigger. The method will be scheduled for invocation by the Azkaban scheduler.
|
start
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public TriggerInstance findRunningTriggerInstById(final String triggerInstId) {
final Future<TriggerInstance> future = this.flowTriggerExecutorService.submit(
() -> getTriggerInstanceById(triggerInstId)
);
try {
return future.get();
} catch (final Exception e) {
logger.error("exception when finding trigger instance by id" + triggerInstId, e);
return null;
}
}
|
Start the trigger. The method will be scheduled for invocation by the Azkaban scheduler.
|
findRunningTriggerInstById
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private TriggerInstance getTriggerInstanceById(final String triggerInstId) {
return this.runningTriggers.stream()
.filter(triggerInst -> triggerInst.getId().equals(triggerInstId)).findFirst()
.orElse(null);
}
|
Start the trigger. The method will be scheduled for invocation by the Azkaban scheduler.
|
getTriggerInstanceById
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void cancelContextAsync(final DependencyInstanceContext context) {
this.cancelExecutorService.submit(() -> context.cancel());
}
|
Start the trigger. The method will be scheduled for invocation by the Azkaban scheduler.
|
cancelContextAsync
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public void cancelTriggerInstance(final TriggerInstance triggerInst,
final CancellationCause cause) {
if (triggerInst.getStatus() == Status.RUNNING) {
this.flowTriggerExecutorService.submit(() -> cancel(triggerInst, cause));
}
}
|
Cancel a trigger instance
@param triggerInst trigger instance to be cancelled
@param cause cause of cancelling
|
cancelTriggerInstance
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void cancel(final TriggerInstance triggerInst, final CancellationCause cause) {
    if (triggerInst != null) {
      logger.info("cancelling trigger instance with id {}", triggerInst.getId());
      for (final DependencyInstance depInst : triggerInst.getDepInstances()) {
        // cancel running dependencies only, no need to cancel a killed/successful dependency
        // instance
        if (depInst.getStatus() == Status.RUNNING) {
          this.processStatusAndCancelCauseUpdate(depInst, Status.CANCELLING, cause);
          cancelContextAsync(depInst.getContext());
        }
      }
    } else {
      // guard against a null trigger instance before dereferencing it
      logger.debug("unable to cancel a null trigger instance");
    }
  }
|
Cancel a trigger instance
@param triggerInst trigger instance to be cancelled
@param cause cause of cancelling
|
cancel
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private DependencyInstance findDependencyInstanceByContext(
final DependencyInstanceContext context) {
return this.runningTriggers.stream()
.flatMap(triggerInst -> triggerInst.getDepInstances().stream()).filter(
depInst -> depInst.getContext() != null && depInst.getContext() == context)
.findFirst().orElse(null);
}
|
Cancel a trigger instance
@param triggerInst trigger instance to be cancelled
@param cause cause of cancelling
|
findDependencyInstanceByContext
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public void markDependencySuccess(final DependencyInstanceContext context) {
this.flowTriggerExecutorService.submit(() -> markSuccess(context));
}
|
Mark the dependency instance context as success
|
markDependencySuccess
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void markSuccess(final DependencyInstanceContext context) {
final DependencyInstance depInst = findDependencyInstanceByContext(context);
if (depInst != null) {
if (Status.isDone(depInst.getStatus())) {
logger.warn("OnSuccess of dependency instance[id: {}, name: {}] is ignored",
depInst.getTriggerInstance().getId(), depInst.getDepName());
return;
}
// if the status transits from cancelling to succeeded, then cancellation cause was set,
// we need to unset cancellation cause.
this.processStatusAndCancelCauseUpdate(depInst, Status.SUCCEEDED, CancellationCause.NONE);
// if associated trigger instance becomes success, then remove it from running list
if (depInst.getTriggerInstance().getStatus() == Status.SUCCEEDED) {
logger.info("trigger instance[id: {}] succeeded", depInst.getTriggerInstance().getId());
this.triggerProcessor.processSucceed(depInst.getTriggerInstance());
this.runningTriggers.remove(depInst.getTriggerInstance());
}
} else {
logger.debug("unable to find trigger instance with context {} when marking it success",
context);
}
}
|
Mark the dependency instance context as success
|
markSuccess
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private boolean cancelledByAzkaban(final DependencyInstance depInst) {
return depInst.getStatus() == Status.CANCELLING;
}
|
Mark the dependency instance context as success
|
cancelledByAzkaban
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private boolean cancelledByDependencyPlugin(final DependencyInstance depInst) {
// When onKill is called by the dependency plugin not through flowTriggerService, we treat it
// as cancelled by dependency due to failure on dependency side. In this case, cancel cause
// remains unset.
return depInst.getStatus() == Status.RUNNING;
}
|
Mark the dependency instance context as success
|
cancelledByDependencyPlugin
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
public void markDependencyCancelled(final DependencyInstanceContext context) {
this.flowTriggerExecutorService.submit(() -> {
markCancelled(context);
});
}
|
Mark the dependency instance context as success
|
markDependencyCancelled
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|
private void markCancelled(final DependencyInstanceContext context) {
final DependencyInstance depInst = findDependencyInstanceByContext(context);
if (depInst != null) {
if (cancelledByDependencyPlugin(depInst)) {
processStatusAndCancelCauseUpdate(depInst, Status.CANCELLED, CancellationCause.FAILURE);
cancelTriggerInstance(depInst.getTriggerInstance());
} else if (cancelledByAzkaban(depInst)) {
processStatusUpdate(depInst, Status.CANCELLED);
} else {
logger.warn("OnCancel of dependency instance[id: {}, name: {}] is ignored",
depInst.getTriggerInstance().getId(), depInst.getDepName());
return;
}
if (depInst.getTriggerInstance().getStatus() == Status.CANCELLED) {
logger.info("trigger instance with execId {} is cancelled",
depInst.getTriggerInstance().getId());
this.triggerProcessor.processTermination(depInst.getTriggerInstance());
this.runningTriggers.remove(depInst.getTriggerInstance());
}
} else {
logger.warn("unable to find trigger instance with context {} when marking "
+ "it cancelled", context);
}
}
|
Mark the dependency instance context as success
|
markCancelled
|
java
|
azkaban/azkaban
|
azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-web-server/src/main/java/azkaban/flowtrigger/FlowTriggerService.java
|
Apache-2.0
|