code
stringlengths 25
201k
| docstring
stringlengths 19
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
51
| path
stringlengths 11
314
| url
stringlengths 62
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
/**
 * Updates the modifiable fields of an image rampup row (rampup percentage and/or stability
 * tag, whichever are supplied), always refreshing the modification audit columns. The row
 * is identified by the rampup id.
 *
 * @param imageRampup rampup record carrying the id and the fields to update
 * @throws ImageMgmtException when no row matches the id or the update fails
 */
private void updateImageRampup(final ImageRampup imageRampup)
    throws ImageMgmtException {
  try {
    final StringBuilder query = new StringBuilder("update image_rampup set ");
    final List<Object> bindValues = new ArrayList<>();
    // Only include the optional columns the caller actually supplied.
    if (imageRampup.getRampupPercentage() != null) {
      query.append(" rampup_percentage = ?, ");
      bindValues.add(imageRampup.getRampupPercentage());
    }
    if (imageRampup.getStabilityTag() != null) {
      query.append(" stability_tag = ?, ");
      bindValues.add(imageRampup.getStabilityTag().getTagName());
    }
    // Audit columns are always written.
    query.append(" modified_by = ?, modified_on = ?");
    bindValues.add(imageRampup.getModifiedBy());
    bindValues.add(Timestamp.valueOf(LocalDateTime.now()));
    query.append(" where id = ? ");
    bindValues.add(imageRampup.getId());
    final int updateCount = this.databaseOperator
        .update(query.toString(), bindValues.toArray());
    // Zero updated rows means the id matched no rampup entry.
    if (updateCount < 1) {
      log.error(String.format("Exception while updating image rampup details for plan id: %s, "
              + "updateCount: %d. ",
          imageRampup.getPlanId(), updateCount));
      throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST,
          String.format("Exception while updating image rampup details for plan id: %s. ",
              imageRampup.getPlanId()));
    }
  } catch (final SQLException ex) {
    log.error(String.format("Exception while updating image rampup details for plan id: %s. ",
        imageRampup.getPlanId()), ex);
    throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST,
        String.format("Exception while updating image rampup details for plan id: %s. ",
            imageRampup.getPlanId()));
  }
}
|
This method updates the image rampup for an image type. Information such as version rampup
percentage, stability tag etc. can be updated.
@param imageRampup
@throws ImageMgmtException
|
updateImageRampup
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
Apache-2.0
|
/**
 * Maps each row of the result set to an {@link ImageRampupPlan}.
 *
 * @param rs result set positioned before the first row
 * @return list of rampup plans; an immutable empty list when there are no rows
 * @throws SQLException on any result-set access failure
 */
@Override
public List<ImageRampupPlan> handle(final ResultSet rs) throws SQLException {
  if (!rs.next()) {
    return Collections.emptyList();
  }
  final List<ImageRampupPlan> plans = new ArrayList<>();
  do {
    // One plan per row; columns are read directly into the setters.
    final ImageRampupPlan plan = new ImageRampupPlan();
    plan.setId(rs.getInt("id"));
    plan.setPlanName(rs.getString("name"));
    plan.setDescription(rs.getString("description"));
    plan.setActive(rs.getBoolean("active"));
    plan.setImageTypeName(rs.getString("image_type_name"));
    plan.setCreatedOn(rs.getString("created_on"));
    plan.setCreatedBy(rs.getString("created_by"));
    plan.setModifiedBy(rs.getString("modified_by"));
    plan.setModifiedOn(rs.getString("modified_on"));
    plans.add(plan);
  } while (rs.next());
  return plans;
}
|
ResultSetHandler implementation class for fetching image rampup plan
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
Apache-2.0
|
/**
 * Maps each row of the result set to an {@link ImageRampup}.
 *
 * @param rs result set positioned before the first row
 * @return list of rampups; an immutable empty list when there are no rows
 * @throws SQLException on any result-set access failure
 */
@Override
public List<ImageRampup> handle(final ResultSet rs) throws SQLException {
  if (!rs.next()) {
    return Collections.emptyList();
  }
  final List<ImageRampup> rampups = new ArrayList<>();
  do {
    // One rampup per row; columns are read directly into the setters.
    final ImageRampup rampup = new ImageRampup();
    rampup.setId(rs.getInt("id"));
    rampup.setPlanId(rs.getInt("plan_id"));
    rampup.setImageVersion(rs.getString("version"));
    rampup.setRampupPercentage(rs.getInt("rampup_percentage"));
    rampup.setStabilityTag(StabilityTag.fromTagName(rs.getString("stability_tag")));
    rampup.setCreatedOn(rs.getString("created_on"));
    rampup.setCreatedBy(rs.getString("created_by"));
    rampup.setModifiedBy(rs.getString("modified_by"));
    rampup.setModifiedOn(rs.getString("modified_on"));
    rampups.add(rampup);
  } while (rs.next());
  log.debug("Fetched imageRampups:" + rampups);
  return rampups;
}
|
ResultSetHandler implementation class for fetching image rampup
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
Apache-2.0
|
/**
 * Groups image rampup rows by image type name.
 *
 * @param rs result set positioned before the first row
 * @return map from image type name to its rampup entries, preserving the order in which
 *     image types first appear in the result set; an immutable empty map when no rows exist
 * @throws SQLException on any result-set access failure
 */
@Override
public Map<String, List<ImageRampup>> handle(final ResultSet rs) throws SQLException {
  if (!rs.next()) {
    return Collections.emptyMap();
  }
  // LinkedHashMap keeps image types in first-appearance order.
  final Map<String, List<ImageRampup>> imageRampupMap = new LinkedHashMap<>();
  do {
    // NOTE: the original also read "plan_id" into an unused local; the value was never
    // set on the model, so the dead read has been removed.
    final int id = rs.getInt("id");
    final String imageTypeName = rs.getString("image_type_name");
    final String imageVersion = rs.getString("image_version");
    final int rampupPercentage = rs.getInt("rampup_percentage");
    final String stabilityTag = rs.getString("stability_tag");
    final String createdOn = rs.getString("created_on");
    final String createdBy = rs.getString("created_by");
    final String modifiedOn = rs.getString("modified_on");
    final String modifiedBy = rs.getString("modified_by");
    final String releaseTag = rs.getString("release_tag");
    final ImageRampup imageRampup = new ImageRampup();
    imageRampup.setId(id);
    imageRampup.setImageVersion(imageVersion);
    imageRampup.setRampupPercentage(rampupPercentage);
    imageRampup.setStabilityTag(StabilityTag.fromTagName(stabilityTag));
    imageRampup.setCreatedOn(createdOn);
    imageRampup.setCreatedBy(createdBy);
    imageRampup.setModifiedBy(modifiedBy);
    imageRampup.setModifiedOn(modifiedOn);
    imageRampup.setReleaseTag(releaseTag);
    // computeIfAbsent replaces the manual containsKey/get/put grouping.
    imageRampupMap.computeIfAbsent(imageTypeName, key -> new ArrayList<>()).add(imageRampup);
  } while (rs.next());
  return imageRampupMap;
}
|
ResultSetHandler implementation class for fetching image rampup details for given image types
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageRampupDaoImpl.java
|
Apache-2.0
|
/**
 * Fetches ownership metadata for the given image type name.
 *
 * @param imageTypeName name of the image type (looked up case-insensitively via lowercase)
 * @return list of ownership records for the image type
 * @throws ImageMgmtDaoException when the underlying query fails
 */
@Override
public List<ImageOwnership> getImageTypeOwnership(final String imageTypeName) {
  try {
    // Image type names are matched in lowercase for case-insensitive lookup.
    return this.databaseOperator.query(
        FetchImageOwnershipHandler.FETCH_IMAGE_OWNERSHIP_BY_IMAGE_TYPE_NAME,
        new FetchImageOwnershipHandler(),
        imageTypeName.toLowerCase());
  } catch (final SQLException ex) {
    log.error(FetchImageOwnershipHandler.FETCH_IMAGE_OWNERSHIP_BY_IMAGE_TYPE_NAME + " failed.",
        ex);
    throw new ImageMgmtDaoException(ErrorCode.INTERNAL_SERVER_ERROR,
        "Unable to fetch ownership for image type : " + imageTypeName);
  }
}
|
Gets ownership metadata based on image type name.
@param imageTypeName
@return List<ImageOwnership>
|
getImageTypeOwnership
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageTypeDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/ImageTypeDaoImpl.java
|
Apache-2.0
|
/**
 * Checks whether the flow is excluded from image rampup by a deny-list rule: either the flow
 * is denied for all versions (DenyMode.ALL, i.e. an HP flow), or a rule denies the specific
 * imageName:imageVersion being considered.
 *
 * @param flowName flow id used as the deny-list lookup key
 * @param imageName image type name
 * @param imageVersion candidate image version
 * @return true when a deny-list entry excludes this flow for the given image version
 * @throws ImageMgmtDaoException when the deny-list query fails
 */
@Override
public boolean isExcludedByRampRule(final String flowName, final String imageName, final String imageVersion) {
  final String targetImageVersion = String.join(IMAGE_VERSION_DELIMITER, imageName, imageVersion);
  try {
    final List<RampRuleDenyList> rampRuleDenyLists = databaseOperator.query(
        FetchFlowDenyListHandler.FETCH_FLOW_DENY_LIST_BY_FLOW_ID,
        new FetchFlowDenyListHandler(), flowName);
    for (final RampRuleDenyList rampRuleDenyList : rampRuleDenyLists) {
      // denyMode.ALL means this flow is an HP flow and is excluded for every version.
      if (rampRuleDenyList.getDenyMode().equals(DenyMode.ALL)) {
        return true;
      }
      // Otherwise the entry is expected to carry a denyVersion; compare against the target.
      // equals is invoked on the non-null target so a missing denyVersion cannot NPE.
      if (targetImageVersion.equals(rampRuleDenyList.getDenyVersion())) {
        return true;
      }
    }
    return false;
  } catch (final SQLException e) {
    // Pass the throwable to the logger so the stack trace is preserved.
    LOG.error("fail to query ramp rule deny list", e);
    throw new ImageMgmtDaoException("fail to query ramp rule deny list: " + e.getMessage());
  }
}
|
Dao Implementation for access DB of table ramp_rules.
Create/update/delete/get RampRule or partial RampRule metadata. {@link ImageRampRule}
|
isExcludedByRampRule
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
Apache-2.0
|
/**
 * Inserts a new ramp rule after verifying that no rule with the same name already exists.
 * A duplicate rule name is rejected as a client error (BAD_REQUEST) rather than surfacing
 * as an opaque SQL failure.
 *
 * @param rampRule rule to persist
 * @return update count returned by the insert
 * @throws ImageMgmtDaoException on a duplicate rule name or any DB failure
 */
@Override
public int addRampRule(final ImageRampRule rampRule) {
  try {
    // duplicated rule should be forbidden as client side error, which needs a separate check to
    // avoid it falls into normal SQL Exception as server error.
    final ImageRampRule imageRampRule = databaseOperator.query(
        FetchRampRuleHandler.FETCH_RAMP_RULE_BY_ID, new FetchRampRuleHandler(), rampRule.getRuleName());
    if (imageRampRule != null) {
      LOG.error("Error in create ramp rule on duplicate ruleName: " + imageRampRule.getRuleName());
      throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST,
          "Error in create ramp rule on duplicate ruleName: " + imageRampRule.getRuleName());
    }
    // Use a single timestamp so the created and modified columns agree on insert
    // (previously two separate now() evaluations could differ).
    final Timestamp now = Timestamp.valueOf(LocalDateTime.now());
    return this.databaseOperator.update(INSERT_RAMP_RULE,
        rampRule.getRuleName(),
        rampRule.getImageName(),
        rampRule.getImageVersion(),
        rampRule.getOwners(),
        rampRule.isHPRule(),
        rampRule.getCreatedBy(),
        now,
        rampRule.getCreatedBy(),
        now);
  } catch (SQLException e) {
    // Log with the throwable so the stack trace is preserved.
    LOG.error("Error in create ramp rule on DB" + rampRule, e);
    throw new ImageMgmtDaoException(ErrorCode.INTERNAL_SERVER_ERROR,
        "Error creating ramp rule " + rampRule.getRuleName() + " with " + e.getMessage());
  }
}
|
Insert new ramp rule into DB, check if duplicate ruleName exists first
@param rampRule
@throws ImageMgmtDaoException
|
addRampRule
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
Apache-2.0
|
/**
 * Updates the owners column of a ramp rule along with the modification audit columns.
 *
 * @param newOwners new owners value to store
 * @param ruleName name of the rule to update
 * @param modifiedBy user performing the modification
 * @return update count returned by DatabaseOperator.update
 * @throws ImageMgmtDaoException when the update fails
 */
@Override
public int updateOwnerships(final String newOwners, final String ruleName, final String modifiedBy) {
  try {
    // Validation of the owners value is expected to have been done by the caller.
    return this.databaseOperator.update(UPDATE_RULE_OWNERSHIP,
        newOwners, modifiedBy, Timestamp.valueOf(LocalDateTime.now()), ruleName);
  } catch (SQLException e) {
    // Log with the throwable so the stack trace is not lost.
    LOG.error("Error in updating ownership " + newOwners, e);
    throw new ImageMgmtDaoException(ErrorCode.INTERNAL_SERVER_ERROR,
        "Error in updating ownership with " + e.getMessage());
  }
}
|
update column owners from table ramp_rules along with modification metadata.
@param newOwners
@param ruleName
@param modifiedBy
@return int - id of the DB entry
@throws ImageMgmtDaoException
|
updateOwnerships
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
Apache-2.0
|
/**
 * Returns the owners of the given ramp rule, parsed from the stored comma-separated
 * owners column. Serves as the source of truth for rule ownership.
 *
 * @param ruleName name of the ramp rule
 * @return set of owner entries for the rule
 */
@Override
public Set<String> getOwners(final String ruleName) {
  final String owners = getRampRule(ruleName).getOwners();
  final Set<String> ownerSet = new HashSet<>();
  for (final String owner : owners.split(",")) {
    ownerSet.add(owner);
  }
  return ownerSet;
}
|
Query table ramp_rules to get owners of Ramp rule.
Serves as the source of truth for rule ownership management.
@param ruleName - ruleName in {@see ImageRampRule}
@return owners of the ramp rule.
|
getOwners
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
Apache-2.0
|
/**
 * Maps each flow id (project.flow) to deny information derived from the named ramp rule and
 * inserts the resulting deny-list rows inside a single DB transaction. HP rules produce
 * DenyMode.ALL rows (flow denied for every version); normal rules produce DenyMode.PARTIAL
 * rows carrying the denied imageName:imageVersion. Rows already present for the same rule
 * are skipped to avoid DB inflation.
 *
 * @param flowIds flows to add to the deny list
 * @param ruleName name of an existing ramp rule
 * @return last insert id of the transaction; values <= 0 are treated as errors
 * @throws ImageMgmtDaoException when the rule is missing, nothing new was inserted,
 *     or the transaction fails
 */
@Override
public int addFlowDenyInfo(final List<ProjectFlow> flowIds, final String ruleName) {
  // query ramp_rules to get ramp rule information
  // based on ramp rule to generate mappings with flowId as key
  final SQLTransaction<Long> fetchRampRuleAndUpdateDenyList = transOperator -> {
    final ImageRampRule imageRampRule =
        transOperator.query(FetchRampRuleHandler.FETCH_RAMP_RULE_BY_ID, new FetchRampRuleHandler(), ruleName);
    if (imageRampRule == null) {
      LOG.error("fail to find the existing ruleName: " + ruleName);
      throw new ImageMgmtDaoException("ramp rule not found with ruleName: " + ruleName);
    }
    // insert HP flows: <flowId, denyMode.ALL, ruleName> based on whether HP flow
    if (imageRampRule.isHPRule()) {
      LOG.info("handling add flows for HP Flow Rule: " + ruleName);
      for (final ProjectFlow flowId : flowIds) {
        // avoid duplicate insert HP flows, use <flowId, denyMode, ruleName> to identity duplicates.
        // in case one rule deleted, the others still exist
        if (transOperator.query(
            FetchFlowDenyListHandler.FETCH_FLOW_DENY_LIST_BY_FLOW_ID_DENY_MODE_RULE_NAME,
            new FetchFlowDenyListHandler(), flowId.toString(), DenyMode.ALL.name(), ruleName)
            .isEmpty()) {
          transOperator.update(INSERT_HP_DENY_LIST, flowId.toString(), DenyMode.ALL.name(), ruleName);
        }
      }
    } else {
      // insert normal flows: <flowId, denyMode.PARTIAL, ruleName>
      final String denyVersion = String.join(IMAGE_VERSION_DELIMITER, imageRampRule.getImageName(), imageRampRule.getImageVersion());
      LOG.info("handling add flows for normal Flow Rule: " + ruleName + " with denyVersion: " + denyVersion);
      for (final ProjectFlow flowId : flowIds) {
        // avoid duplicate insert, use <flowId, denyVersion, ruleName> to identity duplicates.
        // in case one got deleted, the others should not get impacted
        if (transOperator.query(
            FetchFlowDenyListHandler.FETCH_FLOW_DENY_LIST_BY_FLOW_ID_DENY_VERSION_RULE_NAME,
            new FetchFlowDenyListHandler(), flowId.toString(), denyVersion, ruleName)
            .isEmpty()) {
          transOperator.update(INSERT_FLOW_DENY_LIST, flowId.toString(), DenyMode.PARTIAL.name(), denyVersion, ruleName);
        }
      }
    }
    // end if
    transOperator.getConnection().commit();
    return transOperator.getLastInsertId();
  };
  // end SQL transaction operator
  try {
    // NOTE(review): lastInsertId == 0 is interpreted as "no new rows inserted"
    // (everything was a duplicate); negative values as a DB error.
    int batchInsertId = this.databaseOperator.transaction(fetchRampRuleAndUpdateDenyList).intValue();
    if (batchInsertId == 0) {
      LOG.warn(String.format("creating no new flow deny list based on rule: %s, "
          + "flowList: %s. Might due to deny rule already exists", ruleName, flowIds));
      throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST,
          String.format("flows already exists in the rule: %s, " + "flowList: %s.", ruleName,
              flowIds));
    }
    if (batchInsertId < 0) {
      LOG.warn(String.format("Error on inserting into DB based on rule: %s, "
          + "flowList: %s.", ruleName, flowIds));
      throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST,
          String.format("Exception while creating flow deny list based on rule: %s, " + "flowList: %s.", ruleName,
              flowIds));
    }
    return batchInsertId;
  } catch (final SQLException e) {
    LOG.error("Unable to create the flow deny list metadata", e);
    throw new ImageMgmtDaoException("Exception while creating the flow deny list metadata: " + e.getMessage());
  }
}
|
Map FlowId (project.flow) with denying information (denyMode or denyVersion) and insert them into DB.
Deduplication performed in same rule with same flow to avoid DB inflation.
@param flowIds
@param ruleName
@return int - id of the DB entry
|
addFlowDenyInfo
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/daos/RampRuleDaoImpl.java
|
Apache-2.0
|
/**
 * Sets the json payload provided as part of the image management REST API invocation.
 *
 * @param jsonPayload input json payload in string format; must not be null
 * @return this builder instance
 * @throws NullPointerException if the json payload is null
 */
public Builder jsonPayload(String jsonPayload) {
  // checkNotNull returns its argument, letting the validation and assignment fuse.
  this.jsonPayload = Preconditions.checkNotNull(jsonPayload);
  return this;
}
|
Sets the json payload provided as part of image management REST API invocation
@param jsonPayload input json payload in string format
@return Builder - returns builder instance
@throws NullPointerException if json payload is missing
|
jsonPayload
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
Apache-2.0
|
/**
 * Sets the user who invokes the image management REST API.
 *
 * @param user user id in string form; must not be null
 * @return this builder instance
 * @throws NullPointerException if user is null
 */
public Builder user(String user) {
  // checkNotNull returns its argument, letting the validation and assignment fuse.
  this.user = Preconditions.checkNotNull(user);
  return this;
}
|
Sets the user who invokes the image management REST API.
@param user - user in string
@return Builder - returns builder instance
@throws NullPointerException if user is null
|
user
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
Apache-2.0
|
/**
 * Adds a mandatory parameter to the param map. Both key and value must be non-null.
 *
 * @param paramKey key of the parameter; must not be null
 * @param paramValue value of the parameter; must not be null
 * @return this builder instance
 * @throws NullPointerException when either the key or the value is null
 */
public Builder addParam(String paramKey, Object paramValue) {
  // checkNotNull returns its argument; key is validated first, then the value,
  // matching the original check order.
  this.params.put(
      Preconditions.checkNotNull(paramKey, "Param key is null"),
      Preconditions.checkNotNull(paramValue, "The mandatory parameter " + paramKey + " is "
          + "either missing or contains null value."));
  return this;
}
|
Adds param to the param map. It accepts a non-null key and a non-null value. This method must be
used for mandatory parameters.
@param paramKey - key of the parameter
@param paramValue - value of the parameter
@return Builder - returns builder instance
@throws NullPointerException if preconditions failed
|
addParam
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
Apache-2.0
|
/**
 * Adds an optional parameter to the param map. The key must be non-null; the value is
 * only stored when the Optional is present — an empty Optional is silently ignored.
 *
 * @param paramKey key of the parameter; must not be null
 * @param paramValue optional value of the parameter
 * @return this builder instance
 * @throws NullPointerException when the key is null
 */
public Builder addParamIfPresent(String paramKey, Optional<? extends Object> paramValue) {
  Preconditions.checkNotNull(paramKey, "Param key is null");
  // ifPresent replaces the manual isPresent()/get() pair.
  paramValue.ifPresent(value -> this.params.put(paramKey, value));
  return this;
}
|
Adds param to the param map. It accepts a non-null key; the value is an Optional and is ignored
when absent. This method must be used for optional parameters.
@param paramKey - key of the parameter
@param paramValue - value of the parameter
@return Builder - returns builder instance
@throws NullPointerException if preconditions failed
|
addParamIfPresent
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
Apache-2.0
|
/**
 * Sets the start offset for pagination.
 *
 * @param start start offset for pagination; must be strictly positive
 * @return this builder instance
 * @throws IllegalArgumentException when start is not positive
 */
public Builder start(int start) {
  // Equivalent guard clause to Preconditions.checkArgument with the same message.
  if (start <= 0) {
    throw new IllegalArgumentException("Pagination start offset must be positive");
  }
  this.start = start;
  return this;
}
|
Sets the start offset for pagination
@param start - start offset for pagination
@return Builder - returns builder instance
@throws IllegalArgumentException if precondition is not satisfied
|
start
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
Apache-2.0
|
/**
 * Builds the final ImageMetadataRequest from the state accumulated in this builder.
 *
 * @return a new ImageMetadataRequest constructed from this builder
 */
public ImageMetadataRequest build() {
  return new ImageMetadataRequest(this);
}
|
Invoke build method to build the final RequestContext
@return RequestContext
|
build
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/dto/ImageMetadataRequest.java
|
Apache-2.0
|
/**
 * Returns the numeric error code carried by this ErrorCode constant.
 *
 * @return the error code value
 */
public int getCode() {
  return this.code;
}
|
@return the error code associated with this ErrorCode
|
getCode
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/exception/ErrorCode.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/exception/ErrorCode.java
|
Apache-2.0
|
/**
 * Returns the ErrorCode associated with this exception.
 *
 * @return the error code carried by this exception
 */
public ErrorCode getErrorCode() {
  return this.errorCode;
}
|
Top level exception class for image management
|
getErrorCode
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/exception/ImageMgmtException.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/exception/ImageMgmtException.java
|
Apache-2.0
|
/**
 * Returns the Permission associated with this role.
 *
 * @return the permission held by this role
 */
public Permission getPermission() {
  return this.permission;
}
|
Enum representing owner Role as ADMIN is having all the permissions such as invoking image
management APIs, including adding/removing image type owners etc. MEMBER role is having
permissions to invoke image management APIs, but can't add/delete image type owners etc.
GUEST role is having readonly or get access to the image management APIs.
|
getPermission
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageOwnership.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageOwnership.java
|
Apache-2.0
|
/**
 * Resolves the Deployable enum for the given deployable name.
 * Unknown names fall back to IMAGE.
 *
 * @param name deployable name to look up
 * @return matching Deployable, or IMAGE when no match exists
 */
public static Deployable fromDeployableName(final String name) {
  return deployableMap.getOrDefault(name, IMAGE);
}
|
Creates Deployable enum for enum name
@param name - enum name
@return Deployable
|
fromDeployableName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageType.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageType.java
|
Apache-2.0
|
/**
 * Returns the string value backing this image-version State constant.
 *
 * @return the state value string
 */
public String getStateValue() {
  return this.stateValue;
}
|
Enum to represent state of the image version. Below are the significance of the enums NEW - An
image type version is marked with state as NEW when it is first created/registered using image
management API ACTIVE - An image type version goes through the ramp up process and once the
version is fully ramped upto 100% the version is marked as ACTIVE in the image_versions table.
UNSTABLE - An image type version goes through the ramp up process and once the version is
identified as faulty or unstable, the version is marked as UNSTABLE in the image_versions
table. DEPRECATED - An image type version which is no longer in use is marked as DEPRECATED
TEST - This is to represent a TEST version of the image and once the version is tested it can
be marked as NEW.
STABLE - When an image version is marked as ACTIVE, the other ACTIVE version(s) are marked as
STABLE as there can be only 1 ACTIVE version at a time.
|
getStateValue
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
Apache-2.0
|
/**
 * Resolves the State enum for the given state value.
 * Unknown values fall back to NEW.
 *
 * @param stateValue state value string to look up
 * @return matching State, or NEW when no match exists
 */
public static State fromStateValue(final String stateValue) {
  return stateMap.getOrDefault(stateValue, NEW);
}
|
Create state enum from state value
@param stateValue
@return
|
fromStateValue
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
Apache-2.0
|
/**
 * Returns the precomputed set of state values considered non-active.
 * Note: the shared set instance is returned directly, not a copy.
 *
 * @return set of non-active state values
 */
public static Set<String> getNonActiveStateValues() {
  return nonActiveStateValueSet;
}
|
Create set of non active state values
|
getNonActiveStateValues
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
Apache-2.0
|
/**
 * Returns the precomputed filter set containing the NEW, ACTIVE, and STABLE states.
 * Note: the shared set instance is returned directly, not a copy.
 *
 * @return Set of NEW, ACTIVE, and STABLE states
 */
public static Set<State> getNewActiveAndStableStateFilter() {
  return newActiveAndStableState;
}
|
Gets a set with NEW, ACTIVE, or STABLE state
@return Set<State>
|
getNewActiveAndStableStateFilter
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
Apache-2.0
|
/**
 * Returns the precomputed filter set containing the NEW, ACTIVE, STABLE, and TEST states.
 * Note: the shared set instance is returned directly, not a copy.
 *
 * @return Set of NEW, ACTIVE, STABLE, and TEST states
 */
public static Set<State> getNewActiveTestAndStableStateFilter() {
  return newActiveTestAndStableState;
}
|
Gets a set with NEW, ACTIVE, STABLE and TEST state
@return Set<State>
|
getNewActiveTestAndStableStateFilter
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
Apache-2.0
|
/**
 * Returns a freshly built mutable set containing every State constant.
 *
 * @return set of all State values
 */
public static Set<State> getAllStates() {
  // Same pipeline as before, routed through Arrays.asList for variety.
  return Arrays.asList(State.values()).stream().collect(Collectors.toSet());
}
|
A Set of All States
@return Set<State>
|
getAllStates
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/models/ImageVersion.java
|
Apache-2.0
|
/**
 * Checks whether the given user has the requested permission type for the image type,
 * either directly as a listed owner or through membership in an owning group.
 *
 * @param imageTypeName image type whose ownership records are consulted
 * @param userId user id being checked
 * @param type permission type required
 * @return true when the user, or one of the user's groups, owns the image type with the
 *     requested permission
 */
@Override
public boolean hasPermissionForImageType(final String imageTypeName, final String userId, final Type type) {
  // Collect owners (users and groups) whose role grants the requested permission type.
  final Set<String> ownerSet = this.imageTypeDao.getImageTypeOwnership(imageTypeName).stream()
      .filter(ownership -> ownership.getRole().getPermission().isPermissionSet(type))
      .map(ImageOwnership::getOwner)
      .collect(Collectors.toSet());
  // Direct ownership, or membership in an owning group, grants permission.
  return ownerSet.contains(userId) || userManager.validateUserGroupMembership(userId, ownerSet);
}
|
Checks the permission based on user manager, image type name, user id and Permission type.
@param imageTypeName
@param userId
@param type
@return boolean
|
hasPermissionForImageType
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/permission/PermissionManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/permission/PermissionManagerImpl.java
|
Apache-2.0
|
/**
 * Validates that the user may operate on the image type and returns the owner set.
 * Authorization passes when the user is an Azkaban admin or appears in the
 * image_ownerships records for the image type.
 *
 * @param imageTypeName image type whose ownership records are consulted
 * @param user user invoking the operation
 * @return set of owners of the image type
 * @throws ImageMgmtValidationException with UNAUTHORIZED when the user lacks permission
 */
@Override
public Set<String> validatePermissionAndGetOwnerships(final String imageTypeName, final User user)
    throws ImageMgmtException {
  final String userId = user.getUserId();
  // fetch owners from image_ownerships with matching permission set (e.g. ADMIN)
  final Set<String> imageOwnerships = imageTypeDao.getImageTypeOwnership(imageTypeName).stream()
      .map(ImageOwnership::getOwner)
      .collect(Collectors.toSet());
  // not authorized if neither azkaban admin nor validated through image_ownership
  if (!hasPermission(user, imageOwnerships)) {
    // Fixed grammar in the user-facing message ("does not has" -> "does not have").
    final String errorMsg = String.format("unauthorized user %s does not have permission to operate", userId);
    log.error(errorMsg);
    throw new ImageMgmtValidationException(ErrorCode.UNAUTHORIZED, errorMsg);
  }
  return imageOwnerships;
}
|
Checks the user permission based on image type name, user id and Permission type;
Azkaban admin would have permission and other user validates through image_ownership table,
@param imageTypeName
@param user
@return boolean
|
validatePermissionAndGetOwnerships
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/permission/PermissionManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/permission/PermissionManagerImpl.java
|
Apache-2.0
|
/**
 * Checks whether the user holds the Azkaban ADMIN permission through any of its roles.
 *
 * @param user user to check
 * @return true when any of the user's roles grants Type.ADMIN, false otherwise
 */
@Override
public boolean isAzkabanAdmin(final User user) {
  for (final String roleName : user.getRoles()) {
    // Short-circuit on the first role carrying the ADMIN permission, like anyMatch.
    if (userManager.getRole(roleName).getPermission().isPermissionSet(Type.ADMIN)) {
      return true;
    }
  }
  return false;
}
|
Method to check if user is Azkaban Admin.
@param user
@return true if the user is an Azkaban admin,
false otherwise
|
isAzkabanAdmin
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/permission/PermissionManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/permission/PermissionManagerImpl.java
|
Apache-2.0
|
/**
 * Selects an image version for each requested image type in two passes: first via the
 * active rampup plan (random, percentage-weighted selection done in
 * processAndGetRampupVersion), then, for types still unresolved, via the latest active
 * version in the image_versions table. Image types that remain unresolved after both
 * passes are left in remainingImageTypes for the caller to report.
 *
 * @param flow executable flow driving rampup selection; may be null per the rampup pass
 * @param imageTypes set of specified image types
 * @param imageTypeRampups rampup list per image type
 * @param remainingImageTypes out-parameter tracking image types for which no version
 *     metadata could be found (mutated in place)
 * @return map of image type name (case-insensitive ordering) to its selected version
 *     and the reason it was selected
 */
private Map<String, ImageVersionMetadata> processAndGetVersionForImageTypes(
    final ExecutableFlow flow,
    final Set<String> imageTypes,
    final Map<String, List<ImageRampup>> imageTypeRampups,
    final Set<String> remainingImageTypes) {
  final Set<String> rampupImageTypeSet = imageTypeRampups.keySet();
  log.info("Found active rampup for the image types {} ", rampupImageTypeSet);
  // Case-insensitive keys so lookups ignore image-type-name casing.
  final Map<String, ImageVersionMetadata> imageTypeVersionMap = new TreeMap<>(
      String.CASE_INSENSITIVE_ORDER);
  // select current flow's image versions based on ramp up plan and ramp rule(exclusive list)
  final Map<String, ImageVersion> imageTypeRampupVersionMap =
      this.processAndGetRampupVersion(flow, imageTypeRampups);
  imageTypeRampupVersionMap
      .forEach((k, v) -> imageTypeVersionMap.put(k, new ImageVersionMetadata(v,
          imageTypeRampups.get(k), MSG_RANDOM_RAMPUP_VERSION_SELECTION)));
  log.info("After processing rampup records -> imageTypeVersionMap: {}", imageTypeVersionMap);
  /*
   * Fetching the latest active image version from image_versions table for the remaining image
   * types for which there is no active rampup plan or the versions are marked as
   * unstable/deprecated in the active plan.
   */
  // Converts the input image types to lowercase for case insensitive comparison.
  final Set<String> imageTypesInLowerCase =
      imageTypes.stream().map(String::toLowerCase).collect(Collectors.toSet());
  remainingImageTypes.addAll(imageTypesInLowerCase);
  // Drop the types already resolved by the rampup pass.
  remainingImageTypes
      .removeAll(imageTypeVersionMap.keySet().stream().map(String::toLowerCase).collect(
          Collectors.toSet()));
  log.info("After finding version through rampup image types remaining: {} ",
      remainingImageTypes);
  final Map<String, ImageVersion> imageTypeActiveVersionMap =
      this.processAndGetActiveImageVersion(remainingImageTypes);
  imageTypeActiveVersionMap
      .forEach((k, v) -> imageTypeVersionMap.put(k,
          new ImageVersionMetadata(v, MSG_ACTIVE_VERSION_SELECTION)));
  log.info("After fetching active image version -> imageTypeVersionMap {}", imageTypeVersionMap);
  // For the leftover image types throw exception with appropriate error message.
  remainingImageTypes
      .removeAll(imageTypeVersionMap.keySet().stream().map(String::toLowerCase).collect(
          Collectors.toSet()));
  log.info("After fetching version using ramp up and based on active image version the "
      + "image types remaining: {} ", remainingImageTypes);
  return imageTypeVersionMap;
}
|
This method processes image type rampup details and selects a version for each image type.
The version selection process is: 1. Sort the rampup data in ascending order of rampup
percentage. 2. Generate a random number between 1 and 100, both inclusive — say the number
generated is 60. 3. Suppose there are three versions 1.1.1, 1.1.2 and 1.1.3 with rampup
percentages 10, 30 and 60 respectively. 4. These percentages form three ranges [1 - 10],
[11 - 40] and [41 - 100]. The random number 60 falls in the last range, [41 - 100], so
version 1.1.3 is selected; a random number of 22 would select version 1.1.2, and so on.
5. If there is no active rampup plan for an image type, or the version in the active plan
is marked unstable or deprecated, the latest active version is selected from the
image_versions table. 6. If there is no active version in the image_versions table, an
appropriate error is raised stating that no version could be selected for the image type,
and the whole process fails. 7. Follow the rampup procedure to elect a new version from
the image_versions table for the failed image type.
@param imageTypes - set of specified image types
@param imageTypeRampups - contains rampup list for an image type
@param remainingImageTypes - This set is used to keep track of the image types for which
version metadata is not available.
@return Map<String, VersionMetadata>
|
processAndGetVersionForImageTypes
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Processes the rampup list and picks a rampup version for each image type present in the
 * rampup map. A flow is mapped to a percentile bucket via its name hash; the rampup entry whose
 * cumulative-percentage range contains that bucket wins. If the winning version is excluded by a
 * ramp rule for this flow, the image type's current active version is used instead.
 *
 * @param flow the executable flow; may be null, in which case the first rampup entry is used
 * @param imageTypeRampups rampup entries keyed by image type name
 * @return map of image type name to selected ImageVersion (case-insensitive keys)
 */
private Map<String, ImageVersion> processAndGetRampupVersion(
    final ExecutableFlow flow,
    final Map<String, List<ImageRampup>> imageTypeRampups) {
  final Set<String> rampupImageTypeSet = imageTypeRampups.keySet();
  final Map<String, ImageVersion> imageTypeRampupVersionMap =
      new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
  if (rampupImageTypeSet.isEmpty()) {
    log.warn("No active rampup found for the image types");
    return imageTypeRampupVersionMap;
  }
  // Log once (previously this line appeared both before and after the empty check).
  log.info("Found active rampup for the image types {} ", rampupImageTypeSet);
  for (final String imageTypeName : rampupImageTypeSet) {
    final List<ImageRampup> imageRampupList = imageTypeRampups.get(imageTypeName);
    if (imageRampupList.isEmpty()) {
      log.info("ImageRampupList was empty, so continue");
      continue;
    }
    if (null == flow) {
      // Without a flow there is no name hash to bucket on; fall back to the first entry.
      log.info("Flow object is null, so continue");
      final ImageRampup firstImageRampup = imageRampupList.get(0);
      imageTypeRampupVersionMap.put(imageTypeName,
          this.fetchImageVersion(imageTypeName, firstImageRampup.getImageVersion())
              .orElseThrow(() -> new ImageMgmtException(
                  String.format("Unable to fetch version %s from image versions table.",
                      firstImageRampup.getImageVersion()))));
    } else {
      int prevRampupPercentage = 0;
      final int flowNameHashValMapping = ContainerImplUtils.getFlowNameHashValMapping(flow);
      log.info("HashValMapping: " + flowNameHashValMapping);
      for (final ImageRampup imageRampup : imageRampupList) {
        final int rampupPercentage = imageRampup.getRampupPercentage();
        // Select when the hash bucket falls inside (prev, prev + rampupPercentage].
        if (flowNameHashValMapping >= prevRampupPercentage + 1
            && flowNameHashValMapping <= prevRampupPercentage + rampupPercentage) {
          // When flow is excluded by a ramp rule, use the default active version instead.
          if (imageRampRuleDao.isExcludedByRampRule(
              flow.getFlowName(), imageTypeName, imageRampup.getImageVersion())) {
            imageTypeRampupVersionMap.put(imageTypeName, fetchActiveImageVersion(imageTypeName)
                .orElseThrow(() -> new ImageMgmtDaoException(
                    // Fixed: "{}" was previously concatenated literally into the message.
                    "fail to find active image version for " + imageTypeName)));
            log.debug("The image version {} is deselected for image type {} with rampup "
                    + "percentage {} and use active current one",
                imageRampup.getImageVersion(), imageTypeName, rampupPercentage);
          } else {
            imageTypeRampupVersionMap.put(imageTypeName,
                this.fetchImageVersion(imageTypeName, imageRampup.getImageVersion())
                    .orElseThrow(() -> new ImageMgmtException(
                        String.format("Unable to fetch version %s from image versions table.",
                            imageRampup.getImageVersion()))));
            log.debug("The image version {} is selected for image type {} with rampup "
                    + "percentage {}",
                imageRampup.getImageVersion(), imageTypeName, rampupPercentage);
          }
          break;
        }
        prevRampupPercentage += rampupPercentage;
      }
      // Moved out of the inner loop: previously logged once per non-matching rampup entry.
      log.info("ImageTypeRampupVersionMap: " + imageTypeRampupVersionMap);
    }
  }
  return imageTypeRampupVersionMap;
}
|
Method to process the rampup list and get the rampup version based on rampup logic for
the given image types in the rampup map.
If there is a ramp rule defined for this ramp up plan, the image version would be deselected
and use the current active version instead.
@param imageTypeRampups
@return Map<String, ImageVersion>
|
processAndGetRampupVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Fetches the latest active image version for each of the given image types.
 *
 * @param imageTypes image type names to look up
 * @return map of image type name to active ImageVersion (case-insensitive keys)
 */
private Map<String, ImageVersion> processAndGetActiveImageVersion(final Set<String> imageTypes) {
  final Map<String, ImageVersion> activeVersionsByType =
      new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
  if (CollectionUtils.isEmpty(imageTypes)) {
    return activeVersionsByType;
  }
  final List<ImageVersion> activeVersions =
      this.imageVersionDao.getActiveVersionByImageTypes(imageTypes);
  log.debug("Active image versions fetched: {} ", activeVersions);
  if (activeVersions != null) {
    for (final ImageVersion version : activeVersions) {
      activeVersionsByType.put(version.getName(), version);
    }
  }
  return activeVersionsByType;
}
|
Process and get latest active image version for the given image types.
@param imageTypes
@return Map<String, ImageVersion>
|
processAndGetActiveImageVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Fetches the latest non-active image version for each of the given image types. Image types
 * for which a version was found are removed from the (mutable) input set, leaving only the
 * types that still have no version.
 *
 * @param imageTypes image type names to look up; mutated in place (resolved types removed)
 * @return map of image type name to latest non-active ImageVersion (case-insensitive keys)
 */
private Map<String, ImageVersion> getLatestNonActiveImageVersion(final Set<String> imageTypes) {
  final Map<String, ImageVersion> latestByType = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
  if (CollectionUtils.isEmpty(imageTypes)) {
    return latestByType;
  }
  final List<ImageVersion> versions =
      this.imageVersionDao.getLatestNonActiveVersionByImageTypes(imageTypes);
  log.info("Non Active image versions fetched: {} ", versions);
  if (versions != null && !versions.isEmpty()) {
    for (final ImageVersion version : versions) {
      latestByType.put(version.getName(), version);
    }
    // Retain only the remaining/left-over image types (i.e. image types without any version).
    imageTypes.removeAll(latestByType.keySet().stream()
        .map(String::toLowerCase)
        .collect(Collectors.toSet()));
  }
  return latestByType;
}
|
Get latest non active image version for the given image types.
@param imageTypes
@return Map<String, ImageVersion>
|
getLatestNonActiveImageVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Fetches an image version matching both the given image type and version string
 * (case-insensitive on both).
 *
 * @param imageType image type name
 * @param imageVersion version string to match
 * @return the matching ImageVersion, or empty if none matches
 */
private Optional<ImageVersion> fetchImageVersion(final String imageType,
    final String imageVersion) {
  final ImageMetadataRequest request = ImageMetadataRequest.newBuilder()
      .addParam(ImageMgmtConstants.IMAGE_TYPE, imageType)
      .addParam(ImageMgmtConstants.IMAGE_VERSION, imageVersion)
      .build();
  final List<ImageVersion> candidates = this.imageVersionDao.findImageVersions(request);
  if (CollectionUtils.isEmpty(candidates)) {
    return Optional.empty();
  }
  // Return a candidate only when both the image type/name and the version match.
  return candidates.stream()
      .filter(v -> v.getName().equalsIgnoreCase(imageType)
          && v.getVersion().equalsIgnoreCase(imageVersion))
      .findFirst();
}
|
Method to fetch image version based on image type and image version.
@param imageType
@param imageVersion
@return Optional<ImageVersion>
|
fetchImageVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Fetches the currently active image version for the given image type, if any.
 *
 * @param imageType image type name
 * @return the active ImageVersion, or empty if none exists
 */
private Optional<ImageVersion> fetchActiveImageVersion(final String imageType) {
  final Set<String> singleType = new HashSet<>();
  singleType.add(imageType);
  final List<ImageVersion> activeVersions =
      this.imageVersionDao.getActiveVersionByImageTypes(singleType);
  if (activeVersions.isEmpty()) {
    log.debug("found no active image version for {}", imageType);
    return Optional.empty();
  }
  log.debug("fetch active image version {} for {}",
      activeVersions.get(0).getVersion(), imageType);
  return Optional.of(activeVersions.get(0));
}
|
Method to fetch active image version based on given image type.
@param imageType
@return Optional<ImageVersion>
|
fetchActiveImageVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Builds a VersionInfo map from the given ImageVersionMetadata map, keyed by the same
 * image type names (case-insensitive keys).
 *
 * @param imageVersionMetadataMap metadata entries keyed by image type name
 * @return map of image type name to VersionInfo
 */
private Map<String, VersionInfo> createVersionInfoMap(
    final Map<String, ImageVersionMetadata> imageVersionMetadataMap) {
  final Map<String, VersionInfo> versionInfoMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
  for (final Map.Entry<String, ImageVersionMetadata> entry : imageVersionMetadataMap.entrySet()) {
    final ImageVersion imageVersion = entry.getValue().getImageVersion();
    versionInfoMap.put(entry.getKey(),
        new VersionInfo(imageVersion.getVersion(), imageVersion.getPath(),
            imageVersion.getState()));
  }
  return versionInfoMap;
}
|
Creates VersionInfo map from the ImageVersionMetadata map for the given image type keys.
@param imageVersionMetadataMap
@return Map<String, VersionInfo>
|
createVersionInfoMap
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Returns a comparator ordering ImageRampup entries by ascending rampup percentage.
 *
 * @return Comparator over ImageRampup
 */
private Comparator<ImageRampup> getRampupPercentageComparator() {
  return (left, right) ->
      Integer.compare(left.getRampupPercentage(), right.getRampupPercentage());
}
|
Return rampup percentage comparator
@return Comparator<ImageRampup>
|
getRampupPercentageComparator
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/rampup/ImageRampupManagerImpl.java
|
Apache-2.0
|
/**
 * Invoked while deserializing JSON field names into Map keys; only String and Object key
 * types are accepted, all others are rejected.
 *
 * @return the shared case-insensitive key deserializer
 * @throws IllegalArgumentException if the key type is neither String nor Object
 */
@Override
public KeyDeserializer findKeyDeserializer(final JavaType type, final DeserializationConfig config,
    final BeanDescription beanDesc, final BeanProperty property)
    throws JsonMappingException {
  final Class<?> rawClass = type.getRawClass();
  if (rawClass == String.class || rawClass == Object.class) {
    return DESERIALIZER;
  }
  throw new IllegalArgumentException(
      "expected String or Object, found " + rawClass.getName());
}
|
This method is invoked while deserializing JSON content field names to Map keys. In this case,
only keys of type string or object are considered.
@param type
@param config
@param beanDesc
@param property
@return KeyDeserializer
@throws JsonMappingException
|
findKeyDeserializer
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/utils/CaseInsensitiveKeyDeserializers.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/utils/CaseInsensitiveKeyDeserializers.java
|
Apache-2.0
|
/**
 * Deserializes a given map key by normalizing it to lowercase.
 *
 * @param key the raw JSON field name
 * @param ctxt deserialization context (unused)
 * @return the lowercased key
 */
@Override
public Object deserializeKey(final String key, final DeserializationContext ctxt)
    throws IOException {
  // Use a locale-independent lowercase: the default-locale overload can produce unexpected
  // results for ASCII keys under certain locales (e.g. Turkish dotless i).
  return key.toLowerCase(java.util.Locale.ROOT);
}
|
This method deserializes a given key to lowercase.
@param key
@param ctxt
@return Object
@throws IOException
|
deserializeKey
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/utils/CaseInsensitiveKeyDeserializers.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/utils/CaseInsensitiveKeyDeserializers.java
|
Apache-2.0
|
/**
 * Converts a JSON input payload to the API-specific model (DTO).
 *
 * @param jsonPayloadString JSON payload to deserialize
 * @param dtoClass target DTO class
 * @param <T> DTO type
 * @return the deserialized DTO
 * @throws ImageMgmtInvalidInputException if the payload cannot be parsed or mapped
 */
public <T extends BaseDTO> T convertToDTO(final String jsonPayloadString, final Class<T> dtoClass)
    throws ImageMgmtInvalidInputException {
  try {
    return this.objectMapper.readValue(jsonPayloadString, dtoClass);
  } catch (final IOException e) {
    // JsonParseException and JsonMappingException both extend IOException, so one catch
    // replaces the three previous branches, which differed only in log wording.
    log.error("Exception while reading input json ", e);
    throw new ImageMgmtInvalidInputException(ErrorCode.BAD_REQUEST,
        "Exception while reading input payload. Invalid input.");
  }
}
|
Converts Json input payload to API specific model (DTO).
@param jsonPayloadString
@param dtoClass
@param <T>
@return DTO
@throws ImageMgmtInvalidInputException
|
convertToDTO
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/utils/ConverterUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/utils/ConverterUtils.java
|
Apache-2.0
|
/**
 * Converts a JSON array payload to a list of API-specific models (DTOs).
 *
 * @param jsonPayloadString JSON array payload to deserialize
 * @param dtoClass target DTO element class
 * @param <T> DTO type
 * @return list of deserialized DTOs
 * @throws ImageMgmtInvalidInputException if the payload cannot be parsed or mapped
 */
public <T extends BaseDTO> List<T> convertToDTOs(final String jsonPayloadString,
    final Class<T> dtoClass)
    throws ImageMgmtInvalidInputException {
  try {
    final TypeFactory typeFactory = this.objectMapper.getTypeFactory();
    final JavaType javaType = typeFactory.constructParametricType(ArrayList.class, dtoClass);
    return this.objectMapper.readValue(jsonPayloadString, javaType);
  } catch (final IOException e) {
    // JsonParseException and JsonMappingException both extend IOException, so one catch
    // replaces the three previous branches, which differed only in log wording.
    log.error("Exception while reading input json ", e);
    throw new ImageMgmtInvalidInputException(
        "Exception while reading input payload. Invalid input.");
  }
}
|
Converts input json payload to API specific models (DTOs).
@param jsonPayloadString
@param <T>
@return List<T>
@throws ImageMgmtInvalidInputException
|
convertToDTOs
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/utils/ConverterUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/utils/ConverterUtils.java
|
Apache-2.0
|
/**
 * Inserts (versionSetMd5Hex, versionSetJsonString) into the version_set table if no row exists
 * for that md5, returns the (possibly pre-existing) row as a VersionSet, and caches it in the
 * in-memory maps.
 */
@Override
public synchronized Optional<VersionSet> insertAndGetVersionSet(final String versionSetMd5Hex,
    final String versionSetJsonString) throws ImageMgmtException {
  final SQLTransaction<Integer> insertIfAbsentTxn = transOperator -> {
    // Look up any existing row for this md5 first.
    final List<VersionSet> existing = transOperator
        .query(SELECT_VSET_FROM_MD5, new VersionSetHandler(), versionSetMd5Hex);
    if (existing.size() > 1) {
      throw new SQLException(
          "Expected only one VersionSet for versionSetMd5Hex " + versionSetMd5Hex);
    }
    if (!existing.isEmpty()) {
      return existing.get(0).getVersionSetId();
    }
    // No row yet: insert and return the auto-generated id.
    transOperator.update(INSERT_VSET, versionSetMd5Hex, versionSetJsonString);
    return (int) transOperator.getLastInsertId();
  };
  try {
    final Integer versionSetId = this.dbOperator.transaction(insertIfAbsentTxn);
    final VersionSet versionSet =
        new VersionSet(versionSetJsonString, versionSetMd5Hex, versionSetId);
    this.md5ToVersionSet.put(versionSetMd5Hex, versionSet);
    this.idToVersionSet.put(versionSetId, versionSet);
    return Optional.of(versionSet);
  } catch (final SQLException e) {
    logger.error("Exception occurred while inserting version set and getting version id", e);
    throw new ImageMgmtException("Unable to insert and get versionSetId", e);
  }
}
|
This method inserts the versionSetJsonString and versionSetMd5Hex to the Table version_set and
get the versionSetId autogenerated post insertion.
|
insertAndGetVersionSet
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
Apache-2.0
|
/**
 * Deletes the version_set row matching versionSetMd5Hex and evicts it from the in-memory
 * caches.
 *
 * @param versionSetMd5Hex md5 hex of the version set to delete
 * @return true if at least one row was deleted
 * @throws ImageMgmtException on database failure
 */
@Override
public synchronized boolean deleteVersionSet(final String versionSetMd5Hex)
    throws ImageMgmtException {
  // Renamed from "insertVersionSet": this transaction deletes, it does not insert.
  final SQLTransaction<Integer> deleteVersionSetTxn = transOperator -> transOperator
      .update(DELETE_VSET, versionSetMd5Hex);
  try {
    final Integer rowsDeleted = this.dbOperator.transaction(deleteVersionSetTxn);
    if (this.md5ToVersionSet.containsKey(versionSetMd5Hex)) {
      final VersionSet removedVersionSet = this.md5ToVersionSet.remove(versionSetMd5Hex);
      this.idToVersionSet.remove(removedVersionSet.getVersionSetId());
    }
    return rowsDeleted != 0;
  } catch (final SQLException e) {
    logger.error("Failed to execute: " + DELETE_VSET, e);
    throw new ImageMgmtException(
        "Exception occurred while removing version set for " + versionSetMd5Hex,
        e);
  }
}
|
Deletes the versionSet row from the table version_set corresponding to versionSetMd5Hex.
|
deleteVersionSet
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
Apache-2.0
|
/**
 * Returns the cached VersionSet for the given md5 if present; otherwise inserts the version
 * set into the table and returns the newly created entry.
 */
@Override
public synchronized Optional<VersionSet> getVersionSet(final String versionSetMd5Hex,
    final String versionSetJsonString)
    throws ImageMgmtException {
  final VersionSet cached = this.md5ToVersionSet.get(versionSetMd5Hex);
  if (cached != null) {
    return Optional.of(cached);
  }
  return insertAndGetVersionSet(versionSetMd5Hex, versionSetJsonString);
}
|
This method first checks if the versionSetId already exists in the local copy of the
versionSets, otherwise, it will insert the versionSet into the table and get the resulting
versionSetId.
|
getVersionSet
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
Apache-2.0
|
/**
 * Fetches all rows from the version_set table as a list of VersionSet.
 *
 * @throws ImageMgmtException on database failure
 */
@Override
public synchronized List<VersionSet> fetchAllVersionSets() throws ImageMgmtException {
  try {
    return this.dbOperator.query(SELECT_ALL_VSET, new VersionSetHandler());
  } catch (final SQLException e) {
    logger.error("Failed to execute: " + SELECT_ALL_VSET, e);
    throw new ImageMgmtException("Failed to fetch all VersionSets ", e);
  }
}
|
Fetches all the rows from the version_set table and returns it as List of VersionSet.
|
fetchAllVersionSets
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
Apache-2.0
|
/**
 * Maps the version_set result-set rows into a list of VersionSet objects.
 */
@Override
public List<VersionSet> handle(final ResultSet rs) throws SQLException {
  final List<VersionSet> versionSets = new ArrayList<>();
  while (rs.next()) {
    final int id = rs.getInt(VERSIONSET_ID_IDX);
    final String md5Hex = rs.getString(VERSIONSET_MD5_IDX);
    final String jsonString = rs.getString(VERSIONSET_JSON_IDX);
    try {
      versionSets.add(new VersionSet(jsonString, md5Hex, id));
    } catch (final Exception e) {
      // Surface VersionSet construction failures to the DB layer as SQLException.
      throw new SQLException(e);
    }
  }
  return versionSets;
}
|
A Handler class to modify the result set into List of VersionSet.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/imagemgmt/version/JdbcVersionSetLoader.java
|
Apache-2.0
|
/**
 * Logs the job-callback HTTP response body (up to MAX_RESPONSE_LINE_TO_PRINT lines) using the
 * configured logger and returns the HTTP status code. Logging failures never propagate.
 *
 * @param response the HTTP response to log
 * @return the response status code
 */
@Override
public Integer handleResponse(final HttpResponse response)
    throws ClientProtocolException, IOException {
  final int statusCode = response.getStatusLine().getStatusCode();
  try {
    final HttpEntity responseEntity = response.getEntity();
    if (responseEntity == null) {
      this.logger.info("No response");
      return statusCode;
    }
    // try-with-resources replaces the manual close in a finally block.
    try (final BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(
        responseEntity.getContent(), StandardCharsets.UTF_8))) {
      String line;
      int lineCount = 0;
      this.logger.info("HTTP response [");
      while ((line = bufferedReader.readLine()) != null) {
        this.logger.info(line);
        lineCount++;
        // Cap the amount of response body that gets logged.
        if (lineCount > MAX_RESPONSE_LINE_TO_PRINT) {
          break;
        }
      }
      this.logger.info("]");
    }
  } catch (final Throwable t) {
    // Best-effort logging: never let a logging failure affect callback handling.
    this.logger.warn(
        "Encountered error while logging out job callback response", t);
  }
  return statusCode;
}
|
Response handler for logging job callback response using the given logger instance
@author hluu
|
handleResponse
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobcallback/JobCallbackRequestMaker.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobcallback/JobCallbackRequestMaker.java
|
Apache-2.0
|
/**
 * Parses the job-callback properties for the given status into a list of ready-to-execute
 * HTTP requests, replacing context tokens with their actual values.
 *
 * <p>Callbacks with an unsupported request method, or POST callbacks missing a body, are now
 * skipped entirely; previously a null request could be dereferenced on setHeaders or added to
 * the result list, causing an NPE.
 *
 * @return List&lt;HttpRequestBase&gt; - empty if no job callback related properties
 */
public static List<HttpRequestBase> parseJobCallbackProperties(final Props props,
    final JobCallbackStatusEnum status, final Map<String, String> contextInfo,
    final int maxNumCallback, final Logger privateLogger) {
  if (!isThereJobCallbackProperty(props, status)) {
    // short circuit
    return Collections.emptyList();
  }
  final List<HttpRequestBase> result = new ArrayList<>();
  // replace property templates with status
  final String jobCallBackUrlKey =
      replaceStatusToken(JOB_CALLBACK_URL_TEMPLATE, status);
  final String requestMethod =
      replaceStatusToken(JOB_CALLBACK_REQUEST_METHOD_TEMPLATE, status);
  final String httpBodyKey = replaceStatusToken(JOB_CALLBACK_BODY_TEMPLATE, status);
  final String headersKey =
      replaceStatusToken(JOB_CALLBACK_REQUEST_HEADERS_TEMPLATE, status);
  for (int sequence = 1; sequence <= maxNumCallback; sequence++) {
    HttpRequestBase httpRequest = null;
    final String sequenceStr = Integer.toString(sequence);
    // callback url
    final String callbackUrlKey =
        jobCallBackUrlKey.replace(SEQUENCE_TOKEN, sequenceStr);
    final String callbackUrl = props.get(callbackUrlKey);
    if (callbackUrl == null || callbackUrl.length() == 0) {
      // no more callbacks configured
      break;
    }
    final String callbackUrlWithTokenReplaced =
        replaceTokens(callbackUrl, contextInfo, true);
    final String requestMethodKey =
        requestMethod.replace(SEQUENCE_TOKEN, sequenceStr);
    final String method = props.getString(requestMethodKey, HTTP_GET);
    if (HTTP_POST.equals(method)) {
      final String postBodyKey = httpBodyKey.replace(SEQUENCE_TOKEN, sequenceStr);
      final String httpBodyValue = props.get(postBodyKey);
      if (httpBodyValue == null) {
        // missing body for POST, not good
        // update the wiki about skipping callback url if body is missing
        privateLogger.warn("Missing value for key: " + postBodyKey
            + " skipping job callback '" + callbackUrl + " for job "
            + contextInfo.get(CONTEXT_JOB_TOKEN));
      } else {
        // put together an URL
        privateLogger.info("callbackUrlWithTokenReplaced: " + callbackUrlWithTokenReplaced);
        final HttpPost httpPost = new HttpPost(callbackUrlWithTokenReplaced);
        final String postActualBody =
            replaceTokens(httpBodyValue, contextInfo, false);
        privateLogger.info("postActualBody: " + postActualBody);
        httpPost.setEntity(createStringEntity(postActualBody));
        httpRequest = httpPost;
      }
    } else if (HTTP_GET.equals(method)) {
      // GET
      httpRequest = new HttpGet(callbackUrlWithTokenReplaced);
    } else {
      privateLogger.warn("Unsupported request method: " + method
          + ". Only POST and GET are supported");
    }
    if (httpRequest == null) {
      // Fix: skip instead of dereferencing null / adding null to the result list.
      continue;
    }
    final String headersKeyPerSequence =
        headersKey.replace(SEQUENCE_TOKEN, sequenceStr);
    final String headersValue = props.get(headersKeyPerSequence);
    privateLogger.info("headers: " + headersValue);
    final Header[] headers = parseHttpHeaders(headersValue);
    if (headers != null) {
      httpRequest.setHeaders(headers);
      privateLogger.info("# of headers found: " + headers.length);
    }
    result.add(httpRequest);
  }
  return result;
}
|
This method is responsible for parsing job call URL properties and convert them into a list of
HttpRequestBase, which callers can use to execute.
In addition to parsing, it will also replace the tokens with actual values.
@return List<HttpRequestBase> - empty if no job callback related properties
|
parseJobCallbackProperties
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobcallback/JobCallbackUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobcallback/JobCallbackUtil.java
|
Apache-2.0
|
/**
 * Builds a token-to-value map from the job context carried by the given event, for use in
 * job-callback token replacement.
 *
 * @return map of context tokens (server, project, flow, execution id, job, job status)
 */
public static Map<String, String> buildJobContextInfoMap(final Event event, final String server) {
  final EventData eventData = event.getData();
  final Map<String, String> context = new HashMap<>();
  context.put(CONTEXT_SERVER_TOKEN, server);
  context.put(CONTEXT_PROJECT_TOKEN, eventData.getProjectName());
  context.put(CONTEXT_FLOW_TOKEN, eventData.getFlowName());
  context.put(CONTEXT_EXECUTION_ID_TOKEN, String.valueOf(eventData.getExecutionId()));
  context.put(CONTEXT_JOB_TOKEN, eventData.getJobId());
  context.put(CONTEXT_JOB_STATUS_TOKEN, eventData.getStatus().name().toLowerCase());
  return context;
}
|
This method takes the job context info. and put the values into a map with keys as the tokens.
@return Map<String,String>
|
buildJobContextInfoMap
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobcallback/JobCallbackUtil.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobcallback/JobCallbackUtil.java
|
Apache-2.0
|
/**
 * Re-configures the job props.
 *
 * @param props new props
 */
public void setJobProps(final Props props) {
  // Param made final for consistency with the rest of the file's conventions.
  this.jobProps = props;
}
|
Re-configure Job Props
@param props new props
|
setJobProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
Apache-2.0
|
/**
 * Returns the environment variables from the job properties table.
 *
 * @return all job properties carrying the "env." prefix
 */
public Map<String, String> getEnvironmentVariables() {
  return getJobProps().getMapByPrefix(ENV_PREFIX);
}
|
Get Environment Variables from the Job Properties Table
@return All Job Properties with "env." prefix
|
getEnvironmentVariables
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
Apache-2.0
|
/**
 * Returns the working directory from the job properties when present; otherwise falls back
 * to the job path, and finally to the empty string.
 *
 * @return working directory property, never null
 */
public String getWorkingDirectory() {
  return Utils.ifNull(getJobProps().getString(WORKING_DIR, this._jobPath), "");
}
|
Get Working Directory from Job Properties when it is presented. Otherwise, the working
directory is the jobPath
@return working directory property
|
getWorkingDirectory
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
Apache-2.0
|
@Deprecated
public Props loadOutputFileProps(final File outputPropertiesFile) {
InputStream reader = null;
try {
this.info("output properties file=" + outputPropertiesFile.getAbsolutePath());
reader =
new BufferedInputStream(new FileInputStream(outputPropertiesFile));
final Props outputProps = new Props();
final String content = Streams.asString(reader).trim();
if (!content.isEmpty()) {
final Map<String, Object> propMap =
(Map<String, Object>) JSONUtils.parseJSONFromString(content);
for (final Map.Entry<String, Object> entry : propMap.entrySet()) {
outputProps.put(entry.getKey(), entry.getValue().toString());
}
}
return outputProps;
} catch (final FileNotFoundException e) {
this.info(
String.format("File[%s] wasn't found, returning empty props.", outputPropertiesFile));
return new Props();
} catch (final Exception e) {
this.error(
"Exception thrown when trying to load output file props. Returning empty Props instead of failing. Is this really the best thing to do?",
e);
return new Props();
} finally {
IOUtils.closeQuietly(reader);
}
}
|
This public function will be deprecated since it tends to be a Utility function
Please use azkaban.utils.FileIOUtils.loadOutputFileProps(String file) instead.
|
loadOutputFileProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
Apache-2.0
|
@Deprecated
private File createFlattenedPropsFile(final Props props, final String workingDir,
String propsName) {
try {
final File directory = new File(workingDir);
// The temp file prefix must be at least 3 characters.
final File tempFile = File.createTempFile(getId() + propsName, "_tmp", directory);
props.storeFlattened(tempFile);
return tempFile;
} catch (final IOException e) {
throw new RuntimeException("Failed to create temp property file. workingDir = " + workingDir);
}
}
|
This public function will be deprecated since it tends to be a Utility function
Please use azkaban.utils.FileIOUtils.createOutputPropsFile(String, String, String) instead.
|
createFlattenedPropsFile
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
Apache-2.0
|
/**
 * Loads the job's output properties from the given file and stores them as this job's
 * generated properties.
 *
 * @param outputFile file containing the job's output properties
 */
protected void generateProperties(final File outputFile) {
  this.generatedProperties = loadOutputFileProps(outputFile);
}
|
Generate properties from output file and set to props tables
@param outputFile explain
|
generateProperties
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/AbstractProcessJob.java
|
Apache-2.0
|
/**
 * Builds a per-execution kerberos ticket cache file name from the job props, so each job
 * execution gets its own credential cache. Spaces are replaced with underscores because the
 * value is used as a file path component.
 *
 * @return file name: the kerberos ticket cache file to use
 */
private String getKrb5ccname(final Props jobProps) {
  final String user = getEffectiveUser(jobProps);
  final String project =
      jobProps.getString(CommonJobProperties.PROJECT_NAME).replace(" ", "_");
  final String flow =
      jobProps.getString(CommonJobProperties.FLOW_ID).replace(" ", "_");
  final String job =
      jobProps.getString(CommonJobProperties.JOB_ID).replace(" ", "_");
  // execId should be an int and should not have space in it, ever
  final String execId = jobProps.getString(CommonJobProperties.EXEC_ID);
  return String.format("/tmp/krb5cc__%s__%s__%s__%s__%s", project, flow, job, execId, user);
}
|
<pre>
This method extracts the kerberos ticket cache file name from the jobprops.
This method will ensure that each job execution will have its own kerberos ticket cache file
Given that the code only sets an environmental variable, the number of files created
corresponds
to the number of processes that are doing kinit in their flow, which should not be an
inordinately
high number.
</pre>
@return file name: the kerberos ticket cache file to use
|
getKrb5ccname
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/ProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/ProcessJob.java
|
Apache-2.0
|
/**
 * Checks whether the given user can write to the current working directory by attempting to
 * create an empty probe file via execute-as-user. The probe file is removed afterwards on a
 * best-effort basis.
 *
 * @param effectiveUser user/proxy user running the job
 * @return true if the user has write permission in the current working directory
 */
private boolean canWriteInCurrentWorkingDirectory(final String effectiveUser)
    throws IOException {
  final ExecuteAsUser executeAsUser = new ExecuteAsUser(
      this.getSysProps().getString(AZKABAN_SERVER_NATIVE_LIB_FOLDER));
  final Path probeFilePath = Paths.get(getWorkingDirectory(), TEMP_FILE_NAME);
  final int exitCode = executeAsUser.execute(effectiveUser,
      Arrays.asList(CREATE_FILE, probeFilePath.toString()));
  // If the probe file was created, delete it; failure to delete is non-fatal.
  try {
    Files.deleteIfExists(probeFilePath);
  } catch (final Exception e) {
    warn(String.format("Failed to delete %s in current working directory", TEMP_FILE_NAME), e);
  }
  return exitCode == SUCCESSFUL_EXECUTION;
}
|
Checks to see if user has write access to current working directory which many users need for
their jobs to store temporary data/jars on the executor.
Accomplishes this by using execute-as-user to try to create an empty file in the cwd.
@param effectiveUser user/proxy user running the job
@return true if user has write permissions in current working directory otherwise false
|
canWriteInCurrentWorkingDirectory
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/ProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/ProcessJob.java
|
Apache-2.0
|
private void assignUserFileOwnership(final String effectiveUser, final String fileName) throws
    Exception {
  // chown the file to effectiveUser:<azkaban group>, running the command as root.
  final ExecuteAsUser rootRunner = new ExecuteAsUser(
      this.getSysProps().getString(AZKABAN_SERVER_NATIVE_LIB_FOLDER));
  final String group = this.getSysProps().getString(AZKABAN_SERVER_GROUP_NAME, "azkaban");
  final String ownership = effectiveUser + ":" + group;
  info("Change ownership of " + fileName + " to " + ownership + ".");
  final int exitCode = rootRunner.execute("root", Arrays.asList(CHOWN, ownership, fileName));
  if (exitCode != 0) {
    handleError("Failed to change current working directory ownership. Error code: " + Integer
        .toString(exitCode), null);
  }
}
|
Changes permissions on file/directory so that the file/directory is owned by the user and the
group remains the azkaban service account name.
Leverages execute-as-user with "root" as the user to run the command.
@param effectiveUser user/proxy user running the job
@param fileName the name of the file whose permissions will be changed
|
assignUserFileOwnership
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/ProcessJob.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/ProcessJob.java
|
Apache-2.0
|
/**
 * Execute this process, blocking until it has completed.
 *
 * <p>Spawns the configured command in the configured working directory and
 * environment, streams its output into the job logger, waits for termination,
 * and throws if the exit code is non-zero.
 *
 * @throws IOException if the process cannot be started
 * @throws ProcessFailureException if the process exits with a non-zero code
 * @throws IllegalStateException if this process object has already been run
 */
public void run() throws IOException {
  // A process object represents a single execution and must never be reused.
  if (this.isStarted() || this.isComplete()) {
    throw new IllegalStateException("The process can only be used once.");
  }
  final ProcessBuilder builder = new ProcessBuilder(this.cmd);
  builder.directory(new File(this.workingDir));
  builder.environment().putAll(this.env);
  builder.redirectErrorStream(true);
  this.process = builder.start();
  try {
    // processId() returns 0 when the pid cannot be determined via reflection.
    this.processId = processId(this.process);
    if (this.processId == 0) {
      this.logger.info("Spawned process with unknown process id");
    } else {
      this.logger.info("Spawned process with id " + this.processId);
    }
    // Unblock any thread waiting for this process to start.
    this.startupLatch.countDown();
    final LogGobbler outputGobbler =
        new LogGobbler(
            new InputStreamReader(this.process.getInputStream(), StandardCharsets.UTF_8),
            this.logger, Level.INFO, 30);
    final LogGobbler errorGobbler =
        new LogGobbler(
            new InputStreamReader(this.process.getErrorStream(), StandardCharsets.UTF_8),
            this.logger, Level.ERROR, 30);
    outputGobbler.start();
    errorGobbler.start();
    int exitCode = -1;
    try {
      exitCode = this.process.waitFor();
    } catch (final InterruptedException e) {
      // Restore the interrupt flag so callers further up the stack can observe
      // the cancellation request; swallowing it here would hide the interrupt.
      Thread.currentThread().interrupt();
      this.logger.info("Process interrupted. Exit code is " + exitCode, e);
    }
    this.completeLatch.countDown();
    // try to wait for everything to get logged out before exiting
    outputGobbler.awaitCompletion(5000);
    errorGobbler.awaitCompletion(5000);
    if (exitCode != 0) {
      throw new ProcessFailureException(exitCode);
    }
  } finally {
    // Always release the process streams, even on failure paths.
    IOUtils.closeQuietly(this.process.getInputStream());
    IOUtils.closeQuietly(this.process.getOutputStream());
    IOUtils.closeQuietly(this.process.getErrorStream());
  }
}
|
Execute this process, blocking until it has completed.
|
run
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/utils/process/AzkabanProcess.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/utils/process/AzkabanProcess.java
|
Apache-2.0
|
/**
 * Get the process id for this process, if it has started.
 *
 * @return the OS process id; 0 when the pid could not be determined (see processId())
 * @throws IllegalStateException presumably if the process has not started -- TODO confirm checkStarted semantics
 */
public int getProcessId() {
  checkStarted();
  return this.processId;
}
|
Get the process id for this process, if it has started.
@return The process id or -1 if it cannot be fetched
|
getProcessId
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/utils/process/AzkabanProcess.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/utils/process/AzkabanProcess.java
|
Apache-2.0
|
/**
 * Attempt to read the OS process id from the given process.
 *
 * @param process the process to inspect
 * @return the process id, or 0 when it cannot be determined
 */
private int processId(final java.lang.Process process) {
  // Reflectively read the private "pid" field of the JVM's process
  // implementation class; any failure yields 0, meaning "unknown pid".
  try {
    final Field pidField = process.getClass().getDeclaredField("pid");
    pidField.setAccessible(true);
    return pidField.getInt(process);
  } catch (final Throwable t) {
    t.printStackTrace();
    return 0;
  }
}
|
Attempt to get the process id for this process
@param process The process to get the id from
@return The id of the process
|
processId
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobExecutor/utils/process/AzkabanProcess.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobExecutor/utils/process/AzkabanProcess.java
|
Apache-2.0
|
/**
 * Create job parameters that can be used to create a job instance.
 *
 * <p>Resolves the jobtype plugin, builds the job's class loader (including
 * cluster-specific dependencies when a cluster router resolves one), merges
 * cluster-specific properties into the job properties, and returns the bundle
 * needed to instantiate the job.
 *
 * @param jobId job id
 * @param jobProps job properties (reassigned and enriched during the build)
 * @param logger logger for build-time diagnostics
 * @return job parameters that can be used to create a job instance
 * @throws JobTypeManagerException if the executor cannot be built for any reason
 */
public JobParams createJobParams(final String jobId, Props jobProps, final Logger logger) {
  // This is final because during build phase, you should never need to swap
  // the pluginSet for safety reasons
  final JobTypePluginSet pluginSet = getJobTypePluginSet();
  try {
    final Optional<String> jobTypeOptional = getJobType(jobProps);
    if (!jobTypeOptional.isPresent()) {
      throw new JobExecutionException(String.format(
          "The 'type' parameter for job[%s] is missing or null or empty", jobProps));
    }
    final String jobType = jobTypeOptional.get();
    logger.info("Building " + jobType + " job executor. ");
    jobProps = getJobProps(jobProps, pluginSet, jobType);
    final Props pluginLoadProps = getPluginLoadProps(pluginSet, jobType);
    final List<URL> jobClassLoaderUrls = new ArrayList<>();
    // collect jobtype declared dependencies for the job's classloader
    final URL[] jobTypeURLs = pluginSet.getPluginClassLoaderURLs(jobType);
    jobClassLoaderUrls.addAll(Arrays.asList(jobTypeURLs));
    // collect cluster-specific dependencies for the job's classloader
    Cluster targetCluster = null;
    final Collection<String> components = getClusterComponents(jobProps,
        pluginSet.getPluginLoaderProps(jobType), false);
    ClassLoader jobContextClassLoader = this.parentLoader;
    if (!components.isEmpty()) {
      // A router decides which physical cluster serves this job's components.
      targetCluster = this.clusterRouter.getCluster(jobId, jobProps, logger,
          components);
      if (targetCluster != null && !Cluster.UNKNOWN.equals(targetCluster)) {
        jobContextClassLoader = targetCluster.getSecurityManagerClassLoader();
        jobProps.put(CommonJobProperties.TARGET_CLUSTER_ID, targetCluster.clusterId);
      }
    }
    logger.info(String.format("JobClassLoader URLs: %s", jobClassLoaderUrls.stream()
        .map(URL::toString).collect(Collectors.joining(", "))));
    final ClassLoader jobClassLoader = new JobClassLoader(
        jobClassLoaderUrls.toArray(new URL[jobClassLoaderUrls.size()]),
        jobContextClassLoader, jobId);
    // load the jobtype from JobClassLoader
    final String jobTypeClassName = pluginSet.getPluginClassName(jobType);
    final Class<? extends Object> jobTypeClass = jobClassLoader.loadClass(jobTypeClassName);
    // NOTE(review): ClassLoader.loadClass throws ClassNotFoundException rather than
    // returning null, so this guard is likely dead code -- confirm JobClassLoader's contract.
    if (jobTypeClass == null) {
      throw new JobExecutionException(String.format("Job type [%s] "
          + "is unrecognized. Could not construct job [%s] of type [%s].",
          jobType, jobId, jobType));
    }
    // inject cluster jars and native libraries into jobs through properties
    Props clusterSpecificProps = getClusterSpecificJobProps(targetCluster, jobProps, pluginLoadProps);
    for (final String key : clusterSpecificProps.getKeySet()) {
      // User's job props should take precedence over cluster props
      if (!jobProps.containsKey(key)) {
        jobProps.put(key, clusterSpecificProps.get(key));
      }
    }
    // Override any plugin load props if specified.
    // Make a clone of pluginLoadProps to ensure the original object is not corrupted.
    // Use the cloned object from here on.
    final Props pluginLoadPropsCopy = Props.clone(pluginLoadProps);
    if (pluginLoadOverrideProps != null) {
      final String[] propsList = pluginLoadOverrideProps.split(",");
      for (final String prop : propsList) {
        final String value = clusterSpecificProps.getString(prop, null);
        if (value == null) {
          // The property must be present in cluster specific props
          logger.warn(String.format("Expected override property %s is not "
              + " present in ClusterSpecific Properties, ignoring it.", prop));
          continue;
        }
        pluginLoadPropsCopy.put(prop, value);
      }
    }
    Props nonOverriddableClusterProps = getClusterSpecificNonOverridableJobProps(clusterSpecificProps);
    // CAUTION: ADD ROUTER-SPECIFIC PROPERTIES THAT ARE CRITICAL FOR JOB EXECUTION AS THE LAST
    // STEP TO STOP THEM FROM BEING ACCIDENTALLY OVERRIDDEN BY JOB PROPERTIES
    jobProps.putAll(nonOverriddableClusterProps);
    jobProps = PropsUtils.resolveProps(jobProps);
    Props pluginPrivateProps = pluginSet.getPluginPrivateProps(jobType);
    if(pluginPrivateProps == null) { // some jobtypes (ie: noop) don't have private properties.
      pluginPrivateProps = new Props();
    }
    return new JobParams(jobTypeClass, jobProps, pluginPrivateProps, pluginLoadPropsCopy,
        jobContextClassLoader);
  } catch (final Exception e) {
    // NOTE(review): message is concatenated without a separator and the exception is not
    // passed to logger.error, so the stack trace is lost here (unlike the Throwable branch).
    logger.error("Failed to build job executor for job " + jobId
        + e.getMessage());
    throw new JobTypeManagerException("Failed to build job executor for job "
        + jobId, e);
  } catch (final Throwable t) {
    logger.error(
        "Failed to build job executor for job " + jobId + t.getMessage(), t);
    throw new JobTypeManagerException("Failed to build job executor for job "
        + jobId, t);
  }
}
|
Create job parameters that can be used to create a job instance.
@param jobId job id
@param jobProps job properties
@param logger logger
@return job parameters that can be used to create a job instance
|
createJobParams
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
Apache-2.0
|
/**
 * Create an instance of Job with the given parameters, job id and job logger.
 * Tries the constructor that accepts plugin private properties first, then
 * falls back to the legacy constructor without them.
 */
public static Job createJob(final String jobId, final JobParams jobParams, final Logger logger) {
  // First attempt: the newer constructor form taking plugin private properties.
  try {
    return (Job) Utils.callConstructor(jobParams.jobClass, jobId, jobParams.pluginLoadProps,
        jobParams.jobProps, jobParams.pluginPrivateProps, logger);
  } catch (final Throwable e) {
    final String message = "Ctor with private properties %s, will try one without. e = ";
    final boolean missingCtor =
        e instanceof IllegalStateException && e.getCause() instanceof NoSuchMethodException;
    if (missingCtor) {
      // expected, message quietly, don't confuse users
      logger.debug(String.format(message, "not defined") + e.getMessage());
    } else {
      // unexpected, message loudly
      logger.warn(String.format(message, "failed"), e);
    }
  }
  // Fallback: the legacy constructor without private properties.
  try {
    return (Job) Utils.callConstructor(jobParams.jobClass, jobId, jobParams.pluginLoadProps,
        jobParams.jobProps, logger);
  } catch (final Throwable e) {
    final String message = String.format("Failed to build job: %s", jobId);
    logger.error(message, e);
    throw new JobTypeManagerException(message, e);
  }
}
|
Create an instance of Job with the given parameters, job id and job logger.
|
createJob
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
Apache-2.0
|
/**
 * Expose cluster-specific libraries and native libraries through job properties.
 * If a router resolved a cluster, properties come from that cluster's config;
 * otherwise the plugin's own properties are used as the implicit cluster config.
 */
private Props getClusterSpecificJobProps(final Cluster cluster, final Props jobProps,
    final Props pluginProps) {
  final boolean hasRoutedCluster = cluster != null && !Cluster.UNKNOWN.equals(cluster);
  final Props sourceProps = hasRoutedCluster ? cluster.getProperties() : pluginProps;
  final Props clusterProps = new Props();
  if (hasRoutedCluster) {
    // Start from the routed cluster's full property set.
    clusterProps.putAll(sourceProps);
  }
  final Collection<String> components = getClusterComponents(jobProps, pluginProps, true);
  // Expose the Java classpath for the selected components, when present.
  final String javaLibPath = Cluster.getJavaLibraryPath(sourceProps, components);
  if (javaLibPath != null && !javaLibPath.isEmpty()) {
    clusterProps.put(CommonJobProperties.TARGET_CLUSTER_CLASSPATH, javaLibPath);
  }
  // Expose the native library path likewise.
  final String nativeLibPath = Cluster.getNativeLibraryPath(sourceProps, components);
  if (nativeLibPath != null && !nativeLibPath.isEmpty()) {
    clusterProps.put(CommonJobProperties.TARGET_CLUSTER_NATIVE_LIB, nativeLibPath);
  }
  // Propagate the Hadoop security manager class when configured.
  final String securityManagerClass =
      sourceProps.get(Cluster.HADOOP_SECURITY_MANAGER_CLASS_PROP);
  if (securityManagerClass != null) {
    clusterProps.put(Cluster.HADOOP_SECURITY_MANAGER_CLASS_PROP, securityManagerClass);
  }
  return clusterProps;
}
|
Expose cluster-specific libraries and native libraries through job properties.
if a router is configured, construct the properties based on cluster.properties
otherwise, the cluster is implicitly configured, the properties will be based
on plugins' private properties.
|
getClusterSpecificJobProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
Apache-2.0
|
/**
 * Get the components within a cluster that a job depends on, merging job-level
 * and jobtype-level declarations, optionally minus the jobtype's exclusion list.
 */
private static Collection<String> getClusterComponents(final Props jobProps,
    final Props pluginProps, final boolean exclusionEnabled) {
  // LinkedHashSet keeps declaration order (classpath order) while de-duplicating.
  final Set<String> components = new LinkedHashSet<>();
  if (jobProps != null) {
    components.addAll(jobProps.getStringList(
        CommonJobProperties.JOB_CLUSTER_COMPONENTS_DEPENDENCIES, Collections.emptyList(), ","));
  }
  if (pluginProps != null) {
    components.addAll(pluginProps.getStringList(
        CommonJobProperties.JOBTYPE_CLUSTER_COMPONENTS_DEPENDENCIES, Collections.emptyList(), ","));
    if (exclusionEnabled) {
      // Drop components the jobtype explicitly excludes from the job process.
      components.removeAll(pluginProps.getStringList(
          CommonJobProperties.JOBTYPE_CLUSTER_COMPONENTS_DEPENDENCIES_EXCLUDED,
          Collections.emptyList(), ","));
    }
  }
  return components;
}
|
Get the components within a cluster that a job depends on. Note if
jobtype.dependency.components.excluded is set to true, the libraries
of the specified components are not exposed to the JVM process that
a job may spawn. This is to address jar conflict between
HadoopSecurityManager dependencies (hive) and those of individual jobs
|
getClusterComponents
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypeManager.java
|
Apache-2.0
|
/**
 * Gets the common job properties shared by every jobtype.
 *
 * @return the shared common job properties
 */
public Props getCommonPluginJobProps() {
  return this.commonJobProps;
}
|
Gets common properties for every jobtype
|
getCommonPluginJobProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Sets the common job properties shared by every jobtype.
 *
 * @param commonJobProps the shared common job properties
 */
public void setCommonPluginJobProps(final Props commonJobProps) {
  this.commonJobProps = commonJobProps;
}
|
Sets the common properties shared in every jobtype
|
setCommonPluginJobProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Get the properties for a jobtype used to set up and load its plugin.
 *
 * @param jobTypeName the jobtype name
 * @return the loader properties, or null if the jobtype is unknown
 */
public Props getPluginLoaderProps(final String jobTypeName) {
  return this.pluginLoadPropsMap.get(jobTypeName);
}
|
Get the properties for a jobtype used to setup and load a plugin
|
getPluginLoaderProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Get the plugin private properties for the jobtype.
 *
 * @param jobTypeName the jobtype name
 * @return the private properties, or null if none are registered
 */
public Props getPluginPrivateProps(final String jobTypeName) {
  return this.pluginPrivatePropsMap.get(jobTypeName);
}
|
Get the plugin private properties for the jobtype
|
getPluginPrivateProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Gets the fully-qualified job class name registered for the jobtype.
 *
 * @param jobTypeName the jobtype name
 * @return the class name, or null if the jobtype is unknown
 */
public String getPluginClassName(final String jobTypeName) {
  return this.jobToClassName.get(jobTypeName);
}
|
Gets the plugin job class name
|
getPluginClassName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Get the resource URLs that should be added to the jobtype's job ClassLoader.
 *
 * @param jobTypeName the jobtype name
 * @return the registered URLs, or an empty array if none are registered
 */
public URL[] getPluginClassLoaderURLs(final String jobTypeName) {
  return this.jobToClassLoaderURLs.getOrDefault(jobTypeName, EMPTY_URLS);
}
|
Get the resource URLs that should be added to its associated job ClassLoader.
|
getPluginClassLoaderURLs
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Adds plugin job properties used as default runtime properties for the jobtype.
 *
 * @param jobTypeName the jobtype name
 * @param props the default runtime properties
 */
public void addPluginJobProps(final String jobTypeName, final Props props) {
  this.pluginJobPropsMap.put(jobTypeName, props);
}
|
Adds plugin job properties used as default runtime properties
|
addPluginJobProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Add resource URLs that should be made available to the ClassLoader of all
 * jobs of the given jobtype.
 *
 * @param jobTypeName the jobtype name
 * @param urls the classpath URLs to register (replaces any previous registration)
 */
public void addPluginClassLoaderURLs(final String jobTypeName, final URL[] urls) {
  this.jobToClassLoaderURLs.put(jobTypeName, urls);
}
|
Add resource URLs that should be made available to ClassLoader of all jobs of the given jobtype.
|
addPluginClassLoaderURLs
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Adds the properties used to load the jobtype's plugin.
 *
 * @param jobTypeName the jobtype name
 * @param props the plugin load properties
 */
public void addPluginLoadProps(final String jobTypeName, final Props props) {
  this.pluginLoadPropsMap.put(jobTypeName, props);
}
|
Adds plugin load properties used to load the plugin
|
addPluginLoadProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * @return the set of users which are not allowed as default proxy users
 */
public Set<String> getDefaultProxyUsersFilter() {
  return this.defaultProxyUsersFilter;
}
|
@return The Set of users which are not allowed as defaultProxyUsers.
|
getDefaultProxyUsersFilter
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * @return the set of jobtype class names allowed to use the defaultProxyUsers feature
 */
public Set<String> getDefaultProxyUsersJobTypeClasses() {
  return this.defaultProxyUsersJobTypeClasses;
}
|
@return The list of allowed jobType classes for the defaultProxyUsers feature.
|
getDefaultProxyUsersJobTypeClasses
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Returns the configured default proxy user for the jobType, but only when the
 * user is not filtered out and the jobtype's class is allow-listed for the
 * defaultProxyUsers feature; otherwise returns {@link Optional#empty()}.
 *
 * @param jobType the type of the job, e.g. hadoopJava, hadoopShell, hive, java
 * @return {@link Optional} defaultProxyUser corresponding to the jobType
 */
public Optional<String> getDefaultProxyUser(String jobType) {
  // All of the following must hold: a non-blank proxy user is configured,
  // the user is not deny-listed, the jobtype is known, and its class is
  // allow-listed for the feature.
  final String candidate = this.jobToDefaultProxyUser.get(jobType);
  if (StringUtils.isBlank(candidate)
      || this.defaultProxyUsersFilter.contains(candidate)) {
    return Optional.empty();
  }
  if (!this.jobToClassName.containsKey(jobType)
      || !this.defaultProxyUsersJobTypeClasses.contains(this.jobToClassName.get(jobType))) {
    return Optional.empty();
  }
  return Optional.of(candidate);
}
|
If the default proxy user is configured for the jobType and the jobType class associated is
part of allowed jobType classes for the defaultProxyUser feature, then return Optional
defaultProxyUser. Otherwise return Optional.empty().
@param jobType The type of the job like hadoopJava, hadoopShell, hive, java, etc.
@return {@link Optional} defaultProxyUser corresponding to the jobType.
|
getDefaultProxyUser
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * Adds the defaultProxyUser for the jobType to the jobToDefaultProxyUser map.
 *
 * @param jobType the type of the job, e.g. hadoopJava, hadoopShell, hive, java
 * @param defaultProxyUser the default proxy user for the jobType
 */
public void addDefaultProxyUser(String jobType, String defaultProxyUser) {
  this.jobToDefaultProxyUser.put(jobType, defaultProxyUser);
}
|
Adds the defaultProxyUser for the jobType to the jobToDefaultProxyUser Map.
@param jobType The type of the job like hadoopJava, hadoopShell, hive, java, etc.
@param defaultProxyUser defaultProxyUser corresponding to the jobType.
|
addDefaultProxyUser
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/jobtype/JobTypePluginSet.java
|
Apache-2.0
|
/**
 * {@inheritDoc}
 *
 * @return the configured metric name
 * @see azkaban.metric.IMetric#getName()
 */
@Override
public String getName() {
  return this.name;
}
|
{@inheritDoc}
@see azkaban.metric.IMetric#getName()
|
getName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
Apache-2.0
|
/**
 * {@inheritDoc}
 *
 * @return the configured value type string for this metric
 * @see azkaban.metric.IMetric#getValueType()
 */
@Override
public String getValueType() {
  return this.type;
}
|
{@inheritDoc}
@see azkaban.metric.IMetric#getValueType()
|
getValueType
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
Apache-2.0
|
/**
 * {@inheritDoc}
 *
 * <p>Stores the manager reference so notifyManager() can report through it.
 *
 * @see azkaban.metric.IMetric#updateMetricManager(azkaban.metric.MetricReportManager)
 */
@Override
public void updateMetricManager(final MetricReportManager manager) {
  this.metricManager = manager;
}
|
{@inheritDoc}
@see azkaban.metric.IMetric#updateMetricManager(azkaban.metric.MetricReportManager)
|
updateMetricManager
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
Apache-2.0
|
/**
 * {@inheritDoc}
 *
 * <p>Returns a clone so the snapshot is decoupled from further metric mutation.
 *
 * @throws CloneNotSupportedException if the concrete metric is not cloneable
 * @see azkaban.metric.IMetric#getSnapshot()
 */
@Override
public IMetric<T> getSnapshot() throws CloneNotSupportedException {
  return (IMetric<T>) this.clone();
}
|
{@inheritDoc}
@see azkaban.metric.IMetric#getSnapshot()
|
getSnapshot
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
Apache-2.0
|
/**
 * {@inheritDoc}
 *
 * @return the current metric value
 * @see azkaban.metric.IMetric#getValue()
 */
@Override
public T getValue() {
  return this.value;
}
|
{@inheritDoc}
@see azkaban.metric.IMetric#getValue()
|
getValue
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
Apache-2.0
|
/**
 * Notifies the manager of a tracking event so the current metric value can be
 * reported. Implementations may invoke this on timers or on Azkaban events.
 * {@inheritDoc}
 *
 * @see azkaban.metric.IMetric#notifyManager()
 */
@Override
public void notifyManager() {
  final String metricClass = this.getClass().getName();
  logger.debug(String.format("Notifying Manager for %s", metricClass));
  try {
    // The manager snapshots this metric and fans it out to its emitters.
    this.metricManager.reportMetric(this);
  } catch (final Throwable ex) {
    // metricManager may never have been set via updateMetricManager().
    logger.error(
        String.format("Metric Manager is not set for %s metric", metricClass), ex);
  }
}
|
Notifies the manager of a tracking event so the metric can be reported. A metric implementation
is free to call this method whenever appropriate; timer-based triggers and Azkaban events are the
most common implementations. {@inheritDoc}
@see azkaban.metric.IMetric#notifyManager()
|
notifyManager
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/AbstractMetric.java
|
Apache-2.0
|
/**
 * @return true if the metric manager has been instantiated and is enabled
 */
public static boolean isAvailable() {
  return isInstantiated() && isManagerEnabled;
}
|
@return true, if we have Instantiated and enabled metric manager from Azkaban exec server
|
isAvailable
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
Apache-2.0
|
/**
 * @return true if the singleton metric manager has been instantiated
 */
public static boolean isInstantiated() {
  return instance != null;
}
|
@return true, if we have Instantiated metric manager from Azkaban exec server
|
isInstantiated
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
Apache-2.0
|
/**
 * Get the singleton MetricReportManager, lazily creating it on first use.
 *
 * <p>NOTE(review): this is classic double-checked locking; it is only safe if the
 * {@code instance} field is declared {@code volatile} -- confirm at the field
 * declaration, which is not visible here.
 *
 * @return the shared MetricReportManager instance
 */
public static MetricReportManager getInstance() {
  if (instance == null) {
    synchronized (MetricReportManager.class) {
      if (instance == null) {
        logger.info("Instantiating MetricReportManager");
        instance = new MetricReportManager();
      }
    }
  }
  return instance;
}
|
Get a singleton object for Metric Manager
|
getInstance
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
Apache-2.0
|
/**
 * Reports a metric to all registered emitters. Each managed metric calls this
 * method to publish its current value.
 *
 * <p>A snapshot is taken while holding the metric's monitor so concurrent
 * updates cannot tear the value; emission then happens asynchronously on the
 * executor, one task per emitter. No-op when the manager is unavailable.
 *
 * @param metric the metric to report; ignored if null
 */
@SuppressWarnings("FutureReturnValueIgnored")
public void reportMetric(final IMetric<?> metric) {
  if (metric != null && isAvailable()) {
    try {
      final IMetric<?> metricSnapshot;
      // take snapshot
      synchronized (metric) {
        metricSnapshot = metric.getSnapshot();
      }
      logger.debug(String
          .format("Submitting %s metric for metric emission pool", metricSnapshot.getName()));
      // report to all emitters
      for (final IMetricEmitter metricEmitter : this.metricEmitters) {
        this.executorService.submit(() -> {
          try {
            // Emitter failures are logged and isolated; they do not affect other emitters.
            metricEmitter.reportMetric(metricSnapshot);
          } catch (final Exception ex) {
            logger.error(
                String.format("Failed to report %s metric due to ", metricSnapshot.getName()),
                ex);
          }
        });
      }
    } catch (final CloneNotSupportedException ex) {
      logger.error(
          String.format("Failed to take snapshot for %s metric", metric.getClass().getName()),
          ex);
    }
  }
}
|
Each managed metric is responsible for calling this method to report its current value.
@param metric the metric to report
|
reportMetric
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
Apache-2.0
|
/**
 * Add a metric to be managed by the Metric Manager.
 *
 * <p>Registration is skipped (with an error log) when a metric with the same
 * name is already registered; the first registration wins.
 *
 * @param metric the metric to register
 * @throws IllegalArgumentException if {@code metric} is null
 */
public void addMetric(final IMetric<?> metric) {
  if (metric == null) {
    throw new IllegalArgumentException("Cannot add a null metric");
  }
  if (getMetricFromName(metric.getName()) == null) {
    logger.debug(String.format("Adding %s metric in Metric Manager", metric.getName()));
    this.metrics.add(metric);
    // Give the metric a back-reference so it can report through this manager.
    metric.updateMetricManager(this);
  } else {
    // Include the metric name so duplicate registrations are diagnosable from the log.
    logger.error(String.format(
        "Failed to add metric %s: a metric with this name is already registered",
        metric.getName()));
  }
}
|
Add a metric to be managed by Metric Manager
|
addMetric
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
Apache-2.0
|
/**
 * Get the metric object registered under the given name.
 *
 * @param name the metric name to look up
 * @return the matching metric, or null when the name is null or unknown
 */
public IMetric<?> getMetricFromName(final String name) {
  if (name == null) {
    return null;
  }
  // Linear scan; the set of managed metrics is expected to be small.
  for (final IMetric<?> candidate : this.metrics) {
    if (candidate.getName().equals(name)) {
      return candidate;
    }
  }
  return null;
}
|
Get metric object for a given metric name
@param name metricName
@return metric Object, if found. Otherwise null.
|
getMetricFromName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
Apache-2.0
|
/**
 * Shuts down the emission executor service when the manager is garbage collected.
 *
 * <p>NOTE(review): {@code finalize()} is deprecated and not guaranteed to run;
 * an explicit shutdown method or shutdown hook would be more reliable -- left
 * unchanged here to preserve existing behavior.
 *
 * {@inheritDoc}
 * @see java.lang.Object#finalize()
 */
@Override
protected void finalize() {
  this.executorService.shutdown();
}
|
Shutdown execution service {@inheritDoc}
@see java.lang.Object#finalize()
|
finalize
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/MetricReportManager.java
|
Apache-2.0
|
/**
 * Update the reporting interval. Synchronized so an in-flight cleanup pass
 * reading the window sees a consistent value.
 *
 * @param val interval in milliseconds
 */
public synchronized void setReportingInterval(final long val) {
  this.timeWindow = val;
}
|
Update reporting interval
@param val interval in milliseconds
|
setReportingInterval
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
Apache-2.0
|
/**
 * Set the number of data points to display on the /stats servlet.
 *
 * @param num maximum number of snapshot instances to retain per query
 */
public void setReportingInstances(final long num) {
  this.numInstances = num;
}
|
Set number of /stats servlet display points
|
setReportingInstances
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
Apache-2.0
|
/**
 * Get snapshots for a given metric within a time window.
 *
 * @param metricName name of the metric whose history is requested
 * @param from start of the window (exclusive)
 * @param to end of the window (exclusive)
 * @param useStats if true, keep only statistically significant points;
 *        otherwise sample evenly across the interval.
 *        NOTE(review): auto-unboxed below, so a null value would throw NPE --
 *        confirm callers never pass null.
 * @return the selected snapshots (empty if the metric is unknown)
 */
public List<InMemoryHistoryNode> getMetrics(final String metricName, final Date from,
    final Date to,
    final Boolean useStats) throws ClassCastException {
  final LinkedList<InMemoryHistoryNode> selectedLists = new LinkedList<>();
  if (this.historyListMapping.containsKey(metricName)) {
    logger.debug("selecting snapshots within time frame");
    // Lock the per-metric history list while scanning it.
    synchronized (this.historyListMapping.get(metricName)) {
      for (final InMemoryHistoryNode node : this.historyListMapping.get(metricName)) {
        if (node.getTimestamp().after(from) && node.getTimestamp().before(to)) {
          selectedLists.add(node);
        }
        // Entries are presumably appended in time order, so stop once past the window.
        if (node.getTimestamp().after(to)) {
          break;
        }
      }
    }
    // selecting nodes if num of nodes > numInstances
    if (useStats) {
      statBasedSelectMetricHistory(selectedLists);
    } else {
      generalSelectMetricHistory(selectedLists);
    }
  }
  // Opportunistically evict entries older than the reporting window.
  cleanUsingTime(metricName, new Date());
  return selectedLists;
}
|
Get snapshots for a given metric at a given time
@param metricName name of the metric
@param from Start date
@param to end date
@param useStats get statistically significant points only
@return List of snapshots
|
getMetrics
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
Apache-2.0
|
/**
 * Down-samples the snapshot list in place by keeping points spaced evenly
 * across it, retaining at most {@code numInstances} entries.
 *
 * @param selectedLists list of snapshots to thin out (modified in place)
 */
private void generalSelectMetricHistory(final LinkedList<InMemoryHistoryNode> selectedLists) {
  logger.debug("selecting snapshots evenly from across the time interval");
  if (selectedLists.size() > this.numInstances) {
    // Keep roughly every step-th element; remove everything else.
    final double step = (double) selectedLists.size() / this.numInstances;
    long nextIndex = 0, currentIndex = 0, numSelectedInstances = 1;
    final Iterator<InMemoryHistoryNode> ite = selectedLists.iterator();
    while (ite.hasNext()) {
      ite.next();
      if (currentIndex == nextIndex) {
        // floor(x + 0.5) rounds to the nearest index for the next kept element.
        nextIndex = (long) Math.floor(numSelectedInstances * step + 0.5);
        numSelectedInstances++;
      } else {
        ite.remove();
      }
      currentIndex++;
    }
  }
}
|
filter snapshots by evenly selecting points across the interval
@param selectedLists list of snapshots
|
generalSelectMetricHistory
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
Apache-2.0
|
/**
 * Removes history snapshots older than the reporting interval, measured back
 * from the given date.
 *
 * @param metricName name of the metric whose history is pruned
 * @param firstAllowedDate end date of the retention interval
 */
private void cleanUsingTime(final String metricName, final Date firstAllowedDate) {
  // Nothing to clean when the metric has no recorded history.
  if (!this.historyListMapping.containsKey(metricName)
      || this.historyListMapping.get(metricName) == null) {
    return;
  }
  synchronized (this.historyListMapping.get(metricName)) {
    // Snapshot the interval under the manager lock so a concurrent
    // setReportingInterval() cannot affect an in-flight cleanup pass.
    final long localCopyOfTimeWindow;
    synchronized (this) {
      localCopyOfTimeWindow = this.timeWindow;
    }
    // Drop nodes older than the window, measured back from firstAllowedDate.
    InMemoryHistoryNode oldest = this.historyListMapping.get(metricName).peekFirst();
    while (oldest != null
        && firstAllowedDate.getTime() - oldest.getTimestamp().getTime()
            > localCopyOfTimeWindow) {
      this.historyListMapping.get(metricName).removeFirst();
      oldest = this.historyListMapping.get(metricName).peekFirst();
    }
  }
}
|
Remove snapshots to maintain reporting interval
@param metricName Name of the metric
@param firstAllowedDate End date of the interval
|
cleanUsingTime
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metric/inmemoryemitter/InMemoryMetricEmitter.java
|
Apache-2.0
|
// Registers every meter/counter this class reports, delegating creation to the shared
// MetricsManager. Called once at construction time; each add* call both creates the
// metric and registers it under the given name, so each name constant must be unique.
private void setupAllMetrics() {
  // Flow lifecycle meters.
  this.flowFailMeter = this.metricsManager.addMeter(FLOW_FAIL_METER_NAME);
  // Dispatch outcome meters.
  this.dispatchFailMeter = this.metricsManager.addMeter(DISPATCH_FAIL_METER_NAME);
  this.dispatchSuccessMeter = this.metricsManager.addMeter(DISPATCH_SUCCESS_METER_NAME);
  // Email notification outcome meters.
  this.sendEmailFailMeter = this.metricsManager.addMeter(SEND_EMAIL_FAIL_METER_NAME);
  this.sendEmailSuccessMeter = this.metricsManager.addMeter(SEND_EMAIL_SUCCESS_METER_NAME);
  // Flow submission outcome meters (success / fail / skipped).
  this.submitFlowSuccessMeter = this.metricsManager.addMeter(SUBMIT_FLOW_SUCCESS_METER_NAME);
  this.submitFlowFailMeter = this.metricsManager.addMeter(SUBMIT_FLOW_FAIL_METER_NAME);
  this.submitFlowSkipMeter = this.metricsManager.addMeter(SUBMIT_FLOW_SKIP_METER_NAME);
  // Counter (not a meter): current number of jobs waiting due to OOM.
  // NOTE(review): field name OOMWaitingJobCount breaks lowerCamelCase; renaming would
  // change the class's field surface, so it is left as-is here.
  this.OOMWaitingJobCount = this.metricsManager.addCounter(OOM_WAITING_JOB_COUNT_NAME);
  // Project upload meters, split by fat vs thin archive type.
  this.uploadFatProjectMeter = this.metricsManager.addMeter(UPLOAD_FAT_PROJECT_METER_NAME);
  this.uploadThinProjectMeter = this.metricsManager.addMeter(UPLOAD_THIN_PROJECT_METER_NAME);
  // Flow cancellation meters, including unreachable-executor and ungraceful-kill cases.
  this.cancelFlowMeter = this.metricsManager.addMeter(CANCEL_FLOW_METER_NAME);
  this.cancelFlowUnreachableMeter =
      this.metricsManager.addMeter(CANCEL_FLOW_UNREACHABLE_METER_NAME);
  this.cancelFlowUngracefulKillMeter =
      this.metricsManager.addMeter(CANCEL_FLOW_UNGRACEFULLY_KILL_METER_NAME);
  this.cancelFlowFailedMeter = this.metricsManager.addMeter(CANCEL_FLOW_FAILED_METER_NAME);
}
|
This singleton class CommonMetrics is in charge of collecting varieties of metrics which are
accessed in both web and exec modules. That said, these metrics will be exposed in both Web
server and executor.
|
setupAllMetrics
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/metrics/CommonMetrics.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/metrics/CommonMetrics.java
|
Apache-2.0
|
/**
 * Loads all flows for the given projects from the database and attaches them to each
 * Project object, indexed by flow id.
 *
 * @param projects list of Projects to fetch flows for.
 * @throws RuntimeException if the flows cannot be fetched from the store
 */
protected void loadAllFlows(final List<Project> projects) {
  try {
    // Single batched fetch for all projects.
    final Map<Project, List<Flow>> flowsByProject = this.projectLoader
        .fetchAllFlowsForProjects(projects);
    // Index each project's flows by id and hand the map to the project.
    flowsByProject.forEach((proj, flowList) -> {
      final Map<String, Flow> byId = new HashMap<>();
      for (final Flow f : flowList) {
        byId.put(f.getId(), f);
      }
      proj.setFlows(byId);
    });
  } catch (final ProjectManagerException e) {
    logger.error("Could not load projects flows from store.", e);
    throw new RuntimeException("Could not load projects flows from store.", e);
  }
}
|
loadAllFlows : To load all flows corresponding to the given projects from the database
@param projects list of Projects to fetch flows for.
|
loadAllFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/AbstractProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/AbstractProjectCache.java
|
Apache-2.0
|
/**
 * Loads all flow resource recommendations for the given projects from the database and
 * stores them, keyed by flow id, in each project's recommendation map.
 *
 * @param projects list of Projects to fetch flow resource recommendations for.
 * @throws RuntimeException if the recommendations cannot be fetched from the store
 */
protected void loadAllFlowResourceRecommendations(final List<Project> projects) {
  try {
    // Single batched fetch for all projects.
    final Map<Project, List<FlowResourceRecommendation>> recommendationsByProject =
        this.projectLoader.fetchAllFlowResourceRecommendationsForProjects(projects);
    // Populate each project's own (concurrent) recommendation map, keyed by flow id.
    recommendationsByProject.forEach((proj, recommendations) -> {
      final ConcurrentHashMap<String, FlowResourceRecommendation> targetMap =
          proj.getFlowResourceRecommendationMap();
      for (final FlowResourceRecommendation rec : recommendations) {
        targetMap.put(rec.getFlowId(), rec);
      }
    });
  } catch (final ProjectManagerException e) {
    logger.error("Could not load projects flow resource recommendations from store.", e);
    throw new RuntimeException("Could not load projects flow resource recommendations from store.", e);
  }
}
|
loadAllFlowResourceRecommendationsForAllProjects : To load all flow resource recommendations corresponding to
projects from the database
@param projects list of Projects to fetch flow resource recommendations for.
|
loadAllFlowResourceRecommendations
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/AbstractProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/AbstractProjectCache.java
|
Apache-2.0
|
/**
 * Fetches all active projects from the database.
 *
 * Fix: the error log message was copy-pasted from the flow-loading path and said
 * "Could not load projects flows from store." even though this method loads projects,
 * not flows; it now matches the thrown exception's message.
 *
 * @return list of active projects; empty if none are found
 * @throws RuntimeException if the projects cannot be fetched from the store
 */
@Override
public List<Project> getActiveProjects() {
  List<Project> result = Collections.emptyList();
  try {
    result = this.projectLoader.fetchAllActiveProjects();
  } catch (final ProjectManagerException e) {
    logger.error("Could not load projects from store.", e);
    throw new RuntimeException("Could not load projects from store.", e);
  }
  return result;
}
|
get all active projects from database.
@return List of projects;
|
getActiveProjects
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/AbstractProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/AbstractProjectCache.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.