code
stringlengths 23
201k
| docstring
stringlengths 17
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
72
| path
stringlengths 11
317
| url
stringlengths 57
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
public Map<String, List<Edge>> getEdgeMap() {
return this.edgeMap;
}
|
Returns the edge map constructed from the loaded flows.
@return Map of flow name to all its Edges.
|
getEdgeMap
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
@Override
public ValidationReport loadProjectFlow(final Project project, final File projectDir) {
convertYamlFiles(projectDir);
FlowLoaderUtils.checkJobProperties(project.getId(), this.props, this.jobPropsMap, this.errors);
return FlowLoaderUtils.generateFlowLoaderReport(this.errors);
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
loadProjectFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private void convertYamlFiles(final File projectDir) {
// Todo jamiesjc: convert project yaml file.
for (final File file : projectDir.listFiles(new SuffixFilter(Constants.FLOW_FILE_SUFFIX))) {
final NodeBeanLoader loader = new NodeBeanLoader();
try {
final NodeBean nodeBean = loader.load(file);
if (!loader.validate(nodeBean)) {
this.errors.add("Failed to validate nodeBean for " + file.getName()
+ ". Duplicate nodes found or dependency undefined or ROOT used as a name.");
} else {
final AzkabanFlow azkabanFlow = (AzkabanFlow) loader.toAzkabanNode(nodeBean);
if (this.flowMap.containsKey(azkabanFlow.getName())) {
this.errors.add("Duplicate flows found in the project with name " + azkabanFlow
.getName());
} else {
final Flow flow = convertAzkabanFlowToFlow(azkabanFlow, azkabanFlow.getName(), file);
this.flowMap.put(flow.getId(), flow);
}
}
} catch (final Exception e) {
this.errors.add("Error loading flow yaml file " + file.getName() + ":"
+ e.getMessage());
}
}
for (final File file : projectDir.listFiles(new DirFilter())) {
convertYamlFiles(file);
}
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
convertYamlFiles
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private Flow convertAzkabanFlowToFlow(final AzkabanFlow azkabanFlow, final String flowName,
final File flowFile) {
final Flow flow = new Flow(flowName);
flow.setAzkabanFlowVersion(Constants.AZKABAN_FLOW_VERSION_2_0);
final Props props = azkabanFlow.getProps();
FlowLoaderUtils.addEmailPropsToFlow(flow, props);
String failureAction = props.getString(FAILURE_ACTION_PROPERTY, null);
if (failureAction != null) {
flow.setFailureAction(failureAction);
}
props.setSource(flowFile.getName());
flow.addAllFlowProperties(ImmutableList.of(ImmutableFlowProps.createFlowProps(props)));
// Convert azkabanNodes to nodes inside the flow.
azkabanFlow.getNodes().values().stream()
.map(n -> convertAzkabanNodeToNode(n, flowName, flowFile, azkabanFlow))
.forEach(n -> flow.addNode(n));
// Add edges for the flow.
buildFlowEdges(azkabanFlow, flowName);
if (this.edgeMap.containsKey(flowName)) {
flow.addAllEdges(this.edgeMap.get(flowName));
}
// Todo jamiesjc: deprecate startNodes, endNodes and numLevels, and remove below method finally.
// Blow method will construct startNodes, endNodes and numLevels for the flow.
flow.initialize();
return flow;
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
convertAzkabanFlowToFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private Node convertAzkabanNodeToNode(final AzkabanNode azkabanNode, final String flowName,
final File flowFile, final AzkabanFlow azkabanFlow) {
final Node node = new Node(azkabanNode.getName());
node.setType(azkabanNode.getType());
validateCondition(node, azkabanNode, azkabanFlow);
node.setCondition(azkabanNode.getCondition());
node.setPropsSource(flowFile.getName());
node.setJobSource(flowFile.getName());
if (azkabanNode.getType().equals(Constants.FLOW_NODE_TYPE)) {
final String embeddedFlowId = flowName + Constants.PATH_DELIMITER + node.getId();
node.setEmbeddedFlowId(embeddedFlowId);
final Flow flowNode = convertAzkabanFlowToFlow((AzkabanFlow) azkabanNode, embeddedFlowId,
flowFile);
flowNode.setEmbeddedFlow(true);
flowNode.setCondition(node.getCondition());
this.flowMap.put(flowNode.getId(), flowNode);
}
this.jobPropsMap
.put(flowName + Constants.PATH_DELIMITER + node.getId(), azkabanNode.getProps());
return node;
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
convertAzkabanNodeToNode
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private void buildFlowEdges(final AzkabanFlow azkabanFlow, final String flowName) {
// Recursive stack to record searched nodes. Used for detecting dependency cycles.
final HashSet<String> recStack = new HashSet<>();
// Nodes that have already been visited and added edges.
final HashSet<String> visited = new HashSet<>();
for (final AzkabanNode node : azkabanFlow.getNodes().values()) {
addEdges(node, azkabanFlow, flowName, recStack, visited);
}
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
buildFlowEdges
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private void addEdges(final AzkabanNode node, final AzkabanFlow azkabanFlow,
final String flowName, final HashSet<String> recStack, final HashSet<String> visited) {
if (!visited.contains(node.getName())) {
recStack.add(node.getName());
visited.add(node.getName());
final List<String> dependsOnList = node.getDependsOn();
for (final String parent : dependsOnList) {
final Edge edge = new Edge(parent, node.getName());
if (!this.edgeMap.containsKey(flowName)) {
this.edgeMap.put(flowName, new ArrayList<>());
}
this.edgeMap.get(flowName).add(edge);
if (recStack.contains(parent)) {
// Cycles found, including self cycle.
edge.setError("Cycles found.");
this.errors.add("Cycles found at " + edge.getId());
} else {
// Valid edge. Continue to process the parent node recursively.
addEdges(azkabanFlow.getNode(parent), azkabanFlow, flowName, recStack, visited);
}
}
recStack.remove(node.getName());
}
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
addEdges
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private void validateCondition(final Node node, final AzkabanNode azkabanNode,
final AzkabanFlow azkabanFlow) {
boolean foundConditionOnJobStatus = false;
final String condition = azkabanNode.getCondition();
if (condition == null) {
return;
}
// First, remove all the whitespaces and parenthesis ().
final String replacedCondition = condition.replaceAll("\\s+|\\(|\\)", "");
// Second, split the condition by operators &&, ||, ==, !=, >, >=, <, <=
final String[] operands = replacedCondition.split(VALID_CONDITION_OPERATORS);
// Third, check whether all the operands are valid: only conditionOnJobStatus macros, numbers,
// strings, and variable substitution ${jobName:param} are allowed.
for (int i = 0; i < operands.length; i++) {
final Matcher matcher = CONDITION_ON_JOB_STATUS_PATTERN.matcher(operands[i]);
if (matcher.matches()) {
this.logger.info("Operand " + operands[i] + " is a condition on job status.");
if (foundConditionOnJobStatus) {
this.errors.add("Invalid condition for " + node.getId()
+ ": cannot combine more than one conditionOnJobStatus macros.");
}
foundConditionOnJobStatus = true;
node.setConditionOnJobStatus(ConditionOnJobStatus.fromString(matcher.group(1)));
} else {
if (operands[i].startsWith("!")) {
// Remove the operator '!' from the operand.
operands[i] = operands[i].substring(1);
}
if (operands[i].equals("")) {
this.errors
.add("Invalid condition for " + node.getId() + ": operand is an empty string.");
} else if (!DIGIT_STRING_PATTERN.matcher(operands[i]).matches()) {
validateVariableSubstitution(operands[i], azkabanNode, azkabanFlow);
}
}
}
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
validateCondition
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private void validateVariableSubstitution(final String operand, final AzkabanNode azkabanNode,
final AzkabanFlow azkabanFlow) {
final Matcher matcher = CONDITION_VARIABLE_REPLACEMENT_PATTERN.matcher(operand);
if (matcher.matches()) {
final String jobName = matcher.group(1);
final AzkabanNode conditionNode = azkabanFlow.getNode(jobName);
if (conditionNode == null) {
this.errors.add("Invalid condition for " + azkabanNode.getName() + ": " + jobName
+ " doesn't exist in the flow.");
}
// If a job defines condition on its descendant nodes, then that condition is invalid.
else if (isDescendantNode(conditionNode, azkabanNode, azkabanFlow)) {
this.errors.add("Invalid condition for " + azkabanNode.getName()
+ ": should not define condition on its descendant node " + jobName + ".");
}
} else {
this.errors.add("Invalid condition for " + azkabanNode.getName()
+ ": cannot resolve the condition. Please check the syntax for supported conditions.");
}
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
validateVariableSubstitution
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
private boolean isDescendantNode(final AzkabanNode current, final AzkabanNode target,
final AzkabanFlow azkabanFlow) {
// Check if the current node is a descendant of the target node.
if (current == null || target == null) {
return false;
} else if (current.getDependsOn() == null) {
return false;
} else if (current.getDependsOn().contains(target.getName())) {
return true;
} else {
for (final String nodeName : current.getDependsOn()) {
if (isDescendantNode(azkabanFlow.getNode(nodeName), target, azkabanFlow)) {
return true;
}
}
}
return false;
}
|
Loads all project flows from the directory.
@param project The project.
@param projectDir The directory to load flows from.
@return the validation report.
|
isDescendantNode
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/DirectoryYamlFlowLoader.java
|
Apache-2.0
|
public static FeatureFlag fromString(String key) {
for (FeatureFlag featureFlag : FeatureFlag.values()) {
if (featureFlag.getName().equals(key)) {
return featureFlag;
}
}
throw new IllegalArgumentException("FeatureFlag not found for key: " + key);
}
|
A Feature Flag class contains enum of all feature flags.
|
fromString
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FeatureFlag.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FeatureFlag.java
|
Apache-2.0
|
@Override
public String toString() {
return getName();
}
|
A Feature Flag class contains enum of all feature flags.
|
toString
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FeatureFlag.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FeatureFlag.java
|
Apache-2.0
|
public String getName() {
return this.name;
}
|
A Feature Flag class contains enum of all feature flags.
|
getName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FeatureFlag.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FeatureFlag.java
|
Apache-2.0
|
public FlowLoader createFlowLoader(final File projectDir) throws ProjectManagerException {
if (checkForValidProjectYamlFile(projectDir)) {
return new DirectoryYamlFlowLoader(this.props);
} else {
return new DirectoryFlowLoader(this.props);
}
}
|
Creates flow loader based on project YAML file inside project directory.
@param projectDir the project directory
@return the flow loader
|
createFlowLoader
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderFactory.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderFactory.java
|
Apache-2.0
|
private boolean checkForValidProjectYamlFile(final File projectDir) throws
ProjectManagerException {
final File[] projectFileList = projectDir.listFiles(new SuffixFilter(Constants
.PROJECT_FILE_SUFFIX));
if (projectFileList == null) {
throw new ProjectManagerException("Error reading project directory. Input is not a "
+ "directory or IO error happens.");
}
if (ArrayUtils.isNotEmpty(projectFileList)) {
if (projectFileList.length > 1) {
throw new ProjectManagerException("Duplicate project YAML files found in the project "
+ "directory. Only one is allowed.");
}
final Map<String, Object> azkabanProject;
try (FileInputStream fis = new FileInputStream(projectFileList[0])) {
azkabanProject = (Map<String, Object>) new Yaml().load(fis);
} catch (final IOException e) {
throw new ProjectManagerException("Error reading project YAML file.", e);
}
if (azkabanProject == null || !azkabanProject
.containsKey(Constants.ConfigurationKeys.AZKABAN_FLOW_VERSION)) {
throw new ProjectManagerException("azkaban-flow-version is not specified in the project "
+ "YAML file.");
}
if (azkabanProject.get(Constants.ConfigurationKeys.AZKABAN_FLOW_VERSION).equals
(Constants.AZKABAN_FLOW_VERSION_2_0)) {
return true;
} else {
throw new ProjectManagerException("Invalid azkaban-flow-version in the project YAML file.");
}
} else {
for (final File file : projectDir.listFiles(new DirFilter())) {
if (checkForValidProjectYamlFile(file)) {
return true;
}
}
return false;
}
}
|
Creates flow loader based on project YAML file inside project directory.
@param projectDir the project directory
@return the flow loader
|
checkForValidProjectYamlFile
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderFactory.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderFactory.java
|
Apache-2.0
|
private static boolean overridePropsInNodeBean(final NodeBean nodeBean, final String[] pathList,
final int idx, final Props prop) {
if (idx < pathList.length && nodeBean.getName().equals(pathList[idx])) {
if (idx == pathList.length - 1) {
if (prop.containsKey(Constants.NODE_TYPE)) {
nodeBean.setType(prop.get(Constants.NODE_TYPE));
}
final Map<String, String> config = prop.getFlattened();
config.remove(Constants.NODE_TYPE);
nodeBean.setConfig(config);
return true;
}
for (final NodeBean bean : nodeBean.getNodes()) {
if (overridePropsInNodeBean(bean, pathList, idx + 1, prop)) {
return true;
}
}
}
return false;
}
|
Helper method to recursively find the node to override props.
@param nodeBean the node bean
@param pathList the path list
@param idx the idx
@param prop the props to override
@return the boolean
|
overridePropsInNodeBean
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
private static boolean findPropsFromNodeBean(final NodeBean nodeBean,
final String[] pathList, final int idx, final List<Props> propsList) {
if (idx < pathList.length && nodeBean.getName().equals(pathList[idx])) {
if (idx == pathList.length - 1) {
propsList.add(nodeBean.getProps());
return true;
}
for (final NodeBean bean : nodeBean.getNodes()) {
if (findPropsFromNodeBean(bean, pathList, idx + 1, propsList)) {
return true;
}
}
}
return false;
}
|
Helper method to recursively find props from node bean.
@param nodeBean the node bean
@param pathList the path list
@param idx the idx
@param propsList the props list
@return the boolean
|
findPropsFromNodeBean
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static FlowTrigger getFlowTriggerFromYamlFile(final File flowFile) {
final NodeBeanLoader loader = new NodeBeanLoader();
try {
final NodeBean nodeBean = loader.load(flowFile);
return loader.toFlowTrigger(nodeBean.getTrigger());
} catch (final Exception e) {
logger.error("Failed to get flow trigger, error loading flow YAML file. ", e);
}
return null;
}
|
Helper method to recursively find props from node bean.
@param nodeBean the node bean
@param pathList the path list
@param idx the idx
@param propsList the props list
@return the boolean
|
getFlowTriggerFromYamlFile
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static void addEmailPropsToFlow(final Flow flow, final Props prop) {
final List<String> successEmailList =
prop.getStringList(CommonJobProperties.SUCCESS_EMAILS,
Collections.EMPTY_LIST);
final Set<String> successEmail = new HashSet<>();
for (final String email : successEmailList) {
successEmail.add(email.toLowerCase());
}
final List<String> failureEmailList =
prop.getStringList(CommonJobProperties.FAILURE_EMAILS,
Collections.EMPTY_LIST);
final Set<String> failureEmail = new HashSet<>();
for (final String email : failureEmailList) {
failureEmail.add(email.toLowerCase());
}
final List<String> overrideEmailList =
prop.getStringList(CommonJobProperties.OVERRIDE_EMAILS,
Collections.EMPTY_LIST);
final Set<String> overrideEmail = new HashSet<>();
for (final String email : overrideEmailList) {
overrideEmail.add(email.toLowerCase());
}
final List<String> notifyEmailList =
prop.getStringList(CommonJobProperties.NOTIFY_EMAILS,
Collections.EMPTY_LIST);
for (String email : notifyEmailList) {
email = email.toLowerCase();
successEmail.add(email);
failureEmail.add(email);
}
flow.addFailureEmails(failureEmail);
flow.addSuccessEmails(successEmail);
flow.addOverrideEmails(overrideEmail);
}
|
Adds email properties to a flow.
@param flow the flow
@param prop the prop
|
addEmailPropsToFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static ValidationReport generateFlowLoaderReport(final Set<String> errors) {
final ValidationReport report = new ValidationReport();
report.addErrorMsgs(errors);
return report;
}
|
Generate flow loader report validation report.
@param errors the errors
@return the validation report
|
generateFlowLoaderReport
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static void checkJobProperties(final int projectId, final Props props,
final Map<String, Props> jobPropsMap, final Set<String> errors) {
// if project is in the memory check whitelist, then we don't need to check
// its memory settings
if (ProjectWhitelist.isProjectWhitelisted(projectId,
ProjectWhitelist.WhitelistType.MemoryCheck)) {
return;
}
final MemConfValue maxXms = MemConfValue.parseMaxXms(props);
final MemConfValue maxXmx = MemConfValue.parseMaxXmx(props);
for (final String jobName : jobPropsMap.keySet()) {
final Props jobProps = jobPropsMap.get(jobName);
final String xms = jobProps.getString(XMS, null);
if (xms != null && !PropsUtils.isVariableReplacementPattern(xms)
&& Utils.parseMemString(xms) > maxXms.getSize()) {
errors.add(String.format(
"%s: Xms value has exceeded the allowed limit (max Xms = %s)",
jobName, maxXms.getString()));
}
final String xmx = jobProps.getString(XMX, null);
if (xmx != null && !PropsUtils.isVariableReplacementPattern(xmx)
&& Utils.parseMemString(xmx) > maxXmx.getSize()) {
errors.add(String.format(
"%s: Xmx value has exceeded the allowed limit (max Xmx = %s)",
jobName, maxXmx.getString()));
}
// job callback properties check
JobCallbackValidator.validate(jobName, props, jobProps, errors);
}
}
|
Check job properties.
@param projectId the project id
@param props the server props
@param jobPropsMap the job props map
@param errors the errors
|
checkJobProperties
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static void cleanUpDir(final File dir) {
try {
if (dir != null && dir.exists()) {
FileUtils.deleteDirectory(dir);
}
} catch (final IOException e) {
logger.error("Failed to delete the directory", e);
dir.deleteOnExit();
}
}
|
Clean up the directory.
@param dir the directory to be deleted
|
cleanUpDir
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static boolean isAzkabanFlowVersion20(final double azkabanFlowVersion) {
return Double.compare(azkabanFlowVersion, Constants.AZKABAN_FLOW_VERSION_2_0) == 0;
}
|
Check if azkaban flow version is 2.0.
@param azkabanFlowVersion the azkaban flow version
@return the boolean
|
isAzkabanFlowVersion20
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
@Override
public boolean accept(final File pathname) {
final String name = pathname.getName();
return pathname.isFile() && !pathname.isHidden()
&& name.length() > this.suffix.length() && name.endsWith(this.suffix);
}
|
Instantiates a new Suffix filter.
@param suffix the suffix
|
accept
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static Props loadPropsFromYamlFile(final ProjectLoader projectLoader,
final ExecutableFlow executableFlow, final String path) {
File tempDir = null;
Props props = null;
try {
tempDir = com.google.common.io.Files.createTempDir();
props = FlowLoaderUtils.getPropsFromYamlFile(
path == null ? executableFlow.getId() : path,
getFlowFile(tempDir, projectLoader, executableFlow));
} catch (final Exception e) {
logger.error("Failed to get props from flow file. " + e);
} finally {
if (tempDir != null && tempDir.exists()) {
try {
FileUtils.deleteDirectory(tempDir);
} catch (final IOException e) {
logger.error("Failed to delete temp directory." + e);
tempDir.deleteOnExit();
}
}
}
return props;
}
|
Loads flow/job properties from the flow's YAML file. If path is null
then it loads the flow's properties, otherwise it loads the property
of the job at the path. The caller is responsible for providing the
correct path
@param projectLoader Used to fetch file from DB.
@param executableFlow The executable flow of which properties are
being loaded.
@param path Path to job file. NULL for flow properties.
@return return Props object with flow/job properties.
|
loadPropsFromYamlFile
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
public static Props loadPropsForExecutableFlow(
ProjectLoader projectLoader, ExecutableFlow executableFlow)
throws ProjectManagerException {
return FlowLoaderUtils.isAzkabanFlowVersion20(executableFlow.getAzkabanFlowVersion()) ?
FlowLoaderUtils.loadPropsFromYamlFile(projectLoader, executableFlow, null) :
projectLoader.fetchProjectProperty(
executableFlow.getProjectId(),
executableFlow.getVersion(),
Constants.PARAM_OVERRIDE_FILE);
}
|
Loads flow/job properties from the flow's YAML file. If path is null
then it loads the flow's properties, otherwise it loads the property
of the job at the path. The caller is responsible for providing the
correct path
@param projectLoader Used to fetch file from DB.
@param executableFlow The executable flow of which properties are
being loaded.
@param path Path to job file. NULL for flow properties.
@return return Props object with flow/job properties.
|
loadPropsForExecutableFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
private static File getFlowFile(final File tempDir, final ProjectLoader projectLoader,
final ExecutableFlow flow) throws Exception {
final List<ImmutableFlowProps> immutableFlowPropsList = ImmutableList.copyOf(flow.getFlowProps());
// There should be exact one source (file name) for each flow file.
if (immutableFlowPropsList.isEmpty() || immutableFlowPropsList.get(0) == null) {
throw new ProjectManagerException(
"Failed to get flow file source. Flow props is empty for " + flow.getId());
}
final String source = immutableFlowPropsList.get(0).getSource();
final int flowVersion = projectLoader
.getLatestFlowVersion(flow.getProjectId(), flow.getVersion(), source);
return projectLoader
.getUploadedFlowFile(flow.getProjectId(), flow.getVersion(), source,
flowVersion, tempDir);
}
|
This function fetches the flow file and puts it in tempDir
@param tempDir location where the flow file is put
@param projectLoader Used to fetch from DB
@param flow the executable flow
@return returns the flow file from db.
@throws Exception
|
getFlowFile
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowLoaderUtils.java
|
Apache-2.0
|
private void validateDepDefinitionUniqueness(final List<FlowTriggerDependency> dependencies) {
final Set<String> seen = new HashSet<>();
for (final FlowTriggerDependency dep : dependencies) {
final Map<String, String> props = dep.getProps();
// set.add() returns false when there exists duplicate
Preconditions.checkArgument(seen.add(dep.getType() + ":" + props.toString()), String.format
("duplicate dependency config %s found, dependency config should be unique",
dep.getName()));
}
}
|
check uniqueness of dependency type and params
|
validateDepDefinitionUniqueness
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
Apache-2.0
|
private void validateDependencies(final List<FlowTriggerDependency> dependencies) {
validateDepNameUniqueness(dependencies);
validateDepDefinitionUniqueness(dependencies);
}
|
check uniqueness of dependency type and params
|
validateDependencies
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
Apache-2.0
|
public FlowTriggerDependency getDependencyByName(final String name) {
return this.dependencies.get(name);
}
|
check uniqueness of dependency type and params
|
getDependencyByName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
Apache-2.0
|
public Collection<FlowTriggerDependency> getDependencies() {
return this.dependencies.values();
}
|
check uniqueness of dependency type and params
|
getDependencies
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
Apache-2.0
|
public Optional<Duration> getMaxWaitDuration() {
return Optional.ofNullable(this.maxWaitDuration);
}
|
check uniqueness of dependency type and params
|
getMaxWaitDuration
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
Apache-2.0
|
public CronSchedule getSchedule() {
return this.schedule;
}
|
check uniqueness of dependency type and params
|
getSchedule
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTrigger.java
|
Apache-2.0
|
public Long getMaxWaitMins() {
return this.maxWaitMins;
}
|
Java bean loaded from YAML file to represent a flow trigger.
|
getMaxWaitMins
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
Apache-2.0
|
public void setMaxWaitMins(final Long maxWaitMins) {
this.maxWaitMins = maxWaitMins;
}
|
Java bean loaded from YAML file to represent a flow trigger.
|
setMaxWaitMins
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
Apache-2.0
|
public Map<String, String> getSchedule() {
return this.schedule;
}
|
Java bean loaded from YAML file to represent a flow trigger.
|
getSchedule
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
Apache-2.0
|
public void setSchedule(final Map<String, String> schedule) {
this.schedule = schedule;
}
|
Java bean loaded from YAML file to represent a flow trigger.
|
setSchedule
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
Apache-2.0
|
public List<TriggerDependencyBean> getTriggerDependencies() {
return this.triggerDependencies == null ? Collections.emptyList() : this.triggerDependencies;
}
|
Java bean loaded from YAML file to represent a flow trigger.
|
getTriggerDependencies
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
Apache-2.0
|
public void setTriggerDependencies(
final List<TriggerDependencyBean> triggerDependencies) {
this.triggerDependencies = triggerDependencies;
}
|
Java bean loaded from YAML file to represent a flow trigger.
|
setTriggerDependencies
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
Apache-2.0
|
@Override
public String toString() {
return "FlowTriggerBean{" +
"maxWaitMins='" + this.maxWaitMins + '\'' +
", schedule=" + this.schedule +
", triggerDependencies=" + this.triggerDependencies +
'}';
}
|
Java bean loaded from YAML file to represent a flow trigger.
|
toString
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerBean.java
|
Apache-2.0
|
public String getName() {
return this.name;
}
|
@throws IllegalArgumentException if name or type is null or blank
@throws IllegalArgumentException if depProps is null
|
getName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
Apache-2.0
|
public String getType() {
return this.type;
}
|
@throws IllegalArgumentException if name or type is null or blank
@throws IllegalArgumentException if depProps is null
|
getType
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
Apache-2.0
|
public Map<String, String> getProps() {
return this.props;
}
|
@throws IllegalArgumentException if name or type is null or blank
@throws IllegalArgumentException if depProps is null
|
getProps
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
Apache-2.0
|
@Override
public String toString() {
return "FlowTriggerDependency{" +
"name='" + this.name + '\'' +
", type='" + this.type + '\'' +
", props=" + this.props +
'}';
}
|
@throws IllegalArgumentException if name or type is null or blank
@throws IllegalArgumentException if depProps is null
|
toString
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/FlowTriggerDependency.java
|
Apache-2.0
|
private void init() {
final List<Project> projects = super.getActiveProjects();
logger.info("Loading active projects.");
for (final Project proj : projects) {
putProject(proj);
}
logger.info("Loading flows from active projects.");
loadAllFlows(projects);
logger.info("Loading flow resource recommendations from active projects.");
loadAllFlowResourceRecommendations(projects);
logger.info("Active projects has been initialized.");
}
|
load all active projects and their corresponding flows into memory. Queries from database only
returns a high level project object. Need to explicitly load flows for the project objects.
|
init
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
Apache-2.0
|
@Override
public void putProject(final Project project) {
this.projectsByName.put(project.getName(), project);
this.projectsById.put(project.getId(), project);
}
|
Inserts given project into the cache.
@param project Project
|
putProject
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
Apache-2.0
|
@Override
public Optional<Project> getProjectByName(final String key) {
Project project = this.projectsByName.get(key);
if (project == null) {
logger.info("No active project with name {} exists in cache, fetching from DB.", key);
try {
project = fetchProjectByName(key);
} catch (final ProjectManagerException e) {
logger.error("Could not load project from store.", e);
}
}
return Optional.ofNullable(project);
}
|
Queries an active project by name. Fetches from database if not present in cache.
@param key name of the project
@return Project
|
getProjectByName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
Apache-2.0
|
@Override
public Optional<Project> getProjectById(final Integer key) throws ProjectManagerException {
Project project = this.projectsById.get(key);
if (project == null) {
logger.info("Project with ID " + key + " not found in cache, fetching from DB");
project = fetchProjectById(key);
}
return Optional.ofNullable(project);
}
|
Fetch active/inactive project by project id. If active project not present in cache, fetches
from DB. Fetches inactive project from DB.
@param key Project id
@return Project
|
getProjectById
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
Apache-2.0
|
@Override
public void removeProject(final Project project) {
this.projectsByName.remove(project.getName());
this.projectsById.remove(project.getId());
}
|
Invalidates the given project from cache.
|
removeProject
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
Apache-2.0
|
@Override
public List<Project> getProjectsWithSimilarNames(final Pattern pattern) {
final List<Project> matches = new ArrayList<>();
final ArrayList<String> names = new ArrayList<>(this.projectsByName.getKeys());
for (final String projName : names) {
if (pattern.matcher(projName).find()) {
matches.add(this.projectsByName.get(projName));
}
}
return matches;
}
|
@param pattern
@return List of Projects matching to given pattern.
|
getProjectsWithSimilarNames
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
Apache-2.0
|
@Override
public List<Project> getActiveProjects() {
return new ArrayList<>(this.projectsById.values());
}
|
Returns all the projects from the in-memory cache map.
|
getActiveProjects
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/InMemoryProjectCache.java
|
Apache-2.0
|
public Map<Dependency, FileValidationStatus> getValidationStatuses(final Set<Dependency> deps,
final String validationKey) throws SQLException {
Map<Dependency, FileValidationStatus> depValidationStatuses = new HashMap<>();
if (deps.isEmpty()) {
// There's nothing for us to do.
return depValidationStatuses;
}
// Map of (filename + sha1) -> Dependency for resolving the dependencies already cached in the DB
// after the query completes.
Map<String, Dependency> hashAndFileNameToDep = new HashMap<>();
Connection conn = null;
ResultSet rs = null;
PreparedStatement stmnt = null;
// TODO: Use azkaban.db.DatabaseOperator.query() instead of getting the DB connection and
// dealing with connection lifecycle.
try {
conn = this.dbOperator.getDataSource().getConnection();
if (conn == null) {
throw new SQLException("Null connection");
}
stmnt = conn.prepareStatement(
String
.format("SELECT file_name, file_sha1, validation_status FROM validated_dependencies "
+ "WHERE validation_key = ? AND (%s)", makeStrWithQuestionMarks(deps.size())));
// Set the first param, which is the validation_key
stmnt.setString(1, validationKey);
// Start at 2 because the first parameter is at index 1, and that is the validator key that we already set.
int index = 2;
for (Dependency d : deps) {
stmnt.setString(index++, d.getFileName());
stmnt.setString(index++, d.getSHA1());
hashAndFileNameToDep.put(d.getFileName() + d.getSHA1(), d);
}
rs = stmnt.executeQuery();
while (rs.next()) {
// Columns are (starting at index 1): file_name, file_sha1, validation_status
Dependency d = hashAndFileNameToDep.remove(rs.getString(1) + rs.getString(2));
// HashMap.remove will return null if the key is not found, hence check for it before
// adding to depValidationStatuses.
if (d != null) {
FileValidationStatus v = FileValidationStatus.valueOf(rs.getInt(3));
depValidationStatuses.put(d, v);
}
}
// All remaining dependencies in the hashToDep map should be marked as being NEW (because they weren't
// associated with any DB entry)
hashAndFileNameToDep.values().stream()
.forEach(d -> depValidationStatuses.put(d, FileValidationStatus.NEW));
} catch (final SQLException ex) {
log.error("Transaction failed: ", ex);
throw ex;
} finally {
// Replicate the order of closing in org.apache.commons.dbutils.QueryRunner#query
DbUtils.closeQuietly(conn, stmnt, rs);
}
return depValidationStatuses;
}
|
Provides methods for interacting with dependency validation cache in DB. Used during thin archive
uploads.
|
getValidationStatuses
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcDependencyManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcDependencyManager.java
|
Apache-2.0
|
public void updateValidationStatuses(final Map<Dependency, FileValidationStatus> depValidationStatuses,
final String validationKey) throws SQLException {
if (depValidationStatuses.isEmpty()) {
return;
}
// Order of columns: file_name, file_sha1, validation_key, validation_status
Object[][] rowsToInsert = depValidationStatuses
.keySet()
.stream()
.map(d -> new Object[]{d.getFileName(), d.getSHA1(), validationKey, depValidationStatuses.get(d).getValue()})
.toArray(Object[][]::new);
// We use insert IGNORE because a another process may have been processing the same dependency
// and written the row for a given dependency before we were able to (resulting in a duplicate primary key
// error when we try to write the row), so this will ignore the error and continue persisting the other
// dependencies.
this.dbOperator.batch("INSERT IGNORE INTO validated_dependencies "
+ "(file_name, file_sha1, validation_key, validation_status) VALUES (?, ?, ?, ?)", rowsToInsert);
}
|
Provides methods for interacting with dependency validation cache in DB. Used during thin archive
uploads.
|
updateValidationStatuses
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcDependencyManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcDependencyManager.java
|
Apache-2.0
|
private static String makeStrWithQuestionMarks(final int num) {
StringBuilder builder = new StringBuilder();
for(int i = 0; i < num; i++) {
builder.append("(file_name = ? and file_sha1 = ?) or ");
}
// Remove trailing " or ";
return builder.substring(0, builder.length() - 4);
}
|
Provides methods for interacting with dependency validation cache in DB. Used during thin archive
uploads.
|
makeStrWithQuestionMarks
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcDependencyManager.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcDependencyManager.java
|
Apache-2.0
|
@Override
public List<Project> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
// Project ID -> Project
final Map<Integer, Project> projects = new HashMap<>();
do {
final int id = rs.getInt(1);
// If a project has multiple permissions - the project will be returned multiple times,
// one for each permission and we don't need to go through the work of reconstructing the
// project object if we've already seen it.
if (!projects.containsKey(id)) {
// This project is new!
final String name = rs.getString(2);
final boolean active = rs.getBoolean(3);
final long modifiedTime = rs.getLong(4);
final long createTime = rs.getLong(5);
final int version = rs.getInt(6);
final String lastModifiedBy = rs.getString(7);
final String description = rs.getString(8);
final int encodingType = rs.getInt(9);
final byte[] data = rs.getBytes(10);
final String securityTag = rs.getString(11);
final Project project;
if (data != null) {
final EncodingType encType = EncodingType.fromInteger(encodingType);
final Object blobObj;
try {
// Convoluted way to inflate strings. Should find common package or
// helper function.
if (encType == EncodingType.GZIP) {
// Decompress the sucker.
final String jsonString = GZIPUtils.unGzipString(data, "UTF-8");
blobObj = JSONUtils.parseJSONFromString(jsonString);
} else {
final String jsonString = new String(data, "UTF-8");
blobObj = JSONUtils.parseJSONFromString(jsonString);
}
project = Project.projectFromObject(blobObj);
} catch (final IOException e) {
throw new SQLException(String.format("Failed to get project with id: %d", id), e);
}
} else {
project = new Project(id, name);
}
// update the fields as they may have changed
project.setActive(active);
project.setLastModifiedTimestamp(modifiedTime);
project.setCreateTimestamp(createTime);
project.setVersion(version);
project.setLastModifiedUser(lastModifiedBy);
project.setDescription(description);
project.setSecurityTag(securityTag != null ? SecurityTag.valueOf(securityTag) : null);
projects.put(id, project);
}
// Add the permission to the project
final String username = rs.getString(12);
final int permissionFlag = rs.getInt(13);
final boolean isGroup = rs.getBoolean(14);
final String uploader = rs.getString(15);
// Setting upload user in project Object
if (uploader != null) {
projects.get(id).setUploadUser(uploader);
}
// If username is not null, add the permission to the project
// If username is null, we can assume that this row was returned without any associated permission
// i.e. this project had no associated permissions.
if (username != null) {
Permission perm = new Permission(permissionFlag);
if (isGroup) {
projects.get(id).setGroupPermission(username, perm);
} else {
projects.get(id).setUserPermission(username, perm);
}
}
} while (rs.next());
return new ArrayList<>(projects.values());
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<Flow> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
final ArrayList<Flow> flows = new ArrayList<>();
do {
final String flowId = rs.getString(3);
final int encodingType = rs.getInt(5);
final byte[] dataBytes = rs.getBytes(6);
if (dataBytes == null) {
continue;
}
final EncodingType encType = EncodingType.fromInteger(encodingType);
Object flowObj = null;
try {
// Convoluted way to inflate strings. Should find common package or
// helper function.
if (encType == EncodingType.GZIP) {
// Decompress the sucker.
final String jsonString = GZIPUtils.unGzipString(dataBytes, "UTF-8");
flowObj = JSONUtils.parseJSONFromString(jsonString);
} else {
final String jsonString = new String(dataBytes, "UTF-8");
flowObj = JSONUtils.parseJSONFromString(jsonString);
}
final Flow flow = Flow.flowFromObject(flowObj);
flows.add(flow);
} catch (final IOException e) {
throw new SQLException("Error retrieving flow data " + flowId, e);
}
} while (rs.next());
return flows;
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<FlowResourceRecommendation> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
final ArrayList<FlowResourceRecommendation> flowResourceRecommendations = new ArrayList<>();
do {
final int id = rs.getInt(1);
final int projectId = rs.getInt(2);
final String flowId = rs.getString(3);
final String cpuRecommendation = rs.getString(4);
final String memoryRecommendation = rs.getString(5);
final String diskRecommendation = rs.getString(6);
final FlowResourceRecommendation flowResourceRecommendation =
new FlowResourceRecommendation(id, projectId, flowId, cpuRecommendation, memoryRecommendation,
diskRecommendation);
flowResourceRecommendations.add(flowResourceRecommendation);
} while (rs.next());
return flowResourceRecommendations;
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<Pair<String, Props>> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
final List<Pair<String, Props>> properties = new ArrayList<>();
do {
final String name = rs.getString(3);
final int eventType = rs.getInt(5);
final byte[] dataBytes = rs.getBytes(6);
final EncodingType encType = EncodingType.fromInteger(eventType);
String propertyString = null;
try {
if (encType == EncodingType.GZIP) {
// Decompress the sucker.
propertyString = GZIPUtils.unGzipString(dataBytes, "UTF-8");
} else {
propertyString = new String(dataBytes, "UTF-8");
}
final Props props = PropsUtils.fromJSONString(propertyString);
props.setSource(name);
properties.add(new Pair<>(name, props));
} catch (final IOException e) {
throw new SQLException(e);
}
} while (rs.next());
return properties;
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<ProjectLogEvent> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
final ArrayList<ProjectLogEvent> events = new ArrayList<>();
do {
final int projectId = rs.getInt(1);
final int eventType = rs.getInt(2);
final long eventTime = rs.getLong(3);
final String username = rs.getString(4);
final String message = rs.getString(5);
final ProjectLogEvent event =
new ProjectLogEvent(projectId, ProjectLogEvent.EventType.fromInteger(eventType),
eventTime, username,
message);
events.add(event);
} while (rs.next());
return events;
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<byte[]> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
final ArrayList<byte[]> data = new ArrayList<>();
do {
final byte[] bytes = rs.getBytes(5);
data.add(bytes);
} while (rs.next());
return data;
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<ProjectFileHandler> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return null;
}
final List<ProjectFileHandler> handlers = new ArrayList<>();
do {
final int projectId = rs.getInt(1);
final int version = rs.getInt(2);
final long uploadTime = rs.getLong(3);
final String uploader = rs.getString(4);
final String fileType = rs.getString(5);
final String fileName = rs.getString(6);
final byte[] md5 = rs.getBytes(7);
final int numChunks = rs.getInt(8);
final String resourceId = rs.getString(9);
final Blob startupDependenciesBlob = rs.getBlob(10);
final String uploaderIpAddr = rs.getString(11);
Set<Dependency> startupDependencies = Collections.emptySet();
if (startupDependenciesBlob != null) {
try {
startupDependencies = ThinArchiveUtils.parseStartupDependencies(
IOUtils.toString(startupDependenciesBlob.getBinaryStream(), StandardCharsets.UTF_8));
} catch (IOException | InvalidHashException e) {
// This should never happen unless the file is malformed in the database.
// The file was already validated when the project was uploaded.
throw new SQLException(e);
}
}
final ProjectFileHandler handler =
new ProjectFileHandler(projectId, version, uploadTime, uploader, fileType, fileName, numChunks, md5,
startupDependencies, resourceId, uploaderIpAddr);
handlers.add(handler);
} while (rs.next());
return handlers;
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public Integer handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return 0;
}
return rs.getInt(1);
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<byte[]> handle(final ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
final List<byte[]> data = new ArrayList<>();
do {
final byte[] bytes = rs.getBytes(1);
data.add(bytes);
} while (rs.next());
return data;
}
|
This is a JDBC Handler collection place for all project handler classes.
|
handle
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectHandlerSet.java
|
Apache-2.0
|
@Override
public List<Project> fetchAllActiveProjects() throws ProjectManagerException {
final ProjectResultHandler handler = new ProjectResultHandler();
List<Project> projects = null;
try {
projects = this.dbOperator.query(ProjectResultHandler.SELECT_ALL_ACTIVE_PROJECTS, handler);
// if Upload Lock feature is turned off, reset project upload lock to false.
if (!uploadProjectLockFeatureEnabled) {
projects.forEach(project -> project.setUploadLock(false));
}
} catch (final SQLException ex) {
logger.error(ProjectResultHandler.SELECT_ALL_ACTIVE_PROJECTS + " failed.", ex);
throw new ProjectManagerException("Error retrieving all active projects", ex);
}
return projects;
}
|
This class implements ProjectLoader using new azkaban-db code to allow DB failover. TODO
kunkun-tang: This class is too long. In future, we should split {@link ProjectLoader} interface
and have multiple short class implementations.
|
fetchAllActiveProjects
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public Project fetchProjectById(final int id) throws ProjectManagerException {
Project project = null;
final List<Integer> ids = Collections.singletonList(id);
try {
final List<Project> projects = fetchProjectById(ids);
if (projects == null || projects.isEmpty()) {
throw new ProjectManagerException("No project with id " + id + " exists in db.");
}
project = projects.get(0);
} catch (final ProjectManagerException ex) {
throw new ProjectManagerException("Query for existing project failed. Project " + id, ex);
}
return project;
}
|
This class implements ProjectLoader using new azkaban-db code to allow DB failover. TODO
kunkun-tang: This class is too long. In future, we should split {@link ProjectLoader} interface
and have multiple short class implementations.
|
fetchProjectById
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public Project fetchProjectByName(final String name) throws ProjectManagerException {
Project project = null;
final ProjectResultHandler handler = new ProjectResultHandler();
// At most one active project with the same name exists in db.
try {
final List<Project> projects = this.dbOperator
.query(ProjectResultHandler.SELECT_ACTIVE_PROJECT_BY_NAME, handler, name);
if (projects.isEmpty()) {
return null;
}
project = projects.get(0);
} catch (final SQLException ex) {
logger.error(ProjectResultHandler.SELECT_ACTIVE_PROJECT_BY_NAME + " failed.", ex);
throw new ProjectManagerException(
ProjectResultHandler.SELECT_ACTIVE_PROJECT_BY_NAME + " failed.", ex);
}
return project;
}
|
This class implements ProjectLoader using new azkaban-db code to allow DB failover. TODO
kunkun-tang: This class is too long. In future, we should split {@link ProjectLoader} interface
and have multiple short class implementations.
|
fetchProjectByName
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public synchronized Project createNewProject(String name, String description, User creator)
throws ProjectManagerException {
return createNewProject(name, description, creator, null);
}
|
Creates a Project in the db.
<p>
It will throw an exception if it finds an active project of the same name, or the SQL fails
|
createNewProject
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public synchronized Project createNewProject(final String name, final String description,
final User creator, final SecurityTag securityTag)
throws ProjectManagerException {
final String INSERT_PROJECT =
"INSERT INTO projects ( name, active, modified_time, create_time, version, last_modified_by, description, enc_type, settings_blob, security_tag) values (?,?,?,?,?,?,?,?,?,?)";
final SQLTransaction<Integer> insertProject = transOperator -> {
final long time = System.currentTimeMillis();
return transOperator
.update(INSERT_PROJECT, name, true, time, time, null, creator.getUserId(), description,
this.defaultEncodingType.getNumVal(), null, securityTag == null ? null : securityTag.name());
};
// Insert project
try {
final int numRowsInserted = this.dbOperator.transaction(insertProject);
if (numRowsInserted == 0) {
throw new ProjectManagerException("No projects have been inserted.");
}
} catch (final SQLException ex) {
logger.error(INSERT_PROJECT + " failed.", ex);
throw new ProjectManagerException("Insert project" + name + " for existing project failed. ",
ex);
}
return fetchProjectByName(name);
}
|
Creates a Project in the db with security tag.
<p>
It will throw an exception if it finds an active project of the same name, or the SQL fails
|
createNewProject
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void uploadProjectFile(final int projectId, final int version, final File localFile,
final String uploader, final String uploaderIPAddr)
throws ProjectManagerException {
final long startMs = System.currentTimeMillis();
logger.info(String
.format("Uploading Project ID: %d file: %s [%d bytes]", projectId, localFile.getName(),
localFile.length()));
/*
* The below transaction uses one connection to do all operations. Ideally, we should commit
* after the transaction completes. However, uploadFile needs to commit every time when we
* upload any single chunk.
*
* Todo kunkun-tang: fix the transaction issue.
*/
final SQLTransaction<Integer> uploadProjectFileTransaction = transOperator -> {
/* Step 1: Update DB with new project info */
// Database storage does not support thin archives, so we just set the startupDependencies file to null.
addProjectToProjectVersions(transOperator, projectId, version, localFile, null, uploader,
computeHash(localFile), null, uploaderIPAddr);
transOperator.getConnection().commit();
/* Step 2: Upload File in chunks to DB */
final int chunks = uploadFileInChunks(transOperator, projectId, version, localFile);
/* Step 3: Update number of chunks in DB */
updateChunksInProjectVersions(transOperator, projectId, version, chunks);
return 1;
};
try {
this.dbOperator.transaction(uploadProjectFileTransaction);
} catch (final SQLException e) {
logger.error("upload project files failed.", e);
throw new ProjectManagerException("upload project files failed.", e);
}
final long duration = (System.currentTimeMillis() - startMs) / 1000;
logger.info(String.format("Uploaded Project ID: %d file: %s [%d bytes] in %d sec", projectId,
localFile.getName(),
localFile.length(), duration));
}
|
Creates a Project in the db with security tag.
<p>
It will throw an exception if it finds an active project of the same name, or the SQL fails
|
uploadProjectFile
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private byte[] computeHash(final File localFile) {
logger.info("Creating MD5 hash for upload " + localFile.getName());
final byte[] md5;
try {
md5 = HashUtils.MD5.getHashBytes(localFile);
} catch (final IOException e) {
throw new ProjectManagerException("Error getting MD5 hash.", e);
}
logger.info("MD5 hash created");
return md5;
}
|
Creates a Project in the db with security tag.
<p>
It will throw an exception if it finds an active project of the same name, or the SQL fails
|
computeHash
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void addProjectVersion(final int projectId, final int version, final File localFile,
final File startupDependencies, final String uploader, final byte[] md5,
final String resourceId, final String uploaderIPAddr) throws ProjectManagerException {
// when one transaction completes, it automatically commits.
final SQLTransaction<Integer> transaction = transOperator -> {
addProjectToProjectVersions(transOperator, projectId, version, localFile,
startupDependencies, uploader, md5, resourceId, uploaderIPAddr);
return 1;
};
try {
this.dbOperator.transaction(transaction);
} catch (final SQLException e) {
logger.error("addProjectVersion failed.", e);
throw new ProjectManagerException("addProjectVersion failed.", e);
}
}
|
Creates a Project in the db with security tag.
<p>
It will throw an exception if it finds an active project of the same name, or the SQL fails
|
addProjectVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private void addProjectToProjectVersions(
final DatabaseTransOperator transOperator,
final int projectId,
final int version,
final File localFile,
final File startupDependencies,
final String uploader,
final byte[] md5,
final String resourceId,
final String uploaderIPAddr) throws ProjectManagerException {
final long updateTime = System.currentTimeMillis();
final String INSERT_PROJECT_VERSION = "INSERT INTO project_versions "
+ "(project_id, version, upload_time, uploader, file_type, file_name, md5, num_chunks, resource_id, "
+ "startup_dependencies, uploader_ip_addr) values (?,?,?,?,?,?,?,?,?,?,?)";
try {
/*
* As we don't know the num_chunks before uploading the file, we initialize it to 0,
* and will update it after uploading completes.
*/
final String lowercaseFileExtension = FilenameUtils.getExtension(localFile.getName())
.toLowerCase();
// Get the startup dependencies input stream (or null if the file does not exist - indicating this is
// a fat archive).
final InputStream startupDependenciesStream = getStartupDependenciesInputStream(
startupDependencies);
// Perform the DB update
transOperator.update(INSERT_PROJECT_VERSION, projectId, version, updateTime,
uploader, lowercaseFileExtension, localFile.getName(), md5, 0, resourceId,
startupDependenciesStream, uploaderIPAddr);
} catch (final SQLException e) {
final String msg = String
.format("Error initializing project id: %d version: %d ", projectId, version);
logger.error(msg, e);
throw new ProjectManagerException(msg, e);
}
}
|
Insert a new version record to TABLE project_versions before uploading files.
<p>
The reason for this operation: When error chunking happens in remote mysql server, incomplete
file data remains in DB, and an SQL exception is thrown. If we don't have this operation before
uploading file, the SQL exception prevents AZ from creating the new version record in Table
project_versions. However, the Table project_files still reserve the incomplete files, which
causes troubles when uploading a new file: Since the version in TABLE project_versions is still
old, mysql will stop inserting new files to db.
<p>
Why this operation is safe: When AZ uploads a new zip file, it always fetches the latest
version proj_v from TABLE project_version, proj_v+1 will be used as the new version for the
uploading files.
<p>
Assume error chunking happens on day 1. proj_v is created for this bad file (old file version +
1). When we upload a new project zip in day2, new file in day 2 will use the new version
(proj_v + 1). When file uploading completes, AZ will clean all old chunks in DB afterward.
|
addProjectToProjectVersions
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private InputStream getStartupDependenciesInputStream(final File startupDependencies) {
try {
// If startupDependencies is null, we assume this is a fat archive and return null. If it is not null,
// we assume the file exists and return an input stream for the file.
return startupDependencies != null ? new FileInputStream(startupDependencies) : null;
} catch (final FileNotFoundException e) {
// This shouldn't happen, the file should always exist if it is non-null.
throw new RuntimeException(e);
}
}
|
Insert a new version record to TABLE project_versions before uploading files.
<p>
The reason for this operation: When error chunking happens in remote mysql server, incomplete
file data remains in DB, and an SQL exception is thrown. If we don't have this operation before
uploading file, the SQL exception prevents AZ from creating the new version record in Table
project_versions. However, the Table project_files still reserve the incomplete files, which
causes troubles when uploading a new file: Since the version in TABLE project_versions is still
old, mysql will stop inserting new files to db.
<p>
Why this operation is safe: When AZ uploads a new zip file, it always fetches the latest
version proj_v from TABLE project_version, proj_v+1 will be used as the new version for the
uploading files.
<p>
Assume error chunking happens on day 1. proj_v is created for this bad file (old file version +
1). When we upload a new project zip in day2, new file in day 2 will use the new version
(proj_v + 1). When file uploading completes, AZ will clean all old chunks in DB afterward.
|
getStartupDependenciesInputStream
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private int uploadFileInChunks(final DatabaseTransOperator transOperator, final int projectId,
final int version, final File localFile)
throws ProjectManagerException {
// Really... I doubt we'll get a > 2gig file. So int casting it is!
final byte[] buffer = new byte[CHUCK_SIZE];
final String INSERT_PROJECT_FILES =
"INSERT INTO project_files (project_id, version, chunk, size, file) values (?,?,?,?,?)";
BufferedInputStream bufferedStream = null;
int chunk = 0;
try {
bufferedStream = new BufferedInputStream(new FileInputStream(localFile));
int size = bufferedStream.read(buffer);
while (size >= 0) {
logger.info("Read bytes for " + localFile.getName() + " size:" + size);
byte[] buf = buffer;
if (size < buffer.length) {
buf = Arrays.copyOfRange(buffer, 0, size);
}
try {
logger.info("Running update for " + localFile.getName() + " chunk " + chunk);
transOperator.update(INSERT_PROJECT_FILES, projectId, version, chunk, size, buf);
/*
* We enforce az committing to db when uploading every single chunk,
* in order to reduce the transaction duration and conserve sql server resources.
*
* If the files to be uploaded is very large and we don't commit every single chunk,
* the remote mysql server will run into memory troubles.
*/
transOperator.getConnection().commit();
logger.info("Finished update for " + localFile.getName() + " chunk " + chunk);
} catch (final SQLException e) {
throw new ProjectManagerException("Error Chunking during uploading files to db...");
}
++chunk;
size = bufferedStream.read(buffer);
}
} catch (final IOException e) {
throw new ProjectManagerException(
String.format(
"Error chunking file. projectId: %d, version: %d, file:%s[%d bytes], chunk: %d",
projectId,
version, localFile.getName(), localFile.length(), chunk));
} finally {
IOUtils.closeQuietly(bufferedStream);
}
return chunk;
}
|
Insert a new version record to TABLE project_versions before uploading files.
<p>
The reason for this operation: When error chunking happens in remote mysql server, incomplete
file data remains in DB, and an SQL exception is thrown. If we don't have this operation before
uploading file, the SQL exception prevents AZ from creating the new version record in Table
project_versions. However, the Table project_files still reserve the incomplete files, which
causes troubles when uploading a new file: Since the version in TABLE project_versions is still
old, mysql will stop inserting new files to db.
<p>
Why this operation is safe: When AZ uploads a new zip file, it always fetches the latest
version proj_v from TABLE project_version, proj_v+1 will be used as the new version for the
uploading files.
<p>
Assume error chunking happens on day 1. proj_v is created for this bad file (old file version +
1). When we upload a new project zip in day2, new file in day 2 will use the new version
(proj_v + 1). When file uploading completes, AZ will clean all old chunks in DB afterward.
|
uploadFileInChunks
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private void updateChunksInProjectVersions(final DatabaseTransOperator transOperator,
final int projectId, final int version, final int chunk)
throws ProjectManagerException {
final String UPDATE_PROJECT_NUM_CHUNKS =
"UPDATE project_versions SET num_chunks=? WHERE project_id=? AND version=?";
try {
transOperator.update(UPDATE_PROJECT_NUM_CHUNKS, chunk, projectId, version);
transOperator.getConnection().commit();
} catch (final SQLException e) {
logger.error("Error updating project " + projectId + " : chunk_num " + chunk, e);
throw new ProjectManagerException(
"Error updating project " + projectId + " : chunk_num " + chunk, e);
}
}
|
we update num_chunks's actual number to db here.
|
updateChunksInProjectVersions
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public ProjectFileHandler fetchProjectMetaData(final int projectId, final int version) {
final ProjectVersionResultHandler pfHandler = new ProjectVersionResultHandler();
try {
final List<ProjectFileHandler> projectFiles =
this.dbOperator
.query(ProjectVersionResultHandler.SELECT_PROJECT_VERSION, pfHandler, projectId,
version);
if (projectFiles == null || projectFiles.isEmpty()) {
return null;
}
return projectFiles.get(0);
} catch (final SQLException ex) {
logger.error("Query for uploaded file for project id " + projectId + " failed.", ex);
throw new ProjectManagerException(
"Query for uploaded file for project id " + projectId + " failed.", ex);
}
}
|
we update num_chunks's actual number to db here.
|
fetchProjectMetaData
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public ProjectFileHandler getUploadedFile(final int projectId, final int version)
throws ProjectManagerException {
final ProjectFileHandler projHandler = fetchProjectMetaData(projectId, version);
if (projHandler == null) {
return null;
}
final int numChunks = projHandler.getNumChunks();
if (numChunks <= 0) {
throw new ProjectManagerException(String.format("Got numChunks=%s for version %s of project "
+ "%s - seems like this version has been cleaned up already, because enough newer "
+ "versions have been uploaded. To increase the retention of project versions, set "
+ "%s", numChunks, version, projectId,
ConfigurationKeys.PROJECT_VERSION_RETENTION));
}
BufferedOutputStream bStream = null;
File file;
try {
try {
file = File
.createTempFile(projHandler.getFileName(), String.valueOf(version), this.tempDir);
bStream = new BufferedOutputStream(new FileOutputStream(file));
} catch (final IOException e) {
throw new ProjectManagerException("Error creating temp file for stream.");
}
final int collect = 5;
int fromChunk = 0;
int toChunk = collect;
do {
final ProjectFileChunkResultHandler chunkHandler = new ProjectFileChunkResultHandler();
List<byte[]> data = null;
try {
data = this.dbOperator
.query(ProjectFileChunkResultHandler.SELECT_PROJECT_CHUNKS_FILE, chunkHandler,
projectId,
version, fromChunk, toChunk);
} catch (final SQLException e) {
logger.error(e);
throw new ProjectManagerException("Query for uploaded file for " + projectId + " failed.",
e);
}
try {
for (final byte[] d : data) {
bStream.write(d);
}
} catch (final IOException e) {
throw new ProjectManagerException("Error writing file", e);
}
// Add all the bytes to the stream.
fromChunk += collect;
toChunk += collect;
} while (fromChunk <= numChunks);
} finally {
IOUtils.closeQuietly(bStream);
}
// Check md5.
final byte[] md5;
try {
md5 = HashUtils.MD5.getHashBytes(file);
} catch (final IOException e) {
throw new ProjectManagerException("Error getting MD5 hash.", e);
}
if (Arrays.equals(projHandler.getMD5Hash(), md5)) {
logger.info("Md5 Hash is valid");
} else {
throw new ProjectManagerException(
String.format("Md5 Hash failed on project %s version %s retrieval of file %s. "
+ "Expected hash: %s , got hash: %s",
projHandler.getProjectId(), projHandler.getVersion(), file.getAbsolutePath(),
Arrays.toString(projHandler.getMD5Hash()), Arrays.toString(md5)));
}
projHandler.setLocalFile(file);
return projHandler;
}
|
we update num_chunks's actual number to db here.
|
getUploadedFile
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void changeProjectVersion(final Project project, final int version, final String user)
throws ProjectManagerException {
final long timestamp = System.currentTimeMillis();
try {
final String UPDATE_PROJECT_VERSION =
"UPDATE projects SET version=?,modified_time=?,last_modified_by=? WHERE id=?";
this.dbOperator.update(UPDATE_PROJECT_VERSION, version, timestamp, user, project.getId());
project.setLastModifiedTimestamp(timestamp);
project.setLastModifiedUser(user);
} catch (final SQLException e) {
logger.error("Error updating switching project version " + project.getName(), e);
throw new ProjectManagerException(
"Error updating switching project version " + project.getName(), e);
}
}
|
we update num_chunks's actual number to db here.
|
changeProjectVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void updatePermission(final Project project, final String name, final Permission perm,
final boolean isGroup)
throws ProjectManagerException {
final long updateTime = System.currentTimeMillis();
try {
if (this.dbOperator.getDataSource().allowsOnDuplicateKey()) {
final String INSERT_PROJECT_PERMISSION =
"INSERT INTO project_permissions (project_id, modified_time, name, permissions, isGroup) values (?,?,?,?,?)"
+ "ON DUPLICATE KEY UPDATE modified_time = VALUES(modified_time), permissions = VALUES(permissions)";
this.dbOperator
.update(INSERT_PROJECT_PERMISSION, project.getId(), updateTime, name, perm.toFlags(),
isGroup);
} else {
final String MERGE_PROJECT_PERMISSION =
"MERGE INTO project_permissions (project_id, modified_time, name, permissions, isGroup) KEY (project_id, name) values (?,?,?,?,?)";
this.dbOperator
.update(MERGE_PROJECT_PERMISSION, project.getId(), updateTime, name, perm.toFlags(),
isGroup);
}
} catch (final SQLException ex) {
logger.error("Error updating project permission", ex);
throw new ProjectManagerException(
"Error updating project " + project.getName() + " permissions for " + name, ex);
}
if (isGroup) {
project.setGroupPermission(name, perm);
} else {
project.setUserPermission(name, perm);
}
}
|
we update num_chunks's actual number to db here.
|
updatePermission
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void updateProjectSettings(final Project project) throws ProjectManagerException {
updateProjectSettings(project, this.defaultEncodingType);
}
|
we update num_chunks's actual number to db here.
|
updateProjectSettings
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private byte[] convertJsonToBytes(final EncodingType type, final String json) throws IOException {
byte[] data = json.getBytes("UTF-8");
if (type == EncodingType.GZIP) {
data = GZIPUtils.gzipBytes(data);
}
return data;
}
|
we update num_chunks's actual number to db here.
|
convertJsonToBytes
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private void updateProjectSettings(final Project project, final EncodingType encType)
throws ProjectManagerException {
final String UPDATE_PROJECT_SETTINGS = "UPDATE projects SET enc_type=?, settings_blob=? WHERE id=?";
final String json = JSONUtils.toJSON(project.toObject());
byte[] data = null;
try {
data = convertJsonToBytes(encType, json);
logger.debug("NumChars: " + json.length() + " Gzip:" + data.length);
} catch (final IOException e) {
throw new ProjectManagerException("Failed to encode. ", e);
}
try {
this.dbOperator.update(UPDATE_PROJECT_SETTINGS, encType.getNumVal(), data, project.getId());
} catch (final SQLException e) {
logger.error("update Project Settings failed.", e);
throw new ProjectManagerException(
"Error updating project " + project.getName() + " version " + project.getVersion(), e);
}
}
|
we update num_chunks's actual number to db here.
|
updateProjectSettings
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void removePermission(final Project project, final String name, final boolean isGroup)
throws ProjectManagerException {
final String DELETE_PROJECT_PERMISSION =
"DELETE FROM project_permissions WHERE project_id=? AND name=? AND isGroup=?";
try {
this.dbOperator.update(DELETE_PROJECT_PERMISSION, project.getId(), name, isGroup);
} catch (final SQLException e) {
logger.error("remove Permission failed.", e);
throw new ProjectManagerException(
"Error deleting project " + project.getName() + " permissions for " + name, e);
}
if (isGroup) {
project.removeGroupPermission(name);
} else {
project.removeUserPermission(name);
}
}
|
we update num_chunks's actual number to db here.
|
removePermission
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void removeProject(final Project project, final String user)
throws ProjectManagerException {
final long updateTime = System.currentTimeMillis();
final String UPDATE_INACTIVE_PROJECT =
"UPDATE projects SET active=false,modified_time=?,last_modified_by=? WHERE id=?";
try {
this.dbOperator.update(UPDATE_INACTIVE_PROJECT, updateTime, user, project.getId());
} catch (final SQLException e) {
logger.error("error remove project " + project.getName(), e);
throw new ProjectManagerException("Error remove project " + project.getName(), e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
removeProject
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public boolean postEvent(final Project project, final EventType type, final String user,
final String message) {
final String INSERT_PROJECT_EVENTS =
"INSERT INTO project_events (project_id, event_type, event_time, username, message) values (?,?,?,?,?)";
final long updateTime = System.currentTimeMillis();
try {
this.dbOperator
.update(INSERT_PROJECT_EVENTS, project.getId(), type.getNumVal(), updateTime, user,
message);
} catch (final SQLException e) {
logger.error("post event failed,", e);
return false;
}
return true;
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
postEvent
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public List<ProjectLogEvent> getProjectEvents(final Project project, final int num,
final int skip) throws ProjectManagerException {
final ProjectLogsResultHandler logHandler = new ProjectLogsResultHandler();
List<ProjectLogEvent> events = null;
try {
events = this.dbOperator
.query(ProjectLogsResultHandler.SELECT_PROJECT_EVENTS_ORDER, logHandler, project.getId(),
num,
skip);
} catch (final SQLException e) {
logger.error("Error getProjectEvents, project " + project.getName(), e);
throw new ProjectManagerException("Error getProjectEvents, project " + project.getName(), e);
}
return events;
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
getProjectEvents
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void updateDescription(final Project project, final String description, final String user)
throws ProjectManagerException {
final String UPDATE_PROJECT_DESCRIPTION =
"UPDATE projects SET description=?,modified_time=?,last_modified_by=? WHERE id=?";
final long updateTime = System.currentTimeMillis();
try {
this.dbOperator
.update(UPDATE_PROJECT_DESCRIPTION, description, updateTime, user, project.getId());
project.setDescription(description);
project.setLastModifiedTimestamp(updateTime);
project.setLastModifiedUser(user);
} catch (final SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error update Description, project " + project.getName(),
e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
updateDescription
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public int getLatestProjectVersion(final Project project) throws ProjectManagerException {
final IntHandler handler = new IntHandler();
try {
return this.dbOperator.query(IntHandler.SELECT_LATEST_VERSION, handler, project.getId());
} catch (final SQLException e) {
logger.error(e);
throw new ProjectManagerException(
"Error marking project " + project.getName() + " as inactive", e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
getLatestProjectVersion
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void uploadFlows(final Project project, final int version, final Collection<Flow> flows)
throws ProjectManagerException {
// We do one at a time instead of batch... because well, the batch could be
// large.
try {
for (final Flow flow : flows) {
uploadFlow(project, version, flow, this.defaultEncodingType);
}
} catch (final IOException e) {
throw new ProjectManagerException("Flow Upload failed.", e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
uploadFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void uploadFlow(final Project project, final int version, final Flow flow)
throws ProjectManagerException {
logger.info("Uploading flow " + flow.getId());
try {
uploadFlow(project, version, flow, this.defaultEncodingType);
} catch (final IOException e) {
throw new ProjectManagerException("Flow Upload failed.", e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
uploadFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void updateFlow(final Project project, final int version, final Flow flow)
throws ProjectManagerException {
logger.info("Uploading flow " + flow.getId());
try {
final String json = JSONUtils.toJSON(flow.toObject());
final byte[] data = convertJsonToBytes(this.defaultEncodingType, json);
logger.info("Flow upload " + flow.getId() + " is byte size " + data.length);
final String UPDATE_FLOW =
"UPDATE project_flows SET encoding_type=?,json=? WHERE project_id=? AND version=? AND flow_id=?";
try {
this.dbOperator
.update(UPDATE_FLOW, this.defaultEncodingType.getNumVal(), data, project.getId(),
version, flow.getId());
} catch (final SQLException e) {
logger.error("Error inserting flow", e);
throw new ProjectManagerException("Error inserting flow " + flow.getId(), e);
}
} catch (final IOException e) {
throw new ProjectManagerException("Flow Upload failed.", e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
updateFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
private void uploadFlow(final Project project, final int version, final Flow flow,
final EncodingType encType)
throws ProjectManagerException, IOException {
final String json = JSONUtils.toJSON(flow.toObject());
final byte[] data = convertJsonToBytes(encType, json);
logger.info("Flow upload " + flow.getId() + " in project " + project.getName() + " is byte size " + data.length);
final String INSERT_FLOW =
"INSERT INTO project_flows (project_id, version, flow_id, modified_time, encoding_type, json) values (?,?,?,?,?,?)";
try {
this.dbOperator
.update(INSERT_FLOW, project.getId(), version, flow.getId(), System.currentTimeMillis(),
encType.getNumVal(), data);
} catch (final SQLException e) {
logger.error("Error inserting flow", e);
throw new ProjectManagerException("Error inserting flow " + flow.getId(), e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
uploadFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public Flow fetchFlow(final Project project, final String flowId) throws ProjectManagerException {
throw new UnsupportedOperationException("this method has not been instantiated.");
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
fetchFlow
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public List<Flow> fetchAllProjectFlows(final Project project) throws ProjectManagerException {
return fetchAllFlowsForProjects(Arrays.asList(project)).get(project);
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
fetchAllProjectFlows
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public Map<Project, List<Flow>> fetchAllFlowsForProjects(final List<Project> projects)
throws ProjectManagerException {
final SQLTransaction<Map<Project, List<Flow>>> transaction = transOperator -> {
final Map<Project, List<Flow>> projectToFlows = new HashMap();
for (final Project p : projects) {
projectToFlows.put(p, transOperator
.query(ProjectFlowsResultHandler.SELECT_ALL_PROJECT_FLOWS,
new ProjectFlowsResultHandler(), p.getId(),
p.getVersion()));
}
return projectToFlows;
};
try {
return this.dbOperator.transaction(transaction);
} catch (final SQLException e) {
throw new ProjectManagerException(
"Error fetching flows for " + projects.size() + " project(s).", e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
fetchAllFlowsForProjects
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void uploadProjectProperties(final Project project,
final int projectVersionOverride, final List<Props> properties)
throws ProjectManagerException {
for (final Props props : properties) {
try {
uploadProjectProperty(project, projectVersionOverride, props.getSource(), props);
} catch (final IOException e) {
throw new ProjectManagerException("Error uploading project property file", e);
}
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
uploadProjectProperties
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public synchronized FlowResourceRecommendation createFlowResourceRecommendation(final int projectId, final String flowId)
throws ProjectManagerException {
logger.info("Creating flow resource recommendation. ProjectId: " + projectId + ", FlowId: " + flowId);
final String INSERT_FLOW_RESOURCE_RECOMMENDATION =
"INSERT INTO project_flow_resource_recommendations (project_id, flow_id, modified_time) values (?,?,?)";
final SQLTransaction<Integer> insertFlowResourceRecommendation = transOperator ->
transOperator.update(INSERT_FLOW_RESOURCE_RECOMMENDATION, projectId, flowId, System.currentTimeMillis());;
// Insert flow resource recommendation
try {
final int numRowsInserted = this.dbOperator.transaction(insertFlowResourceRecommendation);
if (numRowsInserted == 0) {
throw new ProjectManagerException("No flow resource recommendations have been inserted.");
}
} catch (final SQLException ex) {
// Possibly failed due to duplicate key. If not, fetchFlowResourceRecommendation will
// return another exception back.
logger.warn("Insert flow resource recommendation projectId: " + projectId + ", flowId: " + flowId
+ " for existing project failed.", ex);
}
return fetchFlowResourceRecommendation(projectId, flowId);
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
createFlowResourceRecommendation
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public void updateFlowResourceRecommendation(final FlowResourceRecommendation flowResourceRecommendation)
throws ProjectManagerException {
logger.info("Updating flow resource recommendation " + flowResourceRecommendation.getId());
final String UPDATE_FLOW =
"UPDATE project_flow_resource_recommendations SET cpu_recommendation=?,memory_recommendation=?,"
+ "disk_recommendation=?,modified_time=? WHERE id=?";
try {
this.dbOperator
.update(UPDATE_FLOW, flowResourceRecommendation.getCpuRecommendation(),
flowResourceRecommendation.getMemoryRecommendation(),
flowResourceRecommendation.getDiskRecommendation(), System.currentTimeMillis(),
flowResourceRecommendation.getId());
} catch (final SQLException e) {
logger.error("Error updating flow resource recommendation", e);
throw new ProjectManagerException("Error updating flow resource recommendation " + flowResourceRecommendation.getId(), e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
updateFlowResourceRecommendation
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public FlowResourceRecommendation fetchFlowResourceRecommendation(final int projectId, final String flowId)
throws ProjectManagerException {
logger.info("Fetching flow resource recommendation. ProjectId: " + projectId + ", FlowId: " + flowId);
try {
return this.dbOperator
.query(ProjectFlowResourceRecommendationsResultHandler.SELECT_PROJECT_FLOW_RESOURCE_RECOMMENDATION,
new ProjectFlowResourceRecommendationsResultHandler(), projectId, flowId).get(0);
} catch (final SQLException | IndexOutOfBoundsException e) {
logger.error("Error fetching flow resource recommendation", e);
throw new ProjectManagerException("Error fetching flow resource recommendation. ProjectId: " + projectId + ", FlowId: " + flowId, e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
fetchFlowResourceRecommendation
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
@Override
public Map<Project, List<FlowResourceRecommendation>> fetchAllFlowResourceRecommendationsForProjects(final List<Project> projects)
throws ProjectManagerException {
final SQLTransaction<Map<Project, List<FlowResourceRecommendation>>> transaction = transOperator -> {
final Map<Project, List<FlowResourceRecommendation>> projectToFlows = new HashMap();
for (final Project p : projects) {
projectToFlows.put(p, transOperator
.query(ProjectFlowResourceRecommendationsResultHandler.SELECT_ALL_PROJECT_FLOW_RESOURCE_RECOMMENDATIONS,
new ProjectFlowResourceRecommendationsResultHandler(), p.getId()));
}
return projectToFlows;
};
try {
return this.dbOperator.transaction(transaction);
} catch (final SQLException e) {
throw new ProjectManagerException(
"Error fetching flow resource recommendations for " + projects.size() + " project(s).", e);
}
}
|
Todo kunkun-tang: the below implementation doesn't remove a project, but inactivate a project.
We should rewrite the code to follow the literal meanings.
|
fetchAllFlowResourceRecommendationsForProjects
|
java
|
azkaban/azkaban
|
azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
https://github.com/azkaban/azkaban/blob/master/azkaban-common/src/main/java/azkaban/project/JdbcProjectImpl.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.