repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllDiskFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllDiskFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.smartdata.action.annotation.ActionSignature;
/**
* An action to do all-disk for a file.
*/
@ActionSignature(
    actionId = "alldisk",
    displayName = "alldisk",
    usage = HdfsAction.FILE_PATH + " $file "
)
public class AllDiskFileAction extends MoveFileAction {
  /** Storage policy used by HDFS for files kept entirely on disk. */
  private static final String ALL_DISK_POLICY = "HOT";

  /**
   * Returns the storage policy this move action applies ("HOT",
   * i.e. all replicas on DISK).
   */
  @Override
  public String getStoragePolicy() {
    return ALL_DISK_POLICY;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UncacheFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UncacheFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import java.util.Map;
/**
* An action to un-cache a file.
*/
@ActionSignature(
    actionId = "uncache",
    displayName = "uncache",
    usage = HdfsAction.FILE_PATH + " $file "
)
public class UncacheFileAction extends HdfsAction {
  // Path of the file whose cache directive should be removed.
  private String fileName;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    fileName = args.get(FILE_PATH);
  }

  /**
   * Removes the HDFS cache directive associated with the configured file,
   * if one exists.
   */
  @Override
  protected void execute() throws Exception {
    if (fileName == null) {
      throw new IllegalArgumentException("File parameter is missing! ");
    }
    appendLog(
        String.format(
            "Action starts at %s : %s -> uncache", Utils.getFormatedCurrentTime(), fileName));
    removeDirective(fileName);
  }

  /**
   * Looks up the id of the first cache directive whose path matches the
   * given file, or {@code null} when the file is not cached.
   */
  @VisibleForTesting
  Long getCacheId(String fileName) throws Exception {
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
        .setPath(new Path(fileName))
        .build();
    RemoteIterator<CacheDirectiveEntry> entries = dfsClient.listCacheDirectives(filter);
    return entries.hasNext() ? entries.next().getInfo().getId() : null;
  }

  // Removes the matching cache directive; a no-op (with a log line) when
  // the file is not currently cached.
  private void removeDirective(String fileName) throws Exception {
    Long directiveId = getCacheId(fileName);
    if (directiveId != null) {
      dfsClient.removeCacheDirective(directiveId);
      return;
    }
    appendLog(String.format("File %s is not in cache. " +
        "So there is no need to execute this action.", fileName));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/BlockErasureCodeFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/BlockErasureCodeFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An action to do block level erasure code a file, for both Hadoop 2.x and Hadoop 3.x.
*/
public class BlockErasureCodeFileAction extends HdfsAction {
  private static final Logger LOG = LoggerFactory.getLogger(BlockErasureCodeFileAction.class);

  // NOTE(review): execute() is an empty placeholder — block-level erasure
  // coding appears not to be implemented yet. Confirm whether this stub is
  // still intentional; as written, scheduling this action silently does
  // nothing.
  @Override
  protected void execute() throws Exception {
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.Map;
import java.util.EnumSet;
/**
* An action to copy a single file from src to destination.
* If dest doesn't contains "hdfs" prefix, then destination will be set to
* current cluster, i.e., copy between dirs in current cluster.
* Note that destination should contains filename.
*/
@ActionSignature(
    actionId = "copy2s3",
    displayName = "copy2s3",
    usage = HdfsAction.FILE_PATH + " $src " + Copy2S3Action.DEST +
        " $dest " + Copy2S3Action.BUF_SIZE + " $size"
)
public class Copy2S3Action extends HdfsAction {
  // BUG FIX: logger previously used CopyFileAction.class (copy-paste),
  // which mis-attributed every log line from this action.
  private static final Logger LOG =
      LoggerFactory.getLogger(Copy2S3Action.class);
  public static final String BUF_SIZE = "-bufSize";
  public static final String SRC = HdfsAction.FILE_PATH;
  public static final String DEST = "-dest";

  private String srcPath;
  private String destPath;
  // Copy chunk size; default 64 KiB, overridable via -bufSize.
  private int bufferSize = 64 * 1024;
  private Configuration conf;

  @Override
  public void init(Map<String, String> args) {
    // Resolve the NameNode URL from SSM config; fall back to a default
    // Configuration when the context or URL is absent.
    try {
      this.conf = getContext().getConf();
      String nameNodeURL =
          this.conf.get(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY);
      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, nameNodeURL);
    } catch (NullPointerException e) {
      this.conf = new Configuration();
      appendLog("Conf error!, NameNode URL is not configured!");
    }
    super.init(args);
    this.srcPath = args.get(FILE_PATH);
    if (args.containsKey(DEST)) {
      this.destPath = args.get(DEST);
    }
    if (args.containsKey(BUF_SIZE)) {
      // parseInt avoids the needless boxing of Integer.valueOf.
      bufferSize = Integer.parseInt(args.get(BUF_SIZE));
    }
  }

  /**
   * Copies the source file to the S3 destination, then tags the source with
   * an xattr recording where it was copied.
   *
   * @throws IllegalArgumentException when src or dest is missing
   * @throws ActionException when the source file does not exist
   */
  @Override
  protected void execute() throws Exception {
    if (srcPath == null) {
      throw new IllegalArgumentException("File parameter is missing.");
    }
    if (destPath == null) {
      throw new IllegalArgumentException("Dest File parameter is missing.");
    }
    appendLog(
        String.format("Action starts at %s : Read %s",
            Utils.getFormatedCurrentTime(), srcPath));
    if (!dfsClient.exists(srcPath)) {
      throw new ActionException("CopyFile Action fails, file doesn't exist!");
    }
    appendLog(
        String.format("Copy from %s to %s", srcPath, destPath));
    copySingleFile(srcPath, destPath);
    appendLog("Copy Successfully!!");
    setXAttribute(srcPath, destPath);
    appendLog("SetXattr Successfully!!");
  }

  // Returns the length of the file, resolving "hdfs://" URIs through a
  // FileSystem instance and plain paths through the primary cluster client.
  private long getFileSize(String fileName) throws IOException {
    if (fileName.startsWith("hdfs")) {
      FileSystem fs = FileSystem.get(URI.create(fileName), conf);
      return fs.getFileStatus(new Path(fileName)).getLen();
    } else {
      return dfsClient.getFileInfo(fileName).getLen();
    }
  }

  // Tags the source file with xattr user.coldloc = destination path so the
  // cold location can be found later.
  private boolean setXAttribute(String src, String dest) throws IOException {
    String name = "user.coldloc";
    dfsClient.setXAttr(src, name, dest.getBytes(),
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    // BUG FIX: previously logged dest.getBytes(), which prints an opaque
    // array reference (e.g. "[B@1a2b3c") instead of the destination path,
    // and used the srcPath field rather than the src parameter.
    appendLog(" SetXattr feature is set - srcPath " + src + " destination " + dest);
    return true;
  }

  /**
   * Streams the source file to the S3 destination in bufferSize chunks.
   * Stops early if the stream ends before the expected length is read.
   */
  private boolean copySingleFile(String src, String dest) throws IOException {
    // try-with-resources replaces the manual finally-close; close order
    // (out first, then in) matches the original code.
    try (InputStream in = getSrcInputStream(src);
         OutputStream out = CompatibilityHelperLoader
             .getHelper().getS3outputStream(dest, conf)) {
      byte[] buf = new byte[bufferSize];
      long bytesRemaining = getFileSize(src);
      while (bytesRemaining > 0L) {
        int bytesToRead = (int) Math.min(bytesRemaining, buf.length);
        int bytesRead = in.read(buf, 0, bytesToRead);
        if (bytesRead == -1) {
          break;
        }
        out.write(buf, 0, bytesRead);
        bytesRemaining -= bytesRead;
      }
      return true;
    }
  }

  // Opens the source: "hdfs://"-prefixed paths may point at a remote
  // cluster; bare paths read from the primary HDFS via dfsClient.
  private InputStream getSrcInputStream(String src) throws IOException {
    if (!src.startsWith("hdfs")) {
      return FileSystem.get(URI.create(src), conf).open(new Path(src));
    } else {
      return dfsClient.open(src);
    }
  }

  // NOTE(review): currently unused — copySingleFile writes through the
  // CompatibilityHelper S3 stream instead. Kept for API stability; confirm
  // whether it can be removed.
  private OutputStream getDestOutPutStream(String dest) throws IOException {
    if (!dest.startsWith("s3")) {
      throw new IOException();
    }
    FileSystem fs = FileSystem.get(URI.create(dest), conf);
    return fs.create(new Path(dest), true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DecompressionAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DecompressionAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.action.ActionException;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.FileState;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
/**
* This class is used to decompress file.
*/
@ActionSignature(
    actionId = "decompress",
    displayName = "decompress",
    usage = HdfsAction.FILE_PATH
        + " $file "
        + CompressionAction.BUF_SIZE
        + " $bufSize "
)
public class DecompressionAction extends HdfsAction {
  public static final Logger LOG =
      LoggerFactory.getLogger(DecompressionAction.class);
  public static final String COMPRESS_TMP = "-compressTmp";

  private Configuration conf;
  // Fraction of the file decompressed so far, in [0, 1].
  private float progress;
  // Temp path the decompressed data is written to before the rename.
  private String compressTmpPath;
  private String filePath;
  // Copy chunk size; default 64 KiB, overridable via -bufSize.
  private int buffSize = 64 * 1024;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.conf = getContext().getConf();
    this.filePath = args.get(FILE_PATH);
    // This is a temp path for decompressing a file.
    this.compressTmpPath = args.containsKey(COMPRESS_TMP) ?
        args.get(COMPRESS_TMP) : compressTmpPath;
    // BUG FIX: the usage string advertises a bufSize argument, but init
    // previously ignored it; honor a plain-integer byte count when given.
    if (args.containsKey(CompressionAction.BUF_SIZE)
        && !args.get(CompressionAction.BUF_SIZE).isEmpty()) {
      this.buffSize = Integer.parseInt(args.get(CompressionAction.BUF_SIZE));
    }
    this.progress = 0.0F;
  }

  /**
   * Decompresses the configured file into a temp path, copies over the
   * original's owner/permission/storage policy, then atomically renames the
   * temp file over the original.
   *
   * @throws IllegalArgumentException when file or tmp path is missing
   * @throws ActionException when the file does not exist
   */
  @Override
  protected void execute() throws Exception {
    if (filePath == null) {
      throw new IllegalArgumentException("File path is missing.");
    }
    if (compressTmpPath == null) {
      throw new IllegalArgumentException(
          "Compression tmp path is not specified!");
    }
    if (!dfsClient.exists(filePath)) {
      throw new ActionException(
          "Failed to execute Compression Action: the given file doesn't exist!");
    }
    // Fetch the file status once and reuse it (previously getFileInfo was
    // called five separate times).
    HdfsFileStatus srcFileStatus = dfsClient.getFileInfo(filePath);
    // Consider directory case.
    if (srcFileStatus.isDir()) {
      appendLog("Decompression is not applicable to a directory.");
      return;
    }
    FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
    if (!(fileState instanceof CompressionFileState)) {
      appendLog("The file is already decompressed!");
      return;
    }
    OutputStream out = null;
    InputStream in = null;
    try {
      // No need to lock the file by append operation,
      // since compressed file cannot be modified.
      out = dfsClient.create(compressTmpPath, true);
      // Keep storage policy consistent.
      // The below statement is not supported on Hadoop-2.7.3 or CDH-5.10.1
      // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
      byte storagePolicyId = srcFileStatus.getStoragePolicy();
      String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
      // BUG FIX: an unknown policy id maps to null, which previously threw
      // a NullPointerException on equals().
      if (storagePolicyName != null && !storagePolicyName.equals("UNDEF")) {
        dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
      }
      in = dfsClient.open(filePath);
      outputDecompressedData(in, out, srcFileStatus.getLen());
      // Overwrite the original file with decompressed data.
      // (The pointless catch { throw new IOException(e) } re-wrap was
      // removed; IOExceptions now propagate with their original stack.)
      dfsClient.setOwner(compressTmpPath, srcFileStatus.getOwner(),
          srcFileStatus.getGroup());
      dfsClient.setPermission(compressTmpPath, srcFileStatus.getPermission());
      dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
      appendLog("The given file is successfully decompressed by codec: " +
          ((CompressionFileState) fileState).getCompressionImpl());
    } finally {
      if (out != null) {
        out.close();
      }
      if (in != null) {
        in.close();
      }
    }
  }

  /**
   * Copies {@code length} bytes from {@code in} to {@code out} in buffSize
   * chunks, updating {@link #progress} after each chunk. Stops early if the
   * input ends before the expected length.
   */
  private void outputDecompressedData(InputStream in, OutputStream out,
      long length) throws IOException {
    byte[] buff = new byte[buffSize];
    long remainSize = length;
    while (remainSize != 0) {
      int copySize = remainSize < buffSize ? (int) remainSize : buffSize;
      // readSize may be smaller than copySize. Here, readSize is the actual
      // number of bytes read to buff.
      int readSize = in.read(buff, 0, copySize);
      if (readSize == -1) {
        break;
      }
      // Use readSize instead of copySize.
      out.write(buff, 0, readSize);
      remainSize -= readSize;
      this.progress = (float) (length - remainSize) / length;
    }
  }

  /** Returns decompression progress as a fraction in [0, 1]. */
  @Override
  public float getProgress() {
    return this.progress;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/TruncateAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/TruncateAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
/**
* action to truncate file
*/
@ActionSignature(
    actionId = "truncate",
    displayName = "truncate",
    usage = HdfsAction.FILE_PATH + " $src " + TruncateAction.LENGTH + " $length"
)
public class TruncateAction extends HdfsAction {
  private static final Logger LOG = LoggerFactory.getLogger(TruncateAction.class);
  public static final String LENGTH = "-length";

  private String srcPath;
  // Target length; -1 marks "not supplied".
  private long length;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    srcPath = args.get(FILE_PATH);
    this.length = -1;
    if (args.containsKey(LENGTH)) {
      this.length = Long.parseLong(args.get(LENGTH));
    }
  }

  /**
   * Truncates the source file to the requested length.
   *
   * @throws IllegalArgumentException when src/length is missing or length
   *         exceeds the current file length
   */
  @Override
  protected void execute() throws Exception {
    if (srcPath == null) {
      throw new IllegalArgumentException("File src is missing.");
    }
    if (length == -1) {
      throw new IllegalArgumentException("Length is missing");
    }
    truncateClusterFile(srcPath, length);
  }

  // Truncates srcFile: "hdfs://"-prefixed paths go through a dedicated
  // DistributedFileSystem (possibly a remote cluster); bare paths use the
  // primary cluster's dfsClient.
  private boolean truncateClusterFile(String srcFile, long length) throws IOException {
    if (srcFile.startsWith("hdfs")) {
      // TODO read conf from files
      Configuration conf = new Configuration();
      DistributedFileSystem fs = new DistributedFileSystem();
      // BUG FIX: the filesystem handle was previously leaked; close it
      // once the truncate call has completed.
      try {
        fs.initialize(URI.create(srcFile), conf);
        // Check the requested length against the current length.
        long oldLength = fs.getFileStatus(new Path(srcFile)).getLen();
        if (length > oldLength) {
          throw new IllegalArgumentException("Length is illegal");
        }
        // BUG FIX: pass the srcFile parameter (was the srcPath field,
        // which only coincidentally held the same value).
        return CompatibilityHelperLoader.getHelper().truncate(fs, srcFile, length);
      } finally {
        fs.close();
      }
    }
    long oldLength = dfsClient.getFileInfo(srcFile).getLen();
    if (length > oldLength) {
      throw new IllegalArgumentException("Length is illegal");
    }
    return CompatibilityHelperLoader.getHelper().truncate(dfsClient, srcFile, length);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DiskBalanceAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DiskBalanceAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An action to do disk balance for a data node.
*/
public class DiskBalanceAction extends HdfsAction {
private static final Logger LOG = LoggerFactory.getLogger(DiskBalanceAction.class);
@Override
protected void execute() throws Exception {
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.action.annotation.ActionSignature;
import java.util.EnumSet;
import java.util.Map;
/**
* An action to setXAttr to a given file.
*/
@ActionSignature(
    actionId = "setxattr",
    displayName = "setxattr",
    usage = HdfsAction.FILE_PATH + " $src " + SetXAttrAction.ATT_NAME +
        " $name " + SetXAttrAction.ATT_VALUE + " $value"
)
public class SetXAttrAction extends HdfsAction {
  private static final Logger LOG =
      LoggerFactory.getLogger(SetXAttrAction.class);
  public static final String ATT_NAME = "-name";
  public static final String ATT_VALUE = "-value";

  private String srcPath;
  private String attName;
  private String attValue;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    // Map#get yields null for absent keys, which matches the previous
    // containsKey-guarded assignments (all fields default to null).
    this.srcPath = args.get(FILE_PATH);
    this.attName = args.get(ATT_NAME);
    this.attValue = args.get(ATT_VALUE);
  }

  /**
   * Sets the extended attribute {@code attName=attValue} on the given file
   * (creating or replacing it).
   */
  @Override
  protected void execute() throws Exception {
    requireArg(srcPath, "File parameter is missing.");
    requireArg(attName, "attName parameter is missing.");
    requireArg(attValue, "attValue parameter is missing.");
    if (!dfsClient.exists(srcPath)) {
      throw new ActionException("SetXAttr Action fails, file doesn't exist!");
    }
    LOG.debug("SetXattr path={} name={} value={}", srcPath, attName, attValue);
    appendLog(String.format("SetXattr path=%s name=%s value=%s",
        srcPath, attName, attValue));
    dfsClient.setXAttr(srcPath, attName, attValue.getBytes(),
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    appendLog("SetXattr Successfully!!");
  }

  // Throws IllegalArgumentException with the given message when a required
  // argument was not supplied.
  private static void requireArg(String value, String message) {
    if (value == null) {
      throw new IllegalArgumentException(message);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CompressionAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CompressionAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import com.google.gson.Gson;
import org.apache.commons.lang.SerializationUtils;
import org.apache.commons.lang.mutable.MutableFloat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.CompressionCodec;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.SmartCompressorStream;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.action.ActionException;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.model.CompressionFileInfo;
import org.smartdata.model.CompressionFileState;
import org.smartdata.utils.StringUtil;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
/**
* This action is used to compress a file.
*/
@ActionSignature(
actionId = "compress",
displayName = "compress",
usage =
HdfsAction.FILE_PATH
+ " $file "
+ CompressionAction.BUF_SIZE
+ " $bufSize "
+ CompressionAction.CODEC
+ " $codec"
)
public class CompressionAction extends HdfsAction {
private static final Logger LOG =
LoggerFactory.getLogger(CompressionAction.class);
public static final String BUF_SIZE = "-bufSize";
public static final String CODEC = "-codec";
private static List<String> compressionCodecList = CompressionCodec.CODEC_LIST;
private String filePath;
private Configuration conf;
private MutableFloat progress;
// bufferSize is also chunk size.
// This default value limits the minimum buffer size.
private int bufferSize = 1024 * 1024;
private int maxSplit;
// Can be set in config or action arg.
private String compressCodec;
// Specified by user in action arg.
private int userDefinedBufferSize;
public static final String XATTR_NAME =
SmartConstants.SMART_FILE_STATE_XATTR_NAME;
private CompressionFileInfo compressionFileInfo;
private CompressionFileState compressionFileState;
private String compressTmpPath;
public static final String COMPRESS_TMP = "-compressTmp";
@Override
public void init(Map<String, String> args) {
  super.init(args);
  conf = getContext().getConf();
  // Codec and split count default from SSM configuration; action
  // arguments may override the codec below.
  compressCodec = conf.get(
      SmartConfKeys.SMART_COMPRESSION_CODEC,
      SmartConfKeys.SMART_COMPRESSION_CODEC_DEFAULT);
  maxSplit = conf.getInt(
      SmartConfKeys.SMART_COMPRESSION_MAX_SPLIT,
      SmartConfKeys.SMART_COMPRESSION_MAX_SPLIT_DEFAULT);
  filePath = args.get(FILE_PATH);
  if (args.containsKey(BUF_SIZE) && !args.get(BUF_SIZE).isEmpty()) {
    // Accepts human-readable sizes such as "1MB".
    userDefinedBufferSize = (int) StringUtil.parseToByte(args.get(BUF_SIZE));
  }
  String codecArg = args.get(CODEC);
  if (codecArg != null) {
    compressCodec = codecArg;
  }
  // Temp path used while compressing the file.
  if (args.containsKey(COMPRESS_TMP)) {
    compressTmpPath = args.get(COMPRESS_TMP);
  }
  progress = new MutableFloat(0.0F);
}
@Override
protected void execute() throws Exception {
  // Validate arguments and codec before touching HDFS.
  if (filePath == null) {
    throw new IllegalArgumentException("File path is missing.");
  }
  if (compressTmpPath == null) {
    throw new IllegalArgumentException("Compression tmp path is not specified!");
  }
  if (!compressionCodecList.contains(compressCodec)) {
    throw new ActionException(
        "Compression Action failed due to unsupported codec: " + compressCodec);
  }
  appendLog(
      String.format("Compression Action started at %s for %s",
          Utils.getFormatedCurrentTime(), filePath));
  if (!dfsClient.exists(filePath)) {
    throw new ActionException(
        "Failed to execute Compression Action: the given file doesn't exist!");
  }
  HdfsFileStatus srcFileStatus = dfsClient.getFileInfo(filePath);
  // Consider directory case: compression only applies to regular files.
  if (srcFileStatus.isDir()) {
    appendLog("Compression is not applicable to a directory.");
    return;
  }
  // Generate compressed file: record codec/buffer metadata in a
  // CompressionFileState that is later persisted via xattr.
  compressionFileState = new CompressionFileState(filePath, bufferSize, compressCodec);
  compressionFileState.setOriginalLength(srcFileStatus.getLen());
  OutputStream appendOut = null;
  DFSInputStream in = null;
  OutputStream out = null;
  try {
    if (srcFileStatus.getLen() == 0) {
      // Empty file: nothing to compress, no temp-file replacement needed.
      compressionFileInfo = new CompressionFileInfo(false, compressionFileState);
    } else {
      // Preserve replication and block size on the compressed copy.
      short replication = srcFileStatus.getReplication();
      long blockSize = srcFileStatus.getBlockSize();
      long fileSize = srcFileStatus.getLen();
      appendLog("File length: " + fileSize);
      bufferSize = getActualBuffSize(fileSize);
      // SmartDFSClient will fail to open compressing file with PROCESSING FileStage
      // set by Compression scheduler. But considering DfsClient may be used, we use
      // append operation to lock the file to avoid any modification.
      appendOut = CompatibilityHelperLoader.getHelper().
          getDFSClientAppend(dfsClient, filePath, bufferSize);
      in = dfsClient.open(filePath);
      out = dfsClient.create(compressTmpPath,
          true, replication, blockSize);
      // Keep storage policy consistent.
      // The below statement is not supported on Hadoop-2.7.3 or CDH-5.10.1
      // String storagePolicyName = dfsClient.getStoragePolicy(filePath).getName();
      byte storagePolicyId = srcFileStatus.getStoragePolicy();
      String storagePolicyName = SmartConstants.STORAGE_POLICY_MAP.get(storagePolicyId);
      if (!storagePolicyName.equals("UNDEF")) {
        dfsClient.setStoragePolicy(compressTmpPath, storagePolicyName);
      }
      compress(in, out);
      // Mirror ownership/permission of the original onto the temp file
      // before it replaces the original via rename.
      HdfsFileStatus destFileStatus = dfsClient.getFileInfo(compressTmpPath);
      dfsClient.setOwner(compressTmpPath, srcFileStatus.getOwner(), srcFileStatus.getGroup());
      dfsClient.setPermission(compressTmpPath, srcFileStatus.getPermission());
      compressionFileState.setCompressedLength(destFileStatus.getLen());
      appendLog("Compressed file length: " + destFileStatus.getLen());
      compressionFileInfo =
          new CompressionFileInfo(true, compressTmpPath, compressionFileState);
    }
    compressionFileState.setBufferSize(bufferSize);
    appendLog("Compression buffer size: " + bufferSize);
    appendLog("Compression codec: " + compressCodec);
    String compressionInfoJson = new Gson().toJson(compressionFileInfo);
    appendResult(compressionInfoJson);
    LOG.warn(compressionInfoJson);
    if (compressionFileInfo.needReplace()) {
      // Add compression state to the temp path before the rename.
      // Please make sure content written to the XAttr is less than 64K.
      dfsClient.setXAttr(compressionFileInfo.getTempPath(),
          XATTR_NAME, SerializationUtils.serialize(compressionFileState),
          EnumSet.of(XAttrSetFlag.CREATE));
      // Rename operation is moved from CompressionScheduler.
      // Thus, modification for original file will be avoided.
      dfsClient.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
    } else {
      // Empty-file case: attach compression state to the original path.
      dfsClient.setXAttr(filePath,
          XATTR_NAME, SerializationUtils.serialize(compressionFileState),
          EnumSet.of(XAttrSetFlag.CREATE));
    }
  } catch (IOException e) {
    throw new IOException(e);
  } finally {
    if (appendOut != null) {
      try {
        appendOut.close();
      } catch (IOException e) {
        // Hide the expected exception that the original file is missing.
      }
    }
    if (in != null) {
      in.close();
    }
    if (out != null) {
      out.close();
    }
  }
}
/**
 * Pipes {@code source} through a {@link SmartCompressorStream} into
 * {@code sink}; the shared {@code progress} field is updated (as a
 * percentage) while the conversion runs.
 */
private void compress(InputStream source, OutputStream sink) throws IOException {
  new SmartCompressorStream(source, sink, bufferSize, compressionFileState, progress)
      .convert();
}
/**
 * Chooses the effective compression buffer size for a file of
 * {@code fileSize} bytes: the largest of the user-defined size, the default
 * size, and the minimum size needed so the file splits into at most
 * {@code maxSplit} chunks (the database stores at most that many split
 * positions).
 */
private int getActualBuffSize(long fileSize) {
  int calculatedBufferSize = (int) (fileSize / maxSplit);
  LOG.debug("Calculated buffer size: " + calculatedBufferSize);
  LOG.debug("MaxSplit: " + maxSplit);
  // Explain (debug-only) why the user's value is being overridden, if it is.
  boolean userValueTooSmall =
      userDefinedBufferSize < bufferSize || userDefinedBufferSize < calculatedBufferSize;
  if (userValueTooSmall) {
    if (bufferSize <= calculatedBufferSize) {
      LOG.debug("User defined buffer size is too small, use the calculated buffer size:"
          + calculatedBufferSize);
    } else {
      LOG.debug("User defined buffer size is too small, use the default buffer size:"
          + bufferSize);
    }
  }
  // The effective size is the maximum of the three candidates.
  int larger = Math.max(userDefinedBufferSize, calculatedBufferSize);
  return Math.max(larger, bufferSize);
}
/** Returns the compression progress recorded in {@code progress} (0.0 at start). */
@Override
public float getProgress() {
  return progress.floatValue();
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.smartdata.action.annotation.ActionSignature;
/**
 * An action that moves a file to the {@code WARM} HDFS storage policy
 * (registered as the "onedisk" action).
 */
@ActionSignature(
    actionId = "onedisk",
    displayName = "onedisk",
    usage = HdfsAction.FILE_PATH + " $file "
)
public class OneDiskFileAction extends MoveFileAction {
  /** Storage policy enforced by this action. */
  private static final String POLICY = "WARM";

  @Override
  public String getStoragePolicy() {
    return POLICY;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConfKeys;
import java.util.Map;
import java.util.Random;
/**
 * An action to write a file with generated content. Can be used to test: 1. storage policy; 2.
 * stripping erasure coded file; 3. small file.
 *
 * <p>Arguments: file_path length [buffer_size, default=64k]
 */
@ActionSignature(
    actionId = "write",
    displayName = "write",
    usage =
        HdfsAction.FILE_PATH
            + " $file "
            + WriteFileAction.LENGTH
            + " $length "
            + WriteFileAction.BUF_SIZE
            + " $size"
)
public class WriteFileAction extends HdfsAction {
  public static final String LENGTH = "-length";
  public static final String BUF_SIZE = "-bufSize";

  private String filePath;
  // -1 marks "not provided"; execute() rejects it.
  private long length = -1;
  private int bufferSize = 64 * 1024;
  private Configuration conf;

  /**
   * Reads the target file path, required length and optional buffer size
   * from the action arguments and prepares the Hadoop configuration.
   */
  @Override
  public void init(Map<String, String> args) {
    try {
      this.conf = getContext().getConf();
      String nameNodeURL = this.conf.get(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY);
      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, nameNodeURL);
    } catch (NullPointerException e) {
      // Context or its conf may be absent (e.g. in tests); fall back to defaults.
      this.conf = new Configuration();
      appendLog("Conf error!, NameNode URL is not configured!");
    }
    super.init(args);
    this.filePath = args.get(FILE_PATH);
    if (args.containsKey(LENGTH)) {
      // parseLong/parseInt avoid the needless boxing of Long/Integer.valueOf.
      length = Long.parseLong(args.get(LENGTH));
    }
    if (args.containsKey(BUF_SIZE)) {
      this.bufferSize = Integer.parseInt(args.get(BUF_SIZE));
    }
  }

  /**
   * Writes {@code length} bytes of random data to {@code filePath},
   * overwriting any existing file.
   *
   * @throws IllegalArgumentException if the file path or length is missing
   * @throws Exception on HDFS I/O failure
   */
  @Override
  protected void execute() throws Exception {
    if (filePath == null) {
      throw new IllegalArgumentException("File parameter is missing! ");
    }
    if (length == -1) {
      throw new IllegalArgumentException("Write Action provides wrong length! ");
    }
    appendLog(
        String.format(
            "Action starts at %s : Write %s with length %s",
            Utils.getFormatedCurrentTime(), filePath, length));
    Path path = new Path(filePath);
    FileSystem fileSystem = path.getFileSystem(conf);
    int replication = fileSystem.getServerDefaults(path).getReplication();
    // NOTE(review): in this FileSystem.create overload the third argument is
    // the I/O buffer size, not the replication factor — verify intent.
    // try-with-resources guarantees the stream is closed even if a write fails
    // (the original leaked it on exception).
    try (FSDataOutputStream out = fileSystem.create(path, true, replication)) {
      // One random buffer is reused; only the trailing chunk is truncated.
      byte[] buffer = new byte[bufferSize];
      new Random().nextBytes(buffer);
      appendLog(String.format("Generate random data with length %d", length));
      for (long pos = 0; pos < length; pos += bufferSize) {
        long writeLength = pos + bufferSize < length ? bufferSize : length - pos;
        out.write(buffer, 0, (int) writeLength);
      }
    }
    appendLog("Write Successfully!");
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionFactory.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.AbstractActionFactory;
import org.smartdata.action.SmartAction;
import org.smartdata.hdfs.scheduler.ErasureCodingScheduler;
import java.util.Arrays;
import java.util.List;
/**
 * Built-in smart actions for HDFS system.
 *
 * <p>Registers every bundled HDFS action with the factory registry at
 * class-load time. Actions requiring HDFS-3-only APIs (erasure coding) are
 * loaded reflectively so this class still links against older Hadoop.
 */
public class HdfsActionFactory extends AbstractActionFactory {
  private static final Logger LOG = LoggerFactory.getLogger(HdfsActionFactory.class);

  // Fully-qualified names of erasure-coding actions; only resolvable when an
  // HDFS 3 client is on the classpath, hence loaded by reflection below.
  public static final List<String> HDFS3_ACTION_CLASSES = Arrays.asList(
      "org.smartdata.hdfs.action.ListErasureCodingPolicy",
      "org.smartdata.hdfs.action.CheckErasureCodingPolicy",
      "org.smartdata.hdfs.action.ErasureCodingAction",
      "org.smartdata.hdfs.action.UnErasureCodingAction",
      "org.smartdata.hdfs.action.AddErasureCodingPolicy",
      "org.smartdata.hdfs.action.RemoveErasureCodingPolicy",
      "org.smartdata.hdfs.action.EnableErasureCodingPolicy",
      "org.smartdata.hdfs.action.DisableErasureCodingPolicy");

  static {
    // Storage-policy movers.
    addAction(AllSsdFileAction.class);
    addAction(AllDiskFileAction.class);
    addAction(OneSsdFileAction.class);
    addAction(OneDiskFileAction.class);
    addAction(RamDiskFileAction.class);
    addAction(ArchiveFileAction.class);
    // Cache management.
    addAction(CacheFileAction.class);
    addAction(UncacheFileAction.class);
    // I/O and file-manipulation actions.
    addAction(ReadFileAction.class);
    addAction(WriteFileAction.class);
    addAction(CheckStorageAction.class);
    addAction(SetXAttrAction.class);
    // addAction("blockec", BlockErasureCodeFileAction.class);
    addAction(CopyFileAction.class);
    addAction(DeleteFileAction.class);
    addAction(RenameFileAction.class);
    addAction(ListFileAction.class);
    addAction(ConcatFileAction.class);
    addAction(AppendFileAction.class);
    addAction(MergeFileAction.class);
    addAction(MetaDataAction.class);
    addAction(Copy2S3Action.class);
    // Compression and small-file features.
    addAction(CompressionAction.class);
    addAction(DecompressionAction.class);
    addAction(CheckCompressAction.class);
    addAction(TruncateAction.class);
    addAction(Truncate0Action.class);
    addAction(SmallFileCompactAction.class);
    addAction(SmallFileUncompactAction.class);
    addAction(CheckSumAction.class);
    // addAction("list", ListFileAction.class);
    // addAction("fsck", FsckAction.class);
    // addAction("diskbalance", DiskBalanceAction.class);
    // addAction("clusterbalance", ClusterBalanceAction.class);
    // addAction("setstoragepolicy", SetStoragePolicyAction.class);
    if (ErasureCodingScheduler.isECSupported()) {
      // Prefer the thread context loader so the classes are found in
      // container/plugin deployments; fall back to the system loader.
      ClassLoader loader = Thread.currentThread().getContextClassLoader();
      if (loader == null) {
        loader = ClassLoader.getSystemClassLoader();
      }
      try {
        for (String classString : HDFS3_ACTION_CLASSES) {
          // Unchecked cast: the listed classes are known SmartAction subclasses.
          addAction((Class<SmartAction>) loader.loadClass(classString));
        }
      } catch (ClassNotFoundException ex) {
        LOG.error("Class not found!", ex);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MoveFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MoveFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import com.google.gson.Gson;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionType;
import org.smartdata.action.Utils;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.action.move.AbstractMoveFileAction;
import org.smartdata.hdfs.action.move.MoverExecutor;
import org.smartdata.hdfs.action.move.MoverStatus;
import org.smartdata.model.action.FileMovePlan;
import java.io.IOException;
import java.util.Map;
/**
 * An action to set and enforce storage policy for a file.
 *
 * <p>The concrete block movements are described by a {@link FileMovePlan}
 * passed through the {@code MOVE_PLAN} argument and carried out by a
 * {@link MoverExecutor}.
 */
public class MoveFileAction extends AbstractMoveFileAction {
  private static final Logger LOG = LoggerFactory.getLogger(MoveFileAction.class);
  private MoverStatus status;
  private String storagePolicy;
  private String fileName;
  private FileMovePlan movePlan;
  private SmartConf conf;

  public MoveFileAction() {
    super();
    this.actionType = ActionType.MoveFile;
    this.status = new MoverStatus();
  }

  /** Returns the mover status used to track per-block progress. */
  public MoverStatus getStatus() {
    return this.status;
  }

  /**
   * Parses the target file, storage policy and optional serialized move plan
   * from the action arguments.
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.fileName = args.get(FILE_PATH);
    this.conf = getContext().getConf();
    // Subclasses (allssd, onessd, ...) pin the policy by overriding
    // getStoragePolicy(); otherwise it is taken from the arguments.
    this.storagePolicy = getStoragePolicy() != null ?
        getStoragePolicy() : args.get(STORAGE_POLICY);
    if (args.containsKey(MOVE_PLAN)) {
      String plan = args.get(MOVE_PLAN);
      if (plan != null) {
        movePlan = new Gson().fromJson(plan, FileMovePlan.class);
        status.setTotalBlocks(movePlan.getBlockIds().size());
      }
    }
  }

  /**
   * Executes the move. For a directory only the storage policy is set; for a
   * file every replica listed in the move plan is relocated.
   *
   * @throws IllegalArgumentException if the file or move plan is missing
   * @throws IOException if any replica movement fails
   */
  @Override
  protected void execute() throws Exception {
    this.setDfsClient(HadoopUtil.getDFSClient(
        HadoopUtil.getNameNodeUri(conf), conf));
    if (fileName == null) {
      throw new IllegalArgumentException("File parameter is missing!");
    }
    if (movePlan == null) {
      throw new IllegalArgumentException("File move plan not specified.");
    }
    if (movePlan.isDir()) {
      // Directories carry no blocks; setting the policy is sufficient.
      dfsClient.setStoragePolicy(fileName, storagePolicy);
      appendLog("Directory moved successfully.");
      return;
    }
    int totalReplicas = movePlan.getBlockIds().size();
    this.appendLog(
        String.format(
            "Action starts at %s : %s -> %s with %d replicas to move in total.",
            Utils.getFormatedCurrentTime(), fileName, storagePolicy, totalReplicas));
    int numFailed = move();
    if (numFailed == 0) {
      appendLog("All scheduled " + totalReplicas + " replicas moved successfully.");
      if (movePlan.isBeingWritten() || recheckModification()) {
        // The file may have changed while we were moving blocks; tell the
        // scheduler not to treat the policy as fully enforced yet.
        appendResult("UpdateStoragePolicy=false");
        appendLog("NOTE: File may be changed during executing this action. "
            + "Will move the corresponding blocks later.");
      }
    } else {
      String res = numFailed + " of " + totalReplicas + " replicas movement failed.";
      appendLog(res);
      throw new IOException(res);
    }
  }

  /** Runs the mover and returns the number of failed replica movements. */
  private int move() throws Exception {
    int maxMoves = movePlan.getPropertyValueInt(FileMovePlan.MAX_CONCURRENT_MOVES, 10);
    int maxRetries = movePlan.getPropertyValueInt(FileMovePlan.MAX_NUM_RETRIES, 10);
    MoverExecutor executor =
        new MoverExecutor(status, getContext().getConf(), maxRetries, maxMoves);
    return executor.executeMove(movePlan, getResultOs(), getLogOs());
  }

  /**
   * Returns true if the file may have been modified while the move was in
   * progress (or its current state cannot be determined), so its blocks must
   * be re-examined later.
   */
  private boolean recheckModification() {
    try {
      HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
      if (fileStatus == null) {
        return true; // file no longer exists
      }
      boolean closed = dfsClient.isFileClosed(fileName);
      // Any mismatch against the snapshot captured in the move plan means
      // the file changed underneath us.
      return !closed
          || (movePlan.getFileId() != 0 && fileStatus.getFileId() != movePlan.getFileId())
          || fileStatus.getLen() != movePlan.getFileLength()
          || fileStatus.getModificationTime() != movePlan.getModificationTime();
    } catch (Exception e) {
      // Status lookup failed; be conservative and check again later.
      return true;
    }
  }

  @Override
  public float getProgress() {
    return this.status.getPercentage();
  }

  public String getStoragePolicy() {
    return storagePolicy;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.smartdata.action.ActionException;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import java.util.Map;
/**
 * An action to read a file. The read content will be discarded immediately, not storing onto disk.
 * Can be used to test: 1. cache file; 2. one-ssd/all-ssd file;
 *
 * <p>Arguments: file_path [buffer_size, default=64k]
 */
@ActionSignature(
    actionId = "read",
    displayName = "read",
    usage = HdfsAction.FILE_PATH + " $file " + ReadFileAction.BUF_SIZE + " $size"
)
public class ReadFileAction extends HdfsAction {
  public static final String BUF_SIZE = "-bufSize";
  private String filePath;
  private int bufferSize = 64 * 1024;

  /** Reads the file path and optional buffer size from the arguments. */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.filePath = args.get(FILE_PATH);
    if (args.containsKey(BUF_SIZE)) {
      // parseInt avoids the needless boxing of Integer.valueOf.
      bufferSize = Integer.parseInt(args.get(BUF_SIZE));
    }
  }

  /**
   * Streams the whole file from HDFS, discarding the data.
   *
   * @throws IllegalArgumentException if the file path argument is missing
   * @throws ActionException if the file does not exist
   */
  @Override
  protected void execute() throws Exception {
    if (filePath == null) {
      throw new IllegalArgumentException("File parameter is missing.");
    }
    appendLog(
        String.format("Action starts at %s : Read %s",
            Utils.getFormatedCurrentTime(), filePath));
    if (!dfsClient.exists(filePath)) {
      throw new ActionException("ReadFile Action fails, file " +
          filePath + " doesn't exist!");
    }
    // try-with-resources closes the stream even if a read throws
    // (the original leaked the stream on failure).
    try (DFSInputStream dfsInputStream = dfsClient.open(filePath)) {
      byte[] buffer = new byte[bufferSize];
      // Read from HDFS until EOF; the data is intentionally discarded.
      while (dfsInputStream.read(buffer, 0, bufferSize) != -1) {
        // no-op
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RamDiskFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RamDiskFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.smartdata.action.annotation.ActionSignature;
/**
 * An action that moves a file to the {@code LAZY_PERSIST} HDFS storage
 * policy (registered as the "ramdisk" action).
 */
@ActionSignature(
    actionId = "ramdisk",
    displayName = "ramdisk",
    usage = HdfsAction.FILE_PATH + " $file "
)
public class RamDiskFileAction extends MoveFileAction {
  /** Storage policy enforced by this action. */
  private static final String POLICY = "LAZY_PERSIST";

  @Override
  public String getStoragePolicy() {
    return POLICY;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SmallFileUncompactAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SmallFileUncompactAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.io.IOUtils;
import org.smartdata.SmartConstants;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.hdfs.HadoopUtil;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.Map;
/**
 * An action to recovery contents of compacted ssm small files.
 *
 * <p>Each small file's bytes are copied back out into a freshly created file
 * of the original name (with its original owner, permission and xattrs), and
 * finally the container file is deleted.
 */
@ActionSignature(
    actionId = "uncompact",
    displayName = "uncompact",
    usage = SmallFileUncompactAction.CONTAINER_FILE + " $container_file "
)
public class SmallFileUncompactAction extends HdfsAction {
  // Fraction of small files processed so far; reported by getProgress().
  private float status = 0f;
  private Configuration conf = null;
  private String smallFiles = null;
  private String xAttrNameFileState = null;
  private String xAttrNameCheckSum = null;
  private String containerFile = null;
  private DFSClient smartDFSClient = null;
  public static final String CONTAINER_FILE =
      SmallFileCompactAction.CONTAINER_FILE;

  /** Reads the JSON list of small files and the container file path. */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.conf = getContext().getConf();
    // Keep the SmartDFSClient handle: small files are opened through it
    // (presumably so reads are served from the container file — verify).
    this.smartDFSClient = dfsClient;
    this.xAttrNameFileState = SmartConstants.SMART_FILE_STATE_XATTR_NAME;
    this.xAttrNameCheckSum = SmartConstants.SMART_FILE_CHECKSUM_XATTR_NAME;
    this.smallFiles = args.get(FILE_PATH);
    this.containerFile = args.get(CONTAINER_FILE);
  }

  /**
   * Restores each listed small file from the container file, then deletes
   * the container file.
   *
   * @throws IllegalArgumentException if the small file list or container
   *     file argument is missing or empty
   */
  @Override
  protected void execute() throws Exception {
    // Set hdfs client by DFSClient rather than SmartDFSClient
    this.setDfsClient(HadoopUtil.getDFSClient(
        HadoopUtil.getNameNodeUri(conf), conf));
    // Get small file list
    if (smallFiles == null || smallFiles.isEmpty()) {
      throw new IllegalArgumentException(
          String.format("Invalid small files: %s.", smallFiles));
    }
    ArrayList<String> smallFileList = new Gson().fromJson(
        smallFiles, new TypeToken<ArrayList<String>>() {
        }.getType());
    if (smallFileList == null || smallFileList.isEmpty()) {
      throw new IllegalArgumentException(
          String.format("Invalid small files: %s.", smallFiles));
    }
    // Get container file path
    if (containerFile == null || containerFile.isEmpty()) {
      throw new IllegalArgumentException(
          String.format("Invalid container file: %s.", containerFile));
    }
    appendLog(String.format(
        "Action starts at %s : uncompact small files.",
        Utils.getFormatedCurrentTime()));
    // Index-based loop: replaces the O(n^2) indexOf() lookup previously used
    // for progress, which also misreported progress for duplicate paths.
    for (int i = 0; i < smallFileList.size(); i++) {
      String smallFile = smallFileList.get(i);
      if ((smallFile != null) && !smallFile.isEmpty()
          && dfsClient.exists(smallFile)) {
        DFSInputStream in = null;
        OutputStream out = null;
        try {
          // Get compact input stream
          in = smartDFSClient.open(smallFile);
          // Save original metadata of small file and delete original small file
          HdfsFileStatus fileStatus = dfsClient.getFileInfo(smallFile);
          Map<String, byte[]> xAttr = dfsClient.getXAttrs(smallFile);
          dfsClient.delete(smallFile, false);
          // Create new small file
          out = dfsClient.create(smallFile, true);
          // Copy contents to original small file
          IOUtils.copyBytes(in, out, 4096);
          // Reset file meta data
          resetFileMeta(smallFile, fileStatus, xAttr);
          // Set status and update log
          this.status = (i + 1.0f) / smallFileList.size();
          appendLog(String.format("Uncompact %s successfully.", smallFile));
        } finally {
          if (in != null) {
            in.close();
          }
          if (out != null) {
            out.close();
          }
        }
      }
    }
    dfsClient.delete(containerFile, false);
    appendLog(String.format("Uncompact all the small files of %s successfully.", containerFile));
  }

  /**
   * Reset meta data of small file. We should exclude the setting for
   * xAttrNameFileState or xAttrNameCheckSum.
   */
  private void resetFileMeta(String path, HdfsFileStatus fileStatus,
      Map<String, byte[]> xAttr) throws IOException {
    dfsClient.setOwner(path, fileStatus.getOwner(), fileStatus.getGroup());
    dfsClient.setPermission(path, fileStatus.getPermission());
    for (Map.Entry<String, byte[]> entry : xAttr.entrySet()) {
      // Skip SSM bookkeeping xattrs; they describe the compacted state.
      if (!entry.getKey().equals(xAttrNameFileState)
          && !entry.getKey().equals(xAttrNameCheckSum)) {
        dfsClient.setXAttr(path, entry.getKey(), entry.getValue(),
            EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
      }
    }
  }

  @Override
  public float getProgress() {
    return this.status;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneSsdFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneSsdFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.annotation.ActionSignature;
/**
 * An action to do one-ssd for a file: moves it to the {@code ONE_SSD}
 * HDFS storage policy.
 */
@ActionSignature(
    actionId = "onessd",
    displayName = "onessd",
    usage = HdfsAction.FILE_PATH + " $file "
)
public class OneSsdFileAction extends MoveFileAction {
  // Unused logger removed; this class contains no logging.

  /** Storage policy enforced by this action. */
  private static final String POLICY = "ONE_SSD";

  @Override
  public String getStoragePolicy() {
    return POLICY;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllSsdFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllSsdFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.annotation.ActionSignature;
/**
 * An action to do all-ssd for a file: moves it to the {@code ALL_SSD}
 * HDFS storage policy.
 */
@ActionSignature(
    actionId = "allssd",
    displayName = "allssd",
    usage = HdfsAction.FILE_PATH + " $file "
)
public class AllSsdFileAction extends MoveFileAction {
  // Unused logger removed; this class contains no logging.

  /** Storage policy enforced by this action. */
  private static final String POLICY = "ALL_SSD";

  @Override
  public String getStoragePolicy() {
    return POLICY;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ClusterBalanceAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ClusterBalanceAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An action to do balance for a cluster.
*/
public class ClusterBalanceAction extends HdfsAction {
  private static final Logger LOG = LoggerFactory.getLogger(ClusterBalanceAction.class);

  // NOTE(review): cluster balancing is not implemented yet -- execute() is an
  // empty placeholder, so running this action is currently a no-op.
  @Override
  protected void execute() throws Exception {
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/Truncate0Action.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/Truncate0Action.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.annotation.ActionSignature;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.Map;
/**
* action to set file length to zero
*/
@ActionSignature(
actionId = "truncate0",
displayName = "truncate0",
usage = HdfsAction.FILE_PATH + " $src "
)
public class Truncate0Action extends HdfsAction {
  // Fixed: the logger was previously bound to TruncateAction.class
  // (copy-paste error).
  private static final Logger LOG = LoggerFactory.getLogger(Truncate0Action.class);
  // Path of the file to truncate to zero length.
  private String srcPath;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    srcPath = args.get(FILE_PATH);
  }

  @Override
  protected void execute() throws Exception {
    if (srcPath == null) {
      throw new IllegalArgumentException("File src is missing.");
    }
    setLen2Zero(srcPath);
  }

  /**
   * Sets the file length to zero while preserving its metadata.
   * Paths with an "hdfs" scheme prefix go through a freshly initialized
   * remote {@link DistributedFileSystem}; other paths use the local
   * cluster's {@link DFSClient}.
   */
  private boolean setLen2Zero(String srcPath) throws IOException {
    if (srcPath.startsWith("hdfs")) {
      // TODO read conf from files
      Configuration conf = new Configuration();
      DistributedFileSystem fs = new DistributedFileSystem();
      fs.initialize(URI.create(srcPath), conf);
      return setLen2Zero(fs, srcPath);
    } else {
      return setLen2Zero(dfsClient, srcPath);
    }
  }

  /**
   * Emulates truncate-to-zero by deleting and re-creating the file, then
   * restoring the saved metadata (owner, permission, replication, xattrs,
   * access time).
   */
  private boolean setLen2Zero(DFSClient client, String src) throws IOException {
    // Save the metadata before the file is deleted.
    HdfsFileStatus fileStatus = client.getFileInfo(src);
    if (fileStatus == null) {
      throw new IOException("File does not exist: " + src);
    }
    Map<String, byte[]> xAttrs = client.getXAttrs(src);
    // Delete the file.
    client.delete(src, true);
    // Re-create as an empty file and close the stream so the file is
    // finalized (the original leaked the output stream, leaving the file
    // open / under construction).
    client.create(src, true).close();
    // Restore metadata.
    client.setOwner(src, fileStatus.getOwner(), fileStatus.getGroup());
    client.setPermission(src, fileStatus.getPermission());
    client.setReplication(src, fileStatus.getReplication());
    // NOTE(review): the policy is hard-coded rather than restored from the
    // original file -- confirm "Cold" is the intended target policy.
    client.setStoragePolicy(src, "Cold");
    // setTimes takes (mtime, atime): keep the new modification time and
    // restore the original access time (the original call had them swapped).
    client.setTimes(src, client.getFileInfo(src).getModificationTime(),
        fileStatus.getAccessTime());
    for (Map.Entry<String, byte[]> entry : xAttrs.entrySet()) {
      client.setXAttr(src, entry.getKey(), entry.getValue(),
          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    }
    return true;
  }

  /**
   * Same delete-and-recreate emulation, but through a (possibly remote)
   * {@link DistributedFileSystem}.
   */
  private boolean setLen2Zero(DistributedFileSystem fileSystem, String src) throws IOException {
    Path path = new Path(src);
    // Save the metadata before the file is deleted; getFileStatus throws
    // FileNotFoundException when the file is absent.
    FileStatus fileStatus = fileSystem.getFileStatus(path);
    Map<String, byte[]> xAttrs = fileSystem.getXAttrs(path);
    // Delete the file.
    fileSystem.delete(path, true);
    // Re-create empty and close the stream (the original leaked it).
    fileSystem.create(path, true).close();
    // Restore metadata.
    fileSystem.setOwner(path, fileStatus.getOwner(), fileStatus.getGroup());
    fileSystem.setPermission(path, fileStatus.getPermission());
    fileSystem.setReplication(path, fileStatus.getReplication());
    // NOTE(review): hard-coded policy -- confirm "Cold" is intended.
    fileSystem.setStoragePolicy(path, "Cold");
    // setTimes takes (mtime, atime); the original swapped the arguments.
    fileSystem.setTimes(path, fileSystem.getFileStatus(path).getModificationTime(),
        fileStatus.getAccessTime());
    for (Map.Entry<String, byte[]> entry : xAttrs.entrySet()) {
      fileSystem.setXAttr(path, entry.getKey(), entry.getValue(),
          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    }
    return true;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ListFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ListFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import java.io.IOException;
import java.net.URI;
import java.text.DecimalFormat;
import java.text.SimpleDateFormat;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
/**
* An action to list files in a directory.
*/
@ActionSignature(
actionId = "list",
displayName = "list",
usage = HdfsAction.FILE_PATH + " $src1" + ListFileAction.RECURSIVELY + " $src2" + ListFileAction.DUMP + " $src3"
+ ListFileAction.HUMAN + " $src4"
)
public class ListFileAction extends HdfsAction {
  private static final Logger LOG = LoggerFactory.getLogger(ListFileAction.class);
  // Path to list; may come from FILE_PATH or be attached to an option flag.
  private String srcPath;
  private boolean recursively = false;
  private boolean dump = false;
  private boolean human = false;
  // Options
  public static final String RECURSIVELY = "-R";
  public static final String DUMP = "-d";
  public static final String HUMAN = "-h";

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    // Each option flag may also carry the path when FILE_PATH is absent.
    if (args.containsKey(RECURSIVELY)) {
      this.recursively = true;
      if (isUnset(srcPath)) {
        this.srcPath = args.get(RECURSIVELY);
      }
    }
    if (args.containsKey(DUMP)) {
      this.dump = true;
      if (isUnset(srcPath)) {
        this.srcPath = args.get(DUMP);
      }
    }
    if (args.containsKey(HUMAN)) {
      this.human = true;
      if (isUnset(srcPath)) {
        this.srcPath = args.get(HUMAN);
      }
    }
    if (isUnset(srcPath)) {
      this.srcPath = args.get(FILE_PATH);
    }
  }

  // The original compared Strings with == (identity), which never matches a
  // distinct empty string; use a proper null/empty check.
  private static boolean isUnset(String s) {
    return s == null || s.isEmpty();
  }

  @Override
  protected void execute() throws Exception {
    if (srcPath == null) {
      throw new IllegalArgumentException("File parameter is missing.");
    }
    appendLog(
        String.format("Action starts at %s : List %s", Utils.getFormatedCurrentTime(), srcPath));
    // List the files in the directory (or the single file itself).
    listDirectory(srcPath);
  }

  /**
   * Renders a byte count with a binary-unit suffix (K/M/G/T) for -h output.
   */
  private static String readableFileSize(long size) {
    if (size <= 0) {
      return "0";
    }
    final String[] units = new String[] { "", "K", "M", "G", "T" };
    int digitGroups = (int) (Math.log10(size) / Math.log10(1024));
    // Clamp so petabyte-range sizes do not index past the units table
    // (the original could throw ArrayIndexOutOfBoundsException).
    digitGroups = Math.min(digitGroups, units.length - 1);
    if (digitGroups == 0) {
      return Long.toString(size);
    }
    return new DecimalFormat("#,##0.#").format(size / Math.pow(1024, digitGroups))
        + " " + units[digitGroups];
  }

  private void listDirectory(String src) throws IOException {
    if (!src.startsWith("hdfs")) {
      // List via the local cluster's DFSClient.
      HdfsFileStatus hdfsFileStatus = dfsClient.getFileInfo(src);
      if (hdfsFileStatus == null) {
        appendLog("File not found!");
        return;
      }
      SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm");
      if (hdfsFileStatus.isDir() && !dump) {
        // Page through the listing: one listPaths call returns only a
        // partial listing for large directories (the original dropped
        // everything after the first page).
        byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
        DirectoryListing listing;
        do {
          listing = dfsClient.listPaths(src, startAfter);
          for (HdfsFileStatus status : listing.getPartialListing()) {
            logStatusLine(status, new Path(src), formatter);
            if (recursively && status.isDir()) {
              listDirectory(status.getFullPath(new Path(src)).toString());
            }
          }
          startAfter = listing.getLastName();
        } while (listing.hasMore());
      } else {
        logStatusLine(hdfsFileStatus, new Path(src), formatter);
      }
    } else {
      // List a remote directory through the generic FileSystem API.
      // TODO read conf from files
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(URI.create(src), conf);
      Path remotePath = new Path(src);
      FileStatus status = fs.getFileStatus(remotePath);
      if (status == null) {
        appendLog("File not found!");
        return;
      }
      if (status.isDirectory()) {
        RemoteIterator<FileStatus> pathIterator = fs.listStatusIterator(remotePath);
        while (pathIterator.hasNext()) {
          appendLog(String.format("%s", pathIterator.next().getPath()));
        }
      } else {
        appendLog(String.format("%s", src));
      }
    }
  }

  // Emits one ls-style line (type+permission, replication, owner, group,
  // size, mtime, full path) for the given status.
  private void logStatusLine(HdfsFileStatus status, Path parent, SimpleDateFormat formatter) {
    appendLog(
        String.format("%s%s %5d %s\t%s\t%13s %s %s", status.isDir() ? 'd' : '-',
            status.getPermission(), status.getReplication(),
            status.getOwner(), status.getGroup(),
            human ? readableFileSize(status.getLen()) : status.getLen(),
            formatter.format(status.getModificationTime()), status.getFullPath(parent)));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
/**
* An action to delete a single file in dest
* If dest doesn't contains "hdfs" prefix, then destination will be set to
* current cluster, i.e., delete file in current cluster.
* Note that destination should contains filename.
*/
@ActionSignature(
actionId = "delete",
displayName = "delete",
usage = HdfsAction.FILE_PATH + " $file"
)
public class DeleteFileAction extends HdfsAction {
  private static final Logger LOG =
      LoggerFactory.getLogger(DeleteFileAction.class);
  // Path of the file (or directory, deleted recursively) to remove.
  private String filePath;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.filePath = args.get(FILE_PATH);
  }

  @Override
  protected void execute() throws Exception {
    if (filePath == null) {
      throw new IllegalArgumentException("File parameter is missing.");
    }
    appendLog(
        String.format("Action starts at %s : Delete %s",
            Utils.getFormatedCurrentTime(), filePath));
    // Surface a failed delete instead of silently discarding the result
    // (the original ignored the return value).
    if (!deleteFile(filePath)) {
      throw new IOException("Failed to delete " + filePath);
    }
  }

  /**
   * Deletes the file (recursively for directories). Paths with an "hdfs"
   * scheme prefix are deleted on that remote cluster; other paths are
   * deleted on the local cluster via the DFSClient.
   *
   * @return true when the delete succeeded
   * @throws ActionException when the file does not exist
   */
  private boolean deleteFile(
      String filePath) throws IOException, ActionException {
    if (filePath.startsWith("hdfs")) {
      // Delete on a remote cluster.
      // TODO read conf from file
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(URI.create(filePath), conf);
      if (!fs.exists(new Path(filePath))) {
        throw new ActionException(
            "DeleteFile Action fails, file doesn't exist!");
      }
      // Propagate the actual result (the original always returned true
      // regardless of whether the delete succeeded).
      return fs.delete(new Path(filePath), true);
    } else {
      // Delete on the local cluster.
      if (!dfsClient.exists(filePath)) {
        throw new ActionException(
            "DeleteFile Action fails, file doesn't exist!");
      }
      appendLog(String.format("Delete %s", filePath));
      return dfsClient.delete(filePath, true);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.action.Utils;
import org.smartdata.action.annotation.ActionSignature;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
/**
* An action to rename a single file
* If dest doesn't contains "hdfs" prefix, then destination will be set to
* current cluster.
* Note that destination should contains filename.
*/
@ActionSignature(
actionId = "rename",
displayName = "rename",
usage = HdfsAction.FILE_PATH + " $src " + RenameFileAction.DEST_PATH +
" $dest"
)
public class RenameFileAction extends HdfsAction {
  private static final Logger LOG =
      LoggerFactory.getLogger(RenameFileAction.class);
  public static final String DEST_PATH = "-dest";
  private String srcPath;
  private String destPath;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.srcPath = args.get(FILE_PATH);
    if (args.containsKey(DEST_PATH)) {
      this.destPath = args.get(DEST_PATH);
    }
  }

  @Override
  protected void execute() throws Exception {
    if (srcPath == null) {
      throw new IllegalArgumentException("File parameter is missing.");
    }
    if (destPath == null) {
      throw new IllegalArgumentException("Dest File parameter is missing.");
    }
    appendLog(String.format("Action starts at %s : Rename %s to %s",
        Utils.getFormatedCurrentTime(), srcPath, destPath));
    if (!renameSingleFile(srcPath, destPath)) {
      throw new IOException("Failed to rename " + srcPath + " -> " + destPath);
    }
  }

  /**
   * Renames src to dest. Both paths must either carry an "hdfs" scheme
   * prefix (same remote cluster) or both be scheme-less (local cluster).
   *
   * @return true when the rename succeeded
   * @throws ActionException when the paths belong to different clusters or
   *         the source does not exist
   */
  private boolean renameSingleFile(String src,
      String dest) throws IOException, ActionException {
    if (dest.startsWith("hdfs") && src.startsWith("hdfs")) {
      // Rename within the same remote cluster.
      // TODO read conf from files
      // Null-safe host comparison: URI.getHost() may return null for URIs
      // without an authority (the original could throw NPE here).
      String srcHost = URI.create(src).getHost();
      String destHost = URI.create(dest).getHost();
      if (srcHost == null ? destHost != null : !srcHost.equals(destHost)) {
        throw new ActionException("the file names are not in the same cluster");
      }
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(URI.create(dest), conf);
      return fs.rename(new Path(src), new Path(dest));
    } else if (!dest.startsWith("hdfs") && !src.startsWith("hdfs")) {
      // Rename within the local cluster (overwrite semantics of Rename.NONE).
      if (!dfsClient.exists(src)) {
        throw new ActionException("the source file does not exist");
      }
      dfsClient.rename(src, dest, Options.Rename.NONE);
      return true;
    } else {
      // TODO handle the case when dest prefixed with the default hdfs uri
      // while src not, the two path are in the same cluster
      throw new ActionException("the file names are not in the same cluster");
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/NameNodeConnector.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/NameNodeConnector.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action.move;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.balancer.KeyManager;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* The class provides utilities for accessing a NameNode.
*/
public class NameNodeConnector implements Closeable {
  private static final Log LOG = LogFactory.getLog(NameNodeConnector.class);

  private final URI nameNodeUri;
  // Block pool ID of the connected namespace, fetched at construction time.
  private final String blockpoolID;
  // RPC proxy for NamenodeProtocol (balancer-style server operations).
  private final NamenodeProtocol namenode;
  // RPC proxy for ClientProtocol (client-facing operations).
  private final ClientProtocol client;
  // Manages block access tokens for data transfer; closed in close().
  private final KeyManager keyManager;
  // Passed into the ClientProtocol proxy creation so the proxy layer can
  // record whether authentication fell back to simple auth.
  final AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
  private final DistributedFileSystem fs;

  /**
   * Builds the RPC proxies, filesystem handle, and key manager for the given
   * NameNode. Order matters: the namenode proxy must exist before
   * versionRequest(), and the filesystem before reading server defaults.
   *
   * @param nameNodeUri URI of the NameNode to connect to
   * @param conf configuration used for proxy creation and security settings
   * @throws IOException if a proxy or the filesystem handle cannot be created
   */
  public NameNodeConnector(URI nameNodeUri, Configuration conf)
      throws IOException {
    this.nameNodeUri = nameNodeUri;
    this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
        NamenodeProtocol.class).getProxy();
    this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, fallbackToSimpleAuth).getProxy();
    this.fs = (DistributedFileSystem) FileSystem.get(nameNodeUri, conf);
    final NamespaceInfo namespaceinfo = namenode.versionRequest();
    this.blockpoolID = namespaceinfo.getBlockPoolID();
    // Whether encrypted data transfer is required comes from the server-side
    // defaults of the target filesystem.
    final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
    this.keyManager = new KeyManager(blockpoolID, namenode,
        defaults.getEncryptDataTransfer(), conf);
  }

  public DistributedFileSystem getDistributedFileSystem() {
    return fs;
  }

  /** @return the block pool ID */
  public String getBlockpoolID() {
    return blockpoolID;
  }

  /** @return live datanode storage reports. */
  public DatanodeStorageReport[] getLiveDatanodeStorageReport()
      throws IOException {
    return client.getDatanodeStorageReport(DatanodeReportType.LIVE);
  }

  /** @return the key manager */
  public KeyManager getKeyManager() {
    return keyManager;
  }

  // Closes only the key manager; the RPC proxies and the filesystem handle
  // are intentionally left open here.
  @Override
  public void close() {
    keyManager.close();
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + "[namenodeUri=" + nameNodeUri
        + ", bpid=" + blockpoolID + "]";
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/MoverStatus.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/MoverStatus.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action.move;
/**
* ActionStatus of Mover tool.
*/
public class MoverStatus {
  // Total number of blocks to move; each replica is counted as a block.
  private int totalBlocks;
  // Number of blocks moved so far.
  private int movedBlocks;

  public MoverStatus() {
    totalBlocks = 0;
    movedBlocks = 0;
  }

  /**
   * Returns the fraction of blocks moved so far, in [0, 1].
   * Returns 0 when the total is unknown (zero) to avoid division by zero.
   */
  public synchronized float getPercentage() {
    if (totalBlocks == 0) {
      return 0;
    }
    return movedBlocks * 1.0F / totalBlocks;
  }

  public synchronized int getTotalBlocks() {
    return totalBlocks;
  }

  public synchronized void setTotalBlocks(int blocks) {
    totalBlocks = blocks;
  }

  /**
   * Atomically adds {@code blocks} to the moved-block count.
   * All accessors are now synchronized: the original synchronized only this
   * method, so readers could observe stale values of the counters.
   *
   * @return the updated moved-block count
   */
  public synchronized int increaseMovedBlocks(int blocks) {
    movedBlocks += blocks;
    return movedBlocks;
  }

  public synchronized int getMovedBlocks() {
    return movedBlocks;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/AbstractMoveFileAction.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/AbstractMoveFileAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action.move;
import org.smartdata.hdfs.action.HdfsAction;
public abstract class AbstractMoveFileAction extends HdfsAction {
  // Argument key for the target storage policy name.
  public static final String STORAGE_POLICY = "-storagePolicy";
  // Argument key for the move plan; presumably a serialized FileMovePlan --
  // confirm against the schedulers that build this argument.
  public static final String MOVE_PLAN = "-movePlan";

  /** @return the storage policy name this concrete move action applies. */
  public abstract String getStoragePolicy();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/MoverExecutor.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/MoverExecutor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action.move;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.model.action.FileMovePlan;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A light-weight executor for Mover.
*/
public class MoverExecutor {
static final Logger LOG = LoggerFactory.getLogger(MoverExecutor.class);
private Configuration conf;
private URI namenode;
private String fileName;
private NameNodeConnector nnc;
private DFSClient dfsClient;
private SaslDataTransferClient saslClient;
private int concurrentMoves;
private int maxConcurrentMoves;
private int maxConcurrentMovesPerInst;
private int maxRetryTimes;
private ExecutorService moveExecutor;
private List<ReplicaMove> allMoves;
private Map<Long, DBlock> sourceBlockMap;
private Map<String, DatanodeInfo> sourceDatanodeMap;
private MoverStatus status;
private List<LocatedBlock> locatedBlocks;
private static AtomicInteger instances = new AtomicInteger(0);
public MoverExecutor(MoverStatus status, Configuration conf,
int maxRetryTimes, int maxConcurrentMoves) {
this.status = status;
this.conf = conf;
this.maxRetryTimes = maxRetryTimes;
this.maxConcurrentMoves = maxConcurrentMoves;
maxConcurrentMovesPerInst = conf.getInt(
SmartConfKeys.SMART_CMDLET_MOVER_MAX_CONCURRENT_BLOCKS_PER_SRV_INST_KEY,
SmartConfKeys.SMART_CMDLET_MOVER_MAX_CONCURRENT_BLOCKS_PER_SRV_INST_DEFAULT);
}
/**
* Execute a move action providing the schedule plan
* @param plan the schedule plan of mover
* @return number of failed moves
* @throws Exception
*/
public int executeMove(FileMovePlan plan, PrintStream resultOs, PrintStream logOs) throws Exception {
if (plan == null) {
throw new RuntimeException("Schedule plan for mover is null");
}
init(plan);
HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
if (fileStatus == null) {
throw new RuntimeException("File does not exist.");
}
if (fileStatus.isDir()) {
throw new RuntimeException("File path is a directory.");
}
if ((plan.getFileId() != 0 && fileStatus.getFileId() != plan.getFileId())
|| fileStatus.getLen() < plan.getFileLength()) {
throw new RuntimeException("File has been changed after this action generated.");
}
locatedBlocks = dfsClient.getLocatedBlocks(fileName, 0, plan.getFileLength()).getLocatedBlocks();
parseSchedulePlan(plan);
concurrentMoves = allMoves.size() >= maxConcurrentMoves ? maxConcurrentMoves : allMoves.size();
concurrentMoves = concurrentMoves == 0 ? 1 : concurrentMoves;
moveExecutor = Executors.newFixedThreadPool(concurrentMoves);
try {
instances.incrementAndGet();
return doMove(resultOs, logOs);
} finally {
instances.decrementAndGet();
moveExecutor.shutdown();
moveExecutor = null;
}
}
/**
* Execute a move action providing the schedule plan.
*
* @param resultOs
* @param logOs
* @return
* @throws Exception
*/
public int doMove(PrintStream resultOs, PrintStream logOs) throws Exception {
for (int retryTimes = 0; retryTimes < maxRetryTimes; retryTimes ++) {
final AtomicInteger running = new AtomicInteger(0);
for (final ReplicaMove replicaMove : allMoves) {
moveExecutor.execute(new Runnable() {
@Override
public void run() {
try {
running.incrementAndGet();
replicaMove.run();
} finally {
running.decrementAndGet();
}
}
});
if (maxConcurrentMovesPerInst != 0) {
while (running.get() > (maxConcurrentMovesPerInst * 1.0 / instances.get())) {
Thread.sleep(50);
}
}
}
int sleeped = 0;
int[] stat = new int[2];
while (true) {
ReplicaMove.countStatus(allMoves, stat);
if (stat[0] == allMoves.size()) {
status.increaseMovedBlocks(stat[1]);
break;
}
Thread.sleep(10);
sleeped += 10;
}
int remaining = ReplicaMove.refreshMoverList(allMoves);
if (allMoves.size() == 0) {
LOG.info("{} succeeded", this);
return 0;
}
if (logOs != null) {
logOs.println(String.format("The %d/%d retry, remaining = %d",
retryTimes + 1, maxRetryTimes, remaining));
}
LOG.debug("{} : {} moves failed, start a new iteration", this, remaining);
if (sleeped < 1000) {
Thread.sleep(1000 - sleeped);
}
}
int failedMoves = ReplicaMove.failedMoves(allMoves);
LOG.info("{} : failed with {} moves", this, failedMoves);
return failedMoves;
}
@VisibleForTesting
public int executeMove(FileMovePlan plan) throws Exception {
return executeMove(plan, null, null);
}
@Override
public String toString() {
return "MoverExecutor <" + namenode + ":" + fileName + ">";
}
private void init(FileMovePlan plan) throws IOException {
this.namenode = plan.getNamenode();
this.fileName = plan.getFileName();
this.nnc = new NameNodeConnector(namenode, conf);
this.saslClient = new SaslDataTransferClient(conf,
DataTransferSaslUtil.getSaslPropertiesResolver(conf),
TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
dfsClient = nnc.getDistributedFileSystem().getClient();
allMoves = new ArrayList<>();
}
/**
 * Translates a FileMovePlan into the list of single-replica moves
 * ({@code allMoves}) that this executor will run. Each plan index describes
 * one move: block id, source replica location and target location.
 */
private void parseSchedulePlan(FileMovePlan plan) throws IOException {
  generateSourceMap();
  List<Long> blockIds = plan.getBlockIds();
  List<String> srcUuids = plan.getSourceUuids();
  List<String> srcStorageTypes = plan.getSourceStoragetypes();
  List<String> tgtIps = plan.getTargetIpAddrs();
  List<Integer> tgtPorts = plan.getTargetXferPorts();
  List<String> tgtStorageTypes = plan.getTargetStorageTypes();
  for (int i = 0; i < blockIds.size(); i++) {
    // Resolve the block and attach its source replica location.
    DBlock block = sourceBlockMap.get(blockIds.get(i));
    DatanodeInfo srcNode = sourceDatanodeMap.get(srcUuids.get(i));
    StorageGroup source = new StorageGroup(srcNode, srcStorageTypes.get(i));
    block.addLocation(source);
    // Build the target replica location from ip/port/storage type.
    DatanodeInfo tgtNode = CompatibilityHelperLoader.getHelper()
        .newDatanodeInfo(tgtIps.get(i), tgtPorts.get(i));
    StorageGroup target = new StorageGroup(tgtNode, tgtStorageTypes.get(i));
    allMoves.add(new ReplicaMove(block, source, target, nnc, saslClient, conf));
  }
}
/**
 * Builds lookup maps from the file's located blocks:
 * block id -> DBlock and datanode uuid -> DatanodeInfo.
 */
private void generateSourceMap() throws IOException {
  sourceBlockMap = new HashMap<>();
  sourceDatanodeMap = new HashMap<>();
  for (LocatedBlock lb : locatedBlocks) {
    DBlock dBlock = CompatibilityHelperLoader.getHelper()
        .newDBlock(lb, dfsClient.getFileInfo(fileName));
    sourceBlockMap.put(dBlock.getBlock().getBlockId(), dBlock);
    for (DatanodeInfo dn : lb.getLocations()) {
      sourceDatanodeMap.put(dn.getDatanodeUuid(), dn);
    }
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/ReplicaMove.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/move/ReplicaMove.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action.move;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.server.balancer.KeyManager;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.util.Iterator;
import java.util.List;
/**
* One single move represents the move action of one replication.
*/
/**
 * One single move represents the move action of one replication.
 *
 * <p>An instance is executed on a worker thread via {@link #run()}, while the
 * scheduler thread polls its completion through the static helpers
 * ({@link #countStatus}, {@link #allMoveFinished}, {@link #refreshMoverList}).
 * The status flags are therefore declared volatile for cross-thread visibility.
 */
class ReplicaMove {
  static final Logger LOG = LoggerFactory.getLogger(ReplicaMove.class);

  private final NameNodeConnector nnc;
  private final SaslDataTransferClient saslClient;
  private final Block block;
  private final StorageGroup target;
  private final StorageGroup source;
  private final ReplicaMoveStatus status;
  private final Configuration conf;

  public ReplicaMove(DBlock block, StorageGroup source, StorageGroup target, NameNodeConnector nnc,
      SaslDataTransferClient saslClient, Configuration conf) {
    this.nnc = nnc;
    this.saslClient = saslClient;
    this.block =
        CompatibilityHelperLoader.getHelper().getDBlock(block, source).getBlock();
    this.target = target;
    this.source = source;
    this.status = new ReplicaMoveStatus();
    this.conf = conf;
  }

  @Override
  public String toString() {
    String bStr = block != null ? (block + " with size=" + block.getNumBytes() + " ")
        : " ";
    return bStr + "from " + source.getDisplayName() + " to " + target.getDisplayName();
  }

  /**
   * Performs the replica move: connects to the target datanode, negotiates a
   * (possibly secure) stream pair, sends a replaceBlock request and waits for
   * the response. Success/failure is recorded in the status; this method
   * never throws.
   */
  public void run() {
    LOG.debug("Start moving " + this);

    Socket sock = new Socket();
    DataOutputStream out = null;
    DataInputStream in = null;
    try {
      sock.connect(
          NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()),
          CompatibilityHelperLoader.getHelper().getReadTimeOutConstant());
      sock.setKeepAlive(true);

      OutputStream unbufOut = sock.getOutputStream();
      InputStream unbufIn = sock.getInputStream();
      ExtendedBlock eb = new ExtendedBlock(nnc.getBlockpoolID(), block);
      final KeyManager km = nnc.getKeyManager();
      Token<BlockTokenIdentifier> accessToken =
          CompatibilityHelperLoader.getHelper().getAccessToken(km, eb, target);
      // Wrap the raw socket streams with SASL-negotiated ones.
      IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
          unbufIn, km, accessToken, target.getDatanodeInfo());
      unbufOut = saslStreams.out;
      unbufIn = saslStreams.in;
      out = new DataOutputStream(new BufferedOutputStream(unbufOut,
          CompatibilityHelperLoader.getHelper().getIOFileBufferSize(conf)));
      in = new DataInputStream(new BufferedInputStream(unbufIn,
          CompatibilityHelperLoader.getHelper().getIOFileBufferSize(conf)));

      sendRequest(out, eb, accessToken);
      receiveResponse(in);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Successfully moved " + this);
      }
      status.setSuccessful(true);
    } catch (IOException e) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Failed to move " + this + ": " + e.getMessage());
      }
      status.setSuccessful(false);
    } finally {
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
      IOUtils.closeSocket(sock);
      // Mark finished last so pollers never observe a finished-but-unset result.
      status.setFinished(true);
    }
  }

  /** Send a block replace request to the output stream */
  private void sendRequest(
      DataOutputStream out, ExtendedBlock eb, Token<BlockTokenIdentifier> accessToken)
      throws IOException {
    CompatibilityHelperLoader.getHelper()
        .replaceBlock(
            out,
            eb,
            target.getStorageType(),
            accessToken,
            source.getDatanodeInfo().getDatanodeUuid(),
            source.getDatanodeInfo());
  }

  /** Receive a block copy response from the input stream */
  private void receiveResponse(DataInputStream in) throws IOException {
    DataTransferProtos.BlockOpResponseProto response =
        DataTransferProtos.BlockOpResponseProto.parseFrom(CompatibilityHelperLoader.getHelper().getVintPrefixed(in));
    // Datanodes may emit IN_PROGRESS keep-alive responses before the final one.
    while (response.getStatus() == DataTransferProtos.Status.IN_PROGRESS) {
      // read intermediate responses
      response = DataTransferProtos.BlockOpResponseProto.parseFrom(CompatibilityHelperLoader.getHelper().getVintPrefixed(in));
    }
    String logInfo = "block move is failed";
    checkBlockOpStatus(response, logInfo);
  }

  /**
   * Throws if the datanode response is not SUCCESS; access-token failures are
   * surfaced as InvalidBlockTokenException so callers can distinguish them.
   */
  public static void checkBlockOpStatus(
      DataTransferProtos.BlockOpResponseProto response,
      String logInfo) throws IOException {
    if (response.getStatus() != DataTransferProtos.Status.SUCCESS) {
      if (response.getStatus() == DataTransferProtos.Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException(
            "Got access token error"
                + ", status message " + response.getMessage()
                + ", " + logInfo
        );
      } else {
        throw new IOException(
            "Got error"
                + ", status message " + response.getMessage()
                + ", " + logInfo
        );
      }
    }
  }

  /** Returns the number of moves that did not succeed. */
  public static int failedMoves(List<ReplicaMove> allMoves) {
    int failedNum = 0;
    for (ReplicaMove move : allMoves) {
      if (!move.status.isSuccessful()) {
        failedNum += 1;
      }
    }
    return failedNum;
  }

  /** Returns true only when every move has finished (successfully or not). */
  public static boolean allMoveFinished(List<ReplicaMove> allMoves) {
    for (ReplicaMove move : allMoves) {
      if (!move.status.isFinished()){
        return false;
      }
    }
    return true;
  }

  /**
   * Counts move progress.
   *
   * @param allMoves moves to inspect
   * @param ret ret[0] = number finished, ret[1] = number succeeded
   */
  public static void countStatus(List<ReplicaMove> allMoves, int[] ret) {
    ret[0] = 0;
    ret[1] = 0;
    for (ReplicaMove move : allMoves) {
      if (move.status.isFinished()) {
        ret[0] += 1;
        if (move.status.isSuccessful()) {
          ret[1] += 1;
        }
      }
    }
  }

  /**
   * Remove successful moves and refresh the status of remaining ones for a new iteration.
   * @param allMoves moves to filter in place
   * @return number of remaining moves
   */
  public static int refreshMoverList(List<ReplicaMove> allMoves) {
    for (Iterator<ReplicaMove> it = allMoves.iterator(); it.hasNext();) {
      ReplicaMove replicaMove = it.next();
      if (replicaMove.status.isSuccessful()) {
        it.remove();
      } else {
        replicaMove.status.reset();
      }
    }
    return allMoves.size();
  }

  /**
   * Tracks the status of a single move. Written by the worker thread running
   * the move and polled by the scheduler thread, hence the volatile flags.
   * Declared static: it does not use the enclosing instance, so a hidden
   * outer-class reference would only waste memory.
   */
  static class ReplicaMoveStatus {
    private volatile boolean finished;
    private volatile boolean successful;

    public ReplicaMoveStatus() {
      finished = false;
      successful = false;
    }

    public void setFinished(boolean finished) {
      this.finished = finished;
    }

    public void setSuccessful(boolean successful) {
      this.successful = successful;
    }

    public boolean isFinished() {
      return finished;
    }

    public boolean isSuccessful() {
      return successful;
    }

    /** Clears both flags so the move can be retried in a new iteration. */
    public void reset() {
      finished = false;
      successful = false;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/SmallFileScheduler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/SmallFileScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.apache.hadoop.hdfs.DFSClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.SmartFilePermission;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.hdfs.action.SmallFileCompactAction;
import org.smartdata.hdfs.action.SmallFileUncompactAction;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.FileInfo;
import org.smartdata.model.FileState;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.WhitelistHelper;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import static org.smartdata.model.ActionInfo.OLD_FILE_ID;
public class SmallFileScheduler extends ActionSchedulerService {
private MetaStore metaStore;
/**
* Container file lock.
*/
private Set<String> containerFileLock;
/**
* Compact small file lock.
*/
private Set<String> compactSmallFileLock;
/**
* Cache all the container files of SSM.
*/
private Set<String> containerFileCache;
/**
* Small files which waiting to be handled.
*/
private Set<String> handlingSmallFileCache;
/**
* Compact file state queue for caching these file state to update.
*/
private Queue<CompactFileState> compactFileStateQueue;
/**
* Scheduled service to update meta store.
*/
private ScheduledExecutorService executorService;
private static final int META_STORE_INSERT_BATCH_SIZE = 200;
public static final String COMPACT_ACTION_NAME = "compact";
public static final String UNCOMPACT_ACTION_NAME = "uncompact";
public static final List<String> ACTIONS =
Arrays.asList(COMPACT_ACTION_NAME, UNCOMPACT_ACTION_NAME);
private DFSClient dfsClient;
private static final Logger LOG = LoggerFactory.getLogger(SmallFileScheduler.class);
public SmallFileScheduler(SmartContext context, MetaStore metaStore) {
  super(context, metaStore);
  // Keep a direct reference for small-file state queries and updates.
  this.metaStore = metaStore;
}
/**
 * Initializes the thread-safe bookkeeping collections, the background sync
 * executor, and a DFS client for file-state queries. A DFS client failure is
 * only logged; the scheduler can still start.
 */
@Override
public void init() throws IOException {
  containerFileLock = Collections.synchronizedSet(new HashSet<String>());
  compactSmallFileLock = Collections.synchronizedSet(new HashSet<String>());
  containerFileCache = Collections.synchronizedSet(new HashSet<String>());
  handlingSmallFileCache = Collections.synchronizedSet(new HashSet<String>());
  compactFileStateQueue = new ConcurrentLinkedQueue<>();
  executorService = Executors.newSingleThreadScheduledExecutor();
  try {
    URI nnUri = HadoopUtil.getNameNodeUri(getContext().getConf());
    dfsClient = HadoopUtil.getDFSClient(nnUri, getContext().getConf());
  } catch (IOException e) {
    LOG.warn("Failed to create dfsClient.");
  }
}
@Override
public void start() throws IOException {
  // Periodically flush queued compact file states into the meta store.
  executorService.scheduleAtFixedRate(
      new ScheduleTask(), 100, 50,
      TimeUnit.MILLISECONDS);
  try {
    // Warm the cache with all container files already known to SSM.
    List<String> containerFileList = metaStore.getAllContainerFiles();
    this.containerFileCache.addAll(containerFileList);
  } catch (MetaStoreException e) {
    throw new IOException(e);
  }
}
/** Action names handled by this scheduler: compact and uncompact. */
@Override
public List<String> getSupportedActions() {
  return ACTIONS;
}
/**
 * Restores in-memory lock state for an action recovered after a restart:
 * small-file locks for compact actions, plus the container file lock for
 * both compact and uncompact actions.
 */
@Override
public void recover(ActionInfo actionInfo) {
  String actionName = actionInfo.getActionName();
  if (!COMPACT_ACTION_NAME.equals(actionName)
      && !UNCOMPACT_ACTION_NAME.equals(actionName)) {
    return;
  }
  if (COMPACT_ACTION_NAME.equals(actionName)) {
    compactSmallFileLock.addAll(getSmallFileList(actionInfo));
  }
  containerFileLock.add(getContainerFile(actionInfo));
}
/**
 * Validates a cmdlet submission. Only compact actions are checked here; any
 * validation failure is reported as an IOException.
 */
@Override
public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex)
    throws IOException {
  // Every supported action needs arguments.
  if (actionInfo.getArgs() == null) {
    throw new IOException("No arguments for the action");
  }
  // Non-compact actions pass through unchecked.
  if (!COMPACT_ACTION_NAME.equals(actionInfo.getActionName())) {
    return true;
  }
  // The container file path must be present.
  String containerFilePath = getContainerFile(actionInfo);
  if (containerFilePath == null || containerFilePath.isEmpty()) {
    throw new IOException("Illegal container file path: " + containerFilePath);
  }
  // The small-files argument must be present.
  String smallFiles = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
  if (smallFiles == null || smallFiles.isEmpty()) {
    throw new IOException("Illegal small files: " + smallFiles);
  }
  // The JSON-encoded small file list must be non-empty.
  ArrayList<String> smallFileList = new Gson().fromJson(
      smallFiles, new TypeToken<ArrayList<String>>() {
      }.getType());
  if (smallFileList.isEmpty()) {
    throw new IOException("Illegal small files list: " + smallFileList);
  }
  // Every small file must pass the whitelist when it is enabled.
  if (WhitelistHelper.isEnabled(getContext().getConf())) {
    for (String filePath : smallFileList) {
      if (!WhitelistHelper.isInWhitelist(filePath, getContext().getConf())) {
        throw new IOException("Path " + filePath + " is not in the whitelist.");
      }
    }
  }
  // Finally, validate the small file list against locks, caches and metastore.
  if (!checkIfValidSmallFiles(smallFileList)) {
    throw new IOException("Illegal small files are provided.");
  }
  return true;
}
/**
 * Check if the small file list is valid.
 *
 * <p>A list is valid when every file: is a non-empty path, is not locked or
 * currently being handled, is not itself a container file, exists in the
 * meta store with the same permission as the first file, and has NORMAL
 * file state. Uses SLF4J parameterized logging instead of eager
 * String.format so messages are only rendered when the level is enabled.
 */
private boolean checkIfValidSmallFiles(List<String> smallFileList) {
  for (String smallFile : smallFileList) {
    if (smallFile == null || smallFile.isEmpty()) {
      LOG.error("Illegal small file path: {}", smallFile);
      return false;
    } else if (compactSmallFileLock.contains(smallFile)) {
      LOG.error("{} is locked.", smallFile);
      return false;
    } else if (handlingSmallFileCache.contains(smallFile)) {
      LOG.error("{} is being handling.", smallFile);
      return false;
    } else if (containerFileCache.contains(smallFile)
        || containerFileLock.contains(smallFile)) {
      LOG.error("{} is container file.", smallFile);
      return false;
    }
  }

  // Get small file info list and file state map from meta store.
  List<FileInfo> fileInfos;
  Map<String, FileState> fileStateMap;
  try {
    fileInfos = metaStore.getFilesByPaths(smallFileList);
    fileStateMap = metaStore.getFileStates(smallFileList);
  } catch (MetaStoreException e) {
    LOG.error("Failed to get file states of small files.", e);
    return false;
  }

  // Index file info by path for the existence/permission checks below.
  Map<String, FileInfo> fileInfoMap = new HashMap<>();
  for (FileInfo fileInfo : fileInfos) {
    fileInfoMap.put(fileInfo.getPath(), fileInfo);
  }

  // Check that all the small files exist and share the same permission.
  FileInfo firstFileInfo = null;
  for (String smallFile : smallFileList) {
    FileInfo fileInfo = fileInfoMap.get(smallFile);
    if (fileInfo != null) {
      if (firstFileInfo == null) {
        firstFileInfo = fileInfo;
      } else {
        if (!(new SmartFilePermission(firstFileInfo)).equals(
            new SmartFilePermission(fileInfo))) {
          LOG.debug("{} has different file permission with {}.",
              firstFileInfo.getPath(), fileInfo.getPath());
          return false;
        }
      }
    } else {
      LOG.debug("{} is not exist!!!", smallFile);
      return false;
    }
  }

  // Check that the state of each small file is NORMAL.
  for (Map.Entry<String, FileState> entry : fileStateMap.entrySet()) {
    String smallFile = entry.getKey();
    FileState.FileType smallFileType = entry.getValue().getFileType();
    if (smallFileType != FileState.FileType.NORMAL) {
      LOG.debug("{} has invalid file state {} for small file compact.",
          smallFile, smallFileType.toString());
      return false;
    }
  }
  return true;
}
/**
 * Resolves the container file permission, consulting both the action
 * argument and the meta store.
 *
 * @return the permission, or null if neither source provides one
 * @throws MetaStoreException if the meta store lookup fails
 * @throws IllegalArgumentException if both sources are present but disagree
 */
private SmartFilePermission getContainerFilePermission(ActionInfo actionInfo,
    String containerFilePath) throws MetaStoreException, IllegalArgumentException {
  // Get container file permission from the argument of this action
  String containerFilePermissionArg = actionInfo.getArgs().get(
      SmallFileCompactAction.CONTAINER_FILE_PERMISSION);
  SmartFilePermission containerFilePermissionFromArg = null;
  if (containerFilePermissionArg != null && !containerFilePermissionArg.isEmpty()) {
    containerFilePermissionFromArg = new Gson().fromJson(
        containerFilePermissionArg, new TypeToken<SmartFilePermission>() {
        }.getType());
  }
  // Get container file permission from meta store
  SmartFilePermission containerFilePermissionFromMeta = null;
  FileInfo containerFileInfo = metaStore.getFile(containerFilePath);
  if (containerFileInfo != null) {
    containerFilePermissionFromMeta = new SmartFilePermission(containerFileInfo);
  }
  // Prefer whichever source is present; when both exist they must agree.
  SmartFilePermission containerFilePermission;
  if (containerFilePermissionFromArg == null
      || containerFilePermissionFromMeta == null) {
    containerFilePermission = (containerFilePermissionFromArg == null) ?
        containerFilePermissionFromMeta : containerFilePermissionFromArg;
  } else {
    if (containerFilePermissionFromArg.equals(containerFilePermissionFromMeta)) {
      containerFilePermission = containerFilePermissionFromArg;
    } else {
      throw new IllegalArgumentException(
          "Illegal container file permission argument.");
    }
  }
  return containerFilePermission;
}
/**
 * Get compact action schedule result according to action info.
 *
 * <p>Retries while the container file is locked; otherwise re-validates the
 * small file list, resolves permissions, locks the container file and small
 * files, and records the old file ids for temperature takeover.
 */
private ScheduleResult getCompactScheduleResult(ActionInfo actionInfo) {
  // Get container file and small file list of this action
  String containerFilePath = getContainerFile(actionInfo);
  ArrayList<String> smallFileList = new Gson().fromJson(
      actionInfo.getArgs().get(HdfsAction.FILE_PATH),
      new TypeToken<ArrayList<String>>() {
      }.getType());

  // Check if container file is locked and retry
  if (containerFileLock.contains(containerFilePath)) {
    return ScheduleResult.RETRY;
  } else {
    // Check if the small file list is valid
    if (!checkIfValidSmallFiles(smallFileList)) {
      actionInfo.setResult("Small file list is invalid.");
      return ScheduleResult.FAIL;
    }

    // Get container file permission
    SmartFilePermission containerFilePermission;
    try {
      containerFilePermission = getContainerFilePermission(
          actionInfo, containerFilePath);
    } catch (MetaStoreException e1) {
      actionInfo.setResult(String.format(
          "Failed to get file info of the container file %s for %s.",
          containerFilePath, e1.toString()));
      return ScheduleResult.FAIL;
    } catch (IllegalArgumentException e2) {
      actionInfo.setResult(e2.getMessage());
      return ScheduleResult.FAIL;
    }

    // Get first small file info
    FileInfo firstFileInfo;
    SmartFilePermission firstFilePermission;
    try {
      firstFileInfo = metaStore.getFile(smallFileList.get(0));
      // Guard against an uncaught NullPointerException: the meta store
      // returns null when the file has been removed between validation
      // and scheduling.
      if (firstFileInfo == null) {
        actionInfo.setResult(String.format(
            "Failed to get first file info: %s.", containerFilePath));
        return ScheduleResult.FAIL;
      }
      firstFilePermission = new SmartFilePermission(firstFileInfo);
    } catch (MetaStoreException e) {
      actionInfo.setResult(String.format(
          "Failed to get first file info: %s.", containerFilePath));
      return ScheduleResult.FAIL;
    }

    // Reset action arguments if container file is not exist
    // and its permission is null
    if (containerFilePermission == null) {
      Map<String, String> args = new HashMap<>(3);
      args.put(SmallFileCompactAction.CONTAINER_FILE,
          getContainerFile(actionInfo));
      args.put(SmallFileCompactAction.FILE_PATH,
          new Gson().toJson(smallFileList));
      args.put(SmallFileCompactAction.CONTAINER_FILE_PERMISSION,
          new Gson().toJson(firstFilePermission));
      actionInfo.setArgs(args);
    } else {
      if (!containerFilePermission.equals(firstFilePermission)) {
        actionInfo.setResult(String.format(
            "Container file %s has different permission with %s.",
            containerFilePath, firstFileInfo.getPath()));
        return ScheduleResult.FAIL;
      }
    }

    // Lock container file and small files
    containerFileLock.add(containerFilePath);
    compactSmallFileLock.addAll(smallFileList);
    afterSchedule(actionInfo);
    return ScheduleResult.SUCCESS;
  }
}
/**
 * Get uncompact action schedule result according to action info,
 * and reset action arguments.
 *
 * <p>Fails fast on an invalid or unknown container file; retries while the
 * container file lock is held by another action; otherwise locks the
 * container file and injects the container's small file list into the
 * action arguments.
 */
private ScheduleResult getUncompactScheduleResult(ActionInfo actionInfo,
    LaunchAction action) {
  // Check if container file path is valid
  String containerFilePath = getContainerFile(actionInfo);
  if (containerFilePath == null || containerFilePath.isEmpty()) {
    LOG.debug("Illegal container file path: {}", containerFilePath);
    actionInfo.setResult("Illegal container file path: " + containerFilePath);
    return ScheduleResult.FAIL;
  }
  if (!containerFileCache.contains(containerFilePath)) {
    LOG.debug("{} is not container file.", containerFilePath);
    actionInfo.setResult(containerFilePath + " is not container file.");
    return ScheduleResult.FAIL;
  }

  // Check if container file is locked
  if (!containerFileLock.contains(containerFilePath)) {
    // Get small file list of the container file
    List<String> smallFileList;
    try {
      smallFileList = metaStore.getSmallFilesByContainerFile(containerFilePath);
    } catch (MetaStoreException e) {
      String errMsg = String.format(
          "Failed to get small files of the container file %s for %s.",
          containerFilePath, e.toString());
      LOG.error(errMsg);
      actionInfo.setResult(errMsg);
      return ScheduleResult.FAIL;
    }

    if (!smallFileList.isEmpty()) {
      // Update container file and uncompact small file lock
      containerFileLock.add(containerFilePath);
      // Put small files into arguments of this action
      Map<String, String> args = new HashMap<>(2);
      args.put(HdfsAction.FILE_PATH, new Gson().toJson(smallFileList));
      args.put(SmallFileUncompactAction.CONTAINER_FILE,
          getContainerFile(actionInfo));
      action.setArgs(args);
      actionInfo.setArgs(args);
      afterSchedule(actionInfo);
      return ScheduleResult.SUCCESS;
    } else {
      // Nothing left to uncompact: report success without launching anything.
      actionInfo.setResult("All the small files of" +
          " this container file already be uncompacted.");
      actionInfo.setSuccessful(true);
      return ScheduleResult.SUCCESS;
    }
  } else {
    // Retry if container file is locked
    return ScheduleResult.RETRY;
  }
}
/** Dispatches scheduling to the compact or uncompact handler. */
@Override
public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
    LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
  String actionName = actionInfo.getActionName();
  if (COMPACT_ACTION_NAME.equals(actionName)) {
    return getCompactScheduleResult(actionInfo);
  }
  if (UNCOMPACT_ACTION_NAME.equals(actionName)) {
    return getUncompactScheduleResult(actionInfo, action);
  }
  return ScheduleResult.SUCCESS;
}
/**
 * Speculate action status and set result accordingly.
 *
 * <p>Consults the current HDFS file states: a compact action is deemed
 * successful when every small file is in COMPACT state, an uncompact action
 * when every file is back to NORMAL. For a successful compact, the action
 * result is reconstructed from the observed compact file states.
 */
@Override
public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
  try {
    boolean isSuccessful = true;
    List<FileState> fileStateList = new ArrayList<>();
    // If any one small file is not compacted, return false.
    for (String path : getSmallFileList(actionInfo)) {
      FileState fileState = HadoopUtil.getFileState(dfsClient, path);
      FileState.FileType fileType = fileState.getFileType();
      if (!isExpectedFileState(fileType, actionInfo.getActionName())) {
        isSuccessful = false;
        break;
      }
      // Only add compact file state.
      if (actionInfo.getActionName().equals(COMPACT_ACTION_NAME)) {
        fileStateList.add(fileState);
      }
    }
    if (!isSuccessful) {
      return false;
    }
    if (actionInfo.getActionName().equals(UNCOMPACT_ACTION_NAME)) {
      return true;
    }
    // Recover action result for successful compact action.
    if (actionInfo.getActionName().equals(COMPACT_ACTION_NAME)) {
      List<CompactFileState> compactFileStates = new ArrayList<>();
      // Every state collected above is expected to be a CompactFileState here.
      assert fileStateList.size() == getSmallFileList(actionInfo).size();
      for (FileState fileState : fileStateList) {
        compactFileStates.add((CompactFileState) fileState);
      }
      actionInfo.setResult(new Gson().toJson(compactFileStates));
    }
    return true;
  } catch (IOException e) {
    LOG.warn("Failed to get file state, suppose this action was not " +
        "successfully executed: {}", actionInfo.toString());
    return false;
  }
}
/**
 * Returns true when the observed file type matches what a successfully
 * executed action leaves behind: COMPACT after compact, NORMAL after uncompact.
 */
public boolean isExpectedFileState(FileState.FileType fileType,
    String actionName) {
  FileState.FileType expected = actionName.equals(COMPACT_ACTION_NAME)
      ? FileState.FileType.COMPACT
      : FileState.FileType.NORMAL;
  return fileType == expected;
}
/**
 * Do something after a successful scheduling.
 * For compact/uncompact action, the original small file will be replaced by
 * other file with new fid. We need to keep the original file's id to let new
 * file take over its data temperature metric.
 */
public void afterSchedule(ActionInfo actionInfo) {
  try {
    // Set old file ID, which will be persisted to DB.
    setOldFileId(actionInfo);
  } catch (Throwable t) {
    // Not fatal: only data-temperature takeover is affected. Log the cause
    // instead of silently dropping it so the failure can be diagnosed.
    LOG.warn("Failed in maintaining old fid for taking over " +
        "old data's temperature.", t);
  }
}
/**
 * Set old file id which will be persisted into DB. For action status
 * recovery case, the old file id can be acquired for taking over old file's
 * data temperature.
 *
 * @throws IOException if any small file's fid cannot be fetched from HDFS
 */
private void setOldFileId(ActionInfo actionInfo) throws IOException {
  // Already recorded (e.g. recovered action): nothing to do.
  if (actionInfo.getArgs().get(OLD_FILE_ID) != null &&
      !actionInfo.getArgs().get(OLD_FILE_ID).isEmpty()) {
    return;
  }
  List<Long> oids = new ArrayList<>();
  // For uncompact, small file list will be set by #onSchedule.
  for (String path : getSmallFileList(actionInfo)) {
    try {
      oids.add(dfsClient.getFileInfo(path).getFileId());
    } catch (IOException e) {
      LOG.warn("Failed to set old fid for taking over data temperature!");
      throw e;
    }
  }
  actionInfo.setOldFileIds(oids);
}
/**
 * Handle compact action result.
 *
 * <p>On success, caches the new container file, queues the compact file
 * states for meta-store sync, and marks the small files as being handled.
 * In all cases, releases the container file and small file locks.
 * Uses SLF4J parameterized logging instead of eager String.format.
 */
private void handleCompactActionResult(ActionInfo actionInfo) {
  // Get container file path, small files, result of this action
  String containerFilePath = getContainerFile(actionInfo);
  List<String> smallFileList = new Gson().fromJson(
      actionInfo.getArgs().get(HdfsAction.FILE_PATH),
      new TypeToken<ArrayList<String>>() {
      }.getType());
  List<CompactFileState> compactFileStates = new Gson().fromJson(
      actionInfo.getResult(),
      new TypeToken<ArrayList<CompactFileState>>() {
      }.getType());

  // Update container file cache, compact file state queue,
  // handling small file cache
  if (compactFileStates != null && !compactFileStates.isEmpty()) {
    LOG.debug("Add container file {} into cache.", containerFilePath);
    containerFileCache.add(containerFilePath);
    for (CompactFileState compactFileState : compactFileStates) {
      handlingSmallFileCache.add(compactFileState.getPath());
      compactFileStateQueue.offer(compactFileState);
    }
  }

  // Remove locks of container file and small files
  containerFileLock.remove(containerFilePath);
  compactSmallFileLock.removeAll(smallFileList);
}
/**
 * Handle uncompact action result.
 *
 * <p>On success the container file no longer holds small-file data, so it is
 * dropped from the cache; its lock is released in all cases.
 */
private void handleUncompactActionResult(ActionInfo actionInfo) {
  // Get container file path, small files, result of this action
  String containerFilePath = getContainerFile(actionInfo);
  if (actionInfo.isSuccessful()) {
    containerFileCache.remove(containerFilePath);
  }
  // Remove locks of container file
  containerFileLock.remove(containerFilePath);
}
/**
 * Post-processing for finished compact/uncompact actions: releases locks,
 * updates caches, and on success lets the new files take over the old files'
 * access-count metrics.
 */
@Override
public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo,
    int actionIndex) {
  String actionName = actionInfo.getActionName();
  boolean isCompact = actionName.equals(COMPACT_ACTION_NAME);
  boolean isUncompact = actionName.equals(UNCOMPACT_ACTION_NAME);
  if ((!isCompact && !isUncompact) || !actionInfo.isFinished()) {
    return;
  }
  if (isCompact) {
    handleCompactActionResult(actionInfo);
  } else {
    handleUncompactActionResult(actionInfo);
  }
  if (actionInfo.isSuccessful()) {
    // For uncompact action, the small file list cannot be obtained from
    // metastore, since the record can be deleted because container file
    // was deleted.
    takeOverAccessCount(actionInfo);
  }
}
/** Parses the JSON-encoded small file list from the action's file path arg. */
public List<String> getSmallFileList(ActionInfo actionInfo) {
  return new Gson().fromJson(actionInfo.getArgs().get(HdfsAction.FILE_PATH),
      new TypeToken<ArrayList<String>>() {
      }.getType());
}
/** Returns the container file path argument of the given action. */
public String getContainerFile(ActionInfo actionInfo) {
  return actionInfo.getArgs().get(SmallFileCompactAction.CONTAINER_FILE);
}
/**
 * In rename case, the fid of renamed file is not changed. But sometimes, we need
 * to keep old file's access count and let new file takes over this metric. E.g.,
 * with (un)EC/(de)Compress/(un)Compact action, a new file will overwrite the old file.
 */
public void takeOverAccessCount(ActionInfo actionInfo) {
  List<String> smallFiles = getSmallFileList(actionInfo);
  List<Long> oldFids = actionInfo.getOldFileIds();
  try {
    for (int i = 0; i < smallFiles.size(); i++) {
      String filePath = smallFiles.get(i);
      long oldFid = oldFids.get(i);
      // The new fid may have not been updated in metastore, so
      // we get it from dfs client.
      long newFid = dfsClient.getFileInfo(filePath).getFileId();
      metaStore.updateAccessCountTableFid(oldFid, newFid);
    }
  } catch (Exception e) {
    // Pass the exception itself (the previous e.getMessage() argument was
    // silently dropped — the message had no placeholder) so the stack trace
    // is logged.
    LOG.warn("Failed to take over file access count for all tables, " +
        "which may make the measurement for data temperature inaccurate!",
        e);
  }
}
/**
 * Sync compact file states with meta store.
 *
 * <p>Drains up to {@code META_STORE_INSERT_BATCH_SIZE} entries from the
 * queue. An entry is only persisted once its small file appears in the meta
 * store with length 0 (i.e. after the compact action truncated it);
 * otherwise it is re-queued for a later round. Uses SLF4J parameterized
 * logging instead of eager String.format.
 */
private void syncMetaStore() {
  List<CompactFileState> compactFileStates = new ArrayList<>();

  // Get compact file states from compactFileStateQueue
  for (int i = 0; i < META_STORE_INSERT_BATCH_SIZE; i++) {
    CompactFileState compactFileState = compactFileStateQueue.poll();
    if (compactFileState == null) {
      // Queue drained before the batch limit was reached.
      break;
    }
    try {
      FileInfo info = metaStore.getFile(compactFileState.getPath());
      if (info != null && info.getLength() == 0) {
        LOG.debug("Ready to insert the file state of {}.",
            compactFileState.getPath());
        compactFileStates.add(compactFileState);
      } else {
        LOG.debug("Waiting for the small file {} synced in the meta store.",
            compactFileState.getPath());
        compactFileStateQueue.offer(compactFileState);
      }
    } catch (MetaStoreException e) {
      LOG.error("Failed to get file info.", e);
      compactFileStateQueue.offer(compactFileState);
    }
  }

  // Batch insert compact file states into meta store
  try {
    if (!compactFileStates.isEmpty()) {
      metaStore.insertCompactFileStates(
          compactFileStates.toArray(new CompactFileState[0]));
      for (CompactFileState fileState : compactFileStates) {
        handlingSmallFileCache.remove(fileState.getPath());
      }
    }
  } catch (MetaStoreException e) {
    // Unblock these files even on failure so they can be re-submitted.
    for (CompactFileState fileState : compactFileStates) {
      handlingSmallFileCache.remove(fileState.getPath());
    }
    LOG.error("Failed to update file state of meta store.", e);
  }
}
/**
* Scheduled task to sync meta store.
*/
private class ScheduleTask implements Runnable {
@Override
public void run() {
try {
syncMetaStore();
} catch (Throwable t) {
LOG.error("Failed to sync compact file states with meta store. " + t.toString());
}
}
}
@Override
public void stop() throws IOException {
try {
syncMetaStore();
} catch (Exception e) {
throw new IOException(e);
}
executorService.shutdown();
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MoverScheduler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MoverScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
import com.google.common.util.concurrent.RateLimiter;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.hdfs.action.MoveFileAction;
import org.smartdata.hdfs.metric.fetcher.DatanodeStorageReportProcTask;
import org.smartdata.hdfs.metric.fetcher.MovePlanMaker;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.action.FileMovePlan;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
 * Schedules storage-policy move actions ("allssd", "onessd", "archive",
 * "alldisk", "onedisk", "ramdisk") and attaches a concrete block move plan
 * to each scheduled action.
 */
public class MoverScheduler extends ActionSchedulerService {
  private DFSClient client;
  private MovePlanStatistics statistics;
  private MovePlanMaker planMaker;
  private final URI nnUri;
  // Interval (ms) between datanode storage report refreshes.
  private long dnInfoUpdateInterval = 2 * 60 * 1000;
  private ScheduledExecutorService updateService;
  // Typed as ScheduledFuture<?> instead of the raw type.
  private ScheduledFuture<?> updateServiceFuture;
  private long throttleInMb;
  private RateLimiter rateLimiter = null;
  // Lock file after scheduling. Fix: scheduler callbacks (onSubmit,
  // onSchedule, onActionFinished) may be invoked from different threads,
  // so a concurrent set is used instead of a plain HashSet.
  private Set<String> fileLock;

  public static final Logger LOG =
      LoggerFactory.getLogger(MoverScheduler.class);

  public MoverScheduler(SmartContext context, MetaStore metaStore)
      throws IOException {
    super(context, metaStore);
    nnUri = HadoopUtil.getNameNodeUri(getContext().getConf());
    throttleInMb = getContext().getConf()
        .getLong(SmartConfKeys.SMART_ACTION_MOVE_THROTTLE_MB_KEY,
            SmartConfKeys.SMART_ACTION_MOVE_THROTTLE_MB_DEFAULT);
    if (throttleInMb > 0) {
      rateLimiter = RateLimiter.create(throttleInMb);
    }
    this.fileLock = ConcurrentHashMap.newKeySet();
  }

  public void init() throws IOException {
    client = HadoopUtil.getDFSClient(nnUri, getContext().getConf());
    statistics = new MovePlanStatistics();
    updateService = Executors.newScheduledThreadPool(1);
  }

  /**
   * After start call, all services and public calls should work.
   * @throws IOException
   */
  public void start() throws IOException {
    // TODO: Will be removed when MetaStore part finished
    DatanodeStorageReportProcTask task =
        new DatanodeStorageReportProcTask(client, getContext().getConf());
    task.run();
    planMaker = new MovePlanMaker(client, task.getStorages(), task.getNetworkTopology(), statistics);
    updateServiceFuture = updateService.scheduleAtFixedRate(
        new UpdateClusterInfoTask(task),
        dnInfoUpdateInterval, dnInfoUpdateInterval, TimeUnit.MILLISECONDS);
  }

  /**
   * After stop call, all states in database will not be changed anymore.
   * @throws IOException
   */
  public void stop() throws IOException {
    if (updateServiceFuture != null) {
      updateServiceFuture.cancel(false);
    }
    // Fix: shut down the executor as well; otherwise its pooled thread
    // keeps running after stop (resource leak).
    if (updateService != null) {
      updateService.shutdown();
    }
  }

  private static final List<String> actions =
      Arrays.asList("allssd", "onessd", "archive", "alldisk", "onedisk", "ramdisk");

  public List<String> getSupportedActions() {
    return actions;
  }

  @Override
  public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex)
      throws IOException {
    // check args
    if (actionInfo.getArgs() == null) {
      throw new IOException("No arguments for the action");
    }
    // Reject a submit for a file that is already being moved.
    if (fileLock.contains(actionInfo.getArgs().get(HdfsAction.FILE_PATH))) {
      LOG.warn("The file {} is locked by other mover action!",
          actionInfo.getArgs().get(HdfsAction.FILE_PATH));
      return false;
    }
    return true;
  }

  @Override
  public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
      LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
    if (!actions.contains(action.getActionType())) {
      return ScheduleResult.SUCCESS;
    }
    String file = action.getArgs().get(HdfsAction.FILE_PATH);
    if (file == null) {
      actionInfo.appendLog("File path not specified!\n");
      return ScheduleResult.FAIL;
    }
    // Map the action name onto the HDFS storage policy it requests.
    String policy = null;
    switch (action.getActionType()) {
      case "allssd":
        policy = "ALL_SSD";
        break;
      case "onessd":
        policy = "ONE_SSD";
        break;
      case "archive":
        policy = "COLD";
        break;
      case "alldisk":
        policy = "HOT";
        break;
      case "onedisk":
        policy = "WARM";
        break;
      case "ramdisk":
        policy = "LAZY_PERSIST";
        break;
    }
    try {
      FileMovePlan plan = planMaker.processNamespace(new Path(file), policy);
      if (rateLimiter != null) {
        // Two possible understandings here: file level and replica level
        int len = (int) (plan.getFileLengthToMove() >> 20);
        if (len > 0) {
          // Retry later if the throttle does not allow this move right now.
          if (!rateLimiter.tryAcquire(len)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cancel Scheduling action {} due to throttling. {}", actionInfo, plan);
            }
            return ScheduleResult.RETRY;
          }
        }
      }
      plan.setNamenode(nnUri);
      action.getArgs().put(MoveFileAction.MOVE_PLAN, plan.toString());
      fileLock.add(action.getArgs().get(HdfsAction.FILE_PATH));
      return ScheduleResult.SUCCESS;
    } catch (IOException e) {
      actionInfo.appendLogLine(e.getMessage());
      LOG.error("Exception while processing " + action, e);
      return ScheduleResult.FAIL;
    } catch (Throwable t) {
      actionInfo.appendLogLine(t.getMessage());
      LOG.error("Unexpected exception when scheduling move " + policy + " '" + file + "'.", t);
      return ScheduleResult.FAIL;
    }
  }

  @Override
  public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo,
      int actionIndex) {
    // Release the per-file lock taken in onSchedule.
    fileLock.remove(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
  }

  /** Periodically refreshes datanode storage info used by the plan maker. */
  private class UpdateClusterInfoTask implements Runnable {
    private DatanodeStorageReportProcTask task;

    public UpdateClusterInfoTask(DatanodeStorageReportProcTask task) {
      this.task = task;
    }

    @Override
    public void run() {
      try {
        task.run();
        planMaker.updateClusterInfo(task.getStorages(), task.getNetworkTopology());
      } catch (Throwable t) {
        LOG.warn("Exception when updating cluster info ", t);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.RateLimiter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.action.SyncAction;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.*;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import static org.smartdata.SmartConstants.DISTRIBUTED_FILE_SYSTEM;
import static org.smartdata.SmartConstants.FS_HDFS_IMPL;
import static org.smartdata.SmartConstants.SMART_FILE_SYSTEM;
public class CopyScheduler extends ActionSchedulerService {
static final Logger LOG =
LoggerFactory.getLogger(CopyScheduler.class);
private static final List<String> actions = Collections.singletonList("sync");
private MetaStore metaStore;
// Fixed rate scheduler
private ScheduledExecutorService executorService;
// Global variables
private Configuration conf;
// <File path, file diff id>
private Map<String, Long> fileLock;
// <actionId, file diff id>
private Map<Long, Long> actionDiffMap;
// <File path, FileChain object>
private Map<String, ScheduleTask.FileChain> fileDiffChainMap;
// <did, Fail times>
private Map<Long, Integer> fileDiffMap;
// BaseSync queue
private Map<String, String> baseSyncQueue;
private Map<String, Boolean> overwriteQueue;
// Merge append length threshold
private long mergeLenTh = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT * 3;
// Merge count length threshold
private long mergeCountTh = 10;
private int retryTh = 3;
// Check interval of executorService
private long checkInterval;
// Base sync batch insert size
private int batchSize = 500;
// Cache of the file_diff
private Map<Long, FileDiff> fileDiffCache;
// cache sync threshold, default 100
private int cacheSyncTh = 100;
// record the file_diff whether being changed
private Map<Long, Boolean> fileDiffCacheChanged;
// throttle for copy action
private long throttleInMb;
private RateLimiter rateLimiter = null;
// records the number of file diffs in useless states
private AtomicInteger numFileDiffUseless = new AtomicInteger(0);
// record the file diff info in order for check use
private List<FileDiff> fileDiffArchive;
public static final int fileDiffArchiveSize = 1000;
public CopyScheduler(SmartContext context, MetaStore metaStore) {
super(context, metaStore);
this.metaStore = metaStore;
this.fileLock = new ConcurrentHashMap<>();
this.actionDiffMap = new ConcurrentHashMap<>();
this.fileDiffChainMap = new ConcurrentHashMap<>();
this.fileDiffMap = new ConcurrentHashMap<>();
this.baseSyncQueue = new ConcurrentHashMap<>();
this.overwriteQueue = new ConcurrentHashMap<>();
this.executorService = Executors.newScheduledThreadPool(2);
this.fileDiffCache = new ConcurrentHashMap<>();
this.fileDiffCacheChanged = new ConcurrentHashMap<>();
// Get conf or new default conf
try {
conf = getContext().getConf();
} catch (NullPointerException e) {
// SmartContext is empty
conf = new Configuration();
}
// Conf related parameters
cacheSyncTh = conf.getInt(SmartConfKeys
.SMART_COPY_SCHEDULER_BASE_SYNC_BATCH,
SmartConfKeys.SMART_COPY_SCHEDULER_BASE_SYNC_BATCH_DEFAULT);
checkInterval = conf.getLong(SmartConfKeys.SMART_COPY_SCHEDULER_CHECK_INTERVAL,
SmartConfKeys.SMART_COPY_SCHEDULER_CHECK_INTERVAL_DEFAULT);
throttleInMb = conf.getLong(SmartConfKeys.SMART_ACTION_COPY_THROTTLE_MB_KEY,
SmartConfKeys.SMART_ACTION_COPY_THROTTLE_MB_DEFAULT);
if (throttleInMb > 0) {
rateLimiter = RateLimiter.create(throttleInMb);
}
try {
this.numFileDiffUseless.addAndGet(metaStore.getUselessFileDiffNum());
} catch (MetaStoreException e) {
LOG.error("Failed to get num of useless file diffs!");
}
this.fileDiffArchive = new CopyOnWriteArrayList<>();
}
@Override
public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
if (!actionInfo.getActionName().equals("sync")) {
return ScheduleResult.FAIL;
}
String srcDir = action.getArgs().get(SyncAction.SRC);
String path = action.getArgs().get(HdfsAction.FILE_PATH);
String destDir = action.getArgs().get(SyncAction.DEST);
String destPath = path.replaceFirst(srcDir, destDir);
// Check again to avoid corner cases
long did = fileDiffChainMap.get(path).getHead();
if (did == -1) {
// FileChain is already empty
return ScheduleResult.FAIL;
}
FileDiff fileDiff = fileDiffCache.get(did);
if (fileDiff == null) {
return ScheduleResult.FAIL;
}
if (fileDiff.getState() != FileDiffState.PENDING) {
// If file diff is applied or failed
fileDiffChainMap.get(path).removeHead();
fileLock.remove(path);
return ScheduleResult.FAIL;
}
// wait dependent file diff
if (requireWait(fileDiff)) {
return ScheduleResult.RETRY;
}
// Check whether src is compressed, if so, the original length of syncing file should be used.
// Otherwise, only partial compressed file is copied. Using HDFS copy cmd or SSM copy action
// will not have such issue, since file length is obtained from SmartDFSClient in that case,
// where original length is acquired. For copying or syncing a compressed file, the backup
// file will not be compressed.
try {
FileState fileState = metaStore.getFileState(fileDiff.getSrc());
if (fileState instanceof CompressionFileState &&
fileDiff.getParameters().get("-length") != null) {
Long length = ((CompressionFileState) fileState).getOriginalLength();
fileDiff.getParameters().put("-length", length.toString());
}
} catch (MetaStoreException e) {
LOG.error("Failed to get FileState, the syncing file's length may be " +
"incorrect if it is compressed", e);
}
switch (fileDiff.getDiffType()) {
case APPEND:
action.setActionType("copy");
action.getArgs().put("-dest", destPath);
if (rateLimiter != null) {
String strLen = fileDiff.getParameters().get("-length");
if (strLen != null) {
int appendLen = (int)(Long.valueOf(strLen) >> 20);
if (appendLen > 0) {
if (!rateLimiter.tryAcquire(appendLen)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Cancel Scheduling COPY action {} due to throttling.", actionInfo);
}
return ScheduleResult.RETRY;
}
}
}
}
break;
case DELETE:
action.setActionType("delete");
action.getArgs().put(HdfsAction.FILE_PATH, destPath);
break;
case RENAME:
action.setActionType("rename");
action.getArgs().put(HdfsAction.FILE_PATH, destPath);
// TODO scope check
String remoteDest = fileDiff.getParameters().get("-dest");
action.getArgs().put("-dest", remoteDest.replaceFirst(srcDir, destDir));
fileDiff.getParameters().remove("-dest");
break;
case METADATA:
action.setActionType("metadata");
action.getArgs().put(HdfsAction.FILE_PATH, destPath);
break;
default:
break;
}
// Put all parameters into args
action.getArgs().putAll(fileDiff.getParameters());
actionDiffMap.put(actionInfo.getActionId(), did);
if (!fileDiffMap.containsKey(did)) {
fileDiffMap.put(did, 1);
}
return ScheduleResult.SUCCESS;
}
  /** Returns the action names handled by this scheduler (only "sync"). */
  @Override
  public List<String> getSupportedActions() {
    return actions;
  }
private boolean isFileLocked(String path) {
if(fileLock.size() == 0) {
LOG.debug("File Lock is empty. Current path = {}", path);
}
if (fileLock.containsKey(path)) {
// File is locked
return true;
}
if (baseSyncQueue.containsKey(path)) {
// File is in base sync queue
return true;
}
if (!fileDiffChainMap.containsKey(path)) {
// File Chain is not ready
return true;
}
if (fileDiffChainMap.get(path).size() == 0) {
// File Chain is empty
return true;
}
return false;
}
public boolean requireWait(FileDiff fileDiff) {
for (FileDiff archiveDiff : fileDiffArchive) {
if (fileDiff.getDiffId() == archiveDiff.getDiffId()) {
break;
}
if (!FileDiffState.isTerminalState(archiveDiff.getState())) {
String fileDiffPath = fileDiff.getSrc().endsWith("/") ?
fileDiff.getSrc() : fileDiff.getSrc() + "/";
String archiveDiffPath = archiveDiff.getSrc().endsWith("/") ?
archiveDiff.getSrc() : archiveDiff.getSrc() + "/";
if (fileDiffPath.startsWith(archiveDiffPath) || archiveDiffPath.startsWith(fileDiffPath)) {
return true;
}
}
}
return false;
}
@Override
public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex)
throws IOException {
// check args
if (actionInfo.getArgs() == null) {
throw new IOException("No arguments for the action");
}
String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
LOG.debug("Submit file {} with lock {}", path, fileLock.keySet());
// If locked then false
if (!isFileLocked(path)) {
// Lock this file/chain to avoid conflict
fileLock.put(path, 0L);
return true;
}
throw new IOException("The submit file " + path + " is in use by another program or user");
}
@Override
public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
// Remove lock
FileDiff fileDiff = null;
if (actionInfo.isFinished()) {
try {
long did = actionDiffMap.get(actionInfo.getActionId());
// Remove for action diff map
if (actionDiffMap.containsKey(actionInfo.getActionId())) {
actionDiffMap.remove(actionInfo.getActionId());
}
if (fileDiffCache.containsKey(did)) {
fileDiff = fileDiffCache.get(did);
} else {
LOG.error("Duplicate sync action->[ {} ] is triggered", did);
return;
}
if (fileDiff == null) {
return;
}
if (actionInfo.isSuccessful()) {
fileDiffTerminated(fileDiff);
//update state in cache
updateFileDiffInCache(did, FileDiffState.APPLIED);
} else {
if (fileDiffMap.containsKey(did)) {
int curr = fileDiffMap.get(did);
if (curr >= retryTh) {
fileDiffTerminated(fileDiff);
//update state in cache
updateFileDiffInCache(did, FileDiffState.FAILED);
} else {
fileDiffMap.put(did, curr + 1);
// Unlock this file for retry
fileLock.remove(fileDiff.getSrc());
}
} else {
fileDiffTerminated(fileDiff);
updateFileDiffInCache(did, FileDiffState.FAILED);
}
}
} catch (MetaStoreException e) {
LOG.error("Mark sync action in metastore failed!", e);
} catch (Exception e) {
LOG.error("Sync action error", e);
}
}
}
public void fileDiffTerminated(FileDiff fileDiff) {
if (fileDiffChainMap.containsKey(fileDiff.getSrc())) {
// Remove chain top
fileDiffChainMap.get(fileDiff.getSrc()).removeHead();
}
// remove from fileDiffMap which is for retry use
if (fileDiffMap.containsKey(fileDiff.getDiffId())) {
fileDiffMap.remove(fileDiff.getDiffId());
}
}
public void fileDiffTerminatedInternal(FileDiff fileDiff) {
if (fileDiffChainMap.containsKey(fileDiff.getSrc())) {
// Remove the fileDiff from chain
fileDiffChainMap.get(fileDiff.getSrc()).removeFromChain(fileDiff);
}
// remove from fileDiffMap which is for retry use
if (fileDiffMap.containsKey(fileDiff.getDiffId())) {
fileDiffMap.remove(fileDiff.getDiffId());
}
}
private void batchDirectSync() throws MetaStoreException {
// Use 90% of check interval to batchSync
if (baseSyncQueue.size() == 0) {
return;
}
LOG.debug("Base Sync size = {}", baseSyncQueue.size());
List<FileDiff> batchFileDiffs = new ArrayList<>();
List<String> removed = new ArrayList<>();
FileDiff fileDiff;
int index = 0;
for (Iterator<Map.Entry<String, String>> it =
baseSyncQueue.entrySet().iterator(); it.hasNext(); ) {
if (index >= batchSize) {
break;
}
Map.Entry<String, String> entry = it.next();
fileDiff = directSync(entry.getKey(), entry.getValue());
if (fileDiff != null) {
batchFileDiffs.add(fileDiff);
}
removed.add(entry.getKey());
index++;
}
// Batch Insert
Long dids[] = metaStore.insertFileDiffs(batchFileDiffs);
for (int i = 0; i < dids.length; i++) {
batchFileDiffs.get(i).setDiffId(dids[i]);
}
fileDiffArchive.addAll(batchFileDiffs);
// Remove from baseSyncQueue
for (String src : removed) {
baseSyncQueue.remove(src);
}
}
private FileStatus[] listFileStatusesOfDirs(String dirName) {
FileSystem fs = null;
FileStatus[] tmpFileStatus = null;
List<FileStatus> returnStatus = new LinkedList<>();
try {
// We simply use local HDFS conf for getting remote file system.
// The smart file system configured for local HDFS should not be
// introduced to remote file system.
Configuration remoteConf = new Configuration(conf);
if (remoteConf.get(FS_HDFS_IMPL, "").equals(
SMART_FILE_SYSTEM)) {
remoteConf.set(FS_HDFS_IMPL, DISTRIBUTED_FILE_SYSTEM);
}
fs = FileSystem.get(URI.create(dirName), remoteConf);
tmpFileStatus = fs.listStatus(new Path(dirName));
for (FileStatus fileStatus : tmpFileStatus) {
if (!fileStatus.isDirectory()) {
returnStatus.add(fileStatus);
} else {
//all the file in this fileStatuses
FileStatus[] childFileStatuses = listFileStatusesOfDirs(fileStatus.getPath().getName());
if (childFileStatuses.length != 0) {
returnStatus.addAll(Arrays.asList(childFileStatuses));
}
}
}
} catch (IOException e) {
LOG.debug("Fetch remote file list error!", e);
}
if (returnStatus.size() == 0) {
return new FileStatus[0];
}
return returnStatus.toArray(new FileStatus[returnStatus.size()]);
}
  /**
   * Queues a base (full) sync of every file under srcDir to destDir, then
   * kicks off one batch immediately.
   *
   * Files that already exist remotely are queued for an incremental
   * (length-compare) sync; files missing remotely are queued with an
   * overwrite flag so they are copied from offset 0.
   *
   * NOTE(review): remote files are matched against local files by last path
   * component only (getName()), not by relative path — presumably the backup
   * layout is flat; verify this assumption for nested directories.
   */
  private void baseSync(String srcDir,
      String destDir) throws MetaStoreException {
    List<FileInfo> srcFiles = metaStore.getFilesByPrefix(srcDir);
    if (srcFiles.size() > 0) {
      LOG.info("Directory Base Sync {} files", srcFiles.size());
    }
    // <file name, fileInfo>
    Map<String, FileInfo> srcFileSet = new HashMap<>();
    for (FileInfo fileInfo : srcFiles) {
      // Remove prefix/parent
      srcFileSet.put(fileInfo.getPath().replaceFirst(srcDir, ""), fileInfo);
    }
    FileStatus[] fileStatuses = null;
    // recursively file lists
    fileStatuses = listFileStatusesOfDirs(destDir);
    if (fileStatuses.length == 0) {
      LOG.debug("Remote directory is empty!");
    } else {
      LOG.debug("Remote directory contains {} files!", fileStatuses.length);
      for (FileStatus fileStatus : fileStatuses) {
        // only get file name
        String destName = fileStatus.getPath().getName();
        if (srcFileSet.containsKey(destName)) {
          // File exists on both sides: queue an incremental sync and drop it
          // from the set of files still needing a full copy.
          FileInfo fileInfo = srcFileSet.get(destName);
          String src = fileInfo.getPath();
          String dest = src.replaceFirst(srcDir, destDir);
          baseSyncQueue.put(src, dest);
          srcFileSet.remove(destName);
        }
      }
    }
    // Whatever remains in srcFileSet does not exist remotely yet.
    LOG.debug("Directory Base Sync {} files", srcFileSet.size());
    for (FileInfo fileInfo : srcFileSet.values()) {
      if (fileInfo.isdir()) {
        // Ignore directory
        continue;
      }
      String src = fileInfo.getPath();
      String dest = src.replaceFirst(srcDir, destDir);
      baseSyncQueue.put(src, dest);
      // Overwrite flag forces directSync to copy from offset 0.
      overwriteQueue.put(src, true);
      // directSync(src, dest);
    }
    batchDirectSync();
  }
  /**
   * Builds a single APPEND file diff that brings the remote copy of src up
   * to date with the primary, superseding (marking MERGED) all pending
   * diffs for the file. Returns null when the primary file is gone, the
   * file is already syncing, or the remote copy is already up to date.
   *
   * Side effects: may insert a DELETE diff for a dirty (longer-than-source)
   * remote file, and temporarily holds the file lock while merging.
   */
  private FileDiff directSync(String src, String dest) throws MetaStoreException {
    FileInfo fileInfo = metaStore.getFile(src);
    if (fileInfo == null) {
      // Primary file doesn't exist
      return null;
    }
    if (fileLock.containsKey(src)) {
      // File is syncing
      return null;
    }
    // Lock file to avoid diff apply
    fileLock.put(src, 0L);
    // Mark all related diff in cache as Merged
    if (fileDiffChainMap.containsKey(src)) {
      fileDiffChainMap.get(src).markAllDiffs();
      fileDiffChainMap.remove(src);
      pushCacheToDB();
    }
    // Mark all related diff in metastore as Merged
    List<FileDiff> fileDiffs = metaStore.getFileDiffsByFileName(src);
    List<Long> dids = new ArrayList<>();
    for (FileDiff fileDiff : fileDiffs) {
      if (fileDiff.getState() == FileDiffState.PENDING) {
        dids.add(fileDiff.getDiffId());
      }
    }
    metaStore.batchUpdateFileDiff(dids, FileDiffState.MERGED);
    for (long did : dids) {
      updateFileDiffArchive(did, FileDiffState.MERGED);
    }
    // Unlock this file
    fileLock.remove(src);
    // Generate a new file diff
    FileDiff fileDiff;
    long offSet;
    if (overwriteQueue.containsKey(src)) {
      // Overwrite requested (file missing remotely): treat as non-existent.
      offSet = -1;
      overwriteQueue.remove(src);
    } else {
      // Otherwise start from the remote file's current length.
      offSet = fileCompare(fileInfo, dest);
    }
    if (offSet == -1) {
      // Remote file does not exist
      offSet = 0;
    } else if (offSet == fileInfo.getLength()) {
      // Remote copy is already complete; nothing to do.
      LOG.debug("Primary len={}, remote len={}", fileInfo.getLength(), offSet);
      return null;
    } else if (offSet > fileInfo.getLength()) {
      // Remove dirty remote file
      fileDiff = new FileDiff(FileDiffType.DELETE, FileDiffState.PENDING);
      fileDiff.setSrc(src);
      metaStore.insertFileDiff(fileDiff);
      offSet = 0;
    }
    // Copy tails to remote
    fileDiff = new FileDiff(FileDiffType.APPEND, FileDiffState.PENDING);
    fileDiff.setSrc(src);
    // Append changes to remote files
    fileDiff.getParameters()
        .put("-length", String.valueOf(fileInfo.getLength() - offSet));
    fileDiff.getParameters().put("-offset", String.valueOf(offSet));
    // Rule id -1 marks this as a scheduler-generated (not rule-driven) diff.
    fileDiff.setRuleId(-1);
    return fileDiff;
  }
private long fileCompare(FileInfo fileInfo,
String dest) throws MetaStoreException {
// Primary
long localLen = fileInfo.getLength();
// Get InputStream from URL
FileSystem fs = null;
// Get file statue from remote HDFS
try {
fs = FileSystem.get(URI.create(dest), conf);
FileStatus fileStatus = fs.getFileStatus(new Path(dest));
long remoteLen = fileStatus.getLen();
// TODO Add Checksum check
// Remote
return remoteLen;
} catch (IOException e) {
return -1;
}
}
/***
* add fileDiff to Cache, if diff is already in cache, then print error log
* @param fileDiff
* @throws MetaStoreException
*/
private void addDiffToCache(FileDiff fileDiff) throws MetaStoreException {
LOG.debug("Add FileDiff Cache into file_diff cache");
if (fileDiffCache.containsKey(fileDiff.getDiffId())) {
LOG.error("FileDiff {} already in cache!", fileDiff);
return;
}
fileDiffCache.put(fileDiff.getDiffId(), fileDiff);
}
private synchronized void updateFileDiffInCache(Long did,
FileDiffState fileDiffState) throws MetaStoreException {
LOG.debug("Update FileDiff");
if (!fileDiffCache.containsKey(did)) {
return;
}
FileDiff fileDiff = fileDiffCache.get(did);
fileDiff.setState(fileDiffState);
// Update
fileDiffCacheChanged.put(did, true);
fileDiffCache.put(did, fileDiff);
updateFileDiffArchive(did, fileDiffState);
if (fileDiffCacheChanged.size() >= cacheSyncTh) {
// update
pushCacheToDB();
}
if (FileDiffState.isUselessFileDiff(fileDiffState)) {
numFileDiffUseless.incrementAndGet();
}
}
private synchronized void updateFileDiffArchive(long did, FileDiffState state) {
for (FileDiff diff : fileDiffArchive) {
if (diff.getDiffId() == did) {
diff.setState(state);
}
}
}
/***
* delete cache and remove file lock if necessary
* @param did
*/
private void deleteDiffInCache(Long did) {
LOG.debug("Delete FileDiff in cache");
if (fileDiffCache.containsKey(did)) {
FileDiff fileDiff = fileDiffCache.get(did);
fileDiffCache.remove(did);
fileDiffCacheChanged.remove(did);
// Remove file lock
if (fileLock.containsKey(fileDiff.getSrc())) {
fileLock.remove(fileDiff.getSrc());
}
}
}
private synchronized void pushCacheToDB() throws MetaStoreException {
List<FileDiff> updatedFileDiffs = new ArrayList<>();
List<Long> needDel = new ArrayList<>();
FileDiff fileDiff;
// Only check changed cache rather than full cache
for (Long did: fileDiffCacheChanged.keySet()) {
fileDiff = fileDiffCache.get(did);
if (fileDiff == null) {
needDel.add(did);
continue;
}
updatedFileDiffs.add(fileDiff);
if (FileDiffState.isTerminalState(fileDiff.getState())) {
needDel.add(did);
}
}
// Push cache to metastore
if (updatedFileDiffs.size() != 0) {
LOG.debug("Push FileDiff from cache to metastore");
metaStore.updateFileDiff(updatedFileDiffs);
}
// Remove file diffs in cache and file lock
for (long did : needDel) {
deleteDiffInCache(did);
}
}
  @Override
  public void init() throws IOException {
    // Nothing to initialize; the periodic tasks are registered in start().
  }
@Override
public void start() throws IOException {
executorService.scheduleAtFixedRate(
new CopyScheduler.ScheduleTask(), 0, checkInterval,
TimeUnit.MILLISECONDS);
// The PurgeFileDiffTask runs in the period of 1800s
executorService.scheduleAtFixedRate(
new PurgeFileDiffTask(conf), 0, 1800, TimeUnit.SECONDS);
}
  @Override
  public void stop() throws IOException {
    // Flush any files still queued for base sync before shutting down.
    try {
      batchDirectSync();
    } catch (MetaStoreException e) {
      throw new IOException(e);
    }
    executorService.shutdown();
  }
private boolean fileExistOnStandby(String filePath) {
// TODO Need to be more general to handle failure
try {
// Check if file exists at standby cluster
FileSystem fs = FileSystem.get(URI.create(filePath), conf);
return fs.exists(new Path(filePath));
} catch (IOException e) {
LOG.debug("Fetch remote file status fails!", e);
return false;
}
}
private class ScheduleTask implements Runnable {
private void syncFileDiff() {
List<FileDiff> pendingDiffs = null;
try {
pushCacheToDB();
pendingDiffs = metaStore.getPendingDiff();
diffPreProcessing(pendingDiffs);
} catch (MetaStoreException e) {
LOG.error("Sync fileDiffs error", e);
}
}
    /**
     * Archives the pending diffs, then merges each into its file's diff
     * chain. A BASESYNC diff triggers a directory base sync and aborts
     * processing of the remaining diffs for this round — presumably they
     * are picked up on the next scheduler pass; verify this is intended.
     */
    private void diffPreProcessing(
        List<FileDiff> fileDiffs) throws MetaStoreException {
      for (FileDiff fileDiff: fileDiffs) {
        addToFileDiffArchive(fileDiff);
      }
      // Merge all existing fileDiffs into fileChains
      LOG.debug("Size of Pending diffs {}", fileDiffs.size());
      if (fileDiffs.size() == 0 && baseSyncQueue.size() == 0) {
        LOG.debug("All Backup directories are synced");
        return;
      }
      for (FileDiff fileDiff : fileDiffs) {
        if (fileDiff.getDiffType() == FileDiffType.BASESYNC) {
          // The base sync supersedes this diff; mark it merged everywhere.
          metaStore.updateFileDiff(fileDiff.getDiffId(), FileDiffState.MERGED);
          updateFileDiffArchive(fileDiff.getDiffId(), FileDiffState.MERGED);
          baseSync(fileDiff.getSrc(), fileDiff.getParameters().get("-dest"));
          return;
        }
        FileChain fileChain;
        String src = fileDiff.getSrc();
        // Skip diff in cache
        if (fileDiffCache.containsKey(fileDiff.getDiffId())) {
          continue;
        }
        if (baseSyncQueue.containsKey(fileDiff.getSrc())) {
          // Will be directly sync
          continue;
        }
        // Get or create fileChain
        if (fileDiffChainMap.containsKey(src)) {
          fileChain = fileDiffChainMap.get(src);
        } else {
          fileChain = new FileChain(src);
          fileDiffChainMap.put(src, fileChain);
        }
        fileChain.addToChain(fileDiff);
      }
    }
/**
 * Adds a diff to the archive unless an entry with the same id already
 * exists, then evicts terminal-state entries while over capacity.
 */
private void addToFileDiffArchive(FileDiff newFileDiff) {
  // Deduplicate by diff id.
  for (FileDiff archived : fileDiffArchive) {
    if (archived.getDiffId() == newFileDiff.getDiffId()) {
      return;
    }
  }
  fileDiffArchive.add(newFileDiff);
  // Evict finished (terminal-state) entries from the front while the
  // archive is over capacity, scanning at most fileDiffArchiveSize slots.
  int pos = 0;
  while (fileDiffArchive.size() > fileDiffArchiveSize && pos < fileDiffArchiveSize) {
    if (FileDiffState.isTerminalState(fileDiffArchive.get(pos).getState())) {
      fileDiffArchive.remove(pos);
    } else {
      pos++;
    }
  }
}
@Override
public void run() {
  // Broad catch on purpose: an uncaught exception would cancel all future
  // executions of this scheduled task, so log and keep the schedule alive.
  try {
    batchDirectSync();
    syncFileDiff();
    // addToRunning();
  } catch (Exception scheduleError) {
    LOG.error("CopyScheduler Run Error", scheduleError);
  }
}
// One chain of not-yet-applied diffs for a single file: APPENDs accumulate
// for merging, RENAMEs extend the name trace, DELETE discards earlier work.
private class FileChain {
  // Current append length in chain
  // (bytes accumulated by un-merged APPEND diffs; reset on merge)
  private long currAppendLength;
  // Current file path/name
  private String filePath;
  // file diff id
  // (ids of ALL diffs in arrival order)
  private List<Long> diffChain;
  // append file diff id
  // (subset of diffChain: APPEND diffs awaiting merge)
  private List<Long> appendChain;
  // file name change trace
  // (first entry is the original path; renames append to it)
  private List<String> nameChain;
  FileChain() {
    this.diffChain = new ArrayList<>();
    this.appendChain = new ArrayList<>();
    this.nameChain = new ArrayList<>();
    this.currAppendLength = 0;
  }
  FileChain(String filePath) {
    this();
    this.filePath = filePath;
    // The original path seeds the rename trace.
    this.nameChain.add(filePath);
  }
  public String getFilePath() {
    return filePath;
  }
  public void setFilePath(String filePath) {
    this.filePath = filePath;
  }
  public List<Long> getDiffChain() {
    return diffChain;
  }
  public void setDiffChain(List<Long> diffChain) {
    this.diffChain = diffChain;
  }
  // Number of diffs currently held in this chain.
  public int size() {
    return diffChain.size();
  }
/**
 * Routes one incoming diff into this chain by type.
 *
 * APPEND: may trigger a merge first (offset 0 restarts the file; size or
 * count thresholds compact the chain), then joins both chains.
 * RENAME: merged if the file is already synced, otherwise the rename is
 * failed and stale remote copies are scheduled for deletion.
 * DELETE: compacts the chain. Anything else is treated as metadata.
 */
void addToChain(FileDiff fileDiff) throws MetaStoreException {
  addDiffToCache(fileDiff);
  long did = fileDiff.getDiffId();
  if (fileDiff.getDiffType() == FileDiffType.APPEND) {
    String offset = fileDiff.getParameters().get("-offset");
    // Offset 0 with existing diffs means the file was rewritten from
    // scratch: invalidate everything queued so far.
    if (offset != null && offset.equals("0") && diffChain.size() != 0) {
      markAllDiffs();
    }
    if (currAppendLength >= mergeLenTh ||
        appendChain.size() >= mergeCountTh) {
      mergeAppend();
    }
    // Add Append to Append Chain
    appendChain.add(did);
    // Increase Append length
    currAppendLength +=
        Long.valueOf(fileDiff.getParameters().get("-length"));
    diffChain.add(did);
  } else if (fileDiff.getDiffType() == FileDiffType.RENAME) {
    if (isRenameSyncedFile(fileDiff)) {
      // Add New Name to Name Chain
      mergeRename(fileDiff);
    } else {
      fileDiffTerminatedInternal(fileDiff);
      // discard rename file diff due to not synced
      updateFileDiffInCache(fileDiff.getDiffId(), FileDiffState.FAILED);
      discardDirtyData(fileDiff);
    }
  } else if (fileDiff.getDiffType() == FileDiffType.DELETE) {
    mergeDelete(fileDiff);
  } else {
    // Metadata
    diffChain.add(did);
  }
}
/**
 * Cleans up after a rename that could not be applied remotely: for every
 * backup destination that may hold a stale copy of the source, insert a
 * pending DELETE diff targeting the mapped destination path.
 */
void discardDirtyData(FileDiff fileDiff) throws MetaStoreException {
  // Clean dirty data
  List<BackUpInfo> backUpInfos = metaStore.getBackUpInfoBySrc(fileDiff.getSrc());
  for (BackUpInfo backUpInfo : backUpInfos) {
    FileDiff deleteFileDiff = new FileDiff(FileDiffType.DELETE, FileDiffState.PENDING);
    // use the rename file diff's src as delete file diff src
    deleteFileDiff.setSrc(fileDiff.getSrc());
    // Map the source prefix onto the backup destination. Quote both sides:
    // String.replaceFirst interprets its first argument as a regex, so a
    // path containing metacharacters (e.g. '.', '$') would otherwise be
    // matched as a pattern and could produce a wrong destination path.
    String destPath = deleteFileDiff.getSrc().replaceFirst(
        java.util.regex.Pattern.quote(backUpInfo.getSrc()),
        java.util.regex.Matcher.quoteReplacement(backUpInfo.getDest()));
    //put sync's dest path in parameter for delete use
    deleteFileDiff.getParameters().put("-dest", destPath);
    long did = metaStore.insertFileDiff(deleteFileDiff);
    deleteFileDiff.setDiffId(did);
    fileDiffArchive.add(deleteFileDiff);
  }
}
/**
 * Collapses the chain of pending APPEND diffs into the last one: marks the
 * earlier appends APPLIED and rewrites the final diff's -offset/-length to
 * cover the whole contiguous range. Stops early at the first
 * non-contiguous append ("dirty" offset).
 *
 * NOTE(review): if no diff qualifies (lastAppend == -1) the method returns
 * without removing filePath from fileLock, leaving the file locked —
 * confirm whether that is intentional.
 */
@VisibleForTesting
void mergeAppend() throws MetaStoreException {
  if (fileLock.containsKey(filePath)) {
    return;
  }
  LOG.debug("Append Merge Triggered!");
  // Lock file to avoid File Chain being processed
  fileLock.put(filePath, -1L);
  long offset = Integer.MAX_VALUE;
  long totalLength = 0;
  long lastAppend = -1;
  for (long did : appendChain) {
    FileDiff fileDiff = fileDiffCache.get(did);
    if (fileDiff != null && fileDiff.getState() != FileDiffState.APPLIED) {
      long currOffset =
          Long.valueOf(fileDiff.getParameters().get("-offset"));
      // Track the smallest offset seen: the merged range starts there.
      if (offset > currOffset) {
        offset = currOffset;
      }
      if (currOffset != offset && currOffset != totalLength + offset) {
        // Check offset and length to avoid dirty append
        break;
      }
      updateFileDiffInCache(did, FileDiffState.APPLIED);
      // Add current file length to length
      totalLength +=
          Long.valueOf(fileDiff.getParameters().get("-length"));
      lastAppend = did;
    }
  }
  if (lastAppend == -1) {
    return;
  }
  // The last merged diff becomes the carrier for the whole range.
  FileDiff fileDiff = fileDiffCache.get(lastAppend);
  fileDiff.getParameters().put("-offset", "" + offset);
  fileDiff.getParameters().put("-length", "" + totalLength);
  // Update fileDiff in metastore
  fileDiffCacheChanged.put(fileDiff.getDiffId(), true);
  // Unlock file
  fileLock.remove(filePath);
  currAppendLength = 0;
  appendChain.clear();
}
@VisibleForTesting
void mergeDelete(FileDiff fileDiff) throws MetaStoreException {
// LOG.debug("Delete Merge Triggered!");
// for (long did : appendChain) {
// FileDiff diff = fileDiffCache.get(did);
// fileDiffTerminatedInternal(diff);
// updateFileDiffInCache(did, FileDiffState.APPLIED);
// }
// appendChain.clear();
for (FileDiff archiveDiff : fileDiffArchive) {
if (archiveDiff.getDiffId() == fileDiff.getDiffId()) {
break;
}
if (FileDiffState.isTerminalState(archiveDiff.getState())) {
continue;
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | true |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyTargetTask.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyTargetTask.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
/**
 * One unit of copy work: copy {@code length} bytes starting at
 * {@code offset} from {@code source} into {@code dest}.
 */
public class CopyTargetTask {
  private String dest;
  private String source;
  private long offset;
  private long length;

  public CopyTargetTask(String dest, String source, long offset, long length) {
    this.dest = dest;
    this.source = source;
    this.offset = offset;
    this.length = length;
  }

  public String getSource() {
    return source;
  }

  public void setSource(String source) {
    this.source = source;
  }

  public String getDest() {
    return dest;
  }

  public void setDest(String dest) {
    this.dest = dest;
  }

  public long getOffset() {
    return offset;
  }

  public void setOffset(long offset) {
    this.offset = offset;
  }

  public long getLength() {
    return length;
  }

  public void setLength(long length) {
    this.length = length;
  }

  @Override
  public String toString() {
    // Same rendering as the hand-concatenated original.
    return String.format(
        "CopyTargetTask{dest='%s', source='%s', offset=%d, length=%d}",
        dest, source, offset, length);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CompressionScheduler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CompressionScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.apache.hadoop.hdfs.DFSClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.action.CompressionAction;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.hdfs.action.DecompressionAction;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CompressionFileInfo;
import org.smartdata.model.FileState;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.smartdata.model.ActionInfo.OLD_FILE_ID;
/**
* A scheduler for compression/decompression action.
*/
public class CompressionScheduler extends ActionSchedulerService {
private DFSClient dfsClient;
private MetaStore metaStore;
public static final String COMPRESSION_ACTION_ID =
CompressionAction.class.getAnnotation(ActionSignature.class).actionId();
public static final String DECOMPRESSION_ACTION_ID =
DecompressionAction.class.getAnnotation(ActionSignature.class).actionId();
public static final List<String> actions = Arrays.asList(COMPRESSION_ACTION_ID,
DECOMPRESSION_ACTION_ID);
public static String COMPRESS_DIR;
public static final String COMPRESS_TMP = CompressionAction.COMPRESS_TMP;
public static final String COMPRESS_TMP_DIR = "compress_tmp/";
private SmartConf conf;
private Set<String> fileLock;
public static final Logger LOG =
LoggerFactory.getLogger(CompressionScheduler.class);
public CompressionScheduler(SmartContext context, MetaStore metaStore)
throws IOException {
super(context, metaStore);
this.conf = context.getConf();
this.metaStore = metaStore;
String ssmWorkDir = conf.get(
SmartConfKeys.SMART_WORK_DIR_KEY, SmartConfKeys.SMART_WORK_DIR_DEFAULT);
CompressionScheduler.COMPRESS_DIR =
new File(ssmWorkDir, COMPRESS_TMP_DIR).getAbsolutePath();
this.fileLock = new HashSet<>();
}
@Override
public void init() throws IOException {
try {
final URI nnUri = HadoopUtil.getNameNodeUri(getContext().getConf());
dfsClient = HadoopUtil.getDFSClient(nnUri, getContext().getConf());
} catch (IOException e) {
LOG.warn("Failed to create dfsClient.");
}
}
@Override
public void start() throws IOException {
}
@Override
public void stop() throws IOException {
}
@Override
public void recover(ActionInfo actionInfo) {
if (!actionInfo.getActionName().equals(COMPRESSION_ACTION_ID) &&
!actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
return;
}
String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
fileLock.add(filePath);
}
@Override
public List<String> getSupportedActions() {
return actions;
}
/**
* Check if the file type support compression action.
*
* @param path
* @return true if the file supports compression action, else false
*/
public boolean supportCompression(String path) throws MetaStoreException, IOException {
if (path == null) {
LOG.warn("File path is not specified.");
return false;
}
if (dfsClient.getFileInfo(path).isDir()) {
LOG.warn("Compression is not applicable to a directory.");
return false;
}
// Current implementation: only normal file type supports compression action
FileState fileState = metaStore.getFileState(path);
if (fileState.getFileType().equals(FileState.FileType.NORMAL)
&& fileState.getFileStage().equals(FileState.FileStage.DONE)) {
return true;
}
LOG.debug("File " + path + " doesn't support compression action. "
+ "Type: " + fileState.getFileType() + "; Stage: " + fileState.getFileStage());
return false;
}
public boolean supportDecompression(String path) throws MetaStoreException, IOException {
if (path == null) {
LOG.warn("File path is not specified!");
return false;
}
// Exclude directory case
if (dfsClient.getFileInfo(path).isDir()) {
LOG.warn("Decompression is not applicable to a directory.");
return false;
}
FileState fileState = metaStore.getFileState(path);
if (fileState instanceof CompressionFileState) {
return true;
}
LOG.debug("A compressed file path should be given!");
return false;
}
private String createTmpName(LaunchAction action) {
String path = action.getArgs().get(HdfsAction.FILE_PATH);
String fileName;
int index = path.lastIndexOf("/");
if (index == path.length() - 1) {
index = path.substring(0, path.length() - 1).indexOf("/");
fileName = path.substring(index + 1, path.length() - 1);
} else {
fileName = path.substring(index + 1, path.length());
}
/**
* The dest tmp file is under COMPRESSION_DIR and
* named by fileName, aidxxx and current time in millisecond with "_" separated
*/
String tmpName = fileName + "_" + "aid" + action.getActionId() +
"_" + System.currentTimeMillis();
return tmpName;
}
@Override
public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo,
int actionIndex) {
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
if (!actions.contains(actionInfo.getActionName())) {
return false;
}
if (fileLock.contains(srcPath)) {
return false;
}
try {
if (actionInfo.getActionName().equals(COMPRESSION_ACTION_ID) &&
!supportCompression(srcPath)) {
return false;
}
if (actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID) &&
!supportDecompression(srcPath)) {
return false;
}
// TODO remove this part
CompressionFileState fileState = new CompressionFileState(srcPath,
FileState.FileStage.PROCESSING);
metaStore.insertUpdateFileState(fileState);
return true;
} catch (MetaStoreException e) {
LOG.error("Failed to submit action due to metastore exception!", e);
return false;
} catch (IOException e) {
LOG.error(e.getMessage());
return false;
}
}
@Override
public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
// For compression, add compressTmp argument. This arg is assigned by CompressionScheduler
// and persisted to MetaStore for easily debugging.
String tmpName = createTmpName(action);
action.getArgs().put(COMPRESS_TMP, new File(COMPRESS_DIR, tmpName).getAbsolutePath());
actionInfo.getArgs().put(COMPRESS_TMP, new File(COMPRESS_DIR, tmpName).getAbsolutePath());
afterSchedule(actionInfo);
return ScheduleResult.SUCCESS;
}
public void afterSchedule(ActionInfo actionInfo) {
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
// lock the file only if ec or unec action is scheduled
fileLock.add(srcPath);
try {
setOldFileId(actionInfo);
} catch (Throwable t) {
// We think it may not be a big issue, so just warn user this issue.
LOG.warn("Failed in maintaining old fid for taking over old data's temperature.");
}
}
/**
* Speculate action status and set result accordingly.
*/
@Override
public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
try {
FileState fileState = HadoopUtil.getFileState(dfsClient, path);
FileState.FileType fileType = fileState.getFileType();
if (actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
return fileType == FileState.FileType.NORMAL;
}
// Recover action result for successful compress action.
if (fileType == FileState.FileType.COMPRESSION) {
CompressionFileInfo compressionFileInfo =
new CompressionFileInfo((CompressionFileState) fileState);
actionInfo.setResult(new Gson().toJson(compressionFileInfo));
return true;
}
return false;
} catch (IOException e) {
LOG.warn("Failed to get file state, suppose this action was not " +
"successfully executed: {}", actionInfo.toString());
return false;
}
}
/**
* Set old file id which will be persisted into DB. For action status
* recovery case, the old file id can be acquired for taking over old file's
* data temperature.
*/
private void setOldFileId(ActionInfo actionInfo) throws IOException {
if (actionInfo.getArgs().get(OLD_FILE_ID) != null &&
!actionInfo.getArgs().get(OLD_FILE_ID).isEmpty()) {
return;
}
List<Long> oids = new ArrayList<>();
String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
try {
oids.add(dfsClient.getFileInfo(path).getFileId());
} catch (IOException e) {
LOG.warn("Failed to set old fid for taking over data temperature!");
throw e;
}
actionInfo.setOldFileIds(oids);
}
@Override
public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
if (!actionInfo.isFinished()) {
return;
}
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
try {
// Compression Action failed
if (actionInfo.getActionName().equals(COMPRESSION_ACTION_ID) &&
!actionInfo.isSuccessful()) {
// TODO: refactor FileState in order to revert to original state if action failed
// Currently only converting from normal file to other types is supported, so
// when action failed, just remove the record of this file from metastore.
// In current implementation, no record in FileState table means the file is normal type.
metaStore.deleteFileState(srcPath);
return;
}
// Action execution is successful.
if (actionInfo.getActionName().equals(COMPRESSION_ACTION_ID)) {
onCompressActionFinished(actionInfo);
}
if (actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
onDecompressActionFinished(actionInfo);
}
// Take over access count after successful execution.
takeOverAccessCount(actionInfo);
} catch (MetaStoreException e) {
LOG.error("Compression action failed in metastore!", e);
} catch (Exception e) {
LOG.error("Compression action error", e);
} finally {
// Remove the record as long as the action is finished.
fileLock.remove(srcPath);
}
}
/**
* In rename case, the fid of renamed file is not changed. But sometimes, we need
* to keep old file's access count and let new file takes over this metric. E.g.,
* with (un)EC/(de)Compress/(un)Compact action, a new file will overwrite the old file.
*/
public void takeOverAccessCount(ActionInfo actionInfo) {
try {
String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
long oldFid = actionInfo.getOldFileIds().get(0);
// The new fid may have not been updated in metastore, so
// we get it from dfs client.
long newFid = dfsClient.getFileInfo(filePath).getFileId();
metaStore.updateAccessCountTableFid(oldFid, newFid);
} catch (Exception e) {
LOG.warn("Failed to take over file access count for all tables, " +
"which may make the measurement for data temperature inaccurate!",
e.getMessage());
}
}
private void onCompressActionFinished(ActionInfo actionInfo)
throws MetaStoreException {
if (!actionInfo.getActionName().equals(COMPRESSION_ACTION_ID)) {
return;
}
Gson gson = new Gson();
String compressionInfoJson = actionInfo.getResult();
CompressionFileInfo compressionFileInfo = gson.fromJson(compressionInfoJson,
new TypeToken<CompressionFileInfo>() {
}.getType());
if (compressionFileInfo == null) {
LOG.error("CompressionFileInfo should NOT be null after successful " +
"execution!");
return;
}
CompressionFileState compressionFileState =
compressionFileInfo.getCompressionFileState();
compressionFileState.setFileStage(FileState.FileStage.DONE);
// Update metastore and then replace file with compressed one
metaStore.insertUpdateFileState(compressionFileState);
}
private void onDecompressActionFinished(ActionInfo actionInfo)
throws MetaStoreException {
if (!actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
return;
}
// Delete the record from compression_file table
metaStore.deleteFileState(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
}
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MovePlanStatistics.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MovePlanStatistics.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
/**
 * Running totals for a move plan: number of blocks and total byte size.
 * Not thread-safe.
 */
public class MovePlanStatistics {
  private long totalBlocks;
  private long totalSize;

  /** Creates empty statistics (zero blocks, zero bytes). */
  public MovePlanStatistics() {
    this(0, 0);
  }

  /** Creates statistics pre-populated with the given totals. */
  public MovePlanStatistics(long totalBlocks, long totalSize) {
    this.totalBlocks = totalBlocks;
    this.totalSize = totalSize;
  }

  /** Adds {@code numBlocks} to the block total. */
  public void increaseTotalBlocks(int numBlocks) {
    totalBlocks += numBlocks;
  }

  /** Adds {@code size} bytes to the size total. */
  public void increaseTotalSize(long size) {
    totalSize += size;
  }

  public long getTotalBlocks() {
    return totalBlocks;
  }

  public long getTotalSize() {
    return totalSize;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ActionSchedulerService.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ActionSchedulerService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
import org.smartdata.AbstractService;
import org.smartdata.SmartContext;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.action.ActionScheduler;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.IOException;
/**
 * Base class for action schedulers that are also SSM services: combines the
 * {@code AbstractService} lifecycle with no-op default implementations of
 * every {@code ActionScheduler} callback, so subclasses override only the
 * hooks they need.
 */
public abstract class ActionSchedulerService extends AbstractService implements ActionScheduler {
  // Kept for subclass use; not read by this base class itself.
  private MetaStore metaStore;
  public ActionSchedulerService(SmartContext context, MetaStore metaStore) {
    super(context);
    this.metaStore = metaStore;
  }
  // Default: accept every submitted action.
  @Override
  public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex)
      throws IOException {
    return true;
  }
  // Default: schedule every action successfully, unchanged.
  @Override
  public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
      LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
    return ScheduleResult.SUCCESS;
  }
  // Default: no post-scheduling work.
  @Override
  public void postSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex,
      ScheduleResult result) {
  }
  // Default: no pre-dispatch work.
  @Override
  public void onPreDispatch(LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
  }
  // Default: no completion work.
  @Override
  public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
  }
  // Default: never speculate success for an unverified action.
  @Override
  public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
    return false;
  }
  // Default: nothing to restore on restart.
  @Override
  public void recover(ActionInfo actionInfo) {
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CacheScheduler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CacheScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Scheduler for HDFS cache/uncache actions. Serializes actions per file via
 * an in-memory lock set and ensures the SSM cache pool exists.
 */
public class CacheScheduler extends ActionSchedulerService {
  private static final Logger LOG =
      LoggerFactory.getLogger(CacheScheduler.class);

  public static final String CACHE_ACTION = "cache";
  public static final String UNCACHE_ACTION = "uncache";
  public static final List<String> ACTIONS = Arrays.asList(CACHE_ACTION, UNCACHE_ACTION);
  public static final String SSM_POOL = "SSMPool";

  // Paths with an in-flight cache/uncache action.
  private Set<String> fileLock;
  private DFSClient dfsClient;
  private static boolean isCachePoolCreated;

  public CacheScheduler(SmartContext context, MetaStore metaStore) {
    super(context, metaStore);
    fileLock = new HashSet<>();
    isCachePoolCreated = false;
  }

  @Override
  public List<String> getSupportedActions() {
    return ACTIONS;
  }

  /** True when the action's target file already has a cache op in flight. */
  public boolean isLocked(ActionInfo actionInfo) {
    return fileLock.contains(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
  }

  @VisibleForTesting
  public Set<String> getFileLock() {
    return fileLock;
  }

  @Override
  public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
    // Reject the submit while the same file is being cached/uncached.
    return !isLocked(actionInfo);
  }

  @Override
  public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
      LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
    fileLock.add(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
    return ScheduleResult.SUCCESS;
  }

  @Override
  public void init() throws IOException {
    try {
      URI nnUri = HadoopUtil.getNameNodeUri(getContext().getConf());
      dfsClient = HadoopUtil.getDFSClient(nnUri, getContext().getConf());
    } catch (IOException e) {
      LOG.warn("Failed to create dfsClient! Cache action will not work!", e);
      return;
    }
    if (!isCachePoolCreated) {
      createCachePool(this.dfsClient);
      isCachePoolCreated = true;
    }
  }

  /**
   * For cache acton that going through cache scheduler, SSM cache pool will
   * be checked and created.
   * @param dfsClient client used to list and create cache pools
   * @throws IOException on RPC failure
   */
  public static void createCachePool(DFSClient dfsClient) throws IOException {
    RemoteIterator<CachePoolEntry> pools = dfsClient.listCachePools();
    while (pools.hasNext()) {
      // Pool already present: nothing to do.
      if (SSM_POOL.equals(pools.next().getInfo().getPoolName())) {
        return;
      }
    }
    dfsClient.addCachePool(new CachePoolInfo(SSM_POOL));
  }

  @Override
  public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
    if (!ACTIONS.contains(actionInfo.getActionName())) {
      return;
    }
    if (isLocked(actionInfo)) {
      fileLock.remove(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
    }
  }

  @Override
  public void start() throws IOException {
  }

  @Override
  public void stop() throws IOException {
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/Copy2S3Scheduler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/Copy2S3Scheduler.java | package org.smartdata.hdfs.scheduler;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.FileState;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.S3FileState;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Scheduler for the copy2s3 action: validates each submitted file (exists,
 * non-empty, not already on S3), locks it for the duration of the copy, and
 * records the S3 file state on success.
 */
public class Copy2S3Scheduler extends ActionSchedulerService {
  private static final List<String> actions = Arrays.asList("copy2s3");
  static final Logger LOG =
      LoggerFactory.getLogger(Copy2S3Scheduler.class);
  private MetaStore metaStore;
  //The file in copy need to be locked
  private Set<String> fileLock;
  // Global variables
  private Configuration conf;

  public Copy2S3Scheduler(SmartContext context, MetaStore metaStore) {
    super(context, metaStore);
    this.metaStore = metaStore;
    this.fileLock = Collections.synchronizedSet(new HashSet<String>());
    try {
      this.conf = getContext().getConf();
    } catch (NullPointerException e) {
      // If SmartContext is empty
      this.conf = new Configuration();
    }
  }

  private void lockTheFile(String filePath) {
    fileLock.add(filePath);
  }

  private void unLockTheFile(String filePath) {
    fileLock.remove(filePath);
  }

  private boolean ifLocked(String filePath) {
    return fileLock.contains(filePath);
  }

  /**
   * Returns the file's length from the metastore, or 0 when the file is
   * unknown or the lookup fails (both cause onSubmit to reject it).
   */
  private long checkTheLengthOfFile(String fileName) {
    try {
      // Guard against a missing record: getFile returns null for an
      // unknown path, which previously caused an NPE here.
      if (metaStore.getFile(fileName) == null) {
        return 0;
      }
      return metaStore.getFile(fileName).getLength();
    } catch (MetaStoreException e) {
      // Log through the scheduler's logger instead of printStackTrace().
      LOG.error("Failed to get the length of file " + fileName, e);
    }
    return 0;
  }

  /** True when the metastore already records the file as living on S3. */
  private boolean isOnS3(String fileName) {
    try {
      return metaStore.getFileState(fileName)
          .getFileType().getValue() == FileState.FileType.S3.getValue();
    } catch (MetaStoreException e) {
      // No usable state record: treat as not on S3.
      return false;
    }
  }

  @Override
  public List<String> getSupportedActions() {
    return actions;
  }

  @Override
  public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex)
      throws IOException {
    // check args
    if (actionInfo.getArgs() == null) {
      throw new IOException("No arguments for the action");
    }
    String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
    if (ifLocked(path)) {
      throw new IOException("The submit file " + path + " is locked");
    }
    if (checkTheLengthOfFile(path) == 0) {
      throw new IOException("The submit file " + path + " length is 0");
    }
    if (isOnS3(path)) {
      throw new IOException("The submit file " + path + " is already copied");
    }
    lockTheFile(path);
    LOG.debug("The file {} can be submitted", path);
    return true;
  }

  @Override
  public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
    String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
    if (actionInfo.isFinished() && actionInfo.isSuccessful()) {
      // Insert fileState
      try {
        metaStore.insertUpdateFileState(new S3FileState(path));
      } catch (MetaStoreException e) {
        LOG.error("Failed to insert file state.", e);
      }
    }
    // unlock filelock
    if (ifLocked(path)) {
      unLockTheFile(path);
      LOG.debug("unlocked copy2s3 file {}", path);
    }
  }

  @Override
  public void init() throws IOException {
  }

  @Override
  public void start() throws IOException {
  }

  @Override
  public void stop() throws IOException {
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ErasureCodingScheduler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ErasureCodingScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.scheduler;
import com.google.common.util.concurrent.RateLimiter;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.CompatibilityHelper;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.action.*;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.FileInfo;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.LaunchCmdlet;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.smartdata.model.ActionInfo.OLD_FILE_ID;
public class ErasureCodingScheduler extends ActionSchedulerService {
public static final Logger LOG = LoggerFactory.getLogger(ErasureCodingScheduler.class);
public static final String EC_ACTION_ID = "ec";
public static final String UNEC_ACTION_ID = "unec";
public static final String CHECK_EC_ACTION_ID = "checkec";
public static final String LIST_EC_ACTION_ID = "listec";
public static final List<String> actions =
Arrays.asList(EC_ACTION_ID, UNEC_ACTION_ID, CHECK_EC_ACTION_ID, LIST_EC_ACTION_ID);
public static String EC_DIR;
public static final String EC_TMP_DIR = "ec_tmp/";
public static final String EC_TMP = "-ecTmp";
public static final String EC_POLICY = "-policy";
private Set<String> fileLock;
private SmartConf conf;
private MetaStore metaStore;
private long throttleInMb;
private RateLimiter rateLimiter;
private DFSClient dfsClient;
public ErasureCodingScheduler(SmartContext context, MetaStore metaStore) {
super(context, metaStore);
this.conf = context.getConf();
this.metaStore = metaStore;
this.throttleInMb = conf.getLong(SmartConfKeys.SMART_ACTION_EC_THROTTLE_MB_KEY,
SmartConfKeys.SMART_ACTION_EC_THROTTLE_MB_DEFAULT);
if (this.throttleInMb > 0) {
this.rateLimiter = RateLimiter.create(throttleInMb);
}
String ssmTmpDir = conf.get(
SmartConfKeys.SMART_WORK_DIR_KEY, SmartConfKeys.SMART_WORK_DIR_DEFAULT);
ssmTmpDir = ssmTmpDir + (ssmTmpDir.endsWith("/") ? "" : "/");
ErasureCodingScheduler.EC_DIR = ssmTmpDir + EC_TMP_DIR;
fileLock = new HashSet<>();
}
public List<String> getSupportedActions() {
return actions;
}
public void init() throws IOException {
fileLock.clear();
try {
final URI nnUri = HadoopUtil.getNameNodeUri(getContext().getConf());
dfsClient = HadoopUtil.getDFSClient(nnUri, getContext().getConf());
} catch (IOException e) {
LOG.warn("Failed to create dfsClient.");
}
}
@Override
public void start() throws IOException {
}
@Override
public void stop() throws IOException {
}
@Override
public void recover(ActionInfo actionInfo) {
if (!actionInfo.getActionName().equals(EC_ACTION_ID) &&
!actionInfo.getActionName().equals(UNEC_ACTION_ID)) {
return;
}
String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
fileLock.add(filePath);
}
@Override
public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex)
throws IOException {
if (!isECSupported()) {
throw new IOException(actionInfo.getActionName() +
" is not supported on " + VersionInfo.getVersion());
}
if (actionInfo.getActionName().equals(LIST_EC_ACTION_ID)) {
return true;
}
if (actionInfo.getArgs().get(HdfsAction.FILE_PATH) == null) {
throw new IOException("File path is required for action " + actionInfo.getActionName() + "!");
}
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
// The root dir should be excluded in checking whether file path ends with slash.
if (!srcPath.equals("/") && srcPath.endsWith("/")) {
srcPath = srcPath.substring(0, srcPath.length() - 1);
actionInfo.getArgs().put(HdfsAction.FILE_PATH, srcPath);
}
// For ec or unec action, check if the file is locked.
if (actionInfo.getActionName().equals(EC_ACTION_ID) ||
actionInfo.getActionName().equals(UNEC_ACTION_ID)) {
if (fileLock.contains(srcPath)) {
return false;
}
}
return true;
}
public static boolean isECSupported() {
String[] parts = VersionInfo.getVersion().split("\\.");
return Integer.parseInt(parts[0]) == 3;
}
@Override
public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
if (!actions.contains(action.getActionType())) {
return ScheduleResult.SUCCESS;
}
if (actionInfo.getActionName().equals(LIST_EC_ACTION_ID)) {
return ScheduleResult.SUCCESS;
}
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
if (srcPath == null) {
actionInfo.appendLog("No file is given in this action!");
return ScheduleResult.FAIL;
}
if (actionInfo.getActionName().equals(CHECK_EC_ACTION_ID)) {
return ScheduleResult.SUCCESS;
}
try {
// use the default EC policy if an ec action has not been given an EC policy
if (actionInfo.getActionName().equals(EC_ACTION_ID)) {
String ecPolicy = actionInfo.getArgs().get(EC_POLICY);
if (ecPolicy == null || ecPolicy.isEmpty()) {
String defaultEcPolicy = conf.getTrimmed("dfs.namenode.ec.system.default.policy",
"RS-6-3-1024k");
actionInfo.getArgs().put(EC_POLICY, defaultEcPolicy);
action.getArgs().put(EC_POLICY, defaultEcPolicy);
}
}
FileInfo fileinfo = metaStore.getFile(srcPath);
if (fileinfo != null && fileinfo.isdir()) {
return ScheduleResult.SUCCESS;
}
// The below code is just for ec or unec action with file as argument, not directory
if (isLimitedByThrottle(srcPath)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Failed to schedule {} due to the limitation of throttle!", actionInfo);
}
return ScheduleResult.RETRY;
}
// For ec or unec, add ecTmp argument
String tmpName = createTmpName(action);
action.getArgs().put(EC_TMP, EC_DIR + tmpName);
actionInfo.getArgs().put(EC_TMP, EC_DIR + tmpName);
} catch (MetaStoreException ex) {
LOG.error("Error occurred for getting file info", ex);
actionInfo.appendLog(ex.getMessage());
return ScheduleResult.FAIL;
}
afterSchedule(actionInfo);
return ScheduleResult.SUCCESS;
}
@Override
public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
try {
HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
CompatibilityHelper compatibilityHelper =
CompatibilityHelperLoader.getHelper();
// For unec, if current policy ID is 0, which means replication, we
// speculate that action was executed successful.
if (actionInfo.getActionName().equals(UNEC_ACTION_ID)) {
return
compatibilityHelper.getErasureCodingPolicy(fileStatus) == (byte) 0;
} else if (actionInfo.getActionName().equals(EC_ACTION_ID)) {
String currentSrcEcPolicyName =
compatibilityHelper.getErasureCodingPolicyName(fileStatus);
String actionEcPolicyName = actionInfo.getArgs().get(EC_POLICY);
return currentSrcEcPolicyName.equals(actionEcPolicyName);
}
return false;
} catch (IOException e) {
LOG.warn("Failed to get file status or EC policy, suppose this action " +
"was not successfully executed: {}", actionInfo.toString());
return false;
}
}
/**
* For EC/UnEC action, the src file will be locked and
* the old file id is kept in a map.
*/
public void afterSchedule(ActionInfo actionInfo) {
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
// lock the file only if ec or unec action is scheduled
fileLock.add(srcPath);
try {
setOldFileId(actionInfo);
} catch (Throwable t) {
// We think it may not be a big issue, so just warn user this issue.
LOG.warn("Failed in maintaining old fid for taking over old data's temperature.");
}
}
/**
* Set old file id which will be persisted into DB. For action status
* recovery case, the old file id can be acquired for taking over old file's
* data temperature.
*/
private void setOldFileId(ActionInfo actionInfo) throws IOException {
if (actionInfo.getArgs().get(OLD_FILE_ID) != null &&
!actionInfo.getArgs().get(OLD_FILE_ID).isEmpty()) {
return;
}
List<Long> oids = new ArrayList<>();
String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
try {
oids.add(dfsClient.getFileInfo(path).getFileId());
} catch (IOException e) {
LOG.warn("Failed to set old fid for taking over data temperature!");
throw e;
}
actionInfo.setOldFileIds(oids);
}
private String createTmpName(LaunchAction action) {
String path = action.getArgs().get(HdfsAction.FILE_PATH);
String fileName;
int index = path.lastIndexOf("/");
if (index == path.length() - 1) {
index = path.substring(0, path.length() - 1).indexOf("/");
fileName = path.substring(index + 1, path.length() - 1);
} else {
fileName = path.substring(index + 1, path.length());
}
/**
* The dest tmp file is under EC_DIR and
* named by fileName, aidxxx and current time in millisecond with "_" separated
*/
String tmpName = fileName + "_" + "aid" + action.getActionId() +
"_" + System.currentTimeMillis();
return tmpName;
}
@Override
public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
if (!actionInfo.isFinished()) {
return;
}
if (actionInfo.getActionName().equals(EC_ACTION_ID) ||
actionInfo.getActionName().equals(UNEC_ACTION_ID)) {
String filePath = null;
try {
filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
if (!actionInfo.isSuccessful()) {
return;
}
// Task over access count after successful execution.
takeOverAccessCount(actionInfo);
} finally {
// As long as the action is finished, regardless of success or not,
// we should remove the corresponding record from fileLock.
if (filePath != null) {
fileLock.remove(filePath);
}
}
}
}
/**
* In rename case, the fid of renamed file is not changed. But sometimes, we need
* to keep old file's access count and let new file takes over this metric. E.g.,
* with (un)EC/(de)Compress/(un)Compact action, a new file will overwrite the old file.
*/
public void takeOverAccessCount(ActionInfo actionInfo) {
try {
String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
long oldFid = actionInfo.getOldFileIds().get(0);
// The new fid may have not been updated in metastore, so
// we get it from dfs client.
long newFid = dfsClient.getFileInfo(filePath).getFileId();
metaStore.updateAccessCountTableFid(oldFid, newFid);
} catch (Exception e) {
LOG.warn("Failed to take over file access count for all tables, " +
"which may make the measurement for data temperature inaccurate!",
e.getMessage());
}
}
public boolean isLimitedByThrottle(String srcPath) throws MetaStoreException {
if (this.rateLimiter == null) {
return false;
}
int fileLengthInMb = (int) metaStore.getFile(srcPath).getLength() >> 20;
if (fileLengthInMb > 0) {
return !rateLimiter.tryAcquire(fileLengthInMb);
}
return false;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/NNMetricsAccessEventSource.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/NNMetricsAccessEventSource.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.metrics.FileAccessEventCollector;
import org.smartdata.metrics.FileAccessEventSource;
/**
 * A {@link FileAccessEventSource} whose events come from an
 * {@link NNMetricsAccessEventCollector}.
 *
 * <p>Events pushed by smart clients are deliberately ignored by this source;
 * only the collector feeds data.
 */
public class NNMetricsAccessEventSource implements FileAccessEventSource {
  private final NNMetricsAccessEventCollector collector =
      new NNMetricsAccessEventCollector();

  /** Returns the collector that produces this source's access events. */
  @Override
  public FileAccessEventCollector getCollector() {
    return collector;
  }

  /** No-op: client-reported events are not consumed by this source. */
  @Override
  public void insertEventFromSmartClient(FileAccessEvent event) {
    // Intentionally empty.
  }

  /** Closes the underlying collector. */
  @Override
  public void close() {
    collector.close();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/NNMetricsAccessEventCollector.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/NNMetricsAccessEventCollector.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.commons.lang.time.FastDateFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.metrics.FileAccessEventCollector;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Collects {@link FileAccessEvent}s by reading rolling log files under a
 * configured base path — presumably written by a Hadoop metrics2 file sink
 * on the NameNode (config prefix "namenode.sink.file_access"); TODO confirm
 * against the sink implementation.
 *
 * <p>Log directories are named by the interval-aligned timestamp
 * (yyyyMMddHHmm00, GMT) and contain per-host files "source-hostname.log".
 * Each record line has the form "path:user:timestamp=flag" and files end
 * with an "EOF" marker that signals rolling to the next interval's file.
 */
public class NNMetricsAccessEventCollector implements FileAccessEventCollector {
  static final Logger LOG = LoggerFactory.getLogger(NNMetricsAccessEventCollector.class);
  private static final List<FileAccessEvent> EMPTY_RESULT = new ArrayList<>();
  // Reader over the rolling log files; may stay null if creation failed.
  private Reader reader;
  // Timestamp of the last event consumed; collect() resumes from here.
  private long now;

  public NNMetricsAccessEventCollector() {
    try {
      this.reader = Reader.create();
    } catch (IOException | URISyntaxException e) {
      // NOTE(review): on failure 'reader' stays null and collect()/close()
      // below would throw NullPointerException — verify callers tolerate this.
      LOG.error("Create Reader error", e);
    }
    now = System.currentTimeMillis();
  }

  /**
   * Returns all events newer than the last consumed timestamp, advancing
   * the internal cursor; returns an empty list when no log file covering
   * the current timestamp exists yet or on read errors.
   */
  @Override
  public List<FileAccessEvent> collect() throws IOException {
    try {
      if (reader.exists(now)) {
        reader.seekTo(now, false);
        List<FileAccessEvent> events = new ArrayList<>();
        while (reader.hasNext()) {
          Info info = reader.next();
          events.add(new FileAccessEvent(info.getPath(), info.getTimestamp()));
          now = info.getTimestamp();
        }
        return events;
      } else if (reader.exists(now + reader.getRollingIntervalMillis())) {
        // This is the corner case that AccessEventFetcher starts a little bit ahead of Namenode
        // and then Namenode begins log access event for the current rolling file, while
        // AccessCountFetch is seeking for the last one, which will never exist.
        // Align 'now' to the start of the next rolling interval.
        now = now + reader.getRollingIntervalMillis() - now % reader.getRollingIntervalMillis();
      }
    } catch (IOException | URISyntaxException e) {
      LOG.error("FileAccessEvent collect error", e);
    }
    return EMPTY_RESULT;
  }

  public void close() {
    // NOTE(review): throws NullPointerException if Reader.create() failed
    // in the constructor — confirm callers guard against this.
    this.reader.close();
  }

  // Log directory names are built from this GMT-based minute format.
  public static final FastDateFormat DATE_FORMAT =
      FastDateFormat.getInstance("yyyyMMddHHmm", TimeZone.getTimeZone("GMT"));
  // Keys/defaults read from the metrics2 sink configuration subset.
  private static final String BASEPATH_KEY = "basepath";
  private static final String BASEPATH_DEFAULT = "/tmp";
  private static final String SOURCE_KEY = "source";
  private static final String SOURCE_DEFAULT = "unknown";
  private static final String ROLL_INTERVAL_KEY = "roll-interval";
  private static final String DEFAULT_ROLL_INTERVAL = "1h";
  private static final String DEFAULT_FILE_NAME = "hadoop-metrics2.properties";
  // Sentinel line marking the end of a rolled log file.
  private static final String EOF = "EOF";
  // Record flag values: watermark (0) vs. normal access record (1).
  private static final String WATERMARK_VAL = "0";
  private static final String NORMAL_VAL = "1";
  // Record format: "path:user:timestamp=flag".
  private static final String INFO_SEPARATOR = ":";
  private static final String RECORD_SEPARATOR = "=";

  /**
   * Parses a roll-interval setting like "30m" / "1h" / "1 day" into
   * milliseconds. Must be at least one minute; seconds are deliberately
   * disabled (see the commented-out cases below).
   *
   * @throws MetricsException on an unrecognized number or unit
   */
  private static long getRollInterval(SubsetConfiguration properties) {
    String rollInterval =
        properties.getString(ROLL_INTERVAL_KEY, DEFAULT_ROLL_INTERVAL);
    Pattern pattern = Pattern.compile("^\\s*(\\d+)\\s*([A-Za-z]*)\\s*$");
    Matcher match = pattern.matcher(rollInterval);
    long millis;
    if (match.matches()) {
      String flushUnit = match.group(2);
      int rollIntervalInt;
      try {
        rollIntervalInt = Integer.parseInt(match.group(1));
      } catch (NumberFormatException ex) {
        throw new MetricsException("Unrecognized flush interval: "
            + rollInterval + ". Must be a number followed by an optional "
            + "unit. The unit must be one of: minute, hour, day", ex);
      }
      if ("".equals(flushUnit)) {
        // No unit given: hours is the default.
        millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
      } else {
        switch (flushUnit.toLowerCase()) {
          /* case "s":
          case "sec":
          case "second":
          case "seconds":
            millis = TimeUnit.SECONDS.toMillis(rollIntervalInt);
            break;*/
          case "m":
          case "min":
          case "minute":
          case "minutes":
            millis = TimeUnit.MINUTES.toMillis(rollIntervalInt);
            break;
          case "h":
          case "hr":
          case "hour":
          case "hours":
            millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
            break;
          case "d":
          case "day":
          case "days":
            millis = TimeUnit.DAYS.toMillis(rollIntervalInt);
            break;
          default:
            throw new MetricsException("Unrecognized unit for flush interval: "
                + flushUnit + ". Must be one of: minute, hour, day");
        }
      }
    } else {
      throw new MetricsException("Unrecognized flush interval: "
          + rollInterval + ". Must be a number followed by an optional unit."
          + " The unit must be one of: minute, hour, day");
    }
    if (millis < 60000) {
      throw new MetricsException("The flush interval property must be "
          + "at least 1 minute. Value was " + rollInterval);
    }
    return millis;
  }

  /**
   * Returns the highest-numbered existing variant of the given log file:
   * "initial", "initial.1", "initial.2", ... — the last one that exists.
   */
  public static Path findMostRecentLogFile(FileSystem fs, Path initial)
      throws IOException {
    Path logFile = null;
    Path nextLogFile = initial;
    int id = 1;
    do {
      logFile = nextLogFile;
      nextLogFile = new Path(initial.toString() + "." + id);
      id += 1;
    } while (fs.exists(nextLogFile));
    return logFile;
  }

  /** Builds the per-host log file name: "source-hostname.log". */
  public static String getLogFileName(String source) throws UnknownHostException {
    return source + "-" + InetAddress.getLocalHost().getHostName() + ".log";
  }

  /** Builds the log directory name for a timestamp: yyyyMMddHHmm + "00" (GMT). */
  public static String getLogDirName(long time) {
    return DATE_FORMAT.format(time) + "00";
  }

  /**
   * One parsed access record: file path, accessing user and timestamp.
   * {@link #name()} serializes back to the on-disk "path:user:timestamp"
   * key format parsed by {@link Reader#next()}.
   */
  public static class Info implements MetricsInfo {
    private String path;
    private String user;
    private long timestamp;
    public String getPath() {
      return path;
    }
    public String getUser() {
      return user;
    }
    public long getTimestamp() {
      return timestamp;
    }
    public Info(String path, String user, long timestamp) {
      this.path = path;
      this.user = user;
      this.timestamp = timestamp;
    }
    @Override
    public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;
      Info that = (Info) o;
      if (timestamp != that.timestamp) return false;
      if (path != null ? !path.equals(that.path) : that.path != null) return false;
      return user != null ? user.equals(that.user) : that.user == null;
    }
    @Override
    public int hashCode() {
      int result = path != null ? path.hashCode() : 0;
      result = 31 * result + (user != null ? user.hashCode() : 0);
      result = 31 * result + (int) (timestamp ^ (timestamp >>> 32));
      return result;
    }
    @Override
    public String name() {
      return path + INFO_SEPARATOR + user + INFO_SEPARATOR + timestamp;
    }
    @Override
    public String description() {
      return name();
    }
  }

  /**
   * Iterator over access records in the rolling log files. It locates the
   * file covering a given timestamp, follows EOF markers into the next
   * interval's file, and exposes records one at a time.
   */
  public static class Reader implements Iterator<Info>, Closeable {
    public static final Logger LOG = LoggerFactory.getLogger(Reader.class);
    // Metrics2 configuration subset this reader is driven by.
    private static final String PREFIX = "namenode.sink.file_access";
    private long rollIntervalMillis;
    private String basePath;
    private String source;
    // Reader over the current log file; null when no file is open.
    private BufferedReader reader;
    // Start time of the interval whose file is currently open.
    private long curTime;
    // Look-ahead record returned by the next next() call; null at end.
    private Info curInfo;
    private FileSystem fs;
    private boolean endOfFile;

    @VisibleForTesting
    Reader(SubsetConfiguration conf)
        throws URISyntaxException, IOException {
      basePath = conf.getString(BASEPATH_KEY, BASEPATH_DEFAULT);
      source = conf.getString(SOURCE_KEY, SOURCE_DEFAULT);
      rollIntervalMillis = getRollInterval(conf);
      fs = FileSystem.get(new URI(basePath), new Configuration());
    }

    /** Creates a reader from the metrics2 properties files on the classpath. */
    public static Reader create()
        throws IOException, URISyntaxException {
      SubsetConfiguration properties =
          loadConfiguration(PREFIX,"hadoop-metrics2-" +
              PREFIX + ".properties", DEFAULT_FILE_NAME);
      return new Reader(properties);
    }

    @Override
    public boolean hasNext() {
      return curInfo != null;
    }

    @Override
    public Info next() {
      try {
        // Return the look-ahead record and pre-read the following one.
        Info ret = curInfo;
        curInfo = readInfo();
        return ret;
      } catch (IOException | URISyntaxException e) {
        throw new RuntimeException(e);
      }
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("remove");
    }

    @Override
    public void close() {
      try {
        if (reader != null) {
          reader.close();
        }
        fs.close();
      } catch (IOException e) {
        LOG.error(e.getMessage());
      }
    }

    /**
     * Loads the first existing properties file from the given candidates
     * and returns its subset under {@code prefix}; falls back to an empty
     * configuration when none can be located.
     */
    private static SubsetConfiguration loadConfiguration(String prefix, String... fileNames) {
      for (String fname : fileNames) {
        try {
          org.apache.commons.configuration.Configuration cf = new PropertiesConfiguration(fname)
              .interpolatedConfiguration();
          LOG.info("loaded properties from " + fname);
          return new SubsetConfiguration(cf, prefix, ".");
        } catch (ConfigurationException e) {
          if (e.getMessage().startsWith("Cannot locate configuration")) {
            continue;
          }
          throw new RuntimeException(e);
        }
      }
      return new SubsetConfiguration(new PropertiesConfiguration(), prefix);
    }

    /**
     * seek to the first timestamp larger than @param timestamp
     *
     * @param timestamp target time
     * @param start whether to seek to file start
     * @return whether a valid timestamp is found
     * @throws IOException
     * @throws URISyntaxException
     */
    public boolean seekTo(long timestamp, boolean start)
        throws IOException, URISyntaxException {
      reader = getReader(timestamp);
      Info info;
      do {
        info = readInfo();
      } while (!start && info != null && info.timestamp <= timestamp);
      if (info != null) {
        curTime = timestamp;
      }
      curInfo = info;
      return curInfo != null;
    }

    /**
     * @param timestamp
     * @return whether the file containing the timestamp exists
     */
    public boolean exists(long timestamp) throws IOException {
      return fs.exists(getLogDir(basePath, timestamp, rollIntervalMillis));
    }

    public long getRollingIntervalMillis() {
      return rollIntervalMillis;
    }

    public boolean isEndOfFile() {
      return endOfFile;
    }

    /**
     * Reads and parses the next record, transparently rolling to the next
     * interval's file on an EOF marker. Returns null when no record is
     * available (no open file, malformed line, or end of data).
     */
    private Info readInfo()
        throws IOException, URISyntaxException {
      if (reader != null) {
        String line = reader.readLine();
        if (line != null) {
          if (line.equals(EOF)) {
            // This file is complete; advance to the next interval's file.
            endOfFile = true;
            reader.close();
            curTime += rollIntervalMillis;
            reader = getReader(curTime);
            return readInfo();
          } else {
            endOfFile = false;
            // Expected record shape: "path:user:timestamp=flag".
            String[] kv = line.split(RECORD_SEPARATOR);
            if (kv.length == 2 && (kv[1].equals(WATERMARK_VAL) || kv[1].equals(NORMAL_VAL))) {
              String[] ks = kv[0].split(INFO_SEPARATOR);
              if (ks.length == 3) {
                return new Info(ks[0], ks[1], Long.parseLong(ks[2]));
              }
            }
          }
        }
      }
      return null;
    }

    /**
     * Opens the most recent log file for the interval containing the given
     * time, or returns null when that interval's directory does not exist.
     */
    private BufferedReader getReader(long time) throws URISyntaxException, IOException {
      Path dir = getLogDir(basePath, time, rollIntervalMillis);
      if (fs.exists(dir)) {
        Path file = findMostRecentLogFile(fs, new Path(dir, getLogFileName(source)));
        if (file != null) {
          return new BufferedReader(new InputStreamReader(fs.open(file),
              StandardCharsets.UTF_8));
        }
      }
      return null;
    }

    /** Floors the time to its interval boundary and maps it to a log dir path. */
    @VisibleForTesting
    protected static Path getLogDir(String base, long time, long interval) {
      String dir =
          getLogDirName(time / interval * interval);
      return new Path(base, dir);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/NamespaceFetcher.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/NamespaceFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ErasureCodingPolicyInfo;
import org.smartdata.model.FileInfo;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.ingestion.IngestionTask;
import org.smartdata.model.FileInfoBatch;
import org.smartdata.metastore.ingestion.FileStatusIngester;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import static org.smartdata.hdfs.CompatibilityHelperLoader.getHelper;
public class NamespaceFetcher {
private static final Long DEFAULT_INTERVAL = 1L;
private final ScheduledExecutorService scheduledExecutorService;
private final long fetchInterval;
private ScheduledFuture[] fetchTaskFutures;
private ScheduledFuture[] consumerFutures;
private FileStatusIngester[] consumers;
private IngestionTask[] ingestionTasks;
private DFSClient client;
private MetaStore metaStore;
private SmartConf conf;
public static final Logger LOG =
LoggerFactory.getLogger(NamespaceFetcher.class);
public NamespaceFetcher(DFSClient client, MetaStore metaStore, ScheduledExecutorService service) {
this(client, metaStore, DEFAULT_INTERVAL, service, new SmartConf());
}
public NamespaceFetcher(DFSClient client, MetaStore metaStore, ScheduledExecutorService service, SmartConf conf) {
this(client, metaStore, DEFAULT_INTERVAL, service, conf);
}
public NamespaceFetcher(DFSClient client, MetaStore metaStore, long fetchInterval) {
this(client, metaStore, fetchInterval, null, new SmartConf());
}
public NamespaceFetcher(DFSClient client, MetaStore metaStore, long fetchInterval, SmartConf conf) {
this(client, metaStore, fetchInterval, null, conf);
}
public NamespaceFetcher(DFSClient client, MetaStore metaStore, long fetchInterval,
ScheduledExecutorService service, SmartConf conf) {
int numProducers = conf.getInt(SmartConfKeys.SMART_NAMESPACE_FETCHER_PRODUCERS_NUM_KEY,
SmartConfKeys.SMART_NAMESPACE_FETCHER_PRODUCERS_NUM_DEFAULT);
numProducers = numProducers <= 0 ? 1 : numProducers;
this.ingestionTasks = new IngestionTask[numProducers];
HdfsFetchTask.init();
for (int i = 0; i < numProducers; i++) {
ingestionTasks[i] = new HdfsFetchTask(ingestionTasks, client, conf);
}
int numConsumers = conf.getInt(SmartConfKeys.SMART_NAMESPACE_FETCHER_CONSUMERS_NUM_KEY,
SmartConfKeys.SMART_NAMESPACE_FETCHER_CONSUMERS_NUM_DEFAULT);
numConsumers = numConsumers <= 0 ? 1 : numConsumers;
consumers = new FileStatusIngester[numConsumers];
for (int i = 0; i < numConsumers; i++) {
consumers[i] = new FileStatusIngester(metaStore);
}
this.fetchInterval = fetchInterval;
if (service != null) {
this.scheduledExecutorService = service;
} else {
scheduledExecutorService = Executors.newScheduledThreadPool(numProducers + numConsumers);
}
this.client = client;
this.metaStore = metaStore;
this.conf = conf;
}
public static void init(SmartConf conf) {
IngestionTask.init(conf);
}
/**
 * Resets metastore state (erasure-coding policies and the file table) and
 * schedules all producer and consumer tasks at the configured fixed rate.
 *
 * @throws IOException when refreshing EC policies or resetting the file
 *         table fails; the underlying MetaStoreException is preserved as
 *         the cause
 */
public void startFetch() throws IOException {
  try {
    init(conf);
    // Refresh the erasure-coding policy table from the NameNode.
    metaStore.deleteAllEcPolicies();
    Map<Byte, String> idToPolicyName =
        CompatibilityHelperLoader.getHelper().getErasureCodingPolicies(client);
    if (idToPolicyName != null) {
      ArrayList<ErasureCodingPolicyInfo> ecInfos = new ArrayList<>();
      for (Map.Entry<Byte, String> policy : idToPolicyName.entrySet()) {
        ecInfos.add(new ErasureCodingPolicyInfo(policy.getKey(), policy.getValue()));
      }
      metaStore.insertEcPolicies(ecInfos);
      LOG.info("Finished fetching all EC policies!");
    }
  } catch (MetaStoreException e) {
    // Chain the cause instead of swallowing it, so failures are diagnosable.
    throw new IOException("Failed to clean and fetch EC policies!", e);
  }
  try {
    metaStore.deleteAllFileInfo();
  } catch (MetaStoreException e) {
    throw new IOException("Error while reset files", e);
  }
  // Schedule producers (namespace walkers) ...
  this.fetchTaskFutures = new ScheduledFuture[ingestionTasks.length];
  for (int i = 0; i < ingestionTasks.length; i++) {
    fetchTaskFutures[i] = this.scheduledExecutorService.scheduleAtFixedRate(
        ingestionTasks[i], 0, fetchInterval, TimeUnit.MILLISECONDS);
  }
  // ... and consumers (metastore ingesters).
  this.consumerFutures = new ScheduledFuture[consumers.length];
  for (int i = 0; i < consumers.length; i++) {
    consumerFutures[i] = this.scheduledExecutorService.scheduleAtFixedRate(
        consumers[i], 0, fetchInterval, TimeUnit.MILLISECONDS);
  }
  LOG.info("Started.");
}
/** Initializes the shared IngestionTask state to fetch only the given directory. */
public static void init(String dir) {
  IngestionTask.init(dir);
}
/*
 * startFetch(dir) restarts the fetcher to fetch one specific directory.
 * In a rename event, when the src is not in the file table (because it was
 * never fetched, or for some other reason), the dest should be fetched by
 * calling startFetch(dest).
 */
/**
 * Restarts the fetch machinery targeted at a single directory; used when a
 * rename dest is missing from the file table. Schedules the same producer
 * and consumer tasks as the full-namespace fetch.
 *
 * @param dir directory to (re)fetch
 */
public void startFetch(String dir) {
  init(dir);
  this.fetchTaskFutures = new ScheduledFuture[ingestionTasks.length];
  for (int i = 0; i < ingestionTasks.length; i++) {
    fetchTaskFutures[i] = this.scheduledExecutorService.scheduleAtFixedRate(
        ingestionTasks[i], 0, fetchInterval, TimeUnit.MILLISECONDS);
  }
  this.consumerFutures = new ScheduledFuture[consumers.length];
  for (int i = 0; i < consumers.length; i++) {
    consumerFutures[i] = this.scheduledExecutorService.scheduleAtFixedRate(
        consumers[i], 0, fetchInterval, TimeUnit.MILLISECONDS);
  }
  LOG.info("Start fetch the given dir.");
}
/** @return true once the ingestion tasks report that the fetch has completed. */
public boolean fetchFinished() {
  return IngestionTask.finished();
}
/**
 * Cancels any scheduled producer and consumer tasks. In-flight iterations
 * are allowed to finish (cancel(false)).
 */
public void stop() {
  cancelAll(fetchTaskFutures);
  cancelAll(consumerFutures);
}

/** Cancels every non-null future in the given array; tolerates a null array. */
private static void cancelAll(ScheduledFuture[] futures) {
  if (futures == null) {
    return;
  }
  for (ScheduledFuture future : futures) {
    if (future != null) {
      future.cancel(false);
    }
  }
}
/**
 * Producer side of the namespace fetch: breadth-first walks directories
 * (shared through the deque inherited from IngestionTask), converts each
 * HdfsFileStatus into a FileInfo, and batches them for the consumers.
 */
private static class HdfsFetchTask extends IngestionTask {
  private final HdfsFileStatus[] EMPTY_STATUS = new HdfsFileStatus[0];
  private final DFSClient client;
  private final SmartConf conf;
  // Pagination cursor within the current directory listing; null means the
  // current directory (if any) has been fully listed.
  private byte[] startAfter = null;
  private final byte[] empty = HdfsFileStatus.EMPTY_NAME;
  // Directory currently being processed by this task; null when idle.
  private String parent = "";
  // Directory whose paged listing must be resumed on the next run().
  private String pendingParent;
  private IngestionTask[] ingestionTasks;
  // NOTE(review): static field assigned in the instance constructor, so all
  // tasks share the ignore list of the most recently constructed task —
  // confirm this is intended.
  private static List<String> ignoreList;
  // NOTE(review): unsynchronized static counter; assumes tasks are
  // constructed from a single thread.
  private static int idCounter = 0;
  private int id;

  public HdfsFetchTask(IngestionTask[] ingestionTasks, DFSClient client, SmartConf conf) {
    super();
    id = idCounter++;
    this.ingestionTasks = ingestionTasks;
    this.client = client;
    this.conf = conf;
    defaultBatchSize = conf.getInt(SmartConfKeys
        .SMART_NAMESPACE_FETCHER_BATCH_KEY,
        SmartConfKeys.SMART_NAMESPACE_FETCHER_BATCH_DEFAULT);
    ignoreList = this.conf.getIgnoreDir();
  }

  /** Resets the shared task-id counter; called once per NamespaceFetcher. */
  public static void init() {
    HdfsFetchTask.idCounter = 0;
  }

  // BFS finished: true only when no task still holds a directory in progress.
  public boolean isDequeEmpty() {
    for (IngestionTask ingestionTask: ingestionTasks) {
      if (((HdfsFetchTask)ingestionTask).parent != null) {
        return false;
      }
    }
    return true;
  }

  @Override
  public void run() {
    // Periodic progress logging, at most every 2 seconds.
    if (LOG.isDebugEnabled()) {
      long curr = System.currentTimeMillis();
      if (curr - lastUpdateTime >= 2000) {
        LOG.debug(String.format(
            "%d sec, numDirectories = %d, numFiles = %d, batchsInqueue = %d",
            (curr - startTime) / 1000,
            numDirectoriesFetched.get(), numFilesFetched.get(), batches.size()));
        lastUpdateTime = curr;
      }
    }
    // Back off while the consumers are behind.
    if (batches.size() >= maxPendingBatches) {
      return;
    }
    // Resume a partially-listed directory first, else take the next queued one.
    if (this.pendingParent != null) {
      this.parent = pendingParent;
      this.pendingParent = null;
    } else {
      this.parent = deque.pollFirst();
    }
    if (parent == null) {
      // Nothing to process: flush any partially-filled batch, then let
      // task 0 declare the whole fetch finished once every task is idle.
      if (currentBatch.actualSize() > 0) {
        try {
          this.batches.put(currentBatch);
        } catch (InterruptedException e) {
          LOG.error("Current batch actual size = "
              + currentBatch.actualSize(), e);
        }
        this.currentBatch = new FileInfoBatch(defaultBatchSize);
      }
      if (this.id == 0 && isDequeEmpty() && this.batches.isEmpty()) {
        if (!IngestionTask.isFinished) {
          IngestionTask.isFinished = true;
          long curr = System.currentTimeMillis();
          LOG.info(String.format(
              "Finished fetch Namespace! %ds, %dms used, numDirs = %d, numFiles = %d",
              (curr - startTime) / 1000, (curr - startTime) % 1000,
              numDirectoriesFetched.get(), numFilesFetched.get()));
        }
      }
      return;
    }
    // Skip ignored directories (checked only when starting a fresh listing).
    if (startAfter == null) {
      String tmpParent = parent.endsWith("/") ? parent : parent + "/";
      for (String dir : ignoreList) {
        if (tmpParent.startsWith(dir)) {
          return;
        }
      }
    }
    try {
      HdfsFileStatus status = client.getFileInfo(parent);
      if (status == null) {
        // Path vanished between queueing and processing; handled below.
        throw new IOException();
      }
      if(!status.isDir()) {
        this.addFileStatus(convertToFileInfo(status, parent));
        numFilesFetched.incrementAndGet();
      }
      if (status.isDir()) {
        if (startAfter == null) {
          // Record the directory itself once, before listing its children.
          FileInfo internal = convertToFileInfo(status, "");
          internal.setPath(parent);
          this.addFileStatus(internal);
          numDirectoriesFetched.incrementAndGet();
        }
        HdfsFileStatus[] children;
        do {
          children = listStatus(parent);
          if (children == null || children.length == 0) {
            break;
          }
          for (HdfsFileStatus child : children) {
            if (child.isDir()) {
              // Queue subdirectory for BFS traversal.
              this.deque.add(child.getFullName(parent));
            } else {
              this.addFileStatus(convertToFileInfo(child, parent));
              numFilesFetched.incrementAndGet();
            }
          }
        } while (startAfter != null && batches.size() < maxPendingBatches);
        if (startAfter != null) {
          // Listing paused mid-directory; resume it on the next run().
          pendingParent = parent;
        }
      }
    } catch (IOException | InterruptedException e) {
      startAfter = null;
      LOG.error("Totally, numDirectoriesFetched = " + numDirectoriesFetched
          + ", numFilesFetched = " + numFilesFetched
          + ". Parent = " + parent, e);
    }
  }

  /**
   * Code copied from {@link org.apache.hadoop.fs.Hdfs}.
   * Lists one page of the directory, advancing {@code startAfter}; resets it
   * to null once the final page has been returned.
   */
  private HdfsFileStatus[] listStatus(String src) throws IOException {
    DirectoryListing thisListing = client.listPaths(
        src, startAfter == null ? empty : startAfter);
    if (thisListing == null) {
      // the directory does not exist
      startAfter = null;
      return EMPTY_STATUS;
    }
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) {
      // got all entries of the directory
      startAfter = null;
    } else {
      startAfter = thisListing.getLastName();
    }
    return partialListing;
  }

  /** Maps an HdfsFileStatus (plus its parent path) to an SSM FileInfo record. */
  private FileInfo convertToFileInfo(HdfsFileStatus status, String parent) {
    FileInfo fileInfo = new FileInfo(
        status.getFullName(parent),
        status.getFileId(),
        status.getLen(),
        status.isDir(),
        status.getReplication(),
        status.getBlockSize(),
        status.getModificationTime(),
        status.getAccessTime(),
        status.getPermission().toShort(),
        status.getOwner(),
        status.getGroup(),
        status.getStoragePolicy(),
        getHelper().getErasureCodingPolicy(status));
    return fileInfo;
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/DataNodeInfoFetcher.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/DataNodeInfoFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.DataNodeInfo;
import org.smartdata.model.DataNodeStorageInfo;
import org.smartdata.model.StorageCapacity;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
* Fetch and maintain data nodes related info.
*/
/**
 * Fetches datanode and per-storage utilization info from the NameNode on a
 * fixed schedule and mirrors it into the metastore.
 */
public class DataNodeInfoFetcher {
  // Reporting period in milliseconds (configured in seconds).
  private final long updateInterval;
  private final DFSClient client;
  private final MetaStore metaStore;
  private final ScheduledExecutorService scheduledExecutorService;
  private ScheduledFuture dnStorageReportProcTaskFuture;
  private final Configuration conf;
  private DataNodeInfoFetchTask procTask;

  public static final Logger LOG =
      LoggerFactory.getLogger(DataNodeInfoFetcher.class);

  public DataNodeInfoFetcher(DFSClient client, MetaStore metaStore,
      ScheduledExecutorService service, Configuration conf) {
    this.client = client;
    this.metaStore = metaStore;
    this.scheduledExecutorService = service;
    this.conf = conf;
    // Multiply with a long literal so a large configured value cannot
    // overflow the intermediate int product.
    updateInterval = conf.getInt(SmartConfKeys.SMART_STORAGE_INFO_UPDATE_INTERVAL_KEY,
        SmartConfKeys.SMART_STORAGE_INFO_UPDATE_INTERVAL_DEFAULT) * 1000L;
  }

  /**
   * Creates the fetch task and schedules it at a fixed rate.
   *
   * @throws IOException when the task cannot read current storage info
   */
  public void start() throws IOException {
    LOG.info("Starting DataNodeInfoFetcher service ...");
    procTask = new DataNodeInfoFetchTask(client, conf, metaStore);
    dnStorageReportProcTaskFuture = scheduledExecutorService.scheduleAtFixedRate(
        procTask, 0, updateInterval, TimeUnit.MILLISECONDS);
    LOG.info("DataNodeInfoFetcher service started.");
  }

  /** @return true once at least one full report cycle has completed. */
  public boolean isFetchFinished() {
    return this.procTask.isFinished();
  }

  /** Cancels the scheduled task; a running iteration may finish. */
  public void stop() {
    if (dnStorageReportProcTaskFuture != null) {
      dnStorageReportProcTaskFuture.cancel(false);
    }
  }

  /**
   * Periodic task that pulls live datanode storage reports and rewrites the
   * datanode and storage tables in the metastore.
   *
   * <p>Static nested (rather than inner) so it does not retain a hidden
   * reference to the enclosing fetcher.
   */
  private static class DataNodeInfoFetchTask implements Runnable {
    private final DFSClient client;
    private final Configuration conf;
    private final MetaStore metaStore;
    private volatile boolean isFinished = false;
    // Aggregated capacity per storage type, as of the previous cycle.
    private Map<String, StorageCapacity> storages;

    private static final Logger LOG =
        LoggerFactory.getLogger(DataNodeInfoFetchTask.class);

    public DataNodeInfoFetchTask(DFSClient client, Configuration conf, MetaStore metaStore)
        throws IOException {
      this.client = client;
      this.conf = conf;
      this.metaStore = metaStore;
      try {
        storages = metaStore.getStorageCapacity();
      } catch (MetaStoreException e) {
        // Chain the cause so the failure can be diagnosed.
        throw new IOException("Can not get storage info", e);
      }
    }

    @Override
    public void run() {
      StorageCapacity sc;
      Map<String, StorageCapacity> storagesNow = new HashMap<>();
      try {
        final List<DatanodeStorageReport> reports = getDNStorageReports();
        metaStore.deleteAllDataNodeInfo();
        for (DatanodeStorageReport r : reports) {
          metaStore.insertDataNodeInfo(transform(r.getDatanodeInfo()));
          List<DataNodeStorageInfo> infos = new ArrayList<>();
          // Insert one record per storage in DataNodeStorageInfoTable.
          for (int i = 0; i < r.getStorageReports().length; i++) {
            StorageReport storageReport = r.getStorageReports()[i];
            long sid = CompatibilityHelperLoader.getHelper().getSidInDatanodeStorageReport(
                storageReport.getStorage());
            String uuid = r.getDatanodeInfo().getDatanodeUuid();
            long state = storageReport.getStorage().getState().ordinal();
            String storageId = storageReport.getStorage().getStorageID();
            long fail = storageReport.isFailed() ? 1 : 0;
            long capacity = storageReport.getCapacity();
            long dfsUsed = storageReport.getDfsUsed();
            long remaining = storageReport.getRemaining();
            long blockPoolUsed = storageReport.getBlockPoolUsed();
            infos.add(new DataNodeStorageInfo(uuid, sid, state,
                storageId, fail, capacity, dfsUsed, remaining, blockPoolUsed));
            // Aggregate capacity/free per storage type across all nodes.
            String sn = storageReport.getStorage().getStorageType().name();
            if (!storagesNow.containsKey(sn)) {
              sc = new StorageCapacity(sn, capacity, remaining);
              storagesNow.put(sn, sc);
            } else {
              sc = storagesNow.get(sn);
              sc.addCapacity(capacity);
              sc.addFree(remaining);
            }
          }
          metaStore.deleteDataNodeStorageInfo(r.getDatanodeInfo().getDatanodeUuid());
          metaStore.insertDataNodeStorageInfos(infos);
        }
        updateStorages(storagesNow);
        storages = storagesNow;
        isFinished = true;
      } catch (IOException | MetaStoreException e) {
        LOG.error("Process datanode report error", e);
      }
    }

    /**
     * Syncs the per-type storage table: updates changed types, inserts new
     * ones, and deletes types that disappeared since the previous cycle.
     */
    private void updateStorages(Map<String, StorageCapacity> storagesNow)
        throws MetaStoreException {
      List<StorageCapacity> changed = new ArrayList<>();
      for (Entry<String, StorageCapacity> kv : storages.entrySet()) {
        String type = kv.getKey();
        if (storagesNow.containsKey(type)) {
          StorageCapacity now = storagesNow.get(type);
          if (!kv.getValue().equals(now)) {
            changed.add(now);
          }
        } else {
          metaStore.deleteStorage(type);
        }
      }
      for (Entry<String, StorageCapacity> kv : storagesNow.entrySet()) {
        if (!storages.containsKey(kv.getKey())) {
          changed.add(kv.getValue());
        }
      }
      metaStore.insertUpdateStoragesTable(changed);
    }

    /**
     * @return live datanode storage reports, in randomized order
     * @throws IOException on RPC failure
     */
    public List<DatanodeStorageReport> getDNStorageReports() throws IOException {
      final DatanodeStorageReport[] reports =
          client.getDatanodeStorageReport(HdfsConstants.DatanodeReportType.LIVE);
      // Shuffle so downstream processing does not always favor the same nodes.
      final List<DatanodeStorageReport> trimmed = new ArrayList<>();
      for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
        trimmed.add(r);
      }
      return trimmed;
    }

    /** Maps a hadoop DatanodeInfo to an SSM DataNodeInfo record. */
    private DataNodeInfo transform(DatanodeInfo datanodeInfo) {
      return DataNodeInfo.newBuilder().setUuid(datanodeInfo.getDatanodeUuid()).
          setHostName(datanodeInfo.getHostName()).
          setRpcAddress(datanodeInfo.getIpAddr() + ":" +
              Integer.toString(datanodeInfo.getIpcPort())).
          setCacheCapacity(datanodeInfo.getCacheCapacity()).
          setCacheUsed(datanodeInfo.getCacheUsed()).
          setLocation(datanodeInfo.getNetworkLocation()).build();
    }

    /** @return true once one full cycle has been persisted successfully. */
    public boolean isFinished() {
      return this.isFinished;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/DatanodeStorageReportProcTask.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/DatanodeStorageReportProcTask.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.net.NetworkTopology;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.hdfs.action.move.Source;
import org.smartdata.hdfs.action.move.StorageGroup;
import org.smartdata.hdfs.action.move.StorageMap;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * Runnable that refreshes the source/target StorageMap and the network
 * topology from live datanode storage reports.
 */
public class DatanodeStorageReportProcTask implements Runnable {
  // Upper bound on concurrent block moves scheduled per datanode.
  private static final int MAX_CONCURRENT_MOVES_PER_NODE = 5;
  private DFSClient client;
  private StorageMap storages;
  private NetworkTopology networkTopology;
  private Configuration conf;

  public static final Logger LOG =
      LoggerFactory.getLogger(DatanodeStorageReportProcTask.class);

  public DatanodeStorageReportProcTask(DFSClient client, Configuration conf) throws IOException {
    this.client = client;
    this.storages = new StorageMap();
    this.conf = conf;
  }

  public StorageMap getStorages() {
    return storages;
  }

  public NetworkTopology getNetworkTopology() {
    return networkTopology;
  }

  /** Clears accumulated state before a new report cycle. */
  public void reset() {
    storages = new StorageMap();
    networkTopology = NetworkTopology.getInstance(conf);
  }

  /**
   * Pulls live storage reports and registers, per datanode and per movable
   * storage type, a source plus (when free space exists) a target group.
   */
  @Override
  public void run() {
    try {
      reset();
      final List<DatanodeStorageReport> reports = getDNStorageReports();
      for (DatanodeStorageReport r : reports) {
        // TODO: store data abstracted from reports to MetaStore
        final DDatanode dn = new DDatanode(r.getDatanodeInfo(), MAX_CONCURRENT_MOVES_PER_NODE);
        for (String t : CompatibilityHelperLoader.getHelper().getMovableTypes()) {
          final Source source = dn.addSource(t);
          final long maxRemaining = getMaxRemaining(r, t);
          // Only storages with remaining space can serve as move targets.
          final StorageGroup target = maxRemaining > 0L ? dn.addTarget(t) : null;
          storages.add(source, target);
        }
      }
    } catch (IOException e) {
      LOG.error("Process datanode report error", e);
    }
  }

  /** @return the largest remaining capacity among the node's storages of the given type. */
  private static long getMaxRemaining(DatanodeStorageReport report, String type) {
    long max = 0L;
    for (StorageReport r : report.getStorageReports()) {
      if (CompatibilityHelperLoader.getHelper().getStorageType(r).equals(type)
          && r.getRemaining() > max) {
        max = r.getRemaining();
      }
    }
    return max;
  }

  /**
   * @return live datanode storage reports, in randomized order
   * @throws IOException on RPC failure
   */
  public List<DatanodeStorageReport> getDNStorageReports() throws IOException {
    final DatanodeStorageReport[] reports =
        client.getDatanodeStorageReport(DatanodeReportType.LIVE);
    // Shuffle so processing order is not biased toward the same nodes.
    final List<DatanodeStorageReport> trimmed = new ArrayList<>();
    for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
      trimmed.add(r);
    }
    return trimmed;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/InotifyEventApplier.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/InotifyEventApplier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.io.WritableUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.metastore.DBType;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.BackUpInfo;
import org.smartdata.model.FileDiff;
import org.smartdata.model.FileDiffType;
import org.smartdata.model.FileInfo;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
* This is a very preliminary and buggy applier, can further enhance by referring to
* {@link org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader}
*/
public class InotifyEventApplier {
// Destination store for file-table updates derived from inotify events.
private final MetaStore metaStore;
// HDFS client used to look up file status for event paths.
private DFSClient client;
// NOTE(review): logger is named after InotifyEventFetcher, not this class —
// confirm this is intended.
private static final Logger LOG =
    LoggerFactory.getLogger(InotifyEventFetcher.class);
// Directories whose events are dropped entirely.
private List<String> ignoreEventDirs;
// When non-empty, only events under these directories are processed.
private List<String> fetchEventDirs;
// Used to (re)fetch a renamed dest dir missing from the file table; may be
// null when the two-arg constructor was used.
private NamespaceFetcher namespaceFetcher;
/** Creates an applier without a NamespaceFetcher (rename re-fetch unavailable). */
public InotifyEventApplier(MetaStore metaStore, DFSClient client) {
  this.metaStore = metaStore;
  this.client = client;
  initialize();
}
/** Creates an applier that can re-fetch a renamed dest dir via the given fetcher. */
public InotifyEventApplier(MetaStore metaStore, DFSClient client, NamespaceFetcher namespaceFetcher) {
  this(metaStore, client);
  this.namespaceFetcher = namespaceFetcher;
}
// Loads the ignore/cover directory lists from a freshly-constructed SmartConf.
private void initialize(){
  SmartConf conf = new SmartConf();
  ignoreEventDirs = conf.getIgnoreDir();
  fetchEventDirs = conf.getCoverDir();
}
/**
 * Applies a batch of inotify events: translates each event into zero or
 * more SQL statements and executes them against the metastore in one call.
 *
 * @param events events to apply, in order
 */
public void apply(List<Event> events) throws IOException, MetaStoreException, InterruptedException {
  List<String> statements = new ArrayList<>();
  for (Event event : events) {
    List<String> generated = getSqlStatement(event);
    if (generated == null || generated.isEmpty()) {
      continue;
    }
    for (String statement : generated) {
      // Skip null/empty placeholders produced by handlers that only touch
      // the metastore directly.
      if (statement != null && !statement.isEmpty()) {
        statements.add(statement);
      }
    }
  }
  this.metaStore.execute(statements);
}
// Note: shouldIgnore(path) below checks whether a path is under an ignored dir.
/** Array-based convenience overload; delegates to {@link #apply(List)}. */
public void apply(Event[] events) throws IOException, MetaStoreException, InterruptedException {
  this.apply(Arrays.asList(events));
}
/**
 * Decides whether an event path should be dropped: true when the path is
 * under an ignored dir, or when cover dirs are configured and the path is
 * under none of them.
 */
private boolean shouldIgnore(String path) {
  String normalized = path.endsWith("/") ? path : path + "/";
  for (String ignoredDir : ignoreEventDirs) {
    if (normalized.startsWith(ignoredDir)) {
      return true;
    }
  }
  // No cover dirs configured means everything (not ignored) is processed.
  if (fetchEventDirs.isEmpty()) {
    return false;
  }
  boolean covered = false;
  for (String coverDir : fetchEventDirs) {
    if (normalized.startsWith(coverDir)) {
      covered = true;
      break;
    }
  }
  return !covered;
}
/**
 * Translates one inotify event into SQL statements and/or metastore
 * updates. Events whose path is ignored (per the ignore/cover dir
 * configuration) yield an empty list.
 *
 * @param event the inotify event
 * @return SQL statements to execute (possibly empty, never null)
 */
private List<String> getSqlStatement(Event event) throws IOException, MetaStoreException, InterruptedException {
  String path;
  String srcPath, dstPath;
  // Fixed log typo: was "Even Type".
  LOG.debug("Event Type = {}", event.getEventType().toString());
  switch (event.getEventType()) {
    case CREATE:
      path = ((Event.CreateEvent) event).getPath();
      if (shouldIgnore(path)) {
        return Arrays.asList();
      }
      LOG.trace("event type:" + event.getEventType().name() +
          ", path:" + ((Event.CreateEvent) event).getPath());
      return Arrays.asList(this.getCreateSql((Event.CreateEvent) event));
    case CLOSE:
      path = ((Event.CloseEvent) event).getPath();
      if (shouldIgnore(path)) {
        return Arrays.asList();
      }
      LOG.trace("event type:" + event.getEventType().name() +
          ", path:" + ((Event.CloseEvent) event).getPath());
      return Arrays.asList(this.getCloseSql((Event.CloseEvent) event));
    case RENAME:
      srcPath = ((Event.RenameEvent) event).getSrcPath();
      dstPath = ((Event.RenameEvent) event).getDstPath();
      // Skip only when BOTH endpoints are ignored; a rename across the
      // ignore boundary still needs handling.
      if (shouldIgnore(srcPath) && shouldIgnore(dstPath)) {
        return Arrays.asList();
      }
      LOG.trace("event type:" + event.getEventType().name() +
          ", src path:" + ((Event.RenameEvent) event).getSrcPath() +
          ", dest path:" + ((Event.RenameEvent) event).getDstPath());
      return this.getRenameSql((Event.RenameEvent)event);
    case METADATA:
      // The property dfs.namenode.accesstime.precision in HDFS's configuration controls
      // the precision of access time. Its default value is 1h. To avoid missing a
      // MetadataUpdateEvent for updating access time, a smaller value should be set.
      path = ((Event.MetadataUpdateEvent)event).getPath();
      if (shouldIgnore(path)) {
        return Arrays.asList();
      }
      LOG.trace("event type:" + event.getEventType().name() +
          ", path:" + ((Event.MetadataUpdateEvent)event).getPath());
      return Arrays.asList(this.getMetaDataUpdateSql((Event.MetadataUpdateEvent)event));
    case APPEND:
      path = ((Event.AppendEvent)event).getPath();
      if (shouldIgnore(path)) {
        return Arrays.asList();
      }
      LOG.trace("event type:" + event.getEventType().name() +
          ", path:" + ((Event.AppendEvent)event).getPath());
      return this.getAppendSql((Event.AppendEvent)event);
    case UNLINK:
      path = ((Event.UnlinkEvent)event).getPath();
      if (shouldIgnore(path)) {
        return Arrays.asList();
      }
      LOG.trace("event type:" + event.getEventType().name() +
          ", path:" + ((Event.UnlinkEvent)event).getPath());
      return this.getUnlinkSql((Event.UnlinkEvent)event);
  }
  return Arrays.asList();
}
// TODO: also record times and ec policy id, etc.
/**
 * Handles a CREATE event: replaces any stale record for the path in the
 * file table and, when the path is under a backup source dir, emits an
 * APPEND FileDiff that recreates the file (with its metadata) on the
 * backup destination. Returns "" because all work goes through the
 * metastore directly.
 */
private String getCreateSql(Event.CreateEvent createEvent) throws IOException, MetaStoreException {
  HdfsFileStatus fileStatus = client.getFileInfo(createEvent.getPath());
  if (fileStatus == null) {
    LOG.debug("Can not get HdfsFileStatus for file " + createEvent.getPath());
    return "";
  }
  FileInfo fileInfo = HadoopUtil.convertFileStatus(fileStatus, createEvent.getPath());
  if (inBackup(fileInfo.getPath())) {
    if (!fileInfo.isdir()) {
      // ignore dir
      FileDiff fileDiff = new FileDiff(FileDiffType.APPEND);
      fileDiff.setSrc(fileInfo.getPath());
      fileDiff.getParameters().put("-offset", String.valueOf(0));
      // Note that "-length 0" means create an empty file
      fileDiff.getParameters()
          .put("-length", String.valueOf(fileInfo.getLength()));
      // TODO add support in CopyFileAction or split into two file diffs
      //add modification_time and access_time to filediff
      fileDiff.getParameters().put("-mtime", "" + fileInfo.getModificationTime());
      // fileDiff.getParameters().put("-atime", "" + fileInfo.getAccessTime());
      //add owner to filediff
      fileDiff.getParameters().put("-owner", "" + fileInfo.getOwner());
      fileDiff.getParameters().put("-group", "" + fileInfo.getGroup());
      //add Permission to filediff
      fileDiff.getParameters().put("-permission", "" + fileInfo.getPermission());
      //add replication count to file diff
      fileDiff.getParameters().put("-replication", "" + fileInfo.getBlockReplication());
      metaStore.insertFileDiff(fileDiff);
    }
  }
  // Drop any stale record for this path before inserting the fresh one.
  metaStore.deleteFileByPath(fileInfo.getPath());
  metaStore.deleteFileState(fileInfo.getPath());
  metaStore.insertFile(fileInfo);
  return "";
}
/** @return true when the path falls under a configured backup source directory. */
private boolean inBackup(String src) throws MetaStoreException {
  return metaStore.srcInbackup(src);
}
// TODO: should mtime/atime also be updated here?
/**
 * Handles a CLOSE event: when the file is under a backup source dir, emits
 * an APPEND FileDiff covering the bytes added since the recorded length,
 * then returns SQL updating length and modification_time in the file table.
 */
private String getCloseSql(Event.CloseEvent closeEvent) throws IOException, MetaStoreException {
  FileDiff fileDiff = new FileDiff(FileDiffType.APPEND);
  fileDiff.setSrc(closeEvent.getPath());
  long newLen = closeEvent.getFileSize();
  long currLen = 0L;  // uppercase suffix: lowercase 'l' reads like digit 1
  // TODO make sure offset is correct
  if (inBackup(closeEvent.getPath())) {
    FileInfo fileInfo = metaStore.getFile(closeEvent.getPath());
    if (fileInfo == null) {
      // File not in the metastore yet; treat the whole content as new data.
      // TODO add metadata
      currLen = 0;
    } else {
      currLen = fileInfo.getLength();
    }
    if (currLen != newLen) {
      fileDiff.getParameters().put("-offset", String.valueOf(currLen));
      fileDiff.getParameters()
          .put("-length", String.valueOf(newLen - currLen));
      metaStore.insertFileDiff(fileDiff);
    }
  }
  return String.format(
      "UPDATE file SET length = %s, modification_time = %s WHERE path = '%s';",
      closeEvent.getFileSize(), closeEvent.getTimestamp(), closeEvent.getPath());
}
//Todo: should update mtime? atime?
// private String getTruncateSql(Event.TruncateEvent truncateEvent) {
// return String.format(
// "UPDATE file SET length = %s, modification_time = %s WHERE path = '%s';",
// truncateEvent.getFileSize(), truncateEvent.getTimestamp(), truncateEvent.getPath());
// }
/**
 * Handles a RENAME event: emits backup FileDiffs, removes any stale record
 * at the dest path, and returns SQL that rewrites src paths to dest across
 * the file/file_state/small_file tables. When the src was never fetched, it
 * triggers a targeted namespace re-fetch of the dest instead.
 *
 * NOTE(review): namespaceFetcher may be null when the two-arg constructor
 * was used — the re-fetch branch would then NPE; confirm callers always use
 * the three-arg constructor. The sleep-poll loop also blocks the applier
 * thread until the re-fetch completes.
 */
private List<String> getRenameSql(Event.RenameEvent renameEvent)
    throws IOException, MetaStoreException, InterruptedException {
  String src = renameEvent.getSrcPath();
  String dest = renameEvent.getDstPath();
  List<String> ret = new ArrayList<>();
  HdfsFileStatus status = client.getFileInfo(dest);
  FileInfo info = metaStore.getFile(src);
  // For backup data to use.
  generateFileDiff(renameEvent);
  if (status == null) {
    LOG.debug("Get rename dest status failed, {} -> {}", src, dest);
  }
  // The dest path which the src is renamed to should be checked in file table
  // to avoid duplicated record for one same path.
  FileInfo destInfo = metaStore.getFile(dest);
  if (destInfo != null) {
    metaStore.deleteFileByPath(dest);
  }
  // src is not in file table because it is not fetched or other reason
  if (info == null) {
    if (status != null) {
      //info = HadoopUtil.convertFileStatus(status, dest);
      //metaStore.insertFile(info);
      // Re-fetch the dest subtree and block until that fetch completes.
      namespaceFetcher.startFetch(dest);
      while(!namespaceFetcher.fetchFinished()) {
        LOG.info("Fetching the files under " + dest);
        Thread.sleep(100);
      }
      namespaceFetcher.stop();
    }
  } else {
    // if the dest is ignored, delete src info from file table
    // TODO: tackle with file_state and small_state
    if (shouldIgnore(dest)) {
      // fuzzy matching is used to delete content under the dir
      if (info.isdir()) {
        ret.add(String.format("DELETE FROM file WHERE path LIKE '%s/%%';", src));
      }
      ret.add(String.format("DELETE FROM file WHERE path = '%s';", src));
      return ret;
    } else {
      // Rewrite the exact path in all three tables ...
      ret.add(String.format("UPDATE file SET path = replace(path, '%s', '%s') "
          + "WHERE path = '%s';", src, dest, src));
      ret.add(String.format("UPDATE file_state SET path = replace(path, '%s', '%s') "
          + "WHERE path = '%s';", src, dest, src));
      ret.add(String.format("UPDATE small_file SET path = replace(path, '%s', '%s') "
          + "WHERE path = '%s';", src, dest, src));
      if (info.isdir()) {
        // ... and, for a directory, rewrite every descendant path too.
        // String concatenation syntax differs per backing database.
        if (metaStore.getDbType() == DBType.MYSQL) {
          ret.add(String.format("UPDATE file SET path = CONCAT('%s', SUBSTR(path, %d)) "
              + "WHERE path LIKE '%s/%%';", dest, src.length() + 1, src));
          ret.add(String.format("UPDATE file_state SET path = CONCAT('%s', SUBSTR(path, %d)) "
              + "WHERE path LIKE '%s/%%';", dest, src.length() + 1, src));
          ret.add(String.format("UPDATE small_file SET path = CONCAT('%s', SUBSTR(path, %d)) "
              + "WHERE path LIKE '%s/%%';", dest, src.length() + 1, src));
        } else if (metaStore.getDbType() == DBType.SQLITE) {
          ret.add(String.format("UPDATE file SET path = '%s' || SUBSTR(path, %d) "
              + "WHERE path LIKE '%s/%%';", dest, src.length() + 1, src));
          ret.add(String.format("UPDATE file_state SET path = '%s' || SUBSTR(path, %d) "
              + "WHERE path LIKE '%s/%%';", dest, src.length() + 1, src));
          ret.add(String.format("UPDATE small_file SET path = '%s' || SUBSTR(path, %d) "
              + "WHERE path LIKE '%s/%%';", dest, src.length() + 1, src));
        }
      }
    }
  }
  return ret;
}
/**
 * Emits the backup FileDiffs implied by a rename: a RENAME diff when both
 * endpoints stay inside the backup dir, a DELETE diff when the file leaves
 * it, and APPEND diff(s) when content moves into it from outside.
 *
 * NOTE(review): info can be null when src is not in the file table; the
 * info.isdir()/info.getLength() calls below would then NPE — confirm the
 * caller guarantees src is known here.
 */
private void generateFileDiff(Event.RenameEvent renameEvent)
    throws MetaStoreException {
  String src = renameEvent.getSrcPath();
  String dest = renameEvent.getDstPath();
  FileInfo info = metaStore.getFile(src);
  // TODO: consider src or dest is ignored by SSM
  if (inBackup(src)) {
    // rename the file if the renamed file is still under the backup src dir
    // if not, insert a delete file diff
    if (inBackup(dest)) {
      FileDiff fileDiff = new FileDiff(FileDiffType.RENAME);
      fileDiff.setSrc(src);
      fileDiff.getParameters().put("-dest", dest);
      metaStore.insertFileDiff(fileDiff);
    } else {
      insertDeleteDiff(src, info.isdir());
    }
  } else if (inBackup(dest)) {
    // tackle such case: rename file from outside into backup dir
    if (!info.isdir()) {
      FileDiff fileDiff = new FileDiff(FileDiffType.APPEND);
      fileDiff.setSrc(dest);
      fileDiff.getParameters().put("-offset", String.valueOf(0));
      fileDiff.getParameters()
          .put("-length", String.valueOf(info.getLength()));
      metaStore.insertFileDiff(fileDiff);
    } else {
      // A directory moved in: emit one APPEND diff per regular file below it,
      // with paths rewritten from src to dest.
      List<FileInfo> fileInfos = metaStore.getFilesByPrefix(src.endsWith("/") ? src : src + "/");
      for (FileInfo fileInfo : fileInfos) {
        // TODO: cover subdir with no file case
        if (fileInfo.isdir()) {
          continue;
        }
        FileDiff fileDiff = new FileDiff(FileDiffType.APPEND);
        fileDiff.setSrc(fileInfo.getPath().replaceFirst(src, dest));
        fileDiff.getParameters().put("-offset", String.valueOf(0));
        fileDiff.getParameters()
            .put("-length", String.valueOf(fileInfo.getLength()));
        metaStore.insertFileDiff(fileDiff);
      }
    }
  }
}
/**
 * Translates a metadata-update inotify event into the SQL statement that
 * keeps the 'file' table in sync, and (side effect) inserts a METADATA file
 * diff when the affected path lies under a backup (sync) directory.
 *
 * @param metadataUpdateEvent the inotify metadata event
 * @return the UPDATE statement to execute, or an empty string when there is
 *         nothing to apply (ACL changes, non-positive times, unmatched xattrs)
 * @throws MetaStoreException if inserting the file diff fails
 */
private String getMetaDataUpdateSql(Event.MetadataUpdateEvent metadataUpdateEvent) throws MetaStoreException {
  // Only paths under a backup dir need a diff for remote syncing.
  FileDiff fileDiff = null;
  if (inBackup(metadataUpdateEvent.getPath())) {
    fileDiff = new FileDiff(FileDiffType.METADATA);
    fileDiff.setSrc(metadataUpdateEvent.getPath());
  }
  switch (metadataUpdateEvent.getMetadataType()) {
    case TIMES:
      // A time of 0 (or less) means "not set" for that field.
      if (metadataUpdateEvent.getMtime() > 0 && metadataUpdateEvent.getAtime() > 0) {
        if (fileDiff != null) {
          fileDiff.getParameters().put("-mtime", "" + metadataUpdateEvent.getMtime());
          // NOTE(review): access-time sync is commented out — presumably
          // intentional (atime is not propagated to the backup); confirm.
          // fileDiff.getParameters().put("-access_time", "" + metadataUpdateEvent.getAtime());
          metaStore.insertFileDiff(fileDiff);
        }
        return String.format(
            "UPDATE file SET modification_time = %s, access_time = %s WHERE path = '%s';",
            metadataUpdateEvent.getMtime(),
            metadataUpdateEvent.getAtime(),
            metadataUpdateEvent.getPath());
      } else if (metadataUpdateEvent.getMtime() > 0) {
        if (fileDiff != null) {
          fileDiff.getParameters().put("-mtime", "" + metadataUpdateEvent.getMtime());
          metaStore.insertFileDiff(fileDiff);
        }
        return String.format(
            "UPDATE file SET modification_time = %s WHERE path = '%s';",
            metadataUpdateEvent.getMtime(),
            metadataUpdateEvent.getPath());
      } else if (metadataUpdateEvent.getAtime() > 0) {
        // Pure access-time change: update the local table only, no diff.
        // if (fileDiff != null) {
        //   fileDiff.getParameters().put("-access_time", "" + metadataUpdateEvent.getAtime());
        //   metaStore.insertFileDiff(fileDiff);
        // }
        return String.format(
            "UPDATE file SET access_time = %s WHERE path = '%s';",
            metadataUpdateEvent.getAtime(),
            metadataUpdateEvent.getPath());
      } else {
        return "";
      }
    case OWNER:
      if (fileDiff != null) {
        fileDiff.getParameters().put("-owner", "" + metadataUpdateEvent.getOwnerName());
        metaStore.insertFileDiff(fileDiff);
      }
      return String.format(
          "UPDATE file SET owner = '%s', owner_group = '%s' WHERE path = '%s';",
          metadataUpdateEvent.getOwnerName(),
          metadataUpdateEvent.getGroupName(),
          metadataUpdateEvent.getPath());
    case PERMS:
      if (fileDiff != null) {
        fileDiff.getParameters().put("-permission", "" + metadataUpdateEvent.getPerms().toShort());
        metaStore.insertFileDiff(fileDiff);
      }
      return String.format(
          "UPDATE file SET permission = %s WHERE path = '%s';",
          metadataUpdateEvent.getPerms().toShort(), metadataUpdateEvent.getPath());
    case REPLICATION:
      if (fileDiff != null) {
        fileDiff.getParameters().put("-replication", "" + metadataUpdateEvent.getReplication());
        metaStore.insertFileDiff(fileDiff);
      }
      return String.format(
          "UPDATE file SET block_replication = %s WHERE path = '%s';",
          metadataUpdateEvent.getReplication(), metadataUpdateEvent.getPath());
    case XATTRS:
      // Only the erasure-coding policy xattr is handled here.
      final String EC_POLICY = "hdfs.erasurecoding.policy";
      //Todo
      if (LOG.isDebugEnabled()) {
        String message = "\n";
        for (XAttr xAttr : metadataUpdateEvent.getxAttrs()) {
          message += xAttr.toString() + "\n";
        }
        LOG.debug(message);
      }
      // The following code should be executed merely on HDFS3.x.
      for (XAttr xAttr : metadataUpdateEvent.getxAttrs()) {
        if (xAttr.getName().equals(EC_POLICY)) {
          try {
            // xattr value is a Writable-encoded policy name
            String ecPolicyName = WritableUtils.readString(
                new DataInputStream(new ByteArrayInputStream(xAttr.getValue())));
            byte ecPolicyId = CompatibilityHelperLoader.getHelper().
                getErasureCodingPolicyByName(client, ecPolicyName);
            if (ecPolicyId == (byte) -1) {
              // NOTE(review): the unknown id (-1) is still written below —
              // confirm whether that is intended.
              LOG.error("Unrecognized EC policy for updating!");
            }
            return String.format("UPDATE file SET ec_policy_id = %s WHERE path = '%s'",
                ecPolicyId, metadataUpdateEvent.getPath());
          } catch (IOException ex) {
            LOG.error("Error occurred for updating ecPolicy!", ex);
          }
        }
      }
      break;
    case ACLS:
      // ACL changes are not tracked in the metastore.
      return "";
  }
  return "";
}
/**
 * Handles an append inotify event. Intentionally a no-op: no SQL is
 * generated for appends.
 * NOTE(review): presumably the resulting length/mtime changes are applied by
 * a later event (e.g. close) — confirm with the event-dispatch caller.
 *
 * @param appendEvent the inotify append event (unused)
 * @return an empty, immutable list of SQL statements
 */
private List<String> getAppendSql(Event.AppendEvent appendEvent) {
  //Do nothing;
  return Arrays.asList();
}
/**
 * Builds the DELETE statements for an unlink (delete) inotify event and
 * (side effect) inserts DELETE file diffs so the removal is propagated to
 * backup destinations.
 *
 * @param unlinkEvent the inotify unlink event
 * @return statements removing the path — and, for a directory, everything
 *         under it — from the file, file_state and small_file tables; an
 *         empty list when the path is unknown to the metastore
 * @throws MetaStoreException on metastore access failure
 */
private List<String> getUnlinkSql(Event.UnlinkEvent unlinkEvent) throws MetaStoreException {
  // Special case: deleting root ("/") wipes all three tables.
  String root = "/";
  if (root.equals(unlinkEvent.getPath())) {
    LOG.warn("Deleting root directory!!!");
    insertDeleteDiff(root, true);
    return Arrays.asList(
        String.format("DELETE FROM file WHERE path like '%s%%'", root),
        String.format("DELETE FROM file_state WHERE path like '%s%%'", root),
        String.format("DELETE FROM small_file WHERE path like '%s%%'", root));
  }
  String path = unlinkEvent.getPath();
  // file has no "/" appended in the metaStore
  FileInfo fileInfo = metaStore.getFile(path.endsWith("/") ?
      path.substring(0, path.length() - 1) : path);
  if (fileInfo == null) {
    // Unknown path: nothing to delete. (Braced for consistency with the
    // rest of this file's style.)
    return Arrays.asList();
  }
  if (fileInfo.isdir()) {
    insertDeleteDiff(unlinkEvent.getPath(), true);
    // delete the dir itself and all files under it from each table
    return Arrays.asList(
        String.format("DELETE FROM file WHERE path LIKE '%s/%%';", unlinkEvent.getPath()),
        String.format("DELETE FROM file WHERE path = '%s';", unlinkEvent.getPath()),
        String.format("DELETE FROM file_state WHERE path LIKE '%s/%%';", unlinkEvent.getPath()),
        String.format("DELETE FROM file_state WHERE path = '%s';", unlinkEvent.getPath()),
        String.format("DELETE FROM small_file WHERE path LIKE '%s/%%';", unlinkEvent.getPath()),
        String.format("DELETE FROM small_file WHERE path = '%s';", unlinkEvent.getPath()));
  } else {
    insertDeleteDiff(unlinkEvent.getPath(), false);
    // delete file in file table
    return Arrays.asList(
        String.format("DELETE FROM file WHERE path = '%s';", unlinkEvent.getPath()),
        String.format("DELETE FROM file_state WHERE path = '%s';", unlinkEvent.getPath()),
        String.format("DELETE FROM small_file WHERE path = '%s';", unlinkEvent.getPath()));
  }
}
// TODO: just insert a fileDiff for this kind of path; checking whether the
// path matches a dir in FileInfo may be unnecessary.
/**
 * Inserts a DELETE diff for the given path. For a directory, the diff is
 * only inserted when the metastore actually knows a directory entry with
 * exactly that path.
 */
private void insertDeleteDiff(String path, boolean isDir) throws MetaStoreException {
  if (!isDir) {
    insertDeleteDiff(path);
    return;
  }
  String trimmed = path.endsWith("/")
      ? path.substring(0, path.length() - 1) : path;
  for (FileInfo candidate : metaStore.getFilesByPrefix(trimmed)) {
    // look for the directory entry whose path is exactly the deleted dir
    if (candidate.isdir() && trimmed.equals(candidate.getPath())) {
      insertDeleteDiff(candidate.getPath());
      break;
    }
  }
}
/**
 * Inserts DELETE file diffs for {@code path} so the deletion is propagated
 * to every backup destination whose src covers it. When the sync dest maps
 * to the HDFS root, one diff is emitted per top-level entry under the dir
 * instead of a single diff for the dir itself.
 *
 * @param path the deleted path, as received from the event (no trailing "/")
 * @throws MetaStoreException on metastore access failure
 */
private void insertDeleteDiff(String path) throws MetaStoreException {
  // TODO: remove "/" appended in src or dest in backup_file table
  String pathWithSlash = path.endsWith("/") ? path : path + "/";
  if (inBackup(pathWithSlash)) {
    List<BackUpInfo> backUpInfos = metaStore.getBackUpInfoBySrc(pathWithSlash);
    for (BackUpInfo backUpInfo : backUpInfos) {
      // map the local path onto the backup destination namespace
      String destPath = pathWithSlash.replaceFirst(backUpInfo.getSrc(), backUpInfo.getDest());
      try {
        // tackle root path case
        URI namenodeUri = new URI(destPath);
        String root = "hdfs://" + namenodeUri.getHost() + ":"
            + String.valueOf(namenodeUri.getPort());
        if (destPath.equals(root) || destPath.equals(root + "/") || destPath.equals("/")) {
          // dest is the remote root: cannot delete "/", so emit one DELETE
          // diff per entry directly under the deleted dir instead
          for (String srcFilePath : getFilesUnderDir(pathWithSlash)) {
            FileDiff fileDiff = new FileDiff(FileDiffType.DELETE);
            fileDiff.setSrc(srcFilePath);
            String destFilePath = srcFilePath.replaceFirst(backUpInfo.getSrc(), backUpInfo.getDest());
            fileDiff.getParameters().put("-dest", destFilePath);
            metaStore.insertFileDiff(fileDiff);
          }
        } else {
          FileDiff fileDiff = new FileDiff(FileDiffType.DELETE);
          // use the path getting from event with no slash appended
          fileDiff.setSrc(path);
          // put sync's dest path in parameter for delete use
          fileDiff.getParameters().put("-dest", destPath);
          metaStore.insertFileDiff(fileDiff);
        }
      } catch (URISyntaxException e) {
        LOG.error("Error occurs!", e);
      }
    }
  }
}
/**
 * Lists the paths directly needing deletion under {@code dir}: top-level
 * files and subdirs are returned, while entries inside an already-listed
 * subdir are skipped (deleting the subdir covers them).
 */
private List<String> getFilesUnderDir(String dir) throws MetaStoreException {
  String prefix = dir.endsWith("/") ? dir : dir + "/";
  List<String> results = new ArrayList<>();
  List<String> seenSubdirs = new ArrayList<>();
  // ascending path order guarantees a subdir appears before anything inside it
  for (FileInfo entry : metaStore.getFilesByPrefixInOrder(prefix)) {
    String entryPath = entry.getPath();
    if (!isUnderDir(entryPath, seenSubdirs)) {
      results.add(entryPath);
      if (entry.isdir()) {
        seenSubdirs.add(entryPath);
      }
    }
  }
  return results;
}
/**
 * Checks whether {@code path} lies strictly under one of the given
 * directories.
 *
 * @param path a metastore path (stored without a trailing "/")
 * @param dirs directory paths (stored without a trailing "/")
 * @return true iff path is a descendant of some entry in dirs
 */
private boolean isUnderDir(String path, List<String> dirs) {
  for (String subdir : dirs) {
    // Require a "/" boundary after the dir name: a bare startsWith() would
    // wrongly match a sibling sharing a prefix, e.g. "/a/bc" under "/a/b".
    String subdirWithSlash = subdir.endsWith("/") ? subdir : subdir + "/";
    if (path.startsWith(subdirWithSlash)) {
      return true;
    }
  }
  // Empty dirs list falls through naturally; no explicit isEmpty() needed.
  return false;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/DDatanode.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/DDatanode.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.smartdata.hdfs.action.move.Source;
import org.smartdata.hdfs.action.move.StorageGroup;
import java.util.HashMap;
import java.util.Map;
/** A class that keeps track of a datanode. */
public class DDatanode {
final DatanodeInfo datanode;
private final Map<String, Source> sourceMap;
private final Map<String, StorageGroup> targetMap;
@Override
public String toString() {
return getClass().getSimpleName() + ":" + datanode;
}
public DDatanode(DatanodeInfo datanode, int maxConcurrentMoves) {
this.datanode = datanode;
this.sourceMap = new HashMap<>();
this.targetMap = new HashMap<>();
}
public DatanodeInfo getDatanodeInfo() {
return datanode;
}
private static <G extends StorageGroup> void put(String storageType,
G g, Map<String, G> map) {
final StorageGroup existing = map.put(storageType, g);
Preconditions.checkState(existing == null);
}
public StorageGroup addTarget(String storageType) {
final StorageGroup g = new StorageGroup(this.datanode, storageType);
put(storageType, g, targetMap);
return g;
}
public Source addSource(String storageType) {
final Source s = new Source(storageType, this.getDatanodeInfo());
put(storageType, s, sourceMap);
return s;
}
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/InotifyEventFetcher.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/InotifyEventFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.squareup.tape.QueueFile;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.SystemInfo;
import org.smartdata.utils.StringUtil;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
 * Fetches HDFS inotify events and applies them to the SSM metastore.
 *
 * <p>On start it either resumes from the last persisted transaction id, or —
 * when that id is unusable or the directory whitelist changed — first runs a
 * full namespace fetch while buffering incoming events in an on-disk queue,
 * then drains the queue and switches to continuous fetch-and-apply.
 */
public class InotifyEventFetcher {
  private final DFSClient client;
  private final NamespaceFetcher nameSpaceFetcher;
  private final ScheduledExecutorService scheduledExecutorService;
  private final InotifyEventApplier applier;
  private final MetaStore metaStore;
  // invoked once event applying has started (namespace is ready)
  private Callable finishedCallback;
  private ScheduledFuture inotifyFetchFuture;
  private ScheduledFuture fetchAndApplyFuture;
  private EventApplyTask eventApplyTask;
  // on-disk queue backing file, used only while the namespace fetch runs
  private java.io.File inotifyFile;
  private QueueFile queueFile;
  private SmartConf conf;
  public static final Logger LOG =
      LoggerFactory.getLogger(InotifyEventFetcher.class);
  // NOTE(review): mutable static state written by isWhitelistChanged() —
  // confirm only one fetcher instance exists before relying on it.
  public static List<String> oldList = new ArrayList<>();

  public InotifyEventFetcher(DFSClient client, MetaStore metaStore,
      ScheduledExecutorService service, Callable callBack) {
    this(client, metaStore, service, new InotifyEventApplier(metaStore, client), callBack, new SmartConf());
  }

  public InotifyEventFetcher(DFSClient client, MetaStore metaStore,
      ScheduledExecutorService service, Callable callBack, SmartConf conf) {
    this(client, metaStore, service, null, callBack, conf);
  }

  public InotifyEventFetcher(DFSClient client, MetaStore metaStore,
      ScheduledExecutorService service, InotifyEventApplier applier, Callable callBack) {
    this.client = client;
    this.applier = applier;
    this.metaStore = metaStore;
    this.scheduledExecutorService = service;
    this.finishedCallback = callBack;
    // use independent thread pool
    this.nameSpaceFetcher = new NamespaceFetcher(client, metaStore, null);
    this.conf = new SmartConf();
  }

  public InotifyEventFetcher(DFSClient client, MetaStore metaStore,
      ScheduledExecutorService service, InotifyEventApplier applier,
      Callable callBack, SmartConf conf) {
    // NOTE(review): the 'applier' argument is ignored here — a new applier
    // is always constructed below. Confirm whether this is intentional
    // (the delegating ctor above deliberately passes null).
    this.client = client;
    this.metaStore = metaStore;
    this.scheduledExecutorService = service;
    this.finishedCallback = callBack;
    this.conf = conf;
    this.nameSpaceFetcher = new NamespaceFetcher(client, metaStore, null, conf);
    this.applier = new InotifyEventApplier(metaStore, client, nameSpaceFetcher);
  }

  /**
   * Starts event fetching: resumes from the last persisted txid when valid
   * and the whitelist is unchanged, otherwise re-fetches the whole namespace
   * first. Also persists the current whitelist for the next restart.
   */
  public void start() throws IOException {
    boolean ignore = conf.getBoolean(
        SmartConfKeys.SMART_NAMESPACE_FETCHER_IGNORE_UNSUCCESSIVE_INOTIFY_EVENT_KEY,
        SmartConfKeys.SMART_NAMESPACE_FETCHER_IGNORE_UNSUCCESSIVE_INOTIFY_EVENT_DEFAULT);
    if (!ignore) {
      Long lastTxid = getLastTxid();
      //If whitelist is changed, the whole namespace will be fetched when servers restart
      if (lastTxid != null && lastTxid != -1 && canContinueFromLastTxid(client, lastTxid)
          && !isWhitelistChanged(conf, metaStore)) {
        startFromLastTxid(lastTxid);
      } else {
        startWithFetchingNameSpace();
        LOG.info("Start fetch namespace fully!");
      }
      //Update old whitelist
      try {
        String currentList = StringUtil.join(",", conf.getCoverDir());
        metaStore.updateWhitelistTable(currentList);
      } catch (MetaStoreException e) {
        LOG.warn("Failed to update whitelist.", e);
      }
    } else {
      // Testing-only shortcut: jump to the current edit log txid and accept
      // that events between the last run and now are lost.
      long id = client.getNamenode().getCurrentEditLogTxid();
      LOG.warn("NOTE: Incomplete iNotify event may cause unpredictable consequences. "
          + "This should only be used for testing.");
      startFromLastTxid(id);
    }
  }

  /**
   * Returns true when the namenode can still serve events starting at
   * {@code lastId} (txid is current, or a poll from it succeeds).
   */
  @VisibleForTesting
  static boolean canContinueFromLastTxid(DFSClient client, Long lastId) {
    try {
      if (client.getNamenode().getCurrentEditLogTxid() == lastId) {
        return true;
      }
      DFSInotifyEventInputStream is = client.getInotifyEventStream(lastId);
      EventBatch eventBatch = is.poll();
      return eventBatch != null;
    } catch (Exception e) {
      // any failure (e.g. events already purged) means we cannot resume
      return false;
    }
  }

  /** Reads the last applied inotify txid persisted in system_info, or -1. */
  private Long getLastTxid() {
    try {
      SystemInfo info =
          metaStore.getSystemInfoByProperty(SmartConstants.SMART_HDFS_LAST_INOTIFY_TXID);
      return info != null ? Long.parseLong(info.getValue()) : -1L;
    } catch (MetaStoreException e) {
      return -1L;
    }
  }

  /**
   * Runs a full namespace fetch while buffering inotify events in an on-disk
   * queue; once both finish, drains the queue and switches to continuous
   * fetch-and-apply (see NameSpaceFetcherCallBack).
   */
  private void startWithFetchingNameSpace() throws IOException {
    ListeningExecutorService listeningExecutorService = MoreExecutors.listeningDecorator(scheduledExecutorService);
    // NOTE(review): queue file is created directly under /tmp with a random
    // suffix — consider File.createTempFile; nextLong() may be negative.
    inotifyFile = new File("/tmp/inotify" + new Random().nextLong());
    queueFile = new QueueFile(inotifyFile);
    long startId = client.getNamenode().getCurrentEditLogTxid();
    LOG.info("Start fetching namespace with current edit log txid = " + startId);
    nameSpaceFetcher.startFetch();
    inotifyFetchFuture = scheduledExecutorService.scheduleAtFixedRate(
        new InotifyFetchTask(queueFile, client, startId), 0, 100, TimeUnit.MILLISECONDS);
    eventApplyTask = new EventApplyTask(nameSpaceFetcher, applier, queueFile, startId, conf);
    ListenableFuture<?> future = listeningExecutorService.submit(eventApplyTask);
    Futures.addCallback(future, new NameSpaceFetcherCallBack(), scheduledExecutorService);
    LOG.info("Start apply iNotify events.");
  }

  /** Skips the namespace fetch and resumes applying events from lastId. */
  private void startFromLastTxid(long lastId) throws IOException {
    LOG.info("Skipped fetching Name Space, start applying inotify events from " + lastId);
    submitFetchAndApplyTask(lastId);
    try {
      finishedCallback.call();
    } catch (Exception e) {
      LOG.error("Call back failed", e);
    }
  }

  /** Schedules the continuous fetch-and-apply loop (every 100 ms). */
  private void submitFetchAndApplyTask(long lastId) throws IOException {
    fetchAndApplyFuture =
        scheduledExecutorService.scheduleAtFixedRate(
            new InotifyFetchAndApplyTask(client, metaStore, applier, lastId),
            0,
            100,
            TimeUnit.MILLISECONDS);
  }

  /**
   * Runs when the buffered-apply task finishes: stops buffering, closes the
   * queue and hands over to the continuous fetch-and-apply task.
   */
  private class NameSpaceFetcherCallBack implements FutureCallback<Object> {
    @Override
    public void onSuccess(Object o) {
      inotifyFetchFuture.cancel(false);
      nameSpaceFetcher.stop();
      try {
        queueFile.close();
        submitFetchAndApplyTask(eventApplyTask.getLastId());
        LOG.info("Name space fetch finished.");
        finishedCallback.call();
      } catch (Exception e) {
        LOG.error("Call back failed", e);
      }
    }

    @Override
    public void onFailure(Throwable throwable) {
      LOG.error("NameSpaceFetcher failed", throwable);
    }
  }

  /** Cancels scheduled tasks and removes the on-disk queue file. */
  public void stop() {
    if (inotifyFile != null) {
      inotifyFile.delete();
    }
    if (inotifyFetchFuture != null) {
      inotifyFetchFuture.cancel(false);
    }
    if (fetchAndApplyFuture != null){
      fetchAndApplyFuture.cancel(false);
    }
  }

  /**
   *Verify if whitelist changed. It influences namespace fetching process.
   * Compares the configured cover dirs against the dirs persisted from the
   * last fetch (also caches them in the static {@code oldList}).
   */
  public static boolean isWhitelistChanged(SmartConf conf, MetaStore metaStore) {
    List<String> currentList = conf.getCoverDir();
    try {
      oldList = metaStore.getLastFetchedDirs();
    } catch (MetaStoreException e) {
      LOG.warn("Failed to get last fetched dirs.", e);
    }
    if (currentList.size() != oldList.size()) {
      return true;
    }
    // same size: changed iff some current dir is absent from the old set
    Set<String> set = new HashSet<>();
    for (String s : oldList) {
      set.add(s);
    }
    for (String s : currentList) {
      if (!set.contains(s)) {
        return true;
      }
    }
    return false;
  }

  /** Polls inotify events and appends each serialized batch to the queue. */
  private static class InotifyFetchTask implements Runnable {
    private final QueueFile queueFile;
    private DFSInotifyEventInputStream inotifyEventInputStream;

    public InotifyFetchTask(QueueFile queueFile, DFSClient client, long startId) throws IOException {
      this.queueFile = queueFile;
      this.inotifyEventInputStream = client.getInotifyEventStream(startId);
    }

    @Override
    public void run() {
      try {
        // drain everything currently available, then return until rescheduled
        EventBatch eventBatch = inotifyEventInputStream.poll();
        while (eventBatch != null) {
          this.queueFile.add(EventBatchSerializer.serialize(eventBatch));
          eventBatch = inotifyEventInputStream.poll();
        }
      } catch (IOException | MissingEventsException e) {
        LOG.error("Inotify enqueue error", e);
      }
    }
  }

  /**
   * Waits for the namespace fetch to finish, then drains the buffered event
   * queue, filtering out events outside the cover dirs or inside ignore dirs.
   */
  private static class EventApplyTask implements Runnable {
    private final NamespaceFetcher namespaceFetcher;
    private final InotifyEventApplier applier;
    private final QueueFile queueFile;
    private long lastId;
    private SmartConf conf;
    private List<String> ignoreList;
    private List<String> fetchList;

    public EventApplyTask(NamespaceFetcher namespaceFetcher, InotifyEventApplier applier,
        QueueFile queueFile, long lastId) {
      // NOTE(review): this ctor never initializes fetchList, so shouldIgnore()
      // would NPE if this path is used — confirm it is dead code.
      this.namespaceFetcher = namespaceFetcher;
      this.queueFile = queueFile;
      this.applier = applier;
      this.lastId = lastId;
      this.conf = new SmartConf();
      this.ignoreList = getIgnoreDirFromConfig();
    }

    public EventApplyTask(NamespaceFetcher namespaceFetcher, InotifyEventApplier applier,
        QueueFile queueFile, long lastId, SmartConf conf) {
      this.namespaceFetcher = namespaceFetcher;
      this.queueFile = queueFile;
      this.applier = applier;
      this.lastId = lastId;
      this.conf = conf;
      this.ignoreList = getIgnoreDirFromConfig();
      this.fetchList = getFetchDirFromConfig();
    }

    public List<String> getIgnoreDirFromConfig() {
      return conf.getIgnoreDir();
    }

    public List<String> getFetchDirFromConfig() {
      return conf.getCoverDir();
    }

    /**
     * Returns true when the path is under an ignore dir, or when cover dirs
     * are configured and the path is under none of them.
     */
    public boolean shouldIgnore(String path) {
      if (!path.endsWith("/")) {
        path = path.concat("/");
      }
      for (String dir : ignoreList) {
        if (path.startsWith(dir)) {
          return true;
        }
      }
      if (fetchList.isEmpty()) {
        // no cover dirs configured: everything not ignored is accepted
        return false;
      }
      for (String dir : fetchList) {
        if (path.startsWith(dir)) {
          return false;
        }
      }
      return true;
    }

    /**
     * Decides per event type whether to drop the event; a rename is kept if
     * either end is of interest. Unknown event types are dropped.
     */
    public boolean ifEventIgnore(Event event) {
      String path;
      switch (event.getEventType()) {
        case CREATE:
          Event.CreateEvent createEvent = (Event.CreateEvent) event;
          path = createEvent.getPath();
          return shouldIgnore(path);
        case CLOSE:
          Event.CloseEvent closeEvent = (Event.CloseEvent) event;
          path = closeEvent.getPath();
          return shouldIgnore(path);
        case RENAME:
          Event.RenameEvent renameEvent = (Event.RenameEvent) event;
          path = renameEvent.getSrcPath();
          String dest = renameEvent.getDstPath();
          return shouldIgnore(path) && shouldIgnore(dest);
        case METADATA:
          Event.MetadataUpdateEvent metadataUpdateEvent = (Event.MetadataUpdateEvent) event;
          path = metadataUpdateEvent.getPath();
          return shouldIgnore(path);
        case APPEND:
          Event.AppendEvent appendEvent = (Event.AppendEvent) event;
          path = appendEvent.getPath();
          return shouldIgnore(path);
        case UNLINK:
          Event.UnlinkEvent unlinkEvent = (Event.UnlinkEvent) event;
          path = unlinkEvent.getPath();
          return shouldIgnore(path);
      }
      return true;
    }

    /** Filters a batch, keeping only events that are not ignored. */
    public Event[] deleteIgnoreEvent(Event[] events) {
      ArrayList<Event> eventArrayList = new ArrayList<>();
      for (int i = 0; i < events.length; i++) {
        if (!ifEventIgnore(events[i])) {
          eventArrayList.add(events[i]);
        }
      }
      return eventArrayList.toArray(new Event[eventArrayList.size()]);
    }

    @Override
    public void run() {
      try {
        while (!Thread.currentThread().isInterrupted()) {
          if (!namespaceFetcher.fetchFinished()) {
            // namespace fetch still running; poll again shortly
            Thread.sleep(100);
          } else {
            // drain the on-disk queue, then terminate this task
            while (!queueFile.isEmpty()) {
              EventBatch batch = EventBatchSerializer.deserialize(queueFile.peek());
              queueFile.remove();
              Event[] event = batch.getEvents();
              event = deleteIgnoreEvent(event);
              if (event.length > 0) {
                this.applier.apply(event);
                this.lastId = batch.getTxid();
              }
            }
            break;
          }
        }
      } catch (InterruptedException | IOException | MetaStoreException e) {
        LOG.error("Inotify dequeue error", e);
      }
    }

    /** Txid of the last applied batch; continuation point for live fetching. */
    public long getLastId() {
      return this.lastId;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/StorageInfoSampler.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/StorageInfoSampler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.StorageCapacity;
import org.smartdata.utils.StringUtil;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
* Storage information sampling.
*/
public class StorageInfoSampler {
private MetaStore metaStore;
private Configuration conf;
private Map<Long, Integer> samplingIntervals;
private ScheduledExecutorService service;
private static final Logger LOG =
LoggerFactory.getLogger(StorageInfoSampler.class);
public StorageInfoSampler(MetaStore metaStore, Configuration conf) throws IOException {
this.metaStore = metaStore;
this.conf = conf;
samplingIntervals = getSamplingConfiguration();
this.service = Executors.newScheduledThreadPool(1);
}
public void start() throws IOException {
LOG.info("Starting storage sampling service ...");
long curr = System.currentTimeMillis();
for (Long intval : samplingIntervals.keySet()) {
long initDelay = intval - (curr % intval);
service.scheduleAtFixedRate(new InfoSamplingTask(intval, samplingIntervals.get(intval)),
initDelay, intval, TimeUnit.MILLISECONDS);
}
LOG.info("Storage sampling service started.");
}
public void stop() {
if (service != null) {
service.shutdown();
}
}
private Map<Long, Integer> getSamplingConfiguration() throws IOException {
String samplingStr = conf.get(SmartConfKeys.SMART_STORAGE_INFO_SAMPLING_INTERVALS_KEY,
SmartConfKeys.SMART_STORAGE_INFO_SAMPLING_INTERVALS_DEFAULT);
String[] items = samplingStr.split(";");
Map<Long, Integer> ret = new HashMap<>();
for (String s : items) {
if (!s.equals("")) {
String[] samples = s.split(",");
Long interval = StringUtil.pharseTimeString(samples[0]);
Integer maxNum;
if (samples.length == 2) {
maxNum = Integer.valueOf(samples[1]);
} else if (samples.length == 1) {
maxNum = Integer.MAX_VALUE;
} else {
throw new IOException("Invalid value format for configure option '"
+ SmartConfKeys.SMART_STORAGE_INFO_SAMPLING_INTERVALS_KEY + "' = '"
+ samplingStr + "' on part '" + s + "'");
}
ret.put(interval, maxNum);
}
}
return ret;
}
private class InfoSamplingTask implements Runnable {
private long interval;
private long maxItems;
private boolean clean = false;
public InfoSamplingTask(long interval, int maxItems) {
this.interval = interval;
this.maxItems = maxItems;
clean = System.currentTimeMillis() - (maxItems + 1L) * interval > 0;
}
@Override
public void run() {
long curr = System.currentTimeMillis();
Map<String, StorageCapacity> capacities;
try {
capacities = metaStore.getStorageCapacity();
for (StorageCapacity c : capacities.values()) {
c.setTimeStamp(curr);
}
metaStore.insertStorageHistTable(
capacities.values().toArray(new StorageCapacity[capacities.size()]), interval);
if (clean) {
for (String t : capacities.keySet()) {
metaStore.deleteStorageHistoryOldRecords(t, interval,
curr - maxItems * interval - interval / 2);
}
}
} catch (Throwable t) {
LOG.error("Storage info sampling task error: ", t);
}
}
}
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/InotifyFetchAndApplyTask.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/InotifyFetchAndApplyTask.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.SystemInfo;
import java.io.IOException;
import java.util.Date;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Recurring task that polls available HDFS inotify event batches, applies
 * each batch to the metastore, and persists the last applied transaction id
 * so a restart can resume from it.
 */
public class InotifyFetchAndApplyTask implements Runnable {
  static final Logger LOG = LoggerFactory.getLogger(InotifyFetchAndApplyTask.class);
  private final AtomicLong lastId;
  private final MetaStore metaStore;
  private final InotifyEventApplier applier;
  private DFSInotifyEventInputStream inotifyEventInputStream;

  public InotifyFetchAndApplyTask(DFSClient client, MetaStore metaStore, InotifyEventApplier applier, long startId)
      throws IOException {
    this.metaStore = metaStore;
    this.applier = applier;
    this.lastId = new AtomicLong(startId);
    this.inotifyEventInputStream = client.getInotifyEventStream(startId);
  }

  @Override
  public void run() {
    LOG.trace("InotifyFetchAndApplyTask run at " + new Date());
    try {
      // Drain every batch currently available, persisting the txid after each
      // successful apply so progress survives a crash mid-drain.
      for (EventBatch batch = inotifyEventInputStream.poll();
          batch != null;
          batch = inotifyEventInputStream.poll()) {
        applier.apply(batch.getEvents());
        lastId.getAndSet(batch.getTxid());
        metaStore.updateAndInsertIfNotExist(
            new SystemInfo(
                SmartConstants.SMART_HDFS_LAST_INOTIFY_TXID, String.valueOf(lastId.get())));
      }
    } catch (Throwable t) {
      // swallow everything so the recurring schedule keeps running
      LOG.error("Inotify Apply Events error", t);
    }
  }

  /** Transaction id of the most recently applied batch. */
  public long getLastId() {
    return this.lastId.get();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/CachedListFetcher.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/CachedListFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.hdfs.scheduler.CacheScheduler;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.CachedFileStatus;
import org.smartdata.model.StorageCapacity;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
 * Periodically mirrors the set of files cached in the SSM HDFS cache pool
 * into the metastore, and records the pool's capacity/usage in the storages
 * table. The actual work happens in the nested {@link FetchTask}, scheduled
 * at a fixed rate on a {@link ScheduledExecutorService}.
 */
public class CachedListFetcher {
  // Default polling interval in milliseconds.
  private static final Long DEFAULT_INTERVAL = 5 * 1000L;
  private final ScheduledExecutorService scheduledExecutorService;
  private final Long fetchInterval;
  private FetchTask fetchTask;
  // Parameterized wildcard instead of the raw ScheduledFuture type.
  private ScheduledFuture<?> scheduledFuture;
  private MetaStore metaStore;

  public static final Logger LOG =
      LoggerFactory.getLogger(CachedListFetcher.class);

  public CachedListFetcher(
      Long fetchInterval,
      DFSClient dfsClient, MetaStore metaStore,
      ScheduledExecutorService service) {
    this.fetchInterval = fetchInterval;
    this.metaStore = metaStore;
    this.fetchTask = new FetchTask(dfsClient, metaStore);
    this.scheduledExecutorService = service;
  }

  public CachedListFetcher(
      Long fetchInterval,
      DFSClient dfsClient, MetaStore metaStore) {
    this(fetchInterval, dfsClient, metaStore,
        Executors.newSingleThreadScheduledExecutor());
  }

  public CachedListFetcher(
      DFSClient dfsClient, MetaStore metaStore) {
    this(DEFAULT_INTERVAL, dfsClient, metaStore,
        Executors.newSingleThreadScheduledExecutor());
  }

  public CachedListFetcher(
      DFSClient dfsClient, MetaStore metaStore,
      ScheduledExecutorService service) {
    this(DEFAULT_INTERVAL, dfsClient, metaStore, service);
  }

  /**
   * Schedules the fetch task at a fixed rate, delaying the first run so that
   * executions align with interval boundaries of the wall clock.
   */
  public void start() {
    Long current = System.currentTimeMillis();
    Long toWait = fetchInterval - (current % fetchInterval);
    this.scheduledFuture = scheduledExecutorService.scheduleAtFixedRate(
        fetchTask, toWait, fetchInterval, TimeUnit.MILLISECONDS);
  }

  /** Cancels future executions; an in-flight run is allowed to finish. */
  public void stop() {
    if (scheduledFuture != null) {
      this.scheduledFuture.cancel(false);
    }
  }

  /**
   * @return the cached-file records currently stored in the metastore.
   * @throws MetaStoreException on a metastore read failure
   */
  public List<CachedFileStatus> getCachedList() throws MetaStoreException {
    return this.metaStore.getCachedFileStatus();
  }

  /**
   * One synchronization round: lists the cache directives of the SSM pool,
   * updates the "cache" storage capacity row, inserts newly cached files and
   * deletes files no longer cached.
   *
   * Fix: this class previously extended {@link Thread} although it was only
   * ever submitted to an executor as a {@link Runnable} and never started as
   * a thread; it now implements Runnable directly.
   */
  private static class FetchTask implements Runnable {
    private DFSClient dfsClient;
    private MetaStore metaStore;
    // File ids believed to be cached, as of the previous successful round.
    private Set<Long> fileSet;
    // When true, fileSet is re-seeded from the DB at the start of the next run.
    private boolean reInit;

    public FetchTask(DFSClient dfsClient, MetaStore metaStore) {
      this.dfsClient = dfsClient;
      this.metaStore = metaStore;
      reInit = true;
    }

    /** Re-seeds the in-memory fid set from the metastore. */
    private void syncFromDB() {
      fileSet = new HashSet<>();
      try {
        LOG.debug("Sync CacheObject list from DB!");
        fileSet.addAll(metaStore.getCachedFids());
        reInit = false;
      } catch (MetaStoreException e) {
        LOG.error("Read fids from DB error!", e);
        reInit = true;
      }
    }

    /** Drops every cached-file record when HDFS reports nothing cached. */
    private void clearAll() throws MetaStoreException {
      LOG.debug("CacheObject List empty!");
      if (!fileSet.isEmpty()) {
        metaStore.deleteAllCachedFile();
        fileSet.clear();
      }
    }

    @Override
    public void run() {
      if (reInit) {
        syncFromDB();
      }
      Set<Long> newFileSet = new HashSet<>();
      List<CachedFileStatus> cachedFileStatuses = new ArrayList<>();
      try {
        // Only directives in the SSM pool are considered.
        CacheDirectiveInfo.Builder filterBuilder = new CacheDirectiveInfo.Builder();
        filterBuilder.setPool(CacheScheduler.SSM_POOL);
        CacheDirectiveInfo filter = filterBuilder.build();
        RemoteIterator<CacheDirectiveEntry> cacheDirectives =
            dfsClient.listCacheDirectives(filter);
        // Determine the capacity limit of the SSM cache pool.
        RemoteIterator<CachePoolEntry> cachePoolList = dfsClient.listCachePools();
        long cacheMaxSize = 0;
        while (cachePoolList.hasNext()) {
          CachePoolEntry cachePoolEntry = cachePoolList.next();
          if (cachePoolEntry.getInfo().getPoolName().equals(CacheScheduler.SSM_POOL)) {
            // NOTE(review): getLimit() may return a null Long for pools with
            // no limit set, which would NPE on unboxing — TODO confirm.
            cacheMaxSize = cachePoolEntry.getInfo().getLimit();
          }
        }
        long cacheUsage = 0;
        if (!cacheDirectives.hasNext()) {
          // Nothing cached: record zero usage and purge DB records.
          metaStore.insertUpdateStoragesTable(
              new StorageCapacity("cache", cacheMaxSize, cacheMaxSize - cacheUsage));
          clearAll();
          return;
        }
        List<String> paths = new ArrayList<>();
        while (cacheDirectives.hasNext()) {
          CacheDirectiveEntry cacheDirectiveEntry = cacheDirectives.next();
          CacheDirectiveInfo currentInfo = cacheDirectiveEntry.getInfo();
          paths.add(currentInfo.getPath().toString());
          cacheUsage = cacheUsage + cacheDirectiveEntry.getStats().getBytesCached();
          LOG.debug("File in HDFS cache: " + currentInfo.getPath().toString());
        }
        // Record the pool's capacity and remaining free space.
        metaStore.insertUpdateStoragesTable(
            new StorageCapacity("cache", cacheMaxSize, cacheMaxSize - cacheUsage));
        // Map cached paths to file ids; an empty result means none of the
        // paths are known to the metastore yet.
        Map<String, Long> pathFid = metaStore.getFileIDs(paths);
        if (pathFid == null || pathFid.size() == 0) {
          clearAll();
          return;
        }
        // Insert records only for files not already tracked as cached.
        for (String p : pathFid.keySet()) {
          long fid = pathFid.get(p);
          newFileSet.add(fid);
          if (!fileSet.contains(fid)) {
            cachedFileStatuses.add(new CachedFileStatus(fid,
                p, Time.now(), Time.now(), 0));
          }
        }
        if (cachedFileStatuses.size() != 0) {
          metaStore.insertCachedFiles(cachedFileStatuses);
        }
        // Remove records for files that are no longer cached.
        for (Long fid : fileSet) {
          if (!newFileSet.contains(fid)) {
            metaStore.deleteCachedFile(fid);
          }
        }
      } catch (MetaStoreException e) {
        LOG.error("Sync cached file list SQL error!", e);
        reInit = true;
      } catch (IOException e) {
        LOG.error("Sync cached file list HDFS error!", e);
        reInit = true;
      }
      // On failure newFileSet may be partial, but reInit forces a DB
      // re-sync at the start of the next round.
      fileSet = newFileSet;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/MovePlanMaker.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/MovePlanMaker.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.balancer.Matcher;
import org.apache.hadoop.net.NetworkTopology;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.hdfs.action.move.DBlock;
import org.smartdata.hdfs.action.move.MLocation;
import org.smartdata.hdfs.action.move.Source;
import org.smartdata.hdfs.action.move.StorageGroup;
import org.smartdata.hdfs.action.move.StorageMap;
import org.smartdata.hdfs.scheduler.MovePlanStatistics;
import org.smartdata.model.action.FileMovePlan;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A processor to do Mover action.
*/
/**
 * Builds a {@link FileMovePlan} for a single file: for every block replica
 * whose storage type does not match the destination storage policy, it picks
 * a source replica and a target storage group (same node, then same node
 * group, then same rack, then anywhere) and records the move in the plan.
 */
public class MovePlanMaker {
  static final Logger LOG = LoggerFactory.getLogger(MovePlanMaker.class);

  private final DFSClient dfs;
  private NetworkTopology networkTopology;
  private StorageMap storages;
  // NOTE(review): retryCount is initialized but never read in this class.
  private final AtomicInteger retryCount;
  // Storage policy name -> policy, and policy id -> name, cached at startup.
  private final Map<String, BlockStoragePolicy> mapStoragePolicies;
  private final Map<Byte, String> mapPolicyIdToName;
  private final MovePlanStatistics statistics;
  // Plan currently being built; valid only inside processNamespace().
  private FileMovePlan schedulePlan;

  public MovePlanMaker(DFSClient dfsClient, StorageMap storages,
      NetworkTopology cluster, MovePlanStatistics statistics) throws IOException {
    this.dfs = dfsClient;
    this.storages = storages;
    this.networkTopology = cluster;
    this.retryCount = new AtomicInteger(1);
    this.mapStoragePolicies = new HashMap<>();
    this.mapPolicyIdToName = new HashMap<>();
    initStoragePolicies();
    this.statistics = statistics;
  }

  /** Loads the cluster's storage policies into the two lookup maps. */
  private void initStoragePolicies() throws IOException {
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    for (BlockStoragePolicy policy : policies) {
      mapStoragePolicies.put(policy.getName(), policy);
      mapPolicyIdToName.put(policy.getId(), policy.getName());
    }
  }

  /** Replaces the cached storage map and topology with fresh cluster info. */
  public synchronized void updateClusterInfo(StorageMap storages, NetworkTopology cluster) {
    this.storages = storages;
    this.networkTopology = cluster;
  }

  /**
   * Builds a move plan for {@code targetPath} toward {@code destPolicy}.
   * Directories produce an empty plan flagged as a directory. Also sets the
   * destination storage policy on the file (best effort).
   *
   * @param targetPath file to plan moves for
   * @param destPolicy destination storage policy name
   * @return the populated move plan
   * @throws IOException if the file cannot be found or its status read
   */
  public synchronized FileMovePlan processNamespace(Path targetPath, String destPolicy)
      throws IOException {
    schedulePlan = new FileMovePlan();
    String filePath = targetPath.toUri().getPath();
    schedulePlan.setFileName(filePath);
    schedulePlan.setDestStoragePolicy(destPolicy);

    HdfsFileStatus status = dfs.getFileInfo(filePath);
    if (status == null) {
      throw new IOException("File '" + filePath + "' not found!");
    }
    if (status.isDir()) {
      schedulePlan.setDir(true);
      return schedulePlan;
    }

    byte currSpId = status.getStoragePolicy();
    String currSpName = mapPolicyIdToName.get(currSpId);
    schedulePlan.setCurrStoragePolicy(currSpName);
    if (currSpName == null || !currSpName.equals(destPolicy)) {
      try {
        dfs.setStoragePolicy(filePath, destPolicy);
      } catch (IOException e) {
        // Best effort: planning proceeds even if the policy cannot be set,
        // but the failure is surfaced instead of being silently swallowed.
        LOG.warn("Failed to set storage policy " + destPolicy
            + " for file " + filePath, e);
      }
    }

    // Re-fetch the status with block locations (needLocation = true).
    DirectoryListing files = dfs.listPaths(filePath, HdfsFileStatus.EMPTY_NAME, true);
    HdfsFileStatus[] statuses = files.getPartialListing();
    if (statuses == null || statuses.length == 0) {
      throw new IOException("File '" + filePath + "' not found!");
    }
    if (statuses.length != 1) {
      throw new IOException("Get '" + filePath + "' file located status error.");
    }
    status = statuses[0];
    if (status.isDir()) {
      throw new IOException("Unexpected '" + filePath + "' directory located status error.");
    }
    schedulePlan.setFileId(status.getFileId());
    schedulePlan.setModificationTime(status.getModificationTime());
    schedulePlan.setDir(false);
    schedulePlan.setFileLength(status.getLen());
    processFile(targetPath.toUri().getPath(), (HdfsLocatedFileStatus) status, destPolicy);
    return schedulePlan;
  }

  /**
   * Walks the file's located blocks, computes per-block storage-type diffs
   * against the destination policy, and schedules the necessary moves.
   * An incomplete last block is skipped and the plan is marked being-written.
   */
  private void processFile(String fullPath, HdfsLocatedFileStatus status,
      String destPolicy) throws IOException {
    final BlockStoragePolicy policy = mapStoragePolicies.get(destPolicy);
    if (policy == null) {
      LOG.warn("Failed to get the storage policy of file " + fullPath);
      return;
    }
    List<String> types = CompatibilityHelperLoader.getHelper()
        .chooseStorageTypes(policy, status.getReplication());

    final LocatedBlocks locatedBlocks = CompatibilityHelperLoader.getHelper().getLocatedBlocks(status);
    final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
    schedulePlan.setBeingWritten(!lastBlkComplete);
    List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
    for (int i = 0; i < lbs.size(); i++) {
      if (i == lbs.size() - 1 && !lastBlkComplete) {
        // last block is incomplete, skip it
        continue;
      }
      LocatedBlock lb = lbs.get(i);
      // Erasure-coded blocks may require different storage types.
      List<String> typesForEcBlock = CompatibilityHelperLoader.getHelper().
          getStorageTypeForEcBlock(lb, policy, status.getStoragePolicy());
      if (typesForEcBlock != null) {
        types = typesForEcBlock;
      }
      final StorageTypeDiff diff =
          new StorageTypeDiff(types, CompatibilityHelperLoader.getHelper().getStorageTypes(lb));
      int remainingReplications = diff.removeOverlap(true);
      long toMove = lb.getBlockSize() * remainingReplications;
      schedulePlan.addSizeToMove(toMove);
      schedulePlan.incBlocksToMove();
      schedulePlan.addFileLengthToMove(lb.getBlockSize());
      statistics.increaseTotalSize(toMove);
      statistics.increaseTotalBlocks(remainingReplications);
      if (remainingReplications != 0) {
        scheduleMoveBlock(diff, lb, status);
      }
    }
  }

  /**
   * Schedules moves for one block so its replicas match the expected storage
   * types. TODO: consider the case that fails to move some blocks, i.e.,
   * scheduleMoveReplica fails.
   */
  void scheduleMoveBlock(StorageTypeDiff diff, LocatedBlock lb, HdfsFileStatus status) {
    final List<MLocation> locations = MLocation.toLocations(lb);
    if (!CompatibilityHelperLoader.getHelper().isLocatedStripedBlock(lb)) {
      // Shuffle replica locations to make storage medium in balance.
      // E.g., if three replicas are under ALL_SSD policy and ONE_SSD is the target policy,
      // with shuffling locations, two randomly picked replicas will be moved to DISK.
      Collections.shuffle(locations);
    }
    // EC block case is considered.
    final DBlock db =
        CompatibilityHelperLoader.getHelper().newDBlock(lb, status);
    for (MLocation ml : locations) {
      StorageGroup source = storages.getSource(ml);
      if (source != null) {
        db.addLocation(source);
      }
    }
    for (int index = 0; index < diff.existing.size(); index++) {
      String t = diff.existing.get(index);
      Iterator<MLocation> iter = locations.iterator();
      while (iter.hasNext()) {
        MLocation ml = iter.next();
        final Source source = storages.getSource(ml);
        // Check whether the replica's storage type equals with the one
        // in diff's existing list. If so, try to schedule the moving.
        if (ml.getStorageType() == t && source != null) {
          // Schedule moving a replica on a source location.
          // The corresponding storage type in diff's expected list is used.
          if (scheduleMoveReplica(db, source,
              Arrays.asList(diff.expected.get(index)))) {
            // If the replica is successfully scheduled to move.
            // No need to consider it any more.
            iter.remove();
            // Tackle the next storage type in diff existing list.
            break;
          }
        }
      }
    }
  }

  /**
   * Tries to place one replica on a target of one of {@code targetTypes},
   * preferring same node, then same node group, then same rack, then any.
   *
   * @return true if a move was scheduled
   */
  boolean scheduleMoveReplica(DBlock db, Source source, List<String> targetTypes) {
    // Match storage on the same node
    if (chooseTargetInSameNode(db, source, targetTypes)) {
      return true;
    }
    if (networkTopology.isNodeGroupAware()) {
      if (chooseTarget(db, source, targetTypes, Matcher.SAME_NODE_GROUP)) {
        return true;
      }
    }
    // Then, match nodes on the same rack
    if (chooseTarget(db, source, targetTypes, Matcher.SAME_RACK)) {
      return true;
    }
    // At last, match all remaining nodes
    return chooseTarget(db, source, targetTypes, Matcher.ANY_OTHER);
  }

  /**
   * Choose the target storage within same Datanode if possible.
   */
  boolean chooseTargetInSameNode(DBlock db, Source source,
      List<String> targetTypes) {
    for (String t : targetTypes) {
      StorageGroup target = storages.getTarget(source.getDatanodeInfo()
          .getDatanodeUuid(), t);
      if (target == null) {
        continue;
      }
      addPlan(source, target, db.getBlock().getBlockId());
      return true;
    }
    return false;
  }

  /**
   * Picks a target on another node that satisfies {@code matcher}
   * (same node group / same rack / any); targets are shuffled for balance.
   */
  boolean chooseTarget(DBlock db, Source source,
      List<String> targetTypes, Matcher matcher) {
    final NetworkTopology cluster = this.networkTopology;
    for (String t : targetTypes) {
      final List<StorageGroup> targets = storages.getTargetStorages(t);
      Collections.shuffle(targets);
      for (StorageGroup target : targets) {
        if (matcher.match(cluster, source.getDatanodeInfo(),
            target.getDatanodeInfo())) {
          addPlan(source, target, db.getBlock().getBlockId());
          return true;
        }
      }
    }
    return false;
  }

  /** Appends one block move (source -> target) to the current plan. */
  private void addPlan(StorageGroup source, StorageGroup target, long blockId) {
    DatanodeInfo sourceDatanode = source.getDatanodeInfo();
    DatanodeInfo targetDatanode = target.getDatanodeInfo();
    schedulePlan.addPlan(blockId, sourceDatanode.getDatanodeUuid(), source.getStorageType(),
        targetDatanode.getIpAddr(), targetDatanode.getXferPort(), target.getStorageType());
  }

  /**
   * Record and process the difference of storage types between source and
   * destination during Mover.
   */
  class StorageTypeDiff {
    final List<String> expected;
    final List<String> existing;

    StorageTypeDiff(List<String> expected, String[] existing) {
      this.expected = new LinkedList<String>(expected);
      this.existing = new LinkedList<String>(Arrays.asList(existing));
    }

    /**
     * Remove the overlap between the expected types and the existing types.
     * @param ignoreNonMovable ignore non-movable storage types
     * by removing them from both expected and existing storage type list
     * to prevent non-movable storage from being moved.
     * @return the remaining number of replications to move.
     */
    int removeOverlap(boolean ignoreNonMovable) {
      for (Iterator<String> i = existing.iterator(); i.hasNext(); ) {
        final String t = i.next();
        if (expected.remove(t)) {
          i.remove();
        }
      }
      if (ignoreNonMovable) {
        removeNonMovable(existing);
        removeNonMovable(expected);
      }
      return Math.min(existing.size(), expected.size());
    }

    /** Drops storage types the compatibility helper reports as non-movable. */
    void removeNonMovable(List<String> types) {
      for (Iterator<String> i = types.iterator(); i.hasNext(); ) {
        final String t = i.next();
        if (!CompatibilityHelperLoader.getHelper().isMovable(t)) {
          i.remove();
        }
      }
    }

    @Override
    public String toString() {
      return getClass().getSimpleName() + "{expected=" + expected
          + ", existing=" + existing + "}";
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/EventBatchSerializer.java | smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/metric/fetcher/EventBatchSerializer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.metric.fetcher;
import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.*;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.*;
import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
//import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
//import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import java.util.ArrayList;
import java.util.List;
public class EventBatchSerializer {
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
AclEntryScope.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES =
AclEntryType.values();
private static final FsAction[] FSACTION_VALUES =
FsAction.values();
private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
XAttr.NameSpace.values();
//Code copy from PBHelperClient.java
/**
 * Encodes an {@link EventBatch} (txid plus inotify events) into the byte form
 * of {@code InotifyProtos.EventBatchProto}; logic adapted from Hadoop's
 * PBHelperClient.
 *
 * @param eventBatch batch of HDFS inotify events to encode
 * @return protobuf-serialized bytes of the whole batch
 * @throws RuntimeException for an event type this method does not handle
 */
public static byte[] serialize(EventBatch eventBatch) {
  List<InotifyProtos.EventProto> events = Lists.newArrayList();
  for (Event e : eventBatch.getEvents()) {
    switch (e.getEventType()) {
      case CLOSE:
        Event.CloseEvent ce = (Event.CloseEvent) e;
        events.add(InotifyProtos.EventProto.newBuilder()
            .setType(InotifyProtos.EventType.EVENT_CLOSE)
            .setContents(
                InotifyProtos.CloseEventProto.newBuilder()
                    .setPath(ce.getPath())
                    .setFileSize(ce.getFileSize())
                    .setTimestamp(ce.getTimestamp()).build().toByteString()
            ).build());
        break;
      case CREATE:
        Event.CreateEvent ce2 = (Event.CreateEvent) e;
        events.add(InotifyProtos.EventProto.newBuilder()
            .setType(InotifyProtos.EventType.EVENT_CREATE)
            .setContents(
                InotifyProtos.CreateEventProto.newBuilder()
                    .setType(createTypeConvert(ce2.getiNodeType()))
                    .setPath(ce2.getPath())
                    .setCtime(ce2.getCtime())
                    .setOwnerName(ce2.getOwnerName())
                    .setGroupName(ce2.getGroupName())
                    .setPerms(convert(ce2.getPerms()))
                    .setReplication(ce2.getReplication())
                    // Proto string fields reject null; a missing symlink
                    // target is encoded as "" (deserialize() restores null).
                    .setSymlinkTarget(ce2.getSymlinkTarget() == null ?
                        "" : ce2.getSymlinkTarget())
                    .setDefaultBlockSize(ce2.getDefaultBlockSize())
                    .setOverwrite(ce2.getOverwrite()).build().toByteString()
            ).build());
        break;
      case METADATA:
        Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
        // Null owner/group are encoded as "" and null ACL/xattr lists as
        // empty lists; perms is an optional proto field, set only if present.
        InotifyProtos.MetadataUpdateEventProto.Builder metaB =
            InotifyProtos.MetadataUpdateEventProto.newBuilder()
                .setPath(me.getPath())
                .setType(metadataUpdateTypeConvert(me.getMetadataType()))
                .setMtime(me.getMtime())
                .setAtime(me.getAtime())
                .setReplication(me.getReplication())
                .setOwnerName(me.getOwnerName() == null ? "" :
                    me.getOwnerName())
                .setGroupName(me.getGroupName() == null ? "" :
                    me.getGroupName())
                .addAllAcls(me.getAcls() == null ?
                    Lists.<AclProtos.AclEntryProto>newArrayList() :
                    convertAclEntryProto(me.getAcls()))
                .addAllXAttrs(me.getxAttrs() == null ?
                    Lists.<XAttrProtos.XAttrProto>newArrayList() :
                    convertXAttrProto(me.getxAttrs()))
                .setXAttrsRemoved(me.isxAttrsRemoved());
        if (me.getPerms() != null) {
          metaB.setPerms(convert(me.getPerms()));
        }
        events.add(InotifyProtos.EventProto.newBuilder()
            .setType(InotifyProtos.EventType.EVENT_METADATA)
            .setContents(metaB.build().toByteString())
            .build());
        break;
      case RENAME:
        Event.RenameEvent re = (Event.RenameEvent) e;
        events.add(InotifyProtos.EventProto.newBuilder()
            .setType(InotifyProtos.EventType.EVENT_RENAME)
            .setContents(
                InotifyProtos.RenameEventProto.newBuilder()
                    .setSrcPath(re.getSrcPath())
                    .setDestPath(re.getDstPath())
                    .setTimestamp(re.getTimestamp()).build().toByteString()
            ).build());
        break;
      case APPEND:
        Event.AppendEvent re2 = (Event.AppendEvent) e;
        // Append encoding is delegated to the Hadoop-version compatibility
        // helper rather than built inline like the other event types.
        events.add(InotifyProtos.EventProto.newBuilder()
            .setType(InotifyProtos.EventType.EVENT_APPEND)
            .setContents(CompatibilityHelperLoader.getHelper().getAppendEventProto(re2).toByteString())
            .build());
        break;
      case UNLINK:
        Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
        events.add(InotifyProtos.EventProto.newBuilder()
            .setType(InotifyProtos.EventType.EVENT_UNLINK)
            .setContents(
                InotifyProtos.UnlinkEventProto.newBuilder()
                    .setPath(ue.getPath())
                    .setTimestamp(ue.getTimestamp()).build().toByteString()
            ).build());
        break;
      // TRUNCATE support is intentionally disabled (mirrored by the disabled
      // EVENT_TRUNCATE branch in deserialize()).
      /*
      case TRUNCATE:
        Event.TruncateEvent te = (Event.TruncateEvent) e;
        events.add(InotifyProtos.EventProto.newBuilder()
            .setType(InotifyProtos.EventType.EVENT_TRUNCATE)
            .setContents(
                InotifyProtos.TruncateEventProto.newBuilder()
                    .setPath(te.getPath())
                    .setFileSize(te.getFileSize())
                    .setTimestamp(te.getTimestamp()).build().toByteString()
            ).build());
        break;
      */
      default:
        throw new RuntimeException("Unexpected inotify event: " + e);
    }
  }
  return InotifyProtos.EventBatchProto.newBuilder().
      setTxid(eventBatch.getTxid()).
      addAllEvents(events).build().toByteArray();
}
/**
 * Decodes bytes produced by {@link #serialize(EventBatch)} back into an
 * {@link EventBatch}. Inverse of the encoding above: empty-string
 * owner/group/symlink fields become null, empty ACL/xattr lists become null.
 *
 * @param bytes protobuf-serialized {@code EventBatchProto}
 * @return the reconstructed event batch
 * @throws InvalidProtocolBufferException if the bytes are not a valid proto
 * @throws RuntimeException for an event type this method does not handle
 */
public static EventBatch deserialize(byte[] bytes) throws InvalidProtocolBufferException {
  InotifyProtos.EventBatchProto proto = InotifyProtos.EventBatchProto.parseFrom(bytes);
  long txid = proto.getTxid();
  List<Event> events = Lists.newArrayList();
  for (InotifyProtos.EventProto p : proto.getEventsList()) {
    switch (p.getType()) {
      case EVENT_CLOSE:
        InotifyProtos.CloseEventProto close =
            InotifyProtos.CloseEventProto.parseFrom(p.getContents());
        events.add(new Event.CloseEvent(close.getPath(),
            close.getFileSize(), close.getTimestamp()));
        break;
      case EVENT_CREATE:
        InotifyProtos.CreateEventProto create =
            InotifyProtos.CreateEventProto.parseFrom(p.getContents());
        events.add(new Event.CreateEvent.Builder()
            .iNodeType(createTypeConvert(create.getType()))
            .path(create.getPath())
            .ctime(create.getCtime())
            .ownerName(create.getOwnerName())
            .groupName(create.getGroupName())
            .perms(convert(create.getPerms()))
            .replication(create.getReplication())
            // "" was the encoding for a null symlink target.
            .symlinkTarget(create.getSymlinkTarget().isEmpty() ? null :
                create.getSymlinkTarget())
            .defaultBlockSize(create.getDefaultBlockSize())
            .overwrite(create.getOverwrite()).build());
        break;
      case EVENT_METADATA:
        InotifyProtos.MetadataUpdateEventProto meta =
            InotifyProtos.MetadataUpdateEventProto.parseFrom(p.getContents());
        events.add(new Event.MetadataUpdateEvent.Builder()
            .path(meta.getPath())
            .metadataType(metadataUpdateTypeConvert(meta.getType()))
            .mtime(meta.getMtime())
            .atime(meta.getAtime())
            .replication(meta.getReplication())
            // Empty strings / lists were the encodings for null values.
            .ownerName(
                meta.getOwnerName().isEmpty() ? null : meta.getOwnerName())
            .groupName(
                meta.getGroupName().isEmpty() ? null : meta.getGroupName())
            .perms(meta.hasPerms() ? convert(meta.getPerms()) : null)
            .acls(meta.getAclsList().isEmpty() ? null : convertAclEntry(
                meta.getAclsList()))
            .xAttrs(meta.getXAttrsList().isEmpty() ? null : convertXAttrs(
                meta.getXAttrsList()))
            .xAttrsRemoved(meta.getXAttrsRemoved())
            .build());
        break;
      case EVENT_RENAME:
        InotifyProtos.RenameEventProto rename =
            InotifyProtos.RenameEventProto.parseFrom(p.getContents());
        events.add(new Event.RenameEvent.Builder()
            .srcPath(rename.getSrcPath())
            .dstPath(rename.getDestPath())
            .timestamp(rename.getTimestamp())
            .build());
        break;
      case EVENT_APPEND:
        InotifyProtos.AppendEventProto append =
            InotifyProtos.AppendEventProto.parseFrom(p.getContents());
        // Append decoding is version-specific; delegate to the helper.
        events.add(CompatibilityHelperLoader.getHelper().getAppendEvent(append));
        break;
      case EVENT_UNLINK:
        InotifyProtos.UnlinkEventProto unlink =
            InotifyProtos.UnlinkEventProto.parseFrom(p.getContents());
        events.add(new Event.UnlinkEvent.Builder()
            .path(unlink.getPath())
            .timestamp(unlink.getTimestamp())
            .build());
        break;
      // TRUNCATE support intentionally disabled, matching serialize().
      /*
      case EVENT_TRUNCATE:
        InotifyProtos.TruncateEventProto truncate =
            InotifyProtos.TruncateEventProto.parseFrom(p.getContents());
        events.add(new Event.TruncateEvent(truncate.getPath(),
            truncate.getFileSize(), truncate.getTimestamp()));
        break;
      */
      default:
        throw new RuntimeException("Unexpected inotify event type: " +
            p.getType());
    }
  }
  return new EventBatch(txid, events.toArray(new Event[events.size()]));
}
/**
 * Maps an inotify inode type onto its protobuf counterpart.
 * Returns null for an unrecognized value.
 */
private static InotifyProtos.INodeType createTypeConvert(Event.CreateEvent.INodeType
    type) {
  InotifyProtos.INodeType result;
  switch (type) {
    case DIRECTORY:
      result = InotifyProtos.INodeType.I_TYPE_DIRECTORY;
      break;
    case FILE:
      result = InotifyProtos.INodeType.I_TYPE_FILE;
      break;
    case SYMLINK:
      result = InotifyProtos.INodeType.I_TYPE_SYMLINK;
      break;
    default:
      result = null;
      break;
  }
  return result;
}
/**
 * Maps a protobuf inode type back onto the inotify enum.
 * Returns null for an unrecognized value.
 */
private static Event.CreateEvent.INodeType createTypeConvert(InotifyProtos.INodeType
    type) {
  Event.CreateEvent.INodeType result;
  switch (type) {
    case I_TYPE_DIRECTORY:
      result = Event.CreateEvent.INodeType.DIRECTORY;
      break;
    case I_TYPE_FILE:
      result = Event.CreateEvent.INodeType.FILE;
      break;
    case I_TYPE_SYMLINK:
      result = Event.CreateEvent.INodeType.SYMLINK;
      break;
    default:
      result = null;
      break;
  }
  return result;
}
/** Encodes an {@link FsPermission} as its extended-short proto form. */
public static FsPermissionProto convert(FsPermission p) {
  FsPermissionProto.Builder builder = FsPermissionProto.newBuilder();
  builder.setPerm(p.toExtendedShort());
  return builder.build();
}
/** Decodes a permission proto into an {@link FsPermissionExtension}. */
public static FsPermission convert(FsPermissionProto p) {
  short perm = (short) p.getPerm();
  return new FsPermissionExtension(perm);
}
/**
 * Maps a metadata-update type onto its protobuf counterpart.
 * Returns null for an unrecognized value.
 */
private static InotifyProtos.MetadataUpdateType metadataUpdateTypeConvert(
    Event.MetadataUpdateEvent.MetadataType type) {
  InotifyProtos.MetadataUpdateType result;
  switch (type) {
    case TIMES:
      result = InotifyProtos.MetadataUpdateType.META_TYPE_TIMES;
      break;
    case REPLICATION:
      result = InotifyProtos.MetadataUpdateType.META_TYPE_REPLICATION;
      break;
    case OWNER:
      result = InotifyProtos.MetadataUpdateType.META_TYPE_OWNER;
      break;
    case PERMS:
      result = InotifyProtos.MetadataUpdateType.META_TYPE_PERMS;
      break;
    case ACLS:
      result = InotifyProtos.MetadataUpdateType.META_TYPE_ACLS;
      break;
    case XATTRS:
      result = InotifyProtos.MetadataUpdateType.META_TYPE_XATTRS;
      break;
    default:
      result = null;
      break;
  }
  return result;
}
/**
 * Maps a protobuf metadata-update type back onto the inotify enum.
 * Returns null for an unrecognized value.
 */
private static Event.MetadataUpdateEvent.MetadataType metadataUpdateTypeConvert(
    InotifyProtos.MetadataUpdateType type) {
  Event.MetadataUpdateEvent.MetadataType result;
  switch (type) {
    case META_TYPE_TIMES:
      result = Event.MetadataUpdateEvent.MetadataType.TIMES;
      break;
    case META_TYPE_REPLICATION:
      result = Event.MetadataUpdateEvent.MetadataType.REPLICATION;
      break;
    case META_TYPE_OWNER:
      result = Event.MetadataUpdateEvent.MetadataType.OWNER;
      break;
    case META_TYPE_PERMS:
      result = Event.MetadataUpdateEvent.MetadataType.PERMS;
      break;
    case META_TYPE_ACLS:
      result = Event.MetadataUpdateEvent.MetadataType.ACLS;
      break;
    case META_TYPE_XATTRS:
      result = Event.MetadataUpdateEvent.MetadataType.XATTRS;
      break;
    default:
      result = null;
      break;
  }
  return result;
}
/**
 * Converts ACL entries into their protobuf form. The optional name field is
 * set only when present.
 */
public static List<AclEntryProto> convertAclEntryProto(
    List<AclEntry> aclSpec) {
  ArrayList<AclEntryProto> result = Lists.newArrayListWithCapacity(aclSpec.size());
  for (AclEntry entry : aclSpec) {
    AclEntryProto.Builder builder = AclEntryProto.newBuilder()
        .setType(convert(entry.getType()))
        .setScope(convert(entry.getScope()))
        .setPermissions(convert(entry.getPermission()));
    if (entry.getName() != null) {
      builder.setName(entry.getName());
    }
    result.add(builder.build());
  }
  return result;
}
/** Maps an xattr namespace to its proto constant via the shared ordinal. */
private static XAttrNamespaceProto convert(XAttr.NameSpace v) {
  int ordinal = v.ordinal();
  return XAttrNamespaceProto.valueOf(ordinal);
}
/** Maps a proto xattr namespace back to the Java enum via the shared ordinal. */
private static XAttr.NameSpace convert(XAttrNamespaceProto v) {
  XAttr.NameSpace[] values = XATTR_NAMESPACE_VALUES;
  return values[v.ordinal()];
}
/**
 * Converts protobuf ACL entries back into {@link AclEntry} objects.
 * The name is copied only when the proto field was set.
 */
public static List<AclEntry> convertAclEntry(List<AclEntryProto> aclSpec) {
  ArrayList<AclEntry> result = Lists.newArrayListWithCapacity(aclSpec.size());
  for (AclEntryProto proto : aclSpec) {
    AclEntry.Builder builder = new AclEntry.Builder()
        .setType(convert(proto.getType()))
        .setScope(convert(proto.getScope()))
        .setPermission(convert(proto.getPermissions()));
    if (proto.hasName()) {
      builder.setName(proto.getName());
    }
    result.add(builder.build());
  }
  return result;
}
public static List<XAttr> convertXAttrs(List<XAttrProto> xAttrSpec) {
ArrayList<XAttr> xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size());
for (XAttrProto a : xAttrSpec) {
XAttr.Builder builder = new XAttr.Builder();
builder.setNameSpace(convert(a.getNamespace()));
if (a.hasName()) {
builder.setName(a.getName());
}
if (a.hasValue()) {
builder.setValue(a.getValue().toByteArray());
}
xAttrs.add(builder.build());
}
return xAttrs;
}
public static List<XAttrProto> convertXAttrProto(
List<XAttr> xAttrSpec) {
if (xAttrSpec == null) {
return Lists.newArrayListWithCapacity(0);
}
ArrayList<XAttrProto> xAttrs = Lists.newArrayListWithCapacity(
xAttrSpec.size());
for (XAttr a : xAttrSpec) {
XAttrProto.Builder builder = XAttrProto.newBuilder();
builder.setNamespace(convert(a.getNameSpace()));
if (a.getName() != null) {
builder.setName(a.getName());
}
if (a.getValue() != null) {
builder.setValue(getByteString(a.getValue()));
}
xAttrs.add(builder.build());
}
return xAttrs;
}
public static XAttrProto convertXAttrProto(XAttr a) {
XAttrProto.Builder builder = XAttrProto.newBuilder();
builder.setNamespace(convert(a.getNameSpace()));
if (a.getName() != null) {
builder.setName(a.getName());
}
if (a.getValue() != null) {
builder.setValue(getByteString(a.getValue()));
}
return builder.build();
}
  // Wrap a byte array in a protobuf ByteString (copies the array).
  public static ByteString getByteString(byte[] bytes) {
    return ByteString.copyFrom(bytes);
  }
  // Ordinal-based mapping; assumes the proto enum mirrors AclEntryType order.
  private static AclEntryTypeProto convert(AclEntryType e) {
    return AclEntryTypeProto.valueOf(e.ordinal());
  }
  // Ordinal-based mapping; assumes the proto enum mirrors AclEntryScope order.
  private static AclEntryScopeProto convert(AclEntryScope v) {
    return AclEntryScopeProto.valueOf(v.ordinal());
  }
  // Null-safe ordinal mapping: a null action maps to the proto constant at
  // ordinal 0 (presumably NONE — confirm against FsActionProto definition).
  public static FsActionProto convert(FsAction v) {
    return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
  }
  // Map a proto action back to the FsAction constant at the same ordinal.
  public static FsAction convert(FsActionProto v) {
    return castEnum(v, FSACTION_VALUES);
  }
  /**
   * Map an enum constant of one type to the constant at the same ordinal in
   * the given target-values array. Assumes both enums declare their constants
   * in matching order; throws ArrayIndexOutOfBoundsException otherwise.
   */
  private static <T extends Enum<T>, U extends Enum<U>> U castEnum(T from, U[] to) {
    return to[from.ordinal()];
  }
  // Map a proto ACL entry type back to the client enum at the same ordinal.
  private static AclEntryType convert(AclEntryTypeProto v) {
    return castEnum(v, ACL_ENTRY_TYPE_VALUES);
  }
  // Map a proto ACL scope back to the client enum at the same ordinal.
  private static AclEntryScope convert(AclEntryScopeProto v) {
    return castEnum(v, ACL_ENTRY_SCOPE_VALUES);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/CompressionCodec.java | smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/CompressionCodec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.Lz4Codec;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.io.compress.bzip2.Bzip2Compressor;
import org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor;
import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
import org.apache.hadoop.io.compress.lz4.Lz4Decompressor;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
/**
* This class decide which compressor type for SmartCompressorStream
*/
/**
 * Decides which compressor/decompressor implementation to use for SSM's
 * compression streams.
 *
 * <p>Supported codecs are listed in {@link #CODEC_LIST}. Every codec except
 * Zlib requires the Hadoop native library to be loaded; Zlib can fall back
 * to the JDK's built-in implementation via {@link ZlibFactory}.
 */
public class CompressionCodec {
  static final Logger LOG = LoggerFactory.getLogger(CompressionCodec.class);

  // Codec identifiers. These exact strings are validated against CODEC_LIST
  // and recorded in compression file states, so their spelling (including
  // the lower-case "snappy") must not change.
  public static final String LZ4 = "Lz4";
  public static final String BZIP2 = "Bzip2";
  public static final String SNAPPY = "snappy";
  public static final String ZLIB = "Zlib";
  public static final List<String> CODEC_LIST = Arrays.asList(LZ4, BZIP2, SNAPPY, ZLIB);

  private static Configuration conf = new Configuration();
  private static boolean nativeCodeLoaded = NativeCodeLoader.isNativeCodeLoaded();

  /** @return true if the Hadoop native library was successfully loaded. */
  public static boolean getNativeCodeLoaded() {
    return nativeCodeLoaded;
  }

  /**
   * Return the compression overhead of the given codec, i.e. the number of
   * bytes by which a compressed chunk may exceed the raw chunk size.
   *
   * @param bufferSize buffSize of codec (int)
   * @param codec codec name (String)
   * @return compression overhead (int)
   */
  public static int compressionOverhead(int bufferSize, String codec) {
    // Overhead formulas according to Hadoop 3.0 codec implementations.
    switch (codec) {
      case LZ4:
        return bufferSize / 255 + 16;
      case SNAPPY:
        return bufferSize / 6 + 32;
      default:
        // Bzip2 and Zlib share a small fixed overhead.
        return 18;
    }
  }

  /**
   * Create a compressor for the given codec.
   *
   * @param bufferSize internal buffer size for the compressor
   * @param codec codec name, one of {@link #CODEC_LIST}
   * @throws IOException if the codec is unknown or its native library is
   *     unavailable
   */
  public static Compressor createCompressor(int bufferSize, String codec)
      throws IOException {
    if (!CODEC_LIST.contains(codec)) {
      throw new IOException("Invalid compression codec, SSM only support: " +
          CODEC_LIST.toString());
    }
    if (!codec.equals(ZLIB) && !nativeCodeLoaded) {
      throw new IOException(codec + " is not supported, " +
          " because Hadoop native lib was not successfully loaded");
    }
    // Sequentially load compressors
    switch (codec) {
      case LZ4:
        if (Lz4Codec.isNativeCodeLoaded()) {
          return new Lz4Compressor(bufferSize);
        }
        throw new IOException("Failed to load/initialize native-Lz4 library");
      case BZIP2:
        if (Bzip2Factory.isNativeBzip2Loaded(conf)) {
          return new Bzip2Compressor(Bzip2Factory.getBlockSize(conf),
              Bzip2Factory.getWorkFactor(conf),
              bufferSize);
        }
        throw new IOException("Failed to load/initialize native-bzip2 library");
      case SNAPPY:
        if (SnappyCodec.isNativeCodeLoaded()) {
          return new SnappyCompressor(bufferSize);
        }
        throw new IOException("Failed to load/initialize native-snappy library");
      case ZLIB:
        if (nativeCodeLoaded) {
          return new ZlibCompressor(ZlibCompressor.CompressionLevel.DEFAULT_COMPRESSION,
              ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
              ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              bufferSize);
        }
        // Fall back to the JDK zlib implementation.
        // TODO buffer size for build-in zlib codec
        return ZlibFactory.getZlibCompressor(conf);
      default:
        throw new IOException("Unsupported codec: " + codec);
    }
  }

  /**
   * Create a decompressor for the given codec.
   *
   * @param bufferSize internal buffer size for the decompressor
   * @param codec codec name, one of {@link #CODEC_LIST}
   * @throws IOException if the codec is unknown or its native library is
   *     unavailable
   */
  public static Decompressor createDecompressor(int bufferSize, String codec)
      throws IOException {
    if (!CODEC_LIST.contains(codec)) {
      throw new IOException("Invalid compression codec, SSM only recognize: " +
          CODEC_LIST.toString());
    }
    if (!codec.equals(ZLIB) && !nativeCodeLoaded) {
      throw new IOException("Hadoop native lib was not successfully loaded, so " +
          codec + " is not supported.");
    }
    // Sequentially load a decompressor
    switch (codec) {
      case LZ4:
        if (Lz4Codec.isNativeCodeLoaded()) {
          return new Lz4Decompressor(bufferSize);
        }
        throw new IOException("Failed to load/initialize native-Lz4 library");
      case BZIP2:
        if (Bzip2Factory.isNativeBzip2Loaded(conf)) {
          return new Bzip2Decompressor(false, bufferSize);
        }
        throw new IOException("Failed to load/initialize native-bzip2 library");
      case SNAPPY:
        if (SnappyCodec.isNativeCodeLoaded()) {
          return new SnappyDecompressor(bufferSize);
        }
        throw new IOException("Failed to load/initialize native-snappy library");
      case ZLIB:
        if (nativeCodeLoaded) {
          return new ZlibDecompressor(
              ZlibDecompressor.CompressionHeader.DEFAULT_HEADER, bufferSize);
        }
        // Fall back to the JDK zlib implementation.
        // TODO buffer size for build-in zlib codec
        return ZlibFactory.getZlibDecompressor(conf);
      default:
        throw new IOException("Unsupported codec: " + codec);
    }
  }

  /**
   * Create a Decompressor.
   *
   * @deprecated misspelled name kept for source compatibility with existing
   *     callers; use {@link #createDecompressor(int, String)} instead.
   */
  @Deprecated
  public static Decompressor creatDecompressor(int bufferSize, String codec) throws IOException {
    return createDecompressor(bufferSize, codec);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/SmartInputStreamFactory.java | smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/SmartInputStreamFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.smartdata.hdfs.CompatibilityHelper;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.model.FileState;
import java.io.IOException;
/**
* Factory to create SmartInputStream with corresponding Hadoop version.
*/
/**
 * Factory to create SmartInputStream with corresponding Hadoop version.
 */
public class SmartInputStreamFactory {
  /**
   * Get HDFS input stream from dfsClient, file path and its file state.
   *
   * @param dfsClient HDFS client
   * @param src file path
   * @param fileState file state
   * @param verifyChecksum check if need to checksum
   * @return HDFS input stream
   * @throws IOException if IOException occurs
   */
  public static DFSInputStream create(DFSClient dfsClient, String src,
      boolean verifyChecksum, FileState fileState) throws IOException {
    // Fail fast if the client has already been closed.
    dfsClient.checkOpen();
    return createSmartInputStream(dfsClient, src, verifyChecksum, fileState);
  }

  /**
   * Dispatch on the SSM file type to pick the matching stream implementation.
   */
  protected static DFSInputStream createSmartInputStream(DFSClient dfsClient, String src,
      boolean verifyChecksum, FileState fileState) throws IOException {
    switch (fileState.getFileType()) {
      case NORMAL:
        // Instead of using new SmartInputStream(dfsClient, src, verifyChecksum, fileState),
        // the EC case must be considered; see DFSClient.open() -> DFSClient.openInternal().
        // EC data is also viewed as NORMAL. Combining EC with SSM compact or
        // SSM compression is currently NOT supported.
        return CompatibilityHelperLoader.getHelper()
            .getNormalInputStream(dfsClient, src, verifyChecksum, fileState);
      case COMPACT:
        return new CompactInputStream(dfsClient, verifyChecksum, fileState);
      case COMPRESSION:
        return new SmartCompressionInputStream(dfsClient, src, verifyChecksum, fileState);
      case S3:
        return new S3InputStream(dfsClient, src, verifyChecksum, fileState);
      default:
        throw new IOException("Unsupported file type");
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/SmartCompressionInputStream.java | smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/SmartCompressionInputStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.compress.Decompressor;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.CompressionTrunk;
import org.smartdata.model.FileState;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
/**
 * DFSInputStream over an SSM-compressed file. The underlying HDFS file is a
 * sequence of chunks, each stored as a 4-byte big-endian length followed by
 * the compressed payload. This stream decompresses chunks on demand and
 * exposes positions in terms of the original (uncompressed) file.
 */
public class SmartCompressionInputStream extends SmartInputStream {
  // Decompressor matching the codec recorded in the compression file state.
  private Decompressor decompressor = null;
  // Holds the compressed bytes of the current chunk; grown when a chunk
  // exceeds the configured buffer size.
  private byte[] buffer;
  private boolean closed = false;
  // Current position in the original (uncompressed) file.
  private long pos = 0;
  private CompressionFileState compressionFileState;
  // Length of the original file before compression.
  private final long originalLength;

  SmartCompressionInputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
      FileState fileState) throws IOException {
    super(dfsClient, src, verifyChecksum, fileState);
    if (fileState instanceof CompressionFileState) {
      compressionFileState = (CompressionFileState) fileState;
    } else {
      throw new IOException("Compression info cannot be fetched");
    }
    originalLength = compressionFileState.getOriginalLength();
    int bufferSize = compressionFileState.getBufferSize();
    this.buffer = new byte[bufferSize];
    this.decompressor = CompressionCodec.creatDecompressor(bufferSize,
        compressionFileState.getCompressionImpl());
  }

  @Override
  public synchronized int read() throws IOException {
    byte[] oneByteBuf = new byte[1];
    int ret = read(oneByteBuf, 0, 1);
    return (ret <= 0) ? -1 : (oneByteBuf[0] & 0xff);
  }

  @Override
  public synchronized int read(final byte b[], int off, int len) throws IOException {
    // Standard range check per the InputStream#read contract.
    if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return 0;
    }
    int n = decompress(b, off, len);
    // Fix: decompress() returns -1 at end of stream; the previous code did
    // "pos += n" unconditionally, moving pos backwards on EOF.
    if (n > 0) {
      pos += n;
    }
    return n;
  }

  /**
   * Read into the given array-backed buffer (direct buffers not supported).
   * Fix: honor {@code arrayOffset()} and advance the buffer's position by
   * the number of bytes read, as the ByteBufferReadable contract requires;
   * the previous code ignored both.
   */
  @Override
  public synchronized int read(final ByteBuffer buf) throws IOException {
    int n = read(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
    if (n > 0) {
      buf.position(buf.position() + n);
    }
    return n;
  }

  /**
   * Positional read. NOTE(review): unlike the usual pread contract this
   * moves the stream position (seek then read) — confirm callers do not
   * depend on position-preserving behavior.
   */
  @Override
  public int read(long position, byte[] buffer, int offset, int length)
      throws IOException {
    seek(position);
    return read(buffer, offset, length);
  }

  @Override
  public synchronized void close() throws IOException {
    super.close();
    this.closed = true;
  }

  /**
   * Decompress up to {@code len} bytes into {@code b[off..]}, pulling new
   * compressed chunks from the underlying stream as the decompressor needs
   * input.
   *
   * @return number of bytes produced, or -1 at end of stream
   */
  private int decompress(byte[] b, int off, int len) throws IOException {
    int n = 0;
    while ((n = decompressor.decompress(b, off, len)) == 0) {
      if (decompressor.needsInput()) {
        int m;
        try {
          m = getCompressedData();
        } catch (EOFException e) {
          // No more chunks in the underlying stream.
          return -1;
        }
        // Send the freshly read chunk to the decompressor.
        decompressor.reset();
        decompressor.setInput(buffer, 0, m);
      }
    }
    return n;
  }

  /**
   * Read one complete compressed chunk (length header + payload) from the
   * underlying stream into {@link #buffer}.
   *
   * @return the chunk's compressed length
   * @throws EOFException if the underlying stream is exhausted
   */
  private int getCompressedData() throws IOException {
    // Get the size of the compressed chunk (always non-negative).
    int len = rawReadInt();
    // Grow the buffer if this chunk is larger than expected.
    if (len > buffer.length) {
      buffer = new byte[len];
    }
    int n = 0, off = 0;
    while (n < len) {
      int count = super.read(buffer, off + n, len - n);
      if (count < 0) {
        throw new EOFException("Unexpected end of block in input stream");
      }
      n += count;
    }
    return len;
  }

  // Read a 4-byte big-endian int (the chunk length header) from the
  // underlying compressed stream.
  private int rawReadInt() throws IOException {
    byte[] bytes = new byte[4];
    int b1 = super.read(bytes, 0, 1);
    int b2 = super.read(bytes, 1, 1);
    int b3 = super.read(bytes, 2, 1);
    int b4 = super.read(bytes, 3, 1);
    if ((b1 | b2 | b3 | b4) < 0) {
      throw new EOFException();
    }
    return (((bytes[0] & 0xff) << 24) + ((bytes[1] & 0xff) << 16)
        + ((bytes[2] & 0xff) << 8) + ((bytes[3] & 0xff) << 0));
  }

  // NOTE: getFileLength() is intentionally not overridden to return
  // originalLength; the superclass reports the on-HDFS (compressed) length.

  @Override
  public long skip(long n) throws IOException {
    // NOTE(review): delegates to the underlying stream, which skips
    // compressed bytes rather than original-file bytes — confirm intended.
    return super.skip(n);
  }

  /**
   * Seek to an offset in the original (uncompressed) file: locate the
   * containing compression trunk, seek the underlying stream to its start,
   * then decompress forward to the target offset.
   */
  @Override
  public synchronized void seek(long targetPos) throws IOException {
    if (targetPos > originalLength) {
      throw new EOFException("Cannot seek after EOF");
    }
    if (targetPos < 0) {
      throw new EOFException("Cannot seek to negative offset");
    }
    if (targetPos == pos) {
      return;
    }
    // Seek to the start of the compression trunk.
    CompressionTrunk compressionTrunk = compressionFileState.locateCompressionTrunk(
        false, targetPos);
    long hdfsFilePos = compressionTrunk.getCompressedOffset();
    super.seek(hdfsFilePos);
    // Decompress the trunk until reaching the targetPos of the original file.
    int startPos = (int) (targetPos - compressionTrunk.getOriginOffset());
    decompressor.reset();
    int m = getCompressedData();
    decompressor.setInput(buffer, 0, m);
    if (startPos > 0) {
      byte[] temp = new byte[startPos];
      decompress(temp, 0, startPos);
    }
    pos = targetPos;
  }

  @Override
  public synchronized boolean seekToNewSource(long targetPos) throws IOException {
    throw new RuntimeException("SeekToNewSource not supported for compressed file");
  }

  /** @return position in the original (uncompressed) file. */
  @Override
  public synchronized long getPos() {
    return pos;
  }

  @Override
  public synchronized int available() throws IOException {
    if (closed) {
      throw new IOException("Stream closed");
    }
    // Remaining bytes of the original file, clamped to the int range.
    final long remaining = originalLength - pos;
    return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
  }

  @Override
  public ReadStatistics getReadStatistics() {
    throw new RuntimeException("GetReadStatistics not supported for compressed file");
  }

  @Override
  public void clearReadStatistics() {
    throw new RuntimeException("ClearReadStatistics not supported for compressed file");
  }

  @Override
  public FileEncryptionInfo getFileEncryptionInfo() {
    return super.getFileEncryptionInfo();
  }

  @Override
  public synchronized ByteBuffer read(ByteBufferPool bufferPool,
      int maxLength, EnumSet<ReadOption> opts)
      throws IOException, UnsupportedOperationException {
    throw new RuntimeException("Read(ByteBufferPool, int, EnumSet) not supported " +
        "for compressed file");
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/SmartCompressorStream.java | smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/SmartCompressorStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.lang.mutable.MutableFloat;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.bzip2.Bzip2Compressor;
import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.smartdata.model.CompressionFileState;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
/**
* SmartOutputStream.
*/
/**
 * Converts a raw input stream into SSM's compressed on-disk format.
 *
 * <p>The input is consumed in fixed-size chunks of {@code bufferSize}
 * original bytes. Each chunk is compressed independently and written as a
 * 4-byte big-endian length header followed by the compressed payload. The
 * (origin, compressed) offset pair at the start of every chunk is recorded
 * into {@code compressionInfo} so readers can seek later.
 */
public class SmartCompressorStream {
  private Compressor compressor;
  // Scratch buffer for one compressed chunk: chunk size + codec overhead.
  private byte[] buffer;
  private final int bufferSize;
  private CompressionFileState compressionInfo;
  private OutputStream out;
  private InputStream in;
  // Set to bufferSize at construction; not read elsewhere in this class.
  private final int maxLength;
  // Conversion progress in [0, 1], shared with the caller via MutableFloat.
  private MutableFloat progress;
  // Original-file bytes fully processed so far.
  private long originPos = 0;
  // Compressed-output bytes written so far (payloads + length headers).
  private long compressedPos = 0;
  // Parallel lists: chunk start offsets in the original / compressed file.
  private List<Long> originPositions = new ArrayList<>();
  private List<Long> compressedPositions = new ArrayList<>();

  public SmartCompressorStream(InputStream inputStream, OutputStream outputStream,
      int bufferSize, CompressionFileState compressionInfo, MutableFloat progress) throws IOException {
    this.out = outputStream;
    this.in = inputStream;
    this.compressionInfo = compressionInfo;
    this.progress = progress;
    // This bufferSize is equal to chunk size
    this.bufferSize = bufferSize;
    // Compression overHead, e.g., Snappy's overHead is buffSize/6 + 32
    int overHead = CompressionCodec.compressionOverhead(bufferSize,
        compressionInfo.getCompressionImpl());
    // Add overhead to buffer, such that actual buff is larger than bufferSize
    this.maxLength = bufferSize;
    buffer = new byte[bufferSize + overHead];
    this.compressor = CompressionCodec
        .createCompressor(bufferSize + overHead,
            compressionInfo.getCompressionImpl());
    checkCompressor();
  }

  // Record the codec actually instantiated back into the file state, since
  // createCompressor may fall back (e.g. to a built-in Zlib implementation).
  private void checkCompressor() {
    if (compressor instanceof ZlibCompressor) {
      compressionInfo.setCompressionImpl(CompressionCodec.ZLIB);
    } else if (compressor instanceof SnappyCompressor) {
      compressionInfo.setCompressionImpl(CompressionCodec.SNAPPY);
    } else if (compressor instanceof Bzip2Compressor) {
      compressionInfo.setCompressionImpl(CompressionCodec.BZIP2);
    } else if (compressor instanceof Lz4Compressor) {
      compressionInfo.setCompressionImpl(CompressionCodec.LZ4);
    }
  }

  /**
   * Convert the original input stream to compressed output stream.
   * Reads full chunks until EOF; on EOF the final (possibly partial) chunk
   * is flushed, the output is closed, and the position mapping is stored.
   * NOTE(review): the input stream is not closed here — presumably the
   * caller owns it; confirm.
   */
  public void convert() throws IOException {
    byte[] buf = new byte[bufferSize];
    while (true) {
      int off = 0;
      // Compression chunk with chunk size (bufferSize)
      while (off < bufferSize) {
        int len = in.read(buf, off, bufferSize - off);
        // Complete when input stream reaches eof
        if (len <= 0) {
          write(buf, 0, off);
          finish();
          out.close();
          compressionInfo.setPositionMapping(originPositions.toArray(new Long[0]),
              compressedPositions.toArray(new Long[0]));
          return;
        }
        off += len;
      }
      write(buf, 0, off);
      originPos += off;
      this.progress.setValue((float) originPos / compressionInfo.getOriginalLength());
    }
  }

  /**
   * Compress and emit one chunk. Records the chunk's start offsets in both
   * coordinate systems before compressing, then runs the compressor through
   * a full setInput/finish/compress cycle and resets it for the next chunk.
   */
  public void write(byte[] b, int off, int len) throws IOException {
    if (b == null) {
      throw new NullPointerException();
    } else if ((off < 0) || (off > b.length) || (len < 0) ||
        ((off + len) > b.length)) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      // Nothing to write; skip without recording a position pair.
      return;
    }
    // TODO add check to avoid buff overflow
    originPositions.add(originPos);
    compressedPositions.add(compressedPos);
    compressor.setInput(b, off, len);
    // finish() marks the end of this chunk so the compressor drains fully.
    compressor.finish();
    while (!compressor.finished()) {
      compress();
    }
    // Reset so each chunk is compressed independently (required for seek).
    compressor.reset();
  }

  // Drain any remaining buffered data out of the compressor.
  public void finish() throws IOException {
    if (!compressor.finished()) {
      compressor.finish();
      while (!compressor.finished()) {
        compress();
      }
    }
  }

  // Pull one batch of compressed bytes from the compressor and write it as
  // a length-prefixed record.
  protected void compress() throws IOException {
    // TODO when compressed result is larger than raw
    int len = compressor.compress(buffer, 0, bufferSize);
    if (len > 0) {
      // Write out the compressed chunk
      rawWriteInt(len);
      out.write(buffer, 0, len);
      compressedPos += len;
    }
  }

  // Write a 4-byte big-endian int (the chunk length header).
  private void rawWriteInt(int v) throws IOException {
    out.write((v >>> 24) & 0xFF);
    out.write((v >>> 16) & 0xFF);
    out.write((v >>> 8) & 0xFF);
    out.write((v >>> 0) & 0xFF);
    compressedPos += 4;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/S3InputStream.java | smart-hadoop-support/smart-inputstream/src/main/java/org/apache/hadoop/hdfs/S3InputStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.smartdata.model.FileState;
import java.io.IOException;
/**
 * Input stream for files whose SSM file state is S3. Currently applies no
 * behavior beyond the base SmartInputStream.
 */
public class S3InputStream extends SmartInputStream {
  S3InputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
      FileState fileState) throws IOException, UnresolvedLinkException {
    super(dfsClient, src, verifyChecksum, fileState);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-client-cdh-2.6/src/main/java/org/smartdata/hdfs/client/SmartDFSClient.java | smart-hadoop-support/smart-hadoop-client-cdh-2.6/src/main/java/org/smartdata/hdfs/client/SmartDFSClient.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.client;
import org.apache.commons.lang.SerializationUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.SmartInputStreamFactory;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.client.SmartClient;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.FileState;
import org.smartdata.model.NormalFileState;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
public class SmartDFSClient extends DFSClient {
private static final Logger LOG = LoggerFactory.getLogger(SmartDFSClient.class);
private static final String CALLER_CLASS = "org.apache.hadoop.hdfs.DFSInputStream";
private SmartClient smartClient = null;
private boolean healthy = false;
  /**
   * Create a SmartDFSClient bound to the given NameNode address.
   * If the smart client is disabled by configuration this behaves as a plain
   * DFSClient; otherwise it also connects to the SSM server at
   * {@code smartServerAddress}, closing the underlying client and rethrowing
   * if that connection fails.
   */
  public SmartDFSClient(InetSocketAddress nameNodeAddress, Configuration conf,
      InetSocketAddress smartServerAddress) throws IOException {
    super(nameNodeAddress, conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      // Mark healthy only once the SSM connection is established.
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }
  /**
   * Create a SmartDFSClient for the given NameNode URI and connect to the
   * SSM server at {@code smartServerAddress}. Behaves as a plain DFSClient
   * when the smart client is disabled; closes the underlying client and
   * rethrows if the SSM connection fails.
   */
  public SmartDFSClient(final URI nameNodeUri, final Configuration conf,
      final InetSocketAddress smartServerAddress) throws IOException {
    super(nameNodeUri, conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      // Mark healthy only once the SSM connection is established.
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }
  /**
   * Create a SmartDFSClient with filesystem statistics tracking, connecting
   * to the SSM server at {@code smartServerAddress}. Behaves as a plain
   * DFSClient when the smart client is disabled; closes the underlying
   * client and rethrows if the SSM connection fails.
   */
  public SmartDFSClient(URI nameNodeUri, Configuration conf,
      FileSystem.Statistics stats, InetSocketAddress smartServerAddress)
      throws IOException {
    super(nameNodeUri, conf, stats);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      // Mark healthy only once the SSM connection is established.
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }
  /**
   * Create a SmartDFSClient from configuration only (NameNode resolved from
   * conf) and connect to the SSM server at {@code smartServerAddress}.
   * Behaves as a plain DFSClient when the smart client is disabled; closes
   * the underlying client and rethrows if the SSM connection fails.
   */
  public SmartDFSClient(Configuration conf,
      InetSocketAddress smartServerAddress) throws IOException {
    super(conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      // Mark healthy only once the SSM connection is established.
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }
  /**
   * Create a SmartDFSClient from configuration only; the SSM server address
   * is also taken from the configuration. Behaves as a plain DFSClient when
   * the smart client is disabled; closes the underlying client and rethrows
   * if the SSM connection fails.
   */
  public SmartDFSClient(Configuration conf) throws IOException {
    super(conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf);
      // Mark healthy only once the SSM connection is established.
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }
  /**
   * Open a file with the default 4 KB buffer size and checksum verification.
   */
  @Override
  public DFSInputStream open(String src) throws IOException {
    return open(src, 4096, true);
  }
@Override
public DFSInputStream open(String src, int buffersize,
boolean verifyChecksum) throws IOException {
DFSInputStream is = super.open(src, buffersize, verifyChecksum);
if (is.getFileLength() == 0) {
is.close();
FileState fileState = getFileState(src);
if (fileState.getFileStage().equals(FileState.FileStage.PROCESSING)) {
throw new IOException("Cannot open " + src + " when it is under PROCESSING to "
+ fileState.getFileType());
}
is = SmartInputStreamFactory.create(this, src,
verifyChecksum, fileState);
} else {
is.close();
FileState fileState = getFileState(src);
if (fileState.getFileStage().equals(FileState.FileStage.PROCESSING)) {
throw new IOException("Cannot open " + src + " when it is under PROCESSING to "
+ fileState.getFileType());
}
is = SmartInputStreamFactory.create(this, src,
verifyChecksum, fileState);
}
reportFileAccessEvent(src);
return is;
}
  /**
   * Deprecated overload retained for API compatibility; the statistics
   * argument is ignored and the call is forwarded to
   * {@link #open(String, int, boolean)}.
   */
  @Deprecated
  @Override
  public DFSInputStream open(String src, int buffersize,
      boolean verifyChecksum, FileSystem.Statistics stats)
      throws IOException {
    return open(src, buffersize, verifyChecksum);
  }
  /**
   * Append to a file, rejecting appends to SSM small (compacted) files and
   * SSM compressed files.
   *
   * <p>A stream position of 0 means the HDFS file is empty, which is how a
   * compacted small file appears, so the compact state is checked; for
   * non-empty files the compression state is checked instead.
   * NOTE(review): a compacted file that is somehow non-empty in HDFS would
   * slip past the second branch — confirm compacted files are always empty.
   */
  @Override
  public HdfsDataOutputStream append(final String src, final int buffersize,
      final Progressable progress, final FileSystem.Statistics statistics) throws IOException {
    HdfsDataOutputStream out = super.append(src, buffersize, progress, statistics);
    if (out.getPos() == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        // Close the just-opened stream before rejecting the operation.
        out.close();
        throw new IOException(getExceptionMsg("Append", "SSM Small File"));
      }
    } else {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompressionFileState) {
        out.close();
        throw new IOException(getExceptionMsg("Append", "Compressed File"));
      }
    }
    return out;
  }
public boolean truncate(String src, long newLength) throws IOException {
FileState fileState = getFileState(src);
if (fileState instanceof CompressionFileState) {
throw new IOException(getExceptionMsg("Append", "Compressed File"));
}
return false;
}
  /**
   * Get file status, rewriting the length for SSM-managed files so callers
   * see the original (logical) size rather than the on-HDFS size.
   *
   * <p>An empty HDFS file may be an SSM-compacted small file whose real
   * length lives in the container file; a non-empty file may be compressed,
   * in which case the pre-compression length from the compression state is
   * reported instead.
   */
  @Override
  public HdfsFileStatus getFileInfo(String src) throws IOException {
    HdfsFileStatus oldStatus = super.getFileInfo(src);
    if (oldStatus == null) return null;
    if (oldStatus.getLen() == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        // Report the length recorded in the small-file container.
        long len = ((CompactFileState) fileState).getFileContainerInfo().getLength();
        return CompatibilityHelperLoader.getHelper().createHdfsFileStatus(len, oldStatus.isDir(), oldStatus.getReplication(),
            oldStatus.getBlockSize(), oldStatus.getModificationTime(), oldStatus.getAccessTime(),
            oldStatus.getPermission(), oldStatus.getOwner(), oldStatus.getGroup(),
            oldStatus.isSymlink() ? oldStatus.getSymlinkInBytes() : null,
            oldStatus.isEmptyLocalName() ? new byte[0] : oldStatus.getLocalNameInBytes(),
            oldStatus.getFileId(), oldStatus.getChildrenNum(),
            oldStatus.getFileEncryptionInfo(), oldStatus.getStoragePolicy());
      }
    } else {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompressionFileState) {
        // Report the original (uncompressed) length.
        long len = ((CompressionFileState) fileState).getOriginalLength();
        return new HdfsFileStatus(len, oldStatus.isDir(), oldStatus.getReplication(),
            oldStatus.getBlockSize(), oldStatus.getModificationTime(), oldStatus.getAccessTime(),
            oldStatus.getPermission(), oldStatus.getOwner(), oldStatus.getGroup(),
            oldStatus.isSymlink() ? oldStatus.getSymlinkInBytes() : null,
            oldStatus.isEmptyLocalName() ? new byte[0] : oldStatus.getLocalNameInBytes(),
            oldStatus.getFileId(), oldStatus.getChildrenNum(),
            oldStatus.getFileEncryptionInfo(), oldStatus.getStoragePolicy());
      }
    }
    return oldStatus;
  }
/**
 * Returns located blocks for {@code src}; for a zero-length compact
 * (small) file the blocks of its container file are returned instead,
 * shifted by the file's offset inside the container.
 *
 * <p>The stack-trace check skips the redirection when the caller is the
 * class named by {@code CALLER_CLASS} — presumably to avoid recursing or
 * double-translating for an internal caller; confirm which class that is.
 */
@Override
public LocatedBlocks getLocatedBlocks(String src, long start)
    throws IOException {
  LocatedBlocks locatedBlocks = super.getLocatedBlocks(src, start);
  // getStackTrace()[2] is the immediate external caller of this method.
  if (!CALLER_CLASS.equals(Thread.currentThread().getStackTrace()[2].getClassName())
      && locatedBlocks.getFileLength() == 0) {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      String containerFile = ((CompactFileState) fileState)
          .getFileContainerInfo().getContainerFilePath();
      long offset = ((CompactFileState) fileState).getFileContainerInfo().getOffset();
      return super.getLocatedBlocks(containerFile, offset + start);
    }
  }
  return locatedBlocks;
}
/**
 * Returns block locations for {@code src}, translating for SSM-managed
 * files:
 * <ul>
 *   <li>Compact (small) file — no blocks of its own: the container file's
 *       locations are returned, with offsets shifted back into the small
 *       file's coordinate space.</li>
 *   <li>Compressed file — the requested original-offset range is mapped to
 *       the corresponding compressed range via the recorded position
 *       index, and block start offsets are mapped back to original
 *       offsets.</li>
 * </ul>
 */
@Override
public BlockLocation[] getBlockLocations(String src, long start,
    long length) throws IOException {
  BlockLocation[] blockLocations = super.getBlockLocations(src, start, length);
  if (blockLocations.length == 0) {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      String containerFile = ((CompactFileState) fileState)
          .getFileContainerInfo().getContainerFilePath();
      long offset = ((CompactFileState) fileState).getFileContainerInfo().getOffset();
      blockLocations = super.getBlockLocations(containerFile, offset + start, length);
      // Shift container offsets back so they are relative to the small file.
      for (BlockLocation blockLocation : blockLocations) {
        blockLocation.setOffset(blockLocation.getOffset() - offset);
      }
      return blockLocations;
    }
  } else {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompressionFileState) {
      CompressionFileState compressionInfo = (CompressionFileState) fileState;
      // Parallel arrays: originalPos[i] <-> compressedPos[i] mark trunk
      // boundaries in the original and compressed byte spaces.
      Long[] originalPos =
          compressionInfo.getOriginalPos().clone();
      Long[] compressedPos =
          compressionInfo.getCompressedPos().clone();
      int startIndex = compressionInfo.getPosIndexByOriginalOffset(start);
      int endIndex =
          compressionInfo.getPosIndexByOriginalOffset(start + length - 1);
      long compressedStart = compressedPos[startIndex];
      long compressedLength = 0;
      if (endIndex < compressedPos.length - 1) {
        compressedLength = compressedPos[endIndex + 1] - compressedStart;
      } else {
        // Last trunk: extend to the end of the compressed file.
        compressedLength =
            compressionInfo.getCompressedLength() - compressedStart;
      }
      LocatedBlocks originalLocatedBlocks =
          super.getLocatedBlocks(src, compressedStart, compressedLength);
      List<LocatedBlock> blocks = new ArrayList<>();
      for (LocatedBlock block : originalLocatedBlocks.getLocatedBlocks()) {
        // TODO handle CDH2.6 storage type
        // blocks.add(new LocatedBlock(
        // block.getBlock(),
        // block.getLocations(),
        // block.getStorageIDs(),
        // block.getStorageTypes(),
        // compressionInfo
        // .getPosIndexByCompressedOffset(block.getStartOffset()),
        // block.isCorrupt(),
        // block.getCachedLocations()
        // ));
        // Rebuild each block with its start offset mapped back to the
        // original (uncompressed) byte space.
        blocks.add(new LocatedBlock(
            block.getBlock(),
            block.getLocations(),
            compressionInfo
                .getPosIndexByCompressedOffset(block.getStartOffset()),
            block.isCorrupt()
        ));
      }
      LocatedBlock lastLocatedBlock =
          originalLocatedBlocks.getLastLocatedBlock();
      long fileLength = compressionInfo.getOriginalLength();
      // NOTE(review): getLocatedBlocks() yields List<LocatedBlock>, and in
      // stock HDFS LocatedBlock is not a BlockLocation — this toArray call
      // looks like it would throw ArrayStoreException at runtime. Confirm
      // against the CDH 2.6 class hierarchy actually in use.
      return new LocatedBlocks(fileLength,
          originalLocatedBlocks.isUnderConstruction(),
          blocks,
          lastLocatedBlock,
          originalLocatedBlocks.isLastBlockComplete(),
          originalLocatedBlocks.getFileEncryptionInfo())
          .getLocatedBlocks().toArray(new BlockLocation[0]);
    }
  }
  return blockLocations;
}
/**
 * Sets replication for {@code src}. Rejected for SSM small files, whose
 * replication is fixed by their container file.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public boolean setReplication(String src, short replication)
    throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Set replication", "SSM Small File"));
  }
  return super.setReplication(src, replication);
}
/**
 * Sets the storage policy for {@code src}. Rejected for SSM small files,
 * whose storage is governed by their container file.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void setStoragePolicy(String src, String policyName)
    throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Set storage policy", "SSM Small File"));
  }
  super.setStoragePolicy(src, policyName);
}
/**
 * Returns the block size of {@code f}; for an SSM small file the block
 * size of its container file is reported instead.
 */
@Override
public long getBlockSize(String f) throws IOException {
  long size = super.getBlockSize(f);
  FileState state = getFileState(f);
  if (state instanceof CompactFileState) {
    String container =
        ((CompactFileState) state).getFileContainerInfo().getContainerFilePath();
    size = super.getBlockSize(container);
  }
  return size;
}
/**
 * Concatenates {@code srcs} into {@code trg}. If the parent call fails and
 * any source is an SSM small or compressed file, a clearer
 * unsupported-operation message is thrown; otherwise the original failure
 * is propagated unchanged.
 *
 * @throws IOException on failure, with an SSM-specific message when a
 *         source is SSM-managed
 */
@Override
public void concat(String trg, String [] srcs) throws IOException {
  try {
    super.concat(trg, srcs);
  } catch (IOException e) {
    for (String source : srcs) {
      FileState state = getFileState(source);
      if (state instanceof CompactFileState) {
        throw new IOException(getExceptionMsg("Concat", "SSM Small File"));
      }
      if (state instanceof CompressionFileState) {
        throw new IOException(getExceptionMsg("Concat", "Compressed File"));
      }
    }
    throw e;
  }
}
/**
 * Returns link status for {@code src}. A zero-length result may point at
 * an SSM small file; in that case the link target is resolved and its
 * container-backed status is returned instead.
 */
@Override
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
  HdfsFileStatus status = super.getFileLinkInfo(src);
  if (status.getLen() != 0) {
    return status;
  }
  String target = super.getLinkTarget(src);
  if (getFileState(target) instanceof CompactFileState) {
    status = getFileInfo(target);
  }
  return status;
}
/**
 * Returns the MD5-of-MD5-of-CRC32 checksum for {@code src}. For an SSM
 * small file (detected by a zero bytes-per-checksum in the parent's
 * answer) the original file's checksum is read back from the xattr stored
 * at compaction time.
 *
 * <p>Bug fix: the rethrown IOException now chains the underlying cause
 * instead of flattening it into the message, preserving the stack trace.
 *
 * @throws IOException if the stored checksum cannot be read or parsed
 */
@Override
public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
    throws IOException {
  MD5MD5CRC32FileChecksum ret = super.getFileChecksum(src, length);
  if (ret.getChecksumOpt().getBytesPerChecksum() == 0) {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      try {
        // Get original checksum for small file.
        byte[] bytes = getXAttr(src, SmartConstants.SMART_FILE_CHECKSUM_XATTR_NAME);
        ret = new MD5MD5CRC32FileChecksum();
        ret.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
      } catch (IOException e) {
        throw new IOException("Failed to get checksum for SSM Small File: "
            + e.getMessage(), e);
      }
    }
  }
  return ret;
}
/**
 * Sets the permission of {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void setPermission(String src, FsPermission permission)
    throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Set permission", "SSM Small File"));
  }
  super.setPermission(src, permission);
}
/**
 * Sets owner/group of {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void setOwner(String src, String username, String groupname)
    throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Set owner", "SSM Small File"));
  }
  super.setOwner(src, username, groupname);
}
/**
 * Lists corrupt blocks under {@code path}; for an SSM small file the
 * container file's corrupt blocks are reported instead.
 */
@Override
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  CorruptFileBlocks result = super.listCorruptFileBlocks(path, cookie);
  FileState state = getFileState(path);
  if (state instanceof CompactFileState) {
    String container =
        ((CompactFileState) state).getFileContainerInfo().getContainerFilePath();
    result = super.listCorruptFileBlocks(container, cookie);
  }
  return result;
}
/**
 * Modifies ACL entries of {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
    throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Modify acl entries", "SSM Small File"));
  }
  super.modifyAclEntries(src, aclSpec);
}
/**
 * Removes ACL entries of {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void removeAclEntries(String src, List<AclEntry> aclSpec)
    throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Remove acl entries", "SSM Small File"));
  }
  super.removeAclEntries(src, aclSpec);
}
/**
 * Removes the default ACL of {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void removeDefaultAcl(String src) throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Remove default acl", "SSM Small File"));
  }
  super.removeDefaultAcl(src);
}
/**
 * Removes the ACL of {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void removeAcl(String src) throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Remove acl", "SSM Small File"));
  }
  super.removeAcl(src);
}
/**
 * Sets the ACL of {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Set acl", "SSM Small File"));
  }
  super.setAcl(src, aclSpec);
}
/**
 * Creates an encryption zone at {@code src}. Rejected for SSM small files.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void createEncryptionZone(String src, String keyName)
    throws IOException {
  if (getFileState(src) instanceof CompactFileState) {
    throw new IOException(getExceptionMsg("Create encryption zone", "SSM Small File"));
  }
  super.createEncryptionZone(src, keyName);
}
/**
 * Checks access to {@code src}; for an SSM small file the check is
 * performed against its container file, which holds the actual data.
 */
@Override
public void checkAccess(String src, FsAction mode) throws IOException {
  FileState state = getFileState(src);
  String pathToCheck = src;
  if (state instanceof CompactFileState) {
    pathToCheck =
        ((CompactFileState) state).getFileContainerInfo().getContainerFilePath();
  }
  super.checkAccess(pathToCheck, mode);
}
/**
 * Returns whether {@code src} is closed; for an SSM small file that the
 * parent reports as open, the container file's state is consulted instead.
 */
@Override
public boolean isFileClosed(String src) throws IOException {
  boolean closed = super.isFileClosed(src);
  if (closed) {
    return true;
  }
  FileState state = getFileState(src);
  if (!(state instanceof CompactFileState)) {
    return false;
  }
  String container =
      ((CompactFileState) state).getFileContainerInfo().getContainerFilePath();
  return super.isFileClosed(container);
}
/**
 * Closes the parent DFS client and then the SSM smart client. The nested
 * try/finally guarantees the smart client is closed and the reporting flag
 * cleared even if the parent close throws; {@code healthy} is always left
 * {@code false} afterwards so no further access events are reported.
 */
@Override
public synchronized void close() throws IOException {
  try {
    super.close();
  } finally {
    try {
      if (smartClient != null) {
        smartClient.close();
      }
    } finally {
      healthy = false;
    }
  }
}
/**
 * Report file access event to SSM server.
 *
 * <p>Best-effort: if reporting fails once, {@code healthy} is cleared and
 * all subsequent reports from this instance are silently skipped, so a
 * dead SSM server never slows down or breaks normal HDFS reads.
 *
 * @param src the path whose access is being reported
 */
private void reportFileAccessEvent(String src) {
  try {
    if (!healthy) {
      // Reporting already disabled for this instance; skip quietly.
      return;
    }
    String userName;
    try {
      userName = UserGroupInformation.getCurrentUser().getUserName();
    } catch (IOException e) {
      // User lookup failure must not block the report itself.
      userName = "Unknown";
    }
    smartClient.reportFileAccessEvent(new FileAccessEvent(src, userName));
  } catch (IOException e) {
    // Here just ignores that failed to report
    LOG.error("Cannot report file access event to SmartServer: " + src
        + " , for: " + e.getMessage()
        + " , report mechanism will be disabled now in this instance.");
    healthy = false;
  }
}
/**
 * Check if the smart client is disabled, indicated by the presence of the
 * marker file named by {@code SmartConstants.SMART_CLIENT_DISABLED_ID_FILE}.
 *
 * @return {@code true} when the marker file exists on local disk
 */
private boolean isSmartClientDisabled() {
  return new File(SmartConstants.SMART_CLIENT_DISABLED_ID_FILE).exists();
}
/**
 * Get the exception message of unsupported operation.
 *
 * @param operation the hdfs operation name
 * @param fileType the type of SSM specify file
 * @return the message of unsupported exception
 */
public String getExceptionMsg(String operation, String fileType) {
  return operation + " is not supported for " + fileType + ".";
}
/**
 * Get file state of the specified file.
 *
 * <p>Reads the SSM state xattr and deserializes it. If the xattr is absent,
 * or the namenode rejects the call with a RemoteException, the file is
 * deliberately treated as a normal (non-SSM) file instead of failing the
 * caller — SSM metadata must never break plain HDFS access.
 *
 * @param filePath the path of source file
 * @return file state of source file
 * @throws IOException e
 */
public FileState getFileState(String filePath) throws IOException {
  try {
    byte[] fileState = getXAttr(filePath, SmartConstants.SMART_FILE_STATE_XATTR_NAME);
    if (fileState != null) {
      return (FileState) SerializationUtils.deserialize(fileState);
    }
  } catch (RemoteException e) {
    // Best-effort fallback: inaccessible xattr means "normal file".
    return new NormalFileState(filePath);
  }
  return new NormalFileState(filePath);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-client-cdh-2.6/src/main/java/org/smartdata/hadoop/filesystem/SmartFileSystem.java | smart-hadoop-support/smart-hadoop-client-cdh-2.6/src/main/java/org/smartdata/hadoop/filesystem/SmartFileSystem.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hadoop.filesystem;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemLinkResolver;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.util.Progressable;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.client.SmartDFSClient;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.CompressionTrunk;
import org.smartdata.model.FileContainerInfo;
import org.smartdata.model.FileState;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
/**
* SmartFileSystem Deploy Guide
* 1. Build SSM, get all jar files start with name Smart*
* 2. Copy these jar files to HDFS classpath
* 3. Reconfigure HDFS
* Please do the following configurations,
* 1) core-site.xml
* Change property "fs.hdfs.impl" value, to point to the Smart Server provided
* "Smart File System".
* <property>
* <name>fs.hdfs.impl</name>
* <value>org.smartdata.hadoop.filesystem.SmartFileSystem</value>
* <description>The FileSystem for hdfs URL</description>
* </property>
* 2) hdfs-site.xml
 * Add property "smart.server.rpc.address" to point to Smart Server.
* If SSM HA mode is enabled, more than one Smart Server address can
* be specified with comma delimited.
* <property>
* <name>smart.server.rpc.address</name>
* <value>127.0.0.1:7042</value>
* </property>
*
* 4. Restart HDFS
*/
public class SmartFileSystem extends DistributedFileSystem {
private SmartDFSClient smartDFSClient;
private boolean verifyChecksum = true;
/**
 * Initializes the underlying DistributedFileSystem, then creates the
 * SmartDFSClient that routes SSM-aware calls (file state, small-file
 * redirection) for this filesystem instance.
 */
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
  super.initialize(uri, conf);
  this.smartDFSClient = new SmartDFSClient(conf);
}
/**
 * Opens {@code path} through the SmartDFSClient so SSM-managed files
 * (compact/compressed) are transparently readable, honoring the
 * checksum-verification flag set via {@link #setVerifyChecksum(boolean)}.
 */
@Override
public FSDataInputStream open(Path path, final int bufferSize)
    throws IOException {
  statistics.incrementReadOps(1);
  Path absF = fixRelativePart(path);
  final DFSInputStream in = smartDFSClient.open(
      absF.toUri().getPath(), bufferSize, verifyChecksum);
  return smartDFSClient.createWrappedInputStream(in);
}
/**
 * Records the checksum-verification preference applied by subsequent
 * {@link #open(Path, int)} calls. Only the local flag is updated here;
 * the parent filesystem's setting is not forwarded.
 */
@Override
public void setVerifyChecksum(boolean verifyChecksum) {
  this.verifyChecksum = verifyChecksum;
}
// @Override
// public FSDataOutputStream append(Path f, final int bufferSize,
// final Progressable progress) throws IOException {
// try {
// return super.append(f, bufferSize, progress);
// } catch (IOException e) {
// FileState fileState = smartDFSClient.getFileState(getPathName(f));
// if (fileState instanceof CompactFileState) {
// throw new IOException(
// smartDFSClient.getExceptionMsg("Append", "SSM Small File"));
// }
// throw e;
// }
// }
/**
 * Appends to {@code f}, rejecting SSM-managed files that cannot be
 * appended to: a stream position of 0 (empty on-HDFS file) is checked for
 * the compact/small-file state, a non-zero position for the
 * compressed-file state. NOTE(review): unlike the client-side append,
 * the stream is not closed before throwing — confirm whether a lease is
 * leaked on the rejection path.
 *
 * @throws IOException if the file is an SSM small file or compressed file
 */
@Override
public FSDataOutputStream append(Path f, final int bufferSize, final Progressable progress)
    throws IOException {
  FSDataOutputStream out = super.append(f, bufferSize, progress);
  if (out.getPos() == 0) {
    FileState fileState = smartDFSClient.getFileState(getPathName(f));
    if (fileState instanceof CompactFileState) {
      throw new IOException(
          smartDFSClient.getExceptionMsg("Append", "SSM Small File"));
    }
  } else {
    FileState fileState = smartDFSClient.getFileState(getPathName(f));
    if (fileState instanceof CompressionFileState) {
      throw new IOException(
          smartDFSClient.getExceptionMsg("Append", "Compressed File"));
    }
  }
  return out;
}
// @Override
// public FSDataOutputStream append(Path f, final EnumSet<CreateFlag> flag,
// final int bufferSize, final Progressable progress,
// final InetSocketAddress[] favoredNodes)
// throws IOException {
// FSDataOutputStream out = super.append(f, flag, bufferSize, progress, favoredNodes);
// if (out.getPos() == 0) {
// FileState fileState = smartDFSClient.getFileState(getPathName(f));
// if (fileState instanceof CompactFileState) {
// throw new IOException(
// smartDFSClient.getExceptionMsg("Append", "SSM Small File"));
// }
// }
// return out;
// }
/**
 * Returns the status of {@code f}, rewriting the reported length for
 * SSM-managed files: a zero-length compact (small) file reports its
 * logical length from the container info, and a non-empty compressed file
 * reports its original (uncompressed) length. All other fields are copied
 * from the parent's answer.
 *
 * @return the (possibly length-rewritten) status, or {@code null} if the
 *         parent returns {@code null}
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  FileStatus oldStatus = super.getFileStatus(f);
  if (oldStatus == null) return null;
  if (oldStatus.getLen() == 0) {
    FileState fileState = smartDFSClient.getFileState(getPathName(f));
    if (fileState instanceof CompactFileState) {
      // Logical length lives in the container info, not the HDFS inode.
      long len = ((CompactFileState) fileState).getFileContainerInfo().getLength();
      return new FileStatus(len, oldStatus.isDirectory(), oldStatus.getReplication(),
          oldStatus.getBlockSize(), oldStatus.getModificationTime(),
          oldStatus.getAccessTime(), oldStatus.getPermission(),
          oldStatus.getOwner(), oldStatus.getGroup(),
          oldStatus.isSymlink() ? oldStatus.getSymlink() : null, oldStatus.getPath());
    }
  } else {
    FileState fileState = smartDFSClient.getFileState(getPathName(f));
    if (fileState instanceof CompressionFileState) {
      // Report the uncompressed length so consumers see the logical size.
      long len = ((CompressionFileState) fileState).getOriginalLength();
      return new FileStatus(len, oldStatus.isDirectory(), oldStatus.getReplication(),
          oldStatus.getBlockSize(), oldStatus.getModificationTime(),
          oldStatus.getAccessTime(), oldStatus.getPermission(),
          oldStatus.getOwner(), oldStatus.getGroup(),
          oldStatus.isSymlink() ? oldStatus.getSymlink() : null, oldStatus.getPath());
    }
  }
  return oldStatus;
}
/**
 * Lists the directory {@code p}, rewriting each entry's length for
 * SSM-managed files (small files report their container-backed logical
 * length, compressed files their original length) just like
 * {@link #getFileStatus(Path)}.
 *
 * <p>Bug fix: the per-entry null check previously tested the whole array
 * ({@code oldStatus}) instead of the current element ({@code status}), so
 * a null entry would have thrown NPE instead of being passed through.
 */
@Override
public FileStatus[] listStatus(Path p) throws IOException {
  FileStatus[] oldStatus = super.listStatus(p);
  ArrayList<FileStatus> newStatus = new ArrayList<>(oldStatus.length);
  for (FileStatus status : oldStatus) {
    if (status == null) {
      // Preserve null entries positionally.
      newStatus.add(null);
      continue;
    }
    if (status.getLen() == 0) {
      FileState fileState = smartDFSClient.getFileState(getPathName(status.getPath()));
      if (fileState instanceof CompactFileState) {
        // Small file: report its logical length from the container info.
        long len = ((CompactFileState) fileState).getFileContainerInfo().getLength();
        newStatus.add(new FileStatus(len, status.isDirectory(), status.getReplication(),
            status.getBlockSize(), status.getModificationTime(), status.getAccessTime(),
            status.getPermission(), status.getOwner(), status.getGroup(),
            status.isSymlink() ? status.getSymlink() : null, status.getPath()));
      } else {
        newStatus.add(status);
      }
    } else {
      FileState fileState = smartDFSClient.getFileState(getPathName(status.getPath()));
      if (fileState instanceof CompressionFileState) {
        // Compressed file: report its original (uncompressed) length.
        long len = ((CompressionFileState) fileState).getOriginalLength();
        newStatus.add(new FileStatus(len, status.isDirectory(), status.getReplication(),
            status.getBlockSize(), status.getModificationTime(), status.getAccessTime(),
            status.getPermission(), status.getOwner(), status.getGroup(),
            status.isSymlink() ? status.getSymlink() : null, status.getPath()));
      } else {
        newStatus.add(status);
      }
    }
  }
  return newStatus.toArray(new FileStatus[oldStatus.length]);
}
/**
 * Returns block locations for {@code p}; a compact (small) file has no
 * blocks of its own, so its container file's locations are returned with
 * offsets shifted back into the small file's coordinate space.
 */
@Override
public BlockLocation[] getFileBlockLocations(Path p, final long start,
    final long len) throws IOException {
  BlockLocation[] blockLocations = super.getFileBlockLocations(
      p, start, len);
  if (blockLocations.length == 0) {
    FileState fileState = smartDFSClient.getFileState(getPathName(p));
    if (fileState instanceof CompactFileState) {
      FileContainerInfo fileContainerInfo = ((CompactFileState) fileState).getFileContainerInfo();
      String containerFile = fileContainerInfo.getContainerFilePath();
      long offset = fileContainerInfo.getOffset();
      blockLocations = super.getFileBlockLocations(
          new Path(containerFile), offset + start, len);
      // Shift container offsets back so they are relative to the small file.
      for (BlockLocation blockLocation : blockLocations) {
        blockLocation.setOffset(blockLocation.getOffset() - offset);
      }
    }
  }
  return blockLocations;
}
/**
 * Sets replication for {@code src}. Rejected for SSM small files, whose
 * replication is fixed by their container file.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public boolean setReplication(Path src,
    final short replication) throws IOException {
  if (smartDFSClient.getFileState(getPathName(src)) instanceof CompactFileState) {
    throw new IOException(
        smartDFSClient.getExceptionMsg("Set replication", "SSM Small File"));
  }
  return super.setReplication(src, replication);
}
/**
 * Sets the storage policy for {@code src}. Rejected for SSM small files,
 * whose storage is governed by their container file.
 *
 * @throws IOException if {@code src} is an SSM small file
 */
@Override
public void setStoragePolicy(final Path src, final String policyName)
    throws IOException {
  if (smartDFSClient.getFileState(getPathName(src)) instanceof CompactFileState) {
    throw new IOException(
        smartDFSClient.getExceptionMsg("Set storage policy", "SSM Small File"));
  }
  super.setStoragePolicy(src, policyName);
}
/**
 * Concatenates {@code psrcs} into {@code trg}. If the parent call fails
 * and any source is an SSM small file, a clearer unsupported-operation
 * message is thrown.
 *
 * <p>Bug fix: when no source turned out to be a small file, the caught
 * IOException was silently swallowed and concat appeared to succeed. The
 * original failure is now rethrown, matching the client-side concat.
 *
 * @throws IOException on failure, with an SSM-specific message when a
 *         source is an SSM small file
 */
@Override
public void concat(Path trg, Path [] psrcs) throws IOException {
  try {
    super.concat(trg, psrcs);
  } catch (IOException e) {
    for (Path src : psrcs) {
      FileState fileState = smartDFSClient.getFileState(getPathName(src));
      if (fileState instanceof CompactFileState) {
        throw new IOException(
            smartDFSClient.getExceptionMsg("Concat", "SSM Small File"));
      }
    }
    throw e;
  }
}
/**
 * Returns link status for {@code f}. When the raw status length looks like
 * an SSM-managed file (zero for a small file, non-zero checked against the
 * compressed state), the link target is resolved and its rewritten status
 * from {@link #getFileStatus(Path)} is returned instead.
 */
@Override
public FileStatus getFileLinkStatus(final Path f) throws IOException {
  FileStatus fileStatus = super.getFileLinkStatus(f);
  if (fileStatus.getLen() == 0) {
    Path target = getLinkTarget(f);
    FileState fileState = smartDFSClient.getFileState(getPathName(target));
    if (fileState instanceof CompactFileState) {
      fileStatus = getFileStatus(target);
    }
  } else {
    Path target = getLinkTarget(f);
    FileState fileState = smartDFSClient.getFileState(getPathName(target));
    if (fileState instanceof CompressionFileState) {
      fileStatus = getFileStatus(target);
    }
  }
  return fileStatus;
}
/**
 * Returns the checksum of the whole file at {@code f}, routed through the
 * SmartDFSClient so SSM small files yield their stored original checksum.
 * Symlinks are followed via the standard FileSystemLinkResolver pattern.
 */
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
  statistics.incrementReadOps(1);
  Path absF = fixRelativePart(f);
  return new FileSystemLinkResolver<FileChecksum>() {
    @Override
    public FileChecksum doCall(final Path p)
        throws IOException {
      // Long.MAX_VALUE means checksum over the full file length.
      return smartDFSClient.getFileChecksum(getPathName(p), Long.MAX_VALUE);
    }
    @Override
    public FileChecksum next(final FileSystem fs, final Path p)
        throws IOException {
      return fs.getFileChecksum(p);
    }
  }.resolve(this, absF);
}
/**
 * Returns the checksum of the first {@code length} bytes of {@code f},
 * routed through the SmartDFSClient. If symlink resolution crosses into a
 * filesystem that is not a SmartFileSystem, the ranged variant is
 * unavailable and an UnsupportedFileSystemException is raised.
 */
@Override
public FileChecksum getFileChecksum(Path f, final long length)
    throws IOException {
  statistics.incrementReadOps(1);
  Path absF = fixRelativePart(f);
  return new FileSystemLinkResolver<FileChecksum>() {
    @Override
    public FileChecksum doCall(final Path p)
        throws IOException {
      return smartDFSClient.getFileChecksum(getPathName(p), length);
    }
    @Override
    public FileChecksum next(final FileSystem fs, final Path p)
        throws IOException {
      if (fs instanceof SmartFileSystem) {
        return fs.getFileChecksum(p, length);
      } else {
        throw new UnsupportedFileSystemException(
            "getFileChecksum(Path, long) is not supported by "
                + fs.getClass().getSimpleName());
      }
    }
  }.resolve(this, absF);
}
/**
 * Sets the permission of {@code p} via the SmartDFSClient (which rejects
 * SSM small files); symlinks are followed with the resolver pattern.
 */
@Override
public void setPermission(Path p, final FsPermission permission
    ) throws IOException {
  statistics.incrementWriteOps(1);
  Path absF = fixRelativePart(p);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p)
        throws IOException, UnresolvedLinkException {
      smartDFSClient.setPermission(getPathName(p), permission);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.setPermission(p, permission);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Sets owner and/or group of {@code p} via the SmartDFSClient (which
 * rejects SSM small files). At least one of {@code username} and
 * {@code groupname} must be non-null.
 *
 * @throws IOException if both username and groupname are null
 */
@Override
public void setOwner(Path p, final String username, final String groupname
    ) throws IOException {
  if (username == null && groupname == null) {
    throw new IOException("username == null && groupname == null");
  }
  statistics.incrementWriteOps(1);
  Path absF = fixRelativePart(p);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p)
        throws IOException, UnresolvedLinkException {
      smartDFSClient.setOwner(getPathName(p), username, groupname);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.setOwner(p, username, groupname);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Lists corrupt blocks under {@code path}; for an SSM small file the
 * container file's corrupt blocks are reported instead, since the small
 * file's bytes physically live there.
 */
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
    throws IOException {
  RemoteIterator<Path> corruptFileBlocksIterator = super.listCorruptFileBlocks(path);
  FileState fileState = smartDFSClient.getFileState(getPathName(path));
  if (fileState instanceof CompactFileState) {
    corruptFileBlocksIterator = super.listCorruptFileBlocks(
        new Path(((CompactFileState) fileState)
            .getFileContainerInfo().getContainerFilePath()));
  }
  return corruptFileBlocksIterator;
}
/**
 * Modifies ACL entries of {@code path} via the SmartDFSClient (which
 * rejects SSM small files); symlinks are followed with the resolver
 * pattern.
 */
@Override
public void modifyAclEntries(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.modifyAclEntries(getPathName(p), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.modifyAclEntries(p, aclSpec);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Removes ACL entries of {@code path} via the SmartDFSClient (which
 * rejects SSM small files); symlinks are followed with the resolver
 * pattern.
 */
@Override
public void removeAclEntries(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.removeAclEntries(getPathName(p), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.removeAclEntries(p, aclSpec);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Removes the default ACL of {@code path} via the SmartDFSClient (which
 * rejects SSM small files); symlinks are followed with the resolver
 * pattern.
 */
@Override
public void removeDefaultAcl(Path path) throws IOException {
  final Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.removeDefaultAcl(getPathName(p));
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException, UnresolvedLinkException {
      fs.removeDefaultAcl(p);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Removes the ACL of {@code path} via the SmartDFSClient (which rejects
 * SSM small files); symlinks are followed with the resolver pattern.
 */
@Override
public void removeAcl(Path path) throws IOException {
  final Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.removeAcl(getPathName(p));
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException, UnresolvedLinkException {
      fs.removeAcl(p);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Sets the ACL of {@code path} via the SmartDFSClient (which rejects SSM
 * small files); symlinks are followed with the resolver pattern.
 */
@Override
public void setAcl(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.setAcl(getPathName(p), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.setAcl(p, aclSpec);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Creates an encryption zone at {@code path} via the SmartDFSClient
 * (which rejects SSM small files). Note: no symlink resolution or
 * relative-path fixup is performed here, unlike the other overrides.
 */
@Override
public void createEncryptionZone(Path path, String keyName)
    throws IOException {
  smartDFSClient.createEncryptionZone(getPathName(path), keyName);
}
/**
 * Returns an iterator over the entries of {@code p}, backed by
 * SmartDirListingIterator so SSM-managed entries have their lengths
 * rewritten; cross-filesystem symlinks fall back to the plain
 * DistributedFileSystem iterator.
 */
@Override
public RemoteIterator<FileStatus> listStatusIterator(final Path p)
    throws IOException {
  Path absF = fixRelativePart(p);
  return new FileSystemLinkResolver<RemoteIterator<FileStatus>>() {
    @Override
    public RemoteIterator<FileStatus> doCall(final Path p)
        throws IOException {
      // needLocation=false: plain FileStatus entries, no block locations.
      return new SmartDirListingIterator<>(p, false);
    }
    @Override
    public RemoteIterator<FileStatus> next(final FileSystem fs, final Path p)
        throws IOException {
      return ((DistributedFileSystem) fs).listStatusIterator(p);
    }
  }.resolve(this, absF);
}
/**
 * Returns a located-status iterator over {@code p} (entries include block
 * locations), backed by SmartDirListingIterator. Symlink resolution only
 * works within SmartFileSystem instances because this method is protected
 * on the FileSystem API.
 */
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path p,
    final PathFilter filter) throws IOException {
  Path absF = fixRelativePart(p);
  return new FileSystemLinkResolver<RemoteIterator<LocatedFileStatus>>() {
    @Override
    public RemoteIterator<LocatedFileStatus> doCall(final Path p)
        throws IOException {
      // needLocation=true: entries carry block locations.
      return new SmartDirListingIterator<>(p, filter, true);
    }
    @Override
    public RemoteIterator<LocatedFileStatus> next(final FileSystem fs, final Path p)
        throws IOException {
      if (fs instanceof SmartFileSystem) {
        return ((SmartFileSystem)fs).listLocatedStatus(p, filter);
      }
      // symlink resolution for this methods does not work cross file systems
      // because it is a protected method.
      throw new IOException("Link resolution does not work with multiple " +
          "file systems for listLocatedStatus(): " + p);
    }
  }.resolve(this, absF);
}
private class SmartDirListingIterator<T extends FileStatus>
implements RemoteIterator<T> {
private DirectoryListing thisListing;
private int i;
private Path p;
private String src;
private T curStat = null;
private PathFilter filter;
private boolean needLocation;
/**
 * Creates an iterator over directory {@code p}, fetching the first batch
 * of entries eagerly via the SmartDFSClient.
 *
 * @param p            the directory to list
 * @param filter       optional path filter applied by the iterator
 * @param needLocation whether entries should carry block locations
 * @throws FileNotFoundException if {@code p} does not exist
 */
private SmartDirListingIterator(Path p, PathFilter filter,
    boolean needLocation) throws IOException {
  this.p = p;
  this.src = getPathName(p);
  this.filter = filter;
  this.needLocation = needLocation;
  // fetch the first batch of entries in the directory
  thisListing = smartDFSClient.listPaths(src, HdfsFileStatus.EMPTY_NAME,
      needLocation);
  statistics.incrementReadOps(1);
  // the directory does not exist
  if (thisListing == null) {
    throw new FileNotFoundException("File " + p + " does not exist.");
  }
  i = 0;
}
/** Convenience constructor with no path filter. */
private SmartDirListingIterator(Path p, boolean needLocation)
    throws IOException {
  this(p, null, needLocation);
}
@Override
@SuppressWarnings("unchecked")
public boolean hasNext() throws IOException {
while (curStat == null && hasNextNoFilter()) {
T next;
HdfsFileStatus fileStat = thisListing.getPartialListing()[i++];
if (needLocation) {
next = (T)((HdfsLocatedFileStatus) fileStat).makeQualifiedLocated(getUri(), p);
String fileName = next.getPath().toUri().getPath();
// Reconstruct FileStatus
if (next.getLen() == 0) {
FileState fileState = smartDFSClient.getFileState(fileName);
if (fileState instanceof CompactFileState) {
CompactFileState compactFileState = (CompactFileState) fileState;
long len = compactFileState.getFileContainerInfo().getLength();
BlockLocation[] blockLocations = smartDFSClient.getBlockLocations(
fileName, 0, len);
next = (T) new LocatedFileStatus(len,
next.isDirectory(),
next.getReplication(),
next.getBlockSize(),
next.getModificationTime(),
next.getAccessTime(),
next.getPermission(),
next.getOwner(),
next.getGroup(),
next.isSymlink() ? next.getSymlink() : null,
next.getPath(),
blockLocations);
}
} else {
FileState fileState = smartDFSClient.getFileState(fileName);
if (fileState instanceof CompressionFileState) {
CompressionFileState compressionFileState = (CompressionFileState) fileState;
long fileLen = compressionFileState.getOriginalLength();
BlockLocation[] blockLocations =
((LocatedFileStatus)next).getBlockLocations();
for (BlockLocation blockLocation : blockLocations) {
convertBlockLocation(blockLocation, compressionFileState);
}
next = (T) new LocatedFileStatus(fileLen,
next.isDirectory(),
next.getReplication(),
next.getBlockSize(),
next.getModificationTime(),
next.getAccessTime(),
next.getPermission(),
next.getOwner(),
next.getGroup(),
next.isSymlink() ? next.getSymlink() : null,
next.getPath(),
blockLocations);
}
}
} else {
next = (T) fileStat.makeQualified(getUri(), p);
String fileName = next.getPath().toUri().getPath();
// Reconstruct FileStatus
if (next.getLen() == 0) {
FileState fileState = smartDFSClient.getFileState(fileName);
if (fileState instanceof CompactFileState) {
CompactFileState compactFileState = (CompactFileState) fileState;
long len = compactFileState.getFileContainerInfo().getLength();
next = (T) new FileStatus(len,
next.isDirectory(),
next.getReplication(),
next.getBlockSize(),
next.getModificationTime(),
next.getAccessTime(),
next.getPermission(),
next.getOwner(),
next.getGroup(),
next.isSymlink() ? next.getSymlink() : null,
next.getPath());
}
} else {
FileState fileState = smartDFSClient.getFileState(fileName);
if (fileState instanceof CompressionFileState) {
CompressionFileState compressionFileState = (CompressionFileState) fileState;
long fileLen = compressionFileState.getOriginalLength();
BlockLocation[] blockLocations =
((LocatedFileStatus)next).getBlockLocations();
for (BlockLocation blockLocation : blockLocations) {
convertBlockLocation(blockLocation, compressionFileState);
}
next = (T) new LocatedFileStatus(fileLen,
next.isDirectory(),
next.getReplication(),
next.getBlockSize(),
next.getModificationTime(),
next.getAccessTime(),
next.getPermission(),
next.getOwner(),
next.getGroup(),
next.isSymlink() ? next.getSymlink() : null,
next.getPath(),
blockLocations);
}
}
}
// apply filter if not null
if (filter == null || filter.accept(next.getPath())) {
curStat = next;
}
}
return curStat != null;
}
// Definitions:
// * Compression trunk doesn't cross over two blocks:
// - Offset = original start of the first trunk
// - End = original end of the last trunk
// * Compression trunk crosses over two blocks:
// - Offset = original middle of the first incomplete trunk
// - End = original middle of the last incomplete trunk
private void convertBlockLocation(BlockLocation blockLocation,
CompressionFileState compressionInfo) throws IOException {
long compressedStart = blockLocation.getOffset();
long compressedEnd = compressedStart + blockLocation.getLength() - 1;
CompressionTrunk startTrunk = compressionInfo.locateCompressionTrunk(
true, compressedStart);
CompressionTrunk endTrunk = compressionInfo.locateCompressionTrunk(
true, compressedEnd);
long originStart;
// If the first trunk crosses over two blocks, set start as middle of the trunk
if (startTrunk.getCompressedOffset() < compressedStart) {
originStart = startTrunk.getOriginOffset() + startTrunk.getOriginLength() / 2 + 1;
} else {
originStart = startTrunk.getOriginOffset();
}
long originEnd;
// If the last trunk corsses over two blocks, set end as middle of the trunk
if (endTrunk.getCompressedOffset() + endTrunk.getCompressedLength() - 1 > compressedEnd) {
originEnd = endTrunk.getOriginOffset() + endTrunk.getOriginLength() / 2;
} else {
originEnd = endTrunk.getOriginOffset() + endTrunk.getOriginLength() - 1;
}
blockLocation.setOffset(originStart);
blockLocation.setLength(originEnd - originStart + 1);
}
/**
* Check if there is a next item before applying the given filter
*/
private boolean hasNextNoFilter() throws IOException {
if (thisListing == null) {
return false;
}
if (i >= thisListing.getPartialListing().length && thisListing.hasMore()) {
// current listing is exhausted & fetch a new listing
thisListing = smartDFSClient.listPaths(
src, thisListing.getLastName(), needLocation);
statistics.incrementReadOps(1);
if (thisListing == null) {
return false;
}
i = 0;
}
return (i < thisListing.getPartialListing().length);
}
@Override
public T next() throws IOException {
if (hasNext()) {
T tmp = curStat;
curStat = null;
return tmp;
}
throw new java.util.NoSuchElementException("No more entry in " + p);
}
}
@Override
public boolean isFileClosed(final Path src) throws IOException {
boolean isFileClosed = super.isFileClosed(src);
if (!isFileClosed) {
FileState fileState = smartDFSClient.getFileState(getPathName(src));
if (fileState instanceof CompactFileState) {
String containerFile = ((CompactFileState) fileState)
.getFileContainerInfo().getContainerFilePath();
isFileClosed = smartDFSClient.isFileClosed(containerFile);
}
}
return isFileClosed;
}
  /**
   * Closes this filesystem and then the SmartDFSClient it wraps. The client
   * is closed in a finally block so it is released even when the superclass
   * close throws.
   */
  @Override
  public void close() throws IOException {
    try {
      super.close();
    } finally {
      if (smartDFSClient != null) {
        this.smartDFSClient.close();
      }
    }
  }
/**
* Checks that the passed URI belongs to this filesystem and returns
* just the path component. Expects a URI with an absolute path.
*
* @param file URI with absolute path
* @return path component of {file}
* @throws IllegalArgumentException if URI does not belong to this DFS
*/
private String getPathName(Path file) {
checkPath(file);
String result = fixRelativePart(file).toUri().getPath();
if (!DFSUtil.isValidName(result)) {
throw new IllegalArgumentException("Pathname " + result + " from " +
file + " is not a valid DFS filename.");
}
return result;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-2/src/main/java/org/apache/hadoop/hdfs/CompactInputStream.java | smart-hadoop-support/smart-hadoop-2/src/main/java/org/apache/hadoop/hdfs/CompactInputStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.io.ByteBufferPool;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.FileContainerInfo;
import org.smartdata.model.FileState;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
/**
 * Input stream for a "compact" SSM file: a small file whose bytes are stored
 * inside a larger container file. The stream opens the container file and
 * confines every read/seek to the [offset, offset + length) window described
 * by the file's {@link FileContainerInfo}.
 *
 * NOTE(review): several overrides inspect the immediate caller's class name
 * via the current stack trace to decide whether to report container-file
 * coordinates (when called from DFSInputStream internals) or small-file
 * coordinates (when called by users). This is fragile against refactoring —
 * confirm call depths before wrapping or inlining any of these methods.
 */
public class CompactInputStream extends SmartInputStream {
  // Offset and length of this small file inside its container file.
  private FileContainerInfo fileContainerInfo;
  // Tracks close() locally; consulted by available().
  private boolean closed = false;
  // Caller class for which absolute (container-file) coordinates are reported.
  private static final String INHERITED_CLASS = "org.apache.hadoop.hdfs.DFSInputStream";
  CompactInputStream(DFSClient dfsClient, boolean verifyChecksum,
      FileState fileState) throws IOException {
    // Open the container file rather than the logical small file.
    // NOTE(review): assumes fileState is a CompactFileState — the cast below
    // throws ClassCastException otherwise; confirm callers guarantee this.
    super(dfsClient,
        ((CompactFileState) fileState).getFileContainerInfo().getContainerFilePath(),
        verifyChecksum,
        fileState);
    this.fileContainerInfo = ((CompactFileState) fileState).getFileContainerInfo();
    // Position the stream at the small file's start within the container.
    super.seek(fileContainerInfo.getOffset());
  }
  /**
   * Returns the small file's length for user callers; when invoked from
   * DFSInputStream internals it returns offset + length, i.e. the end of the
   * window in container-file coordinates.
   */
  @Override
  public long getFileLength() {
    // Stack element [2] is the immediate caller of this method.
    String callerClass = Thread.currentThread().getStackTrace()[2].getClassName();
    if (INHERITED_CLASS.equals(callerClass)) {
      return fileContainerInfo.getLength() + fileContainerInfo.getOffset();
    } else {
      return fileContainerInfo.getLength();
    }
  }
  /**
   * Returns only those container-file blocks that overlap the small file's
   * [offset, offset + length] window.
   */
  @Override
  public List<LocatedBlock> getAllBlocks() throws IOException {
    List<LocatedBlock> blocks = super.getAllBlocks();
    List<LocatedBlock> ret = new ArrayList<>(16);
    long off = fileContainerInfo.getOffset();
    long len = fileContainerInfo.getLength();
    for (LocatedBlock b : blocks) {
      // Skip blocks that end before the window or start after it.
      if (off > b.getStartOffset() + b.getBlockSize() || off + len < b.getStartOffset()) {
        continue;
      }
      ret.add(b);
    }
    return ret;
  }
  /**
   * Reads up to {@code len} bytes, clamped so the read never crosses the end
   * of the small file's window. Returns -1 at end of the window.
   * NOTE(review): a caller passing len == 0 also gets -1 here rather than
   * the conventional 0 — confirm no caller depends on the 0 convention.
   */
  @Override
  public synchronized int read(final byte[] buf, int off, int len) throws IOException {
    int realLen = (int) Math.min(len, fileContainerInfo.getLength() - getPos());
    if (realLen == 0) {
      return -1;
    } else {
      return super.read(buf, off, realLen);
    }
  }
  /**
   * Reads into {@code buf}, shrinking its limit so the read cannot pass the
   * end of the small file's window. Returns -1 at end of the window.
   */
  @Override
  public synchronized int read(final ByteBuffer buf) throws IOException {
    int realLen = (int) Math.min(buf.remaining(), fileContainerInfo.getLength() - getPos());
    if (realLen == 0) {
      return -1;
    } else {
      buf.limit(realLen + buf.position());
      return super.read(buf);
    }
  }
  /**
   * Positional read: {@code position} is relative to the small file and is
   * translated into container-file coordinates; the length is clamped to the
   * remainder of the window. Returns -1 when positioned at the window's end.
   */
  @Override
  public int read(long position, byte[] buffer, int offset, int length) throws IOException {
    long realPos = position + fileContainerInfo.getOffset();
    int realLen = (int) Math.min(length, fileContainerInfo.getLength() - position);
    if (realLen == 0) {
      return -1;
    } else {
      return super.read(realPos, buffer, offset, realLen);
    }
  }
  /**
   * Returns the position relative to the small file for user callers; for
   * DFSInputStream internals, the raw container-file position.
   */
  @Override
  public synchronized long getPos() throws IOException {
    String callerClass = Thread.currentThread().getStackTrace()[2].getClassName();
    if (INHERITED_CLASS.equals(callerClass)) {
      return super.getPos();
    } else {
      return super.getPos() - fileContainerInfo.getOffset();
    }
  }
  /** Bytes remaining in the small file's window, saturated at MAX_VALUE. */
  @Override
  public synchronized int available() throws IOException {
    if (closed) {
      throw new IOException("Stream closed.");
    }
    // Both calls below resolve to small-file coordinates, since the caller
    // recorded in the stack trace is this class, not DFSInputStream.
    final long remaining = getFileLength() - getPos();
    return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
  }
  /**
   * Seeks to {@code targetPos}: taken verbatim for DFSInputStream internals,
   * otherwise interpreted relative to the small file (bounds-checked and
   * shifted by the container offset).
   */
  @Override
  public synchronized void seek(long targetPos) throws IOException {
    String callerClass = Thread.currentThread().getStackTrace()[2].getClassName();
    if (INHERITED_CLASS.equals(callerClass)) {
      super.seek(targetPos);
    } else {
      if (targetPos > fileContainerInfo.getLength()) {
        throw new EOFException("Cannot seek after EOF");
      }
      if (targetPos < 0) {
        throw new EOFException("Cannot seek to negative offset");
      }
      super.seek(fileContainerInfo.getOffset() + targetPos);
    }
  }
  /**
   * Like {@link #seek(long)} but retries on a different datanode; the same
   * caller-based coordinate translation applies.
   */
  @Override
  public synchronized boolean seekToNewSource(long targetPos) throws IOException {
    String callerClass = Thread.currentThread().getStackTrace()[2].getClassName();
    if (INHERITED_CLASS.equals(callerClass)) {
      return super.seekToNewSource(targetPos);
    } else {
      if (targetPos < 0) {
        throw new EOFException("Cannot seek after EOF");
      } else {
        return super.seekToNewSource(fileContainerInfo.getOffset() + targetPos);
      }
    }
  }
  /** Caps the readahead so it never extends past the small file's window. */
  @Override
  public synchronized void setReadahead(Long readahead) throws IOException {
    long realReadAhead = Math.min(readahead, fileContainerInfo.getLength() - getPos());
    super.setReadahead(realReadAhead);
  }
  /**
   * Zero-copy read; {@code maxLength} is clamped to the remainder of the
   * small file's window.
   */
  @Override
  public synchronized ByteBuffer read(ByteBufferPool bufferPool,
      int maxLength, EnumSet<ReadOption> opts)
      throws IOException, UnsupportedOperationException {
    int realMaxLen = (int) Math.min(maxLength, fileContainerInfo.getLength() - getPos());
    return super.read(bufferPool, realMaxLen, opts);
  }
  /** Closes the underlying stream and marks this one closed. */
  @Override
  public synchronized void close() throws IOException {
    super.close();
    this.closed = true;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-2/src/main/java/org/apache/hadoop/hdfs/SmartInputStream.java | smart-hadoop-support/smart-hadoop-2/src/main/java/org/apache/hadoop/hdfs/SmartInputStream.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.smartdata.model.FileState;
import java.io.IOException;
/**
* DFSInputStream for SSM.
*/
public class SmartInputStream extends DFSInputStream {
protected final FileState fileState;
public SmartInputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
FileState fileState) throws IOException, UnresolvedLinkException {
super(dfsClient, src, verifyChecksum);
this.fileState = fileState;
}
public FileState.FileType getType() {
return fileState.getFileType();
}
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-2/src/main/java/org/smartdata/hdfs/CompatibilityHelper2.java | smart-hadoop-support/smart-hadoop-2/src/main/java/org/smartdata/hdfs/CompatibilityHelper2.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.SmartInputStream;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.balancer.KeyManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.security.token.Token;
import org.smartdata.SmartConstants;
import org.smartdata.hdfs.action.move.DBlock;
import org.smartdata.hdfs.action.move.MLocation;
import org.smartdata.hdfs.action.move.StorageGroup;
import org.smartdata.hdfs.action.move.StorageMap;
import org.smartdata.model.FileState;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
public abstract class CompatibilityHelper2 implements CompatibilityHelper {
public int getReadTimeOutConstant() {
return HdfsServerConstants.READ_TIMEOUT;
}
public Token<BlockTokenIdentifier> getAccessToken(
KeyManager km, ExtendedBlock eb, StorageGroup target) throws IOException {
return km.getAccessToken(eb);
}
public int getIOFileBufferSize(Configuration conf) {
return HdfsConstants.IO_FILE_BUFFER_SIZE;
}
public InputStream getVintPrefixed(DataInputStream in) throws IOException {
return PBHelper.vintPrefixed(in);
}
public LocatedBlocks getLocatedBlocks(HdfsLocatedFileStatus status) {
return status.getBlockLocations();
}
public HdfsFileStatus createHdfsFileStatus(
long length, boolean isdir, int block_replication, long blocksize, long modification_time,
long access_time, FsPermission permission, String owner, String group, byte[] symlink, byte[] path,
long fileId, int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy) {
return new HdfsFileStatus(
length, isdir, block_replication, blocksize, modification_time, access_time, permission,
owner, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy);
}
public byte getErasureCodingPolicy(HdfsFileStatus fileStatus) {
// for HDFS2.x, the erasure policy is always replication whose id is 0 in HDFS.
return (byte) 0;
}
@Override
public String getErasureCodingPolicyName(HdfsFileStatus fileStatus) {
return SmartConstants.REPLICATION_CODEC_NAME;
}
public byte getErasureCodingPolicyByName(DFSClient client, String ecPolicyName) throws IOException {
return (byte) 0;
}
public Map<Byte, String> getErasureCodingPolicies(DFSClient dfsClient) throws IOException {
return null;
}
public List<String> getStorageTypeForEcBlock(LocatedBlock lb, BlockStoragePolicy policy,
byte policyId) throws IOException {
return null;
}
public DBlock newDBlock(LocatedBlock lb, HdfsFileStatus status) {
Block blk = lb.getBlock().getLocalBlock();
DBlock db = new DBlock(blk);
return db;
}
public boolean isLocatedStripedBlock(LocatedBlock lb) {
return false;
}
public DBlock getDBlock(DBlock block, StorageGroup source) {
return block;
}
public DFSInputStream getNormalInputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
FileState fileState) throws IOException {
return new SmartInputStream(dfsClient, src, verifyChecksum, fileState);
}
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-2.7/src/test/java/org/smartdata/hdfs/MiniClusterFactory27.java | smart-hadoop-support/smart-hadoop-2.7/src/test/java/org/smartdata/hdfs/MiniClusterFactory27.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import java.io.IOException;
/**
 * Mini DFS cluster factory for the Hadoop 2.7 test profile.
 */
public class MiniClusterFactory27 extends MiniClusterFactory {

  /** Builds a plain mini cluster with the requested number of datanodes. */
  @Override
  public MiniDFSCluster create(int dataNodes, Configuration conf) throws IOException {
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    return builder.numDataNodes(dataNodes).build();
  }

  /**
   * Builds a mini cluster where every datanode carries three storages, one
   * each of DISK, ARCHIVE and SSD, for storage-policy tests.
   */
  @Override
  public MiniDFSCluster createWithStorages(int dataNodes, Configuration conf) throws IOException {
    StorageType[] storagesPerNode =
        {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD};
    return new MiniDFSCluster.Builder(conf)
        .numDataNodes(dataNodes)
        .storagesPerDatanode(3)
        .storageTypes(storagesPerNode)
        .build();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-2.7/src/main/java/org/smartdata/hdfs/CompatibilityHelper27.java | smart-hadoop-support/smart-hadoop-2.7/src/main/java/org/smartdata/hdfs/CompatibilityHelper27.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.conf.Configuration;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
/**
 * Compatibility shims specific to Hadoop 2.7: storage-type conversions,
 * block replacement, truncate, append and inotify adaptations on top of the
 * common {@link CompatibilityHelper2} base.
 */
public class CompatibilityHelper27 extends CompatibilityHelper2 {
  /** Returns the storage types of a located block as their string names. */
  @Override
  public String[] getStorageTypes(LocatedBlock lb) {
    List<String> types = new ArrayList<>();
    for (StorageType type : lb.getStorageTypes()) {
      types.add(type.toString());
    }
    return types.toArray(new String[0]);
  }
  /** Issues a datanode replaceBlock request over the data-transfer protocol. */
  @Override
  public void replaceBlock(
      DataOutputStream out,
      ExtendedBlock eb,
      String storageType,
      Token<BlockTokenIdentifier> accessToken,
      String dnUUID,
      DatanodeInfo info)
      throws IOException {
    new Sender(out).replaceBlock(eb, StorageType.valueOf(storageType), accessToken, dnUUID, info);
  }
  /** Returns the names of all storage types whose blocks can be moved. */
  @Override
  public String[] getMovableTypes() {
    List<String> types = new ArrayList<>();
    for (StorageType type : StorageType.getMovableTypes()) {
      types.add(type.toString());
    }
    return types.toArray(new String[0]);
  }
  /** Returns the storage type name carried by a datanode storage report. */
  @Override
  public String getStorageType(StorageReport report) {
    return report.getStorage().getStorageType().toString();
  }
  /** Names the storage types the policy selects for the given replication. */
  @Override
  public List<String> chooseStorageTypes(BlockStoragePolicy policy, short replication) {
    List<String> types = new ArrayList<>();
    for (StorageType type : policy.chooseStorageTypes(replication)) {
      types.add(type.toString());
    }
    return types;
  }
  /** True if blocks on the named storage type may be moved. */
  @Override
  public boolean isMovable(String type) {
    return StorageType.valueOf(type).isMovable();
  }
  /**
   * Builds a minimal DatanodeInfo identified only by IP and transfer port;
   * all capacity/usage statistics are zeroed.
   */
  @Override
  public DatanodeInfo newDatanodeInfo(String ipAddress, int xferPort) {
    return new DatanodeInfo(
        ipAddress,
        null,
        null,
        xferPort,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        null,
        null);
  }
  /** Serializes an inotify append event to its protobuf form. */
  @Override
  public InotifyProtos.AppendEventProto getAppendEventProto(Event.AppendEvent event) {
    return InotifyProtos.AppendEventProto.newBuilder()
        .setPath(event.getPath())
        .setNewBlock(event.toNewBlock()).build();
  }
  /** Deserializes an inotify append event from its protobuf form. */
  @Override
  public Event.AppendEvent getAppendEvent(InotifyProtos.AppendEventProto proto) {
    return new Event.AppendEvent.Builder().path(proto.getPath())
        .newBlock(proto.hasNewBlock() && proto.getNewBlock())
        .build();
  }
  /** Truncates {@code src} to {@code newLength} via the DFS client. */
  @Override
  public boolean truncate(DFSClient client, String src, long newLength) throws IOException {
    return client.truncate(src, newLength);
  }
  /** Truncates {@code src} to {@code newLength} via the filesystem API. */
  @Override
  public boolean truncate(DistributedFileSystem fileSystem, String src, long newLength) throws IOException {
    return fileSystem.truncate(new Path(src), newLength);
  }
  /** Maps a datanode storage to a stable id, using the type's ordinal. */
  @Override
  public int getSidInDatanodeStorageReport(DatanodeStorage datanodeStorage) {
    StorageType storageType = datanodeStorage.getStorageType();
    return storageType.ordinal();
  }
  /**
   * Opens {@code dest} for writing at the given offset: appends when the
   * file already exists and a non-zero offset is requested, otherwise
   * creates (overwriting) the file.
   */
  @Override
  public OutputStream getDFSClientAppend(DFSClient client, String dest,
      int buffersize, long offset) throws IOException {
    if (client.exists(dest) && offset != 0) {
      return getDFSClientAppend(client, dest, buffersize);
    }
    return client.create(dest, true);
  }
  /** Opens {@code dest} in append mode via the DFS client. */
  @Override
  public OutputStream getDFSClientAppend(
      DFSClient client, String dest, int buffersize) throws IOException {
    return client
        .append(dest, buffersize,
            EnumSet.of(CreateFlag.APPEND), null, null);
  }
  /**
   * Opens an output stream that writes to a remote S3 destination.
   *
   * @throws IOException if {@code dest} is not an s3 URI
   */
  @Override
  public OutputStream getS3outputStream(String dest, Configuration conf) throws IOException {
    // Copy to remote S3
    if (!dest.startsWith("s3")) {
      // Include the offending path so failures are diagnosable (the previous
      // exception carried no message at all).
      throw new IOException("Destination is not an S3 path: " + dest);
    }
    // Copy to s3
    org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(URI.create(dest), conf);
    return fs.create(new Path(dest), true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-client-3.1/src/test/java/org/smartdata/hdfs/TestSmartDFSClientReadECData.java | smart-hadoop-support/smart-hadoop-client-3.1/src/test/java/org/smartdata/hdfs/TestSmartDFSClientReadECData.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSStripedInputStream;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.action.ErasureCodingAction;
import org.smartdata.hdfs.action.ErasureCodingBase;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.hdfs.action.TestErasureCodingMiniCluster;
import org.smartdata.hdfs.client.SmartDFSClient;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static org.junit.Assert.assertTrue;
/**
 * Verifies that a SmartDFSClient can read erasure-coded data, whether the EC
 * layout was produced directly by HDFS or by an SSM erasure-coding action.
 */
public class TestSmartDFSClientReadECData extends TestErasureCodingMiniCluster {

  public static final String TEST_DIR = "/ec";

  /** Builds a SmartDFSClient against the test cluster. */
  private SmartDFSClient newSmartDFSClient() throws IOException {
    SmartConf smartConf = smartContext.getConf();
    // The below single configuration is in order to make sure a SmartDFSClient
    // can be created successfully, and the actual value for this property
    // does't matter.
    smartConf.set(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY,
        SmartConfKeys.SMART_SERVER_RPC_ADDRESS_DEFAULT);
    return new SmartDFSClient(smartConf);
  }

  /** Drains the stream to prove the EC data is readable, then closes it. */
  private void readAll(DFSInputStream in) throws IOException {
    int bufferSize = 64 * 1024;
    byte[] buffer = new byte[bufferSize];
    while (in.read(buffer, 0, bufferSize) != -1) {
      // Discard the data; only readability matters here.
    }
    in.close();
  }

  @Test
  public void testReadECDataCreatedByHDFS() throws IOException {
    cluster.getFileSystem().mkdirs(new Path(TEST_DIR));
    // Set an EC policy for this test dir, so the file created under it will
    // be stored by this EC policy.
    dfsClient.setErasureCodingPolicy(TEST_DIR, ecPolicy.getName());
    String srcPath = "/ec/a.txt";
    createTestFile(srcPath, 300000);
    Assert.assertTrue(ecPolicy == dfsClient.getErasureCodingPolicy(srcPath));

    DFSInputStream dfsInputStream = newSmartDFSClient().open(srcPath);
    // In unit test, a DFSInputStream can still be used to read EC data. But in
    // real environment, DFSStripedInputStream is required, otherwise, block
    // not found exception will occur.
    Assert.assertTrue(dfsInputStream instanceof DFSStripedInputStream);
    readAll(dfsInputStream);
  }

  @Test
  public void testReadECDataCreatedBySSM() throws IOException {
    cluster.getFileSystem().mkdirs(new Path(TEST_DIR));
    String srcPath = "/ec/a.txt";
    createTestFile(srcPath, 300000);
    SmartDFSClient smartDFSClient = newSmartDFSClient();

    // Convert the file to the EC layout through an SSM action.
    ErasureCodingAction ecAction = new ErasureCodingAction();
    ecAction.setContext(smartContext);
    String ecTmpPath = "/ssm/ec_tmp/tmp_file";
    Map<String, String> args = new HashMap<>();
    args.put(HdfsAction.FILE_PATH, srcPath);
    args.put(ErasureCodingBase.EC_TMP, ecTmpPath);
    args.put(ErasureCodingAction.EC_POLICY_NAME, ecPolicy.getName());
    ecAction.init(args);
    ecAction.run();
    assertTrue(ecAction.getExpectedAfterRun());
    Assert.assertTrue(ecPolicy == dfsClient.getErasureCodingPolicy(srcPath));

    DFSInputStream dfsInputStream = smartDFSClient.open(srcPath);
    // In unit test, a DFSInputStream can still be used to read EC data. But in
    // real environment, DFSStripedInputStream is required, otherwise, block
    // not found exception will occur.
    Assert.assertTrue(dfsInputStream instanceof DFSStripedInputStream);
    readAll(dfsInputStream);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-client-3.1/src/main/java/org/smartdata/hdfs/client/SmartDFSClient.java | smart-hadoop-support/smart-hadoop-client-3.1/src/main/java/org/smartdata/hdfs/client/SmartDFSClient.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.client;
import org.apache.commons.lang.SerializationUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.SmartInputStreamFactory;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.client.SmartClient;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.FileState;
import org.smartdata.model.NormalFileState;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
/**
 * A {@link DFSClient} that cooperates with SSM (Smart Storage Management).
 *
 * <p>On top of the vanilla HDFS client it:
 * <ul>
 *   <li>reports file access events to the Smart Server so SSM can gather
 *       access statistics (best effort; disabled after the first failure);</li>
 *   <li>makes SSM-managed files transparent to readers by consulting the
 *       {@link FileState} persisted in the file's xattr: length, block,
 *       checksum and open requests on compacted small files are redirected to
 *       the backing container file, and compressed files report their
 *       original (uncompressed) metadata;</li>
 *   <li>rejects mutating operations (replication, storage policy, ACLs, ...)
 *       that would corrupt a compacted small file.</li>
 * </ul>
 */
public class SmartDFSClient extends DFSClient {
  private static final Logger LOG = LoggerFactory.getLogger(SmartDFSClient.class);
  /**
   * Calls whose direct caller is DFSClient itself must see the raw
   * (container-file) block layout; see {@link #getLocatedBlocks}.
   */
  private static final String CALLER_CLASS = "org.apache.hadoop.hdfs.DFSClient";
  /** Connection to the Smart Server; stays null when SSM reporting is disabled. */
  private SmartClient smartClient = null;
  /** Cleared once reporting fails so this instance stops retrying. */
  private boolean healthy = false;

  /**
   * All constructors follow the same pattern: initialize the HDFS side via
   * super, then — unless the client is disabled on this host — connect to the
   * Smart Server. If that connection fails, the already-opened HDFS client is
   * closed before the exception is propagated.
   */
  public SmartDFSClient(InetSocketAddress nameNodeAddress, Configuration conf,
      InetSocketAddress smartServerAddress) throws IOException {
    super(nameNodeAddress, conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      healthy = true;
    } catch (IOException e) {
      // Release the HDFS resources opened by super before failing.
      super.close();
      throw e;
    }
  }

  public SmartDFSClient(final URI nameNodeUri, final Configuration conf,
      final InetSocketAddress smartServerAddress) throws IOException {
    super(nameNodeUri, conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }

  public SmartDFSClient(URI nameNodeUri, Configuration conf,
      FileSystem.Statistics stats, InetSocketAddress smartServerAddress)
      throws IOException {
    super(nameNodeUri, conf, stats);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }

  public SmartDFSClient(Configuration conf,
      InetSocketAddress[] smartServerAddress) throws IOException {
    super(conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf, smartServerAddress);
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }

  public SmartDFSClient(Configuration conf) throws IOException {
    super(conf);
    if (isSmartClientDisabled()) {
      return;
    }
    try {
      smartClient = new SmartClient(conf);
      healthy = true;
    } catch (IOException e) {
      super.close();
      throw e;
    }
  }

  /** Opens {@code src} with a default buffer size and checksum verification. */
  @Override
  public DFSInputStream open(String src) throws IOException {
    return open(src, 4096, true);
  }

  /**
   * Functionality: create an InputStream and report access event to SSM server.
   *
   * DFSClient is firstly used to get an Inputstream to get file length. The real
   * InputStream returned is obtained from SmartInputStreamFactory which has some
   * considerations about compression, compact, S3 etc.
   *
   * It is supported that DFSStripedInputstream can be obtained for reading normal
   * EC data, but it is NOT supported to combine EC with SSM compact, SSM compression
   * etc. Also, there may lack of consideration on ErasureCoding in some places where
   * DFSInputStream is used instead of DFSStripedInputStream, which can lead to block
   * not found exception.
   */
  @Override
  public DFSInputStream open(String src, int buffersize,
      boolean verifyChecksum) throws IOException {
    DFSInputStream is;
    FileState fileState = getFileState(src);
    // Files being compacted/compressed right now cannot be read consistently.
    if (fileState.getFileStage().equals(FileState.FileStage.PROCESSING)) {
      throw new IOException("Cannot open " + src + " when it is under PROCESSING to "
          + fileState.getFileType());
    }
    is = SmartInputStreamFactory.create(this, src,
        verifyChecksum, fileState);
    // Report access event to smart server.
    reportFileAccessEvent(src);
    return is;
  }

  /** @deprecated statistics are ignored; delegates to {@link #open(String, int, boolean)}. */
  @Deprecated
  @Override
  public DFSInputStream open(String src, int buffersize,
      boolean verifyChecksum, FileSystem.Statistics stats)
      throws IOException {
    return open(src, buffersize, verifyChecksum);
  }

  /**
   * Opens a file by {@link HdfsPathHandle}. A zero HDFS length indicates a
   * possibly SSM-managed file, in which case the plain stream is replaced by
   * one from {@link SmartInputStreamFactory}.
   */
  @Override
  public DFSInputStream open(HdfsPathHandle fd, int buffersize, boolean verifyChecksum) throws IOException {
    String src = fd.getPath();
    DFSInputStream is = super.open(fd, buffersize, verifyChecksum);
    if (is.getFileLength() == 0) {
      is.close();
      FileState fileState = getFileState(src);
      if (fileState.getFileStage().equals(FileState.FileStage.PROCESSING)) {
        throw new IOException("Cannot open " + src + " when it is under PROCESSING to "
            + fileState.getFileType());
      }
      is = SmartInputStreamFactory.create(this, src,
          verifyChecksum, fileState);
    }
    reportFileAccessEvent(src);
    return is;
  }

  /**
   * Truncates {@code src}. Truncating a compressed file is unsupported
   * because compressed data cannot be cut at an arbitrary original offset.
   *
   * <p>Fix: the error message previously named the operation "Append"
   * (copy-paste from the append overloads); it now says "Truncate".
   */
  @Override
  public boolean truncate(String src, long newLength) throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompressionFileState) {
      throw new IOException(getExceptionMsg("Truncate", "Compressed File"));
    }
    return super.truncate(src, newLength);
  }

  /**
   * Appends to {@code src}. A pos of 0 after open means the HDFS file is
   * empty, which for SSM may indicate a compacted small file (append would
   * bypass the container); a non-zero pos may indicate a compressed file.
   * Either case closes the stream and rejects the append.
   */
  @Override
  public HdfsDataOutputStream append(final String src, final int buffersize,
      EnumSet<CreateFlag> flag, final Progressable progress,
      final FileSystem.Statistics statistics) throws IOException {
    HdfsDataOutputStream out = super.append(src, buffersize, flag, progress, statistics);
    if (out.getPos() == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        out.close();
        throw new IOException(getExceptionMsg("Append", "SSM Small File"));
      }
    } else {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompressionFileState) {
        out.close();
        throw new IOException(getExceptionMsg("Append", "Compressed File"));
      }
    }
    return out;
  }

  /** Same as the other append overload, with favored datanodes. */
  @Override
  public HdfsDataOutputStream append(final String src, final int buffersize,
      EnumSet<CreateFlag> flag, final Progressable progress,
      final FileSystem.Statistics statistics,
      final InetSocketAddress[] favoredNodes) throws IOException {
    HdfsDataOutputStream out = super.append(
        src, buffersize, flag, progress, statistics, favoredNodes);
    if (out.getPos() == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        out.close();
        throw new IOException(getExceptionMsg("Append", "SSM Small File"));
      }
    } else {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompressionFileState) {
        out.close();
        throw new IOException(getExceptionMsg("Append", "Compressed File"));
      }
    }
    return out;
  }

  /**
   * Returns file info for {@code src}, substituting the logical length for
   * SSM-managed files: a compacted small file (HDFS length 0) reports the
   * length recorded in its container info, a compressed file reports its
   * original (uncompressed) length. All other attributes are copied verbatim.
   */
  @Override
  public HdfsFileStatus getFileInfo(String src) throws IOException {
    HdfsFileStatus oldStatus = super.getFileInfo(src);
    if (oldStatus == null) return null;
    if (oldStatus.getLen() == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        long len = ((CompactFileState) fileState).getFileContainerInfo().getLength();
        return CompatibilityHelperLoader.getHelper().createHdfsFileStatus(len, oldStatus.isDir(), oldStatus.getReplication(),
            oldStatus.getBlockSize(), oldStatus.getModificationTime(), oldStatus.getAccessTime(),
            oldStatus.getPermission(), oldStatus.getOwner(), oldStatus.getGroup(),
            oldStatus.isSymlink() ? oldStatus.getSymlinkInBytes() : null,
            oldStatus.isEmptyLocalName() ? new byte[0] : oldStatus.getLocalNameInBytes(),
            oldStatus.getFileId(), oldStatus.getChildrenNum(),
            oldStatus.getFileEncryptionInfo(), oldStatus.getStoragePolicy());
      }
    } else {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompressionFileState) {
        // To make SmartDFSClient return the original length of compressed file.
        long len = ((CompressionFileState) fileState).getOriginalLength();
        return CompatibilityHelperLoader.getHelper().createHdfsFileStatus(len, oldStatus.isDir(), oldStatus.getReplication(),
            oldStatus.getBlockSize(), oldStatus.getModificationTime(), oldStatus.getAccessTime(),
            oldStatus.getPermission(), oldStatus.getOwner(), oldStatus.getGroup(),
            oldStatus.isSymlink() ? oldStatus.getSymlinkInBytes() : null,
            oldStatus.isEmptyLocalName() ? new byte[0] : oldStatus.getLocalNameInBytes(),
            oldStatus.getFileId(), oldStatus.getChildrenNum(),
            oldStatus.getFileEncryptionInfo(), oldStatus.getStoragePolicy());
      }
    }
    return oldStatus;
  }

  /**
   * Returns located blocks for {@code src}. For a compacted small file the
   * container file's blocks at the translated offset are returned instead —
   * unless the direct caller is DFSClient itself (detected via the stack
   * trace), which must keep seeing the raw layout to avoid recursion.
   *
   * <p>NOTE(review): stack-trace inspection is fragile; confirm before
   * changing the call depth of this method.
   */
  @Override
  public LocatedBlocks getLocatedBlocks(String src, long start)
      throws IOException {
    LocatedBlocks locatedBlocks = super.getLocatedBlocks(src, start);
    if (!CALLER_CLASS.equals(Thread.currentThread().getStackTrace()[2].getClassName())
        && locatedBlocks.getFileLength() == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        String containerFile = ((CompactFileState) fileState)
            .getFileContainerInfo().getContainerFilePath();
        long offset = ((CompactFileState) fileState).getFileContainerInfo().getOffset();
        return super.getLocatedBlocks(containerFile, offset + start);
      }
    }
    return locatedBlocks;
  }

  /**
   * Returns block locations for {@code src}.
   *
   * <p>For a compacted small file the container file's locations are returned
   * with offsets shifted back into the small file's coordinate space. For a
   * compressed file the requested original range is mapped to the compressed
   * range via the recorded original/compressed position index, and block
   * start offsets are mapped back to original offsets.
   */
  @Override
  public BlockLocation[] getBlockLocations(String src, long start,
      long length) throws IOException {
    BlockLocation[] blockLocations = super.getBlockLocations(src, start, length);
    if (blockLocations.length == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        String containerFile = ((CompactFileState) fileState)
            .getFileContainerInfo().getContainerFilePath();
        long offset = ((CompactFileState) fileState).getFileContainerInfo().getOffset();
        blockLocations = super.getBlockLocations(containerFile, offset + start, length);
        for (BlockLocation blockLocation : blockLocations) {
          // Translate container-file offsets back into small-file offsets.
          blockLocation.setOffset(blockLocation.getOffset() - offset);
        }
        return blockLocations;
      }
    } else {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompressionFileState) {
        CompressionFileState compressionInfo = (CompressionFileState) fileState;
        Long[] originalPos =
            compressionInfo.getOriginalPos().clone();
        Long[] compressedPos =
            compressionInfo.getCompressedPos().clone();
        int startIndex = compressionInfo.getPosIndexByOriginalOffset(start);
        int endIndex =
            compressionInfo.getPosIndexByOriginalOffset(start + length - 1);
        long compressedStart = compressedPos[startIndex];
        long compressedLength = 0;
        if (endIndex < compressedPos.length - 1) {
          compressedLength = compressedPos[endIndex + 1] - compressedStart;
        } else {
          // Last chunk: extends to the end of the compressed file.
          compressedLength =
              compressionInfo.getCompressedLength() - compressedStart;
        }
        LocatedBlocks originalLocatedBlocks =
            super.getLocatedBlocks(src, compressedStart, compressedLength);
        List<LocatedBlock> blocks = new ArrayList<>();
        // TODO handle CDH2.6 storage type
        for (LocatedBlock block : originalLocatedBlocks.getLocatedBlocks()) {
          blocks.add(new LocatedBlock(
              block.getBlock(),
              block.getLocations(),
              block.getStorageIDs(),
              block.getStorageTypes(),
              compressionInfo
                  .getPosIndexByCompressedOffset(block.getStartOffset()),
              block.isCorrupt(),
              block.getCachedLocations()
          ));
        }
        LocatedBlock lastLocatedBlock =
            originalLocatedBlocks.getLastLocatedBlock();
        long fileLength = compressionInfo.getOriginalLength();
        return new LocatedBlocks(fileLength,
            originalLocatedBlocks.isUnderConstruction(),
            blocks,
            lastLocatedBlock,
            originalLocatedBlocks.isLastBlockComplete(),
            originalLocatedBlocks.getFileEncryptionInfo(),
            originalLocatedBlocks.getErasureCodingPolicy())
            .getLocatedBlocks().toArray(new BlockLocation[0]);
      }
    }
    return blockLocations;
  }

  /**
   * Sets the replication of {@code src}. Rejected for compacted small files:
   * they share blocks with their container file.
   */
  @Override
  public boolean setReplication(String src, short replication)
      throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Set replication", "SSM Small File"));
    } else {
      return super.setReplication(src, replication);
    }
  }

  /** Sets the storage policy of {@code src}; rejected for compacted small files. */
  @Override
  public void setStoragePolicy(String src, String policyName)
      throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Set storage policy", "SSM Small File"));
    } else {
      super.setStoragePolicy(src, policyName);
    }
  }

  /**
   * Returns the block size of {@code f}; a compacted small file reports the
   * block size of its container file.
   */
  @Override
  public long getBlockSize(String f) throws IOException {
    long blockSize = super.getBlockSize(f);
    FileState fileState = getFileState(f);
    if (fileState instanceof CompactFileState) {
      blockSize = super.getBlockSize(((CompactFileState) fileState)
          .getFileContainerInfo().getContainerFilePath());
    }
    return blockSize;
  }

  /**
   * Concatenates {@code srcs} into {@code trg}. If concat fails and any
   * source is SSM-managed, a more specific unsupported-operation message is
   * raised; otherwise the original failure is rethrown.
   */
  @Override
  public void concat(String trg, String [] srcs) throws IOException {
    try {
      super.concat(trg, srcs);
    } catch (IOException e) {
      for (String src : srcs) {
        FileState fileState = getFileState(src);
        if (fileState instanceof CompactFileState) {
          throw new IOException(getExceptionMsg("Concat", "SSM Small File"));
        } else if (fileState instanceof CompressionFileState) {
          throw new IOException(getExceptionMsg("Concat", "Compressed File"));
        }
      }
      throw e;
    }
  }

  /**
   * Returns link info for {@code src}; if the link target is a compacted
   * small file, the corrected target info is returned instead.
   *
   * <p>Fix: guard against a null return from super (nonexistent link), which
   * previously caused an NPE; {@link #getFileInfo} already null-checks.
   */
  @Override
  public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
    HdfsFileStatus fileStatus = super.getFileLinkInfo(src);
    if (fileStatus != null && fileStatus.getLen() == 0) {
      String target = super.getLinkTarget(src);
      FileState fileState = getFileState(target);
      if (fileState instanceof CompactFileState) {
        fileStatus = getFileInfo(target);
      }
    }
    return fileStatus;
  }

  /**
   * Returns the checksum of {@code src}. A bytesPerChecksum of 0 marks a file
   * with no blocks of its own; for a compacted small file the original
   * checksum saved in its xattr at compaction time is deserialized instead.
   *
   * <p>Fix: the rethrown IOException now carries the original cause.
   */
  @Override
  public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
      throws IOException {
    MD5MD5CRC32FileChecksum ret = super.getFileChecksum(src, length);
    if (ret.getChecksumOpt().getBytesPerChecksum() == 0) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        try {
          // Get original checksum for small file.
          byte[] bytes = getXAttr(src, SmartConstants.SMART_FILE_CHECKSUM_XATTR_NAME);
          ret = new MD5MD5CRC32FileChecksum();
          ret.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
        } catch (IOException e) {
          throw new IOException("Failed to get checksum for SSM Small File: "
              + e.getMessage(), e);
        }
      }
    }
    return ret;
  }

  /** Sets the permission of {@code src}; rejected for compacted small files. */
  @Override
  public void setPermission(String src, FsPermission permission)
      throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Set permission", "SSM Small File"));
    } else {
      super.setPermission(src, permission);
    }
  }

  /** Sets the owner/group of {@code src}; rejected for compacted small files. */
  @Override
  public void setOwner(String src, String username, String groupname)
      throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Set owner", "SSM Small File"));
    } else {
      super.setOwner(src, username, groupname);
    }
  }

  /**
   * Lists corrupt file blocks under {@code path}; for a compacted small file
   * the container file (which actually holds the blocks) is queried.
   */
  @Override
  public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
      throws IOException {
    CorruptFileBlocks corruptFileBlocks = super.listCorruptFileBlocks(path, cookie);
    FileState fileState = getFileState(path);
    if (fileState instanceof CompactFileState) {
      corruptFileBlocks = super.listCorruptFileBlocks(((CompactFileState) fileState)
          .getFileContainerInfo().getContainerFilePath(), cookie);
    }
    return corruptFileBlocks;
  }

  /** Modifies ACL entries of {@code src}; rejected for compacted small files. */
  @Override
  public void modifyAclEntries(String src, List<AclEntry> aclSpec)
      throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Modify acl entries", "SSM Small File"));
    } else {
      super.modifyAclEntries(src, aclSpec);
    }
  }

  /** Removes ACL entries of {@code src}; rejected for compacted small files. */
  @Override
  public void removeAclEntries(String src, List<AclEntry> aclSpec)
      throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Remove acl entries", "SSM Small File"));
    } else {
      super.removeAclEntries(src, aclSpec);
    }
  }

  /** Removes the default ACL of {@code src}; rejected for compacted small files. */
  @Override
  public void removeDefaultAcl(String src) throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Remove default acl", "SSM Small File"));
    } else {
      super.removeDefaultAcl(src);
    }
  }

  /** Removes the ACL of {@code src}; rejected for compacted small files. */
  @Override
  public void removeAcl(String src) throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Remove acl", "SSM Small File"));
    } else {
      super.removeAcl(src);
    }
  }

  /** Sets the ACL of {@code src}; rejected for compacted small files. */
  @Override
  public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Set acl", "SSM Small File"));
    } else {
      super.setAcl(src, aclSpec);
    }
  }

  /** Creates an encryption zone at {@code src}; rejected for compacted small files. */
  @Override
  public void createEncryptionZone(String src, String keyName)
      throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      throw new IOException(getExceptionMsg("Create encryption zone", "SSM Small File"));
    } else {
      super.createEncryptionZone(src, keyName);
    }
  }

  /**
   * Checks access to {@code src}; for a compacted small file the check is
   * performed against the container file that holds its data.
   */
  @Override
  public void checkAccess(String src, FsAction mode) throws IOException {
    FileState fileState = getFileState(src);
    if (fileState instanceof CompactFileState) {
      super.checkAccess(((CompactFileState) fileState)
          .getFileContainerInfo().getContainerFilePath(), mode);
    } else {
      super.checkAccess(src, mode);
    }
  }

  /**
   * Returns whether {@code src} is closed; a compacted small file is
   * considered closed when its container file is closed.
   */
  @Override
  public boolean isFileClosed(String src) throws IOException {
    boolean isFileClosed = super.isFileClosed(src);
    if (!isFileClosed) {
      FileState fileState = getFileState(src);
      if (fileState instanceof CompactFileState) {
        String containerFile = ((CompactFileState) fileState)
            .getFileContainerInfo().getContainerFilePath();
        isFileClosed = super.isFileClosed(containerFile);
      }
    }
    return isFileClosed;
  }

  /** Closes the HDFS client and the Smart Server connection, in that order. */
  @Override
  public synchronized void close() throws IOException {
    try {
      super.close();
    } finally {
      try {
        if (smartClient != null) {
          smartClient.close();
        }
      } finally {
        healthy = false;
      }
    }
  }

  /**
   * Report file access event to SSM server. Best effort: a failure disables
   * reporting for the remainder of this instance's lifetime.
   */
  private void reportFileAccessEvent(String src) {
    try {
      if (!healthy) {
        return;
      }
      String userName;
      try {
        userName = UserGroupInformation.getCurrentUser().getUserName();
      } catch (IOException e) {
        userName = "Unknown";
      }
      smartClient.reportFileAccessEvent(new FileAccessEvent(src, userName));
    } catch (IOException e) {
      // Here just ignores that failed to report
      LOG.error("Cannot report file access event to SmartServer: " + src
          + " , for: " + e.getMessage()
          + " , report mechanism will be disabled now in this instance.");
      healthy = false;
    }
  }

  /**
   * Check if the smart client is disabled (marker file present on this host).
   */
  private boolean isSmartClientDisabled() {
    File idFile = new File(SmartConstants.SMART_CLIENT_DISABLED_ID_FILE);
    return idFile.exists();
  }

  /**
   * Get the exception message of unsupported operation.
   *
   * @param operation the hdfs operation name
   * @param fileType the type of SSM specify file
   * @return the message of unsupported exception
   */
  public String getExceptionMsg(String operation, String fileType) {
    return String.format("%s is not supported for %s.", operation, fileType);
  }

  /**
   * Get file state of the specified file, deserialized from its SSM xattr.
   * Falls back to a normal-file state when the xattr is absent or the
   * namenode rejects the xattr call.
   *
   * @param filePath the path of source file
   * @return file state of source file
   * @throws IOException e
   */
  public FileState getFileState(String filePath) throws IOException {
    try {
      byte[] fileState = getXAttr(filePath, SmartConstants.SMART_FILE_STATE_XATTR_NAME);
      if (fileState != null) {
        return (FileState) SerializationUtils.deserialize(fileState);
      }
    } catch (RemoteException e) {
      return new NormalFileState(filePath);
    }
    return new NormalFileState(filePath);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-client-3.1/src/main/java/org/smartdata/hadoop/filesystem/SmartFileSystem.java | smart-hadoop-support/smart-hadoop-client-3.1/src/main/java/org/smartdata/hadoop/filesystem/SmartFileSystem.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hadoop.filesystem;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemLinkResolver;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsNamedFileStatus;
import org.apache.hadoop.util.Progressable;
import org.smartdata.hdfs.client.SmartDFSClient;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.CompressionTrunk;
import org.smartdata.model.FileContainerInfo;
import org.smartdata.model.FileState;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
/**
* SmartFileSystem Deploy Guide
* 1. Build SSM, get all jar files start with name Smart*
* 2. Copy these jar files to HDFS classpath
* 3. Reconfigure HDFS
* Please do the following configurations,
* 1) core-site.xml
* Change property "fs.hdfs.impl" value, to point "Smart File System",
* provided by SSM.
* <property>
* <name>fs.hdfs.impl</name>
* <value>org.smartdata.hadoop.filesystem.SmartFileSystem</value>
* <description>The FileSystem for hdfs URL</description>
* </property>
* 2) hdfs-site.xml
* Add property "smart.server.rpc.address" to point to Smart Server.
* If SSM HA mode is enabled, more than one Smart Server address can
* be specified with comma delimited.
* <property>
* <name>smart.server.rpc.address</name>
* <value>127.0.0.1:7042</value>
* </property>
*
* 4. Restart HDFS
*/
public class SmartFileSystem extends DistributedFileSystem {
private SmartDFSClient smartDFSClient;
private boolean verifyChecksum = true;
  /**
   * Initializes the underlying {@link DistributedFileSystem}, then creates
   * the {@link SmartDFSClient} used for all SSM-aware operations
   * (access-event reporting, small-file/compression transparency).
   */
  @Override
  public void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);
    this.smartDFSClient = new SmartDFSClient(conf);
  }
  /**
   * Opens {@code path} through the SmartDFSClient so reads of SSM-managed
   * files (compacted/compressed) are redirected transparently and the access
   * is reported to the Smart Server.
   */
  @Override
  public FSDataInputStream open(Path path, final int bufferSize)
      throws IOException {
    statistics.incrementReadOps(1);
    // Resolve relative paths against the working directory first.
    Path absF = fixRelativePart(path);
    final DFSInputStream in = smartDFSClient.open(
        absF.toUri().getPath(), bufferSize, verifyChecksum);
    return smartDFSClient.createWrappedInputStream(in);
  }
  /**
   * Records whether checksums should be verified; the flag is forwarded on
   * each subsequent {@link #open(Path, int)} call.
   * NOTE(review): does not delegate to super, so only streams opened via this
   * class observe the flag — confirm that is intended.
   */
  @Override
  public void setVerifyChecksum(boolean verifyChecksum) {
    this.verifyChecksum = verifyChecksum;
  }
  /** Appends to {@code f} with the default {@link CreateFlag#APPEND} flag. */
  @Override
  public FSDataOutputStream append(Path f, final int bufferSize,
      final Progressable progress) throws IOException {
    return append(f, EnumSet.of(CreateFlag.APPEND), bufferSize, progress);
  }
@Override
public FSDataOutputStream append(Path f, final EnumSet<CreateFlag> flag,
final int bufferSize, final Progressable progress)
throws IOException {
FSDataOutputStream out = super.append(f, flag, bufferSize, progress);
if (out.getPos() == 0) {
FileState fileState = smartDFSClient.getFileState(getPathName(f));
if (fileState instanceof CompactFileState) {
throw new IOException(
smartDFSClient.getExceptionMsg("Append", "SSM Small File"));
}
} else {
FileState fileState = smartDFSClient.getFileState(getPathName(f));
if (fileState instanceof CompressionFileState) {
throw new IOException(
smartDFSClient.getExceptionMsg("Append", "Compressed File"));
}
}
return out;
}
@Override
public FSDataOutputStream append(Path f, final EnumSet<CreateFlag> flag,
final int bufferSize, final Progressable progress,
final InetSocketAddress[] favoredNodes)
throws IOException {
FSDataOutputStream out = super.append(f, flag, bufferSize, progress, favoredNodes);
if (out.getPos() == 0) {
FileState fileState = smartDFSClient.getFileState(getPathName(f));
if (fileState instanceof CompactFileState) {
throw new IOException(
smartDFSClient.getExceptionMsg("Append", "SSM Small File"));
}
} else {
FileState fileState = smartDFSClient.getFileState(getPathName(f));
if (fileState instanceof CompressionFileState) {
throw new IOException(
smartDFSClient.getExceptionMsg("Append", "Compressed File"));
}
}
return out;
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
FileStatus oldStatus = super.getFileStatus(f);
if (oldStatus == null) return null;
if (oldStatus.getLen() == 0) {
FileState fileState = smartDFSClient.getFileState(getPathName(f));
if (fileState instanceof CompactFileState) {
long len = ((CompactFileState) fileState).getFileContainerInfo().getLength();
return new FileStatus(len, oldStatus.isDirectory(), oldStatus.getReplication(),
oldStatus.getBlockSize(), oldStatus.getModificationTime(),
oldStatus.getAccessTime(), oldStatus.getPermission(),
oldStatus.getOwner(), oldStatus.getGroup(),
oldStatus.isSymlink() ? oldStatus.getSymlink() : null, oldStatus.getPath());
}
} else {
FileState fileState = smartDFSClient.getFileState(getPathName(f));
if (fileState instanceof CompressionFileState) {
long len = ((CompressionFileState) fileState).getOriginalLength();
return new FileStatus(len, oldStatus.isDirectory(), oldStatus.getReplication(),
oldStatus.getBlockSize(), oldStatus.getModificationTime(),
oldStatus.getAccessTime(), oldStatus.getPermission(),
oldStatus.getOwner(), oldStatus.getGroup(),
oldStatus.isSymlink() ? oldStatus.getSymlink() : null, oldStatus.getPath());
}
}
return oldStatus;
}
@Override
public FileStatus[] listStatus(Path p) throws IOException {
FileStatus[] oldStatus = super.listStatus(p);
ArrayList<FileStatus> newStatus = new ArrayList<>(oldStatus.length);
for (FileStatus status : oldStatus) {
if (oldStatus == null) {
newStatus.add(null);
continue;
}
if (status.getLen() == 0) {
FileState fileState = smartDFSClient.getFileState(getPathName(status.getPath()));
if (fileState instanceof CompactFileState) {
long len = ((CompactFileState) fileState).getFileContainerInfo().getLength();
newStatus.add(new FileStatus(len, status.isDirectory(), status.getReplication(),
status.getBlockSize(), status.getModificationTime(), status.getAccessTime(),
status.getPermission(), status.getOwner(), status.getGroup(),
status.isSymlink() ? status.getSymlink() : null, status.getPath()));
} else {
newStatus.add(status);
}
} else {
FileState fileState = smartDFSClient.getFileState(getPathName(status.getPath()));
if (fileState instanceof CompressionFileState) {
long len = ((CompressionFileState) fileState).getOriginalLength();
newStatus.add(new FileStatus(len, status.isDirectory(), status.getReplication(),
status.getBlockSize(), status.getModificationTime(), status.getAccessTime(),
status.getPermission(), status.getOwner(), status.getGroup(),
status.isSymlink() ? status.getSymlink() : null, status.getPath()));
} else {
newStatus.add(status);
}
}
}
return newStatus.toArray(new FileStatus[oldStatus.length]);
}
  /**
   * Returns block locations for {@code p}. A compacted small file has no HDFS
   * blocks of its own (zero locations), so the backing container file's
   * locations are returned instead, with offsets shifted back so they appear
   * relative to the small file itself.
   */
  @Override
  public BlockLocation[] getFileBlockLocations(Path p, final long start,
      final long len) throws IOException {
    BlockLocation[] blockLocations = super.getFileBlockLocations(
        p, start, len);
    if (blockLocations.length == 0) {
      FileState fileState = smartDFSClient.getFileState(getPathName(p));
      if (fileState instanceof CompactFileState) {
        FileContainerInfo fileContainerInfo = ((CompactFileState) fileState).getFileContainerInfo();
        String containerFile = fileContainerInfo.getContainerFilePath();
        long offset = fileContainerInfo.getOffset();
        blockLocations = super.getFileBlockLocations(
            new Path(containerFile), offset + start, len);
        for (BlockLocation blockLocation : blockLocations) {
          // Translate container-file offsets back into small-file offsets.
          blockLocation.setOffset(blockLocation.getOffset() - offset);
        }
      }
    }
    return blockLocations;
  }
@Override
public boolean setReplication(Path src,
final short replication) throws IOException {
FileState fileState = smartDFSClient.getFileState(getPathName(src));
if (fileState instanceof CompactFileState) {
throw new IOException(
smartDFSClient.getExceptionMsg("Set replication", "SSM Small File"));
} else {
return super.setReplication(src, replication);
}
}
@Override
public void setStoragePolicy(final Path src, final String policyName)
throws IOException {
FileState fileState = smartDFSClient.getFileState(getPathName(src));
if (fileState instanceof CompactFileState) {
throw new IOException(
smartDFSClient.getExceptionMsg("Set storage policy", "SSM Small File"));
} else {
super.setStoragePolicy(src, policyName);
}
}
@Override
public void concat(Path trg, Path [] psrcs) throws IOException {
try {
super.concat(trg, psrcs);
} catch (IOException e) {
for (Path src : psrcs) {
FileState fileState = smartDFSClient.getFileState(getPathName(src));
if (fileState instanceof CompactFileState) {
throw new IOException(
smartDFSClient.getExceptionMsg("Concat", "SSM Small File"));
}
}
}
}
@Override
public FileStatus getFileLinkStatus(final Path f) throws IOException {
FileStatus fileStatus = super.getFileLinkStatus(f);
if (fileStatus.getLen() == 0) {
Path target = getLinkTarget(f);
FileState fileState = smartDFSClient.getFileState(getPathName(target));
if (fileState instanceof CompactFileState) {
fileStatus = getFileStatus(target);
}
} else {
Path target = getLinkTarget(f);
FileState fileState = smartDFSClient.getFileState(getPathName(target));
if (fileState instanceof CompressionFileState) {
fileStatus = getFileStatus(target);
}
}
return fileStatus;
}
  /**
   * Returns the checksum over the whole file, resolving symlinks across file
   * systems. Delegates to the SmartDFSClient, which substitutes the original
   * checksum stored at compaction time for compacted small files.
   */
  @Override
  public FileChecksum getFileChecksum(Path f) throws IOException {
    statistics.incrementReadOps(1);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FileChecksum>() {
      @Override
      public FileChecksum doCall(final Path p)
          throws IOException {
        return smartDFSClient.getFileChecksum(getPathName(p), Long.MAX_VALUE);
      }

      @Override
      public FileChecksum next(final FileSystem fs, final Path p)
          throws IOException {
        // Link crossed into another file system: let that FS answer.
        return fs.getFileChecksum(p);
      }
    }.resolve(this, absF);
  }
  /**
   * Returns the checksum over the first {@code length} bytes, resolving
   * symlinks. Only another SmartFileSystem can continue the range-limited
   * form; other file systems are rejected.
   */
  @Override
  public FileChecksum getFileChecksum(Path f, final long length)
      throws IOException {
    statistics.incrementReadOps(1);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FileChecksum>() {
      @Override
      public FileChecksum doCall(final Path p)
          throws IOException {
        return smartDFSClient.getFileChecksum(getPathName(p), length);
      }

      @Override
      public FileChecksum next(final FileSystem fs, final Path p)
          throws IOException {
        if (fs instanceof SmartFileSystem) {
          return fs.getFileChecksum(p, length);
        } else {
          throw new UnsupportedFileSystemException(
              "getFileChecksum(Path, long) is not supported by "
                  + fs.getClass().getSimpleName());
        }
      }
    }.resolve(this, absF);
  }
/**
 * Sets the permission of the given path via the smart client, resolving
 * symlinks with {@code FileSystemLinkResolver}.
 */
@Override
public void setPermission(Path p, final FsPermission permission
) throws IOException {
  statistics.incrementWriteOps(1);
  Path absF = fixRelativePart(p);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p)
        throws IOException, UnresolvedLinkException {
      smartDFSClient.setPermission(getPathName(p), permission);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      // Link crossed into another filesystem: delegate to it.
      fs.setPermission(p, permission);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Sets the owner and/or group of the given path via the smart client.
 *
 * @throws IOException if both {@code username} and {@code groupname} are
 *     null (at least one must be supplied)
 */
@Override
public void setOwner(Path p, final String username, final String groupname
) throws IOException {
  if (username == null && groupname == null) {
    throw new IOException("username == null && groupname == null");
  }
  statistics.incrementWriteOps(1);
  Path absF = fixRelativePart(p);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p)
        throws IOException, UnresolvedLinkException {
      smartDFSClient.setOwner(getPathName(p), username, groupname);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.setOwner(p, username, groupname);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Lists corrupt file blocks under the given path.
 *
 * <p>For an SSM small file the physical blocks live in its container file,
 * so the listing is issued against the container path instead of the
 * logical small-file path.
 *
 * @param path path to inspect for corrupt blocks
 * @return iterator over paths with corrupt blocks
 * @throws IOException on communication failure
 */
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
    throws IOException {
  FileState fileState = smartDFSClient.getFileState(getPathName(path));
  if (fileState instanceof CompactFileState) {
    return super.listCorruptFileBlocks(
        new Path(((CompactFileState) fileState)
            .getFileContainerInfo().getContainerFilePath()));
  }
  // Defect fixed: the original unconditionally called
  // super.listCorruptFileBlocks(path) first and then discarded that result
  // in the compact-file branch, costing an extra namenode round trip.
  return super.listCorruptFileBlocks(path);
}
/**
 * Modifies ACL entries of the given path via the smart client, resolving
 * symlinks with {@code FileSystemLinkResolver}.
 */
@Override
public void modifyAclEntries(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.modifyAclEntries(getPathName(p), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.modifyAclEntries(p, aclSpec);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Removes the given ACL entries from the path via the smart client,
 * resolving symlinks with {@code FileSystemLinkResolver}.
 */
@Override
public void removeAclEntries(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.removeAclEntries(getPathName(p), aclSpec);
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      fs.removeAclEntries(p, aclSpec);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Removes the default ACL of the given path via the smart client,
 * resolving symlinks with {@code FileSystemLinkResolver}.
 */
@Override
public void removeDefaultAcl(Path path) throws IOException {
  final Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.removeDefaultAcl(getPathName(p));
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException, UnresolvedLinkException {
      fs.removeDefaultAcl(p);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Removes the entire ACL of the given path via the smart client,
 * resolving symlinks with {@code FileSystemLinkResolver}.
 */
@Override
public void removeAcl(Path path) throws IOException {
  final Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.removeAcl(getPathName(p));
      return null;
    }
    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException, UnresolvedLinkException {
      fs.removeAcl(p);
      return null;
    }
  }.resolve(this, absF);
}
/**
 * Replaces the ACL of the given path via the smart client, resolving
 * symlinks with {@code FileSystemLinkResolver}.
 */
@Override
public void setAcl(Path path, final List<AclEntry> aclSpec)
    throws IOException {
  final Path resolvedPath = fixRelativePart(path);
  FileSystemLinkResolver<Void> resolver = new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      smartDFSClient.setAcl(getPathName(p), aclSpec);
      return null;
    }

    @Override
    public Void next(final FileSystem fs, final Path p)
        throws IOException {
      // Link crossed into another filesystem: delegate to it.
      fs.setAcl(p, aclSpec);
      return null;
    }
  };
  resolver.resolve(this, resolvedPath);
}
/**
 * Creates an encryption zone rooted at the given path, protected by the
 * named encryption key. Delegates directly to the smart client.
 */
@Override
public void createEncryptionZone(Path path, String keyName)
    throws IOException {
  final String src = getPathName(path);
  smartDFSClient.createEncryptionZone(src, keyName);
}
/**
 * Returns a remote iterator over a directory's entries, rebuilding the
 * statuses of SSM small/compressed files via {@code SmartDirListingIterator}
 * so their logical lengths are reported.
 */
@Override
public RemoteIterator<FileStatus> listStatusIterator(final Path p)
    throws IOException {
  Path absF = fixRelativePart(p);
  return new FileSystemLinkResolver<RemoteIterator<FileStatus>>() {
    @Override
    public RemoteIterator<FileStatus> doCall(final Path p)
        throws IOException {
      // needLocation=false: plain FileStatus entries, no block locations.
      return new SmartDirListingIterator<>(p, false);
    }
    @Override
    public RemoteIterator<FileStatus> next(final FileSystem fs, final Path p)
        throws IOException {
      // NOTE(review): assumes the resolved fs is a DistributedFileSystem;
      // any other FileSystem would throw ClassCastException here.
      return ((DistributedFileSystem) fs).listStatusIterator(p);
    }
  }.resolve(this, absF);
}
/**
 * Returns a remote iterator over located statuses (with block locations)
 * for a directory, filtered by {@code filter}, using
 * {@code SmartDirListingIterator} for SSM-aware reconstruction.
 */
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path p,
    final PathFilter filter) throws IOException {
  Path absF = fixRelativePart(p);
  return new FileSystemLinkResolver<RemoteIterator<LocatedFileStatus>>() {
    @Override
    public RemoteIterator<LocatedFileStatus> doCall(final Path p)
        throws IOException {
      // needLocation=true: entries carry block locations.
      return new SmartDirListingIterator<>(p, filter, true);
    }
    @Override
    public RemoteIterator<LocatedFileStatus> next(final FileSystem fs, final Path p)
        throws IOException {
      if (fs instanceof SmartFileSystem) {
        return ((SmartFileSystem)fs).listLocatedStatus(p, filter);
      }
      // symlink resolution for this methods does not work cross file systems
      // because it is a protected method.
      throw new IOException("Link resolution does not work with multiple " +
          "file systems for listLocatedStatus(): " + p);
    }
  }.resolve(this, absF);
}
/**
 * Paged directory-listing iterator that rewrites each entry's status to
 * reflect SSM file states: small files compacted into a container file get
 * their logical length (and container block locations) restored, and
 * compressed files get their original (pre-compression) length and adjusted
 * block locations.
 *
 * <p>Listing batches are fetched lazily from the smart client via
 * {@code listPaths}; {@code hasNext()} applies the optional
 * {@code PathFilter} after the status reconstruction.
 */
private class SmartDirListingIterator<T extends FileStatus>
    implements RemoteIterator<T> {

  // Current batch of directory entries fetched from the smart client.
  private DirectoryListing thisListing;
  // Index of the next entry to consume within thisListing.
  private int i;
  // Directory being listed (as given by the caller).
  private Path p;
  // Validated DFS path name of the directory.
  private String src;
  // Next filtered entry to hand out, or null if not yet computed.
  private T curStat = null;
  // Optional filter applied to reconstructed entries; null = accept all.
  private PathFilter filter;
  // Whether entries should carry block locations (LocatedFileStatus).
  private boolean needLocation;

  private SmartDirListingIterator(Path p, PathFilter filter,
      boolean needLocation) throws IOException {
    this.p = p;
    this.src = getPathName(p);
    this.filter = filter;
    this.needLocation = needLocation;
    // fetch the first batch of entries in the directory
    thisListing = smartDFSClient.listPaths(src, HdfsFileStatus.EMPTY_NAME,
        needLocation);
    statistics.incrementReadOps(1);
    // the directory does not exist
    if (thisListing == null) {
      throw new FileNotFoundException("File " + p + " does not exist.");
    }
    i = 0;
  }

  // Convenience constructor: no filter.
  private SmartDirListingIterator(Path p, boolean needLocation)
      throws IOException {
    this(p, null, needLocation);
  }

  /**
   * Advances until a reconstructed entry passes the filter (or the listing
   * is exhausted). The accepted entry is cached in {@code curStat}.
   */
  @Override
  @SuppressWarnings("unchecked")
  public boolean hasNext() throws IOException {
    while (curStat == null && hasNextNoFilter()) {
      T next;
      HdfsFileStatus fileStat = thisListing.getPartialListing()[i++];
      if (needLocation) {
        next = (T)((HdfsLocatedFileStatus) fileStat).makeQualifiedLocated(getUri(), p);
        String fileName = next.getPath().toUri().getPath();
        // Reconstruct FileStatus
        if (next.getLen() == 0) {
          // Zero length may mean an SSM small file stored in a container:
          // restore its logical length and container block locations.
          FileState fileState = smartDFSClient.getFileState(fileName);
          if (fileState instanceof CompactFileState) {
            CompactFileState compactFileState = (CompactFileState) fileState;
            long len = compactFileState.getFileContainerInfo().getLength();
            BlockLocation[] blockLocations = smartDFSClient.getBlockLocations(
                fileName, 0, len);
            next = (T) new LocatedFileStatus(len,
                next.isDirectory(),
                next.getReplication(),
                next.getBlockSize(),
                next.getModificationTime(),
                next.getAccessTime(),
                next.getPermission(),
                next.getOwner(),
                next.getGroup(),
                next.isSymlink() ? next.getSymlink() : null,
                next.getPath(),
                blockLocations);
          }
        } else {
          // Non-zero length may be an SSM-compressed file: report the
          // original (uncompressed) length and adjusted locations.
          FileState fileState = smartDFSClient.getFileState(fileName);
          if (fileState instanceof CompressionFileState) {
            next = getCompressedFileStatus(fileState, next);
          }
        }
      } else {
        next = (T) fileStat.makeQualified(getUri(), p);
        String fileName = next.getPath().toUri().getPath();
        // Reconstruct FileStatus
        if (next.getLen() == 0) {
          // Same small-file reconstruction as above, without locations.
          FileState fileState = smartDFSClient.getFileState(fileName);
          if (fileState instanceof CompactFileState) {
            CompactFileState compactFileState = (CompactFileState) fileState;
            long len = compactFileState.getFileContainerInfo().getLength();
            next = (T) new FileStatus(len,
                next.isDirectory(),
                next.getReplication(),
                next.getBlockSize(),
                next.getModificationTime(),
                next.getAccessTime(),
                next.getPermission(),
                next.getOwner(),
                next.getGroup(),
                next.isSymlink() ? next.getSymlink() : null,
                next.getPath());
          }
        } else {
          FileState fileState = smartDFSClient.getFileState(fileName);
          if (fileState instanceof CompressionFileState) {
            next = getCompressedFileStatus(fileState, next);
          }
        }
      }
      // apply filter if not null
      if (filter == null || filter.accept(next.getPath())) {
        curStat = next;
      }
    }
    return curStat != null;
  }

  /**
   * Rebuilds a status for an SSM-compressed file: the length becomes the
   * original (uncompressed) length, and for located statuses each block
   * location is mapped back to original-file offsets.
   */
  private T getCompressedFileStatus(FileState fileState, T next) throws IOException {
    CompressionFileState compressionFileState = (CompressionFileState) fileState;
    long fileLen = compressionFileState.getOriginalLength();
    if (next instanceof HdfsNamedFileStatus) {
      // Rebuild via the HdfsFileStatus builder, copying every attribute and
      // substituting only the original length.
      Set<FileStatus.AttrFlags> flags = next.attributes(next.hasAcl(), next.isEncrypted(),
          next.isErasureCoded(), next.isSnapshotEnabled());
      HdfsNamedFileStatus nextHdfsNamed = (HdfsNamedFileStatus) next;
      HdfsFileStatus.Builder builder = new HdfsFileStatus.Builder();
      return (T) builder.atime(next.getAccessTime())
          .blocksize(next.getBlockSize())
          .children(nextHdfsNamed.getChildrenNum())
          .ecPolicy(nextHdfsNamed.getErasureCodingPolicy())
          .fileId(nextHdfsNamed.getFileId())
          .feInfo(nextHdfsNamed.getFileEncryptionInfo())
          .flags(convert(flags))
          .group(next.getGroup())
          .isdir(next.isDirectory())
          .length(fileLen)
          .locations(null)
          .mtime(next.getModificationTime())
          .owner(next.getOwner())
          .path(nextHdfsNamed.getLocalNameInBytes())
          .perm(next.getPermission())
          .replication(next.getReplication())
          .storagePolicy(nextHdfsNamed.getStoragePolicy())
          .symlink(nextHdfsNamed.getSymlinkInBytes())
          .build()
          .makeQualified(getUri(), next.getPath());
    }
    // Located status: translate each block location from compressed to
    // original offsets in place.
    BlockLocation[] blockLocations =
        ((LocatedFileStatus) next).getBlockLocations();
    for (BlockLocation blockLocation : blockLocations) {
      convertBlockLocation(blockLocation, compressionFileState);
    }
    return (T) new LocatedFileStatus(fileLen,
        next.isDirectory(),
        next.getReplication(),
        next.getBlockSize(),
        next.getModificationTime(),
        next.getAccessTime(),
        next.getPermission(),
        next.getOwner(),
        next.getGroup(),
        next.isSymlink() ? next.getSymlink() : null,
        next.getPath(),
        blockLocations);
  }

  /**
   * Maps the public {@code FileStatus.AttrFlags} set to the protocol-level
   * {@code HdfsFileStatus.Flags} enum set.
   */
  public EnumSet<HdfsFileStatus.Flags> convert(Set<FileStatus.AttrFlags> e) {
    EnumSet<HdfsFileStatus.Flags> result = EnumSet.noneOf(HdfsFileStatus.Flags.class);
    if (e == null || e.isEmpty()) {
      return result;
    }
    for (FileStatus.AttrFlags af : e) {
      if (af == FileStatus.AttrFlags.HAS_ACL) {
        result.add(HdfsFileStatus.Flags.HAS_ACL);
      }
      if (af == FileStatus.AttrFlags.HAS_CRYPT) {
        result.add(HdfsFileStatus.Flags.HAS_CRYPT);
      }
      if (af == FileStatus.AttrFlags.HAS_EC) {
        result.add(HdfsFileStatus.Flags.HAS_EC);
      }
      if (af == FileStatus.AttrFlags.SNAPSHOT_ENABLED) {
        result.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED);
      }
    }
    return result;
  }

  // Definitions:
  // * Compression trunk doesn't cross over two blocks:
  //   - Offset = original start of the first trunk
  //   - End = original end of the last trunk
  // * Compression trunk crosses over two blocks:
  //   - Offset = original middle of the first incomplete trunk
  //   - End = original middle of the last incomplete trunk
  private void convertBlockLocation(BlockLocation blockLocation,
      CompressionFileState compressionInfo) throws IOException {
    long compressedStart = blockLocation.getOffset();
    long compressedEnd = compressedStart + blockLocation.getLength() - 1;
    // Locate the compression trunks containing the block's first and last
    // compressed bytes.
    CompressionTrunk startTrunk = compressionInfo.locateCompressionTrunk(
        true, compressedStart);
    CompressionTrunk endTrunk = compressionInfo.locateCompressionTrunk(
        true, compressedEnd);
    long originStart;
    // If the first trunk crosses over two blocks, set start as middle of the trunk
    if (startTrunk.getCompressedOffset() < compressedStart) {
      originStart = startTrunk.getOriginOffset() + startTrunk.getOriginLength() / 2 + 1;
    } else {
      originStart = startTrunk.getOriginOffset();
    }
    long originEnd;
    // If the last trunk corsses over two blocks, set end as middle of the trunk
    if (endTrunk.getCompressedOffset() + endTrunk.getCompressedLength() - 1 > compressedEnd) {
      originEnd = endTrunk.getOriginOffset() + endTrunk.getOriginLength() / 2;
    } else {
      originEnd = endTrunk.getOriginOffset() + endTrunk.getOriginLength() - 1;
    }
    // Rewrite the location in place with original-file coordinates.
    blockLocation.setOffset(originStart);
    blockLocation.setLength(originEnd - originStart + 1);
  }

  /**
   * Check if there is a next item before applying the given filter
   */
  private boolean hasNextNoFilter() throws IOException {
    if (thisListing == null) {
      return false;
    }
    if (i >= thisListing.getPartialListing().length && thisListing.hasMore()) {
      // current listing is exhausted & fetch a new listing
      thisListing = smartDFSClient.listPaths(
          src, thisListing.getLastName(), needLocation);
      statistics.incrementReadOps(1);
      if (thisListing == null) {
        return false;
      }
      i = 0;
    }
    return (i < thisListing.getPartialListing().length);
  }

  /**
   * Returns the next filtered entry, clearing the cache so a subsequent
   * {@code hasNext()} advances again.
   */
  @Override
  public T next() throws IOException {
    if (hasNext()) {
      T tmp = curStat;
      curStat = null;
      return tmp;
    }
    throw new java.util.NoSuchElementException("No more entry in " + p);
  }
}
/**
 * Reports whether the file is closed. For an SSM small file the answer is
 * determined by its container file, since the logical file itself holds no
 * blocks.
 */
@Override
public boolean isFileClosed(final Path src) throws IOException {
  if (super.isFileClosed(src)) {
    return true;
  }
  FileState fileState = smartDFSClient.getFileState(getPathName(src));
  if (fileState instanceof CompactFileState) {
    String containerFile = ((CompactFileState) fileState)
        .getFileContainerInfo().getContainerFilePath();
    return smartDFSClient.isFileClosed(containerFile);
  }
  return false;
}
/**
 * Closes the underlying filesystem first, then the smart client, ensuring
 * the client is closed even if the superclass close throws.
 */
@Override
public void close() throws IOException {
  try {
    super.close();
  } finally {
    if (smartDFSClient != null) {
      this.smartDFSClient.close();
    }
  }
}
/**
 * Checks that the passed URI belongs to this filesystem and extracts just
 * its path component. Expects a URI with an absolute path.
 *
 * @param file URI with absolute path
 * @return path component of {file}
 * @throws IllegalArgumentException if URI does not belong to this DFS
 */
private String getPathName(Path file) {
  checkPath(file);
  final String pathName = fixRelativePart(file).toUri().getPath();
  if (!DFSUtil.isValidName(pathName)) {
    throw new IllegalArgumentException("Pathname " + pathName + " from "
        + file + " is not a valid DFS filename.");
  }
  return pathName;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-cdh-2.6/src/test/java/org/smartdata/hdfs/MiniClusterFactory26.java | smart-hadoop-support/smart-hadoop-cdh-2.6/src/test/java/org/smartdata/hdfs/MiniClusterFactory26.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StorageType;
import java.io.IOException;
/**
 * MiniDFSCluster factory for the Hadoop/CDH 2.6 line.
 */
public class MiniClusterFactory26 extends MiniClusterFactory {

  /** Builds a plain cluster with the requested number of datanodes. */
  @Override
  public MiniDFSCluster create(int dataNodes, Configuration conf) throws IOException {
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.numDataNodes(dataNodes);
    return builder.build();
  }

  /**
   * Builds a cluster where each datanode exposes three storages:
   * DISK, ARCHIVE and SSD.
   */
  @Override
  public MiniDFSCluster createWithStorages(int dataNodes, Configuration conf) throws IOException {
    StorageType[] perDatanodeStorages =
        new StorageType[]{StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD};
    return new MiniDFSCluster.Builder(conf)
        .numDataNodes(dataNodes)
        .storagesPerDatanode(3)
        .storageTypes(perDatanodeStorages)
        .build();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-cdh-2.6/src/main/java/org/smartdata/hdfs/CompatibilityHelper26.java | smart-hadoop-support/smart-hadoop-cdh-2.6/src/main/java/org/smartdata/hdfs/CompatibilityHelper26.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.security.token.Token;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
/**
 * Compatibility shim for the Hadoop/CDH 2.6 API line. Bridges version
 * differences (StorageType handling, absence of truncate, append/create
 * signatures) behind the version-neutral helper interface.
 */
public class CompatibilityHelper26 extends CompatibilityHelper2 {

  /** Returns the storage type of each replica of the block, as strings. */
  @Override
  public String[] getStorageTypes(LocatedBlock lb) {
    List<String> types = new ArrayList<>();
    for (StorageType type : lb.getStorageTypes()) {
      types.add(type.toString());
    }
    // Idiom fix: new String[0] — the JVM sizes the result array itself.
    return types.toArray(new String[0]);
  }

  /** Sends a datanode replaceBlock request targeting the given storage type. */
  @Override
  public void replaceBlock(DataOutputStream out, ExtendedBlock eb, String storageType, Token<BlockTokenIdentifier> accessToken, String dnUUID, DatanodeInfo info) throws IOException {
    new Sender(out).replaceBlock(eb, StorageType.valueOf(storageType), accessToken, dnUUID, info);
  }

  /** Returns all storage types considered movable by the balancer/mover. */
  @Override
  public String[] getMovableTypes() {
    List<String> types = new ArrayList<>();
    for (StorageType type : StorageType.getMovableTypes()) {
      types.add(type.toString());
    }
    return types.toArray(new String[0]);
  }

  /** Extracts the storage type string from a datanode storage report. */
  @Override
  public String getStorageType(StorageReport report) {
    return report.getStorage().getStorageType().toString();
  }

  /** Lists the storage types a policy would choose for the given replication. */
  @Override
  public List<String> chooseStorageTypes(BlockStoragePolicy policy, short replication) {
    List<String> types = new ArrayList<>();
    for (StorageType type : policy.chooseStorageTypes(replication)) {
      types.add(type.toString());
    }
    return types;
  }

  /** Whether the named storage type participates in data movement. */
  @Override
  public boolean isMovable(String type) {
    return StorageType.valueOf(type).isMovable();
  }

  /**
   * Builds a minimal DatanodeInfo carrying only the IP address and transfer
   * port; all counters/timestamps are zeroed and optional fields are null.
   */
  @Override
  public DatanodeInfo newDatanodeInfo(String ipAddress, int xferPort) {
    return new DatanodeInfo(
        ipAddress,
        null,
        null,
        xferPort,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        0,
        null,
        null);
  }

  /** Serializes an inotify append event (2.6 protocol carries only the path). */
  @Override
  public InotifyProtos.AppendEventProto getAppendEventProto(Event.AppendEvent event) {
    return InotifyProtos.AppendEventProto.newBuilder()
        .setPath(event.getPath()).build();
  }

  /** Deserializes an inotify append event from its protobuf form. */
  @Override
  public Event.AppendEvent getAppendEvent(InotifyProtos.AppendEventProto proto) {
    return new Event.AppendEvent.Builder().path(proto.getPath()).build();
  }

  /** Truncate is unavailable before Hadoop 2.7. */
  @Override
  public boolean truncate(DFSClient client, String src, long newLength) throws IOException {
    throw new UnsupportedOperationException("Hadoop 2.6 does not support truncate.");
  }

  /** Truncate is unavailable before Hadoop 2.7. */
  @Override
  public boolean truncate(DistributedFileSystem fs, String src, long newLength) throws IOException {
    throw new UnsupportedOperationException("Hadoop 2.6 does not support truncate.");
  }

  /** Uses the storage type's ordinal as the storage id in reports. */
  @Override
  public int getSidInDatanodeStorageReport(DatanodeStorage datanodeStorage) {
    StorageType storageType = datanodeStorage.getStorageType();
    return storageType.ordinal();
  }

  /**
   * Opens a write stream at {@code dest}: appends when the file exists and a
   * non-zero offset is requested, otherwise creates/overwrites it.
   */
  @Override
  public OutputStream getDFSClientAppend(DFSClient client, String dest,
      int buffersize, long offset) throws IOException {
    if (client.exists(dest) && offset != 0) {
      return getDFSClientAppend(client, dest, buffersize);
    }
    return client.create(dest, true);
  }

  /** Opens an append stream on an existing file. */
  @Override
  public OutputStream getDFSClientAppend(
      DFSClient client, String dest, int buffersize) throws IOException {
    return client
        .append(dest, buffersize, null, null);
  }

  /**
   * Opens an output stream on a remote S3 destination.
   *
   * @throws IOException if {@code dest} is not an S3 URI
   */
  @Override
  public OutputStream getS3outputStream(String dest, Configuration conf) throws IOException {
    // Copy to remote S3
    if (!dest.startsWith("s3")) {
      // Defect fixed: the original threw a bare, message-less IOException,
      // leaving no clue why the copy was rejected.
      throw new IOException("Destination is not an S3 path: " + dest);
    }
    // Copy to s3
    org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(URI.create(dest), conf);
    return fs.create(new Path(dest), true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-maven-plugins/src/main/java/org/smartdata/maven/plugin/util/Exec.java | smart-maven-plugins/src/main/java/org/smartdata/maven/plugin/util/Exec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.maven.plugin.util;
import org.apache.maven.plugin.Mojo;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
/**
 * Helper to run external commands from a Maven mojo, capturing stdout and
 * stderr on background threads and logging failures through the mojo's log.
 *
 * <p>This class was previously decompiler output; it has been rewritten in
 * source form with equivalent behavior, fixing a swallowed interrupt status
 * and a misleading exception message (see inline notes).
 */
public class Exec {
  private final Mojo mojo;

  /**
   * @param mojo the mojo whose log receives diagnostics
   */
  public Exec(Mojo mojo) {
    this.mojo = mojo;
  }

  /**
   * Runs the command, collecting its stdout lines into {@code output}.
   *
   * @return the process exit code, or 1 if it could not be run
   */
  public int run(List<String> command, List<String> output) {
    // Defect fixed: the original used a raw "(List) null" cast.
    return run(command, output, null);
  }

  /**
   * Runs the command, collecting stdout into {@code output} and, when
   * {@code errors} is non-null, stderr into {@code errors}.
   *
   * @return the process exit code, or 1 if it could not be run
   */
  public int run(List<String> command, List<String> output, List<String> errors) {
    int retCode = 1;
    ProcessBuilder pb = new ProcessBuilder(command);
    try {
      Process p = pb.start();
      OutputBufferThread stdOut = new OutputBufferThread(p.getInputStream());
      OutputBufferThread stdErr = new OutputBufferThread(p.getErrorStream());
      stdOut.start();
      stdErr.start();
      retCode = p.waitFor();
      if (retCode != 0) {
        mojo.getLog().warn(command + " failed with error code " + retCode);
        for (String s : stdErr.getOutput()) {
          mojo.getLog().debug(s);
        }
      }
      stdOut.join();
      stdErr.join();
      output.addAll(stdOut.getOutput());
      if (errors != null) {
        errors.addAll(stdErr.getOutput());
      }
    } catch (IOException e) {
      mojo.getLog().warn(command + " failed: " + e.toString());
    } catch (InterruptedException e) {
      // Defect fixed: restore the interrupt flag instead of swallowing it.
      Thread.currentThread().interrupt();
      mojo.getLog().warn(command + " failed: " + e.toString());
    }
    return retCode;
  }

  /**
   * Copies the given environment variables into the process builder's
   * environment; null values are stored as empty strings.
   */
  public static void addEnvironment(ProcessBuilder pb, Map<String, String> env) {
    if (env == null) {
      return;
    }
    Map<String, String> processEnv = pb.environment();
    for (Map.Entry<String, String> entry : env.entrySet()) {
      String val = entry.getValue();
      processEnv.put(entry.getKey(), val == null ? "" : val);
    }
  }

  /**
   * Renders an environment map as a braced, line-per-entry string for
   * logging; null values render as empty strings.
   */
  public static String envToString(Map<String, String> env) {
    StringBuilder bld = new StringBuilder();
    bld.append("{");
    if (env != null) {
      for (Map.Entry<String, String> entry : env.entrySet()) {
        String val = entry.getValue();
        bld.append("\n  ")
            .append(entry.getKey())
            .append(" = '")
            .append(val == null ? "" : val)
            .append("'\n");
      }
    }
    bld.append("}");
    return bld.toString();
  }

  /**
   * Daemon thread that drains an input stream line by line into a list, so
   * a child process never blocks on a full pipe buffer.
   */
  public static class OutputBufferThread extends Thread {
    private final List<String> output;
    private final BufferedReader reader;

    public OutputBufferThread(InputStream is) {
      setDaemon(true);
      output = new ArrayList<>();
      // Charset overload cannot throw, unlike the old "UTF-8" string lookup.
      reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
    }

    @Override
    public void run() {
      try {
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
          output.add(line);
        }
      } catch (IOException e) {
        // Defect fixed: the original message claimed "make failed" for a
        // stream-read error and dropped the cause.
        throw new RuntimeException("Error reading subprocess output: " + e, e);
      }
    }

    /** Returns the lines captured so far; call after {@code join()}. */
    public List<String> getOutput() {
      return output;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-maven-plugins/src/main/java/org/smartdata/maven/plugin/cmakebuilder/CompileMojo.java | smart-maven-plugins/src/main/java/org/smartdata/maven/plugin/cmakebuilder/CompileMojo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.maven.plugin.cmakebuilder;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.smartdata.maven.plugin.util.Exec;
import org.smartdata.maven.plugin.util.Exec.OutputBufferThread;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
@Mojo(name = "cmake-compile", defaultPhase = LifecyclePhase.COMPILE)
public class CompileMojo extends AbstractMojo {
// Parallelism for "make -j": defaults to the number of available cores.
private static int availableProcessors = Runtime.getRuntime().availableProcessors();

// Build tree: where cmake generates its Makefiles and make runs.
@Parameter(defaultValue = "${project.build.directory}/native")
private File output;

// Source tree containing the top-level CMakeLists.txt.
@Parameter(defaultValue = "${basedir}/src/main/native", required = true)
private File source;

// Optional make target; when null the default target is built.
@Parameter private String target;

// Extra environment variables passed to the cmake/make processes.
@Parameter private Map<String, String> env;

// -D definitions passed to cmake (null/empty values are skipped).
@Parameter private Map<String, String> vars;

public CompileMojo() {}
/**
 * Fails fast on Windows, which this builder does not support.
 *
 * @throws MojoExecutionException when running on a Windows platform
 */
private static void validatePlatform() throws MojoExecutionException {
  String osName = System.getProperty("os.name").toLowerCase(Locale.ENGLISH);
  if (osName.startsWith("windows")) {
    throw new MojoExecutionException("CMakeBuilder does not yet support the Windows platform.");
  }
}
/**
 * Plugin entry point: validates the platform, generates build files with
 * cmake, then builds with make, logging the total wall-clock time.
 */
public void execute() throws MojoExecutionException {
  long start = System.nanoTime();
  validatePlatform();
  this.runCMake();
  // NOTE(review): make is deliberately invoked twice here; the upstream
  // Hadoop plugin this derives from does the same as a build workaround.
  // Presumably the same intent applies — confirm before de-duplicating.
  this.runMake();
  this.runMake();
  long end = System.nanoTime();
  this.getLog()
      .info(
          "cmake compilation finished successfully in "
              + TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS)
              + " millisecond(s).");
}
/**
 * Rejects configurations where the source tree sits inside the output
 * tree, since "mvn clean" would then delete the sources.
 *
 * @throws MojoExecutionException if either canonical path cannot be
 *     resolved, or if the source directory lies inside the output directory
 */
static void validateSourceParams(File source, File output) throws MojoExecutionException {
  final String canonicalOutput;
  try {
    canonicalOutput = output.getCanonicalPath();
  } catch (IOException ioe) {
    throw new MojoExecutionException("error getting canonical path for output", ioe);
  }
  final String canonicalSource;
  try {
    canonicalSource = source.getCanonicalPath();
  } catch (IOException ioe) {
    throw new MojoExecutionException("error getting canonical path for source", ioe);
  }
  if (canonicalSource.startsWith(canonicalOutput)) {
    throw new MojoExecutionException(
        "The source directory must not be inside the output directory "
            + "(it would be destroyed by 'mvn clean')");
  }
}
/**
 * Runs cmake in the output directory to generate Unix Makefiles for the
 * native source tree.
 *
 * <p>Every non-empty entry of {@code vars} is passed as a -D definition and
 * the extra environment from {@code env} is applied. Stdout and stderr are
 * merged; if cmake exits non-zero, its output is echoed at WARN level.
 *
 * @throws MojoExecutionException if cmake cannot be started, the wait is
 *     interrupted, or cmake exits with a non-zero status
 */
public void runCMake() throws MojoExecutionException {
  validatePlatform();
  validateSourceParams(this.source, this.output);
  if (this.output.mkdirs()) {
    this.getLog().info("mkdirs '" + this.output + "'");
  }

  List<String> cmd = new LinkedList<>();
  cmd.add("cmake");
  cmd.add(this.source.getAbsolutePath());
  for (Map.Entry<String, String> entry : this.vars.entrySet()) {
    String value = entry.getValue();
    // Skip unset definitions so cmake does not see empty -D flags.
    if (value != null && !value.equals("")) {
      cmd.add("-D" + entry.getKey() + "=" + value);
    }
  }
  cmd.add("-G");
  cmd.add("Unix Makefiles");

  this.getLog().info("Running " + String.join(" ", cmd));
  this.getLog().info("with extra environment variables " + Exec.envToString(this.env));

  ProcessBuilder pb = new ProcessBuilder(cmd);
  pb.directory(this.output);
  // Fold stderr into stdout so one reader thread captures everything.
  pb.redirectErrorStream(true);
  Exec.addEnvironment(pb, this.env);

  Process proc = null;
  OutputBufferThread outThread = null;
  int retCode = -1;
  try {
    proc = pb.start();
    outThread = new OutputBufferThread(proc.getInputStream());
    outThread.start();
    retCode = proc.waitFor();
    if (retCode != 0) {
      throw new MojoExecutionException("CMake failed with error code " + retCode);
    }
  } catch (IOException e) {
    throw new MojoExecutionException("Error executing CMake", e);
  } catch (InterruptedException e) {
    // Restore the interrupt flag before translating the exception.
    Thread.currentThread().interrupt();
    throw new MojoExecutionException("Interrupted while waiting for CMake process", e);
  } finally {
    // Defect fixed: the decompiled original duplicated this cleanup for the
    // normal and abrupt completion paths; one finally block covers both.
    if (proc != null) {
      proc.destroy();
    }
    if (outThread != null) {
      try {
        outThread.interrupt();
        outThread.join();
      } catch (InterruptedException e) {
        this.getLog().error("Interrupted while joining output thread", e);
      }
      if (retCode != 0) {
        for (String line : outThread.getOutput()) {
          this.getLog().warn(line);
        }
      }
    }
  }
}
/**
 * Runs {@code make -j<nproc> VERBOSE=1 [target]} in the configured output
 * directory.
 *
 * <p>On any exit path (success, non-zero exit code, or exception) the stdout
 * and stderr capture threads are joined, stderr is always replayed to the
 * Maven log, stdout is replayed only when make failed, and the child process
 * is destroyed. The original (decompiled) code duplicated this cleanup three
 * times; it is consolidated into a single {@code finally} block here with the
 * same observable behavior.
 *
 * @throws MojoExecutionException if make cannot be started, is interrupted,
 *     or exits with a non-zero code
 */
public void runMake() throws MojoExecutionException {
  List<String> cmd = new LinkedList<>();
  cmd.add("make");
  cmd.add("-j");
  cmd.add(String.valueOf(availableProcessors));
  cmd.add("VERBOSE=1");
  if (this.target != null) {
    cmd.add(this.target);
  }
  // Log the full command line, space-separated.
  StringBuilder bld = new StringBuilder();
  String prefix = "";
  for (String c : cmd) {
    bld.append(prefix).append(c);
    prefix = " ";
  }
  this.getLog().info("Running " + bld.toString());
  ProcessBuilder pb = new ProcessBuilder(cmd);
  pb.directory(this.output);
  Process proc = null;
  int retCode = -1;
  OutputBufferThread stdoutThread = null;
  OutputBufferThread stderrThread = null;
  try {
    proc = pb.start();
    stdoutThread = new OutputBufferThread(proc.getInputStream());
    stderrThread = new OutputBufferThread(proc.getErrorStream());
    stdoutThread.start();
    stderrThread.start();
    retCode = proc.waitFor();
    if (retCode != 0) {
      throw new MojoExecutionException("make failed with error code " + retCode);
    }
  } catch (InterruptedException e) {
    throw new MojoExecutionException("Interrupted during Process#waitFor", e);
  } catch (IOException e) {
    throw new MojoExecutionException("Error executing make", e);
  } finally {
    if (stdoutThread != null) {
      try {
        stdoutThread.join();
      } catch (InterruptedException e) {
        this.getLog().error("Interrupted while joining stdoutThread", e);
      }
      // Replay captured stdout only when the build failed.
      if (retCode != 0) {
        for (String line : stdoutThread.getOutput()) {
          this.getLog().warn(line);
        }
      }
    }
    if (stderrThread != null) {
      try {
        stderrThread.join();
      } catch (InterruptedException e) {
        this.getLog().error("Interrupted while joining stderrThread", e);
      }
      // stderr is always surfaced, even on success (matches prior behavior).
      for (String line : stderrThread.getOutput()) {
        this.getLog().warn(line);
      }
    }
    if (proc != null) {
      proc.destroy();
    }
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-maven-plugins/src/main/java/org/smartdata/maven/plugin/cmakebuilder/TestMojo.java | smart-maven-plugins/src/main/java/org/smartdata/maven/plugin/cmakebuilder/TestMojo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.maven.plugin.cmakebuilder;
import org.apache.maven.execution.MavenSession;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.smartdata.maven.plugin.util.Exec;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
@Mojo(name = "cmake-test", defaultPhase = LifecyclePhase.TEST)
public class TestMojo extends AbstractMojo {
private static final String ALL_NATIVE = "allNative";
@Parameter(required = true)
private File binary;
@Parameter private String testName;
@Parameter private Map<String, String> env;
@Parameter private List<String> args = new LinkedList();
@Parameter(defaultValue = "600")
private int timeout;
@Parameter private File workingDirectory;
@Parameter(defaultValue = "native-results")
private File results;
@Parameter private Map<String, String> preconditions = new HashMap();
@Parameter(defaultValue = "false")
private boolean skipIfMissing;
@Parameter(defaultValue = "success")
private String expectedResult;
@Parameter(defaultValue = "${session}", readonly = true, required = true)
private MavenSession session;
private static final String VALID_PRECONDITION_TYPES_STR =
"Valid precondition types are \"and\", \"andNot\"";
public TestMojo() {}
private static void validatePlatform() throws MojoExecutionException {
if (System.getProperty("os.name").toLowerCase(Locale.ENGLISH).startsWith("windows")) {
throw new MojoExecutionException("CMakeBuilder does not yet support the Windows platform.");
}
}
private void writeStatusFile(String status) throws IOException {
FileOutputStream fos = new FileOutputStream(new File(this.results, this.testName + ".pstatus"));
BufferedWriter out = null;
try {
out = new BufferedWriter(new OutputStreamWriter(fos, "UTF8"));
out.write(status + "\n");
} finally {
if (out != null) {
out.close();
} else {
fos.close();
}
}
}
private static boolean isTruthy(String str) {
return str == null
? false
: (str.equalsIgnoreCase("")
? false
: (str.equalsIgnoreCase("false")
? false
: (str.equalsIgnoreCase("no")
? false
: (str.equalsIgnoreCase("off") ? false : !str.equalsIgnoreCase("disable")))));
}
private void validateParameters() throws MojoExecutionException {
if (!this.expectedResult.equals("success")
&& !this.expectedResult.equals("failure")
&& !this.expectedResult.equals("any")) {
throw new MojoExecutionException("expectedResult must be either success, failure, or any");
}
}
private boolean shouldRunTest() throws MojoExecutionException {
String skipTests = this.session.getSystemProperties().getProperty("skipTests");
if (isTruthy(skipTests)) {
this.getLog().info("skipTests is in effect for test " + this.testName);
return false;
} else if (!this.binary.exists()) {
if (this.skipIfMissing) {
this.getLog().info("Skipping missing test " + this.testName);
return false;
} else {
throw new MojoExecutionException(
"Test " + this.binary + " was not built! (File does not exist.)");
}
} else {
String testProp = this.session.getSystemProperties().getProperty("test");
if (testProp != null) {
String[] testPropArr = testProp.split(",");
boolean found = false;
String[] var5 = testPropArr;
int var6 = testPropArr.length;
for (int var7 = 0; var7 < var6; ++var7) {
String test = var5[var7];
if (test.equals("allNative")) {
found = true;
break;
}
if (test.equals(this.testName)) {
found = true;
break;
}
}
if (!found) {
this.getLog().debug("did not find test '" + this.testName + "' in list " + testProp);
return false;
}
}
if (this.preconditions != null) {
int idx = 1;
for (Iterator var10 = this.preconditions.entrySet().iterator(); var10.hasNext(); ++idx) {
Entry<String, String> entry = (Entry) var10.next();
String key = (String) entry.getKey();
String val = (String) entry.getValue();
if (key == null) {
throw new MojoExecutionException(
"NULL is not a valid precondition type. "
+ "Valid precondition types are \"and\", \"andNot\"");
}
if (key.equals("and")) {
if (!isTruthy(val)) {
this.getLog()
.info(
"Skipping test "
+ this.testName
+ " because precondition number "
+ idx
+ " was not met.");
return false;
}
} else {
if (!key.equals("andNot")) {
throw new MojoExecutionException(
key
+ " is not a valid precondition type. "
+ "Valid precondition types are \"and\", \"andNot\"");
}
if (isTruthy(val)) {
this.getLog()
.info(
"Skipping test "
+ this.testName
+ " because negative precondition number "
+ idx
+ " was met.");
return false;
}
}
}
}
return true;
}
}
public void execute() throws MojoExecutionException {
if (this.testName == null) {
this.testName = this.binary.getName();
}
validatePlatform();
this.validateParameters();
if (this.shouldRunTest()) {
if (!this.results.isDirectory() && !this.results.mkdirs()) {
throw new MojoExecutionException(
"Failed to create output directory '" + this.results + "'!");
} else {
List<String> cmd = new LinkedList();
cmd.add(this.binary.getAbsolutePath());
this.getLog().info("-------------------------------------------------------");
this.getLog().info(" C M A K E B U I L D E R T E S T");
this.getLog().info("-------------------------------------------------------");
StringBuilder bld = new StringBuilder();
bld.append(this.testName).append(": running ");
bld.append(this.binary.getAbsolutePath());
Iterator var3 = this.args.iterator();
while (var3.hasNext()) {
String entry = (String) var3.next();
cmd.add(entry);
bld.append(" ").append(entry);
}
this.getLog().info(bld.toString());
ProcessBuilder pb = new ProcessBuilder(cmd);
Exec.addEnvironment(pb, this.env);
if (this.workingDirectory != null) {
pb.directory(this.workingDirectory);
}
pb.redirectError(new File(this.results, this.testName + ".stderr"));
pb.redirectOutput(new File(this.results, this.testName + ".stdout"));
this.getLog().info("with extra environment variables " + Exec.envToString(this.env));
Process proc = null;
TestMojo.TestThread testThread = null;
int retCode = -1;
String status = "IN_PROGRESS";
try {
this.writeStatusFile(status);
} catch (IOException var23) {
throw new MojoExecutionException("Error writing the status file", var23);
}
long start = System.nanoTime();
try {
proc = pb.start();
testThread = new TestMojo.TestThread(proc);
testThread.start();
testThread.join((long) (this.timeout * 1000));
if (!testThread.isAlive()) {
retCode = testThread.retCode();
testThread = null;
proc = null;
}
} catch (IOException var24) {
throw new MojoExecutionException(
"IOException while executing the test " + this.testName, var24);
} catch (InterruptedException var25) {
throw new MojoExecutionException(
"Interrupted while executing the test " + this.testName, var25);
} finally {
if (testThread != null) {
testThread.interrupt();
try {
testThread.join();
} catch (InterruptedException var22) {
this.getLog().error("Interrupted while waiting for testThread", var22);
}
status = "TIMED OUT";
} else if (retCode == 0) {
status = "SUCCESS";
} else {
status = "ERROR CODE " + String.valueOf(retCode);
}
try {
this.writeStatusFile(status);
} catch (Exception var21) {
this.getLog().error("failed to write status file!", var21);
}
if (proc != null) {
proc.destroy();
}
}
long end = System.nanoTime();
this.getLog()
.info(
"STATUS: "
+ status
+ " after "
+ TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS)
+ " millisecond(s).");
this.getLog().info("-------------------------------------------------------");
if (status.equals("TIMED_OUT")) {
if (this.expectedResult.equals("success")) {
throw new MojoExecutionException(
"Test " + this.binary + " timed out after " + this.timeout + " seconds!");
}
} else if (!status.equals("SUCCESS")) {
if (this.expectedResult.equals("success")) {
throw new MojoExecutionException("Test " + this.binary + " returned " + status);
}
} else if (this.expectedResult.equals("failure")) {
throw new MojoExecutionException(
"Test " + this.binary + " succeeded, but we expected failure!");
}
}
}
}
private static class TestThread extends Thread {
private Process proc;
private int retCode = -1;
public TestThread(Process proc) {
this.proc = proc;
}
public void run() {
try {
this.retCode = this.proc.waitFor();
} catch (InterruptedException var2) {
this.retCode = -1;
}
}
public int retCode() {
return this.retCode;
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio-client/src/main/java/org/smartdata/alluxio/filesystem/FileSystemFactory.java | smart-alluxio-support/smart-alluxio-client/src/main/java/org/smartdata/alluxio/filesystem/FileSystemFactory.java | package org.smartdata.alluxio.filesystem;
import alluxio.client.file.FileSystem;
import alluxio.client.file.FileSystemContext;
import java.io.IOException;
/**
* Factory for {@link FileSystemFactory}.
* Usage:
* FileSystem fs = FileSystemFactory.get();
* InputStream is = fs.openFile(new AlluxioURI("/path1/file1"));
*/
public class FileSystemFactory {

  // Static factory only; never instantiated.
  private FileSystemFactory() {}

  /**
   * Returns a {@link FileSystem} backed by the process-wide
   * {@link FileSystemContext#INSTANCE}.
   *
   * @return a smart Alluxio file system client
   * @throws IOException declared for API compatibility
   */
  public static FileSystem get() throws IOException {
    return get(FileSystemContext.INSTANCE);
  }

  /**
   * Returns a {@link FileSystem} bound to the supplied context.
   *
   * @param context the file system context to use
   * @return a smart Alluxio file system client
   */
  public static FileSystem get(FileSystemContext context) {
    return SmartAlluxioBaseFileSystem.get(context);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio-client/src/main/java/org/smartdata/alluxio/filesystem/SmartAlluxioBaseFileSystem.java | smart-alluxio-support/smart-alluxio-client/src/main/java/org/smartdata/alluxio/filesystem/SmartAlluxioBaseFileSystem.java | package org.smartdata.alluxio.filesystem;
import alluxio.AlluxioURI;
import alluxio.client.file.BaseFileSystem;
import alluxio.client.file.FileInStream;
import alluxio.client.file.FileSystemContext;
import alluxio.client.file.options.OpenFileOptions;
import alluxio.exception.AlluxioException;
import alluxio.exception.FileDoesNotExistException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.client.SmartClient;
import org.smartdata.conf.SmartConf;
import org.smartdata.metrics.FileAccessEvent;
import java.io.IOException;
/**
 * An Alluxio {@link BaseFileSystem} that reports every file-open to the SSM
 * SmartServer so access patterns can be collected.
 */
public class SmartAlluxioBaseFileSystem extends BaseFileSystem {
  protected static final Logger LOG = LoggerFactory.getLogger(SmartAlluxioBaseFileSystem.class);

  // Client used to report file access events to the SmartServer.
  private SmartClient smartClient = null;

  protected SmartAlluxioBaseFileSystem(FileSystemContext context) {
    super(context);
    try {
      smartClient = new SmartClient(new SmartConf());
    } catch (IOException e) {
      // Fix: pass the throwable so the stack trace is not lost.
      LOG.error(e.getMessage(), e);
      // NOTE(review): terminating the whole JVM from a library constructor is
      // drastic; kept as-is for backward compatibility with existing behavior.
      System.exit(-1);
    }
  }

  /**
   * @param context file system context
   * @return a {@link SmartAlluxioBaseFileSystem}
   */
  public static SmartAlluxioBaseFileSystem get(FileSystemContext context) {
    return new SmartAlluxioBaseFileSystem(context);
  }

  /**
   * Opens a file for reading, reporting the access to the SmartServer first.
   */
  @Override
  public FileInStream openFile(AlluxioURI uri)
      throws FileDoesNotExistException, IOException, AlluxioException {
    // NOTE(review): this overload reports before opening, while the options
    // overload reports after a successful open — confirm which is intended.
    reportFileAccessEvent(uri.getPath());
    return super.openFile(uri);
  }

  /**
   * Opens a file for reading with options, reporting the access to the
   * SmartServer after the open succeeds.
   */
  @Override
  public FileInStream openFile(AlluxioURI uri, OpenFileOptions options)
      throws FileDoesNotExistException, IOException, AlluxioException {
    FileInStream fis = super.openFile(uri, options);
    reportFileAccessEvent(uri.getPath());
    return fis;
  }

  /**
   * Best-effort report of a file access; failures are logged, never thrown,
   * so reporting problems cannot break reads.
   */
  private void reportFileAccessEvent(String src) {
    try {
      smartClient.reportFileAccessEvent(new FileAccessEvent(src));
    } catch (IOException e) {
      // Fix: include the exception so the failure cause is visible in logs.
      LOG.error("Can not report file access event to SmartServer: " + src, e);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/TestAlluxioActions.java | smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/TestAlluxioActions.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import static org.junit.Assert.*;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import alluxio.Configuration;
import alluxio.PropertyKey;
import alluxio.master.file.FileSystemMaster;
import alluxio.util.io.PathUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import alluxio.AlluxioURI;
import alluxio.client.WriteType;
import alluxio.client.file.FileOutStream;
import alluxio.client.file.FileSystem;
import alluxio.client.file.URIStatus;
import alluxio.client.file.options.CreateFileOptions;
import alluxio.master.LocalAlluxioCluster;
/**
 * Integration tests for the Alluxio SSM actions (persist, load, free, setTtl,
 * pin/unpin) against a {@link LocalAlluxioCluster}.
 *
 * <p>All JUnit assertEquals calls use the (expected, actual) argument order;
 * several were previously reversed, which produces misleading failure
 * messages.
 */
public class TestAlluxioActions {
  LocalAlluxioCluster mLocalAlluxioCluster;
  FileSystem fs;

  @Before
  public void setUp() throws Exception {
    mLocalAlluxioCluster = new LocalAlluxioCluster(2);
    mLocalAlluxioCluster.initConfiguration();
    Configuration.set(PropertyKey.WEB_RESOURCES,
        PathUtils.concatPath(System.getProperty("user.dir"), "src/test/webapp"));
    mLocalAlluxioCluster.start();
    fs = mLocalAlluxioCluster.getClient();
  }

  @After
  public void tearDown() throws Exception {
    if (mLocalAlluxioCluster != null) {
      mLocalAlluxioCluster.stop();
    }
  }

  /** A MUST_CACHE file becomes PERSISTED after running PersistAction. */
  @Test
  public void testPersistAction() throws Exception {
    // Write a file that is cached only, not persisted.
    fs.createDirectory(new AlluxioURI("/dir1"));
    CreateFileOptions options = CreateFileOptions.defaults().setWriteType(
        WriteType.MUST_CACHE);
    FileOutStream fos = fs.createFile(new AlluxioURI("/dir1/file1"), options);
    fos.write(new byte[] { 1 });
    fos.close();
    // Precondition: not persisted yet.
    URIStatus status1 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals("NOT_PERSISTED", status1.getPersistenceState());
    // Run the persist action.
    PersistAction persistAction = new PersistAction();
    Map<String, String> args = new HashMap<>();
    args.put("-path", "/dir1/file1");
    persistAction.init(args);
    persistAction.execute();
    // Postcondition: persisted.
    URIStatus status2 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals("PERSISTED", status2.getPersistenceState());
  }

  /** A THROUGH-written file becomes fully cached after LoadAction. */
  @Test
  public void testLoadAction() throws Exception {
    // Write a file straight through to the UFS, bypassing the cache.
    fs.createDirectory(new AlluxioURI("/dir1"));
    CreateFileOptions options = CreateFileOptions.defaults().setWriteType(
        WriteType.THROUGH);
    FileOutStream fos = fs.createFile(new AlluxioURI("/dir1/file1"), options);
    fos.write(new byte[] { 1 });
    fos.close();
    // Precondition: nothing in memory.
    URIStatus status1 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals(0, status1.getInMemoryPercentage());
    // Run the load action.
    LoadAction loadAction = new LoadAction();
    Map<String, String> args = new HashMap<>();
    args.put("-path", "/dir1/file1");
    loadAction.init(args);
    loadAction.execute();
    // Postcondition: fully cached.
    URIStatus status2 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals(100, status2.getInMemoryPercentage());
  }

  /** A fully cached file is evicted from memory after FreeAction. */
  @Test
  public void testFreeAction() throws Exception {
    // Write a file that is both cached and persisted.
    fs.createDirectory(new AlluxioURI("/dir1"));
    CreateFileOptions options = CreateFileOptions.defaults().setWriteType(
        WriteType.CACHE_THROUGH);
    FileOutStream fos = fs.createFile(new AlluxioURI("/dir1/file1"), options);
    fos.write(new byte[] { 1 });
    fos.close();
    // Precondition: fully cached.
    URIStatus status1 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals(100, status1.getInMemoryPercentage());
    // Run the free action.
    FreeAction freeAction = new FreeAction();
    Map<String, String> args = new HashMap<>();
    args.put("-path", "/dir1/file1");
    freeAction.init(args);
    freeAction.execute();
    // Freeing is asynchronous; poll (up to 10s) instead of a single fixed
    // sleep, which was flaky on slow machines.
    long deadline = System.currentTimeMillis() + 10000;
    while (fs.getStatus(new AlluxioURI("/dir1/file1")).getInMemoryPercentage() != 0
        && System.currentTimeMillis() < deadline) {
      Thread.sleep(200);
    }
    // Postcondition: no longer in memory.
    URIStatus status2 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals(0, status2.getInMemoryPercentage());
  }

  /** SetTTLAction applies the requested TTL to an existing file. */
  @Test
  public void testSetTTLAction() throws Exception {
    // Write a cached-and-persisted file.
    fs.createDirectory(new AlluxioURI("/dir1"));
    CreateFileOptions options = CreateFileOptions.defaults().setWriteType(
        WriteType.CACHE_THROUGH);
    FileOutStream fos = fs.createFile(new AlluxioURI("/dir1/file1"), options);
    fos.write(new byte[] { 1 });
    fos.close();
    // Precondition: no TTL set.
    URIStatus status1 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals(-1, status1.getTtl());
    // Run the TTL action.
    SetTTLAction setTTLAction = new SetTTLAction();
    Map<String, String> args = new HashMap<>();
    args.put("-path", "/dir1/file1");
    args.put("TTL", "10000");
    setTTLAction.init(args);
    setTTLAction.execute();
    // Postcondition: TTL applied.
    URIStatus status2 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    assertEquals(10000, status2.getTtl());
  }

  /** PinAction adds the file to the master pin list; UnpinAction removes it. */
  @Test
  public void testPinUnpinAction() throws Exception {
    // Write a cached-and-persisted file.
    fs.createDirectory(new AlluxioURI("/dir1"));
    CreateFileOptions options = CreateFileOptions.defaults().setWriteType(
        WriteType.CACHE_THROUGH);
    FileOutStream fos = fs.createFile(new AlluxioURI("/dir1/file1"), options);
    fos.write(new byte[] { 1 });
    fos.close();
    // Precondition: file not pinned.
    URIStatus status1 = fs.getStatus(new AlluxioURI("/dir1/file1"));
    Set<Long> pinSet1 = mLocalAlluxioCluster.getLocalAlluxioMaster().getMasterProcess()
        .getMaster(FileSystemMaster.class).getPinIdList();
    assertFalse(pinSet1.contains(status1.getFileId()));
    // Run the pin action.
    PinAction pinAction = new PinAction();
    Map<String, String> args = new HashMap<>();
    args.put("-path", "/dir1/file1");
    pinAction.init(args);
    pinAction.execute();
    // File is now pinned.
    Set<Long> pinSet2 = mLocalAlluxioCluster.getLocalAlluxioMaster().getMasterProcess()
        .getMaster(FileSystemMaster.class).getPinIdList();
    assertTrue(pinSet2.contains(status1.getFileId()));
    // Run the unpin action.
    UnpinAction unpinAction = new UnpinAction();
    Map<String, String> args1 = new HashMap<>();
    args1.put("-path", "/dir1/file1");
    unpinAction.init(args1);
    unpinAction.execute();
    // File is unpinned again.
    Set<Long> pinSet3 = mLocalAlluxioCluster.getLocalAlluxioMaster().getMasterProcess()
        .getMaster(FileSystemMaster.class).getPinIdList();
    assertFalse(pinSet3.contains(status1.getFileId()));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/metric/fetcher/TestAlluxioEntryApplier.java | smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/metric/fetcher/TestAlluxioEntryApplier.java | package org.smartdata.alluxio.action.metric.fetcher;
import alluxio.AlluxioURI;
import alluxio.client.file.FileSystem;
import alluxio.client.file.URIStatus;
import alluxio.proto.journal.File.*;
import alluxio.proto.journal.Journal.JournalEntry;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.smartdata.alluxio.metric.fetcher.AlluxioEntryApplier;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.BackUpInfo;
import org.smartdata.model.FileDiff;
import org.smartdata.model.FileDiffType;
import org.smartdata.model.FileInfo;
import java.util.List;
public class TestAlluxioEntryApplier extends TestDaoUtil {
private MetaStore metaStore;
@Before
public void setUp() throws Exception {
initDao();
metaStore = new MetaStore(druidPool);
}
@After
public void tearDown() throws Exception {
closeDao();
}
@Test
public void testInodeDirectoryApplier() throws Exception {
FileSystem fs = Mockito.mock(FileSystem.class);
AlluxioEntryApplier entryApplier = new AlluxioEntryApplier(metaStore, fs);
FileInfo rootDir = FileInfo.newBuilder()
.setFileId(0)
.setIsdir(true)
.setPath("/")
.build();
metaStore.insertFile(rootDir);
alluxio.wire.FileInfo info1 = new alluxio.wire.FileInfo()
.setFileId(1)
.setPath("/dir1")
.setLength(0L)
.setFolder(true)
.setBlockSizeBytes(1000000)
.setLastModificationTimeMs(1528876616216L)
.setCreationTimeMs(1528876616216L)
.setMode(493)
.setOwner("user1")
.setGroup("group1");
URIStatus status1 = new URIStatus(info1);
Mockito.when(fs.getStatus(new AlluxioURI("/dir1"))).thenReturn(status1);
InodeDirectoryEntry inodeDirectoryEntry = InodeDirectoryEntry.newBuilder()
.setId(1)
.setParentId(0)
.setName("dir1")
.setPersistenceState("NOT_PERSISTED")
.setPinned(false)
.setCreationTimeMs(1528876616216L)
.setLastModificationTimeMs(1528876616216L)
.setOwner("user1")
.setGroup("group1")
.setMode(493)
.setMountPoint(false)
.setDirectChildrenLoaded(false)
.setTtl(-1L)
.setTtlAction(PTtlAction.DELETE)
.build();
JournalEntry inodeDirectoryJEntry = JournalEntry.newBuilder()
.setInodeDirectory(inodeDirectoryEntry)
.build();
entryApplier.apply(inodeDirectoryJEntry);
Assert.assertTrue(metaStore.getFile().get(0).getPath().equals("/"));
Assert.assertTrue(metaStore.getFile().get(1).getPath().equals("/dir1"));
Assert.assertEquals("user1", metaStore.getFile("/dir1").getOwner());
Assert.assertEquals(1528876616216L, metaStore.getFile("/dir1").getModificationTime());
}
@Test
public void testInodeFileApplier() throws Exception {
FileSystem fs = Mockito.mock(FileSystem.class);
AlluxioEntryApplier entryApplier = new AlluxioEntryApplier(metaStore, fs);
FileInfo fooDir = FileInfo.newBuilder()
.setFileId(6)
.setIsdir(true)
.setPath("/foo")
.build();
metaStore.insertFile(fooDir);
BackUpInfo backUpInfo = new BackUpInfo(1L, "/foo/foobar1", "remote/dest/", 10);
metaStore.insertBackUpInfo(backUpInfo);
alluxio.wire.FileInfo info1 = new alluxio.wire.FileInfo()
.setFileId(33554431)
.setPath("/foo/foobar1")
.setLength(10L)
.setFolder(false)
.setBlockSizeBytes(536870912)
.setLastModificationTimeMs(1515665470681L)
.setCreationTimeMs(1515665470681L)
.setMode(420)
.setOwner("user1")
.setGroup("group1");
URIStatus status1 = new URIStatus(info1);
Mockito.when(fs.getStatus(new AlluxioURI("/foo/foobar1"))).thenReturn(status1);
InodeFileEntry inodeFileEntry = InodeFileEntry.newBuilder()
.setId(33554431)
.setParentId(6)
.setName("foobar1")
.setPersistenceState("NOT_PERSISTED")
.setPinned(false)
.setCreationTimeMs(1515665470681L)
.setBlockSizeBytes(536870912)
.setLength(10L)
.setCompleted(false)
.setCacheable(true)
.setTtl(-1L)
.setOwner("user1")
.setGroup("group1")
.setMode(420)
.setTtlAction(PTtlAction.DELETE)
.build();
JournalEntry inodeFileJEntry = JournalEntry.newBuilder()
.setInodeFile(inodeFileEntry)
.build();
entryApplier.apply(inodeFileJEntry);
Assert.assertEquals(33554431, metaStore.getFile("/foo/foobar1").getFileId());
Assert.assertEquals("user1", metaStore.getFile("/foo/foobar1").getOwner());
Assert.assertEquals(536870912, metaStore.getFile("/foo/foobar1").getBlocksize());
List<FileDiff> fileDiffs = metaStore.getFileDiffsByFileName("/foo/foobar1");
Assert.assertTrue(fileDiffs.size() > 0);
for (FileDiff fileDiff : fileDiffs) {
if (fileDiff.getDiffType().equals(FileDiffType.APPEND)) {
Assert.assertTrue(fileDiff.getParameters().get("-owner").equals("user1"));
Assert.assertTrue(fileDiff.getParameters().get("-mtime").equals("1515665470681"));
Assert.assertTrue(fileDiff.getParameters().get("-length").equals("10"));
}
}
}
@Test
public void testInodeLastMTimeApplier() throws Exception {
FileSystem fs = Mockito.mock(FileSystem.class);
AlluxioEntryApplier entryApplier = new AlluxioEntryApplier(metaStore, fs);
FileInfo fooFile = FileInfo.newBuilder()
.setFileId(5)
.setIsdir(true)
.setPath("/baz")
.build();
metaStore.insertFile(fooFile);
BackUpInfo backUpInfo = new BackUpInfo(1L, "/baz", "remote/dest/", 10);
metaStore.insertBackUpInfo(backUpInfo);
alluxio.wire.FileInfo info1 = new alluxio.wire.FileInfo()
.setFileId(5)
.setPath("/baz")
.setLength(0L)
.setFolder(true)
.setBlockSizeBytes(1000000)
.setLastModificationTimeMs(1515665470681L)
.setCreationTimeMs(1515665470681L)
.setMode(493)
.setOwner("user1")
.setGroup("group1");
URIStatus status1 = new URIStatus(info1);
Mockito.when(fs.getStatus(new AlluxioURI("/baz"))).thenReturn(status1);
InodeLastModificationTimeEntry inodeLastMTimeEntry = InodeLastModificationTimeEntry.newBuilder()
.setId(5)
.setLastModificationTimeMs(1515667810911L)
.build();
JournalEntry inodeLastMTimeJEntry = JournalEntry.newBuilder()
.setInodeLastModificationTime(inodeLastMTimeEntry)
.build();
entryApplier.apply(inodeLastMTimeJEntry);
List<FileDiff> fileDiffs = metaStore.getFileDiffsByFileName("/baz");
Assert.assertTrue(fileDiffs.size() > 0);
for (FileDiff fileDiff : fileDiffs) {
if (fileDiff.getDiffType().equals(FileDiffType.METADATA)) {
Assert.assertEquals("/baz", fileDiff.getSrc());
Assert.assertEquals("1515667810911", fileDiff.getParameters().get("-mtime"));
}
}
}
@Test
public void testSetAttributeApplier() throws Exception {
FileSystem fs = Mockito.mock(FileSystem.class);
AlluxioEntryApplier entryApplier = new AlluxioEntryApplier(metaStore, fs);
FileInfo fooFile = FileInfo.newBuilder()
.setFileId(33554431)
.setIsdir(false)
.setPath("/foo/foobar")
.build();
metaStore.insertFile(fooFile);
BackUpInfo backUpInfo = new BackUpInfo(1L, "/foo/foobar", "remote/dest/", 10);
metaStore.insertBackUpInfo(backUpInfo);
alluxio.wire.FileInfo info1 = new alluxio.wire.FileInfo()
.setFileId(33554431)
.setPath("/foo/foobar")
.setLength(100L)
.setFolder(false)
.setBlockSizeBytes(210000)
.setLastModificationTimeMs(1515665470681L)
.setCreationTimeMs(1515665470681L)
.setMode(493)
.setOwner("user1")
.setGroup("group1");
URIStatus status1 = new URIStatus(info1);
Mockito.when(fs.getStatus(new AlluxioURI("/foo/foobar"))).thenReturn(status1);
SetAttributeEntry setAttributeEntry = SetAttributeEntry.newBuilder()
.setId(33554431)
.setOpTimeMs(1515667208590658L)
.setPermission(511)
.build();
JournalEntry setAttributeJEntry = JournalEntry.newBuilder()
.setSetAttribute(setAttributeEntry)
.build();
entryApplier.apply(setAttributeJEntry);
List<FileDiff> fileDiffs = metaStore.getFileDiffsByFileName("/foo/foobar");
Assert.assertTrue(fileDiffs.size() > 0);
for (FileDiff fileDiff : fileDiffs) {
if (fileDiff.getDiffType().equals(FileDiffType.METADATA)) {
Assert.assertEquals("511", fileDiff.getParameters().get("-permission"));
}
}
}
@Test
public void testRenameApplier() throws Exception {
FileSystem fs = Mockito.mock(FileSystem.class);
AlluxioEntryApplier entryApplier = new AlluxioEntryApplier(metaStore, fs);
FileInfo fooFile = FileInfo.newBuilder()
.setFileId(50331647)
.setIsdir(false)
.setPath("/bar/foobar1")
.build();
metaStore.insertFile(fooFile);
BackUpInfo backUpInfo = new BackUpInfo(1L, "/bar/foobar1", "remote/dest/", 10);
metaStore.insertBackUpInfo(backUpInfo);
alluxio.wire.FileInfo info1 = new alluxio.wire.FileInfo()
.setFileId(50331647)
.setPath("/bar/foobar1")
.setLength(300L)
.setFolder(false)
.setBlockSizeBytes(310000)
.setLastModificationTimeMs(1515665270681L)
.setCreationTimeMs(1515665270681L)
.setMode(493)
.setOwner("user1")
.setGroup("group1");
URIStatus status1 = new URIStatus(info1);
Mockito.when(fs.getStatus(new AlluxioURI("/bar/foobar1"))).thenReturn(status1);
RenameEntry renameEntry = RenameEntry.newBuilder()
.setId(50331647)
.setOpTimeMs(1515666148444L)
.setDstPath("/bar/foobar1_new")
.build();
JournalEntry renameJEntry = JournalEntry.newBuilder()
.setRename(renameEntry)
.build();
entryApplier.apply(renameJEntry);
List<FileDiff> fileDiffs = metaStore.getFileDiffsByFileName("/bar/foobar1");
Assert.assertTrue(fileDiffs.size() > 0);
for (FileDiff fileDiff : fileDiffs) {
if (fileDiff.getDiffType().equals(FileDiffType.RENAME)) {
Assert.assertEquals("/bar/foobar1", fileDiff.getSrc());
Assert.assertEquals("/bar/foobar1_new", fileDiff.getParameters().get("-dest"));
}
}
}
@Test
public void testDeleteFileApplier() throws Exception {
  FileSystem fs = Mockito.mock(FileSystem.class);
  AlluxioEntryApplier applier = new AlluxioEntryApplier(metaStore, fs);
  // The file to be deleted must already be known to the metastore.
  FileInfo targetFile = FileInfo.newBuilder()
      .setFileId(100663295)
      .setIsdir(false)
      .setPath("/foo/foobar_del")
      .build();
  metaStore.insertFile(targetFile);
  metaStore.insertBackUpInfo(new BackUpInfo(1L, "/foo/foobar_del", "remote/dest/", 10));
  // Stub the Alluxio client so the applier can resolve the file's status.
  alluxio.wire.FileInfo wireInfo = new alluxio.wire.FileInfo()
      .setFileId(100663295)
      .setPath("/foo/foobar_del")
      .setLength(500L)
      .setFolder(false)
      .setBlockSizeBytes(510000)
      .setLastModificationTimeMs(1515665270681L)
      .setCreationTimeMs(1515665270681L)
      .setMode(493)
      .setOwner("user1")
      .setGroup("group1");
  Mockito.when(fs.getStatus(new AlluxioURI("/foo/foobar_del")))
      .thenReturn(new URIStatus(wireInfo));
  // Apply a DeleteFile journal entry for the target file.
  DeleteFileEntry deleteEntry = DeleteFileEntry.newBuilder()
      .setId(100663295)
      .setOpTimeMs(1515737580798L)
      .setAlluxioOnly(true)
      .setRecursive(false)
      .build();
  applier.apply(JournalEntry.newBuilder().setDeleteFile(deleteEntry).build());
  // A DELETE file diff with the original path as src should be recorded.
  List<FileDiff> diffs = metaStore.getFileDiffsByFileName("/foo/foobar_del");
  Assert.assertFalse(diffs.isEmpty());
  for (FileDiff diff : diffs) {
    if (diff.getDiffType().equals(FileDiffType.DELETE)) {
      Assert.assertEquals("/foo/foobar_del", diff.getSrc());
    }
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/metric/fetcher/TestAlluxioEntryFetcher.java | smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/metric/fetcher/TestAlluxioEntryFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action.metric.fetcher;
import alluxio.AlluxioURI;
import alluxio.Configuration;
import alluxio.PropertyKey;
import alluxio.client.WriteType;
import alluxio.client.file.FileSystem;
import alluxio.client.file.FileSystemTestUtils;
import alluxio.client.file.options.*;
import alluxio.master.LocalAlluxioCluster;
import alluxio.master.journal.JournalUtils;
import alluxio.proto.journal.Journal.JournalEntry;
import alluxio.security.authorization.Mode;
import alluxio.underfs.UnderFileSystem;
import alluxio.util.io.PathUtils;
import org.junit.*;
import org.junit.rules.TemporaryFolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.alluxio.metric.fetcher.AlluxioEntryApplier;
import org.smartdata.alluxio.metric.fetcher.AlluxioEntryFetcher;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.TestDaoUtil;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
/**
 * Integration test for {@link AlluxioEntryFetcher}: starts a local Alluxio
 * cluster, performs a scripted sequence of filesystem operations, and checks
 * that the fetcher delivers the resulting journal entries in order.
 */
public class TestAlluxioEntryFetcher extends TestDaoUtil {
  public static final Logger LOG = LoggerFactory.getLogger(TestAlluxioEntryFetcher.class);

  // Local in-process Alluxio cluster with 2 workers, rebuilt per test.
  LocalAlluxioCluster localAlluxioCluster;
  FileSystem fs;
  MetaStore metaStore;

  @Rule
  public TemporaryFolder mTemporaryFolder = new TemporaryFolder();

  /** Boots the local Alluxio cluster and a fresh metastore backed by the test DAO. */
  @Before
  public void setUp() throws Exception {
    localAlluxioCluster = new LocalAlluxioCluster(2);
    localAlluxioCluster.initConfiguration();
    Configuration.set(PropertyKey.WEB_RESOURCES,
        PathUtils.concatPath(System.getProperty("user.dir"), "src/test/webapp"));
    localAlluxioCluster.start();
    fs = localAlluxioCluster.getClient();
    initDao();
    metaStore = new MetaStore(druidPool);
  }

  /** Stops the cluster (if it started) and closes the DAO resources. */
  @After
  public void tearDown() throws Exception {
    if (localAlluxioCluster != null) {
      localAlluxioCluster.stop();
    }
    closeDao();
  }

  @Test
  public void testEntryFetcher() throws Exception {
    URI journalLocation = JournalUtils.getJournalLocation();
    SmartConf conf = new SmartConf();
    conf.set(SmartConfKeys.SMART_ALLUXIO_MASTER_JOURNAL_DIR_KEY, journalLocation.getPath());
    // EntryApplierForTest records the entries instead of applying them, so
    // the test can assert on the exact sequence produced by the fetcher.
    EntryApplierForTest entryApplierForTest = new EntryApplierForTest(metaStore, fs);
    final AlluxioEntryFetcher entryFetcher = new AlluxioEntryFetcher(fs, metaStore,
        Executors.newScheduledThreadPool(2), entryApplierForTest, new Callable() {
      @Override
      public Object call() throws Exception {
        return null; // Do nothing
      }
    }, conf);
    // No sequence number has been persisted yet, so resuming must be impossible.
    Assert.assertFalse(AlluxioEntryFetcher.canReadFromLastSeqNum(100L));
    /**
     * Generate such local structure
     *      ├── foo |
     *            ├── foobar1
     *            └── foobar2
     *      ├── bar |
     *            └── foobar3
     *      └── foobar4
     */
    fs.createDirectory(new AlluxioURI("/foo"));
    fs.createDirectory(new AlluxioURI("/bar"));
    FileSystemTestUtils.createByteFile(fs, "/foo/foobar1", WriteType.CACHE_THROUGH, 10);
    FileSystemTestUtils.createByteFile(fs, "/foo/foobar2", WriteType.CACHE_THROUGH, 20);
    FileSystemTestUtils.createByteFile(fs, "/bar/foobar3", WriteType.CACHE_THROUGH, 30);
    FileSystemTestUtils.createByteFile(fs, "/foobar4", WriteType.CACHE_THROUGH, 40);
    // Run the fetcher on a separate thread; start() blocks while fetching.
    Thread thread = new Thread() {
      public void run() {
        try {
          entryFetcher.start();
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    };
    thread.start();
    // need wait long enough to finish namespace fetcher
    // NOTE(review): fixed 10s sleep — presumably sized for CI; a slow machine
    // could still race this. Confirm before tightening.
    Thread.sleep(10*1000);
    // Each operation below is expected to emit journal entries that the
    // fetcher forwards to the applier; the order is asserted further down.
    fs.setAttribute(new AlluxioURI("/foo/foobar1"), SetAttributeOptions.defaults().setPersisted(true));
    fs.setAttribute(new AlluxioURI("/foo/foobar2"), SetAttributeOptions.defaults().setPinned(true));
    fs.setAttribute(new AlluxioURI("/bar/foobar3"), SetAttributeOptions.defaults().setTtl(1000000L));
    // Mount a local UFS directory to generate an AddMountPoint entry.
    String mLocalUfsRoot = mTemporaryFolder.getRoot().getAbsolutePath();
    UnderFileSystem mLocalUfs = UnderFileSystem.Factory.create(mLocalUfsRoot);
    String mountpath = PathUtils.concatPath(mLocalUfsRoot, "mtd_ufs");
    mLocalUfs.mkdirs(mountpath);
    Assert.assertTrue(new File(mountpath).exists());
    fs.mount(new AlluxioURI("/mtd_t"), new AlluxioURI(mountpath), MountOptions.defaults());
    fs.rename(new AlluxioURI("/foo/foobar1"), new AlluxioURI("/foo/foo1"), RenameOptions.defaults());
    fs.delete(new AlluxioURI("/bar/foobar3"), DeleteOptions.defaults().setRecursive(true));
    fs.createDirectory(new AlluxioURI("/baz"));
    FileSystemTestUtils.createByteFile(fs, "/baz/foobar5", WriteType.CACHE_THROUGH, 50);
    Mode mode = new Mode((short)0755);
    fs.setAttribute(new AlluxioURI("/baz/foobar5"), SetAttributeOptions.defaults().setMode(mode));
    // free action does not generate journal entry
    fs.free(new AlluxioURI("/baz"), FreeOptions.defaults().setRecursive(true));
    // Poll until all 16 expected entries have arrived at the test applier.
    while (entryApplierForTest.getEntries().size() != 16) {
      Thread.sleep(100);
    }
    // Assert the exact type (and key attribute) of each entry, in order.
    List<JournalEntry> entries = entryApplierForTest.getEntries();
    Assert.assertTrue(entries.get(0).hasSetAttribute() && entries.get(0).getSetAttribute().hasPersisted());
    Assert.assertTrue(entries.get(1).hasSetAttribute() && entries.get(1).getSetAttribute().hasPinned());
    Assert.assertTrue(entries.get(2).hasSetAttribute() && entries.get(2).getSetAttribute().hasTtl());
    Assert.assertTrue(entries.get(3).hasInodeLastModificationTime());
    Assert.assertTrue(entries.get(4).hasInodeDirectoryIdGenerator());
    Assert.assertTrue(entries.get(5).hasInodeDirectory());
    Assert.assertTrue(entries.get(6).hasAddMountPoint());
    Assert.assertTrue(entries.get(7).hasRename());
    Assert.assertTrue(entries.get(8).hasDeleteFile());
    Assert.assertTrue(entries.get(9).hasInodeLastModificationTime());
    Assert.assertTrue(entries.get(10).hasInodeDirectoryIdGenerator());
    Assert.assertTrue(entries.get(11).hasInodeDirectory());
    Assert.assertTrue(entries.get(12).hasInodeLastModificationTime());
    Assert.assertTrue(entries.get(13).hasInodeFile());
    Assert.assertTrue(entries.get(14).hasCompleteFile());
    Assert.assertTrue(entries.get(15).hasSetAttribute() && entries.get(15).getSetAttribute().hasPermission());
    entryFetcher.stop();
    // After a clean stop, the last applied sequence number must be persisted
    // and resumable.
    Assert.assertTrue(metaStore.containSystemInfo(SmartConstants.SMART_ALLUXIO_LAST_ENTRY_SN));
    Assert.assertTrue(AlluxioEntryFetcher.canReadFromLastSeqNum(
        Long.parseLong(metaStore.getSystemInfoByProperty(SmartConstants.SMART_ALLUXIO_LAST_ENTRY_SN).getValue())));
  }

  /**
   * Test double that captures every journal entry passed to {@code apply}
   * instead of applying it to the metastore.
   */
  private static class EntryApplierForTest extends AlluxioEntryApplier {
    private List<JournalEntry> entries = new ArrayList<>();

    public EntryApplierForTest(MetaStore metaStore, FileSystem fs) {
      super(metaStore, fs);
    }

    @Override
    public void apply(JournalEntry entry) {
      entries.add(entry);
    }

    public List<JournalEntry> getEntries() {
      return entries;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/metric/fetcher/TestAlluxioNamespaceFetcher.java | smart-alluxio-support/smart-alluxio/src/test/java/org/smartdata/alluxio/action/metric/fetcher/TestAlluxioNamespaceFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action.metric.fetcher;
import alluxio.AlluxioURI;
import alluxio.Configuration;
import alluxio.PropertyKey;
import alluxio.client.WriteType;
import alluxio.client.file.FileOutStream;
import alluxio.client.file.FileSystem;
import alluxio.client.file.options.CreateFileOptions;
import alluxio.exception.AlluxioException;
import alluxio.master.LocalAlluxioCluster;
import alluxio.util.io.PathUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.alluxio.metric.fetcher.AlluxioNamespaceFetcher;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.FileInfo;
import java.io.IOException;
import java.util.concurrent.Executors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
 * Integration test for {@link AlluxioNamespaceFetcher}: builds a small
 * namespace on a local Alluxio cluster and verifies the fetcher mirrors it
 * into the metastore.
 */
public class TestAlluxioNamespaceFetcher extends TestDaoUtil {
  // Upper bound for the namespace fetch; a broken fetcher fails the test
  // instead of hanging it forever.
  private static final long FETCH_TIMEOUT_MS = 60 * 1000;

  LocalAlluxioCluster mLocalAlluxioCluster;
  FileSystem fs;
  MetaStore metaStore;

  /** Boots a 2-worker local Alluxio cluster and a fresh metastore. */
  @Before
  public void setUp() throws Exception {
    mLocalAlluxioCluster = new LocalAlluxioCluster(2);
    mLocalAlluxioCluster.initConfiguration();
    Configuration.set(PropertyKey.WEB_RESOURCES,
        PathUtils.concatPath(System.getProperty("user.dir"), "src/test/webapp"));
    mLocalAlluxioCluster.start();
    fs = mLocalAlluxioCluster.getClient();
    initDao();
    metaStore = new MetaStore(druidPool);
  }

  /** Stops the cluster (if started) and releases DAO resources. */
  @After
  public void tearDown() throws Exception {
    if (mLocalAlluxioCluster != null) {
      mLocalAlluxioCluster.stop();
    }
    closeDao();
  }

  @Test
  public void testNamespaceFetcher() throws Exception {
    // create namespace:
    // /dir1
    // /dir2
    //    -dir21
    //    -dir22
    //       -file221
    //       -file222
    //    -file21
    // /dir3
    //    -file31
    fs.createDirectory(new AlluxioURI("/dir1"));
    fs.createDirectory(new AlluxioURI("/dir2"));
    fs.createDirectory(new AlluxioURI("/dir3"));
    fs.createDirectory(new AlluxioURI("/dir2/dir21"));
    fs.createDirectory(new AlluxioURI("/dir2/dir22"));
    createFile("/dir3/file31");
    createFile("/dir2/dir22/file221");
    createFile("/dir2/dir22/file222");
    createFile("/dir2/file21");
    AlluxioNamespaceFetcher fetcher = new AlluxioNamespaceFetcher(fs, metaStore, 100,
        Executors.newScheduledThreadPool(4));
    fetcher.startFetch();
    // Wait for completion, but bound the wait so a stuck fetcher cannot hang
    // the whole test suite.
    long deadline = System.currentTimeMillis() + FETCH_TIMEOUT_MS;
    while (!fetcher.fetchFinished()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError(
            "Namespace fetch did not finish within " + FETCH_TIMEOUT_MS + " ms");
      }
      Thread.sleep(1000);
    }
    // Grace period for the fetcher's async writes to land in the metastore.
    Thread.sleep(2000);
    // 9 created entries plus the root directory.
    assertEquals(10, metaStore.getFile().size());
    FileInfo dir1 = metaStore.getFile("/dir2/dir22");
    assertTrue(dir1 != null);
    assertTrue(dir1.isdir());
    FileInfo file1 = metaStore.getFile("/dir2/dir22/file221");
    assertTrue(file1 != null);
    assertFalse(file1.isdir());
    assertEquals(1, file1.getBlockReplication());
  }

  /**
   * Creates a one-byte file at {@code path} using MUST_CACHE writes.
   * Failures are printed rather than thrown (matching the original behavior);
   * a missing file then surfaces through the test's own assertions.
   */
  private void createFile(String path) {
    CreateFileOptions options = CreateFileOptions.defaults().setWriteType(
        WriteType.MUST_CACHE);
    // try-with-resources replaces the manual finally/close block and
    // guarantees the stream is closed even if the write fails.
    try (FileOutStream fos = fs.createFile(new AlluxioURI(path), options)) {
      fos.write(new byte[] { 1 });
    } catch (IOException | AlluxioException e) {
      e.printStackTrace();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/alluxio/master/journal/ufs/AlluxioJournalUtil.java | smart-alluxio-support/smart-alluxio/src/main/java/alluxio/master/journal/ufs/AlluxioJournalUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package alluxio.master.journal.ufs;
import alluxio.AlluxioURI;
import alluxio.Configuration;
import alluxio.Constants;
import alluxio.PropertyKey;
import alluxio.master.NoopMaster;
import alluxio.master.journal.JournalReader;
import alluxio.proto.journal.Journal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
* Util for reading the journal entries given a range of sequence numbers.
*/
public class AlluxioJournalUtil {
public static final Logger LOG = LoggerFactory.getLogger(AlluxioJournalUtil.class);
private static String sMaster = Constants.FILE_SYSTEM_MASTER_NAME;
/**
* @param conf smart configuration
* @return the current entry sequence number
*/
public static Long getCurrentSeqNum(SmartConf conf) {
UfsJournal journal =
new UfsJournalSystem(getJournalLocation(conf), 0).createJournal(new NoopMaster(sMaster));
UfsJournalFile currentLog;
try {
currentLog = UfsJournalSnapshot.getCurrentLog(journal);
} catch (IOException e) {
throw new RuntimeException(e);
}
long sn = -1L;
if (currentLog != null) {
try (JournalReader reader = new UfsJournalReader(journal, currentLog.getStart(), true)) {
Journal.JournalEntry entry;
while ((entry = reader.read()) != null) {
sn = entry.getSequenceNumber();
if (sn >= Long.MAX_VALUE) {
break;
}
}
} catch (Exception e) {
LOG.error("Failed to read next journal entry.", e);
}
}
return sn;
}
/**
* @param conf smart configuration
* @param startSn journal entry sequence number
* @return journal reader
*/
public static JournalReader getJournalReaderFromSn(SmartConf conf, Long startSn) {
UfsJournal journal =
new UfsJournalSystem(getJournalLocation(conf), 0).createJournal(new NoopMaster(sMaster));
JournalReader reader = new UfsJournalReader(journal, startSn, true);
return reader;
}
/**
* @param conf smart configuration
* @return the journal location
*/
private static URI getJournalLocation(SmartConf conf) {
String alluxioMasterJournalDir = conf.get(
SmartConfKeys.SMART_ALLUXIO_MASTER_JOURNAL_DIR_KEY, "/opt/alluxio/journal");
Configuration.set(PropertyKey.MASTER_JOURNAL_FOLDER, alluxioMasterJournalDir);
String journalDirectory = Configuration.get(PropertyKey.MASTER_JOURNAL_FOLDER);
if (!journalDirectory.endsWith(AlluxioURI.SEPARATOR)) {
journalDirectory += AlluxioURI.SEPARATOR;
}
try {
return new URI(journalDirectory);
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/AlluxioUtil.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/AlluxioUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio;
import java.io.IOException;
import alluxio.client.file.URIStatus;
import org.smartdata.SmartContext;
import org.smartdata.conf.SmartConfKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import alluxio.Configuration;
import alluxio.PropertyKey;
import alluxio.client.file.FileSystem;
import alluxio.client.file.FileSystemContext;
import org.smartdata.model.FileInfo;
/**
* Contain utils related to alluxio cluster.
*/
public class AlluxioUtil {
  public static final Logger LOG =
      LoggerFactory.getLogger(AlluxioUtil.class);

  /**
   * Builds an Alluxio {@link FileSystem} client for the master host named in
   * the smart configuration (defaults to "localhost").
   *
   * @param context smart context carrying the configuration
   * @return an Alluxio file system client
   * @throws IOException declared for callers; creation itself may not throw
   */
  public static FileSystem getAlluxioFs(SmartContext context) throws IOException {
    String masterHost = context.getConf().get(
        SmartConfKeys.SMART_ALLUXIO_MASTER_HOSTNAME_KEY, "localhost");
    Configuration.set(PropertyKey.MASTER_HOSTNAME, masterHost);
    return FileSystem.Factory.get(FileSystemContext.create());
  }

  /**
   * Maps an Alluxio {@link URIStatus} to the SSM {@link FileInfo} model.
   * Block replication is fixed at 1 and the storage-policy/erasure fields are
   * zeroed, since Alluxio does not supply them.
   */
  public static FileInfo convertFileStatus(URIStatus status) {
    return new FileInfo(
        status.getPath(),
        status.getFileId(),
        status.getLength(),
        status.isFolder(),
        (short) 1,
        status.getBlockSizeBytes(),
        status.getLastModificationTimeMs(),
        status.getCreationTimeMs(),
        (short) status.getMode(),
        status.getOwner(),
        status.getGroup(),
        (byte) 0,
        (byte) 0
    );
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/AlluxioStatesUpdateService.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/AlluxioStatesUpdateService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio;
import alluxio.AlluxioURI;
import alluxio.client.WriteType;
import alluxio.client.file.FileOutStream;
import alluxio.client.file.FileSystem;
import alluxio.client.file.options.CreateFileOptions;
import alluxio.client.file.options.DeleteOptions;
import alluxio.exception.AlluxioException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.alluxio.metric.fetcher.AlluxioEntryFetcher;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.StatesUpdateService;
import java.io.IOException;
import java.net.InetAddress;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
/**
* Polls metrics and events from Alluxio Server
*/
public class AlluxioStatesUpdateService extends StatesUpdateService {
  private static final String ALLUXIO_MOVER_ID_PATH = "/system/alluxio-mover.id";

  // True until the initial entry fetch completes (flipped by the callback).
  private volatile boolean inSafeMode;
  private FileSystem alluxioFs;
  private ScheduledExecutorService executorService;
  private AlluxioEntryFetcher alluxioEntryFetcher;
  // Held open for the service's lifetime; closing it releases the id file.
  private FileOutStream moverIdOutStream;

  public static final Logger LOG =
      LoggerFactory.getLogger(AlluxioStatesUpdateService.class);

  public AlluxioStatesUpdateService(SmartContext context, MetaStore metaStore) {
    super(context, metaStore);
    this.inSafeMode = true;
  }

  /**
   * Load configure/data to initialize: creates the Alluxio client, writes the
   * 'mover' id marker file, and builds the entry fetcher.
   *
   * @throws IOException if the client cannot be created or the marker file
   *         cannot be written
   */
  @Override
  public void init() throws IOException {
    LOG.info("Initializing ...");
    SmartContext context = getContext();
    this.alluxioFs = AlluxioUtil.getAlluxioFs(context);
    this.moverIdOutStream = checkAndMarkRunning(alluxioFs);
    this.executorService = Executors.newScheduledThreadPool(4);
    this.alluxioEntryFetcher = new AlluxioEntryFetcher(alluxioFs, metaStore,
        executorService, new EntryFetchFinishedCallBack(), context.getConf());
    LOG.info("Initialized.");
  }

  /** Invoked by the fetcher when the initial fetch finishes; exits safe mode. */
  private class EntryFetchFinishedCallBack implements Callable<Object> {
    @Override
    public Object call() throws Exception {
      inSafeMode = false;
      return null;
    }
  }

  @Override
  public boolean inSafeMode() {
    return inSafeMode;
  }

  /**
   * Start daemon threads in StatesManager for function.
   */
  @Override
  public void start() throws IOException {
    LOG.info("Starting ...");
    this.alluxioEntryFetcher.start();
    LOG.info("Started. ");
  }

  @Override
  public void stop() throws IOException {
    LOG.info("Stopping ...");
    if (moverIdOutStream != null) {
      try {
        moverIdOutStream.close();
      } catch (IOException e) {
        LOG.debug("Close alluxio 'mover' ID output stream error", e);
      }
    }
    if (alluxioEntryFetcher != null) {
      alluxioEntryFetcher.stop();
    }
    // Shut down the scheduler created in init(); otherwise its threads keep
    // running after the service is stopped.
    if (executorService != null) {
      executorService.shutdown();
    }
    LOG.info("Stopped.");
  }

  /**
   * Writes a marker file recording this host as the running 'mover' and keeps
   * its output stream open so a second instance fails to take the lock.
   *
   * @param fs Alluxio client
   * @return the open output stream for the marker file
   * @throws IOException if the marker file cannot be (re)created
   */
  private FileOutStream checkAndMarkRunning(FileSystem fs) throws IOException {
    AlluxioURI moverIdPath = new AlluxioURI(ALLUXIO_MOVER_ID_PATH);
    try {
      if (fs.exists(moverIdPath)) {
        // Alluxio does not support append operation (ALLUXIO-25), here just delete it
        fs.delete(moverIdPath, DeleteOptions.defaults().setRecursive(true));
      }
      CreateFileOptions options = CreateFileOptions.defaults().setWriteType(
          WriteType.MUST_CACHE);
      FileOutStream fos = fs.createFile(moverIdPath, options);
      fos.write(InetAddress.getLocalHost().getHostName().getBytes());
      fos.flush();
      return fos;
    } catch (IOException | AlluxioException e) {
      LOG.error("Unable to lock alluxio 'mover', please stop alluxio 'mover' first.");
      // Chain the original exception as the cause instead of flattening it to
      // its message, so the real failure is visible to callers.
      throw new IOException(e.getMessage(), e);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioActionType.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioActionType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
public enum AlluxioActionType {
  None(0), // doing nothing
  External(1), // execute some cmdlet lines specified
  LOAD(2), // Load file to Alluxio Cache
  FREE(3), // Free file from alluxio
  PERSIST(4), // Persist file to under file system
  PIN(5), // Make file avoid being evicted from memory
  UNPIN(6), // Unset the PIN flag
  SetTTL(7), // Set the TTL (time to live) in milliseconds to a file
  COPY(8); // Copy file from one under file system to another

  // Stable numeric code associated with each action type.
  private final int value;

  AlluxioActionType(int value) {
    this.value = value;
  }

  /** @return the numeric code of this action type */
  public int getValue() {
    return value;
  }

  /**
   * Looks up an action type by its numeric code.
   *
   * @return the matching type, or {@code null} if no type has that code
   */
  public static AlluxioActionType fromValue(int value) {
    for (AlluxioActionType candidate : values()) {
      if (candidate.value == value) {
        return candidate;
      }
    }
    return null;
  }

  /**
   * Looks up an action type by name, ignoring case.
   *
   * @return the matching type, or {@code null} if none matches
   */
  public static AlluxioActionType fromName(String name) {
    for (AlluxioActionType candidate : values()) {
      if (candidate.name().equalsIgnoreCase(name)) {
        return candidate;
      }
    }
    return null;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/LoadAction.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/LoadAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.smartdata.action.annotation.ActionSignature;
import alluxio.AlluxioURI;
import alluxio.Constants;
import alluxio.client.ReadType;
import alluxio.client.file.FileInStream;
import alluxio.client.file.URIStatus;
import alluxio.client.file.options.OpenFileOptions;
@ActionSignature(
    actionId = "load",
    displayName = "load",
    usage = AlluxioAction.FILE_PATH + " $file "
)
/**
 * Loads a file (or every file under a directory, recursively) into Alluxio
 * memory by reading it with the CACHE_PROMOTE read type.
 */
public class LoadAction extends AlluxioAction {
  // Messages from per-file load failures; logged after the whole walk so one
  // failing file does not abort the rest of the load.
  private List<String> exceptionMessages;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.actionType = AlluxioActionType.LOAD;
    this.exceptionMessages = new ArrayList<>();
  }

  @Override
  protected void execute() throws Exception {
    LOG.info("Executing Alluxio action: LoadAction, path:" + uri.toString());
    loadInternal(uri);
    if (!exceptionMessages.isEmpty()) {
      for (String message : exceptionMessages) {
        LOG.warn(message);
      }
    } else {
      LOG.info("Path " + uri + " was successfully loaded.");
    }
  }

  /**
   * Recursively loads {@code path} into memory. Directories are walked;
   * files already fully in memory are skipped; other files are read through
   * with CACHE_PROMOTE so their blocks are cached as a side effect.
   */
  private void loadInternal(AlluxioURI path) throws Exception {
    URIStatus status = alluxioFs.getStatus(path);
    if (status.isFolder()) {
      for (URIStatus child : alluxioFs.listStatus(path)) {
        loadInternal(new AlluxioURI(child.getPath()));
      }
      return;
    }
    if (status.getInMemoryPercentage() == 100) {
      // The file has already been fully loaded into Alluxio memory.
      return;
    }
    try {
      OpenFileOptions options = OpenFileOptions.defaults().setReadType(ReadType.CACHE_PROMOTE);
      // try-with-resources guarantees the stream is closed; a failure during
      // close is recorded like any other load failure instead of propagating.
      try (FileInStream in = alluxioFs.openFile(path, options)) {
        byte[] buf = new byte[8 * Constants.MB];
        while (in.read(buf) != -1) {
          // Discard the data; the read itself performs the caching.
        }
      }
    } catch (Exception e) {
      exceptionMessages.add(e.getMessage());
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioCopyRunner.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioCopyRunner.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
/*
import org.smartdata.hdfs.actions.copy.CopyRunner;
public class AlluxioCopyRunner extends CopyRunner {
@Override
public void copy(String srcFile, String destFile) throws IOException {
// TODO Auto-generated method stub
}
@Override
public void copy(String[] srcFiles, String destDirectory) throws IOException {
// TODO Auto-generated method stub
}
}
*/
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioAction.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.SmartAction;
import alluxio.AlluxioURI;
import alluxio.client.file.FileSystem;
/**
 * Base class for Alluxio smart actions. {@code init} resolves the target
 * {@link AlluxioURI} from the {@link #FILE_PATH} argument and obtains an
 * Alluxio {@link FileSystem} client; subclasses set {@link #actionType}
 * and implement {@code execute}.
 */
public abstract class AlluxioAction extends SmartAction {
  protected static final Logger LOG = LoggerFactory.getLogger(AlluxioAction.class);

  /** Argument key naming the Alluxio path this action operates on. */
  public static final String FILE_PATH = "-path";

  protected AlluxioURI uri;
  protected AlluxioActionType actionType;
  protected FileSystem alluxioFs;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    uri = new AlluxioURI(args.get(FILE_PATH));
    alluxioFs = FileSystem.Factory.get();
  }

  /** Replaces the default Alluxio client; mainly useful for tests. */
  public void setFileSystem(FileSystem fs) {
    alluxioFs = fs;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/SetTTLAction.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/SetTTLAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import java.util.Map;
import org.smartdata.action.annotation.ActionSignature;
import alluxio.client.file.options.SetAttributeOptions;
/**
 * Alluxio action that sets a TTL (time-to-live, in milliseconds) on a path.
 * The TTL value is read from the required "TTL" argument.
 */
@ActionSignature(
    actionId = "setTtl",
    displayName = "setTtl",
    usage = AlluxioAction.FILE_PATH + " $file " + " TTL $ttl"
)
public class SetTTLAction extends AlluxioAction {
  /** TTL in milliseconds, parsed from the "TTL" argument in init(). */
  private long ttl;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.actionType = AlluxioActionType.SetTTL;
    String ttlArg = args.get("TTL");
    if (ttlArg == null) {
      // Fail fast with a clear message instead of the opaque
      // NumberFormatException("null") that Long.valueOf(null) would raise.
      throw new IllegalArgumentException("Required argument 'TTL' is missing.");
    }
    // parseLong returns a primitive and avoids the needless boxing of valueOf.
    this.ttl = Long.parseLong(ttlArg);
  }

  @Override
  protected void execute() throws Exception {
    LOG.info("Executing Alluxio action: SetTTLAction, path:" + uri.toString());
    SetAttributeOptions options = SetAttributeOptions.defaults().setTtl(ttl);
    alluxioFs.setAttribute(uri, options);
    LOG.info("File " + uri + " was successfully set TTL to " + ttl + ".");
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioActionFactory.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/AlluxioActionFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import org.smartdata.action.AbstractActionFactory;
/**
 * Built-in smart actions for the Alluxio file system.
*/
public class AlluxioActionFactory extends AbstractActionFactory {
  // Register every built-in Alluxio action with the shared factory so each
  // can be looked up by the actionId declared in its @ActionSignature.
  // Runs once at class-load time.
  static {
    addAction(FreeAction.class);
    addAction(LoadAction.class);
    addAction(PersistAction.class);
    addAction(PinAction.class);
    addAction(SetTTLAction.class);
    addAction(UnpinAction.class);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/UnpinAction.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/UnpinAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import java.util.Map;
import org.smartdata.action.annotation.ActionSignature;
import alluxio.client.file.options.SetAttributeOptions;
/**
 * Alluxio action that clears the "pinned" attribute on a path by setting
 * {@code pinned=false} via {@link alluxio.client.file.FileSystem#setAttribute}.
 */
@ActionSignature(
    actionId = "unpin",
    displayName = "unpin",
    usage = AlluxioAction.FILE_PATH + " $file "
)
public class UnpinAction extends AlluxioAction {
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    actionType = AlluxioActionType.UNPIN;
  }

  @Override
  protected void execute() throws Exception {
    LOG.info("Executing Alluxio action: UnpinAction, path:" + uri.toString());
    alluxioFs.setAttribute(uri, SetAttributeOptions.defaults().setPinned(false));
    LOG.info("File " + uri + " was successfully unpinned.");
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/PersistAction.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/PersistAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.smartdata.action.annotation.ActionSignature;
import alluxio.AlluxioURI;
import alluxio.client.file.FileSystemUtils;
import alluxio.client.file.URIStatus;
@ActionSignature(
actionId = "persist",
displayName = "persist",
usage = AlluxioAction.FILE_PATH + " $file "
)
public class PersistAction extends AlluxioAction {
private List<String> exceptionMessages;
@Override
public void init(Map<String, String> args) {
super.init(args);
this.actionType = AlluxioActionType.PERSIST;
this.exceptionMessages = new ArrayList<>();
}
@Override
protected void execute() throws Exception {
LOG.info("Executing Alluxio action: PersistAction, path:" + uri.toString());
persistInternal(uri);
if (!exceptionMessages.isEmpty()) {
for (String message : exceptionMessages) {
LOG.warn(message);
}
} else {
LOG.info("Path " + uri + " was successfully persisted.");
}
}
// persist file to underfilesystem recursively
private void persistInternal(AlluxioURI path) throws Exception {
URIStatus status = alluxioFs.getStatus(path);
if (status.isFolder()) {
List<URIStatus> statuses = alluxioFs.listStatus(path);
for (URIStatus uriStatus : statuses) {
AlluxioURI newPath = new AlluxioURI(uriStatus.getPath());
persistInternal(newPath);
}
} else if (status.isPersisted()) {
LOG.info(path + " is already persisted.");
} else {
try {
FileSystemUtils.persistFile(alluxioFs, path);
LOG.info("Persisted file " + path + " with size " + status.getLength());
} catch (Exception e) {
exceptionMessages.add(e.getMessage());
}
}
}
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/PinAction.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/PinAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import java.util.Map;
import org.smartdata.action.annotation.ActionSignature;
import alluxio.client.file.options.SetAttributeOptions;
/**
 * Alluxio action that sets the "pinned" attribute on a path by setting
 * {@code pinned=true} via {@link alluxio.client.file.FileSystem#setAttribute}.
 */
@ActionSignature(
    actionId = "pin",
    displayName = "pin",
    usage = AlluxioAction.FILE_PATH + " $file "
)
public class PinAction extends AlluxioAction {
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    actionType = AlluxioActionType.PIN;
  }

  @Override
  protected void execute() throws Exception {
    LOG.info("Executing Alluxio action: PinAction, path:" + uri.toString());
    alluxioFs.setAttribute(uri, SetAttributeOptions.defaults().setPinned(true));
    LOG.info("File " + uri + " was successfully pinned.");
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/FreeAction.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/action/FreeAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.action;
import java.util.Map;
import org.smartdata.action.annotation.ActionSignature;
/**
 * Alluxio action that frees the given path from Alluxio storage via
 * {@link alluxio.client.file.FileSystem#free}.
 */
@ActionSignature(
    actionId = "free",
    displayName = "free",
    usage = AlluxioAction.FILE_PATH + " $file "
)
public class FreeAction extends AlluxioAction {
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    actionType = AlluxioActionType.FREE;
  }

  @Override
  protected void execute() throws Exception {
    LOG.info("Executing Alluxio action: FreeAction, file:" + uri.toString());
    alluxioFs.free(uri);
    LOG.info("File " + uri + " was successfully freed.");
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioEntryFetchAndApplyTask.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioEntryFetchAndApplyTask.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.metric.fetcher;
import alluxio.master.journal.JournalReader;
import alluxio.master.journal.ufs.AlluxioJournalUtil;
import alluxio.proto.journal.Journal.JournalEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.conf.SmartConf;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.SystemInfo;
import java.util.Date;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Periodic task that reads Alluxio journal entries, applies each one to the
 * metastore via {@link AlluxioEntryApplier}, and checkpoints the last applied
 * sequence number in the metastore so a restart can resume where it left off.
 */
public class AlluxioEntryFetchAndApplyTask implements Runnable {
  public static final Logger LOG = LoggerFactory.getLogger(AlluxioEntryFetchAndApplyTask.class);

  private final AtomicLong lastSn;
  private final MetaStore metaStore;
  private final AlluxioEntryApplier entryApplier;
  private final JournalReader journalReader;

  /**
   * @param conf         SSM configuration used to locate the Alluxio journal
   * @param metaStore    store receiving applied state and the SN checkpoint
   * @param entryApplier translates journal entries into metastore updates
   * @param startSn      last already-applied sequence number; reading resumes
   *                     at {@code startSn + 1}
   */
  public AlluxioEntryFetchAndApplyTask(SmartConf conf, MetaStore metaStore, AlluxioEntryApplier entryApplier, long startSn) {
    this.metaStore = metaStore;
    this.entryApplier = entryApplier;
    this.lastSn = new AtomicLong(startSn);
    this.journalReader = AlluxioJournalUtil.getJournalReaderFromSn(conf, startSn + 1);
  }

  @Override
  public void run() {
    LOG.trace("AlluxioEntryFetchAndApplyTask run at " + new Date());
    try {
      JournalEntry journalEntry = journalReader.read();
      while (journalEntry != null) {
        entryApplier.apply(journalEntry);
        // set() suffices: the previous value returned by getAndSet was unused.
        lastSn.set(journalEntry.getSequenceNumber());
        // Checkpoint after every entry so a crash loses no applied work.
        metaStore.updateAndInsertIfNotExist(
            new SystemInfo(
                SmartConstants.SMART_ALLUXIO_LAST_ENTRY_SN, String.valueOf(lastSn.get())));
        journalEntry = journalReader.read();
      }
    } catch (Throwable t) {
      // Deliberately broad: an exception escaping a scheduleAtFixedRate task
      // would cancel all future runs of this task.
      LOG.error("Alluxio Entry Apply Events error", t);
    }
  }

  /** @return the sequence number of the last journal entry applied. */
  public long getLastSn() {
    return this.lastSn.get();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioNamespaceFetcher.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioNamespaceFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.metric.fetcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.ingestion.IngestionTask;
import org.smartdata.model.FileInfo;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.FileInfoBatch;
import org.smartdata.metastore.ingestion.FileStatusIngester;
import alluxio.AlluxioURI;
import alluxio.client.file.FileSystem;
import alluxio.client.file.URIStatus;
import alluxio.exception.AlluxioException;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
 * Fetches the whole Alluxio namespace into the metastore using two periodic
 * tasks on a shared executor: a BFS producer ({@link AlluxioFetchTask}) that
 * walks directories and queues {@link FileInfoBatch}es, and a
 * {@link FileStatusIngester} consumer that drains them into the metastore.
 */
public class AlluxioNamespaceFetcher {
  // Default producer period, in milliseconds.
  public static final Long DEFAULT_INTERVAL = 1000L;
  private final ScheduledExecutorService scheduledExecutorService;
  private final long fetchInterval;
  private ScheduledFuture fetchTaskFuture;
  private ScheduledFuture consumerFuture;
  private FileStatusIngester consumer;
  private AlluxioFetchTask fetchTask;
  public static final Logger LOG =
      LoggerFactory.getLogger(AlluxioNamespaceFetcher.class);

  /** Convenience constructor using {@link #DEFAULT_INTERVAL}. */
  public AlluxioNamespaceFetcher(FileSystem fs, MetaStore metaStore, ScheduledExecutorService service) {
    this(fs, metaStore, DEFAULT_INTERVAL, service);
  }

  /**
   * @param fs            Alluxio client used to walk the namespace
   * @param metaStore     destination for fetched file status
   * @param fetchInterval producer period in milliseconds
   * @param service       executor running both producer and consumer
   */
  public AlluxioNamespaceFetcher(FileSystem fs, MetaStore metaStore, long fetchInterval,
      ScheduledExecutorService service) {
    this.fetchTask = new AlluxioFetchTask(fs);
    this.consumer = new FileStatusIngester(metaStore);
    this.fetchInterval = fetchInterval;
    this.scheduledExecutorService = service;
  }

  /** Schedules the namespace walk and the metastore ingester. */
  public void startFetch() throws IOException {
    this.fetchTaskFuture = this.scheduledExecutorService.scheduleAtFixedRate(
        fetchTask, 0, fetchInterval, TimeUnit.MILLISECONDS);
    // The consumer drains batches every 100 ms, independent of fetchInterval.
    this.consumerFuture = this.scheduledExecutorService.scheduleAtFixedRate(
        consumer, 0, 100, TimeUnit.MILLISECONDS);
    LOG.info("Started.");
  }

  /** @return true once the BFS is done and all queued batches were flushed. */
  public boolean fetchFinished() {
    return this.fetchTask.finished();
  }

  /** Cancels both scheduled tasks; in-flight runs are allowed to complete. */
  public void stop() {
    if (fetchTaskFuture != null) {
      this.fetchTaskFuture.cancel(false);
    }
    if (consumerFuture != null) {
      this.consumerFuture.cancel(false);
    }
  }

  /**
   * BFS producer. Each run() processes at most one directory popped from the
   * work deque inherited from IngestionTask (which also owns batches,
   * currentBatch, the fetched counters, and the isFinished flag).
   */
  private static class AlluxioFetchTask extends IngestionTask {
    private final FileSystem fs;
    private long lastUpdateTime = System.currentTimeMillis();
    private long startTime = lastUpdateTime;
    public AlluxioFetchTask(FileSystem fs) {
      super();
      this.fs = fs;
    }

    @Override
    public void run() {
      // Progress logging, throttled to at most once every 2 seconds.
      if (LOG.isDebugEnabled()) {
        long curr = System.currentTimeMillis();
        if (curr - lastUpdateTime >= 2000) {
          LOG.debug(String.format(
              "%d sec, numDirectories = %d, numFiles = %d, batchsInqueue = %d",
              (curr - startTime) / 1000,
              numDirectoriesFetched.get(), numFilesFetched.get(), batches.size()));
          lastUpdateTime = curr;
        }
      }
      String parent = deque.pollFirst();
      if (parent == null) { // BFS finished
        // Flush the partially-filled batch before declaring completion.
        if (currentBatch.actualSize() > 0) {
          try {
            this.batches.put(currentBatch);
          } catch (InterruptedException e) {
            LOG.error("Current batch actual size = "
                + currentBatch.actualSize(), e);
          }
          this.currentBatch = new FileInfoBatch(defaultBatchSize);
        }
        // Only mark finished once the consumer has drained every batch.
        if (this.batches.isEmpty()) {
          if (!this.isFinished) {
            this.isFinished = true;
            long curr = System.currentTimeMillis();
            LOG.info(String.format(
                "Finished fetch Namespace! %d secs used, numDirs = %d, numFiles = %d",
                (curr - startTime) / 1000,
                numDirectoriesFetched.get(), numFilesFetched.get()));
          }
        }
        return;
      }
      try {
        URIStatus status = fs.getStatus(new AlluxioURI(parent));
        if (status != null && status.isFolder()) {
          List<URIStatus> children = fs.listStatus(new AlluxioURI(parent));
          FileInfo fileInfo = convertToFileInfo(status);
          this.addFileStatus(fileInfo);
          numDirectoriesFetched.incrementAndGet();
          for (URIStatus child : children) {
            if (child.isFolder()) {
              // Subdirectories are queued for a later run() pass.
              this.deque.add(child.getPath());
            } else {
              this.addFileStatus(convertToFileInfo(child));
              numFilesFetched.incrementAndGet();
            }
          }
        }
      } catch (IOException | InterruptedException | AlluxioException e) {
        LOG.error("Totally, numDirectoriesFetched = " + numDirectoriesFetched
            + ", numFilesFetched = " + numFilesFetched
            + ". Parent = " + parent, e);
      }
    }

    // Maps an Alluxio URIStatus onto SSM's FileInfo model. Replication is
    // hard-coded to 1 and the trailing storage/ec fields to 0 — presumably
    // because Alluxio has no direct equivalents; confirm against FileInfo's
    // field definitions.
    private FileInfo convertToFileInfo(URIStatus status) {
      FileInfo fileInfo = new FileInfo(
          status.getPath(),
          status.getFileId(),
          status.getLength(),
          status.isFolder(),
          (short)1,
          status.getBlockSizeBytes(),
          status.getLastModificationTimeMs(),
          status.getCreationTimeMs(),
          (short) status.getMode(),
          status.getOwner(),
          status.getGroup(),
          (byte) 0,
          (byte) 0);
      return fileInfo;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioEntryFetcher.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioEntryFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.metric.fetcher;
import alluxio.client.file.FileSystem;
import alluxio.exception.InvalidJournalEntryException;
import alluxio.master.journal.JournalReader;
import alluxio.master.journal.ufs.AlluxioJournalUtil;
import alluxio.proto.journal.Journal.JournalEntry;
import com.google.common.util.concurrent.*;
import com.squareup.tape.QueueFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartConstants;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.SystemInfo;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
public class AlluxioEntryFetcher {
public static final Logger LOG = LoggerFactory.getLogger(AlluxioEntryFetcher.class);
private final FileSystem fileSystem;
private final AlluxioNamespaceFetcher alluxioNamespaceFetcher;
private final ScheduledExecutorService scheduledExecutorService;
private final AlluxioEntryApplier alluxioEntryApplier;
private final MetaStore metaStore;
private Callable finishedCallback;
private ScheduledFuture entryFetchFuture;
private ScheduledFuture entryFetchAndApplyFuture;
private AlluxioEntryApplyTask entryApplyTask;
private File entryInotifyFile;
private QueueFile entryQueueFile;
private static SmartConf conf;
public AlluxioEntryFetcher(FileSystem fileSystem, MetaStore metaStore, ScheduledExecutorService scheduledExecutorService,
Callable finishedCallback) {
this(fileSystem, metaStore, scheduledExecutorService,
new AlluxioEntryApplier(metaStore, fileSystem), finishedCallback, new SmartConf());
}
public AlluxioEntryFetcher(FileSystem fileSystem, MetaStore metaStore, ScheduledExecutorService scheduledExecutorService,
Callable finishedCallback, SmartConf conf) {
this(fileSystem, metaStore, scheduledExecutorService,
new AlluxioEntryApplier(metaStore, fileSystem), finishedCallback, conf);
}
public AlluxioEntryFetcher(FileSystem fileSystem, MetaStore metaStore, ScheduledExecutorService scheduledExecutorService,
AlluxioEntryApplier alluxioEntryApplier, Callable finishedCallback) {
this.fileSystem = fileSystem;
this.scheduledExecutorService = scheduledExecutorService;
this.alluxioEntryApplier = alluxioEntryApplier;
this.metaStore = metaStore;
this.finishedCallback = finishedCallback;
this.conf = new SmartConf();
this.alluxioNamespaceFetcher = new AlluxioNamespaceFetcher(fileSystem, metaStore, scheduledExecutorService);
}
public AlluxioEntryFetcher(FileSystem fileSystem, MetaStore metaStore, ScheduledExecutorService scheduledExecutorService,
AlluxioEntryApplier alluxioEntryApplier, Callable finishedCallback, SmartConf conf) {
this.fileSystem = fileSystem;
this.scheduledExecutorService = scheduledExecutorService;
this.alluxioEntryApplier = alluxioEntryApplier;
this.metaStore = metaStore;
this.finishedCallback = finishedCallback;
this.conf = conf;
this.alluxioNamespaceFetcher = new AlluxioNamespaceFetcher(fileSystem, metaStore, scheduledExecutorService);
}
public void start() throws IOException {
Long lastSn = getLastSeqNum();
if (lastSn != null && lastSn != -1 && canReadFromLastSeqNum(lastSn)) {
startFromLastSeqNum(lastSn);
} else {
startWithFetchingAlluxioNameSpace();
}
}
public static boolean canReadFromLastSeqNum(Long lastSn) {
try {
if (AlluxioJournalUtil.getCurrentSeqNum(conf) == lastSn) {
return true;
}
JournalReader reader = AlluxioJournalUtil.getJournalReaderFromSn(conf, lastSn);
JournalEntry entry = reader.read();
return entry != null;
} catch (Exception e) {
return false;
}
}
private Long getLastSeqNum() {
try {
SystemInfo info =
metaStore.getSystemInfoByProperty(SmartConstants.SMART_ALLUXIO_LAST_ENTRY_SN);
return info != null ? Long.parseLong(info.getValue()) : -1L;
} catch (MetaStoreException e) {
return -1L;
}
}
private void startFromLastSeqNum(long lastSn) throws IOException {
LOG.info("Skipped fetching Alluxio Name Space, start applying alluxio journal entry from " + lastSn);
submitEntryFetchAndApplyTask(lastSn);
try {
finishedCallback.call();
} catch (Exception e) {
LOG.error("Call back failed", e);
}
}
private void submitEntryFetchAndApplyTask(long lastSn) throws IOException {
entryFetchAndApplyFuture =
scheduledExecutorService.scheduleAtFixedRate(
new AlluxioEntryFetchAndApplyTask(conf, metaStore, alluxioEntryApplier, lastSn),
0,
100,
TimeUnit.MILLISECONDS);
}
private void startWithFetchingAlluxioNameSpace() throws IOException {
ListeningExecutorService listeningExecutorService = MoreExecutors.listeningDecorator(scheduledExecutorService);
entryInotifyFile = new File("/tmp/entry-inotify-" + new Random().nextLong());
entryQueueFile = new QueueFile(entryInotifyFile);
long startSn = AlluxioJournalUtil.getCurrentSeqNum(conf);
LOG.info("Start fetching alluxio namespace with current journal entry sequence number = " + startSn);
alluxioNamespaceFetcher.startFetch();
entryFetchFuture = scheduledExecutorService.scheduleAtFixedRate(
new AlluxioEntryFetchTask(entryQueueFile, conf, startSn), 0, 100, TimeUnit.MILLISECONDS);
entryApplyTask = new AlluxioEntryApplyTask(alluxioNamespaceFetcher, alluxioEntryApplier, entryQueueFile, conf, startSn);
ListenableFuture<?> future = listeningExecutorService.submit(entryApplyTask);
Futures.addCallback(future, new AlluxioNameSpaceFetcherCallBack(), scheduledExecutorService);
LOG.info("Start apply alluxio entry.");
}
private class AlluxioNameSpaceFetcherCallBack implements FutureCallback<Object> {
@Override
public void onSuccess(@Nullable Object o) {
entryFetchFuture.cancel(false);
alluxioNamespaceFetcher.stop();
try {
entryQueueFile.close();
submitEntryFetchAndApplyTask(entryApplyTask.getLastSn());
LOG.info("Alluxio Namespace fetch finished.");
finishedCallback.call();
} catch (Exception e) {
LOG.error("Call back failed", e);
}
}
@Override
public void onFailure(Throwable throwable) {
LOG.error("Alluxio NameSpace fetch failed", throwable);
}
}
public void stop() {
if (entryInotifyFile != null) {
entryInotifyFile.delete();
}
if (entryFetchFuture != null) {
entryFetchFuture.cancel(false);
}
if (entryFetchAndApplyFuture != null) {
entryFetchAndApplyFuture.cancel(false);
}
}
private static class AlluxioEntryFetchTask implements Runnable {
private final QueueFile queueFile;
private JournalReader journalReader;
public AlluxioEntryFetchTask(QueueFile queueFile, SmartConf conf, long startSn) {
this.queueFile = queueFile;
this.journalReader = AlluxioJournalUtil.getJournalReaderFromSn(conf, startSn + 1);
}
@Override
public void run() {
try {
JournalEntry journalEntry = journalReader.read();
while (journalEntry != null) {
byte[] seqEntry = journalEntry.toByteArray();
this.queueFile.add(seqEntry);
journalEntry = journalReader.read();
}
} catch (IOException | InvalidJournalEntryException e) {
LOG.error("Alluxio entry enqueue error", e);
}
}
}
private static class AlluxioEntryApplyTask implements Runnable {
private final AlluxioNamespaceFetcher namespaceFetcher;
private final AlluxioEntryApplier entryApplier;
private final QueueFile queueFile;
private long lastSn;
private SmartConf conf;
private List<String> ignoreList;
public AlluxioEntryApplyTask(AlluxioNamespaceFetcher namespaceFetcher, AlluxioEntryApplier entryApplier,
QueueFile queueFile, SmartConf conf, long lastSn) {
this.namespaceFetcher = namespaceFetcher;
this.entryApplier = entryApplier;
this.queueFile = queueFile;
this.conf = conf;
this.lastSn = lastSn;
this.ignoreList = getIgnoreDirFromConfig();
}
public List<String> getIgnoreDirFromConfig() {
String ignoreDirs = this.conf.get(SmartConfKeys.SMART_IGNORE_DIRS_KEY);
List<String> ignoreList;
if (ignoreDirs == null || ignoreDirs.equals("")) {
ignoreList = new ArrayList<>();
} else {
ignoreList = Arrays.asList(ignoreDirs.split(","));
}
for (int i = 0; i < ignoreList.size(); i++) {
if (!ignoreList.get(i).endsWith("/")) {
ignoreList.set(i, ignoreList.get(i).concat("/"));
}
}
return ignoreList;
}
public boolean fetchPathInIgnoreList(String path) {
if (!path.endsWith("/")) {
path = path.concat("/");
}
for (int i = 0; i < ignoreList.size(); i++) {
if (path.equals(ignoreList.get(i))) {
return true;
}
}
return false;
}
public boolean ignoreEntry(JournalEntry entry) throws MetaStoreException {
String inodePath;
if (entry.hasInodeDirectory()) {
inodePath = entryApplier.getPathFromInodeDir(entry.getInodeDirectory());
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasInodeFile()) {
inodePath = entryApplier.getPathFromInodeFile(entry.getInodeFile());
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasInodeLastModificationTime()) {
inodePath = entryApplier.getPathByFileId(entry.getInodeLastModificationTime().getId());
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasPersistDirectory()) {
inodePath = entryApplier.getPathByFileId(entry.getPersistDirectory().getId());
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasSetAttribute()) {
inodePath = entryApplier.getPathByFileId(entry.getSetAttribute().getId());
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasRename()) {
inodePath = entryApplier.getPathByFileId(entry.getRename().getId());
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasDeleteFile()) {
inodePath = entryApplier.getPathByFileId(entry.getDeleteFile().getId());
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasAddMountPoint()) {
inodePath = entry.getAddMountPoint().getAlluxioPath();
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasDeleteMountPoint()) {
inodePath = entry.getDeleteMountPoint().getAlluxioPath();
return fetchPathInIgnoreList(inodePath);
} else if (entry.hasAsyncPersistRequest()
|| entry.hasCompleteFile()
|| entry.hasInodeDirectoryIdGenerator()
|| entry.hasReinitializeFile()) {
return false;
}
return true;
}
/**
 * Drains the on-disk journal-entry queue once the initial namespace fetch has
 * finished, applying every non-ignored entry and tracking the last applied
 * sequence number. Exits after the queue has been fully drained.
 */
@Override
public void run() {
  try {
    while (!Thread.currentThread().isInterrupted()) {
      if (!namespaceFetcher.fetchFinished()) {
        // Wait for the initial namespace fetch to finish before draining,
        // so applied entries land on a fully populated namespace.
        Thread.sleep(100);
      } else {
        while (!queueFile.isEmpty()) {
          JournalEntry entry = JournalEntry.parseFrom(queueFile.peek());
          queueFile.remove();
          if (!ignoreEntry(entry)) {
            this.entryApplier.apply(entry);
            this.lastSn = entry.getSequenceNumber();
          }
        }
        break;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status so owners of this thread can observe it;
    // the original code swallowed it inside a multi-catch.
    Thread.currentThread().interrupt();
    LOG.error("Alluxio entry dequeue error", e);
  } catch (IOException | MetaStoreException e) {
    LOG.error("Alluxio entry dequeue error", e);
  }
}
/** Returns the sequence number of the most recently applied journal entry. */
public long getLastSn() {
  return lastSn;
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioEntryApplier.java | smart-alluxio-support/smart-alluxio/src/main/java/org/smartdata/alluxio/metric/fetcher/AlluxioEntryApplier.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.alluxio.metric.fetcher;
import alluxio.AlluxioURI;
import alluxio.client.file.FileSystem;
import alluxio.client.file.URIStatus;
import alluxio.exception.AlluxioException;
import alluxio.exception.ExceptionMessage;
import alluxio.proto.journal.File.*;
import alluxio.proto.journal.Journal.JournalEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.alluxio.AlluxioUtil;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.FileDiff;
import org.smartdata.model.FileDiffType;
import org.smartdata.model.FileInfo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Translates Alluxio journal entries into metastore updates: it converts each
 * entry into SQL statements and/or direct metastore calls (file inserts,
 * file-diff records for paths under backup) and executes them.
 *
 * Fixes over the previous revision: exceptions are logged through SLF4J with
 * their cause instead of {@code printStackTrace()}, and the mount/unmount
 * debug logs use SLF4J {@code {}} placeholders (the old {@code %s} specifiers
 * were never substituted by SLF4J).
 */
public class AlluxioEntryApplier {
  private static final Logger LOG = LoggerFactory.getLogger(AlluxioEntryApplier.class);

  private final MetaStore metaStore;
  private FileSystem fs;

  public AlluxioEntryApplier(MetaStore metaStore, FileSystem fs) {
    this.metaStore = metaStore;
    this.fs = fs;
  }

  /**
   * Applies a single journal entry: converts it to SQL statements and runs
   * every non-empty statement against the metastore.
   *
   * @param entry the journal entry to apply
   * @throws IOException if the entry type is unexpected
   * @throws MetaStoreException on metastore failures
   */
  public void apply(JournalEntry entry) throws IOException, MetaStoreException {
    List<String> statements = new ArrayList<>();
    List<String> sqlist = processEntryToSql(entry);
    if (sqlist != null && !sqlist.isEmpty()) {
      for (String sql : sqlist) {
        if (sql != null && sql.length() > 0) {
          statements.add(sql);
        }
      }
    }
    this.metaStore.execute(statements);
  }

  /**
   * Dispatches on the entry type and produces the SQL statements (possibly
   * empty) needed to mirror the entry in the metastore. Some branches also
   * perform direct metastore inserts as a side effect.
   */
  private List<String> processEntryToSql(JournalEntry entry) throws IOException, MetaStoreException {
    if (entry.hasInodeDirectory()) {
      LOG.trace("entry type:{}, id:{}",
          entry.getInodeDirectory().getClass(), entry.getInodeDirectory().getId());
      InodeDirectoryEntry inodeDirectoryEntry = entry.getInodeDirectory();
      String inodeDir = getPathFromInodeDir(inodeDirectoryEntry);
      URIStatus dStatus = null;
      try {
        dStatus = fs.getStatus(new AlluxioURI(inodeDir));
      } catch (AlluxioException e) {
        LOG.error("Failed to get status of directory " + inodeDir, e);
      }
      // NOTE(review): dStatus may still be null here; presumably
      // AlluxioUtil.convertFileStatus tolerates that — confirm.
      FileInfo fileInfo = AlluxioUtil.convertFileStatus(dStatus);
      metaStore.insertFile(fileInfo);
      return Collections.singletonList("");
    } else if (entry.hasInodeFile()) {
      LOG.trace("entry type:{}, id:{}",
          entry.getInodeFile().getClass(), entry.getInodeFile().getId());
      String addSql = addInodeFileFromEntry(entry.getInodeFile());
      return Collections.singletonList(addSql);
    } else if (entry.hasInodeLastModificationTime()) {
      LOG.trace("entry type:{}, id:{}",
          entry.getInodeLastModificationTime().getClass(),
          entry.getInodeLastModificationTime().getId());
      InodeLastModificationTimeEntry modTimeEntry = entry.getInodeLastModificationTime();
      String path = getPathByFileId(modTimeEntry.getId());
      // Record a METADATA diff only for paths that are being backed up.
      FileDiff fileDiff = null;
      if (inBackup(path)) {
        fileDiff = new FileDiff(FileDiffType.METADATA);
        fileDiff.setSrc(path);
      }
      if (fileDiff != null) {
        fileDiff.getParameters().put("-mtime", "" + modTimeEntry.getLastModificationTimeMs());
        metaStore.insertFileDiff(fileDiff);
      }
      String modifySql = String.format(
          "UPDATE file SET modification_time = %s WHERE fid = '%s';",
          modTimeEntry.getLastModificationTimeMs(),
          modTimeEntry.getId());
      return Collections.singletonList(modifySql);
    } else if (entry.hasPersistDirectory()) {
      LOG.trace("entry type:{}, id:{}",
          entry.getPersistDirectory().getClass(), entry.getPersistDirectory().getId());
      PersistDirectoryEntry typedEntry = entry.getPersistDirectory();
      LOG.debug("Persist directory id " + typedEntry.getId());
      return Collections.singletonList("");
    } else if (entry.hasSetAttribute()) {
      LOG.trace("entry type:{}, id:{}",
          entry.getSetAttribute().getClass(), entry.getSetAttribute().getId());
      String setAttrSql = setAttributeFromEntry(entry.getSetAttribute());
      return Collections.singletonList(setAttrSql);
    } else if (entry.hasRename()) {
      LOG.trace("entry type:{}, id:{}",
          entry.getRename().getClass(), entry.getRename().getId());
      return renameFromEntry(entry.getRename());
    } else if (entry.hasDeleteFile()) {
      LOG.trace("entry type:{}, id:{}",
          entry.getDeleteFile().getClass(), entry.getDeleteFile().getId());
      String delSql = deleteFromEntry(entry.getDeleteFile());
      return Collections.singletonList(delSql);
    } else if (entry.hasAddMountPoint()) {
      LOG.trace("entry type:{}, alluxio path:{}, ufs path:{}",
          entry.getAddMountPoint().getClass(),
          entry.getAddMountPoint().getAlluxioPath(),
          entry.getAddMountPoint().getUfsPath());
      return Collections.singletonList(mountFromEntry(entry.getAddMountPoint()));
    } else if (entry.hasDeleteMountPoint()) {
      LOG.trace("entry type:{}, alluxio path:{}",
          entry.getDeleteMountPoint().getClass(),
          entry.getDeleteMountPoint().getAlluxioPath());
      return Collections.singletonList(unmountFromEntry(entry.getDeleteMountPoint()));
    } else if (entry.hasAsyncPersistRequest()
        || entry.hasCompleteFile()
        || entry.hasInodeDirectoryIdGenerator()
        || entry.hasReinitializeFile()) {
      // Bookkeeping entries: nothing to mirror in the metastore.
    } else {
      throw new IOException(ExceptionMessage.UNEXPECTED_JOURNAL_ENTRY.getMessage(entry));
    }
    return Collections.emptyList();
  }

  /**
   * Inserts a new file into the metastore, and, if the path is under backup,
   * records an APPEND file-diff that captures its initial content and
   * metadata so the backup side can recreate it.
   */
  private String addInodeFileFromEntry(InodeFileEntry inodeFileEntry) throws MetaStoreException {
    String inodePath = getPathFromInodeFile(inodeFileEntry);
    URIStatus status = null;
    try {
      status = fs.getStatus(new AlluxioURI(inodePath));
    } catch (IOException | AlluxioException e) {
      LOG.error("Failed to get status of file " + inodePath, e);
    }
    // NOTE(review): status may be null here; presumably
    // AlluxioUtil.convertFileStatus tolerates that — confirm.
    FileInfo fileInfo = AlluxioUtil.convertFileStatus(status);
    if (inBackup(fileInfo.getPath())) {
      FileDiff fileDiff = new FileDiff(FileDiffType.APPEND);
      fileDiff.setSrc(fileInfo.getPath());
      fileDiff.getParameters().put("-offset", String.valueOf(0));
      // Note that "-length 0" means create an empty file
      fileDiff.getParameters()
          .put("-length", String.valueOf(fileInfo.getLength()));
      // Add modification time, owner, group and permission so the backup
      // copy matches the source file's metadata.
      fileDiff.getParameters().put("-mtime", "" + fileInfo.getModificationTime());
      fileDiff.getParameters().put("-owner", "" + fileInfo.getOwner());
      fileDiff.getParameters().put("-group", "" + fileInfo.getGroup());
      fileDiff.getParameters().put("-permission", "" + fileInfo.getPermission());
      metaStore.insertFileDiff(fileDiff);
    }
    metaStore.insertFile(fileInfo);
    return "";
  }

  /**
   * Handles a SetAttribute entry. Owner/group changes only produce file-diffs
   * for backed-up paths; a permission change additionally updates the file
   * table. Pinned/ttl/persisted changes are logged but not yet handled.
   */
  private String setAttributeFromEntry(SetAttributeEntry setAttrEntry) throws MetaStoreException {
    String path = getPathByFileId(setAttrEntry.getId());
    FileDiff fileDiff = null;
    if (inBackup(path)) {
      fileDiff = new FileDiff(FileDiffType.METADATA);
      fileDiff.setSrc(path);
    }
    if (setAttrEntry.hasPinned()) {
      LOG.debug(String.format("File %s is pinned %s", setAttrEntry.getId(), setAttrEntry.getPinned()));
      //Todo
    } else if (setAttrEntry.hasTtl()) {
      LOG.debug(String.format("File %s has ttl %s with ttlAction %s", setAttrEntry.getId(), setAttrEntry.getTtl(), setAttrEntry.getTtlAction()));
      //Todo
    } else if (setAttrEntry.hasPersisted()) {
      LOG.debug(String.format("File %s is persisted %s", setAttrEntry.getId(), setAttrEntry.getPersisted()));
      //Todo
    } else if (setAttrEntry.hasOwner()) {
      if (fileDiff != null) {
        fileDiff.getParameters().put("-owner", "" + setAttrEntry.getOwner());
        metaStore.insertFileDiff(fileDiff);
      }
      //Todo
    } else if (setAttrEntry.hasGroup()) {
      if (fileDiff != null) {
        fileDiff.getParameters().put("-group", "" + setAttrEntry.getGroup());
        metaStore.insertFileDiff(fileDiff);
      }
      //Todo
    } else if (setAttrEntry.hasPermission()) {
      if (fileDiff != null) {
        fileDiff.getParameters().put("-permission", "" + (short) setAttrEntry.getPermission());
        metaStore.insertFileDiff(fileDiff);
      }
      return String.format(
          "UPDATE file SET permission = %s WHERE path = '%s';",
          (short) setAttrEntry.getPermission(), path);
    }
    return "";
  }

  /**
   * Handles a rename. If the renamed inode is unknown to the metastore, the
   * destination is inserted as a new file; otherwise the path is rewritten in
   * place (and recursively for directories), recording a RENAME diff for
   * backed-up sources.
   */
  private List<String> renameFromEntry(RenameEntry renameEntry) throws MetaStoreException {
    List<String> sqlist = new ArrayList<>();
    URIStatus dStatus = null;
    try {
      dStatus = fs.getStatus(new AlluxioURI(renameEntry.getDstPath()));
    } catch (IOException | AlluxioException e) {
      LOG.error("Failed to get status of rename dest " + renameEntry.getDstPath(), e);
    }
    if (dStatus == null) {
      LOG.debug("Get rename dest status failed, {}", renameEntry.getDstPath());
    }
    FileInfo fileInfo = metaStore.getFile(renameEntry.getId());
    if (fileInfo == null) {
      // Source unknown: fall back to inserting the destination directly.
      if (dStatus != null) {
        fileInfo = AlluxioUtil.convertFileStatus(dStatus);
        metaStore.insertFile(fileInfo);
      }
    } else {
      String srcPath = fileInfo.getPath();
      if (inBackup(srcPath)) {
        FileDiff fileDiff = new FileDiff(FileDiffType.RENAME);
        fileDiff.setSrc(srcPath);
        fileDiff.getParameters().put("-dest", renameEntry.getDstPath());
        metaStore.insertFileDiff(fileDiff);
      }
      sqlist.add(String.format("UPDATE file SET path = replace(path, '%s', '%s') WHERE path = '%s';",
          srcPath, renameEntry.getDstPath(), srcPath));
      if (fileInfo.isdir()) {
        // Also rewrite every descendant path of a renamed directory.
        sqlist.add(String.format("UPDATE file SET path = replace(path, '%s', '%s') WHERE path LIKE '%s/%%';",
            srcPath, renameEntry.getDstPath(), srcPath));
      }
    }
    return sqlist;
  }

  /**
   * Handles a delete: records a DELETE diff for backed-up paths and removes
   * the row from the file table.
   */
  private String deleteFromEntry(DeleteFileEntry deleteFileEntry) throws MetaStoreException {
    String path = getPathByFileId(deleteFileEntry.getId());
    if (inBackup(path)) {
      FileDiff fileDiff = new FileDiff(FileDiffType.DELETE);
      fileDiff.setSrc(path);
      metaStore.insertFileDiff(fileDiff);
    }
    return String.format("DELETE FROM file WHERE fid =%s;", deleteFileEntry.getId());
  }

  /** Mount events are only logged for now; no metastore change is needed. */
  private String mountFromEntry(AddMountPointEntry mountPointEntry) {
    // SLF4J uses {} placeholders; the previous %s specifiers were never substituted.
    LOG.debug("Add mount alluxio path {} to ufs path {}",
        mountPointEntry.getAlluxioPath(), mountPointEntry.getUfsPath());
    return "";
  }

  /** Unmount events are only logged for now; no metastore change is needed. */
  private String unmountFromEntry(DeleteMountPointEntry unmountEntry) {
    LOG.debug("Delete mount alluxio path {}", unmountEntry.getAlluxioPath());
    return "";
  }

  /** Returns true if the given source path is covered by a backup rule. */
  private boolean inBackup(String src) throws MetaStoreException {
    return metaStore.srcInbackup(src);
  }

  /**
   * Builds the full path of a file entry from its parent's path (looked up in
   * the metastore by parent id) and its own name.
   */
  public String getPathFromInodeFile(InodeFileEntry fileEntry) throws MetaStoreException {
    long pid = fileEntry.getParentId();
    String fName = fileEntry.getName();
    FileInfo fileInfo = metaStore.getFile(pid);
    String pPath = "";
    if (fileInfo != null) {
      pPath = formatPath(fileInfo.getPath());
    }
    return pPath.concat(fName);
  }

  /**
   * Builds the full path of a directory entry from its parent's path (looked
   * up in the metastore by parent id) and its own name.
   */
  public String getPathFromInodeDir(InodeDirectoryEntry dirEntry) throws MetaStoreException {
    long pid = dirEntry.getParentId();
    String dName = dirEntry.getName();
    FileInfo fileInfo = metaStore.getFile(pid);
    String pDir = "";
    if (fileInfo != null) {
      pDir = formatPath(fileInfo.getPath());
    }
    return pDir.concat(dName);
  }

  /**
   * Resolves a file id to its path via the metastore; returns "" when the id
   * is unknown.
   */
  public String getPathByFileId(long fid) throws MetaStoreException {
    FileInfo fileInfo = metaStore.getFile(fid);
    String path = "";
    if (fileInfo != null) {
      path = fileInfo.getPath();
    }
    return path;
  }

  /** Ensures a path ends with the Alluxio path separator. */
  private String formatPath(String path) {
    if (!path.endsWith(AlluxioURI.SEPARATOR)) {
      path += AlluxioURI.SEPARATOR;
    }
    return path;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-client/src/main/java/org/smartdata/client/SmartClient.java | smart-client/src/main/java/org/smartdata/client/SmartClient.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.smartdata.SmartConstants;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.FileState;
import org.smartdata.model.NormalFileState;
import org.smartdata.protocol.SmartClientProtocol;
import org.smartdata.protocol.protobuffer.ClientProtocolClientSideTranslator;
import org.smartdata.protocol.protobuffer.ClientProtocolProtoBuffer;
import org.smartdata.utils.StringUtil;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Client-side entry point for talking to SmartServer(s). Maintains a queue of
 * candidate servers (active server first, when known from a local hint file),
 * reports file access events, and queries file states, failing over to the
 * next configured server on connection errors.
 *
 * Fixes over the previous revision: the per-report thread pool in
 * {@link #reportFileAccessEventConcurrently} is now shut down (it previously
 * leaked one pool per call), the {@code Scanner} and {@code FileWriter} used
 * for the active-server hint file are closed via try-with-resources, the
 * swallowed interrupt in the future-polling loop is restored, and the three
 * constructors delegate to a single initialization path.
 */
public class SmartClient implements java.io.Closeable, SmartClientProtocol {
  private static final long VERSION = 1;

  private Configuration conf;
  /** The server queue keeps server's order according to active status. **/
  private Deque<SmartClientProtocol> serverQue;
  /** The map from server to its rpc address in "hostname:port" format. **/
  private Map<SmartClientProtocol, String> serverToRpcAddr;
  private volatile boolean running = true;
  private List<String> ignoreAccessEventDirs;
  private Map<String, Integer> singleIgnoreList;
  private List<String> coverAccessEventDirs;
  public static final String ACTIVE_SMART_SERVER_FILE_PATH = "/tmp/active_smart_server";

  /**
   * Creates a client using the server addresses configured under
   * {@link SmartConfKeys#SMART_SERVER_RPC_ADDRESS_KEY}.
   *
   * @throws IOException if no address is configured or an address is malformed
   */
  public SmartClient(Configuration conf) throws IOException {
    this(conf, parseRpcAddresses(conf));
  }

  /** Creates a client talking to a single, explicitly given server. */
  public SmartClient(Configuration conf, InetSocketAddress address)
      throws IOException {
    this(conf, new InetSocketAddress[]{address});
  }

  /** Creates a client talking to the given list of candidate servers. */
  public SmartClient(Configuration conf, InetSocketAddress[] addrs)
      throws IOException {
    this.conf = conf;
    this.serverQue = new LinkedList<>();
    this.serverToRpcAddr = new HashMap<>();
    this.ignoreAccessEventDirs = new ArrayList<>();
    this.coverAccessEventDirs = new ArrayList<>();
    this.singleIgnoreList = new ConcurrentHashMap<>(200);
    initialize(addrs);
  }

  /**
   * Parses "host:port" entries from configuration into socket addresses.
   *
   * @throws IOException if the key is missing/empty or an entry is malformed
   */
  private static InetSocketAddress[] parseRpcAddresses(Configuration conf)
      throws IOException {
    String[] rpcConfValue =
        conf.getTrimmedStrings(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY);
    if (rpcConfValue == null || rpcConfValue.length == 0) {
      throw new IOException("SmartServer address not found. Please configure "
          + "it through " + SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY);
    }
    List<InetSocketAddress> addrList = new LinkedList<>();
    for (String rpcValue : rpcConfValue) {
      String[] hostAndPort = rpcValue.split(":");
      try {
        // Use the last two segments so values with a scheme prefix still work.
        addrList.add(new InetSocketAddress(
            hostAndPort[hostAndPort.length - 2],
            Integer.parseInt(hostAndPort[hostAndPort.length - 1])));
      } catch (Exception e) {
        throw new IOException("Incorrect SmartServer address. Please follow "
            + "IP/Hostname:Port format");
      }
    }
    return addrList.toArray(new InetSocketAddress[0]);
  }

  /**
   * Builds RPC proxies for all candidate servers, putting the recorded active
   * server (if any) at the head of the queue, and loads the ignore/cover
   * directory lists from configuration.
   */
  private void initialize(InetSocketAddress[] addrs) throws IOException {
    RPC.setProtocolEngine(conf, ClientProtocolProtoBuffer.class,
        ProtobufRpcEngine.class);
    List<InetSocketAddress> orderedAddrs = new ArrayList<>();
    InetSocketAddress recordedActiveAddr = getActiveServerAddress();
    if (recordedActiveAddr != null) {
      orderedAddrs.add(recordedActiveAddr);
    }
    for (InetSocketAddress addr : addrs) {
      if (!addr.equals(recordedActiveAddr)) {
        orderedAddrs.add(addr);
      }
    }
    for (InetSocketAddress addr : orderedAddrs) {
      ClientProtocolProtoBuffer proxy = RPC.getProxy(
          ClientProtocolProtoBuffer.class, VERSION, addr, conf);
      SmartClientProtocol server = new ClientProtocolClientSideTranslator(proxy);
      serverQue.addLast(server);
      serverToRpcAddr.put(server, addr.getHostName() + ":" + addr.getPort());
    }
    // SMART_IGNORE_DIRS_KEY and SMART_WORK_DIR_KEY should be configured on
    // application side if its dfsClient is replaced by SmartDfsClient.
    Collection<String> ignoreDirs = conf.getTrimmedStringCollection(
        SmartConfKeys.SMART_IGNORE_DIRS_KEY);
    // The system folder and SSM work folder should be ignored to
    // report access count.
    ignoreDirs.add(SmartConstants.SYSTEM_FOLDER);
    ignoreDirs.add(conf.get(SmartConfKeys.SMART_WORK_DIR_KEY,
        SmartConfKeys.SMART_WORK_DIR_DEFAULT));
    for (String s : ignoreDirs) {
      ignoreAccessEventDirs.add(s + (s.endsWith("/") ? "" : "/"));
    }
    Collection<String> coverDirs = conf.getTrimmedStringCollection(
        SmartConfKeys.SMART_COVER_DIRS_KEY);
    for (String s : coverDirs) {
      coverAccessEventDirs.add(s + (s.endsWith("/") ? "" : "/"));
    }
  }

  /** Throws if the client has been closed. */
  private void checkOpen() throws IOException {
    if (!running) {
      throw new IOException("SmartClient closed");
    }
  }

  /**
   * Record active server (hostname:port) currently found into a local file.
   * This file can be dropped by OS, but considering it's just used for
   * optimization, the lack of recorded active server doesn't cause critical
   * issue.
   */
  private void recordActiveServerAddr(String addr) {
    try {
      File file = new File(ACTIVE_SMART_SERVER_FILE_PATH);
      if (!file.exists()) {
        file.createNewFile();
      }
      try (FileWriter fw = new FileWriter(file)) {
        fw.write(addr);
      }
    } catch (IOException ignored) {
      // Best effort only: losing the hint merely slows the next lookup.
    }
  }

  /**
   * Get recorded active server address (hostname:port).
   *
   * @return active server address if found. Otherwise, null.
   */
  private InetSocketAddress getActiveServerAddress() {
    // try-with-resources: the Scanner (and its underlying file handle)
    // previously leaked on every call.
    try (Scanner scanner = new Scanner(new File(ACTIVE_SMART_SERVER_FILE_PATH))) {
      if (scanner.hasNextLine()) {
        String address = scanner.nextLine();
        String[] strings = address.split(":");
        return new InetSocketAddress(strings[0], Integer.parseInt(strings[1]));
      }
    } catch (FileNotFoundException e) {
      return null;
    }
    return null;
  }

  /**
   * Reports access count event to smart server. In SSM HA mode, multiple
   * smart servers can be configured. If fail to connect to one server,
   * this method will pick up the next one from a queue to try again. If
   * all servers cannot be connected, an exception will be thrown.
   * <p></p>
   * Generally, Configuration class has only one instance. If this method
   * finds active server has been changed, it will reset the value for
   * property SMART_SERVER_RPC_ADDRESS_KEY in Configuration instance. Thus,
   * next time a SmartClient is created with this Configuration instance,
   * active server will be put in the head of a queue and it will be picked
   * up firstly.
   *
   * @param event the access event to report
   * @throws IOException if no server could be reached
   */
  @Override
  public void reportFileAccessEvent(FileAccessEvent event)
      throws IOException {
    if (shouldIgnore(event.getPath())) {
      return;
    }
    checkOpen();
    if (conf.getBoolean(SmartConfKeys.SMART_CLIENT_CONCURRENT_REPORT_ENABLED,
        SmartConfKeys.SMART_CLIENT_CONCURRENT_REPORT_ENABLED_DEFAULT)) {
      reportFileAccessEventConcurrently(event);
    } else {
      reportFileAccessEventSimply(event);
    }
  }

  /**
   * A simple report strategy that tries to connect to smart server one by one.
   * And active smart server address will be updated in a local file for new
   * client to use henceforth.
   *
   * @param event the access event to report
   * @throws IOException if all configured servers fail
   */
  private void reportFileAccessEventSimply(FileAccessEvent event)
      throws IOException {
    int failedServerNum = 0;
    while (true) {
      try {
        SmartClientProtocol server = serverQue.getFirst();
        server.reportFileAccessEvent(event);
        if (failedServerNum != 0) {
          // A failover happened: persist the new active server ordering.
          onNewActiveSmartServer();
        }
        break;
      } catch (ConnectException e) {
        failedServerNum++;
        // If all servers has been tried but still fail,
        // throw an exception.
        if (failedServerNum == serverQue.size()) {
          throw new ConnectException("Tried to connect to configured SSM "
              + "server(s), but failed." + e.getMessage());
        }
        // Move the first server to last.
        serverQue.addLast(serverQue.pollFirst());
      }
    }
  }

  /**
   * Report file access event concurrently. Only one server is active, so
   * reporting to this server will be successful.
   *
   * @param event the access event to report
   * @throws IOException if no server acknowledged the report
   */
  private void reportFileAccessEventConcurrently(FileAccessEvent event)
      throws IOException {
    int num = serverQue.size();
    ExecutorService executorService = Executors.newFixedThreadPool(num);
    Future<Void>[] futures = new Future[num];
    boolean isReported = false;
    try {
      int index = 0;
      for (SmartClientProtocol server : serverQue) {
        futures[index] = executorService.submit(new Callable<Void>() {
          @Override
          public Void call() throws IOException {
            server.reportFileAccessEvent(event);
            return null;
          }
        });
        index++;
      }
      byte tryNum = 0;
      while (tryNum++ < 10) {
        for (Future<Void> future : futures) {
          try {
            // A short timeout value for performance consideration.
            future.get(200, TimeUnit.MILLISECONDS);
            isReported = true;
            break;
            // ExecutionException will be thrown if IOException inside #call is
            // thrown. Multiple calling #get with exception thrown behaves
            // consistently.
          } catch (InterruptedException e) {
            // Restore the interrupt status instead of silently dropping it.
            Thread.currentThread().interrupt();
          } catch (ExecutionException | TimeoutException e) {
            // Try the next future / the next round.
          }
        }
        if (isReported) {
          break;
        }
      }
    } finally {
      // Cancel the report tasks. No impact on the successfully executed task.
      for (Future<Void> future : futures) {
        if (future != null) {
          future.cancel(true);
        }
      }
      // The pool was previously leaked: one fixed thread pool per report call.
      executorService.shutdownNow();
    }
    if (!isReported) {
      throw new IOException("Failed to report access event to Smart Server!");
    }
  }

  /**
   * Reset smart server address in conf and a local file to reflect the
   * changes of active smart server in fail over.
   */
  public void onNewActiveSmartServer() {
    List<String> rpcAddrs = new LinkedList<>();
    for (SmartClientProtocol s : serverQue) {
      rpcAddrs.add(serverToRpcAddr.get(s));
    }
    conf.set(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY,
        StringUtil.join(",", rpcAddrs));
    String addr = serverToRpcAddr.get(serverQue.getFirst());
    recordActiveServerAddr(addr);
  }

  /**
   * Queries the state of a file, failing over across servers. If every server
   * is unreachable, the file is assumed normal and the next access report for
   * it is suppressed once.
   */
  @Override
  public FileState getFileState(String filePath) throws IOException {
    checkOpen();
    int triedServerNum = 0;
    while (true) {
      try {
        SmartClientProtocol server = serverQue.getFirst();
        return server.getFileState(filePath);
      } catch (ConnectException e) {
        triedServerNum++;
        // If all servers has been tried, interrupt and throw the exception.
        if (triedServerNum == serverQue.size()) {
          // client cannot connect to server
          // don't report access event for this file this time
          singleIgnoreList.put(filePath, 0);
          // Assume the given file is normal, but serious error can occur if
          // the file is compacted or compressed by SSM.
          return new NormalFileState(filePath);
        }
        // Put the first server to last, and will pick the second one to try.
        serverQue.addLast(serverQue.pollFirst());
      }
    }
  }

  /**
   * Decides whether an access report for the given path should be skipped:
   * one-shot suppressions, ignored directories, and (when a cover list is
   * configured) any path outside the covered directories.
   */
  public boolean shouldIgnore(String path) {
    if (singleIgnoreList.containsKey(path)) {
      // this report should be ignored
      singleIgnoreList.remove(path);
      return true;
    }
    String toCheck = path.endsWith("/") ? path : path + "/";
    for (String s : ignoreAccessEventDirs) {
      if (toCheck.startsWith(s)) {
        return true;
      }
    }
    if (coverAccessEventDirs.isEmpty()) {
      return false;
    }
    for (String s : coverAccessEventDirs) {
      if (toCheck.startsWith(s)) {
        return false;
      }
    }
    return true;
  }

  /** Stops all RPC proxies; subsequent calls fail with "SmartClient closed". */
  @Override
  public void close() {
    if (running) {
      running = false;
      for (SmartClientProtocol server : serverQue) {
        RPC.stopProxy(server);
      }
      serverQue = null;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/hdfs/client/TestSmartDFSClient.java | smart-server/src/test/java/org/smartdata/hdfs/client/TestSmartDFSClient.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.client;
import com.google.gson.Gson;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.hdfs.action.SmallFileCompactAction;
import org.smartdata.server.MiniSmartClusterHarness;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
/**
 * Verifies that SmartDFSClient transparently handles small files that have
 * been compacted into a container file (block locations, file info, rename,
 * delete).
 */
public class TestSmartDFSClient extends MiniSmartClusterHarness {

  /** Creates three 9-byte files and compacts two of them into a container file. */
  private void createSmallFiles() throws Exception {
    Path path = new Path("/test/small_files/");
    dfs.mkdirs(path);
    for (int i = 0; i < 3; i++) {
      String fileName = "/test/small_files/file_" + i;
      FSDataOutputStream out = dfs.create(new Path(fileName), (short) 1);
      long fileLen = 9;
      byte[] buf = new byte[50];
      Random rb = new Random(2018); // fixed seed keeps the test deterministic
      rb.nextBytes(buf);
      out.write(buf, 0, (int) fileLen);
      out.close();
    }
    // Compact small files
    SmallFileCompactAction smallFileCompactAction = new SmallFileCompactAction();
    smallFileCompactAction.setDfsClient(dfsClient);
    smallFileCompactAction.setContext(smartContext);
    Map<String, String> args = new HashMap<>();
    List<String> smallFileList = new ArrayList<>();
    smallFileList.add("/test/small_files/file_0");
    smallFileList.add("/test/small_files/file_1");
    args.put(SmallFileCompactAction.FILE_PATH, new Gson().toJson(smallFileList));
    args.put(SmallFileCompactAction.CONTAINER_FILE,
        "/test/small_files/container_file_3");
    smallFileCompactAction.init(args);
    smallFileCompactAction.run();
  }

  @Test
  public void testSmallFile() throws Exception {
    waitTillSSMExitSafeMode();
    createSmallFiles();
    SmartDFSClient smartDFSClient = new SmartDFSClient(smartContext.getConf());
    BlockLocation[] blockLocations = smartDFSClient.getBlockLocations(
        "/test/small_files/file_0", 0, 30);
    // JUnit's assertEquals takes the expected value first; the previous
    // argument order produced a misleading failure message.
    Assert.assertEquals(1, blockLocations.length);
    HdfsFileStatus fileInfo = smartDFSClient.getFileInfo(
        "/test/small_files/file_0");
    Assert.assertEquals(9, fileInfo.getLen());
    // Rename of a compacted small file must be reflected on the real FS.
    smartDFSClient.rename("/test/small_files/file_0", "/test/small_files/file_5");
    Assert.assertFalse(dfsClient.exists("/test/small_files/file_0"));
    Assert.assertTrue(dfsClient.exists("/test/small_files/file_5"));
    smartDFSClient.delete("/test/small_files/file_5", false);
    Assert.assertFalse(dfsClient.exists("/test/small_files/file_5"));
    // Recursive delete removes the remaining compacted file as well.
    smartDFSClient.delete("/test/small_files", true);
    Assert.assertFalse(dfsClient.exists("/test/small_files/file_1"));
  }

  @After
  public void tearDown() throws Exception {
    dfs.getClient().delete("/test", true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/hadoop/filesystem/TestSmartFileSystem.java | smart-server/src/test/java/org/smartdata/hadoop/filesystem/TestSmartFileSystem.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hadoop.filesystem;
import com.google.gson.Gson;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.action.SmallFileCompactAction;
import org.smartdata.server.MiniSmartClusterHarness;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
/**
 * Verifies that SmartFileSystem transparently handles small files that have
 * been compacted into a container file (rename and delete).
 */
public class TestSmartFileSystem extends MiniSmartClusterHarness {
  private SmartFileSystem smartFileSystem;

  @Before
  @Override
  public void init() throws Exception {
    super.init();
    this.smartFileSystem = new SmartFileSystem();
    SmartConf conf = smartContext.getConf();
    // Initialize against the first internal namenode URI of the mini cluster.
    Collection<URI> nameNodes = DFSUtil.getInternalNsRpcUris(conf);
    smartFileSystem.initialize(new ArrayList<>(
        nameNodes).get(0), smartContext.getConf());
    createSmallFiles();
  }

  /** Creates two 8-byte files and compacts both into a container file. */
  private void createSmallFiles() throws Exception {
    Path smallFilePath = new Path("/test/small_files/");
    dfs.mkdirs(smallFilePath);
    Path containerPath = new Path("/test/container_files/");
    dfs.mkdirs(containerPath);
    for (int i = 0; i < 2; i++) {
      String fileName = "/test/small_files/file_" + i;
      FSDataOutputStream out = dfs.create(new Path(fileName), (short) 1);
      long fileLen = 8;
      byte[] buf = new byte[50];
      Random rb = new Random(2018); // fixed seed keeps the test deterministic
      rb.nextBytes(buf);
      out.write(buf, 0, (int) fileLen);
      out.close();
    }
    SmallFileCompactAction smallFileCompactAction = new SmallFileCompactAction();
    smallFileCompactAction.setDfsClient(dfsClient);
    smallFileCompactAction.setContext(smartContext);
    Map<String, String> args = new HashMap<>();
    List<String> smallFileList = new ArrayList<>();
    smallFileList.add("/test/small_files/file_0");
    smallFileList.add("/test/small_files/file_1");
    args.put(SmallFileCompactAction.FILE_PATH, new Gson().toJson(smallFileList));
    args.put(SmallFileCompactAction.CONTAINER_FILE,
        "/test/small_files/container_file_6");
    smallFileCompactAction.init(args);
    smallFileCompactAction.run();
  }

  @Test
  public void testSmartFileSystem() throws Exception {
    waitTillSSMExitSafeMode();
    // Rename of a compacted small file must be reflected on the real FS.
    smartFileSystem.rename(new Path("/test/small_files/file_0"),
        new Path("/test/small_files/file_5"));
    Assert.assertFalse(dfsClient.exists("/test/small_files/file_0"));
    Assert.assertTrue(dfsClient.exists("/test/small_files/file_5"));
    // Deleting the already-renamed source path is a no-op.
    smartFileSystem.delete(new Path("/test/small_files/file_0"), false);
    Assert.assertFalse(dfsClient.exists("/test/small_files/file_0"));
    // Recursive delete removes every remaining compacted file.
    smartFileSystem.delete(new Path("/test/small_files"), true);
    Assert.assertFalse(dfsClient.exists("/test/small_files/file_1"));
    Assert.assertFalse(dfsClient.exists("/test/small_files/file_5"));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestSmartServerLogin.java | smart-server/src/test/java/org/smartdata/server/TestSmartServerLogin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.kerby.kerberos.kerb.server.SimpleKdcServer;
import org.apache.kerby.util.NetworkUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.MiniClusterFactory;
import org.smartdata.metastore.TestDBUtil;
import org.smartdata.metastore.utils.MetaStoreUtils;
import java.io.File;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
/**
 * Verifies that SmartServer can authenticate with Kerberos at startup using a
 * keytab exported from an in-process KDC ({@link SimpleKdcServer}).
 */
public class TestSmartServerLogin {
  private final String keytabFileName = "smart.keytab";
  private final String principal = "ssmroot@EXAMPLE.COM";

  private SimpleKdcServer kdc;
  private String kdcHost = "localhost";
  private int kdcTcpPort = -1;
  private SmartConf smartConf;
  private MiniDFSCluster miniCluster;
  private String metaStoreDbFile;
  private String metaStoreDbUrl;
  private SmartServer smartServer;

  /** Boots a TCP-only KDC on a dynamically chosen free port before each test. */
  @Before
  public void setupKdcServer() throws Exception {
    kdc = new SimpleKdcServer();
    kdc.setKdcHost(kdcHost);
    kdc.setAllowUdp(false);
    kdc.setAllowTcp(true);
    kdcTcpPort = NetworkUtil.getServerPort();
    kdc.setKdcTcpPort(kdcTcpPort);
    kdc.init();
    kdc.start();
  }

  /**
   * Builds a SmartConf backed by a fresh 3-node mini DFS cluster and a unique
   * SQLite metastore, with SSM security enabled and pointed at the keytab.
   */
  private void initConf() throws Exception {
    smartConf = new SmartConf();
    miniCluster = MiniClusterFactory.get().create(3, smartConf);
    List<URI> nnUris = new ArrayList<>(DFSUtil.getInternalNsRpcUris(smartConf));
    String firstNameNode = nnUris.get(0).toString();
    smartConf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, firstNameNode);
    smartConf.set(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY, firstNameNode);
    // Back the metastore with a throw-away SQLite database file.
    metaStoreDbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
    metaStoreDbUrl = MetaStoreUtils.SQLITE_URL_PREFIX + metaStoreDbFile;
    smartConf.set(SmartConfKeys.SMART_METASTORE_DB_URL_KEY, metaStoreDbUrl);
    smartConf.setBoolean(SmartConfKeys.SMART_SECURITY_ENABLE, true);
    smartConf.set(SmartConfKeys.SMART_SERVER_KEYTAB_FILE_KEY, keytabFileName);
    smartConf.set(SmartConfKeys.SMART_SERVER_KERBEROS_PRINCIPAL_KEY, principal);
  }

  /** Exports {@code principal} into {@code keytabFileName} and returns the file. */
  private File generateKeytab(String keytabFileName, String principal) throws Exception {
    File keytab = new File(keytabFileName);
    kdc.createAndExportPrincipals(keytab, principal);
    return keytab;
  }

  @Test
  public void loginSmartServerUsingKeytab() throws Exception {
    initConf();
    generateKeytab(keytabFileName, principal);
    // Launch fails with a login exception if the Kerberos setup is broken.
    smartServer = SmartServer.launchWith(smartConf);
  }

  /** Removes the keytab, then stops the KDC, SSM server, and DFS cluster. */
  @After
  public void tearDown() throws Exception {
    File keytab = new File(keytabFileName);
    if (keytab.exists()) {
      keytab.delete();
    }
    if (kdc != null) {
      kdc.stop();
    }
    if (smartServer != null) {
      smartServer.shutdown();
    }
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/MiniSmartClusterHarness.java | smart-server/src/test/java/org/smartdata/server/MiniSmartClusterHarness.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.hadoop.hdfs.DFSUtil;
import org.junit.After;
import org.junit.Before;
import org.smartdata.SmartServiceState;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.MiniClusterWithStoragesHarness;
import org.smartdata.metastore.TestDBUtil;
import org.smartdata.metastore.utils.MetaStoreUtils;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
/**
 * JUnit harness that runs a {@link SmartServer} on top of the storage-enabled
 * mini DFS cluster provided by {@link MiniClusterWithStoragesHarness}.
 */
public class MiniSmartClusterHarness extends MiniClusterWithStoragesHarness {
  // The SSM server under test; available to subclasses after init().
  protected SmartServer ssm;
  private String metaDbFile;
  private String metaDbUrl;

  @Before
  @Override
  public void init() throws Exception {
    super.init();
    SmartConf conf = smartContext.getConf();
    // Point SSM at the first name node of the mini cluster.
    List<URI> nnUris = new ArrayList<>(DFSUtil.getInternalNsRpcUris(conf));
    String firstNameNode = nnUris.get(0).toString();
    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, firstNameNode);
    conf.set(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY, firstNameNode);
    // Back the metastore with a unique throw-away SQLite database.
    metaDbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
    metaDbUrl = MetaStoreUtils.SQLITE_URL_PREFIX + metaDbFile;
    conf.set(SmartConfKeys.SMART_METASTORE_DB_URL_KEY, metaDbUrl);
    // The RPC server is started inside SmartServer.launchWith().
    ssm = SmartServer.launchWith(conf);
  }

  /**
   * Polls the server state once per second until it leaves SAFEMODE.
   * Transient RPC failures are tolerated up to five times before rethrowing.
   */
  public void waitTillSSMExitSafeMode() throws Exception {
    // NOTE(review): this SmartAdmin client is never closed — confirm whether
    // it holds an RPC connection that should be released.
    SmartAdmin client = new SmartAdmin(smartContext.getConf());
    long startTime = System.currentTimeMillis();
    int attemptsLeft = 5;
    boolean inSafeMode = true;
    while (inSafeMode) {
      try {
        inSafeMode = client.getServiceState() == SmartServiceState.SAFEMODE;
        if (inSafeMode) {
          long waitedSecs = (System.currentTimeMillis() - startTime) / 1000;
          System.out.println("Waited for " + waitedSecs + " seconds ...");
          Thread.sleep(1000);
        }
      } catch (Exception e) {
        if (attemptsLeft <= 0) {
          throw e;
        }
        attemptsLeft--;
      }
    }
  }

  /** Stops the SSM server first, then the underlying mini cluster. */
  @After
  @Override
  public void shutdown() throws IOException {
    if (ssm != null) {
      ssm.shutdown();
    }
    super.shutdown();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestCopyScheduler.java | smart-server/src/test/java/org/smartdata/server/TestCopyScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
// NOTE(review): the entire TestCopyScheduler suite below is commented out;
// re-enable it or delete the dead code once the sync scheduler stabilizes.
/*import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.BackUpInfo;
import org.smartdata.model.FileDiff;
import org.smartdata.model.FileDiffState;
import org.smartdata.model.FileDiffType;
import org.smartdata.model.FileInfo;
import org.smartdata.model.RuleState;
import org.smartdata.server.engine.CmdletManager;
import java.util.List;
public class TestCopyScheduler extends MiniSmartClusterHarness {
@Test(timeout = 45000)
public void appendMerge() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
// SmartAdmin admin = new SmartAdmin(smartContext.getConf());
// CmdletManager cmdletManager = ssm.getCmdletManager();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
BackUpInfo backUpInfo = new BackUpInfo(1L, srcPath, destPath, 100);
metaStore.insertBackUpInfo(backUpInfo);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
for (int j = 0; j < 10; j++) {
DFSTestUtil.appendFile(dfs, new Path(srcPath + i), 1024);
}
}
do {
Thread.sleep(1500);
} while (metaStore.getPendingDiff().size() >= 30);
List<FileDiff> fileDiffs = metaStore.getFileDiffs(FileDiffState.PENDING);
Assert.assertTrue(fileDiffs.size() < 30);
}
@Test(timeout = 45000)
public void deleteMerge() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
// SmartAdmin admin = new SmartAdmin(smartContext.getConf());
// CmdletManager cmdletManager = ssm.getCmdletManager();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
BackUpInfo backUpInfo = new BackUpInfo(1L, srcPath, destPath, 100);
metaStore.insertBackUpInfo(backUpInfo);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
do {
Thread.sleep(500);
} while (!dfs.isFileClosed(new Path(srcPath + i)));
dfs.delete(new Path(srcPath + i), false);
}
Thread.sleep(1200);
List<FileDiff> fileDiffs;
fileDiffs = metaStore.getFileDiffs(FileDiffState.PENDING);
while (fileDiffs.size() != 0) {
Thread.sleep(1000);
for (FileDiff fileDiff : fileDiffs) {
System.out.println(fileDiff.toString());
}
fileDiffs = metaStore.getFileDiffs(FileDiffState.PENDING);
}
// File is not created, so clear all fileDiff
}
@Test(timeout = 45000)
public void renameMerge() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
// SmartAdmin admin = new SmartAdmin(smartContext.getConf());
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
BackUpInfo backUpInfo = new BackUpInfo(1L, srcPath, destPath, 100);
metaStore.insertBackUpInfo(backUpInfo);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
dfs.rename(new Path(srcPath + i),
new Path(srcPath + i + 10));
// Rename target ends with 10
DFSTestUtil.appendFile(dfs, new Path(srcPath + i + 10), 1024);
}
do {
Thread.sleep(1500);
} while (metaStore.getPendingDiff().size() < 9);
Thread.sleep(1000);
List<FileDiff> fileDiffs = metaStore.getFileDiffs(FileDiffState.PENDING);
for (FileDiff fileDiff : fileDiffs) {
if (fileDiff.getDiffType() == FileDiffType.APPEND) {
Assert.assertTrue(fileDiff.getSrc().endsWith("10"));
}
}
}
@Test(timeout = 45000)
public void failRetry() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
// CmdletManager cmdletManager = ssm.getCmdletManager();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
long ruleId =
admin.submitRule(
"file: every 1s | path matches \"/src/*\"| sync -dest /dest/",
RuleState.ACTIVE);
FileDiff fileDiff = new FileDiff(FileDiffType.RENAME, FileDiffState.PENDING);
fileDiff.setSrc("/src/1");
fileDiff.getParameters().put("-dest", "/src/2");
metaStore.insertFileDiff(fileDiff);
Thread.sleep(1200);
while (metaStore.getPendingDiff().size() != 0) {
Thread.sleep(1000);
}
Thread.sleep(2000);
fileDiff = metaStore.getFileDiffsByFileName("/src/1").get(0);
Assert.assertTrue(fileDiff.getState() == FileDiffState.FAILED);
}
@Test
public void testForceSync() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
CmdletManager cmdletManager = ssm.getCmdletManager();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
}
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(destPath + i + 5),
1024, (short) 1, 1);
}
// Clear file diffs
metaStore.deleteAllFileDiff();
// Submit rules and trigger forceSync
long ruleId =
admin.submitRule(
"file: every 2s | path matches \"/src/*\"| sync -dest /dest/",
RuleState.ACTIVE);
Thread.sleep(1000);
Assert.assertTrue(metaStore.getFileDiffs(FileDiffState.PENDING).size() > 0);
}
@Test(timeout = 40000)
public void batchSync() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
FileInfo fileInfo;
long now = System.currentTimeMillis();
for (int i = 0; i < 100; i++) {
fileInfo = new FileInfo(srcPath + i, i,
1024, false, (short) 3,
1024, now, now, (short) 1,
null, null, (byte) 3);
metaStore.insertFile(fileInfo);
Thread.sleep(100);
}
long ruleId =
admin.submitRule(
"file: every 2s | path matches \"/src/*\"| sync -dest /dest/",
RuleState.ACTIVE);
Thread.sleep(2200);
do {
Thread.sleep(1000);
} while (metaStore.getPendingDiff().size() != 100);
}
@Test(timeout = 60000)
public void testDelete() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
CmdletManager cmdletManager = ssm.getCmdletManager();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
long ruleId =
admin.submitRule(
"file: every 2s | path matches \"/src/*\"| sync -dest /dest/",
RuleState.ACTIVE);
FileDiff fileDiff = new FileDiff(FileDiffType.DELETE, FileDiffState.PENDING);
fileDiff.setSrc("/src/1");
metaStore.insertFileDiff(fileDiff);
Thread.sleep(1200);
do {
Thread.sleep(1000);
} while (admin.getRuleInfo(ruleId).getNumCmdsGen() == 0);
Assert.assertTrue(cmdletManager
.listNewCreatedActions("sync", 0).size() > 0);
}
@Test(timeout = 60000)
public void testRename() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
CmdletManager cmdletManager = ssm.getCmdletManager();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
long ruleId =
admin.submitRule(
"file: every 2s | path matches \"/src/*\"| sync -dest /dest/",
RuleState.ACTIVE);
FileDiff fileDiff = new FileDiff(FileDiffType.RENAME, FileDiffState.PENDING);
fileDiff.setSrc("/src/1");
fileDiff.getParameters().put("-dest", "/src/2");
metaStore.insertFileDiff(fileDiff);
Thread.sleep(1200);
do {
Thread.sleep(1000);
} while (admin.getRuleInfo(ruleId).getNumCmdsGen() == 0);
Assert.assertTrue(cmdletManager
.listNewCreatedActions("sync", 0).size() > 0);
}
@Test
public void testMeta() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
CmdletManager cmdletManager = ssm.getCmdletManager();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
long ruleId =
admin.submitRule(
"file: every 2s | path matches \"/src/*\"| sync -dest /dest/",
RuleState.ACTIVE);
Thread.sleep(4200);
// Write to src
DFSTestUtil.createFile(dfs, new Path(srcPath + 1),
1024, (short) 1, 1);
Thread.sleep(1000);
FileDiff fileDiff = new FileDiff(FileDiffType.METADATA, FileDiffState.PENDING);
fileDiff.setSrc("/src/1");
fileDiff.getParameters().put("-permission", "777");
metaStore.insertFileDiff(fileDiff);
do {
Thread.sleep(1000);
} while (admin.getRuleInfo(ruleId).getNumCmdsGen() == 0);
while (metaStore.getPendingDiff().size() != 0) {
Thread.sleep(500);
}
Thread.sleep(1000);
FileStatus fileStatus = dfs.getFileStatus(new Path(destPath + 1));
Assert.assertTrue(fileStatus.getPermission().toString().equals("rwxrwxrwx"));
}
@Test(timeout = 40000)
public void testCache() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
CmdletManager cmdletManager = ssm.getCmdletManager();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
// Submit sync rule
long ruleId =
admin.submitRule(
"file: every 2s | path matches \"/src/*\"| sync -dest " + destPath,
RuleState.ACTIVE);
Thread.sleep(2000);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
}
do {
Thread.sleep(1000);
} while (admin.getRuleInfo(ruleId).getNumCmdsGen() <= 2);
List<ActionInfo> actionInfos = cmdletManager
.listNewCreatedActions("sync", 0);
Assert.assertTrue(actionInfos.size() >= 3);
Thread.sleep(20000);
}
@Test(timeout = 40000)
public void testWithSyncRule() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
CmdletManager cmdletManager = ssm.getCmdletManager();
// metaStore.deleteAllFileDiff();
// metaStore.deleteAllFileInfo();
// metaStore.deleteAllCmdlets();
// metaStore.deleteAllActions();
// metaStore.deleteAllRules();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
// Submit sync rule
long ruleId =
admin.submitRule(
"file: every 2s | path matches \"/src/*\"| sync -dest " + destPath,
RuleState.ACTIVE);
Thread.sleep(2000);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
}
do {
Thread.sleep(1000);
} while (admin.getRuleInfo(ruleId).getNumCmdsGen() <= 2);
List<ActionInfo> actionInfos = cmdletManager
.listNewCreatedActions("sync", 0);
Assert.assertTrue(actionInfos.size() >= 3);
do {
Thread.sleep(800);
} while (metaStore.getPendingDiff().size() != 0);
for (int i = 0; i < 3; i++) {
// Check 3 files
Assert.assertTrue(dfs.exists(new Path(destPath + i)));
System.out.printf("File %d is copied.\n", i);
}
}
@Test(timeout = 60000)
public void testCopy() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
// metaStore.deleteAllFileDiff();
// metaStore.deleteAllFileInfo();
// metaStore.deleteAllCmdlets();
// metaStore.deleteAllActions();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
BackUpInfo backUpInfo = new BackUpInfo(1L, srcPath, destPath, 100);
metaStore.insertBackUpInfo(backUpInfo);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
}
Thread.sleep(1000);
CmdletManager cmdletManager = ssm.getCmdletManager();
// Submit sync action
for (int i = 0; i < 3; i++) {
// Create test files
cmdletManager.submitCmdlet(
"sync -file /src/" + i + " -src " + srcPath + " -dest " + destPath);
}
List<ActionInfo> actionInfos = cmdletManager
.listNewCreatedActions("sync", 0);
Assert.assertTrue(actionInfos.size() >= 3);
do {
Thread.sleep(1000);
} while (cmdletManager.getActionsSizeInCache() + cmdletManager.getCmdletsSizeInCache() > 0);
for (int i = 0; i < 3; i++) {
// Write 10 files
Assert.assertTrue(dfs.exists(new Path(destPath + i)));
System.out.printf("File %d is copied.\n", i);
}
}
@Test(timeout = 40000)
public void testEmpyDelete() throws Exception {
// Delete files not exist on standby cluster
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
// metaStore.deleteAllFileDiff();
// metaStore.deleteAllFileInfo();
// metaStore.deleteAllCmdlets();
// metaStore.deleteAllActions();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
}
Thread.sleep(500);
BackUpInfo backUpInfo = new BackUpInfo(1L, srcPath, destPath, 100);
metaStore.insertBackUpInfo(backUpInfo);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
Thread.sleep(100);
for (int i = 0; i < 3; i++) {
// delete test files on primary cluster
dfs.delete(new Path(srcPath + i), false);
}
Thread.sleep(2000);
CmdletManager cmdletManager = ssm.getCmdletManager();
// Submit sync action
for (int i = 0; i < 3; i++) {
// Create test files
cmdletManager.submitCmdlet(
"sync -file /src/" + i + " -src " + srcPath + " -dest " + destPath);
}
}
@Test(timeout = 40000)
public void testCopyDelete() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
// metaStore.deleteAllFileDiff();
// metaStore.deleteAllFileInfo();
// metaStore.deleteAllCmdlets();
// metaStore.deleteAllActions();
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
final String destPath = "/dest/";
BackUpInfo backUpInfo = new BackUpInfo(1L, srcPath, destPath, 100);
metaStore.insertBackUpInfo(backUpInfo);
dfs.mkdirs(new Path(srcPath));
dfs.mkdirs(new Path(destPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
dfs.delete(new Path(srcPath + i), false);
}
Thread.sleep(2000);
CmdletManager cmdletManager = ssm.getCmdletManager();
// Submit sync action
for (int i = 0; i < 3; i++) {
// Create test files
cmdletManager.submitCmdlet(
"sync -file /src/" + i + " -src " + srcPath + " -dest " + destPath);
}
}
}*/
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.