repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/EnableErasureCodingPolicy.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/EnableErasureCodingPolicy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import java.util.Map;
/**
* An action to enable an EC policy.
*/
@ActionSignature(
actionId = "enableec",
displayName = "enableec",
usage = EnableErasureCodingPolicy.EC_POLICY_NAME + " $policy"
)
public class EnableErasureCodingPolicy extends HdfsAction {
  public static final String EC_POLICY_NAME = "-policy";

  private SmartConf conf;
  private String policyName;

  /**
   * Captures the Smart configuration and the target EC policy name.
   *
   * @param args action arguments; {@value #EC_POLICY_NAME} selects the policy
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    conf = getContext().getConf();
    policyName = args.get(EC_POLICY_NAME);
  }

  /**
   * Enables the configured EC policy on the NameNode.
   *
   * @throws Exception if the DFS client cannot be created or the RPC fails
   */
  @Override
  public void execute() throws Exception {
    // Talk to the NameNode through a plain DFSClient.
    setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    dfsClient.enableErasureCodingPolicy(policyName);
    appendLog(String.format("The EC policy named %s is enabled!", policyName));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/ListErasureCodingPolicy.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/ListErasureCodingPolicy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import java.util.Map;
/**
* An action to list the info for all EC policies in HDFS.
*/
@ActionSignature(
actionId = "listec",
displayName = "listec",
usage = "No args"
)
public class ListErasureCodingPolicy extends HdfsAction {
  private SmartConf conf;

  /** Captures the Smart configuration for later NameNode access. */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    conf = getContext().getConf();
  }

  /**
   * Logs one line, wrapped in braces, for every EC policy known to the
   * NameNode.
   *
   * @throws Exception if the DFS client cannot be created or the RPC fails
   */
  @Override
  public void execute() throws Exception {
    setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    ErasureCodingPolicyInfo[] policies = dfsClient.getErasureCodingPolicies();
    for (ErasureCodingPolicyInfo info : policies) {
      appendLog("{" + info + "}");
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/DisableErasureCodingPolicy.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/DisableErasureCodingPolicy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import java.util.Map;
/**
* An action to disable an EC policy.
*/
@ActionSignature(
actionId = "disableec",
displayName = "disableec",
usage = DisableErasureCodingPolicy.EC_POLICY_NAME + " $policy"
)
public class DisableErasureCodingPolicy extends HdfsAction {
  public static final String EC_POLICY_NAME = "-policy";

  private SmartConf conf;
  private String policyName;

  /**
   * Captures the Smart configuration and the target EC policy name.
   *
   * @param args action arguments; {@value #EC_POLICY_NAME} selects the policy
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    conf = getContext().getConf();
    policyName = args.get(EC_POLICY_NAME);
  }

  /**
   * Disables the configured EC policy on the NameNode.
   *
   * @throws Exception if the DFS client cannot be created or the RPC fails
   */
  @Override
  public void execute() throws Exception {
    // Talk to the NameNode through a plain DFSClient.
    setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    dfsClient.disableErasureCodingPolicy(policyName);
    appendLog(String.format("The EC policy named %s is disabled!", policyName));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/ErasureCodingBase.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/ErasureCodingBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.conf.SmartConf;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;
/**
* An abstract base class for ErasureCodingAction & UnErasureCodingAction.
*/
/**
 * Shared machinery for {@code ErasureCodingAction} and
 * {@code UnErasureCodingAction}: copies a file into a temp path under a
 * target EC policy and mirrors the source file's attributes onto the copy.
 */
abstract public class ErasureCodingBase extends HdfsAction {
  private static final Logger LOG =
      LoggerFactory.getLogger(ErasureCodingBase.class);
  public static final String BUF_SIZE = "-bufSize";
  protected String srcPath;
  protected String ecTmpPath;
  protected int bufferSize = 1024 * 1024;
  protected float progress;
  protected SmartConf conf;
  protected String ecPolicyName;
  // The value for -ecTmp is assigned by ErasureCodingScheduler.
  public static final String EC_TMP = "-ecTmp";
  public static final String REPLICATION_POLICY_NAME =
      SystemErasureCodingPolicies.getReplicationPolicy().getName();

  /**
   * Copies {@code srcPath} to {@code ecTmpPath}, creating the destination
   * with {@code ecPolicyName} and the source's permission, and carrying over
   * the source's storage policy unless it is UNDEF. Updates {@link #progress}
   * as bytes are copied.
   *
   * @param srcFileStatus status of the source file (length and permission are
   *                      read from it)
   * @throws ActionException wrapping any failure during the copy
   */
  protected void convert(HdfsFileStatus srcFileStatus) throws ActionException {
    DFSInputStream in = null;
    DFSOutputStream out = null;
    try {
      long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
          DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
      in = dfsClient.open(srcPath, bufferSize, true);
      short replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
      // use the same FsPermission as srcPath
      FsPermission permission = srcFileStatus.getPermission();
      out = dfsClient.create(ecTmpPath, permission, EnumSet.of(CreateFlag.CREATE), true,
          replication, blockSize, null, bufferSize, null, null, ecPolicyName);
      // Keep storage policy according with original file except UNDEF storage policy
      String storagePolicyName = dfsClient.getStoragePolicy(srcPath).getName();
      if (!storagePolicyName.equals("UNDEF")) {
        dfsClient.setStoragePolicy(ecTmpPath, storagePolicyName);
      }
      long bytesRemaining = srcFileStatus.getLen();
      byte[] buf = new byte[bufferSize];
      while (bytesRemaining > 0L) {
        // Read at most one buffer's worth, never past the recorded length.
        int bytesToRead = (int) Math.min(bytesRemaining, (long) buf.length);
        int bytesRead = in.read(buf, 0, bytesToRead);
        if (bytesRead == -1) {
          // Unexpected EOF before the recorded length was reached.
          break;
        }
        out.write(buf, 0, bytesRead);
        bytesRemaining -= (long) bytesRead;
        this.progress = (float) (srcFileStatus.getLen() - bytesRemaining) / srcFileStatus.getLen();
      }
    } catch (Exception ex) {
      throw new ActionException(ex);
    } finally {
      // Close each stream independently so a failure closing the input
      // cannot leak the output stream (and vice versa).
      if (in != null) {
        try {
          in.close();
        } catch (IOException ex) {
          LOG.error("IOException occurred when closing DFSInputStream or DFSOutputStream!", ex);
        }
      }
      if (out != null) {
        try {
          out.close();
        } catch (IOException ex) {
          LOG.error("IOException occurred when closing DFSInputStream or DFSOutputStream!", ex);
        }
      }
    }
  }

  // set attributes for dest to keep them consistent with their counterpart of src
  /**
   * Copies owner/group, permission, storage policy, times, ACLs (when
   * enabled), and xattrs from {@code src} to {@code dest}.
   *
   * @param src        path whose attributes are read
   * @param fileStatus pre-fetched status of {@code src} (used for owner,
   *                   permission, and times)
   * @param dest       path receiving the attributes
   * @throws IOException if any attribute update fails
   */
  protected void setAttributes(String src, HdfsFileStatus fileStatus, String dest)
      throws IOException {
    dfsClient.setOwner(dest, fileStatus.getOwner(), fileStatus.getGroup());
    dfsClient.setPermission(dest, fileStatus.getPermission());
    dfsClient.setStoragePolicy(dest, dfsClient.getStoragePolicy(src).getName());
    dfsClient.setTimes(dest, fileStatus.getModificationTime(), fileStatus.getAccessTime());
    boolean aclsEnabled = getContext().getConf().getBoolean(
        DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
        DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
    if (aclsEnabled) {
      dfsClient.setAcl(dest, dfsClient.getAclStatus(src).getEntries());
    }
    //TODO: check ec related record to avoid paradox
    for (Map.Entry<String, byte[]> entry : dfsClient.getXAttrs(src).entrySet()) {
      dfsClient.setXAttr(dest, entry.getKey(), entry.getValue(),
          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    }
  }
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/ErasureCodingAction.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/ErasureCodingAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.utils.StringUtil;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
/**
* An action to set an EC policy for a dir or convert a file to another one in an EC policy.
* Default values are used for arguments of policy & bufSize if their values are not given in this action.
*/
@ActionSignature(
actionId = "ec",
displayName = "ec",
usage = HdfsAction.FILE_PATH + " $src " + ErasureCodingAction.EC_POLICY_NAME + " $policy" +
ErasureCodingBase.BUF_SIZE + " $bufSize"
)
public class ErasureCodingAction extends ErasureCodingBase {
  private static final Logger LOG =
      LoggerFactory.getLogger(ErasureCodingAction.class);
  public static final String EC_POLICY_NAME = "-policy";

  /**
   * Parses arguments: source path, optional temp path (set by the
   * scheduler), EC policy name (falls back to the cluster default when
   * absent or empty), and buffer size.
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.conf = getContext().getConf();
    this.srcPath = args.get(FILE_PATH);
    if (args.containsKey(EC_TMP)) {
      // this is a temp file kept for converting a file to another with other ec policy.
      this.ecTmpPath = args.get(EC_TMP);
    }
    if (args.containsKey(EC_POLICY_NAME) && !args.get(EC_POLICY_NAME).isEmpty()) {
      this.ecPolicyName = args.get(EC_POLICY_NAME);
    } else {
      // Fall back to the cluster-wide default EC policy.
      this.ecPolicyName = conf.getTrimmed(
          DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
          DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
    }
    if (args.containsKey(BUF_SIZE) && !args.get(BUF_SIZE).isEmpty()) {
      this.bufferSize = (int) StringUtil.parseToByte(args.get(BUF_SIZE));
    }
    this.progress = 0.0F;
  }

  /**
   * Sets the EC policy on a directory, or converts a file to the target
   * policy by copying it to a temp path and renaming it over the source.
   * No-ops (with progress set to 1) when the current policy already matches.
   *
   * @throws Exception if the file is missing, the policy is invalid, or the
   *                   conversion fails
   */
  @Override
  protected void execute() throws Exception {
    final String MATCH_RESULT =
        "The current EC policy is already matched with the target one.";
    final String DIR_RESULT =
        "The EC policy is set successfully for the given directory.";
    final String CONVERT_RESULT =
        "The file is converted successfully with the given or default EC policy.";
    // Make sure DFSClient is used instead of SmartDFSClient.
    this.setDfsClient(HadoopUtil.getDFSClient(
        HadoopUtil.getNameNodeUri(conf), conf));
    // keep attribute consistent
    HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
    if (fileStatus == null) {
      throw new ActionException("File doesn't exist!");
    }
    validateEcPolicy(ecPolicyName);
    ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
    // if the current ecPolicy is already the target one, no need to convert
    if (srcEcPolicy != null) {
      if (srcEcPolicy.getName().equals(ecPolicyName)) {
        appendLog(MATCH_RESULT);
        this.progress = 1.0F;
        return;
      }
    } else {
      // if ecPolicy is null, it means replication.
      if (ecPolicyName.equals(REPLICATION_POLICY_NAME)) {
        appendLog(MATCH_RESULT);
        this.progress = 1.0F;
        return;
      }
    }
    if (fileStatus.isDir()) {
      // Directories only record the policy; no data movement is needed.
      dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
      this.progress = 1.0F;
      appendLog(DIR_RESULT);
      return;
    }
    HdfsDataOutputStream outputStream = null;
    try {
      // a file only with replication policy can be appended.
      if (srcEcPolicy == null) {
        // append the file to acquire the lock to avoid modifying, real appending wouldn't occur.
        outputStream =
            dfsClient.append(srcPath, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
      }
      convert(fileStatus);
      /**
       * The append operation will change the modification time accordingly,
       * so we use the FileStatus obtained before append to set ecTmp file's most attributes
       */
      setAttributes(srcPath, fileStatus, ecTmpPath);
      dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
      appendLog(CONVERT_RESULT);
      if (srcEcPolicy == null) {
        appendLog("The previous EC policy is replication.");
      } else {
        appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
      }
      appendLog(String.format("The current EC policy is %s.", ecPolicyName));
    } catch (ActionException ex) {
      // Best-effort cleanup of the partially written temp file.
      try {
        if (dfsClient.getFileInfo(ecTmpPath) != null) {
          dfsClient.delete(ecTmpPath, false);
        }
      } catch (IOException e) {
        LOG.error("Failed to delete tmp file created during the conversion!");
      }
      throw new ActionException(ex);
    } finally {
      if (outputStream != null) {
        try {
          outputStream.close();
        } catch (IOException ex) {
          // Hide the expected exception that the original file is missing.
        }
      }
    }
  }

  /**
   * Rejects policy names the NameNode does not know, and known policies that
   * are disabled or removed. The replication pseudo-policy is always allowed.
   *
   * @param ecPolicyName policy name to validate
   * @throws Exception if the policy is unsupported, disabled, or removed
   */
  public void validateEcPolicy(String ecPolicyName) throws Exception {
    Map<String, ErasureCodingPolicyState> ecPolicyNameToState = new HashMap<>();
    for (ErasureCodingPolicyInfo info : dfsClient.getErasureCodingPolicies()) {
      ecPolicyNameToState.put(info.getPolicy().getName(), info.getState());
    }
    // containsKey instead of keySet().contains() — same semantics, direct map lookup.
    if (!ecPolicyNameToState.containsKey(ecPolicyName) && !ecPolicyName.equals(REPLICATION_POLICY_NAME)) {
      throw new ActionException("The EC policy " + ecPolicyName + " is not supported!");
    } else if (ecPolicyNameToState.get(ecPolicyName) == ErasureCodingPolicyState.DISABLED
        || ecPolicyNameToState.get(ecPolicyName) == ErasureCodingPolicyState.REMOVED) {
      throw new ActionException("The EC policy " + ecPolicyName + " is disabled or removed!");
    }
  }

  @Override
  public float getProgress() {
    return progress;
  }
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/RemoveErasureCodingPolicy.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/RemoveErasureCodingPolicy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import java.util.Map;
/**
* An action to remove an EC policy.
*/
@ActionSignature(
actionId = "removeec",
displayName = "removeec",
usage = RemoveErasureCodingPolicy.EC_POLICY_NAME + " $policy"
)
public class RemoveErasureCodingPolicy extends HdfsAction {
  public static final String EC_POLICY_NAME = "-policy";

  private SmartConf conf;
  private String policyName;

  /**
   * Captures the Smart configuration and the target EC policy name.
   *
   * @param args action arguments; {@value #EC_POLICY_NAME} selects the policy
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    conf = getContext().getConf();
    policyName = args.get(EC_POLICY_NAME);
  }

  /**
   * Removes the configured EC policy from the NameNode.
   *
   * @throws Exception if the DFS client cannot be created or the RPC fails
   */
  @Override
  public void execute() throws Exception {
    // Talk to the NameNode through a plain DFSClient.
    setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    dfsClient.removeErasureCodingPolicy(policyName);
    appendLog(String.format("The EC policy named %s is removed!", policyName));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/AddErasureCodingPolicy.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/AddErasureCodingPolicy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.smartdata.action.ActionException;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.utils.StringUtil;
import java.util.Map;
/**
* An action to add an EC policy.
*/
@ActionSignature(
actionId = "addec",
displayName = "addec",
usage = AddErasureCodingPolicy.POLICY_NAME + "$policyName" +
AddErasureCodingPolicy.CODEC_NAME + " $codeName" +
AddErasureCodingPolicy.DATA_UNITS_NUM + " $dataNum" +
AddErasureCodingPolicy.PARITY_UNITS_NUM + " $parityNum" +
AddErasureCodingPolicy.CELL_SIZE + " $cellSize"
)
public class AddErasureCodingPolicy extends HdfsAction {
  public static final String POLICY_NAME = "-policyName";
  public static final String CODEC_NAME = "-codec";
  public static final String DATA_UNITS_NUM = "-dataNum";
  public static final String PARITY_UNITS_NUM = "-parityNum";
  public static final String CELL_SIZE = "-cellSize";

  private SmartConf conf;
  private String policyName;
  private String codecName;
  private int numDataUnits;
  private int numParityUnits;
  private int cellSize;

  /**
   * Parses the policy either from a compound name of the form
   * {@code codec-dataNum-parityNum-cellSize} (e.g. "rs-6-3-1024k"), or from
   * the individual {@code -codec}/{@code -dataNum}/{@code -parityNum}/
   * {@code -cellSize} arguments. When a compound name is given, the
   * individual arguments are ignored.
   *
   * NOTE(review): the compound form splits on '-' and requires exactly 4
   * parts, so codec names containing '-' (e.g. "rs-legacy") cannot be
   * expressed this way — use the individual arguments instead. Malformed
   * numeric parts propagate a NumberFormatException to the caller.
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.conf = getContext().getConf();
    if (args.get(POLICY_NAME) != null && !args.get(POLICY_NAME).isEmpty()) {
      this.policyName = args.get(POLICY_NAME);
      String[] policySchema = policyName.split("-");
      if (policySchema.length != 4) {
        // Leave schema fields unset; execute() rejects them with a clear error.
        return;
      }
      this.codecName = policySchema[0].toLowerCase();
      this.numDataUnits = Integer.parseInt(policySchema[1]);
      this.numParityUnits = Integer.parseInt(policySchema[2]);
      this.cellSize = (int) StringUtil.parseToByte(policySchema[3]);
      return;
    }
    if (args.get(CODEC_NAME) != null && !args.get(CODEC_NAME).isEmpty()) {
      this.codecName = args.get(CODEC_NAME).toLowerCase();
    }
    if (args.get(DATA_UNITS_NUM) != null && !args.get(DATA_UNITS_NUM).isEmpty()) {
      this.numDataUnits = Integer.parseInt(args.get(DATA_UNITS_NUM));
    }
    if (args.get(PARITY_UNITS_NUM) != null && !args.get(PARITY_UNITS_NUM).isEmpty()) {
      this.numParityUnits = Integer.parseInt(args.get(PARITY_UNITS_NUM));
    }
    if (args.get(CELL_SIZE) != null && !args.get(CELL_SIZE).isEmpty()) {
      this.cellSize = (int) StringUtil.parseToByte(args.get(CELL_SIZE));
    }
  }

  /**
   * Validates the parsed schema and registers the new EC policy with the
   * NameNode, logging the outcome.
   *
   * @throws Exception if the schema is invalid or the NameNode rejects the
   *                   policy
   */
  @Override
  public void execute() throws Exception {
    this.setDfsClient(HadoopUtil.getDFSClient(
        HadoopUtil.getNameNodeUri(conf), conf));
    if (codecName == null || numDataUnits <= 0 || numParityUnits <= 0 ||
        cellSize <= 0 || cellSize % 1024 != 0) {
      throw new ActionException("Illegal EC policy Schema! " +
          "A valid codec name should be given, " +
          "the dataNum, parityNum and cellSize should be positive and " +
          "the cellSize should be divisible by 1024.");
    }
    ECSchema ecSchema = new ECSchema(codecName, numDataUnits, numParityUnits);
    ErasureCodingPolicy ecPolicy = new ErasureCodingPolicy(ecSchema, cellSize);
    AddErasureCodingPolicyResponse addEcResponse =
        dfsClient.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy})[0];
    if (addEcResponse.isSucceed()) {
      appendLog(String.format("EC policy named %s is added successfully!",
          addEcResponse.getPolicy().getName()));
    } else {
      // No format arguments, so a plain string is enough (was String.format with no args).
      appendLog("Failed to add the given EC policy!");
      throw new ActionException(addEcResponse.getErrorMsg());
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/CheckErasureCodingPolicy.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/CheckErasureCodingPolicy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import java.util.Map;
/**
* An action to check the EC policy for a file or dir.
*/
@ActionSignature(
actionId = "checkec",
displayName = "checkec",
usage = HdfsAction.FILE_PATH + " $src"
)
public class CheckErasureCodingPolicy extends HdfsAction {
  public static final String RESULT_OF_NULL_EC_POLICY =
      "The EC policy is replication.";

  private SmartConf conf;
  private String srcPath;

  /**
   * Captures the Smart configuration and the path to inspect.
   *
   * @param args action arguments; {@code -file} selects the path
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    conf = getContext().getConf();
    srcPath = args.get(HdfsAction.FILE_PATH);
  }

  /**
   * Logs the EC policy of the path, or a fixed message when the path has no
   * EC policy (i.e. plain replication).
   *
   * @throws Exception if the DFS client cannot be created or the RPC fails
   */
  @Override
  public void execute() throws Exception {
    setDfsClient(HadoopUtil.getDFSClient(HadoopUtil.getNameNodeUri(conf), conf));
    ErasureCodingPolicy policy = dfsClient.getErasureCodingPolicy(srcPath);
    appendLog(policy == null ? RESULT_OF_NULL_EC_POLICY : policy.toString());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/UnErasureCodingAction.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/UnErasureCodingAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.utils.StringUtil;
import java.io.IOException;
import java.util.Map;
/**
* An action to set replication policy for a dir or convert a file to another one in replication policy.
* Default value is used for argument of bufSize if its value is not given in this action.
*/
@ActionSignature(
actionId = "unec",
displayName = "unec",
usage = HdfsAction.FILE_PATH + " $src " + ErasureCodingBase.BUF_SIZE + " $bufSize"
)
public class UnErasureCodingAction extends ErasureCodingBase {
  private static final Logger LOG =
      LoggerFactory.getLogger(UnErasureCodingAction.class);

  /**
   * Parses arguments: source path, optional temp path (assigned by
   * ErasureCodingScheduler via -ecTmp), and buffer size. The target policy
   * is always the built-in replication pseudo-policy.
   */
  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    this.conf = getContext().getConf();
    this.srcPath = args.get(FILE_PATH);
    // Converting away from EC always targets the replication policy.
    this.ecPolicyName = REPLICATION_POLICY_NAME;
    if (args.containsKey(EC_TMP)) {
      this.ecTmpPath = args.get(EC_TMP);
    }
    if (args.containsKey(BUF_SIZE) && !args.get(BUF_SIZE).isEmpty()) {
      this.bufferSize = (int) StringUtil.parseToByte(args.get(BUF_SIZE));
    }
    this.progress = 0.0F;
  }

  /**
   * Sets the replication policy on a directory, or converts an EC file back
   * to replication by copying it to the temp path and renaming it over the
   * source. No-ops (progress = 1) when the file is already replicated.
   *
   * @throws Exception if the file is missing or the conversion fails
   */
  @Override
  protected void execute() throws Exception {
    final String MATCH_RESULT =
        "The current EC policy is replication already.";
    final String DIR_RESULT =
        "The replication EC policy is set successfully for the given directory.";
    final String CONVERT_RESULT =
        "The file is converted successfully with replication EC policy.";
    // Use a plain DFSClient for NameNode operations.
    this.setDfsClient(HadoopUtil.getDFSClient(
        HadoopUtil.getNameNodeUri(conf), conf));
    HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
    if (fileStatus == null) {
      throw new ActionException("File doesn't exist!");
    }
    ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy();
    // A null EC policy means the file is already replicated — nothing to do.
    if (srcEcPolicy == null) {
      this.progress = 1.0F;
      appendLog(MATCH_RESULT);
      return;
    }
    if (fileStatus.isDir()) {
      // Directories only record the policy; no data movement is needed.
      dfsClient.setErasureCodingPolicy(srcPath, ecPolicyName);
      progress = 1.0F;
      appendLog(DIR_RESULT);
      return;
    }
    try {
      // Copy to the temp path under replication, mirror the source's
      // attributes, then atomically replace the source via rename.
      convert(fileStatus);
      setAttributes(srcPath, fileStatus, ecTmpPath);
      dfsClient.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE);
      appendLog(CONVERT_RESULT);
      appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName()));
      appendLog(String.format("The current EC policy is %s.", REPLICATION_POLICY_NAME));
    } catch (ActionException ex) {
      // Best-effort cleanup of the partially written temp file.
      try {
        if (dfsClient.getFileInfo(ecTmpPath) != null) {
          dfsClient.delete(ecTmpPath, false);
        }
      } catch (IOException e) {
        LOG.error("Failed to delete tmp file created during the conversion!");
      }
      throw new ActionException(ex);
    }
  }

  @Override
  public float getProgress() {
    return progress;
  }
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/move/DBlockStriped.java | smart-hadoop-support/smart-hadoop-3.1/src/main/java/org/smartdata/hdfs/action/move/DBlockStriped.java | package org.smartdata.hdfs.action.move;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
/**
 * A striped block group that can resolve the internal (per-storage) block
 * held by a particular storage group.
 */
public class DBlockStriped extends DBlock {

  final byte[] indices;
  final short dataBlockNum;
  final int cellSize;

  public DBlockStriped(Block block, byte[] indices, short dataBlockNum,
      int cellSize) {
    super(block);
    this.indices = indices;
    this.dataBlockNum = dataBlockNum;
    this.cellSize = cellSize;
  }

  /**
   * Resolves the internal block stored on the given storage group, or
   * returns {@code null} when this group holds no part of the block group.
   */
  public DBlock getInternalBlock(StorageGroup storage) {
    final int locationIdx = locations.indexOf(storage);
    if (locationIdx < 0) {
      return null;
    }
    // The index within the block group determines both the block id offset
    // and the internal block length.
    final byte groupIdx = indices[locationIdx];
    final Block internal = new Block(getBlock());
    internal.setBlockId(getBlock().getBlockId() + groupIdx);
    internal.setNumBytes(StripedBlockUtil.getInternalBlockLength(
        getNumBytes(), cellSize, dataBlockNum, groupIdx));
    final DBlock result = new DBlock(internal);
    result.addLocation(storage);
    return result;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestSetFile.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestSetFile.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SetFile;
import org.apache.hadoop.io.WritableComparator;
import org.junit.Test;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
import static org.junit.Assert.*;
/** Support for flat files of binary key/value pairs. */
public class TestSetFile {
private static final Log LOG = LogFactory.getLog(TestSetFile.class);
private static String FILE = org.smartdata.erasurecode.test.GenericTestUtils.getTempPath("test.set");
private static Configuration conf = new Configuration();
@Test
public void testSetFile() throws Exception {
FileSystem fs = FileSystem.getLocal(conf);
try {
RandomDatum[] data = generate(10000);
writeTest(fs, data, FILE, CompressionType.NONE);
readTest(fs, data, FILE);
writeTest(fs, data, FILE, CompressionType.BLOCK);
readTest(fs, data, FILE);
} finally {
fs.close();
}
}
/**
* test {@code SetFile.Reader} methods
* next(), get() in combination
*/
@Test
public void testSetFileAccessMethods() {
try {
FileSystem fs = FileSystem.getLocal(conf);
int size = 10;
writeData(fs, size);
SetFile.Reader reader = createReader(fs);
assertTrue("testSetFileWithConstruction1 error !!!", reader.next(new IntWritable(0)));
// don't know why reader.get(i) return i+1
assertEquals("testSetFileWithConstruction2 error !!!", new IntWritable(size/2 + 1), reader.get(new IntWritable(size/2)));
assertNull("testSetFileWithConstruction3 error !!!", reader.get(new IntWritable(size*2)));
} catch (Exception ex) {
fail("testSetFileWithConstruction error !!!");
}
}
private SetFile.Reader createReader(FileSystem fs) throws IOException {
return new SetFile.Reader(fs, FILE,
WritableComparator.get(IntWritable.class), conf);
}
@SuppressWarnings("deprecation")
private void writeData(FileSystem fs, int elementSize) throws IOException {
MapFile.delete(fs, FILE);
SetFile.Writer writer = new SetFile.Writer(fs, FILE, IntWritable.class);
for (int i = 0; i < elementSize; i++)
writer.append(new IntWritable(i));
writer.close();
}
private static RandomDatum[] generate(int count) {
LOG.info("generating " + count + " records in memory");
RandomDatum[] data = new RandomDatum[count];
RandomDatum.Generator generator = new RandomDatum.Generator();
for (int i = 0; i < count; i++) {
generator.next();
data[i] = generator.getValue();
}
LOG.info("sorting " + count + " records");
Arrays.sort(data);
return data;
}
private static void writeTest(FileSystem fs, RandomDatum[] data,
String file, CompressionType compress)
throws IOException {
MapFile.delete(fs, file);
LOG.info("creating with " + data.length + " records");
SetFile.Writer writer =
new SetFile.Writer(conf, fs, file,
WritableComparator.get(RandomDatum.class),
compress);
for (int i = 0; i < data.length; i++)
writer.append(data[i]);
writer.close();
}
private static void readTest(FileSystem fs, RandomDatum[] data, String file)
throws IOException {
RandomDatum v = new RandomDatum();
int sample = (int)Math.sqrt(data.length);
Random random = new Random();
LOG.info("reading " + sample + " records");
SetFile.Reader reader = new SetFile.Reader(fs, file, conf);
for (int i = 0; i < sample; i++) {
if (!reader.seek(data[random.nextInt(data.length)]))
throw new RuntimeException("wrong value at " + i);
}
reader.close();
LOG.info("done reading " + data.length);
}
/** For debugging and testing. */
public static void main(String[] args) throws Exception {
int count = 1024 * 1024;
boolean create = true;
boolean check = true;
String file = FILE;
String compress = "NONE";
String usage = "Usage: TestSetFile [-count N] [-nocreate] [-nocheck] [-compress type] file";
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
int i = 0;
Path fpath=null;
FileSystem fs = null;
try {
for (; i < args.length; i++) { // parse command line
if (args[i] == null) {
continue;
} else if (args[i].equals("-count")) {
count = Integer.parseInt(args[++i]);
} else if (args[i].equals("-nocreate")) {
create = false;
} else if (args[i].equals("-nocheck")) {
check = false;
} else if (args[i].equals("-compress")) {
compress = args[++i];
} else {
// file is required parameter
file = args[i];
fpath=new Path(file);
}
}
fs = fpath.getFileSystem(conf);
LOG.info("count = " + count);
LOG.info("create = " + create);
LOG.info("check = " + check);
LOG.info("compress = " + compress);
LOG.info("file = " + file);
RandomDatum[] data = generate(count);
if (create) {
writeTest(fs, data, file, CompressionType.valueOf(compress));
}
if (check) {
readTest(fs, data, file);
}
} finally {
fs.close();
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestArrayFile.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestArrayFile.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.RandomDatum;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.TestMapFile;
import org.apache.hadoop.util.Progressable;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.*;
/** Support for flat files of binary key/value pairs. */
/** Support for flat files of binary key/value pairs. */
public class TestArrayFile {
  private static final Log LOG = LogFactory.getLog(TestArrayFile.class);

  private static final Path TEST_DIR = new Path(org.smartdata.erasurecode.test.GenericTestUtils.getTempPath(
      TestMapFile.class.getSimpleName()));
  private static String TEST_FILE = new Path(TEST_DIR, "test.array").toString();

  /** Round-trips 10000 randomly generated records through an ArrayFile. */
  @Test
  public void testArrayFile() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    RandomDatum[] data = generate(10000);
    writeTest(fs, data, TEST_FILE);
    readTest(fs, data, TEST_FILE, conf);
  }

  /** Reading index 0 of an empty ArrayFile must return null. */
  @Test
  public void testEmptyFile() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    writeTest(fs, new RandomDatum[0], TEST_FILE);
    ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
    try {
      assertNull(reader.get(0, new RandomDatum()));
    } finally {
      // Close even if the read throws, so the handle is not leaked.
      reader.close();
    }
  }

  /** Generates {@code count} random records. */
  private static RandomDatum[] generate(int count) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("generating " + count + " records in debug");
    }
    RandomDatum[] data = new RandomDatum[count];
    RandomDatum.Generator generator = new RandomDatum.Generator();
    for (int i = 0; i < count; i++) {
      generator.next();
      data[i] = generator.getValue();
    }
    return data;
  }

  /** Writes all records to {@code file}, indexing every 100th entry. */
  private static void writeTest(FileSystem fs, RandomDatum[] data, String file)
      throws IOException {
    Configuration conf = new Configuration();
    MapFile.delete(fs, file);
    if (LOG.isDebugEnabled()) {
      LOG.debug("creating with " + data.length + " debug");
    }
    ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, file, RandomDatum.class);
    writer.setIndexInterval(100);
    for (int i = 0; i < data.length; i++)
      writer.append(data[i]);
    writer.close();
  }

  /** Reads every record forwards then backwards, verifying each value. */
  private static void readTest(FileSystem fs, RandomDatum[] data, String file, Configuration conf)
      throws IOException {
    RandomDatum v = new RandomDatum();
    if (LOG.isDebugEnabled()) {
      LOG.debug("reading " + data.length + " debug");
    }
    ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
    try {
      for (int i = 0; i < data.length; i++) {       // try forwards
        reader.get(i, v);
        if (!v.equals(data[i])) {
          throw new RuntimeException("wrong value at " + i);
        }
      }
      for (int i = data.length - 1; i >= 0; i--) {  // then backwards
        reader.get(i, v);
        if (!v.equals(data[i])) {
          throw new RuntimeException("wrong value at " + i);
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("done reading " + data.length + " debug");
      }
    } finally {
      reader.close();
    }
  }

  /**
   * test on {@link ArrayFile.Reader} iteration methods
   * <pre>
   * {@code next(), seek()} in and out of range.
   * </pre>
   */
  @Test
  public void testArrayFileIteration() {
    int SIZE = 10;
    Configuration conf = new Configuration();
    try {
      FileSystem fs = FileSystem.get(conf);
      ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, TEST_FILE,
          LongWritable.class, CompressionType.RECORD, defaultProgressable);
      assertNotNull("testArrayFileIteration error !!!", writer);

      for (int i = 0; i < SIZE; i++)
        writer.append(new LongWritable(i));

      writer.close();

      ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
      LongWritable nextWritable = new LongWritable(0);

      // next() must walk the values in insertion order.
      for (int i = 0; i < SIZE; i++) {
        nextWritable = (LongWritable) reader.next(nextWritable);
        assertEquals(nextWritable.get(), i);
      }

      // After seek(6) the next entry read must be key 7 / value 7.
      assertTrue("testArrayFileIteration seek error !!!",
          reader.seek(new LongWritable(6)));
      nextWritable = (LongWritable) reader.next(nextWritable);
      assertTrue("testArrayFileIteration error !!!", reader.key() == 7);
      assertTrue("testArrayFileIteration error !!!",
          nextWritable.equals(new LongWritable(7)));

      // Seeking past the end must report failure.
      assertFalse("testArrayFileIteration error !!!",
          reader.seek(new LongWritable(SIZE + 5)));
      reader.close();
    } catch (Exception ex) {
      // Include the cause so a failure is diagnosable from the message.
      fail("testArrayFileWriterConstruction error !!! " + ex);
    }
  }

  /** For debugging and testing. */
  public static void main(String[] args) throws Exception {
    int count = 1024 * 1024;
    boolean create = true;
    boolean check = true;
    String file = TEST_FILE;
    String usage = "Usage: TestArrayFile [-count N] [-nocreate] [-nocheck] file";
    if (args.length == 0) {
      System.err.println(usage);
      System.exit(-1);
    }

    Configuration conf = new Configuration();
    int i = 0;
    Path fpath = null;
    FileSystem fs = null;
    try {
      for (; i < args.length; i++) {      // parse command line
        if (args[i] == null) {
          continue;
        } else if (args[i].equals("-count")) {
          count = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-nocreate")) {
          create = false;
        } else if (args[i].equals("-nocheck")) {
          check = false;
        } else {
          // file is required parameter
          file = args[i];
          fpath = new Path(file);
        }
      }

      if (fpath == null) {
        // No file argument was supplied; fail with usage instead of an NPE.
        System.err.println(usage);
        System.exit(-1);
      }
      fs = fpath.getFileSystem(conf);

      LOG.info("count = " + count);
      LOG.info("create = " + create);
      LOG.info("check = " + check);
      LOG.info("file = " + file);

      RandomDatum[] data = generate(count);

      if (create) {
        writeTest(fs, data, file);
      }

      if (check) {
        readTest(fs, data, file, conf);
      }
    } finally {
      // fs may never have been assigned if argument parsing failed early.
      if (fs != null) {
        fs.close();
      }
    }
  }

  private static final Progressable defaultProgressable = new Progressable() {
    @Override
    public void progress() {
    }
  };
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestCodecRawCoderMapping.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestCodecRawCoderMapping.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.erasurecode.rawcoder.*;
import org.smartdata.erasurecode.test.GenericTestUtils;
/**
* Test the codec to raw coder mapping.
*/
/**
 * Test the codec to raw coder mapping.
 */
public class TestCodecRawCoderMapping {

  private static Configuration conf;
  private static final int numDataUnit = 6;
  private static final int numParityUnit = 3;

  @Before
  public void setup() {
    conf = new Configuration();
  }

  @Test
  public void testRSDefaultRawCoder() {
    ErasureCoderOptions options =
        new ErasureCoderOptions(numDataUnit, numParityUnit);

    // rs-default should resolve to the default pure-Java RS coders.
    RawErasureEncoder defaultEncoder = CodecUtil.createRawEncoder(
        conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, options);
    RawErasureDecoder defaultDecoder = CodecUtil.createRawDecoder(
        conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, options);
    Assert.assertTrue(defaultEncoder instanceof RSRawEncoder);
    Assert.assertTrue(defaultDecoder instanceof RSRawDecoder);

    // rs-legacy should resolve to the legacy RS coders.
    RawErasureEncoder legacyEncoder = CodecUtil.createRawEncoder(
        conf, ErasureCodeConstants.RS_LEGACY_CODEC_NAME, options);
    RawErasureDecoder legacyDecoder = CodecUtil.createRawDecoder(
        conf, ErasureCodeConstants.RS_LEGACY_CODEC_NAME, options);
    Assert.assertTrue(legacyEncoder instanceof RSRawEncoderLegacy);
    Assert.assertTrue(legacyDecoder instanceof RSRawDecoderLegacy);
  }

  @Test
  public void testDedicatedRawCoderKey() {
    ErasureCoderOptions options =
        new ErasureCoderOptions(numDataUnit, numParityUnit);

    // Point the rs-legacy codec at a factory class that does not exist.
    // Creating an rs-default coder is still fine, because the rs-legacy
    // raw coder key is not consulted for it.
    String dummyFactName = "DummyNoneExistingFactory";
    conf.set(CodecUtil.
        IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY, dummyFactName);
    RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf,
        ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, options);
    Assert.assertTrue(encoder instanceof RSRawEncoder);

    // rs-legacy itself must now fail to instantiate its raw coder.
    try {
      CodecUtil.createRawEncoder(conf,
          ErasureCodeConstants.RS_LEGACY_CODEC_NAME, options);
      Assert.fail();
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains("Failed to create raw coder", e);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestECSchema.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestECSchema.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.util.HashMap;
import java.util.Map;
import static org.junit.Assert.assertEquals;
public class TestECSchema {

  @Rule
  public Timeout globalTimeout = new Timeout(300000);

  /**
   * Builds a schema from a well-formed option map and verifies every field,
   * including pass-through of an unrecognized extra option.
   */
  @Test
  public void testGoodSchema() {
    int dataUnits = 6;
    int parityUnits = 3;
    String codec = "rs";
    String extraKey = "extraOption";
    String extraValue = "extraOptionValue";

    Map<String, String> options = new HashMap<>();
    options.put(ECSchema.NUM_DATA_UNITS_KEY, String.valueOf(dataUnits));
    options.put(ECSchema.NUM_PARITY_UNITS_KEY, String.valueOf(parityUnits));
    options.put(ECSchema.CODEC_NAME_KEY, codec);
    options.put(extraKey, extraValue);

    ECSchema schema = new ECSchema(options);
    System.out.println(schema.toString());

    assertEquals(dataUnits, schema.getNumDataUnits());
    assertEquals(parityUnits, schema.getNumParityUnits());
    assertEquals(codec, schema.getCodecName());
    assertEquals(extraValue, schema.getExtraOptions().get(extraKey));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/BufferAllocator.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/BufferAllocator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import java.nio.ByteBuffer;
/**
* An abstract buffer allocator used for test.
*/
/**
 * An abstract buffer allocator used for test.
 */
public abstract class BufferAllocator {
  private final boolean usingDirect;

  public BufferAllocator(boolean usingDirect) {
    this.usingDirect = usingDirect;
  }

  protected boolean isUsingDirect() {
    return usingDirect;
  }

  /**
   * Allocate and return a ByteBuffer of the specified length, direct or
   * on-heap according to {@link #isUsingDirect()}.
   */
  public abstract ByteBuffer allocate(int bufferLen);

  /**
   * A simple buffer allocator that just uses ByteBuffer's
   * allocate/allocateDirect API.
   */
  public static class SimpleBufferAllocator extends BufferAllocator {

    public SimpleBufferAllocator(boolean usingDirect) {
      super(usingDirect);
    }

    @Override
    public ByteBuffer allocate(int bufferLen) {
      if (isUsingDirect()) {
        return ByteBuffer.allocateDirect(bufferLen);
      }
      return ByteBuffer.allocate(bufferLen);
    }
  }

  /**
   * A buffer allocator that carves each requested buffer out of one large
   * pre-allocated buffer via slice(). When the backing buffer has no room
   * left it degrades to plain allocation, like SimpleBufferAllocator, so
   * ensure enough total space up front.
   */
  public static class SlicedBufferAllocator extends BufferAllocator {
    private ByteBuffer overallBuffer;

    public SlicedBufferAllocator(boolean usingDirect, int totalBufferLen) {
      super(usingDirect);
      this.overallBuffer = usingDirect
          ? ByteBuffer.allocateDirect(totalBufferLen)
          : ByteBuffer.allocate(totalBufferLen);
    }

    @Override
    public ByteBuffer allocate(int bufferLen) {
      int available = overallBuffer.capacity() - overallBuffer.position();
      if (bufferLen > available) {
        // Not enough room left in the backing buffer; fall back.
        return isUsingDirect()
            ? ByteBuffer.allocateDirect(bufferLen)
            : ByteBuffer.allocate(bufferLen);
      }
      // Carve [position, position + bufferLen) out of the backing buffer.
      overallBuffer.limit(overallBuffer.position() + bufferLen);
      ByteBuffer slice = overallBuffer.slice();
      overallBuffer.position(overallBuffer.position() + bufferLen);
      return slice;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/TestCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.erasurecode.rawcoder.util.DumpUtil;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Random;
import static org.junit.Assert.assertTrue;
/**
* Test base of common utilities for tests not only raw coders but also block
* coders.
*/
public abstract class TestCoderBase {
protected static Random RAND = new Random();
protected boolean allowDump = true;
private Configuration conf;
protected int numDataUnits;
protected int numParityUnits;
protected int baseChunkSize = 1024;
private int chunkSize = baseChunkSize;
private BufferAllocator allocator;
private byte[] zeroChunkBytes;
private boolean startBufferWithZero = true;
// Indexes of erased data units.
protected int[] erasedDataIndexes = new int[] {0};
// Indexes of erased parity units.
protected int[] erasedParityIndexes = new int[] {0};
// Data buffers are either direct or on-heap, for performance the two cases
// may go to different coding implementations.
protected boolean usingDirectBuffer = true;
protected boolean usingFixedData = true;
// Using this the generated data can be repeatable across multiple calls to
// encode(), in order for troubleshooting.
private static int FIXED_DATA_GENERATOR = 0;
protected byte[][] fixedData;
protected boolean allowChangeInputs;
  /** Returns the chunk size (in bytes) used by the current test case. */
  protected int getChunkSize() {
    return chunkSize;
  }
  /** Sets the chunk size and refreshes the matching all-zero byte chunk. */
  protected void setChunkSize(int chunkSize) {
    this.chunkSize = chunkSize;
    this.zeroChunkBytes = new byte[chunkSize]; // With ZERO by default
  }
  /** Returns a zero-filled byte array of the current chunk size. */
  protected byte[] getZeroChunkBytes() {
    return zeroChunkBytes;
  }
protected void prepareBufferAllocator(boolean usingSlicedBuffer) {
if (usingSlicedBuffer) {
int roughEstimationSpace =
chunkSize * (numDataUnits + numParityUnits) * 10;
allocator = new BufferAllocator.SlicedBufferAllocator(usingDirectBuffer,
roughEstimationSpace);
} else {
allocator = new BufferAllocator.SimpleBufferAllocator(usingDirectBuffer);
}
}
  /** Whether dumping of coder/test state for troubleshooting is allowed. */
  protected boolean isAllowDump() {
    return allowDump;
  }
/**
* Prepare before running the case.
* @param conf
* @param numDataUnits
* @param numParityUnits
* @param erasedDataIndexes
* @param erasedParityIndexes
* @param usingFixedData Using fixed or pre-generated data to test instead of
* generating data
*/
  protected void prepare(Configuration conf, int numDataUnits,
                         int numParityUnits, int[] erasedDataIndexes,
                         int[] erasedParityIndexes, boolean usingFixedData) {
    // Fall back to a fresh default Configuration when none is supplied.
    this.conf = conf != null ? conf : new Configuration();
    this.numDataUnits = numDataUnits;
    this.numParityUnits = numParityUnits;
    // By default erase the first data unit and the first parity unit.
    this.erasedDataIndexes = erasedDataIndexes != null ?
        erasedDataIndexes : new int[] {0};
    this.erasedParityIndexes = erasedParityIndexes != null ?
        erasedParityIndexes : new int[] {0};
    this.usingFixedData = usingFixedData;
    if (usingFixedData) {
      // Pre-generate data so repeated encode() calls are reproducible.
      prepareFixedData();
    }
  }
/**
* Prepare before running the case.
* @param conf
* @param numDataUnits
* @param numParityUnits
* @param erasedDataIndexes
* @param erasedParityIndexes
*/
  protected void prepare(Configuration conf, int numDataUnits,
                         int numParityUnits, int[] erasedDataIndexes,
                         int[] erasedParityIndexes) {
    // Delegates with usingFixedData = false (fresh random data per run).
    prepare(conf, numDataUnits, numParityUnits, erasedDataIndexes,
        erasedParityIndexes, false);
  }
/**
* Prepare before running the case.
* @param numDataUnits
* @param numParityUnits
* @param erasedDataIndexes
* @param erasedParityIndexes
*/
  protected void prepare(int numDataUnits, int numParityUnits,
                         int[] erasedDataIndexes, int[] erasedParityIndexes) {
    // Delegates with a default Configuration and fresh random data.
    prepare(null, numDataUnits, numParityUnits, erasedDataIndexes,
        erasedParityIndexes, false);
  }
/**
* Get the conf the test.
* @return configuration
*/
  protected Configuration getConf() {
    // Set by prepare(); non-null after any prepare() overload has run.
    return this.conf;
  }
/**
* Compare and verify if erased chunks are equal to recovered chunks
* @param erasedChunks
* @param recoveredChunks
*/
protected void compareAndVerify(ECChunk[] erasedChunks,
ECChunk[] recoveredChunks) {
byte[][] erased = toArrays(erasedChunks);
byte[][] recovered = toArrays(recoveredChunks);
boolean result = Arrays.deepEquals(erased, recovered);
if (!result) {
assertTrue("Decoding and comparing failed.", result);
}
}
/**
* Adjust and return erased indexes altogether, including erased data indexes
* and parity indexes.
* @return erased indexes altogether
*/
protected int[] getErasedIndexesForDecoding() {
int[] erasedIndexesForDecoding =
new int[erasedDataIndexes.length + erasedParityIndexes.length];
int idx = 0;
for (int i = 0; i < erasedDataIndexes.length; i++) {
erasedIndexesForDecoding[idx ++] = erasedDataIndexes[i];
}
for (int i = 0; i < erasedParityIndexes.length; i++) {
erasedIndexesForDecoding[idx ++] = erasedParityIndexes[i] + numDataUnits;
}
return erasedIndexesForDecoding;
}
/**
* Return input chunks for decoding, which is dataChunks + parityChunks.
* @param dataChunks
* @param parityChunks
* @return
*/
protected ECChunk[] prepareInputChunksForDecoding(ECChunk[] dataChunks,
ECChunk[] parityChunks) {
ECChunk[] inputChunks = new ECChunk[numDataUnits + numParityUnits];
int idx = 0;
for (int i = 0; i < numDataUnits; i++) {
inputChunks[idx ++] = dataChunks[i];
}
for (int i = 0; i < numParityUnits; i++) {
inputChunks[idx ++] = parityChunks[i];
}
return inputChunks;
}
/**
* Erase some data chunks to test the recovering of them. As they're erased,
* we don't need to read them and will not have the buffers at all, so just
* set them as null.
* @param dataChunks
* @param parityChunks
* @return clone of erased chunks
*/
protected ECChunk[] backupAndEraseChunks(ECChunk[] dataChunks,
ECChunk[] parityChunks) {
ECChunk[] toEraseChunks = new ECChunk[erasedDataIndexes.length +
erasedParityIndexes.length];
int idx = 0;
for (int i = 0; i < erasedDataIndexes.length; i++) {
toEraseChunks[idx ++] = dataChunks[erasedDataIndexes[i]];
dataChunks[erasedDataIndexes[i]] = null;
}
for (int i = 0; i < erasedParityIndexes.length; i++) {
toEraseChunks[idx ++] = parityChunks[erasedParityIndexes[i]];
parityChunks[erasedParityIndexes[i]] = null;
}
return toEraseChunks;
}
/**
* Erase data from the specified chunks, just setting them as null.
* @param chunks
*/
protected void eraseDataFromChunks(ECChunk[] chunks) {
for (int i = 0; i < chunks.length; i++) {
chunks[i] = null;
}
}
protected void markChunks(ECChunk[] chunks) {
for (int i = 0; i < chunks.length; i++) {
if (chunks[i] != null) {
chunks[i].getBuffer().mark();
}
}
}
protected void restoreChunksFromMark(ECChunk[] chunks) {
for (int i = 0; i < chunks.length; i++) {
if (chunks[i] != null) {
chunks[i].getBuffer().reset();
}
}
}
/**
* Clone chunks along with copying the associated data. It respects how the
* chunk buffer is allocated, direct or non-direct. It avoids affecting the
* original chunk buffers.
* @param chunks
* @return
*/
protected ECChunk[] cloneChunksWithData(ECChunk[] chunks) {
ECChunk[] results = new ECChunk[chunks.length];
for (int i = 0; i < chunks.length; i++) {
results[i] = cloneChunkWithData(chunks[i]);
}
return results;
}
/**
* Clone chunk along with copying the associated data. It respects how the
* chunk buffer is allocated, direct or non-direct. It avoids affecting the
* original chunk.
* @param chunk
* @return a new chunk
*/
  protected ECChunk cloneChunkWithData(ECChunk chunk) {
    if (chunk == null) {
      return null;
    }
    ByteBuffer srcBuffer = chunk.getBuffer();
    byte[] bytesArr = new byte[srcBuffer.remaining()];
    // mark/reset so the source buffer's position is left untouched.
    srcBuffer.mark();
    srcBuffer.get(bytesArr, 0, bytesArr.length);
    srcBuffer.reset();
    // Destination respects the allocator's direct/non-direct choice and may
    // start at a position > 0 (see allocateOutputBuffer).
    ByteBuffer destBuffer = allocateOutputBuffer(bytesArr.length);
    int pos = destBuffer.position();
    destBuffer.put(bytesArr);
    // flip then restore position: limit ends after the copied data while
    // position returns to where the data begins.
    destBuffer.flip();
    destBuffer.position(pos);
    return new ECChunk(destBuffer);
  }
/**
* Allocate a chunk for output or writing.
* @return
*/
  protected ECChunk allocateOutputChunk() {
    // The buffer may start at a position > 0; see allocateOutputBuffer.
    ByteBuffer buffer = allocateOutputBuffer(chunkSize);
    return new ECChunk(buffer);
  }
/**
* Allocate a buffer for output or writing. It can prepare for two kinds of
* data buffers: one with position as 0, the other with position > 0
* @return a buffer ready to write chunkSize bytes from current position
*/
  protected ByteBuffer allocateOutputBuffer(int bufferLen) {
    /**
     * When startBufferWithZero, will prepare a buffer as:---------------
     * otherwise, the buffer will be like: ___TO--BE--WRITTEN___,
     * and in the beginning, dummy data are prefixed, to simulate a buffer of
     * position > 0.
     */
    int startOffset = startBufferWithZero ? 0 : 11; // 11 is arbitrary
    int allocLen = startOffset + bufferLen + startOffset;
    ByteBuffer buffer = allocator.allocate(allocLen);
    buffer.limit(startOffset + bufferLen);
    // Write random prefix bytes so the returned position is startOffset.
    fillDummyData(buffer, startOffset);
    // Alternate between the two layouts across successive calls.
    startBufferWithZero = ! startBufferWithZero;
    return buffer;
  }
/**
* Prepare data chunks for each data unit, by generating random data.
* @return
*/
protected ECChunk[] prepareDataChunksForEncoding() {
if (usingFixedData) {
ECChunk[] chunks = new ECChunk[numDataUnits];
for (int i = 0; i < chunks.length; i++) {
chunks[i] = makeChunkUsingData(fixedData[i]);
}
return chunks;
}
return generateDataChunks();
}
private ECChunk makeChunkUsingData(byte[] data) {
ECChunk chunk = allocateOutputChunk();
ByteBuffer buffer = chunk.getBuffer();
int pos = buffer.position();
buffer.put(data, 0, chunkSize);
buffer.flip();
buffer.position(pos);
return chunk;
}
private ECChunk[] generateDataChunks() {
ECChunk[] chunks = new ECChunk[numDataUnits];
for (int i = 0; i < chunks.length; i++) {
chunks[i] = generateDataChunk();
}
return chunks;
}
private void prepareFixedData() {
// We may load test data from a resource, or just generate randomly.
// The generated data will be used across subsequent encode/decode calls.
this.fixedData = new byte[numDataUnits][];
for (int i = 0; i < numDataUnits; i++) {
fixedData[i] = generateFixedData(baseChunkSize * 2);
}
}
/**
* Generate data chunk by making random data.
* @return
*/
protected ECChunk generateDataChunk() {
ByteBuffer buffer = allocateOutputBuffer(chunkSize);
int pos = buffer.position();
buffer.put(generateData(chunkSize));
buffer.flip();
buffer.position(pos);
return new ECChunk(buffer);
}
/**
* Fill len of dummy data in the buffer at the current position.
* @param buffer
* @param len
*/
protected void fillDummyData(ByteBuffer buffer, int len) {
byte[] dummy = new byte[len];
RAND.nextBytes(dummy);
buffer.put(dummy);
}
protected byte[] generateData(int len) {
byte[] buffer = new byte[len];
for (int i = 0; i < buffer.length; i++) {
buffer[i] = (byte) RAND.nextInt(256);
}
return buffer;
}
  /**
   * Produce len deterministic bytes from the shared FIXED_DATA_GENERATOR
   * counter, which wraps from 255 back to 0. Successive calls continue the
   * sequence, so fixed data stays reproducible within a test run.
   */
  protected byte[] generateFixedData(int len) {
    byte[] buffer = new byte[len];
    for (int i = 0; i < buffer.length; i++) {
      buffer[i] = (byte) FIXED_DATA_GENERATOR++;
      if (FIXED_DATA_GENERATOR == 256) {
        FIXED_DATA_GENERATOR = 0;
      }
    }
    return buffer;
  }
/**
* Prepare parity chunks for encoding, each chunk for each parity unit.
* @return
*/
protected ECChunk[] prepareParityChunksForEncoding() {
ECChunk[] chunks = new ECChunk[numParityUnits];
for (int i = 0; i < chunks.length; i++) {
chunks[i] = allocateOutputChunk();
}
return chunks;
}
/**
* Prepare output chunks for decoding, each output chunk for each erased
* chunk.
* @return
*/
protected ECChunk[] prepareOutputChunksForDecoding() {
ECChunk[] chunks = new ECChunk[erasedDataIndexes.length +
erasedParityIndexes.length];
for (int i = 0; i < chunks.length; i++) {
chunks[i] = allocateOutputChunk();
}
return chunks;
}
/**
* Convert an array of this chunks to an array of byte array.
* Note the chunk buffers are not affected.
* @param chunks
* @return an array of byte array
*/
protected byte[][] toArrays(ECChunk[] chunks) {
byte[][] bytesArr = new byte[chunks.length][];
for (int i = 0; i < chunks.length; i++) {
if (chunks[i] != null) {
bytesArr[i] = chunks[i].toBytesArray();
}
}
return bytesArr;
}
/**
* Dump all the settings used in the test case if isAllowingVerboseDump is enabled.
*/
protected void dumpSetting() {
if (allowDump) {
StringBuilder sb = new StringBuilder("Erasure coder test settings:\n");
sb.append(" data_unit_num=").append(numDataUnits);
sb.append(" parity_unit_num=").append(numParityUnits);
sb.append(" chunkSize=").append(chunkSize).append("\n");
sb.append(" erasedDataIndexes=").
append(Arrays.toString(erasedDataIndexes));
sb.append(" erasedParityIndexes=").
append(Arrays.toString(erasedParityIndexes));
sb.append(" usingDirectBuffer=").append(usingDirectBuffer);
sb.append(" allowChangeInputs=").append(allowChangeInputs);
sb.append(" allowVerboseDump=").append(allowDump);
sb.append("\n");
System.out.println(sb.toString());
}
}
/**
* Dump chunks prefixed with a header if isAllowingVerboseDump is enabled.
* @param header
* @param chunks
*/
protected void dumpChunks(String header, ECChunk[] chunks) {
if (allowDump) {
DumpUtil.dumpChunks(header, chunks);
}
}
/**
* Make some chunk messy or not correct any more
* @param chunks
*/
protected void corruptSomeChunk(ECChunk[] chunks) {
int idx = new Random().nextInt(chunks.length);
ByteBuffer buffer = chunks[idx].getBuffer();
if (buffer.hasRemaining()) {
buffer.position(buffer.position() + 1);
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/RandomDatum.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/RandomDatum.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
/**
 * A test datum carrying a random-length byte payload; writable and
 * comparable, intended for serialization round-trip tests.
 * Only the first {@code length} bytes of {@code data} are valid: the backing
 * array may be longer when an instance is reused by {@link #readFields}.
 */
public class RandomDatum implements WritableComparable<RandomDatum> {
  private int length;
  private byte[] data;

  public RandomDatum() {}

  /** Create a datum with a random length of 10 to about 1010 random bytes. */
  public RandomDatum(Random random) {
    length = 10 + (int) Math.pow(10.0, random.nextFloat() * 3.0);
    data = new byte[length];
    random.nextBytes(data);
  }

  public int getLength() {
    return length;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(length);
    // Emit only the valid prefix: 'data' may be longer than 'length' when
    // this instance was reused via readFields(), and writing the whole
    // array would corrupt the stream.
    out.write(data, 0, length);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    length = in.readInt();
    // Reuse the existing buffer when it is large enough.
    if (data == null || length > data.length)
      data = new byte[length];
    in.readFully(data, 0, length);
  }

  @Override
  public int compareTo(RandomDatum o) {
    return WritableComparator.compareBytes(this.data, 0, this.length,
        o.data, 0, o.length);
  }

  @Override
  public boolean equals(Object o) {
    // Type-check before casting so equals() obeys its contract for null and
    // foreign types instead of throwing ClassCastException/NPE.
    if (this == o) {
      return true;
    }
    if (!(o instanceof RandomDatum)) {
      return false;
    }
    return compareTo((RandomDatum) o) == 0;
  }

  @Override
  public int hashCode() {
    // Hash only the valid prefix so hashCode stays consistent with
    // equals/compareTo, which ignore bytes beyond 'length'.
    return data == null ? 0
        : Arrays.hashCode(Arrays.copyOfRange(data, 0, length));
  }

  private static final char[] HEX_DIGITS =
      {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};

  /** Returns a lowercase hex string of the valid payload bytes. */
  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder(length * 2);
    for (int i = 0; i < length; i++) {
      int b = data[i];
      buf.append(HEX_DIGITS[(b >> 4) & 0xf]);
      buf.append(HEX_DIGITS[b & 0xf]);
    }
    return buf.toString();
  }

  /** Generator of key/value RandomDatum pairs; deterministic when seeded. */
  public static class Generator {
    Random random;
    private RandomDatum key;
    private RandomDatum value;

    public Generator() { random = new Random(); }
    public Generator(int seed) { random = new Random(seed); }

    public RandomDatum getKey() { return key; }
    public RandomDatum getValue() { return value; }

    /** Advance to a fresh key/value pair. */
    public void next() {
      key = new RandomDatum(random);
      value = new RandomDatum(random);
    }
  }

  /** A WritableComparator optimized for RandomDatum. */
  public static class Comparator extends WritableComparator {
    public Comparator() {
      super(RandomDatum.class);
    }

    @Override
    public int compare(byte[] b1, int s1, int l1,
                       byte[] b2, int s2, int l2) {
      // Each serialized datum is a 4-byte length followed by that many bytes.
      int n1 = readInt(b1, s1);
      int n2 = readInt(b2, s2);
      return compareBytes(b1, s1 + 4, n1, b2, s2 + 4, n2);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/codec/TestHHXORErasureCodec.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/codec/TestHHXORErasureCodec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.codec;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import org.smartdata.erasurecode.ECSchema;
import org.smartdata.erasurecode.ErasureCodecOptions;
import org.smartdata.erasurecode.coder.ErasureCoder;
import static org.junit.Assert.assertEquals;
/**
 * Unit test for HHXORErasureCodec: both the encoder and the decoder it
 * creates must reflect the schema's data/parity unit counts.
 */
public class TestHHXORErasureCodec {
  private final ECSchema schema = new ECSchema("hhxor", 10, 4);
  private final ErasureCodecOptions options = new ErasureCodecOptions(schema);

  @Test
  public void testGoodCodec() {
    HHXORErasureCodec codec =
        new HHXORErasureCodec(new Configuration(), options);

    ErasureCoder encoder = codec.createEncoder();
    ErasureCoder decoder = codec.createDecoder();

    assertEquals(10, encoder.getNumDataUnits());
    assertEquals(4, encoder.getNumParityUnits());
    assertEquals(10, decoder.getNumDataUnits());
    assertEquals(4, decoder.getNumParityUnits());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Test;
/**
 * Test base for raw XOR coders: a 10+1 geometry with single erasures of a
 * data or parity unit, plus negative cases.
 */
public abstract class TestXORRawCoderBase extends TestRawCoderBase {

  @Test
  public void testCoding_10x1_erasing_d0() {
    prepare(null, 10, 1, new int[]{0}, new int[0]);
    testCodingDoMixAndTwice();
  }

  @Test
  public void testCoding_10x1_erasing_p0() {
    prepare(null, 10, 1, new int[0], new int[]{0});
    testCodingDoMixAndTwice();
  }

  @Test
  public void testCoding_10x1_erasing_d5() {
    prepare(null, 10, 1, new int[]{5}, new int[0]);
    testCodingDoMixAndTwice();
  }

  @Test
  public void testCodingNegative_10x1_erasing_too_many() {
    // Two erasures against a single parity unit: exercises the
    // erasing-too-many failure path.
    prepare(null, 10, 1, new int[]{2}, new int[]{0});
    testCodingWithErasingTooMany();
  }

  @Test
  public void testCodingNegative_10x1_erasing_d5() {
    prepare(null, 10, 1, new int[]{5}, new int[0]);
    // The bad-input/bad-output pair is deliberately run twice (as in the
    // original) — presumably to verify the coder remains usable after a
    // failed call; confirm against TestRawCoderBase.
    for (int round = 0; round < 2; round++) {
      testCodingWithBadInput(true);
      testCodingWithBadOutput(false);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderInteroperable2.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderInteroperable2.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assume;
import org.junit.Before;
import org.smartdata.erasurecode.ErasureCodeNative;
/**
 * Interoperability test: data encoded by the native RS encoder must decode
 * correctly with the pure-Java RS decoder.
 */
public class TestRSRawCoderInteroperable2 extends TestRSRawCoderBase {
  @Before
  public void setup() {
    // Skip the whole class when the native erasure-code library is missing.
    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
    this.encoderClass = NativeRSRawEncoder.class;
    this.decoderClass = RSRawDecoder.class;
    setAllowDump(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestNativeXORRawCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestNativeXORRawCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assume;
import org.junit.Before;
import org.smartdata.erasurecode.ErasureCodeNative;
/**
 * Test NativeXOR encoding and decoding (native encoder paired with native
 * decoder).
 */
public class TestNativeXORRawCoder extends TestXORRawCoderBase {

  @Before
  public void setup() {
    // Native coders need the loaded native library; skip otherwise.
    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
    this.decoderClass = NativeXORRawDecoder.class;
    this.encoderClass = NativeXORRawEncoder.class;
    setAllowDump(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Test;
/**
 * Test base for raw Reed-solomon coders.
 * Each case prepares a (data, parity) geometry plus the erased data and
 * parity indexes, then runs the shared mix-and-twice check from the base
 * class.
 */
public abstract class TestRSRawCoderBase extends TestRawCoderBase {
  @Test
  public void testCoding_6x3_erasing_all_d() {
    // NOTE(review): the extra boolean selects a prepare() overload — its
    // meaning (likely fixed/reproducible test data) should be confirmed in
    // the base class.
    prepare(null, 6, 3, new int[]{0, 1, 2}, new int[0], true);
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_d2() {
    prepare(null, 6, 3, new int[] {0, 2}, new int[]{});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0() {
    prepare(null, 6, 3, new int[]{0}, new int[0]);
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d2() {
    prepare(null, 6, 3, new int[]{2}, new int[]{});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_p0() {
    prepare(null, 6, 3, new int[]{0}, new int[]{0});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_all_p() {
    prepare(null, 6, 3, new int[0], new int[]{0, 1, 2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_p0() {
    prepare(null, 6, 3, new int[0], new int[]{0});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_p2() {
    prepare(null, 6, 3, new int[0], new int[]{2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasure_p0_p2() {
    prepare(null, 6, 3, new int[0], new int[]{0, 2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_p0_p1() {
    prepare(null, 6, 3, new int[]{0}, new int[]{0, 1});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_d2_p2() {
    prepare(null, 6, 3, new int[]{0, 2}, new int[]{2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCodingNegative_6x3_erasing_d2_d4() {
    // Despite the "Negative" in the name, this runs the normal recovery
    // path: two erased data units are within the 3-parity tolerance.
    prepare(null, 6, 3, new int[]{2, 4}, new int[0]);
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCodingNegative_6x3_erasing_too_many() {
    // Four erasures against three parity units: expected to fail.
    prepare(null, 6, 3, new int[]{2, 4}, new int[]{0, 1});
    testCodingWithErasingTooMany();
  }
  @Test
  public void testCoding_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCodingInputBufferPosition() {
    prepare(null, 6, 3, new int[]{0}, new int[]{0});
    // Exercise non-zero input buffer positions with both buffer kinds.
    testInputPosition(false);
    testInputPosition(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoderInteroperable2.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoderInteroperable2.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assume;
import org.junit.Before;
import org.smartdata.erasurecode.ErasureCodeNative;
/**
 * Interoperability test: data encoded by the native XOR encoder must decode
 * correctly with the pure-Java XOR decoder.
 */
public class TestXORRawCoderInteroperable2 extends TestXORRawCoderBase {
  @Before
  public void setup() {
    // Skip the whole class when the native erasure-code library is missing.
    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
    this.encoderClass = NativeXORRawEncoder.class;
    this.decoderClass = XORRawDecoder.class;
    setAllowDump(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRawErasureCoderBenchmark.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRawErasureCoderBenchmark.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assume;
import org.junit.Test;
import org.smartdata.erasurecode.ErasureCodeNative;
/**
 * Tests for the raw erasure coder benchmark tool.
 * Each case drives RawErasureCoderBenchmark.performBench with an operation
 * name, a coder type and three numeric knobs. NOTE(review): the meaning of
 * the three integers is defined by performBench (likely concurrency/data
 * size/chunk size) — confirm there before changing values.
 */
public class TestRawErasureCoderBenchmark {
  @Test
  public void testDummyCoder() throws Exception {
    // Dummy coder
    RawErasureCoderBenchmark.performBench("encode",
        RawErasureCoderBenchmark.CODER.DUMMY_CODER, 2, 100, 1024);
    RawErasureCoderBenchmark.performBench("decode",
        RawErasureCoderBenchmark.CODER.DUMMY_CODER, 5, 150, 100);
  }
  @Test
  public void testLegacyRSCoder() throws Exception {
    // Legacy RS Java coder
    RawErasureCoderBenchmark.performBench("encode",
        RawErasureCoderBenchmark.CODER.LEGACY_RS_CODER, 2, 80, 200);
    RawErasureCoderBenchmark.performBench("decode",
        RawErasureCoderBenchmark.CODER.LEGACY_RS_CODER, 5, 300, 350);
  }
  @Test
  public void testRSCoder() throws Exception {
    // RS Java coder
    RawErasureCoderBenchmark.performBench("encode",
        RawErasureCoderBenchmark.CODER.RS_CODER, 3, 200, 200);
    RawErasureCoderBenchmark.performBench("decode",
        RawErasureCoderBenchmark.CODER.RS_CODER, 4, 135, 20);
  }
  @Test
  public void testISALCoder() throws Exception {
    // ISA-L coder: only runs when the native library is loaded.
    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
    // ISA-L coder
    RawErasureCoderBenchmark.performBench("encode",
        RawErasureCoderBenchmark.CODER.ISAL_CODER, 5, 300, 64);
    RawErasureCoderBenchmark.performBench("decode",
        RawErasureCoderBenchmark.CODER.ISAL_CODER, 6, 200, 128);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestCoderUtil.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestCoderUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.junit.Test;
import java.nio.ByteBuffer;
import static org.junit.Assert.assertEquals;
/**
 * Tests for CoderUtil, the helper utility used by raw erasure coders.
 */
public class TestCoderUtil {
  private final int numInputs = 9;
  private final int chunkSize = 1024;

  @Test
  public void testGetEmptyChunk() {
    // Every byte of an empty chunk must be zero.
    for (byte b : CoderUtil.getEmptyChunk(chunkSize)) {
      assertEquals(0, b);
    }
  }

  @Test
  public void testResetBuffer() {
    ByteBuffer buf = ByteBuffer.allocate(chunkSize * 2).putInt(1234);
    buf.position(0);
    // The first chunkSize bytes must be zeroed after reset.
    ByteBuffer cleared = CoderUtil.resetBuffer(buf, chunkSize);
    for (int offset = 0; offset < chunkSize; offset++) {
      assertEquals(0, cleared.getInt(offset));
    }
    // Same for the byte-array overload.
    byte[] inputs = ByteBuffer.allocate(numInputs)
        .putInt(1234).array();
    CoderUtil.resetBuffer(inputs, 0, numInputs);
    for (byte b : inputs) {
      assertEquals(0, b);
    }
  }

  @Test
  public void testGetValidIndexes() {
    byte[][] inputs = new byte[numInputs][];
    inputs[0] = new byte[chunkSize];
    inputs[1] = new byte[chunkSize];
    inputs[7] = new byte[chunkSize];
    inputs[8] = new byte[chunkSize];

    int[] expected = {0, 1, 7, 8};
    int[] validIndexes = CoderUtil.getValidIndexes(inputs);
    assertEquals(expected.length, validIndexes.length);
    for (int i = 0; i < expected.length; i++) {
      assertEquals(expected[i], validIndexes[i]);
    }
  }

  @Test
  public void testNoValidIndexes() {
    // A freshly allocated array of arrays is all-null, so no index is valid.
    byte[][] inputs = new byte[numInputs][];
    int[] validIndexes = CoderUtil.getValidIndexes(inputs);
    assertEquals(0, validIndexes.length);
  }

  @Test
  public void testGetNullIndexes() {
    byte[][] inputs = new byte[numInputs][];
    inputs[0] = new byte[chunkSize];
    inputs[1] = new byte[chunkSize];
    inputs[7] = new byte[chunkSize];
    inputs[8] = new byte[chunkSize];
    // Entries 2..6 stay null, so those are the expected null indexes.
    int[] nullIndexes = CoderUtil.getNullIndexes(inputs);
    for (int i = 0; i < 5; i++) {
      assertEquals(i + 2, nullIndexes[i]);
    }
  }

  @Test
  public void testFindFirstValidInput() {
    byte[][] inputs = new byte[numInputs][];
    inputs[8] = ByteBuffer.allocate(4).putInt(1234).array();
    assertEquals(CoderUtil.findFirstValidInput(inputs), inputs[8]);
  }

  @Test(expected = HadoopIllegalArgumentException.class)
  public void testNoValidInput() {
    // All-null inputs must be rejected.
    CoderUtil.findFirstValidInput(new byte[numInputs][]);
  }
} | java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderInteroperable1.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderInteroperable1.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assume;
import org.junit.Before;
import org.smartdata.erasurecode.ErasureCodeNative;
/**
 * Interoperability test: data encoded by the pure-Java RS encoder must
 * decode correctly with the native RS decoder.
 */
public class TestRSRawCoderInteroperable1 extends TestRSRawCoderBase {
  @Before
  public void setup() {
    // Skip the whole class when the native erasure-code library is missing.
    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
    this.encoderClass = RSRawEncoder.class;
    this.decoderClass = NativeRSRawDecoder.class;
    setAllowDump(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestErasureCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestErasureCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.smartdata.erasurecode.*;
import org.smartdata.erasurecode.coder.ErasureCoder;
import org.smartdata.erasurecode.coder.ErasureCodingStep;
import java.lang.reflect.Constructor;
/**
* Erasure coder test base with utilities.
*/
public abstract class TestErasureCoderBase extends TestCoderBase {
protected Class<? extends ErasureCoder> encoderClass;
protected Class<? extends ErasureCoder> decoderClass;
private ErasureCoder encoder;
private ErasureCoder decoder;
protected int numChunksInBlock = 16;
  /**
   * It's just a block for this test purpose. We don't use HDFS block here
   * at all for simple. Holds one chunk per position so coding steps can be
   * driven entirely in memory.
   */
  protected static class TestBlock extends ECBlock {
    protected ECChunk[] chunks;
    // For simple, just assume the block have the chunks already ready.
    // In practice we need to read/write chunks from/to the block via file IO.
    public TestBlock(ECChunk[] chunks) {
      this.chunks = chunks;
    }
  }
  /**
   * Generating source data, encoding, recovering and then verifying.
   * RawErasureCoder mainly uses ECChunk to pass input and output data buffers,
   * it supports two kinds of ByteBuffers, one is array backed, the other is
   * direct ByteBuffer. Have usingDirectBuffer to indicate which case to test.
   * @param usingDirectBuffer whether chunks use direct buffers or heap arrays
   */
  protected void testCoding(boolean usingDirectBuffer) {
    this.usingDirectBuffer = usingDirectBuffer;
    prepareCoders();
    /**
     * The following runs will use 3 different chunkSize for inputs and outputs,
     * to verify the same encoder/decoder can process variable width of data.
     */
    performTestCoding(baseChunkSize, true);
    // Odd smaller (-17) and larger (+16) sizes guard against assumptions
    // about a fixed or aligned chunk width.
    performTestCoding(baseChunkSize - 17, false);
    performTestCoding(baseChunkSize + 16, true);
  }
  /**
   * Run one full encode / erase / decode / verify cycle at the given chunk
   * size.
   * @param chunkSize width of each chunk in bytes for this run
   * @param usingSlicedBuffer whether the buffer allocator hands out slices
   *        (the exact semantics are defined by prepareBufferAllocator)
   */
  private void performTestCoding(int chunkSize, boolean usingSlicedBuffer) {
    setChunkSize(chunkSize);
    prepareBufferAllocator(usingSlicedBuffer);
    // Generate data and encode
    ECBlockGroup blockGroup = prepareBlockGroupForEncoding();
    // Backup all the source chunks for later recovering because some coders
    // may affect the source data.
    TestBlock[] clonedDataBlocks =
        cloneBlocksWithData((TestBlock[]) blockGroup.getDataBlocks());
    TestBlock[] parityBlocks = (TestBlock[]) blockGroup.getParityBlocks();
    ErasureCodingStep codingStep;
    codingStep = encoder.calculateCoding(blockGroup);
    performCodingStep(codingStep);
    // Erase specified sources but return copies of them for later comparing
    TestBlock[] backupBlocks = backupAndEraseBlocks(clonedDataBlocks, parityBlocks);
    // Decode
    blockGroup = new ECBlockGroup(clonedDataBlocks, blockGroup.getParityBlocks());
    codingStep = decoder.calculateCoding(blockGroup);
    performCodingStep(codingStep);
    // Compare
    compareAndVerify(backupBlocks, codingStep.getOutputBlocks());
  }
/**
* This is typically how a coding step should be performed.
* @param codingStep
*/
protected void performCodingStep(ErasureCodingStep codingStep) {
// Pretend that we're opening these input blocks and output blocks.
ECBlock[] inputBlocks = codingStep.getInputBlocks();
ECBlock[] outputBlocks = codingStep.getOutputBlocks();
// We allocate input and output chunks accordingly.
ECChunk[] inputChunks = new ECChunk[inputBlocks.length];
ECChunk[] outputChunks = new ECChunk[outputBlocks.length];
for (int i = 0; i < numChunksInBlock; ++i) {
// Pretend that we're reading input chunks from input blocks.
for (int j = 0; j < inputBlocks.length; ++j) {
inputChunks[j] = ((TestBlock) inputBlocks[j]).chunks[i];
}
// Pretend that we allocate and will write output results to the blocks.
for (int j = 0; j < outputBlocks.length; ++j) {
outputChunks[j] = allocateOutputChunk();
((TestBlock) outputBlocks[j]).chunks[i] = outputChunks[j];
}
// Given the input chunks and output chunk buffers, just call it !
codingStep.performCoding(inputChunks, outputChunks);
}
codingStep.finish();
}
/**
* Compare and verify if recovered blocks data are the same with the erased
* blocks data.
* @param erasedBlocks
* @param recoveredBlocks
*/
protected void compareAndVerify(ECBlock[] erasedBlocks,
ECBlock[] recoveredBlocks) {
for (int i = 0; i < erasedBlocks.length; ++i) {
compareAndVerify(((TestBlock) erasedBlocks[i]).chunks, ((TestBlock) recoveredBlocks[i]).chunks);
}
}
private void prepareCoders() {
if (encoder == null) {
encoder = createEncoder();
}
if (decoder == null) {
decoder = createDecoder();
}
}
/**
* Create the raw erasure encoder to test
* @return
*/
protected ErasureCoder createEncoder() {
ErasureCoder encoder;
try {
ErasureCoderOptions options = new ErasureCoderOptions(
numDataUnits, numParityUnits, allowChangeInputs, allowDump);
Constructor<? extends ErasureCoder> constructor =
(Constructor<? extends ErasureCoder>)
encoderClass.getConstructor(ErasureCoderOptions.class);
encoder = constructor.newInstance(options);
} catch (Exception e) {
throw new RuntimeException("Failed to create encoder", e);
}
encoder.setConf(getConf());
return encoder;
}
/**
* create the raw erasure decoder to test
* @return
*/
protected ErasureCoder createDecoder() {
ErasureCoder decoder;
try {
ErasureCoderOptions options = new ErasureCoderOptions(
numDataUnits, numParityUnits, allowChangeInputs, allowDump);
Constructor<? extends ErasureCoder> constructor =
(Constructor<? extends ErasureCoder>)
decoderClass.getConstructor(ErasureCoderOptions.class);
decoder = constructor.newInstance(options);
} catch (Exception e) {
throw new RuntimeException("Failed to create decoder", e);
}
decoder.setConf(getConf());
return decoder;
}
/**
* Prepare a block group for encoding.
* @return
*/
protected ECBlockGroup prepareBlockGroupForEncoding() {
ECBlock[] dataBlocks = new TestBlock[numDataUnits];
ECBlock[] parityBlocks = new TestBlock[numParityUnits];
for (int i = 0; i < numDataUnits; i++) {
dataBlocks[i] = generateDataBlock();
}
for (int i = 0; i < numParityUnits; i++) {
parityBlocks[i] = allocateOutputBlock();
}
return new ECBlockGroup(dataBlocks, parityBlocks);
}
/**
* Generate random data and return a data block.
* @return
*/
protected ECBlock generateDataBlock() {
ECChunk[] chunks = new ECChunk[numChunksInBlock];
for (int i = 0; i < numChunksInBlock; ++i) {
chunks[i] = generateDataChunk();
}
return new TestBlock(chunks);
}
/**
* Erase blocks to test the recovering of them. Before erasure clone them
* first so could return themselves.
* @param dataBlocks
* @return clone of erased dataBlocks
*/
protected TestBlock[] backupAndEraseBlocks(TestBlock[] dataBlocks,
TestBlock[] parityBlocks) {
TestBlock[] toEraseBlocks = new TestBlock[erasedDataIndexes.length +
erasedParityIndexes.length];
int idx = 0;
TestBlock block;
for (int i = 0; i < erasedDataIndexes.length; i++) {
block = dataBlocks[erasedDataIndexes[i]];
toEraseBlocks[idx ++] = cloneBlockWithData(block);
eraseDataFromBlock(block);
}
for (int i = 0; i < erasedParityIndexes.length; i++) {
block = parityBlocks[erasedParityIndexes[i]];
toEraseBlocks[idx ++] = cloneBlockWithData(block);
eraseDataFromBlock(block);
}
return toEraseBlocks;
}
/**
* Allocate an output block. Note the chunk buffer will be allocated by the
* up caller when performing the coding step.
* @return
*/
protected TestBlock allocateOutputBlock() {
ECChunk[] chunks = new ECChunk[numChunksInBlock];
return new TestBlock(chunks);
}
/**
* Clone blocks with data copied along with, avoiding affecting the original
* blocks.
* @param blocks
* @return
*/
protected TestBlock[] cloneBlocksWithData(TestBlock[] blocks) {
TestBlock[] results = new TestBlock[blocks.length];
for (int i = 0; i < blocks.length; ++i) {
results[i] = cloneBlockWithData(blocks[i]);
}
return results;
}
/**
* Clone exactly a block, avoiding affecting the original block.
* @param block
* @return a new block
*/
protected TestBlock cloneBlockWithData(TestBlock block) {
ECChunk[] newChunks = cloneChunksWithData(block.chunks);
return new TestBlock(newChunks);
}
/**
* Erase data from a block.
*/
protected void eraseDataFromBlock(TestBlock theBlock) {
eraseDataFromChunks(theBlock.chunks);
theBlock.setErased(true);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestHHErasureCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestHHErasureCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.smartdata.erasurecode.ECBlock;
import org.smartdata.erasurecode.ECChunk;
import org.smartdata.erasurecode.coder.ErasureCodingStep;
/**
* Erasure coder test base with utilities for hitchhiker.
*/
public abstract class TestHHErasureCoderBase extends TestErasureCoderBase{
  // Number of chunk columns fed to one performCoding() call; hitchhiker
  // coding steps consume chunks in sub-packets rather than one at a time.
  protected int subPacketSize = 2;
  /**
   * Like the base implementation, but gathers subPacketSize chunk columns
   * per coding call. The flat chunk arrays are laid out sub-chunk-major:
   * entry (k * numBlocks + j) holds block j's chunk for sub-chunk k.
   */
  @Override
  protected void performCodingStep(ErasureCodingStep codingStep) {
    // Pretend that we're opening these input blocks and output blocks.
    ECBlock[] inputBlocks = codingStep.getInputBlocks();
    ECBlock[] outputBlocks = codingStep.getOutputBlocks();
    // We allocate input and output chunks accordingly.
    ECChunk[] inputChunks = new ECChunk[inputBlocks.length * subPacketSize];
    ECChunk[] outputChunks = new ECChunk[outputBlocks.length * subPacketSize];
    // NOTE(review): the i + k indexing assumes numChunksInBlock is a
    // multiple of subPacketSize — confirm for subclasses that override them.
    for (int i = 0; i < numChunksInBlock; i += subPacketSize) {
      // Pretend that we're reading input chunks from input blocks.
      for (int k = 0; k < subPacketSize; ++k) {
        for (int j = 0; j < inputBlocks.length; ++j) {
          inputChunks[k * inputBlocks.length + j] = ((TestBlock)
              inputBlocks[j]).chunks[i + k];
        }
        // Pretend that we allocate and will write output results to the blocks.
        for (int j = 0; j < outputBlocks.length; ++j) {
          outputChunks[k * outputBlocks.length + j] = allocateOutputChunk();
          ((TestBlock) outputBlocks[j]).chunks[i + k] =
              outputChunks[k * outputBlocks.length + j];
        }
      }
      // Given the input chunks and output chunk buffers, just call it !
      codingStep.performCoding(inputChunks, outputChunks);
    }
    codingStep.finish();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderLegacy.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoderLegacy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Before;
/**
* Test the legacy raw Reed-solomon coder implemented in Java.
*/
/**
 * Exercises the legacy pure-Java Reed-Solomon raw encoder/decoder pair.
 */
public class TestRSRawCoderLegacy extends TestRSRawCoderBase {
  @Before
  public void setup() {
    // Verbose buffer dumping is disabled by default; flip to true to get
    // detailed dumps when debugging a failure.
    setAllowDump(false);
    encoderClass = RSRawEncoderLegacy.class;
    decoderClass = RSRawDecoderLegacy.class;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSErasureCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSErasureCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.smartdata.erasurecode.CodecUtil;
import org.smartdata.erasurecode.coder.RSErasureDecoder;
import org.smartdata.erasurecode.coder.RSErasureEncoder;
/**
* Test Reed-Solomon encoding and decoding.
*/
public class TestRSErasureCoder extends TestErasureCoderBase {
  // Abort any single test after 300 seconds to catch coder hangs.
  @Rule
  public Timeout globalTimeout = new Timeout(300000);
  @Before
  public void setup() {
    // Reed-Solomon encoder/decoder pair under test, 10 chunks per block.
    this.encoderClass = RSErasureEncoder.class;
    this.decoderClass = RSErasureDecoder.class;
    this.numChunksInBlock = 10;
  }
  @Test
  public void testCodingNoDirectBuffer_10x4_erasing_d0_p0() {
    // 10+4 schema, erase data unit 0 and parity unit 0, heap buffers.
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    /**
     * Doing twice to test if the coders can be repeatedly reused. This matters
     * as the underlying coding buffers are shared, which may have bugs.
     */
    testCoding(false);
    testCoding(false);
  }
  @Test
  public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
    /**
     * This tests if the configuration items work or not.
     */
    // Explicitly select the pure-Java RS raw coder implementation.
    Configuration conf = new Configuration();
    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
        RSRawErasureCoderFactory.class.getCanonicalName());
    prepare(conf, 10, 4, new int[]{0}, new int[0]);
    testCoding(true);
    testCoding(true);
  }
  @Test
  public void testCodingDirectBuffer_10x4_erasing_p1() {
    // Erase parity unit 1 only.
    prepare(null, 10, 4, new int[]{}, new int[]{1});
    testCoding(true);
    testCoding(true);
  }
  @Test
  public void testCodingDirectBuffer_10x4_erasing_d2() {
    // Erase data unit 2 only.
    prepare(null, 10, 4, new int[] {2}, new int[] {});
    testCoding(true);
    testCoding(true);
  }
  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_p0() {
    // Erase one data unit and one parity unit, direct buffers.
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    testCoding(true);
    testCoding(true);
  }
  @Test
  public void testCodingBothBuffers_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    /**
     * Doing in mixed buffer usage model to test if the coders can be repeatedly
     * reused with different buffer usage model. This matters as the underlying
     * coding buffers are shared, which may have bugs.
     */
    testCoding(true);
    testCoding(false);
    testCoding(true);
    testCoding(false);
  }
  @Test
  public void testCodingDirectBuffer_10x4_erasure_of_d2_d4_p0() {
    // Three erasures: data units 2 and 4 plus parity unit 0.
    prepare(null, 10, 4, new int[] {2, 4}, new int[] {0});
    testCoding(true);
  }
  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_d1_p0_p1() {
    // Four erasures: data units 0, 1 and parity units 0, 1.
    prepare(null, 10, 4, new int[] {0, 1}, new int[] {0, 1});
    testCoding(true);
  }
  @Test
  public void testCodingNoDirectBuffer_3x3_erasing_d0_p0() {
    // Small 3+3 schema with heap buffers.
    prepare(null, 3, 3, new int[] {0}, new int[] {0});
    testCoding(false);
  }
  @Test
  public void testCodingDirectBuffer_6x3_erasing_d0_p0() {
    // 6+3 schema with direct buffers.
    prepare(null, 6, 3, new int[] {0}, new int[] {0});
    testCoding(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestNativeRSRawCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestNativeRSRawCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.erasurecode.ErasureCodeNative;
/**
* Test native raw Reed-solomon encoding and decoding.
*/
public class TestNativeRSRawCoder extends TestRSRawCoderBase {
  @Before
  public void setup() {
    // Skip the whole suite when the native erasure-code library is not
    // loaded on this machine.
    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
    this.encoderClass = NativeRSRawEncoder.class;
    this.decoderClass = NativeRSRawDecoder.class;
    setAllowDump(true);
  }
  @Test
  public void testCoding_6x3_erasing_all_d() {
    // All three data units erased. The trailing boolean is specific to this
    // prepare overload — presumably fixed (non-random) test data; confirm
    // against TestCoderBase.
    prepare(null, 6, 3, new int[]{0, 1, 2}, new int[0], true);
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_d2() {
    prepare(null, 6, 3, new int[] {0, 2}, new int[]{});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0() {
    prepare(null, 6, 3, new int[]{0}, new int[0]);
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d2() {
    prepare(null, 6, 3, new int[]{2}, new int[]{});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_p0() {
    prepare(null, 6, 3, new int[]{0}, new int[]{0});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_all_p() {
    // All three parity units erased.
    prepare(null, 6, 3, new int[0], new int[]{0, 1, 2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_p0() {
    prepare(null, 6, 3, new int[0], new int[]{0});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_p2() {
    prepare(null, 6, 3, new int[0], new int[]{2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasure_p0_p2() {
    prepare(null, 6, 3, new int[0], new int[]{0, 2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_p0_p1() {
    prepare(null, 6, 3, new int[]{0}, new int[]{0, 1});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCoding_6x3_erasing_d0_d2_p2() {
    // Three erasures: data units 0, 2 and parity unit 2.
    prepare(null, 6, 3, new int[]{0, 2}, new int[]{2});
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCodingNegative_6x3_erasing_d2_d4() {
    prepare(null, 6, 3, new int[]{2, 4}, new int[0]);
    testCodingDoMixAndTwice();
  }
  @Test
  public void testCodingNegative_6x3_erasing_too_many() {
    // Four erasures against a 6+3 schema: expected to be rejected, per
    // testCodingWithErasingTooMany in the base class.
    prepare(null, 6, 3, new int[]{2, 4}, new int[]{0, 1});
    testCodingWithErasingTooMany();
  }
  @Test
  public void testCoding_10x4_erasing_d0_p0() {
    // Wider 10+4 schema sanity check.
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    testCodingDoMixAndTwice();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/RawErasureCoderBenchmark.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/RawErasureCoderBenchmark.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import com.google.common.base.Preconditions;
import org.smartdata.erasurecode.rawcoder.*;
import org.apache.hadoop.util.StopWatch;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
import java.text.DecimalFormat;
import java.util.*;
import java.util.concurrent.*;
/**
* A benchmark tool to test the performance of different erasure coders.
* The tool launches multiple threads to encode/decode certain amount of data,
* and measures the total throughput. It only focuses on performance and doesn't
* validate correctness of the encoded/decoded results.
* User can specify the data size each thread processes, as well as the chunk
* size to use for the coder.
* Different coders are supported. User can specify the coder by a coder index.
* The coder is shared among all the threads.
*/
public final class RawErasureCoderBenchmark {
  private RawErasureCoderBenchmark() {
    // prevent instantiation
  }
  // target size of input data buffer
  private static final int TARGET_BUFFER_SIZE_MB = 126;
  // Upper bound (in KB) for the chunk-size argument, chosen so that
  // NUM_DATA_UNITS chunks fit within the target buffer.
  private static final int MAX_CHUNK_SIZE =
      TARGET_BUFFER_SIZE_MB / BenchData.NUM_DATA_UNITS * 1024;
  // One factory per CODER constant; the list order must match the enum
  // order — verified by the static block below.
  private static final List<RawErasureCoderFactory> CODER_MAKERS =
      Collections.unmodifiableList(
          Arrays.asList(new DummyRawErasureCoderFactory(),
              new RSRawErasureCoderFactoryLegacy(),
              new RSRawErasureCoderFactory(),
              new NativeRSRawErasureCoderFactory()));
  // Benchmarkable coders; ordinal() is both the user-facing coder index
  // and the index into CODER_MAKERS.
  enum CODER {
    DUMMY_CODER("Dummy coder"),
    LEGACY_RS_CODER("Legacy Reed-Solomon Java coder"),
    RS_CODER("Reed-Solomon Java coder"),
    ISAL_CODER("ISA-L coder");
    private final String name;
    CODER(String name) {
      this.name = name;
    }
    @Override
    public String toString() {
      return name;
    }
  }
  static {
    // Fail fast if the factory list and the enum drift out of sync.
    Preconditions.checkArgument(CODER_MAKERS.size() == CODER.values().length);
  }
  // Prints every coder together with the index users pass on the command line.
  private static void printAvailableCoders() {
    StringBuilder sb = new StringBuilder(
        "Available coders with coderIndex:\n");
    for (CODER coder : CODER.values()) {
      sb.append(coder.ordinal()).append(":").append(coder).append("\n");
    }
    System.out.println(sb.toString());
  }
  // Prints an optional error message plus usage help, then exits with code 1.
  private static void usage(String message) {
    if (message != null) {
      System.out.println(message);
    }
    System.out.println(
        "Usage: RawErasureCoderBenchmark <encode/decode> <coderIndex> " +
            "[numThreads] [dataSize-in-MB] [chunkSize-in-KB]");
    printAvailableCoders();
    System.exit(1);
  }
  /**
   * Entry point: parses and validates arguments, then runs the benchmark.
   * Any invalid argument terminates the process via {@link #usage(String)}.
   */
  public static void main(String[] args) throws Exception {
    String opType = null;
    int coderIndex = 0;
    // default values
    int dataSizeMB = 10240;
    int chunkSizeKB = 1024;
    int numThreads = 1;
    if (args.length > 1) {
      opType = args[0];
      if (!"encode".equals(opType) && !"decode".equals(opType)) {
        usage("Invalid type: should be either 'encode' or 'decode'");
      }
      try {
        coderIndex = Integer.parseInt(args[1]);
        if (coderIndex < 0 || coderIndex >= CODER.values().length) {
          usage("Invalid coder index, should be [0-" +
              (CODER.values().length - 1) + "]");
        }
      } catch (NumberFormatException e) {
        usage("Malformed coder index, " + e.getMessage());
      }
    } else {
      // Both the op type and the coder index are mandatory.
      usage(null);
    }
    if (args.length > 2) {
      try {
        numThreads = Integer.parseInt(args[2]);
        if (numThreads <= 0) {
          usage("Invalid number of threads.");
        }
      } catch (NumberFormatException e) {
        usage("Malformed number of threads, " + e.getMessage());
      }
    }
    if (args.length > 3) {
      try {
        dataSizeMB = Integer.parseInt(args[3]);
        if (dataSizeMB <= 0) {
          usage("Invalid data size.");
        }
      } catch (NumberFormatException e) {
        usage("Malformed data size, " + e.getMessage());
      }
    }
    if (args.length > 4) {
      try {
        chunkSizeKB = Integer.parseInt(args[4]);
        if (chunkSizeKB <= 0) {
          usage("Chunk size should be positive.");
        }
        if (chunkSizeKB > MAX_CHUNK_SIZE) {
          usage("Chunk size should be no larger than " + MAX_CHUNK_SIZE);
        }
      } catch (NumberFormatException e) {
        usage("Malformed chunk size, " + e.getMessage());
      }
    }
    performBench(opType, CODER.values()[coderIndex],
        numThreads, dataSizeMB, chunkSizeKB);
  }
  /**
   * Performs benchmark.
   *
   * @param opType The operation to perform. Can be encode or decode
   * @param coder The coder to use
   * @param numThreads Number of threads to launch concurrently
   * @param dataSizeMB Total test data size in MB
   * @param chunkSizeKB Chunk size in KB
   */
  public static void performBench(String opType, CODER coder,
      int numThreads, int dataSizeMB, int chunkSizeKB) throws Exception {
    BenchData.configure(dataSizeMB, chunkSizeKB);
    RawErasureEncoder encoder = null;
    RawErasureDecoder decoder = null;
    ByteBuffer testData;
    boolean isEncode = opType.equals("encode");
    // Only the side being benchmarked is instantiated; the test data buffer
    // type (direct vs heap) follows the coder's preference.
    if (isEncode) {
      encoder = getRawEncoder(coder.ordinal());
      testData = genTestData(encoder.preferDirectBuffer(),
          BenchData.bufferSizeKB);
    } else {
      decoder = getRawDecoder(coder.ordinal());
      testData = genTestData(decoder.preferDirectBuffer(),
          BenchData.bufferSizeKB);
    }
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    List<Future<Long>> futures = new ArrayList<>(numThreads);
    StopWatch sw = new StopWatch().start();
    // Each worker gets its own duplicate of the test data: duplicates share
    // the backing content but carry independent position/limit state.
    for (int i = 0; i < numThreads; i++) {
      futures.add(executor.submit(new BenchmarkCallable(isEncode,
          encoder, decoder, testData.duplicate())));
    }
    List<Long> durations = new ArrayList<>(numThreads);
    try {
      for (Future<Long> future : futures) {
        durations.add(future.get());
      }
      long duration = sw.now(TimeUnit.MILLISECONDS);
      double totalDataSize = BenchData.totalDataSizeKB * numThreads / 1024.0;
      DecimalFormat df = new DecimalFormat("#.##");
      System.out.println(coder + " " + opType + " " +
          df.format(totalDataSize) + "MB data, with chunk size " +
          BenchData.chunkSize / 1024 + "KB");
      System.out.println("Total time: " + df.format(duration / 1000.0) + " s.");
      System.out.println("Total throughput: " + df.format(
          totalDataSize / duration * 1000.0) + " MB/s");
      printThreadStatistics(durations, df);
    } catch (Exception e) {
      System.out.println("Error waiting for thread to finish.");
      e.printStackTrace();
      throw e;
    } finally {
      executor.shutdown();
    }
  }
  /**
   * Creates the encoder for the given coder index and runs one tiny encode
   * up front — presumably so any one-time initialization cost is paid
   * outside the timed loop; confirm against the coder implementations.
   */
  private static RawErasureEncoder getRawEncoder(int index) {
    RawErasureEncoder encoder =
        CODER_MAKERS.get(index).createEncoder(BenchData.OPTIONS);
    final boolean isDirect = encoder.preferDirectBuffer();
    encoder.encode(
        getBufferForInit(BenchData.NUM_DATA_UNITS, 1, isDirect),
        getBufferForInit(BenchData.NUM_PARITY_UNITS, 1, isDirect));
    return encoder;
  }
  /**
   * Creates the decoder for the given coder index and runs one tiny decode
   * up front (see {@link #getRawEncoder(int)} for the rationale).
   */
  private static RawErasureDecoder getRawDecoder(int index) {
    RawErasureDecoder decoder =
        CODER_MAKERS.get(index).createDecoder(BenchData.OPTIONS);
    final boolean isDirect = decoder.preferDirectBuffer();
    ByteBuffer[] inputs = getBufferForInit(
        BenchData.NUM_ALL_UNITS, 1, isDirect);
    // Null out the erased positions, as decode() expects for erased units.
    for (int erasedIndex : BenchData.ERASED_INDEXES) {
      inputs[erasedIndex] = null;
    }
    decoder.decode(inputs, BenchData.ERASED_INDEXES,
        getBufferForInit(BenchData.ERASED_INDEXES.length, 1, isDirect));
    return decoder;
  }
  // Allocates numBuf buffers of bufCap bytes each, direct or heap-backed.
  private static ByteBuffer[] getBufferForInit(int numBuf,
      int bufCap, boolean isDirect) {
    ByteBuffer[] buffers = new ByteBuffer[numBuf];
    for (int i = 0; i < buffers.length; i++) {
      buffers[i] = isDirect ? ByteBuffer.allocateDirect(bufCap) :
          ByteBuffer.allocate(bufCap);
    }
    return buffers;
  }
  /**
   * Prints min/max/average and 90th-percentile of the per-thread durations.
   * Inputs are milliseconds; output is reported in seconds.
   */
  private static void printThreadStatistics(
      List<Long> durations, DecimalFormat df) {
    Collections.sort(durations);
    System.out.println("Threads statistics: ");
    Double min = durations.get(0) / 1000.0;
    Double max = durations.get(durations.size() - 1) / 1000.0;
    Long sum = 0L;
    for (Long duration : durations) {
      sum += duration;
    }
    Double avg = sum.doubleValue() / durations.size() / 1000.0;
    Double percentile = durations.get(
        (int) Math.ceil(durations.size() * 0.9) - 1) / 1000.0;
    System.out.println(durations.size() + " threads in total.");
    System.out.println("Min: " + df.format(min) + " s, Max: " +
        df.format(max) + " s, Avg: " + df.format(avg) +
        " s, 90th Percentile: " + df.format(percentile) + " s.");
  }
  // Fills a buffer of sizeKB kilobytes with random bytes, flipped so it is
  // ready for reading.
  private static ByteBuffer genTestData(boolean useDirectBuffer, int sizeKB) {
    Random random = new Random();
    int bufferSize = sizeKB * 1024;
    byte[] tmp = new byte[bufferSize];
    random.nextBytes(tmp);
    ByteBuffer data = useDirectBuffer ?
        ByteBuffer.allocateDirect(bufferSize) :
        ByteBuffer.allocate(bufferSize);
    data.put(tmp);
    data.flip();
    return data;
  }
  // Static sizing/configuration shared by all workers, plus per-worker
  // coding buffers (one BenchData instance per BenchmarkCallable).
  private static class BenchData {
    // Fixed 6+3 schema used by every benchmark run.
    public static final ErasureCoderOptions OPTIONS =
        new ErasureCoderOptions(6, 3);
    public static final int NUM_DATA_UNITS = OPTIONS.getNumDataUnits();
    public static final int NUM_PARITY_UNITS = OPTIONS.getNumParityUnits();
    public static final int NUM_ALL_UNITS = OPTIONS.getNumAllUnits();
    private static int chunkSize;
    private static long totalDataSizeKB;
    private static int bufferSizeKB;
    // For decode runs, the three parity positions (6, 7, 8) are the units
    // treated as erased and reconstructed.
    private static final int[] ERASED_INDEXES = new int[]{6, 7, 8};
    private final ByteBuffer[] inputs = new ByteBuffer[NUM_DATA_UNITS];
    private ByteBuffer[] outputs = new ByteBuffer[NUM_PARITY_UNITS];
    private ByteBuffer[] decodeInputs = new ByteBuffer[NUM_ALL_UNITS];
    public static void configure(int dataSizeMB, int chunkSizeKB) {
      chunkSize = chunkSizeKB * 1024;
      // buffer size needs to be a multiple of (numDataUnits * chunkSize)
      int round = (int) Math.round(
          TARGET_BUFFER_SIZE_MB * 1024.0 / NUM_DATA_UNITS / chunkSizeKB);
      Preconditions.checkArgument(round > 0);
      bufferSizeKB = NUM_DATA_UNITS * chunkSizeKB * round;
      System.out.println("Using " + bufferSizeKB / 1024 + "MB buffer.");
      // Round the requested data volume to whole buffers, at least one.
      round = (int) Math.round(
          (dataSizeMB * 1024.0) / bufferSizeKB);
      if (round == 0) {
        round = 1;
      }
      totalDataSizeKB = round * bufferSizeKB;
    }
    public BenchData(boolean useDirectBuffer) {
      // Output buffers are owned by this instance, i.e. by one worker.
      for (int i = 0; i < outputs.length; i++) {
        outputs[i] = useDirectBuffer ? ByteBuffer.allocateDirect(chunkSize) :
            ByteBuffer.allocate(chunkSize);
      }
    }
    public void prepareDecInput() {
      // Copy the data-unit buffers; positions 6..8 stay null, matching
      // ERASED_INDEXES, as decode() expects for erased units.
      System.arraycopy(inputs, 0, decodeInputs, 0, NUM_DATA_UNITS);
    }
    public void encode(RawErasureEncoder encoder) {
      encoder.encode(inputs, outputs);
    }
    public void decode(RawErasureDecoder decoder) {
      decoder.decode(decodeInputs, ERASED_INDEXES, outputs);
    }
  }
  // One benchmark worker: repeatedly encodes or decodes its own copy of the
  // test data and reports its elapsed time in milliseconds.
  private static class BenchmarkCallable implements Callable<Long> {
    private final boolean isEncode;
    private final RawErasureEncoder encoder;
    private final RawErasureDecoder decoder;
    private final BenchData benchData;
    private final ByteBuffer testData;
    public BenchmarkCallable(boolean isEncode, RawErasureEncoder encoder,
        RawErasureDecoder decoder, ByteBuffer testData) {
      if (isEncode) {
        Preconditions.checkArgument(encoder != null);
        this.encoder = encoder;
        this.decoder = null;
        benchData = new BenchData(encoder.preferDirectBuffer());
      } else {
        Preconditions.checkArgument(decoder != null);
        this.decoder = decoder;
        this.encoder = null;
        benchData = new BenchData(decoder.preferDirectBuffer());
      }
      this.isEncode = isEncode;
      this.testData = testData;
    }
    /**
     * Processes the whole assigned data volume and returns this thread's
     * elapsed time in milliseconds.
     */
    @Override
    public Long call() throws Exception {
      long rounds = BenchData.totalDataSizeKB / BenchData.bufferSizeKB;
      StopWatch sw = new StopWatch().start();
      for (long i = 0; i < rounds; i++) {
        while (testData.remaining() > 0) {
          for (ByteBuffer output : benchData.outputs) {
            output.clear();
          }
          // Each input gets the next consecutive chunkSize window of
          // testData as a zero-copy slice; position advances per unit.
          for (int j = 0; j < benchData.inputs.length; j++) {
            benchData.inputs[j] = testData.duplicate();
            benchData.inputs[j].limit(
                testData.position() + BenchData.chunkSize);
            benchData.inputs[j] = benchData.inputs[j].slice();
            testData.position(testData.position() + BenchData.chunkSize);
          }
          if (isEncode) {
            benchData.encode(encoder);
          } else {
            benchData.prepareDecInput();
            benchData.decode(decoder);
          }
        }
        // Rewind for the next pass over the same data.
        testData.clear();
      }
      return sw.now(TimeUnit.MILLISECONDS);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestHHXORErasureCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestHHXORErasureCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.erasurecode.CodecUtil;
import org.smartdata.erasurecode.coder.HHXORErasureDecoder;
import org.smartdata.erasurecode.coder.HHXORErasureEncoder;
/**
 * Test Hitchhiker-XOR (HH-XOR) erasure coding and decoding with various
 * data/parity layouts and buffer usage models.
 */
public class TestHHXORErasureCoder extends TestHHErasureCoderBase {

  @Before
  public void setup() {
    this.encoderClass = HHXORErasureEncoder.class;
    this.decoderClass = HHXORErasureDecoder.class;
    this.numChunksInBlock = 10;
    this.subPacketSize = 2;
  }

  @Test
  public void testCodingNoDirectBuffer_10x4_erasing_d0() {
    prepare(null, 10, 4, new int[]{0}, new int[0]);
    // Run twice to verify the coders can be repeatedly reused. This matters
    // as the underlying coding buffers are shared, which may have bugs.
    testCoding(false);
    testCoding(false);
  }

  @Test
  public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
    // Verifies that the raw-coder configuration item takes effect.
    Configuration conf = new Configuration();
    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
        RSRawErasureCoderFactory.class.getCanonicalName());
    prepare(conf, 10, 4, new int[]{0}, new int[0]);
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_p1() {
    prepare(null, 10, 4, new int[]{}, new int[]{1});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d4() {
    prepare(null, 10, 4, new int[] {4}, new int[] {});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingBothBuffers_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    // Mix direct and on-heap buffers to verify the same coder instances can
    // be reused across buffer usage models. This matters as the underlying
    // coding buffers are shared, which may have bugs.
    testCoding(true);
    testCoding(false);
    testCoding(true);
    testCoding(false);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasure_of_d2_d4_p0() {
    prepare(null, 10, 4, new int[] {2, 4}, new int[] {0});
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_d1_p0_p1() {
    prepare(null, 10, 4, new int[] {0, 1}, new int[] {0, 1});
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_6x3_erasing_d0_p0() {
    prepare(null, 6, 3, new int[] {0}, new int[] {0});
    testCoding(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.smartdata.erasurecode.coder.XORErasureDecoder;
import org.smartdata.erasurecode.coder.XORErasureEncoder;
/**
 * Test XOR encoding and decoding.
 */
public class TestXORCoder extends TestErasureCoderBase {

  @Rule
  public Timeout globalTimeout = new Timeout(300000);

  @Before
  public void setup() {
    encoderClass = XORErasureEncoder.class;
    decoderClass = XORErasureDecoder.class;
    numDataUnits = 10;
    numParityUnits = 1;
    numChunksInBlock = 10;
  }

  @Test
  public void testCodingNoDirectBuffer_erasing_p0() {
    prepare(null, 10, 1, new int[0], new int[]{0});
    // Encode/decode twice with the same coder instances: the shared
    // underlying coding buffers must survive reuse.
    testCoding(false);
    testCoding(false);
  }

  @Test
  public void testCodingBothBuffers_erasing_d5() {
    prepare(null, 10, 1, new int[]{5}, new int[0]);
    // Alternate direct and on-heap buffers so the same coder instances
    // are exercised under both buffer usage models; the shared coding
    // buffers may have bugs that only show up on such mixed reuse.
    testCoding(true);
    testCoding(false);
    testCoding(true);
    testCoding(false);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoderInteroperable1.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoderInteroperable1.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assume;
import org.junit.Before;
import org.smartdata.erasurecode.ErasureCodeNative;
/**
 * Test interoperability between the pure-Java XOR encoder and the native
 * XOR decoder: data encoded by one side must be recoverable by the other.
 */
public class TestXORRawCoderInteroperable1 extends TestXORRawCoderBase {

  @Before
  public void setup() {
    // Skip the whole suite when the native erasure-code library is absent.
    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
    encoderClass = XORRawEncoder.class;
    decoderClass = NativeXORRawDecoder.class;
    setAllowDump(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestDummyRawCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestDummyRawCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.erasurecode.ECChunk;
import java.nio.ByteBuffer;
/**
 * Test dummy raw coder, which performs no real coding work and therefore
 * must leave every output (parity/recovered) chunk all-zero.
 */
public class TestDummyRawCoder extends TestRawCoderBase {

  @Before
  public void setup() {
    encoderClass = DummyRawEncoder.class;
    decoderClass = DummyRawDecoder.class;
    setAllowDump(false);
    setChunkSize(baseChunkSize);
  }

  @Test
  public void testCoding_6x3_erasing_d0_d2() {
    prepare(null, 6, 3, new int[]{0, 2}, new int[0], false);
    testCodingDoMixed();
  }

  @Test
  public void testCoding_6x3_erasing_d0_p0() {
    prepare(null, 6, 3, new int[]{0}, new int[]{0}, false);
    testCodingDoMixed();
  }

  @Override
  protected void testCoding(boolean usingDirectBuffer) {
    this.usingDirectBuffer = usingDirectBuffer;
    prepareCoders(true);
    prepareBufferAllocator(true);
    setAllowChangeInputs(false);

    // Encode: a dummy encoder must leave every parity chunk all-zero.
    ECChunk[] sourceChunks = prepareDataChunksForEncoding();
    markChunks(sourceChunks);
    ECChunk[] parityChunks = prepareParityChunksForEncoding();
    encoder.encode(sourceChunks, parityChunks);
    compareAndVerify(parityChunks, getEmptyChunks(parityChunks.length));

    // Decode: recovered chunks must likewise come back all-zero.
    restoreChunksFromMark(sourceChunks);
    backupAndEraseChunks(sourceChunks, parityChunks);
    ECChunk[] decodeInputs = prepareInputChunksForDecoding(
        sourceChunks, parityChunks);
    ensureOnlyLeastRequiredChunks(decodeInputs);
    ECChunk[] recovered = prepareOutputChunksForDecoding();
    decoder.decode(decodeInputs, getErasedIndexesForDecoding(), recovered);
    compareAndVerify(recovered, getEmptyChunks(recovered.length));
  }

  // Builds the expected all-zero chunks used as the comparison baseline.
  private ECChunk[] getEmptyChunks(int num) {
    ECChunk[] zeroChunks = new ECChunk[num];
    for (int idx = 0; idx < num; idx++) {
      zeroChunks[idx] = new ECChunk(ByteBuffer.wrap(getZeroChunkBytes()));
    }
    return zeroChunks;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestXORRawCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Before;
/**
 * Test pure Java XOR encoding and decoding.
 */
public class TestXORRawCoder extends TestXORRawCoderBase {

  @Before
  public void setup() {
    // Exercise the pure-Java implementations of both coder sides;
    // all test cases come from TestXORRawCoderBase.
    encoderClass = XORRawEncoder.class;
    decoderClass = XORRawDecoder.class;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRawCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRawCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.erasurecode.ECChunk;
import org.smartdata.erasurecode.ErasureCoderOptions;
import org.smartdata.erasurecode.TestCoderBase;
import java.lang.reflect.Constructor;
/**
 * Raw coder test base with utilities.
 *
 * <p>Subclasses set {@link #encoderClass}/{@link #decoderClass} in their
 * JUnit setup and invoke the testCoding* helpers; this class handles chunk
 * generation, encoding, erasure, decoding and verification.
 */
public abstract class TestRawCoderBase extends TestCoderBase {
  // Concrete coder implementations under test; set by subclasses before use.
  protected Class<? extends RawErasureEncoder> encoderClass;
  protected Class<? extends RawErasureDecoder> decoderClass;
  // Instantiated (or recreated) via prepareCoders().
  protected RawErasureEncoder encoder;
  protected RawErasureDecoder decoder;

  /**
   * Doing twice to test if the coders can be repeatedly reused. This matters
   * as the underlying coding buffers are shared, which may have bugs.
   */
  protected void testCodingDoMixAndTwice() {
    testCodingDoMixed();
    testCodingDoMixed();
  }

  /**
   * Doing in mixed buffer usage model to test if the coders can be repeatedly
   * reused with different buffer usage model. This matters as the underlying
   * coding buffers are shared, which may have bugs.
   */
  protected void testCodingDoMixed() {
    testCoding(true);
    testCoding(false);
  }

  /**
   * Generating source data, encoding, recovering and then verifying.
   * RawErasureCoder mainly uses ECChunk to pass input and output data buffers,
   * it supports two kinds of ByteBuffers, one is array backed, the other is
   * direct ByteBuffer. Use usingDirectBuffer indicate which case to test.
   *
   * @param usingDirectBuffer whether chunks use direct (off-heap) ByteBuffers
   */
  protected void testCoding(boolean usingDirectBuffer) {
    this.usingDirectBuffer = usingDirectBuffer;
    prepareCoders(true);
    /**
     * The following runs will use 3 different chunkSize for inputs and outputs,
     * to verify the same encoder/decoder can process variable width of data.
     */
    performTestCoding(baseChunkSize, true, false, false, false);
    performTestCoding(baseChunkSize - 17, false, false, false, true);
    performTestCoding(baseChunkSize + 16, true, false, false, false);
  }

  /**
   * Similar to above, but perform negative cases using bad input for encoding.
   * @param usingDirectBuffer whether chunks use direct (off-heap) ByteBuffers
   */
  protected void testCodingWithBadInput(boolean usingDirectBuffer) {
    this.usingDirectBuffer = usingDirectBuffer;
    prepareCoders(true);
    try {
      performTestCoding(baseChunkSize, false, true, false, true);
      Assert.fail("Encoding test with bad input should fail");
    } catch (Exception e) {
      // Expected
    }
  }

  /**
   * Similar to above, but perform negative cases using bad output for decoding.
   * @param usingDirectBuffer whether chunks use direct (off-heap) ByteBuffers
   */
  protected void testCodingWithBadOutput(boolean usingDirectBuffer) {
    this.usingDirectBuffer = usingDirectBuffer;
    prepareCoders(true);
    try {
      performTestCoding(baseChunkSize, false, false, true, true);
      Assert.fail("Decoding test with bad output should fail");
    } catch (Exception e) {
      // Expected
    }
  }

  // Negative case: relies on the subclass having called prepare(...) with
  // more erasures than the code tolerates, so both buffer modes must throw.
  @Test
  public void testCodingWithErasingTooMany() {
    try {
      testCoding(true);
      Assert.fail("Decoding test erasing too many should fail");
    } catch (Exception e) {
      // Expected
    }
    try {
      testCoding(false);
      Assert.fail("Decoding test erasing too many should fail");
    } catch (Exception e) {
      // Expected
    }
  }

  /**
   * One full encode/erase/decode/verify round-trip.
   *
   * @param chunkSize          chunk width for this run
   * @param usingSlicedBuffer  allocate chunks as slices of a shared buffer
   * @param useBadInput        corrupt a data chunk before encoding (negative)
   * @param useBadOutput       corrupt an output chunk before decoding (negative)
   * @param allowChangeInputs  whether the coder may modify its input buffers
   */
  private void performTestCoding(int chunkSize, boolean usingSlicedBuffer,
                                 boolean useBadInput, boolean useBadOutput,
                                 boolean allowChangeInputs) {
    setChunkSize(chunkSize);
    prepareBufferAllocator(usingSlicedBuffer);
    setAllowChangeInputs(allowChangeInputs);

    dumpSetting();

    // Generate data and encode
    ECChunk[] dataChunks = prepareDataChunksForEncoding();
    if (useBadInput) {
      corruptSomeChunk(dataChunks);
    }
    dumpChunks("Testing data chunks", dataChunks);

    ECChunk[] parityChunks = prepareParityChunksForEncoding();

    // Backup all the source chunks for later recovering because some coders
    // may affect the source data.
    ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
    markChunks(dataChunks);

    encoder.encode(dataChunks, parityChunks);
    dumpChunks("Encoded parity chunks", parityChunks);

    if (!allowChangeInputs) {
      // The coder promised not to touch its inputs; verify that promise.
      restoreChunksFromMark(dataChunks);
      compareAndVerify(clonedDataChunks, dataChunks);
    }

    // Backup and erase some chunks
    ECChunk[] backupChunks = backupAndEraseChunks(clonedDataChunks, parityChunks);

    // Decode
    ECChunk[] inputChunks = prepareInputChunksForDecoding(
        clonedDataChunks, parityChunks);

    // Remove unnecessary chunks, allowing only least required chunks to be read.
    ensureOnlyLeastRequiredChunks(inputChunks);

    ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
    if (useBadOutput) {
      corruptSomeChunk(recoveredChunks);
    }

    ECChunk[] clonedInputChunks = null;
    if (!allowChangeInputs) {
      markChunks(inputChunks);
      clonedInputChunks = cloneChunksWithData(inputChunks);
    }

    dumpChunks("Decoding input chunks", inputChunks);
    decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
    dumpChunks("Decoded/recovered chunks", recoveredChunks);

    if (!allowChangeInputs) {
      // Same input-preservation check on the decode side.
      restoreChunksFromMark(inputChunks);
      compareAndVerify(clonedInputChunks, inputChunks);
    }

    // Compare recovered chunks against the erased originals.
    compareAndVerify(backupChunks, recoveredChunks);
  }

  // Controls whether the coders under test may mutate their input buffers.
  protected void setAllowChangeInputs(boolean allowChangeInputs) {
    this.allowChangeInputs = allowChangeInputs;
  }

  /**
   * Set true during setup if want to dump test settings and coding data,
   * useful in debugging.
   * @param allowDump whether to print settings and chunk contents
   */
  protected void setAllowDump(boolean allowDump) {
    this.allowDump = allowDump;
  }

  // Creates the encoder/decoder; with recreate=true, always builds fresh
  // instances (used to reset coder state between logical test runs).
  protected void prepareCoders(boolean recreate) {
    if (encoder == null || recreate) {
      encoder = createEncoder();
    }
    if (decoder == null || recreate) {
      decoder = createDecoder();
    }
  }

  // Nulls out surplus good chunks so the decoder is fed exactly the
  // minimum number (numDataUnits) of readable inputs.
  protected void ensureOnlyLeastRequiredChunks(ECChunk[] inputChunks) {
    int leastRequiredNum = numDataUnits;
    int erasedNum = erasedDataIndexes.length + erasedParityIndexes.length;
    int goodNum = inputChunks.length - erasedNum;
    int redundantNum = goodNum - leastRequiredNum;
    for (int i = 0; i < inputChunks.length && redundantNum > 0; i++) {
      if (inputChunks[i] != null) {
        inputChunks[i] = null; // Setting it null, not needing it actually
        redundantNum--;
      }
    }
  }

  /**
   * Create the raw erasure encoder to test
   * @return a fresh encoder instance built via reflection from encoderClass
   */
  protected RawErasureEncoder createEncoder() {
    ErasureCoderOptions coderConf =
        new ErasureCoderOptions(numDataUnits, numParityUnits,
            allowChangeInputs, allowDump);
    try {
      Constructor<? extends RawErasureEncoder> constructor =
          encoderClass.getConstructor(ErasureCoderOptions.class);
      return constructor.newInstance(coderConf);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create encoder", e);
    }
  }

  /**
   * create the raw erasure decoder to test
   * @return a fresh decoder instance built via reflection from decoderClass
   */
  protected RawErasureDecoder createDecoder() {
    ErasureCoderOptions coderConf =
        new ErasureCoderOptions(numDataUnits, numParityUnits,
            allowChangeInputs, allowDump);
    try {
      Constructor<? extends RawErasureDecoder> constructor =
          decoderClass.getConstructor(ErasureCoderOptions.class);
      return constructor.newInstance(coderConf);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create decoder", e);
    }
  }

  /**
   * Tests that the input buffer's position is moved to the end after
   * encode/decode.
   */
  protected void testInputPosition(boolean usingDirectBuffer) {
    this.usingDirectBuffer = usingDirectBuffer;
    prepareCoders(true);
    prepareBufferAllocator(false);

    // verify encode
    ECChunk[] dataChunks = prepareDataChunksForEncoding();
    ECChunk[] parityChunks = prepareParityChunksForEncoding();
    ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
    encoder.encode(dataChunks, parityChunks);
    verifyBufferPositionAtEnd(dataChunks);

    // verify decode
    backupAndEraseChunks(clonedDataChunks, parityChunks);
    ECChunk[] inputChunks = prepareInputChunksForDecoding(
        clonedDataChunks, parityChunks);
    ensureOnlyLeastRequiredChunks(inputChunks);
    ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
    decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
    verifyBufferPositionAtEnd(inputChunks);
  }

  // Asserts each non-null chunk's buffer was fully consumed (position at limit).
  private void verifyBufferPositionAtEnd(ECChunk[] inputChunks) {
    for (ECChunk chunk : inputChunks) {
      if (chunk != null) {
        Assert.assertEquals(0, chunk.getBuffer().remaining());
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/rawcoder/TestRSRawCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.junit.Before;
/**
 * Test the new raw Reed-solomon coder implemented in Java.
 */
public class TestRSRawCoder extends TestRSRawCoderBase {

  @Before
  public void setup() {
    // Pure-Java RS implementations on both sides; keep debug dumping off.
    encoderClass = RSRawEncoder.class;
    decoderClass = RSRawDecoder.class;
    setAllowDump(false);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TimedOutTestsListener.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TimedOutTestsListener.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.apache.hadoop.util.StringUtils;
import org.junit.runner.notification.Failure;
import org.junit.runner.notification.RunListener;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.management.*;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
/**
 * JUnit run listener which prints full thread dump into System.err
 * in case a test is failed due to timeout.
 */
public class TimedOutTestsListener extends RunListener {

  // JUnit's timeout failure message starts with this prefix.
  static final String TEST_TIMED_OUT_PREFIX = "test timed out after";

  // Indentation used when rendering stack/lock details. Constant, so final.
  private static final String INDENT = " ";

  private final PrintWriter output;

  /** Creates a listener that writes the diagnostics to System.err. */
  public TimedOutTestsListener() {
    this.output = new PrintWriter(System.err);
  }

  /**
   * Creates a listener that writes the diagnostics to the given writer.
   * @param output destination for the thread dump; not closed by this class
   */
  public TimedOutTestsListener(PrintWriter output) {
    this.output = output;
  }

  /**
   * Prints a full thread dump when the failure message indicates a
   * JUnit timeout; all other failures are ignored.
   */
  @Override
  public void testFailure(Failure failure) throws Exception {
    if (failure != null && failure.getMessage() != null
        && failure.getMessage().startsWith(TEST_TIMED_OUT_PREFIX)) {
      output.println("====> TEST TIMED OUT. PRINTING THREAD DUMP. <====");
      output.println();
      output.print(buildThreadDiagnosticString());
    }
  }

  /**
   * Builds the full diagnostic text: a timestamp, a dump of all live
   * threads, and (if any) details of monitor-deadlocked threads.
   * @return the diagnostic string, never null
   */
  public static String buildThreadDiagnosticString() {
    StringWriter sw = new StringWriter();
    PrintWriter output = new PrintWriter(sw);

    // SimpleDateFormat is not thread-safe, so a fresh instance per call.
    DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
    output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
    output.println();
    output.println(buildThreadDump());

    String deadlocksInfo = buildDeadlockInfo();
    if (deadlocksInfo != null) {
      output.println("====> DEADLOCKS DETECTED <====");
      output.println();
      output.println(deadlocksInfo);
    }

    return sw.toString();
  }

  /** Renders every live thread's state and stack in a jstack-like format. */
  static String buildThreadDump() {
    StringBuilder dump = new StringBuilder();
    Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
    for (Map.Entry<Thread, StackTraceElement[]> e : stackTraces.entrySet()) {
      Thread thread = e.getKey();
      dump.append(String.format(
          "\"%s\" %s prio=%d tid=%d %s\njava.lang.Thread.State: %s",
          thread.getName(),
          (thread.isDaemon() ? "daemon" : ""),
          thread.getPriority(),
          thread.getId(),
          Thread.State.WAITING.equals(thread.getState()) ?
              "in Object.wait()" :
              StringUtils.toLowerCase(thread.getState().name()),
          Thread.State.WAITING.equals(thread.getState()) ?
              "WAITING (on object monitor)" : thread.getState()));
      for (StackTraceElement stackTraceElement : e.getValue()) {
        dump.append("\n        at ");
        dump.append(stackTraceElement);
      }
      dump.append("\n");
    }
    return dump.toString();
  }

  /**
   * Checks the JVM for monitor-deadlocked threads.
   * @return a formatted description of the deadlocked threads, or null
   *         when no deadlock is detected
   */
  static String buildDeadlockInfo() {
    ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
    long[] threadIds = threadBean.findMonitorDeadlockedThreads();
    if (threadIds != null && threadIds.length > 0) {
      StringWriter stringWriter = new StringWriter();
      PrintWriter out = new PrintWriter(stringWriter);

      ThreadInfo[] infos = threadBean.getThreadInfo(threadIds, true, true);
      for (ThreadInfo ti : infos) {
        printThreadInfo(ti, out);
        printLockInfo(ti.getLockedSynchronizers(), out);
        out.println();
      }

      out.close();
      return stringWriter.toString();
    } else {
      return null;
    }
  }

  // Prints one thread's header plus its stack trace, annotating each
  // frame with the monitors it holds at that depth.
  private static void printThreadInfo(ThreadInfo ti, PrintWriter out) {
    // print thread information
    printThread(ti, out);

    // print stack trace with locks
    StackTraceElement[] stacktrace = ti.getStackTrace();
    MonitorInfo[] monitors = ti.getLockedMonitors();
    for (int i = 0; i < stacktrace.length; i++) {
      StackTraceElement ste = stacktrace[i];
      out.println(INDENT + "at " + ste.toString());
      for (MonitorInfo mi : monitors) {
        if (mi.getLockedStackDepth() == i) {
          out.println(INDENT + "  - locked " + mi);
        }
      }
    }
    out.println();
  }

  // Prints the thread header line: name, id, state, lock waited on,
  // suspension/native flags, and the lock owner if blocked.
  private static void printThread(ThreadInfo ti, PrintWriter out) {
    out.print("\"" + ti.getThreadName() + "\"" + " Id="
        + ti.getThreadId() + " in " + ti.getThreadState());
    if (ti.getLockName() != null) {
      out.print(" on lock=" + ti.getLockName());
    }
    if (ti.isSuspended()) {
      out.print(" (suspended)");
    }
    if (ti.isInNative()) {
      out.print(" (running in native)");
    }
    out.println();
    if (ti.getLockOwnerName() != null) {
      out.println(INDENT + " owned by " + ti.getLockOwnerName() + " Id="
          + ti.getLockOwnerId());
    }
  }

  // Prints the ownable synchronizers (e.g. ReentrantLocks) a thread holds.
  private static void printLockInfo(LockInfo[] locks, PrintWriter out) {
    out.println(INDENT + "Locked synchronizers: count = " + locks.length);
    for (LockInfo li : locks) {
      out.println(INDENT + "  - " + li);
    }
    out.println();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestJUnitSetup.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestJUnitSetup.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Test;
/**
 * Sanity check that the JVM running the test suite has Java assertions
 * enabled (i.e. tests run with -ea).
 */
public class TestJUnitSetup {
  public static final Log LOG = LogFactory.getLog(TestJUnitSetup.class);

  @Test
  public void testJavaAssert() {
    boolean assertionTriggered = false;
    try {
      assert false : "Good! Java assert is on.";
    } catch (AssertionError ae) {
      // This is the expected path when -ea is in effect.
      LOG.info("The AssertionError is expected.", ae);
      assertionTriggered = true;
    }
    if (!assertionTriggered) {
      Assert.fail("Java assert does not work.");
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/LambdaTestUtils.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/LambdaTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeoutException;
/**
* Class containing methods and associated classes to make the most of Lambda
* expressions in Hadoop tests.
*
* The code has been designed from the outset to be Java-8 friendly, but
* to still be usable in Java 7.
*
* The code is modelled on {@code GenericTestUtils#waitFor(Supplier, int, int)},
* but also lifts concepts from Scalatest's {@code awaitResult} and
* its notion of pluggable retry logic (simple, backoff, maybe even things
* with jitter: test author gets to choose).
* The {@link #intercept(Class, Callable)} method is also all credit due
* Scalatest, though it's been extended to also support a string message
* check; useful when checking the contents of the exception.
*/
public final class LambdaTestUtils {
  public static final Logger LOG =
      LoggerFactory.getLogger(LambdaTestUtils.class);

  private LambdaTestUtils() {
  }

  /**
   * This is the string included in the assertion text in
   * {@link #intercept(Class, Callable)} if
   * the closure returned a null value.
   */
  public static final String NULL_RESULT = "(null)";

  /**
   * Interface to implement for converting a timeout into some form
   * of exception to raise.
   */
  public interface TimeoutHandler {
    /**
     * Create an exception (or throw one, if desired).
     * @param timeoutMillis timeout which has arisen
     * @param caught any exception which was caught; may be null
     * @return an exception which will then be thrown
     * @throws Exception if the handler wishes to raise an exception
     * that way.
     */
    Exception evaluate(int timeoutMillis, Exception caught) throws Exception;
  }

  /**
   * Wait for a condition to be met, with a retry policy returning the
   * sleep time before the next attempt is made. If, at the end
   * of the timeout period, the condition is still false (or failing with
   * an exception), the timeout handler is invoked, passing in the timeout
   * and any exception raised in the last invocation. The exception returned
   * by this timeout handler is then rethrown.
   * <p>
   * Example: Wait 30s for a condition to be met, with a sleep of 30s
   * between each probe.
   * If the operation is failing, then, after 30s, the timeout handler
   * is called. This returns the exception passed in (if any),
   * or generates a new one.
   * <pre>
   * await(
   *   30 * 1000,
   *   () -> { return 0 == filesystem.listFiles(new Path("/")).length; },
   *   () -> 500,
   *   (timeout, ex) -> ex != null ? ex : new TimeoutException("timeout"));
   * </pre>
   *
   * @param timeoutMillis timeout in milliseconds.
   * Can be zero, in which case only one attempt is made.
   * @param check predicate to evaluate
   * @param retry retry escalation logic
   * @param timeoutHandler handler invoked on timeout;
   * the returned exception will be thrown
   * @return the number of iterations before the condition was satisfied
   * @throws Exception the exception returned by {@code timeoutHandler} on
   * timeout
   * @throws FailFastException immediately if the evaluated operation raises it
   * @throws InterruptedException if interrupted.
   */
  public static int await(int timeoutMillis,
      Callable<Boolean> check,
      Callable<Integer> retry,
      TimeoutHandler timeoutHandler)
      throws Exception {
    Preconditions.checkArgument(timeoutMillis >= 0,
        "timeoutMillis must be >= 0");
    // Fail fast with a clear message rather than an anonymous NPE
    // somewhere inside the probe loop below.
    Preconditions.checkNotNull(check, "null check predicate");
    Preconditions.checkNotNull(retry, "null retry logic");
    Preconditions.checkNotNull(timeoutHandler);

    long endTime = Time.now() + timeoutMillis;
    Exception ex = null;
    boolean running = true;
    int iterations = 0;
    while (running) {
      iterations++;
      try {
        if (check.call()) {
          return iterations;
        }
        // the probe failed but did not raise an exception. Reset any
        // exception raised by a previous probe failure.
        ex = null;
      } catch (InterruptedException | FailFastException e) {
        throw e;
      } catch (Exception e) {
        LOG.debug("eventually() iteration {}", iterations, e);
        ex = e;
      }
      running = Time.now() < endTime;
      if (running) {
        int sleeptime = retry.call();
        if (sleeptime >= 0) {
          Thread.sleep(sleeptime);
        } else {
          // negative sleep time is the retry policy's signal to give up
          running = false;
        }
      }
    }
    // timeout
    Exception evaluate = timeoutHandler.evaluate(timeoutMillis, ex);
    if (evaluate == null) {
      // bad timeout handler logic; fall back to GenerateTimeout so the
      // underlying problem isn't lost.
      LOG.error("timeout handler {} did not throw an exception ",
          timeoutHandler);
      evaluate = new GenerateTimeout().evaluate(timeoutMillis, ex);
    }
    throw evaluate;
  }

  /**
   * Simplified {@link #await(int, Callable, Callable, TimeoutHandler)}
   * operation with a fixed interval
   * and {@link GenerateTimeout} handler to generate a {@code TimeoutException}.
   * <p>
   * Example: await for probe to succeed:
   * <pre>
   * await(
   *   30 * 1000, 500,
   *   () -> { return 0 == filesystem.listFiles(new Path("/")).length; });
   * </pre>
   *
   * @param timeoutMillis timeout in milliseconds.
   * Can be zero, in which case only one attempt is made.
   * @param intervalMillis interval in milliseconds between checks
   * @param check predicate to evaluate
   * @return the number of iterations before the condition was satisfied
   * @throws Exception returned by {@code failure} on timeout
   * @throws FailFastException immediately if the evaluated operation raises it
   * @throws InterruptedException if interrupted.
   */
  public static int await(int timeoutMillis,
      int intervalMillis,
      Callable<Boolean> check) throws Exception {
    return await(timeoutMillis, check,
        new FixedRetryInterval(intervalMillis),
        new GenerateTimeout());
  }

  /**
   * Repeatedly execute a closure until it returns a value rather than
   * raise an exception.
   * Exceptions are caught and, with one exception,
   * trigger a sleep and retry. This is similar of ScalaTest's
   * {@code eventually(timeout, closure)} operation, though that lacks
   * the ability to fail fast if the inner closure has determined that
   * a failure condition is non-recoverable.
   * <p>
   * Example: spin until the number of files in a filesystem is non-zero,
   * returning the files found.
   * The sleep interval backs off by 500 ms each iteration to a maximum of 5s.
   * <pre>
   * FileStatus[] files = eventually( 30 * 1000,
   *   () -> {
   *     FileStatus[] f = filesystem.listFiles(new Path("/"));
   *     assertEquals(0, f.length);
   *     return f;
   *   },
   *   new ProportionalRetryInterval(500, 5000));
   * </pre>
   * This allows for a fast exit, yet reduces probe frequency over time.
   *
   * @param <T> return type
   * @param timeoutMillis timeout in milliseconds.
   * Can be zero, in which case only one attempt is made before failing.
   * @param eval expression to evaluate
   * @param retry retry interval generator
   * @return result of the first successful eval call
   * @throws Exception the last exception thrown before timeout was triggered
   * @throws FailFastException if raised -without any retry attempt.
   * @throws InterruptedException if interrupted during the sleep operation.
   */
  public static <T> T eventually(int timeoutMillis,
      Callable<T> eval,
      Callable<Integer> retry) throws Exception {
    Preconditions.checkArgument(timeoutMillis >= 0,
        "timeoutMillis must be >= 0");
    // Fail fast with a clear message rather than an anonymous NPE
    // somewhere inside the probe loop below.
    Preconditions.checkNotNull(eval, "null eval closure");
    Preconditions.checkNotNull(retry, "null retry logic");
    long endTime = Time.now() + timeoutMillis;
    Exception ex;
    boolean running;
    int sleeptime;
    int iterations = 0;
    do {
      iterations++;
      try {
        return eval.call();
      } catch (InterruptedException | FailFastException e) {
        // these two exceptions trigger an immediate exit
        throw e;
      } catch (Exception e) {
        LOG.debug("evaluate() iteration {}", iterations, e);
        ex = e;
      }
      running = Time.now() < endTime;
      if (running && (sleeptime = retry.call()) >= 0) {
        Thread.sleep(sleeptime);
      }
    } while (running);
    // timeout. Throw the last exception raised
    throw ex;
  }

  /**
   * Simplified {@link #eventually(int, Callable, Callable)} method
   * with a fixed interval.
   * <p>
   * Example: wait 30s until an assertion holds, sleeping 1s between each
   * check.
   * <pre>
   * eventually( 30 * 1000, 1000,
   *   () -> { assertEquals(0, filesystem.listFiles(new Path("/")).length); }
   * );
   * </pre>
   *
   * @param timeoutMillis timeout in milliseconds.
   * Can be zero, in which case only one attempt is made before failing.
   * @param intervalMillis interval in milliseconds
   * @param eval expression to evaluate
   * @return result of the first successful invocation of {@code eval()}
   * @throws Exception the last exception thrown before timeout was triggered
   * @throws FailFastException if raised -without any retry attempt.
   * @throws InterruptedException if interrupted during the sleep operation.
   */
  public static <T> T eventually(int timeoutMillis,
      int intervalMillis,
      Callable<T> eval) throws Exception {
    return eventually(timeoutMillis, eval,
        new FixedRetryInterval(intervalMillis));
  }

  /**
   * Intercept an exception; throw an {@code AssertionError} if one not raised.
   * The caught exception is rethrown if it is of the wrong class or
   * does not contain the text defined in {@code contained}.
   * <p>
   * Example: expect deleting a nonexistent file to raise a
   * {@code FileNotFoundException}.
   * <pre>
   * FileNotFoundException ioe = intercept(FileNotFoundException.class,
   *   () -> {
   *     filesystem.delete(new Path("/missing"), false);
   *   });
   * </pre>
   *
   * @param clazz class of exception; the raised exception must be this class
   * <i>or a subclass</i>.
   * @param eval expression to eval
   * @param <T> return type of expression
   * @param <E> exception class
   * @return the caught exception if it was of the expected type
   * @throws Exception any other exception raised
   * @throws AssertionError if the evaluation call didn't raise an exception.
   * The error includes the {@code toString()} value of the result, if this
   * can be determined.
   */
  @SuppressWarnings("unchecked")
  public static <T, E extends Throwable> E intercept(
      Class<E> clazz,
      Callable<T> eval)
      throws Exception {
    try {
      T result = eval.call();
      throw new AssertionError("Expected an exception, got "
          + robustToString(result));
    } catch (Throwable e) {
      if (clazz.isAssignableFrom(e.getClass())) {
        return (E)e;
      }
      throw e;
    }
  }

  /**
   * Intercept an exception; throw an {@code AssertionError} if one not raised.
   * The caught exception is rethrown if it is of the wrong class or
   * does not contain the text defined in {@code contained}.
   * <p>
   * Example: expect deleting a nonexistent file to raise a
   * {@code FileNotFoundException} with the {@code toString()} value
   * containing the text {@code "missing"}.
   * <pre>
   * FileNotFoundException ioe = intercept(FileNotFoundException.class,
   *   "missing",
   *   () -> {
   *     filesystem.delete(new Path("/missing"), false);
   *   });
   * </pre>
   *
   * @param clazz class of exception; the raised exception must be this class
   * <i>or a subclass</i>.
   * @param contained string which must be in the {@code toString()} value
   * of the exception
   * @param eval expression to eval
   * @param <T> return type of expression
   * @param <E> exception class
   * @return the caught exception if it was of the expected type and contents
   * @throws Exception any other exception raised
   * @throws AssertionError if the evaluation call didn't raise an exception.
   * The error includes the {@code toString()} value of the result, if this
   * can be determined.
   * @see GenericTestUtils#assertExceptionContains(String, Throwable)
   */
  public static <T, E extends Throwable> E intercept(
      Class<E> clazz,
      String contained,
      Callable<T> eval)
      throws Exception {
    E ex = intercept(clazz, eval);
    GenericTestUtils.assertExceptionContains(contained, ex);
    return ex;
  }

  /**
   * Robust string converter for exception messages; if the {@code toString()}
   * method throws an exception then that exception is caught and logged,
   * then a simple string of the classname logged.
   * This stops a {@code toString()} failure hiding underlying problems.
   * @param o object to stringify
   * @return a string for exception messages
   */
  private static String robustToString(Object o) {
    if (o == null) {
      return NULL_RESULT;
    } else {
      try {
        return o.toString();
      } catch (Exception e) {
        LOG.info("Exception calling toString()", e);
        return o.getClass().toString();
      }
    }
  }

  /**
   * Returns {@code TimeoutException} on a timeout. If
   * there was an inner exception passed in, includes it as the
   * inner failure.
   */
  public static class GenerateTimeout implements TimeoutHandler {
    private final String message;

    public GenerateTimeout(String message) {
      this.message = message;
    }

    public GenerateTimeout() {
      this("timeout");
    }

    /**
     * Evaluate operation creates a new {@code TimeoutException}.
     * @param timeoutMillis timeout in millis
     * @param caught optional caught exception
     * @return TimeoutException
     */
    @Override
    public Exception evaluate(int timeoutMillis, Exception caught)
        throws Exception {
      String s = String.format("%s: after %d millis", message,
          timeoutMillis);
      String caughtText = caught != null
          ? ("; " + robustToString(caught)) : "";
      return (TimeoutException) (new TimeoutException(s + caughtText)
          .initCause(caught));
    }
  }

  /**
   * Retry at a fixed time period between calls.
   */
  public static class FixedRetryInterval implements Callable<Integer> {
    private final int intervalMillis;
    private int invocationCount = 0;

    public FixedRetryInterval(int intervalMillis) {
      Preconditions.checkArgument(intervalMillis > 0);
      this.intervalMillis = intervalMillis;
    }

    @Override
    public Integer call() throws Exception {
      invocationCount++;
      return intervalMillis;
    }

    public int getInvocationCount() {
      return invocationCount;
    }

    @Override
    public String toString() {
      final StringBuilder sb = new StringBuilder(
          "FixedRetryInterval{");
      sb.append("interval=").append(intervalMillis);
      sb.append(", invocationCount=").append(invocationCount);
      sb.append('}');
      return sb.toString();
    }
  }

  /**
   * Gradually increase the sleep time by the initial interval, until
   * the limit set by {@code maxIntervalMillis} is reached.
   */
  public static class ProportionalRetryInterval implements Callable<Integer> {
    private final int intervalMillis;
    private final int maxIntervalMillis;
    private int current;
    private int invocationCount = 0;

    public ProportionalRetryInterval(int intervalMillis,
        int maxIntervalMillis) {
      Preconditions.checkArgument(intervalMillis > 0);
      Preconditions.checkArgument(maxIntervalMillis > 0);
      this.intervalMillis = intervalMillis;
      this.current = intervalMillis;
      this.maxIntervalMillis = maxIntervalMillis;
    }

    @Override
    public Integer call() throws Exception {
      invocationCount++;
      int last = current;
      if (last < maxIntervalMillis) {
        current += intervalMillis;
      }
      return last;
    }

    public int getInvocationCount() {
      return invocationCount;
    }

    @Override
    public String toString() {
      final StringBuilder sb = new StringBuilder(
          "ProportionalRetryInterval{");
      sb.append("interval=").append(intervalMillis);
      sb.append(", current=").append(current);
      sb.append(", limit=").append(maxIntervalMillis);
      sb.append(", invocationCount=").append(invocationCount);
      sb.append('}');
      return sb.toString();
    }
  }

  /**
   * An exception which triggers a fast exit from the
   * {@link #eventually(int, Callable, Callable)} and
   * {@link #await(int, Callable, Callable, TimeoutHandler)} loops.
   */
  public static class FailFastException extends Exception {

    public FailFastException(String detailMessage) {
      super(detailMessage);
    }

    public FailFastException(String message, Throwable cause) {
      super(message, cause);
    }

    /**
     * Instantiate from a format string.
     * @param format format string
     * @param args arguments to format
     * @return an instance with the message string constructed.
     */
    public static FailFastException newInstance(String format, Object...args) {
      return new FailFastException(String.format(format, args));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/HadoopTestBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/HadoopTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.junit.Rule;
import org.junit.rules.Timeout;
/**
* A base class for JUnit4 tests that sets a default timeout for all tests
* that subclass this class.
*/
public abstract class HadoopTestBase {
  /**
   * System property name to set the test timeout: {@value}
   */
  public static final String PROPERTY_TEST_DEFAULT_TIMEOUT =
      "test.default.timeout";

  /**
   * The default timeout (in milliseconds) if the system property
   * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}
   * is not set: {@value}
   */
  public static final int TEST_DEFAULT_TIMEOUT_VALUE = 100000;

  /**
   * The JUnit rule that sets the default timeout for tests
   */
  @Rule
  public Timeout defaultTimeout = retrieveTestTimeout();

  /**
   * Retrieve the test timeout from the system property
   * {@link #PROPERTY_TEST_DEFAULT_TIMEOUT}, falling back to
   * the value in {@link #TEST_DEFAULT_TIMEOUT_VALUE} if the
   * property is not defined.
   * @return the recommended timeout for tests
   */
  public static Timeout retrieveTestTimeout() {
    // Start from the hard-coded default, then let a parseable system
    // property override it; an unparseable value is silently ignored.
    int millis = TEST_DEFAULT_TIMEOUT_VALUE;
    String configured = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT);
    if (configured != null) {
      try {
        millis = Integer.parseInt(configured);
      } catch (NumberFormatException ignored) {
        // not a number: stick with the default value
      }
    }
    return new Timeout(millis);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MoreAsserts.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MoreAsserts.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.junit.Assert;
import java.util.Iterator;
/**
* A few more asserts
*/
public class MoreAsserts {

  /**
   * Assert equivalence for array and iterable
   * @param <T> the type of the elements
   * @param s the name/message for the collection
   * @param expected the expected array of elements
   * @param actual the actual iterable of elements
   */
  public static <T> void assertEquals(String s, T[] expected,
                                      Iterable<T> actual) {
    Iterator<T> it = actual.iterator();
    int i = 0;
    for (; i < expected.length && it.hasNext(); ++i) {
      Assert.assertEquals("Element "+ i +" for "+ s, expected[i], it.next());
    }
    Assert.assertTrue("Expected more elements", i == expected.length);
    Assert.assertTrue("Expected less elements", !it.hasNext());
  }

  /**
   * Assert equality for two iterables
   * @param <T> the type of the elements
   * @param s the name/message for the collection
   * @param expected the expected iterable of elements
   * @param actual the actual iterable of elements
   */
  public static <T> void assertEquals(String s, Iterable<T> expected,
                                      Iterable<T> actual) {
    Iterator<T> ite = expected.iterator();
    Iterator<T> ita = actual.iterator();
    int i = 0;
    while (ite.hasNext() && ita.hasNext()) {
      Assert.assertEquals("Element "+ i +" for "+s, ite.next(), ita.next());
      // advance the index so mismatch messages identify the right element;
      // previously it was never incremented and always reported "Element 0"
      ++i;
    }
    Assert.assertTrue("Expected more elements", !ite.hasNext());
    Assert.assertTrue("Expected less elements", !ita.hasNext());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MockitoUtil.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MockitoUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.mockito.stubbing.Stubber;
import java.io.Closeable;
public abstract class MockitoUtil {

  /**
   * Return a mock object for an IPC protocol. This special
   * method is necessary, since the IPC proxies have to implement
   * Closeable in addition to their protocol interface.
   * @param clazz the protocol class
   */
  public static <T> T mockProtocol(Class<T> clazz) {
    // The extra Closeable interface lets callers close the proxy as
    // they would a real IPC client.
    return Mockito.mock(clazz,
        Mockito.withSettings().extraInterfaces(Closeable.class));
  }

  /**
   * Throw an exception from the mock/spy only in the case that the
   * call stack at the time the method has a line which matches the given
   * pattern.
   *
   * @param t the Throwable to throw
   * @param pattern the pattern against which to match the call stack trace
   * @return the stub in progress
   */
  public static Stubber doThrowWhenCallStackMatches(
      final Throwable t, final String pattern) {
    return Mockito.doAnswer(new Answer<Object>() {
      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        // Capture the current stack on the throwable so the match (and any
        // eventual throw) reflects this invocation's call site.
        t.setStackTrace(Thread.currentThread().getStackTrace());
        StackTraceElement[] frames = t.getStackTrace();
        for (int i = 0; i < frames.length; i++) {
          if (frames[i].toString().matches(pattern)) {
            throw t;
          }
        }
        // No frame matched: fall through to the real implementation.
        return invocation.callRealMethod();
      }
    });
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/UnitTestcaseTimeLimit.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/UnitTestcaseTimeLimit.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.junit.Rule;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;
/**
* Class for test units to extend in order that their individual tests will
* be timed out and fail automatically should they run more than 10 seconds.
* This provides an automatic regression check for tests that begin running
* longer than expected.
*/
public class UnitTestcaseTimeLimit {
  /** Per-test limit, in seconds. */
  public final int timeOutSecs = 10;

  /** JUnit rule enforcing the limit on every test in the subclass. */
  @Rule
  public TestRule globalTimeout = new Timeout(1000 * timeOutSecs);
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MockitoMaker.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MockitoMaker.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Helper class to create one-liner stubs, so that instead of: <pre>
* SomeType someDescriptiveMock = mock(SomeType.class);
* when(someDescriptiveMock.someMethod()).thenReturn(someValue);</pre>
* <p>You can now do: <pre>
* SomeType someDescriptiveMock = make(stub(SomeType.class)
* .returning(someValue).from.someMethod());</pre>
*/
public class MockitoMaker {
/**
 * Create a mock object from a mocked method call.
 *
 * @param <T> type of mocked object
 * @param methodCall for mocked object
 * @return mocked object
 */
@SuppressWarnings("unchecked")
public static <T> T make(Object methodCall) {
// NOTE(review): this relies on an implicit handshake with stub(...):
// the StubBuilder constructor has just stored itself in the thread-local,
// and evaluating "methodCall" has already invoked the method on the mock,
// which is what Mockito's when(...) actually inspects. Do not reorder.
StubBuilder<T> sb = StubBuilder.current();
when(methodCall).thenReturn(sb.firstReturn, sb.laterReturns);
// Return the mock instance captured by the thread-local builder.
return (T) StubBuilder.current().from;
}
/**
 * Create a stub builder of a mocked object.
 *
 * @param <T> type of the target object to be mocked
 * @param target class of the target object to be mocked
 * @return the stub builder of the mocked object
 */
public static <T> StubBuilder<T> stub(Class<T> target) {
// Constructing the builder also registers it in the thread-local,
// ready for the matching make(...) call on this thread.
return new StubBuilder<T>(mock(target));
}
/**
 * Builder class for stubs
 * @param <T> type of the object to be mocked
 */
public static class StubBuilder<T> {
/**
 * The target mock object
 */
public final T from;
// We want to be able to use this even when the tests are run in parallel.
@SuppressWarnings("rawtypes")
private static final ThreadLocal<StubBuilder> tls =
new ThreadLocal<StubBuilder>() {
@Override protected StubBuilder initialValue() {
return new StubBuilder();
}
};
// Value returned by the first invocation of the stubbed method.
private Object firstReturn = null;
// Values returned by subsequent invocations, in order.
private Object[] laterReturns = {};
/**
 * Default constructor for the initial stub builder
 */
public StubBuilder() {
// Placeholder instance used only as the thread-local's initial value;
// it carries no mock.
this.from = null;
}
/**
 * Construct a stub builder with a mock instance
 *
 * @param mockInstance the mock object
 */
public StubBuilder(T mockInstance) {
// Register this builder for the current thread BEFORE exposing the
// mock, so make(...) can retrieve it via current().
tls.set(this);
this.from = mockInstance;
}
/**
 * Get the current stub builder from thread local
 *
 * @param <T>
 * @return the stub builder of the mocked object
 */
@SuppressWarnings("unchecked")
public static <T> StubBuilder<T> current() {
return tls.get();
}
/**
 * Set the return value for the current stub builder
 *
 * @param value the return value
 * @return the stub builder
 */
public StubBuilder<T> returning(Object value) {
this.firstReturn = value;
return this;
}
/**
 * Set the return values for the current stub builder
 *
 * @param value the first return value
 * @param values the return values for later invocations
 * @return the stub builder
 */
public StubBuilder<T> returning(Object value, Object... values) {
this.firstReturn = value;
this.laterReturns = values;
return this;
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/PlatformAssumptions.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/PlatformAssumptions.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.junit.internal.AssumptionViolatedException;
/**
* JUnit assumptions for the environment (OS).
*/
public final class PlatformAssumptions {
  /** The running OS name, as reported by the JVM. */
  public static final String OS_NAME = System.getProperty("os.name");
  /** True when the OS name indicates Windows. */
  public static final boolean WINDOWS = OS_NAME.startsWith("Windows");

  private PlatformAssumptions() { }

  /** Skip the current test when running on Windows. */
  public static void assumeNotWindows() {
    assumeNotWindows("Expected Unix-like platform but got " + OS_NAME);
  }

  /**
   * Skip the current test when running on Windows.
   * @param message assumption-violation message to report
   */
  public static void assumeNotWindows(String message) {
    if (WINDOWS) {
      throw new AssumptionViolatedException(message);
    }
  }

  /** Skip the current test unless running on Windows. */
  public static void assumeWindows() {
    if (WINDOWS) {
      return;
    }
    throw new AssumptionViolatedException(
        "Expected Windows platform but got " + OS_NAME);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MetricsAsserts.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MetricsAsserts.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.*;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.util.Quantile;
import org.hamcrest.Description;
import org.junit.Assert;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
import org.mockito.internal.matchers.GreaterThan;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import static org.mockito.AdditionalMatchers.geq;
import static org.mockito.Mockito.*;
/**
* Helpers for metrics source tests
*/
public class MetricsAsserts {
final static Log LOG = LogFactory.getLog(MetricsAsserts.class);
private static final double EPSILON = 0.00001;
public static MetricsSystem mockMetricsSystem() {
  // Install a mock as the process-wide default metrics system so code
  // under test registers against it.
  MetricsSystem mockedSystem = mock(MetricsSystem.class);
  DefaultMetricsSystem.setInstance(mockedSystem);
  return mockedSystem;
}
public static MetricsRecordBuilder mockMetricsRecordBuilder() {
final MetricsCollector mc = mock(MetricsCollector.class);
// Default answer logs every call, then returns the collector for the
// navigation methods (parent/endRecord) and the builder itself for all
// other methods, so fluent chains keep working on the mock.
MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class,
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) {
Object[] args = invocation.getArguments();
// Join the argument values for the debug log line.
StringBuilder sb = new StringBuilder();
for (Object o : args) {
if (sb.length() > 0) sb.append(", ");
sb.append(String.valueOf(o));
}
String methodName = invocation.getMethod().getName();
LOG.debug(methodName +": "+ sb);
return methodName.equals("parent") || methodName.equals("endRecord") ?
mc : invocation.getMock();
}
});
// Whatever record is added (by name or by MetricsInfo), hand back the
// same mocked builder.
when(mc.addRecord(anyString())).thenReturn(rb);
when(mc.addRecord(anyInfo())).thenReturn(rb);
return rb;
}
/**
* Call getMetrics on source and get a record builder mock to verify
* @param source the metrics source
* @param all if true, return all metrics even if not changed
* @return the record builder mock to verifyÏ
*/
public static MetricsRecordBuilder getMetrics(MetricsSource source,
boolean all) {
MetricsRecordBuilder rb = mockMetricsRecordBuilder();
MetricsCollector mc = rb.parent();
source.getMetrics(mc, all);
return rb;
}
public static MetricsRecordBuilder getMetrics(String name) {
return getMetrics(DefaultMetricsSystem.instance().getSource(name));
}
public static MetricsRecordBuilder getMetrics(MetricsSource source) {
return getMetrics(source, true);
}
private static class InfoWithSameName extends ArgumentMatcher<MetricsInfo> {
private final String expected;
InfoWithSameName(MetricsInfo info) {
expected = checkNotNull(info.name(), "info name");
}
@Override public boolean matches(Object info) {
return expected.equals(((MetricsInfo)info).name());
}
@Override public void describeTo(Description desc) {
desc.appendText("Info with name="+ expected);
}
}
/**
* MetricInfo with the same name
* @param info to match
* @return <code>null</code>
*/
public static MetricsInfo eqName(MetricsInfo info) {
return argThat(new InfoWithSameName(info));
}
private static class AnyInfo extends ArgumentMatcher<MetricsInfo> {
@Override public boolean matches(Object info) {
return info instanceof MetricsInfo; // not null as well
}
}
public static MetricsInfo anyInfo() {
return argThat(new AnyInfo());
}
/**
* Assert an int gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, int expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getIntGauge(name, rb));
}
public static int getIntGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert an int counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertCounter(String name, int expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getIntCounter(name, rb));
}
public static int getIntCounter(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(
Integer.class);
verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a long gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, long expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getLongGauge(name, rb));
}
public static long getLongGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a double gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, double expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getDoubleGauge(name, rb), EPSILON);
}
public static double getDoubleGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Double> captor = ArgumentCaptor.forClass(Double.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a long counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertCounter(String name, long expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getLongCounter(name, rb));
}
public static long getLongCounter(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
public static long getLongCounterWithoutCheck(String name,
MetricsRecordBuilder rb) {
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
return captor.getValue();
}
public static String getStringMetric(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
verify(rb, atLeast(0)).tag(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a float gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, float expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getFloatGauge(name, rb), EPSILON);
}
public static float getFloatGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Float> captor = ArgumentCaptor.forClass(Float.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Check that this metric was captured exactly once.
*/
private static void checkCaptured(ArgumentCaptor<?> captor, String name) {
Assert.assertEquals("Expected exactly one metric for name " + name,
1, captor.getAllValues().size());
}
/**
* Assert an int gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertGauge(String name, int expected,
MetricsSource source) {
assertGauge(name, expected, getMetrics(source));
}
/**
* Assert an int counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertCounter(String name, int expected,
MetricsSource source) {
assertCounter(name, expected, getMetrics(source));
}
/**
* Assert a long gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertGauge(String name, long expected,
MetricsSource source) {
assertGauge(name, expected, getMetrics(source));
}
/**
* Assert a long counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertCounter(String name, long expected,
MetricsSource source) {
assertCounter(name, expected, getMetrics(source));
}
/**
* Assert that a long counter metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param rb the record builder mock used to getMetrics
*/
public static void assertCounterGt(String name, long greater,
MetricsRecordBuilder rb) {
Assert.assertThat("Bad value for metric " + name, getLongCounter(name, rb),
new GreaterThan<Long>(greater));
}
/**
* Assert that a long counter metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param source the metrics source
*/
public static void assertCounterGt(String name, long greater,
MetricsSource source) {
assertCounterGt(name, greater, getMetrics(source));
}
/**
* Assert that a double gauge metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param rb the record builder mock used to getMetrics
*/
public static void assertGaugeGt(String name, double greater,
MetricsRecordBuilder rb) {
Assert.assertThat("Bad value for metric " + name, getDoubleGauge(name, rb),
new GreaterThan<Double>(greater));
}
/**
* Assert that a double gauge metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param source the metrics source
*/
public static void assertGaugeGt(String name, double greater,
MetricsSource source) {
assertGaugeGt(name, greater, getMetrics(source));
}
/**
* Asserts that the NumOps and quantiles for a metric have been changed at
* some point to a non-zero value.
*
* @param prefix of the metric
* @param rb MetricsRecordBuilder with the metric
*/
public static void assertQuantileGauges(String prefix,
MetricsRecordBuilder rb) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0l));
for (Quantile q : MutableQuantiles.quantiles) {
String nameTemplate = prefix + "%dthPercentileLatency";
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
eqName(info(String.format(nameTemplate, percentile), "")),
geq(0l));
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestMultithreadedTestUtil.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestMultithreadedTestUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.*;
public class TestMultithreadedTestUtil {
  // Message raised by the failing worker; waitFor() must surface it verbatim.
  private static final String FAIL_MSG =
      "Inner thread fails an assert";

  /** All workers complete normally and waitFor returns promptly. */
  @Test
  public void testNoErrors() throws Exception {
    final AtomicInteger completions = new AtomicInteger();
    MultithreadedTestUtil.TestContext context = new MultithreadedTestUtil.TestContext();
    int remaining = 3;
    while (remaining-- > 0) {
      context.addThread(new MultithreadedTestUtil.TestingThread(context) {
        @Override
        public void doWork() throws Exception {
          completions.incrementAndGet();
        }
      });
    }
    // Nothing may run before startThreads() is called.
    assertEquals(0, completions.get());
    context.startThreads();
    long startedAt = Time.now();
    context.waitFor(30000);
    long endedAt = Time.now();
    // Each of the three workers ran exactly once.
    assertEquals(3, completions.get());
    // waitFor returns as soon as the workers finish, well under the cap.
    assertTrue("Test took " + (endedAt - startedAt) + "ms",
        endedAt - startedAt < 5000);
  }

  /** A failed assertion inside a worker is rethrown by waitFor. */
  @Test
  public void testThreadFails() throws Exception {
    MultithreadedTestUtil.TestContext context = new MultithreadedTestUtil.TestContext();
    context.addThread(new MultithreadedTestUtil.TestingThread(context) {
      @Override
      public void doWork() throws Exception {
        fail(FAIL_MSG);
      }
    });
    context.startThreads();
    long startedAt = Time.now();
    try {
      context.waitFor(30000);
      fail("waitFor did not throw");
    } catch (RuntimeException rte) {
      // The worker's failure message must be preserved as the cause.
      assertEquals(FAIL_MSG, rte.getCause().getMessage());
    }
    long endedAt = Time.now();
    // The failure propagates immediately rather than after the full timeout.
    assertTrue("Test took " + (endedAt - startedAt) + "ms",
        endedAt - startedAt < 5000);
  }

  /** A checked exception thrown by a worker is wrapped and rethrown by waitFor. */
  @Test
  public void testThreadThrowsCheckedException() throws Exception {
    MultithreadedTestUtil.TestContext context = new MultithreadedTestUtil.TestContext();
    context.addThread(new MultithreadedTestUtil.TestingThread(context) {
      @Override
      public void doWork() throws Exception {
        throw new IOException("my ioe");
      }
    });
    context.startThreads();
    long startedAt = Time.now();
    try {
      context.waitFor(30000);
      fail("waitFor did not throw");
    } catch (RuntimeException rte) {
      // The original IOException must be attached as the cause.
      assertEquals("my ioe", rte.getCause().getMessage());
    }
    long endedAt = Time.now();
    // The exception propagates immediately rather than after the full timeout.
    assertTrue("Test took " + (endedAt - startedAt) + "ms",
        endedAt - startedAt < 5000);
  }

  /** A RepeatingTestThread keeps running its action until the context stops. */
  @Test
  public void testRepeatingThread() throws Exception {
    final AtomicInteger counter = new AtomicInteger();
    MultithreadedTestUtil.TestContext context = new MultithreadedTestUtil.TestContext();
    context.addThread(new MultithreadedTestUtil.RepeatingTestThread(context) {
      @Override
      public void doAnAction() throws Exception {
        counter.incrementAndGet();
      }
    });
    context.startThreads();
    long startedAt = Time.now();
    context.waitFor(3000);
    context.stop();
    long endedAt = Time.now();
    long elapsed = endedAt - startedAt;
    // The wait should last roughly the requested three seconds.
    assertTrue("Test took " + (endedAt - startedAt) + "ms",
        Math.abs(elapsed - 3000) < 500);
    // Three seconds of repeated execution yields many iterations.
    assertTrue("Counter value = " + counter.get(),
        counter.get() > 1000);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestTimedOutTestsListener.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestTimedOutTestsListener.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.notification.Failure;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
public class TestTimedOutTestsListener {
  /**
   * Builds two independent three-way deadlock cycles using six daemon
   * threads: one cycle on intrinsic monitors (a -> b -> c -> a) and one on
   * ReentrantLocks (d -> e -> f -> d). The CyclicBarrier guarantees every
   * thread holds its first lock before anyone attempts its second, so the
   * deadlock always forms.
   */
  public static class Deadlock {
    // All six threads rendezvous here twice: once after taking the first
    // lock, once just before taking the second.
    private CyclicBarrier barrier = new CyclicBarrier(6);
    public Deadlock() {
      DeadlockThread[] dThreads = new DeadlockThread[6];
      Monitor a = new Monitor("a");
      Monitor b = new Monitor("b");
      Monitor c = new Monitor("c");
      dThreads[0] = new DeadlockThread("MThread-1", a, b);
      dThreads[1] = new DeadlockThread("MThread-2", b, c);
      dThreads[2] = new DeadlockThread("MThread-3", c, a);
      Lock d = new ReentrantLock();
      Lock e = new ReentrantLock();
      Lock f = new ReentrantLock();
      dThreads[3] = new DeadlockThread("SThread-4", d, e);
      dThreads[4] = new DeadlockThread("SThread-5", e, f);
      dThreads[5] = new DeadlockThread("SThread-6", f, d);
      // make them daemon threads so that the test will exit
      for (int i = 0; i < 6; i++) {
        dThreads[i].setDaemon(true);
        dThreads[i].start();
      }
    }
    // A thread that takes lock1/mon1, waits for everyone, then blocks
    // forever trying to take lock2/mon2 held by its neighbor in the cycle.
    class DeadlockThread extends Thread {
      private Lock lock1 = null;
      private Lock lock2 = null;
      private Monitor mon1 = null;
      private Monitor mon2 = null;
      // true -> ReentrantLock pair, false -> monitor (synchronized) pair
      private boolean useSync;
      DeadlockThread(String name, Lock lock1, Lock lock2) {
        super(name);
        this.lock1 = lock1;
        this.lock2 = lock2;
        this.useSync = true;
      }
      DeadlockThread(String name, Monitor mon1, Monitor mon2) {
        super(name);
        this.mon1 = mon1;
        this.mon2 = mon2;
        this.useSync = false;
      }
      public void run() {
        if (useSync) {
          syncLock();
        } else {
          monitorLock();
        }
      }
      private void syncLock() {
        lock1.lock();
        try {
          try {
            barrier.await();
          } catch (Exception e) {
          }
          goSyncDeadlock();
        } finally {
          lock1.unlock();
        }
      }
      private void goSyncDeadlock() {
        try {
          barrier.await();
        } catch (Exception e) {
        }
        // Blocks forever: lock2 is held by the next thread in the cycle.
        lock2.lock();
        throw new RuntimeException("should not reach here.");
      }
      private void monitorLock() {
        synchronized (mon1) {
          try {
            barrier.await();
          } catch (Exception e) {
          }
          goMonitorDeadlock();
        }
      }
      private void goMonitorDeadlock() {
        try {
          barrier.await();
        } catch (Exception e) {
        }
        // Blocks forever: mon2 is held by the next thread in the cycle.
        synchronized (mon2) {
          throw new RuntimeException(getName() + " should not reach here.");
        }
      }
    }
    // Named object used purely as a monitor for the synchronized cycle.
    class Monitor {
      String name;
      Monitor(String name) {
        this.name = name;
      }
    }
  }
  /**
   * Starts the deadlock, polls until the JVM's deadlock detector sees it,
   * then checks the listener's report contains a thread dump and the
   * detected deadlocks. Expects exactly 3 BLOCKED threads: the detector
   * reports monitor-deadlocked threads as BLOCKED, lock-based ones as WAITING.
   */
  @Test(timeout=30000)
  public void testThreadDumpAndDeadlocks() throws Exception {
    new Deadlock();
    String s = null;
    while (true) {
      s = TimedOutTestsListener.buildDeadlockInfo();
      if (s != null)
        break;
      Thread.sleep(100);
    }
    Assert.assertEquals(3, countStringOccurrences(s, "BLOCKED"));
    Failure failure = new Failure(
        null, new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX));
    StringWriter writer = new StringWriter();
    new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure);
    String out = writer.toString();
    Assert.assertTrue(out.contains("THREAD DUMP"));
    Assert.assertTrue(out.contains("DEADLOCKS DETECTED"));
    System.out.println(out);
  }
  // Count non-overlapping-start occurrences of substr within s.
  private int countStringOccurrences(String s, String substr) {
    int n = 0;
    int index = 0;
    while ((index = s.indexOf(substr, index) + 1) != 0) {
      n++;
    }
    return n;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/CoreTestDriver.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/CoreTestDriver.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.apache.hadoop.ipc.TestIPC;
import org.apache.hadoop.ipc.TestRPC;
import org.apache.hadoop.util.ProgramDriver;
import org.smartdata.erasurecode.TestArrayFile;
import org.smartdata.erasurecode.TestSetFile;
/**
* Driver for core tests.
*/
public class CoreTestDriver {
  // Registry that maps test names to runnable test classes.
  private final ProgramDriver driver;

  /** Build a driver backed by a fresh ProgramDriver registry. */
  public CoreTestDriver() {
    this(new ProgramDriver());
  }

  /** Build a driver on an existing registry, registering the core tests. */
  public CoreTestDriver(ProgramDriver pgd) {
    this.driver = pgd;
    try {
      driver.addClass("testsetfile", TestSetFile.class,
          "A test for flat files of binary key/value pairs.");
      driver.addClass("testarrayfile", TestArrayFile.class,
          "A test for flat files of binary key/value pairs.");
      driver.addClass("testrpc", TestRPC.class, "A test for rpc.");
      driver.addClass("testipc", TestIPC.class, "A test for ipc.");
    } catch(Throwable e) {
      e.printStackTrace();
    }
  }

  /** Dispatch to the named test and exit the JVM with its status code. */
  public void run(String argv[]) {
    int status = -1;
    try {
      status = driver.run(argv);
    } catch(Throwable e) {
      e.printStackTrace();
    }
    System.exit(status);
  }

  public static void main(String argv[]){
    new CoreTestDriver().run(argv);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/GenericTestUtils.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/GenericTestUtils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import com.google.common.base.Joiner;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.*;
import org.junit.Assert;
import org.junit.Assume;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.*;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
/**
* Test provides some very generic helpers which might be used across the tests
*/
public abstract class GenericTestUtils {
private static final AtomicInteger sequence = new AtomicInteger();
/**
* system property for test data: {@value}
*/
public static final String SYSPROP_TEST_DATA_DIR = "test.build.data";
/**
* Default path for test data: {@value}
*/
public static final String DEFAULT_TEST_DATA_DIR =
"target" + File.separator + "org/apache/hadoop/test" + File.separator + "data";
/**
* The default path for using in Hadoop path references: {@value}
*/
public static final String DEFAULT_TEST_DATA_PATH = "target/test/data/";
  /** Silence a commons-logging log entirely (assumes a Log4j backend). */
  @SuppressWarnings("unchecked")
  public static void disableLog(Log log) {
    // We expect that commons-logging is a wrapper around Log4j.
    disableLog((Log4JLogger) log);
  }
  /** Look up the Log4j logger with the same name as the given SLF4J logger. */
  public static Logger toLog4j(org.slf4j.Logger logger) {
    return LogManager.getLogger(logger.getName());
  }
  /** Silence a commons-logging Log4j wrapper entirely. */
  public static void disableLog(Log4JLogger log) {
    log.getLogger().setLevel(Level.OFF);
  }
  /** Silence a Log4j logger entirely. */
  public static void disableLog(Logger logger) {
    logger.setLevel(Level.OFF);
  }
  /** Silence an SLF4J logger entirely (via its Log4j counterpart). */
  public static void disableLog(org.slf4j.Logger logger) {
    disableLog(toLog4j(logger));
  }
  /** Set the level of a commons-logging log (assumes a Log4j backend). */
  @SuppressWarnings("unchecked")
  public static void setLogLevel(Log log, Level level) {
    // We expect that commons-logging is a wrapper around Log4j.
    setLogLevel((Log4JLogger) log, level);
  }
  /** Set the level of a commons-logging Log4j wrapper. */
  public static void setLogLevel(Log4JLogger log, Level level) {
    log.getLogger().setLevel(level);
  }
  /** Set the level of a Log4j logger. */
  public static void setLogLevel(Logger logger, Level level) {
    logger.setLevel(level);
  }
  /** Set the level of an SLF4J logger (via its Log4j counterpart). */
  public static void setLogLevel(org.slf4j.Logger logger, Level level) {
    setLogLevel(toLog4j(logger), level);
  }
  /**
   * Extracts the name of the method where the invocation has happened
   * @return String name of the invoking method
   */
  public static String getMethodName() {
    // Index 2 relies on the exact call depth: [0] = getStackTrace,
    // [1] = getMethodName, [2] = the caller. Do not refactor into helpers.
    return Thread.currentThread().getStackTrace()[2].getMethodName();
  }
  /**
   * Generates a process-wide unique sequence number.
   * @return an unique sequence number
   */
  public static int uniqueSequenceId() {
    // Atomic increment: safe to call from multiple test threads.
    return sequence.incrementAndGet();
  }
/**
* Get the (created) base directory for tests.
* @return the absolute directory
*/
public static File getTestDir() {
String prop = System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_DIR);
if (prop.isEmpty()) {
// corner case: property is there but empty
prop = DEFAULT_TEST_DATA_DIR;
}
File dir = new File(prop).getAbsoluteFile();
dir.mkdirs();
assertExists(dir);
return dir;
}
  /**
   * Get an uncreated directory for tests.
   * @param subdir name of the subdirectory under the test base directory
   * @return the absolute directory for tests. Caller is expected to create it.
   */
  public static File getTestDir(String subdir) {
    return new File(getTestDir(), subdir).getAbsoluteFile();
  }
  /**
   * Get an uncreated directory for tests with a randomized alphanumeric
   * name. This is likely to provide a unique path for tests run in parallel
   * @return the absolute directory for tests. Caller is expected to create it.
   */
  public static File getRandomizedTestDir() {
    return new File(getRandomizedTempPath()).getAbsoluteFile();
  }
/**
* Get a temp path. This may or may not be relative; it depends on what the
* {@link #SYSPROP_TEST_DATA_DIR} is set to. If unset, it returns a path
* under the relative path {@link #DEFAULT_TEST_DATA_PATH}
* @param subpath sub path, with no leading "/" character
* @return a string to use in paths
*/
public static String getTempPath(String subpath) {
String prop = System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_PATH);
if (prop.isEmpty()) {
// corner case: property is there but empty
prop = DEFAULT_TEST_DATA_PATH;
}
if (!prop.endsWith("/")) {
prop = prop + "/";
}
return prop + subpath;
}
  /**
   * Get a temp path with a randomized 10-character alphanumeric leaf name.
   * This may or may not be relative; it depends on what the
   * {@link #SYSPROP_TEST_DATA_DIR} is set to. If unset, it returns a path
   * under the relative path {@link #DEFAULT_TEST_DATA_PATH}
   * @return a string to use in paths
   */
  public static String getRandomizedTempPath() {
    return getTempPath(RandomStringUtils.randomAlphanumeric(10));
  }
  /**
   * Assert that a given file exists.
   * @param f the file or directory that must exist
   */
  public static void assertExists(File f) {
    Assert.assertTrue("File " + f + " should exist", f.exists());
  }
/**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
* @throws IOException if the dir is inaccessible
*/
public static void assertGlobEquals(File dir, String pattern,
String ... expectedMatches) throws IOException {
Set<String> found = Sets.newTreeSet();
for (File f : FileUtil.listFiles(dir)) {
if (f.getName().matches(pattern)) {
found.add(f.getName());
}
}
Set<String> expectedSet = Sets.newTreeSet(
Arrays.asList(expectedMatches));
Assert.assertEquals("Bad files matching " + pattern + " in " + dir,
Joiner.on(",").join(expectedSet),
Joiner.on(",").join(found));
}
  // Messages used by assertExceptionContains below.
  static final String E_NULL_THROWABLE = "Null Throwable";
  static final String E_NULL_THROWABLE_STRING =
      "Null Throwable.toString() value";
  static final String E_UNEXPECTED_EXCEPTION = "but got unexpected exception";
  /**
   * Assert that an exception's <code>toString()</code> value
   * contained the expected text.
   * @param string expected string
   * @param t thrown exception
   * @throws AssertionError if the expected string is not found
   */
  public static void assertExceptionContains(String string, Throwable t) {
    Assert.assertNotNull(E_NULL_THROWABLE, t);
    String msg = t.toString();
    if (msg == null) {
      throw new AssertionError(E_NULL_THROWABLE_STRING, t);
    }
    if (!msg.contains(string)) {
      // Include the full stringified exception so the failure is diagnosable.
      throw new AssertionError("Expected to find '" + string + "' "
          + E_UNEXPECTED_EXCEPTION + ":"
          + StringUtils.stringifyException(t),
          t);
    }
  }
  /**
   * Poll {@code check} every {@code checkEveryMillis} ms until it returns
   * true, or until roughly {@code waitForMillis} ms have elapsed.
   * NOTE(review): there is no final check after the deadline passes, so a
   * condition that becomes true during the last sleep still times out.
   * @param check condition to poll
   * @param checkEveryMillis polling interval in milliseconds
   * @param waitForMillis overall timeout in milliseconds
   * @throws TimeoutException if the condition never became true, with
   *         a full thread dump attached for diagnosis
   * @throws InterruptedException if interrupted while sleeping
   */
  public static void waitFor(Supplier<Boolean> check,
      int checkEveryMillis, int waitForMillis)
      throws TimeoutException, InterruptedException
  {
    long st = Time.now();
    do {
      boolean result = check.get();
      if (result) {
        return;
      }
      Thread.sleep(checkEveryMillis);
    } while (Time.now() - st < waitForMillis);
    throw new TimeoutException("Timed out waiting for condition. " +
        "Thread diagnostics:\n" +
        TimedOutTestsListener.buildThreadDiagnosticString());
  }
/**
* Prints output to one {@link PrintStream} while copying to the other.
* <p>
* Closing the main {@link PrintStream} will NOT close the other.
*/
public static class TeePrintStream extends PrintStream {
private final PrintStream other;
public TeePrintStream(OutputStream main, PrintStream other) {
super(main);
this.other = other;
}
@Override
public void flush() {
super.flush();
other.flush();
}
@Override
public void write(byte[] buf, int off, int len) {
super.write(buf, off, len);
other.write(buf, off, len);
}
}
/**
* Capture output printed to {@link System#err}.
* <p>
* Usage:
* <pre>
* try (SystemErrCapturer capture = new SystemErrCapturer()) {
* ...
* // Call capture.getOutput() to get the output string
* }
* </pre>
*
* TODO: Add lambda support once Java 8 is common.
* <pre>
* SystemErrCapturer.withCapture(capture -> {
* ...
* })
* </pre>
*/
public static class SystemErrCapturer implements AutoCloseable {
final private ByteArrayOutputStream bytes;
final private PrintStream bytesPrintStream;
final private PrintStream oldErr;
public SystemErrCapturer() {
bytes = new ByteArrayOutputStream();
bytesPrintStream = new PrintStream(bytes);
oldErr = System.err;
System.setErr(new TeePrintStream(oldErr, bytesPrintStream));
}
public String getOutput() {
return bytes.toString();
}
@Override
public void close() throws Exception {
IOUtils.closeQuietly(bytesPrintStream);
System.setErr(oldErr);
}
}
public static class LogCapturer {
private StringWriter sw = new StringWriter();
private WriterAppender appender;
private Logger logger;
public static LogCapturer captureLogs(Log l) {
Logger logger = ((Log4JLogger)l).getLogger();
return new LogCapturer(logger);
}
public static LogCapturer captureLogs(org.slf4j.Logger logger) {
return new LogCapturer(toLog4j(logger));
}
private LogCapturer(Logger logger) {
this.logger = logger;
Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
if (defaultAppender == null) {
defaultAppender = Logger.getRootLogger().getAppender("console");
}
final Layout layout = (defaultAppender == null) ? new PatternLayout() :
defaultAppender.getLayout();
this.appender = new WriterAppender(layout, sw);
logger.addAppender(this.appender);
}
public String getOutput() {
return sw.toString();
}
public void stopCapturing() {
logger.removeAppender(appender);
}
public void clearOutput() {
sw.getBuffer().setLength(0);
}
}
/**
 * Mockito answer helper that triggers one latch as soon as the
 * method is called, then waits on another before continuing.
 */
public static class DelayAnswer implements Answer<Object> {
  private final Log LOG;

  // Opens as soon as the stubbed method is entered.
  private final CountDownLatch fireLatch = new CountDownLatch(1);
  // Blocks the stubbed method until proceed() is called.
  private final CountDownLatch waitLatch = new CountDownLatch(1);
  // Opens once the real method has returned or thrown.
  private final CountDownLatch resultLatch = new CountDownLatch(1);

  private final AtomicInteger fireCounter = new AtomicInteger(0);
  private final AtomicInteger resultCounter = new AtomicInteger(0);

  // Result fields set after proceed() is called; volatile so the test
  // thread sees values written by the stubbed-call thread.
  private volatile Throwable thrown;
  private volatile Object returnValue;

  public DelayAnswer(Log log) {
    this.LOG = log;
  }

  /**
   * Wait until the method is called.
   */
  public void waitForCall() throws InterruptedException {
    fireLatch.await();
  }

  /**
   * Tell the method to proceed.
   * This should only be called after waitForCall()
   */
  public void proceed() {
    waitLatch.countDown();
  }

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    LOG.info("DelayAnswer firing fireLatch");
    fireCounter.getAndIncrement();
    fireLatch.countDown();
    try {
      LOG.info("DelayAnswer waiting on waitLatch");
      waitLatch.await();
      LOG.info("DelayAnswer delay complete");
    } catch (InterruptedException ie) {
      // FIX: restore the interrupt status before converting to IOException,
      // so callers further up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted waiting on latch", ie);
    }
    return passThrough(invocation);
  }

  /**
   * Invoke the real method, recording its outcome and releasing
   * {@link #resultLatch} whether it returns or throws.
   */
  protected Object passThrough(InvocationOnMock invocation) throws Throwable {
    try {
      Object ret = invocation.callRealMethod();
      returnValue = ret;
      return ret;
    } catch (Throwable t) {
      thrown = t;
      throw t;
    } finally {
      resultCounter.incrementAndGet();
      resultLatch.countDown();
    }
  }

  /**
   * After calling proceed(), this will wait until the call has
   * completed and a result has been returned to the caller.
   */
  public void waitForResult() throws InterruptedException {
    resultLatch.await();
  }

  /**
   * After the call has gone through, return any exception that
   * was thrown, or null if no exception was thrown.
   */
  public Throwable getThrown() {
    return thrown;
  }

  /**
   * After the call has gone through, return the call's return value,
   * or null in case it was void or an exception was thrown.
   */
  public Object getReturnValue() {
    return returnValue;
  }

  /** @return how many times the stubbed method has been entered. */
  public int getFireCount() {
    return fireCounter.get();
  }

  /** @return how many times the real method has completed (or thrown). */
  public int getResultCount() {
    return resultCounter.get();
  }
}
/**
 * An Answer implementation that simply forwards all calls through
 * to a delegate.
 *
 * This is useful as the default Answer for a mock object, to create
 * something like a spy on an RPC proxy. For example:
 * <code>
 *    NamenodeProtocol origNNProxy = secondary.getNameNode();
 *    NamenodeProtocol spyNNProxy = Mockito.mock(NameNodeProtocol.class,
 *        new DelegateAnswer(origNNProxy);
 *    doThrow(...).when(spyNNProxy).getBlockLocations(...);
 *    ...
 * </code>
 */
public static class DelegateAnswer implements Answer<Object> {
  private final Object delegate;
  private final Log log;

  public DelegateAnswer(Object delegate) {
    this(null, delegate);
  }

  public DelegateAnswer(Log log, Object delegate) {
    this.log = log;
    this.delegate = delegate;
  }

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    if (log != null) {
      // Include a throwable so the log carries a stack trace of the call.
      log.info("Call to " + invocation + " on " + delegate,
          new Exception("TRACE"));
    }
    try {
      return invocation.getMethod().invoke(
          delegate, invocation.getArguments());
    } catch (InvocationTargetException ite) {
      // Unwrap so callers see the delegate's real exception type.
      throw ite.getCause();
    }
  }
}
/**
 * An Answer implementation which sleeps for a random number of milliseconds
 * between 0 and a configurable value before delegating to the real
 * implementation of the method. This can be useful for drawing out race
 * conditions.
 */
public static class SleepAnswer implements Answer<Object> {
  private final int maxSleepTime;
  private static Random r = new Random();

  /**
   * @param maxSleepTime upper bound (exclusive) in ms for the random delay;
   *                     0 means no delay.
   */
  public SleepAnswer(int maxSleepTime) {
    this.maxSleepTime = maxSleepTime;
  }

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    boolean interrupted = false;
    try {
      // FIX: Random.nextInt(0) throws IllegalArgumentException, so skip
      // the sleep entirely when no delay was requested.
      if (maxSleepTime > 0) {
        Thread.sleep(r.nextInt(maxSleepTime));
      }
    } catch (InterruptedException ie) {
      interrupted = true;
    }
    try {
      return invocation.callRealMethod();
    } finally {
      if (interrupted) {
        // Restore the interrupt status swallowed during the sleep.
        Thread.currentThread().interrupt();
      }
    }
  }
}
/**
 * Assert that the given regex does NOT occur anywhere in {@code output}.
 */
public static void assertDoesNotMatch(String output, String pattern) {
  // FIX: the failure message previously read "Expected output to match",
  // which is the opposite of what this assertion checks.
  Assert.assertFalse("Expected output to not match /" + pattern + "/" +
      " but got:\n" + output,
      Pattern.compile(pattern).matcher(output).find());
}
/**
 * Assert that the given regex occurs somewhere in {@code output}.
 */
public static void assertMatches(String output, String pattern) {
  boolean found = Pattern.compile(pattern).matcher(output).find();
  Assert.assertTrue(
      "Expected output to match /" + pattern + "/" + " but got:\n" + output,
      found);
}
/**
 * Assert that {@code actual} is within {@code allowedError} of
 * {@code expected}, inclusive on both sides.
 */
public static void assertValueNear(long expected, long actual, long allowedError) {
  long lowerBound = expected - allowedError;
  long upperBound = expected + allowedError;
  assertValueWithinRange(lowerBound, upperBound, actual);
}
/**
 * Assert that {@code expectedMin <= actual <= expectedMax} (inclusive).
 */
public static void assertValueWithinRange(long expectedMin, long expectedMax,
    long actual) {
  // FIX: use square brackets in the message — the check is inclusive, but
  // "(min,max)" read as an open (exclusive) interval.
  Assert.assertTrue("Expected " + actual + " to be in range [" + expectedMin
      + ", " + expectedMax + "]", expectedMin <= actual && actual <= expectedMax);
}
/**
 * Determine if there are any live threads whose name matches the regex.
 * @param pattern a Pattern object used to match thread names
 * @return true if there is any thread that matches the pattern
 */
public static boolean anyThreadMatching(Pattern pattern) {
  ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
  // Depth 20 keeps per-thread stack capture cheap; we only need names.
  ThreadInfo[] threadInfos =
      threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
  for (ThreadInfo threadInfo : threadInfos) {
    // Entries may be null for threads that died since getAllThreadIds().
    if (threadInfo != null
        && pattern.matcher(threadInfo.getThreadName()).matches()) {
      return true;
    }
  }
  return false;
}
/**
 * Assert that there are no threads running whose name matches the
 * given regular expression.
 * @param regex the regex to match against
 */
public static void assertNoThreadsMatching(String regex) {
  if (anyThreadMatching(Pattern.compile(regex))) {
    Assert.fail("Leaked thread matches " + regex);
  }
}
/**
 * Periodically check and wait until no thread's name matches the
 * given regular expression.
 *
 * @param regex the regex to match against.
 * @param checkEveryMillis time (in milliseconds) between checks.
 * @param waitForMillis total time (in milliseconds) to wait before throwing
 *                      a time out exception.
 * @throws TimeoutException if matching threads are still alive at timeout
 * @throws InterruptedException if interrupted while waiting
 */
public static void waitForThreadTermination(String regex,
    int checkEveryMillis, final int waitForMillis) throws TimeoutException,
    InterruptedException {
  final Pattern pattern = Pattern.compile(regex);
  // Poll until no thread name matches any more, or the deadline passes.
  waitFor(() -> !anyThreadMatching(pattern), checkEveryMillis, waitForMillis);
}
/**
 * Skip test if native build profile of Maven is not activated.
 * Sub-project using this must set 'runningWithNative' property to true
 * in the definition of native profile in pom.xml.
 */
public static void assumeInNativeProfile() {
  // Boolean.getBoolean(name) is exactly parseBoolean(getProperty(name)),
  // treating an absent property as false.
  Assume.assumeTrue(Boolean.getBoolean("runningWithNative"));
}
/**
 * Get the line-by-line diff between two text files.
 *
 * @param a first file
 * @param b second file
 * @return The empty string if there is no diff; the diff, otherwise.
 *
 * @throws IOException If there is an error reading either file.
 */
public static String getFilesDiff(File a, File b) throws IOException {
  StringBuilder bld = new StringBuilder();
  // try-with-resources replaces the manual close-in-finally of the
  // original, guaranteeing both readers are closed.
  try (BufferedReader ra = new BufferedReader(
           new InputStreamReader(new FileInputStream(a)));
       BufferedReader rb = new BufferedReader(
           new InputStreamReader(new FileInputStream(b)))) {
    while (true) {
      String la = ra.readLine();
      String lb = rb.readLine();
      if (la == null) {
        if (lb != null) {
          // FIX: file b has extra trailing lines; report lb itself plus
          // the remainder of rb. The original passed the already-exhausted
          // reader ra here (and dropped lb), silently losing b's tail.
          appendRemainder(bld, lb, rb);
        }
        break;
      } else if (lb == null) {
        // FIX: symmetric case — file a has extra trailing lines; the
        // original passed rb (exhausted) and dropped la.
        appendRemainder(bld, la, ra);
        break;
      }
      if (!la.equals(lb)) {
        bld.append("  - ").append(la).append("\n");
        bld.append("  + ").append(lb).append("\n");
      }
    }
  }
  return bld.toString();
}

/**
 * Append the already-read first extra line plus every remaining line of
 * the given reader, each marked with a "+" prefix.
 */
private static void appendRemainder(StringBuilder bld, String first,
    BufferedReader r) throws IOException {
  bld.append("  + ").append(first).append("\n");
  String l;
  while ((l = r.readLine()) != null) {
    bld.append("  + ").append(l).append("\n");
  }
}
/** Append every remaining line of the reader with a "+" diff prefix. */
private static void addPlusses(StringBuilder bld, BufferedReader r)
    throws IOException {
  for (String line = r.readLine(); line != null; line = r.readLine()) {
    bld.append("  + ").append(line).append("\n");
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestGenericTestUtils.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/TestGenericTestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertTrue;
/**
 * Tests for the assertion and log-capture helpers in GenericTestUtils.
 */
public class TestGenericTestUtils extends GenericTestUtils {

  @Test
  public void testAssertExceptionContainsNullEx() throws Throwable {
    // FIX: the original passed vacuously if assertExceptionContains did
    // not throw at all; track whether the expected failure happened.
    boolean failed = false;
    try {
      assertExceptionContains("", null);
    } catch (AssertionError e) {
      failed = true;
      // The failure must mention the null-throwable diagnostic.
      if (!e.toString().contains(E_NULL_THROWABLE)) {
        throw e;
      }
    }
    assertTrue("assertExceptionContains should fail on a null throwable",
        failed);
  }

  @Test
  public void testAssertExceptionContainsNullString() throws Throwable {
    // BrokenException.toString() returns null; the assertion helper must
    // fail with a message mentioning the null-string diagnostic.
    boolean failed = false;
    try {
      assertExceptionContains("", new BrokenException());
    } catch (AssertionError e) {
      failed = true;
      if (!e.toString().contains(E_NULL_THROWABLE_STRING)) {
        throw e;
      }
    }
    assertTrue("assertExceptionContains should fail on a null toString()",
        failed);
  }

  @Test
  public void testAssertExceptionContainsWrongText() throws Throwable {
    boolean failed = false;
    try {
      assertExceptionContains("Expected", new Exception("(actual)"));
    } catch (AssertionError e) {
      failed = true;
      String s = e.toString();
      // The error must flag the mismatch and quote the actual message.
      if (!s.contains(E_UNEXPECTED_EXCEPTION)
          || !s.contains("(actual)") ) {
        throw e;
      }
      // The original exception must be preserved as the cause.
      if (e.getCause() == null) {
        throw new AssertionError("No nested cause in assertion", e);
      }
    }
    assertTrue("assertExceptionContains should fail on non-matching text",
        failed);
  }

  @Test
  public void testAssertExceptionContainsWorking() throws Throwable {
    // The happy path: matching text must not raise.
    assertExceptionContains("Expected", new Exception("Expected"));
  }

  /** Throwable whose toString() is null, to exercise defensive handling. */
  private static class BrokenException extends Exception {
    public BrokenException() {
    }

    @Override
    public String toString() {
      return null;
    }
  }

  @Test(timeout = 10000)
  public void testLogCapturer() {
    final Log log = LogFactory.getLog(TestGenericTestUtils.class);
    LogCapturer logCapturer = LogCapturer.captureLogs(log);
    final String infoMessage = "info message";
    // test get output message
    log.info(infoMessage);
    assertTrue(logCapturer.getOutput().endsWith(
        String.format(infoMessage + "%n")));
    // test clear output
    logCapturer.clearOutput();
    assertTrue(logCapturer.getOutput().isEmpty());
    // test stop capturing
    logCapturer.stopCapturing();
    log.info(infoMessage);
    assertTrue(logCapturer.getOutput().isEmpty());
  }

  @Test(timeout = 10000)
  public void testLogCapturerSlf4jLogger() {
    final Logger logger = LoggerFactory.getLogger(TestGenericTestUtils.class);
    LogCapturer logCapturer = LogCapturer.captureLogs(logger);
    final String infoMessage = "info message";
    // test get output message
    logger.info(infoMessage);
    assertTrue(logCapturer.getOutput().endsWith(
        String.format(infoMessage + "%n")));
    // test clear output
    logCapturer.clearOutput();
    assertTrue(logCapturer.getOutput().isEmpty());
    // test stop capturing
    logCapturer.stopCapturing();
    logger.info(infoMessage);
    assertTrue(logCapturer.getOutput().isEmpty());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MultithreadedTestUtil.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/test/MultithreadedTestUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import java.util.HashSet;
import java.util.Set;
/**
* A utility to easily test threaded/synchronized code.
* Utility works by letting you add threads that do some work to a
* test context object, and then lets you kick them all off to stress test
* your parallel code.
*
* Also propagates thread exceptions back to the runner, to let you verify.
*
* An example:
*
* <code>
* final AtomicInteger threadsRun = new AtomicInteger();
*
* TestContext ctx = new TestContext();
* // Add 3 threads to test.
* for (int i = 0; i < 3; i++) {
* ctx.addThread(new TestingThread(ctx) {
* @Override
* public void doWork() throws Exception {
* threadsRun.incrementAndGet();
* }
* });
* }
* ctx.startThreads();
* // Set a timeout period for threads to complete.
* ctx.waitFor(30000);
* assertEquals(3, threadsRun.get());
* </code>
*
* For repetitive actions, use the {@link MultithreadedTestUtil.RepeatingThread}
* instead.
*
* (More examples can be found in {@link TestMultithreadedTestUtil})
*/
public abstract class MultithreadedTestUtil {

  public static final Log LOG =
      LogFactory.getLog(MultithreadedTestUtil.class);

  /**
   * TestContext is used to setup the multithreaded test runner.
   * It lets you add threads, run them, wait upon or stop them.
   */
  public static class TestContext {
    // First failure reported by any test thread; guarded by "this".
    private Throwable err = null;
    // Set once stop() is called; guarded by "this".
    private boolean stopped = false;
    private Set<TestingThread> testThreads = new HashSet<TestingThread>();
    private Set<TestingThread> finishedThreads = new HashSet<TestingThread>();

    /**
     * Check if the context can run threads.
     * Can't if its been stopped and contains an error.
     * @return true if it can run, false if it can't.
     */
    public synchronized boolean shouldRun()  {
      return !stopped && err == null;
    }

    /**
     * Add a thread to the context for running.
     * Threads can be of type {@link MultithreadedTestUtil.TestingThread}
     * or {@link MultithreadedTestUtil.RepeatingTestThread}
     * or other custom derivatives of the former.
     * @param t the thread to add for running.
     */
    public void addThread(TestingThread t) {
      testThreads.add(t);
    }

    /**
     * Starts all test threads that have been added so far.
     */
    public void startThreads() {
      for (TestingThread t : testThreads) {
        t.start();
      }
    }

    /**
     * Waits for threads to finish or error out.
     * @param millis the number of milliseconds to wait
     * for threads to complete.
     * @throws Exception if one or more of the threads
     * have thrown up an error.
     */
    public synchronized void waitFor(long millis) throws Exception {
      // Absolute deadline; wait(left) may wake early (spurious wakeup or
      // notify from threadDone/threadFailed), so re-check each iteration.
      long endTime = Time.now() + millis;
      while (shouldRun() &&
             finishedThreads.size() < testThreads.size()) {
        long left = endTime - Time.now();
        if (left <= 0) break;
        checkException();
        wait(left);
      }
      // Surface any failure that arrived during the final wait.
      checkException();
    }

    /**
     * Checks for thread exceptions, and if they've occurred
     * throws them as RuntimeExceptions in a deferred manner.
     */
    public synchronized void checkException() throws Exception {
      if (err != null) {
        throw new RuntimeException("Deferred", err);
      }
    }

    /**
     * Called by {@link MultithreadedTestUtil.TestingThread}s to signal
     * a failed thread.
     * @param t the thread that failed.
     */
    public synchronized void threadFailed(Throwable t) {
      // Only the first failure is retained; later ones are logged but
      // do not overwrite err.
      if (err == null) err = t;
      LOG.error("Failed!", err);
      // NOTE(review): single notify() assumes at most one thread blocks in
      // waitFor(); use notifyAll() if multiple waiters are possible — confirm.
      notify();
    }

    /**
     * Called by {@link MultithreadedTestUtil.TestingThread}s to signal
     * a successful completion.
     * @param t the thread that finished.
     */
    public synchronized void threadDone(TestingThread t) {
      finishedThreads.add(t);
      notify();
    }

    /**
     * Returns after stopping all threads by joining them back.
     * @throws Exception in case a thread terminated with a failure.
     */
    public void stop() throws Exception {
      // Set the flag under the lock, but join() outside it so the test
      // threads can still call threadDone()/threadFailed() while we wait.
      synchronized (this) {
        stopped = true;
      }
      for (TestingThread t : testThreads) {
        t.join();
      }
      checkException();
    }

    public Iterable<? extends Thread> getTestThreads() {
      return testThreads;
    }
  }

  /**
   * A thread that can be added to a test context, and properly
   * passes exceptions through.
   */
  public static abstract class TestingThread extends Thread {
    protected final TestContext ctx;
    protected boolean stopped;

    public TestingThread(TestContext ctx) {
      this.ctx = ctx;
    }

    @Override
    public void run() {
      try {
        doWork();
      } catch (Throwable t) {
        ctx.threadFailed(t);
      }
      // Always report completion, even after a failure, so waitFor() can
      // account for every thread.
      ctx.threadDone(this);
    }

    /**
     * User method to add any code to test thread behavior of.
     * @throws Exception throw an exception if a failure has occurred.
     */
    public abstract void doWork() throws Exception;

    protected void stopTestThread() {
      this.stopped = true;
    }
  }

  /**
   * A test thread that performs a repeating operation.
   */
  public static abstract class RepeatingTestThread extends TestingThread {
    public RepeatingTestThread(TestContext ctx) {
      super(ctx);
    }

    /**
     * Repeats a given user action until the context is asked to stop
     * or meets an error.
     */
    @Override
    public final void doWork() throws Exception {
      while (ctx.shouldRun() && !stopped) {
        doAnAction();
      }
    }

    /**
     * User method for any code to test repeating behavior of (as threads).
     * @throws Exception throw an exception if a failure has occurred.
     */
    public abstract void doAnAction() throws Exception;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestErasureCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestErasureCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.coder;
import java.lang.reflect.Constructor;

import org.smartdata.erasurecode.*;
/**
* Erasure coder test base with utilities.
*/
/**
 * Erasure coder test base with utilities.
 */
public abstract class TestErasureCoderBase extends TestCoderBase {
  protected Class<? extends ErasureCoder> encoderClass;
  protected Class<? extends ErasureCoder> decoderClass;

  // Lazily created and cached so repeated testCoding() runs reuse the
  // same coder instances (exercising their internal buffer reuse).
  private ErasureCoder encoder;
  private ErasureCoder decoder;

  protected int numChunksInBlock = 16;

  /**
   * It's just a block for this test purpose. We don't use HDFS block here
   * at all for simple.
   */
  protected static class TestBlock extends ECBlock {
    protected ECChunk[] chunks;

    // For simple, just assume the block have the chunks already ready.
    // In practice we need to read/write chunks from/to the block via file IO.
    public TestBlock(ECChunk[] chunks) {
      this.chunks = chunks;
    }
  }

  /**
   * Generating source data, encoding, recovering and then verifying.
   * RawErasureCoder mainly uses ECChunk to pass input and output data buffers,
   * it supports two kinds of ByteBuffers, one is array backed, the other is
   * direct ByteBuffer. Have usingDirectBuffer to indicate which case to test.
   * @param usingDirectBuffer whether to test with direct ByteBuffers
   */
  protected void testCoding(boolean usingDirectBuffer) {
    this.usingDirectBuffer = usingDirectBuffer;
    prepareCoders();

    /**
     * The following runs will use 3 different chunkSize for inputs and outputs,
     * to verify the same encoder/decoder can process variable width of data.
     */
    performTestCoding(baseChunkSize, true);
    performTestCoding(baseChunkSize - 17, false);
    performTestCoding(baseChunkSize + 16, true);
  }

  /** Run one encode/erase/decode/verify round with the given chunk size. */
  private void performTestCoding(int chunkSize, boolean usingSlicedBuffer) {
    setChunkSize(chunkSize);
    prepareBufferAllocator(usingSlicedBuffer);

    // Generate data and encode
    ECBlockGroup blockGroup = prepareBlockGroupForEncoding();

    // Backup all the source chunks for later recovering because some coders
    // may affect the source data.
    TestBlock[] clonedDataBlocks =
        cloneBlocksWithData((TestBlock[]) blockGroup.getDataBlocks());
    TestBlock[] parityBlocks = (TestBlock[]) blockGroup.getParityBlocks();

    ErasureCodingStep codingStep;
    codingStep = encoder.calculateCoding(blockGroup);
    performCodingStep(codingStep);

    // Erase specified sources but return copies of them for later comparing
    TestBlock[] backupBlocks = backupAndEraseBlocks(clonedDataBlocks, parityBlocks);

    // Decode
    blockGroup = new ECBlockGroup(clonedDataBlocks, blockGroup.getParityBlocks());
    codingStep = decoder.calculateCoding(blockGroup);
    performCodingStep(codingStep);

    // Compare
    compareAndVerify(backupBlocks, codingStep.getOutputBlocks());
  }

  /**
   * This is typically how a coding step should be performed.
   * @param codingStep the step produced by the coder under test
   */
  protected void performCodingStep(ErasureCodingStep codingStep) {
    // Pretend that we're opening these input blocks and output blocks.
    ECBlock[] inputBlocks = codingStep.getInputBlocks();
    ECBlock[] outputBlocks = codingStep.getOutputBlocks();

    // We allocate input and output chunks accordingly.
    ECChunk[] inputChunks = new ECChunk[inputBlocks.length];
    ECChunk[] outputChunks = new ECChunk[outputBlocks.length];

    for (int i = 0; i < numChunksInBlock; ++i) {
      // Pretend that we're reading input chunks from input blocks.
      for (int j = 0; j < inputBlocks.length; ++j) {
        inputChunks[j] = ((TestBlock) inputBlocks[j]).chunks[i];
      }

      // Pretend that we allocate and will write output results to the blocks.
      for (int j = 0; j < outputBlocks.length; ++j) {
        outputChunks[j] = allocateOutputChunk();
        ((TestBlock) outputBlocks[j]).chunks[i] = outputChunks[j];
      }

      // Given the input chunks and output chunk buffers, just call it !
      codingStep.performCoding(inputChunks, outputChunks);
    }

    codingStep.finish();
  }

  /**
   * Compare and verify if recovered blocks data are the same with the erased
   * blocks data.
   * @param erasedBlocks the original (backed-up) blocks
   * @param recoveredBlocks the blocks produced by decoding
   */
  protected void compareAndVerify(ECBlock[] erasedBlocks,
      ECBlock[] recoveredBlocks) {
    for (int i = 0; i < erasedBlocks.length; ++i) {
      compareAndVerify(((TestBlock) erasedBlocks[i]).chunks,
          ((TestBlock) recoveredBlocks[i]).chunks);
    }
  }

  /** Lazily create the encoder/decoder under test. */
  private void prepareCoders() {
    if (encoder == null) {
      encoder = createEncoder();
    }
    if (decoder == null) {
      decoder = createDecoder();
    }
  }

  /**
   * Reflectively instantiate and configure a coder. Factored out of
   * createEncoder()/createDecoder(), which previously duplicated this code.
   */
  private ErasureCoder createCoder(Class<? extends ErasureCoder> coderClass) {
    ErasureCoder coder;
    try {
      ErasureCoderOptions options = new ErasureCoderOptions(
          numDataUnits, numParityUnits, allowChangeInputs, allowDump);
      // getConstructor on Class<? extends ErasureCoder> already yields a
      // compatible Constructor; no unchecked cast needed.
      Constructor<? extends ErasureCoder> constructor =
          coderClass.getConstructor(ErasureCoderOptions.class);
      coder = constructor.newInstance(options);
    } catch (Exception e) {
      throw new RuntimeException("Failed to create coder " + coderClass, e);
    }
    coder.setConf(getConf());
    return coder;
  }

  /**
   * Create the raw erasure encoder to test.
   * @return the encoder instance
   */
  protected ErasureCoder createEncoder() {
    return createCoder(encoderClass);
  }

  /**
   * Create the raw erasure decoder to test.
   * @return the decoder instance
   */
  protected ErasureCoder createDecoder() {
    return createCoder(decoderClass);
  }

  /**
   * Prepare a block group for encoding.
   * @return a group of random data blocks plus empty parity blocks
   */
  protected ECBlockGroup prepareBlockGroupForEncoding() {
    ECBlock[] dataBlocks = new TestBlock[numDataUnits];
    ECBlock[] parityBlocks = new TestBlock[numParityUnits];

    for (int i = 0; i < numDataUnits; i++) {
      dataBlocks[i] = generateDataBlock();
    }

    for (int i = 0; i < numParityUnits; i++) {
      parityBlocks[i] = allocateOutputBlock();
    }

    return new ECBlockGroup(dataBlocks, parityBlocks);
  }

  /**
   * Generate random data and return a data block.
   * @return a block filled with random chunks
   */
  protected ECBlock generateDataBlock() {
    ECChunk[] chunks = new ECChunk[numChunksInBlock];

    for (int i = 0; i < numChunksInBlock; ++i) {
      chunks[i] = generateDataChunk();
    }

    return new TestBlock(chunks);
  }

  /**
   * Erase blocks to test the recovering of them. Before erasure clone them
   * first so could return themselves.
   * @param dataBlocks the data blocks to erase from
   * @param parityBlocks the parity blocks to erase from
   * @return clone of erased blocks (data first, then parity)
   */
  protected TestBlock[] backupAndEraseBlocks(TestBlock[] dataBlocks,
      TestBlock[] parityBlocks) {
    TestBlock[] toEraseBlocks = new TestBlock[erasedDataIndexes.length +
        erasedParityIndexes.length];
    int idx = 0;
    TestBlock block;

    for (int i = 0; i < erasedDataIndexes.length; i++) {
      block = dataBlocks[erasedDataIndexes[i]];
      toEraseBlocks[idx ++] = cloneBlockWithData(block);
      eraseDataFromBlock(block);
    }

    for (int i = 0; i < erasedParityIndexes.length; i++) {
      block = parityBlocks[erasedParityIndexes[i]];
      toEraseBlocks[idx ++] = cloneBlockWithData(block);
      eraseDataFromBlock(block);
    }

    return toEraseBlocks;
  }

  /**
   * Allocate an output block. Note the chunk buffer will be allocated by the
   * up caller when performing the coding step.
   * @return an empty block
   */
  protected TestBlock allocateOutputBlock() {
    ECChunk[] chunks = new ECChunk[numChunksInBlock];

    return new TestBlock(chunks);
  }

  /**
   * Clone blocks with data copied along with, avoiding affecting the original
   * blocks.
   * @param blocks the blocks to clone
   * @return deep copies of the blocks
   */
  protected TestBlock[] cloneBlocksWithData(TestBlock[] blocks) {
    TestBlock[] results = new TestBlock[blocks.length];
    for (int i = 0; i < blocks.length; ++i) {
      results[i] = cloneBlockWithData(blocks[i]);
    }

    return results;
  }

  /**
   * Clone exactly a block, avoiding affecting the original block.
   * @param block the block to clone
   * @return a new block
   */
  protected TestBlock cloneBlockWithData(TestBlock block) {
    ECChunk[] newChunks = cloneChunksWithData(block.chunks);

    return new TestBlock(newChunks);
  }

  /**
   * Erase data from a block.
   */
  protected void eraseDataFromBlock(TestBlock theBlock) {
    eraseDataFromChunks(theBlock.chunks);
    theBlock.setErased(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestHHErasureCoderBase.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestHHErasureCoderBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.coder;
import org.smartdata.erasurecode.ECBlock;
import org.smartdata.erasurecode.ECChunk;
/**
* Erasure coder test base with utilities for hitchhiker.
*/
/**
 * Erasure coder test base with utilities for hitchhiker.
 */
public abstract class TestHHErasureCoderBase extends TestErasureCoderBase{
  // Number of chunks a Hitchhiker coding step consumes from each block at
  // once (the sub-packetization level).
  // NOTE(review): the loop below assumes numChunksInBlock is a multiple of
  // subPacketSize, otherwise chunks[i + k] would go out of bounds — confirm
  // with concrete subclasses.
  protected int subPacketSize = 2;

  @Override
  protected void performCodingStep(ErasureCodingStep codingStep) {
    // Pretend that we're opening these input blocks and output blocks.
    ECBlock[] inputBlocks = codingStep.getInputBlocks();
    ECBlock[] outputBlocks = codingStep.getOutputBlocks();

    // We allocate input and output chunks accordingly. Unlike the base
    // class, each performCoding() call receives subPacketSize chunks per
    // block, laid out sub-packet-major: index = k * blockCount + j.
    ECChunk[] inputChunks = new ECChunk[inputBlocks.length * subPacketSize];
    ECChunk[] outputChunks = new ECChunk[outputBlocks.length * subPacketSize];

    // Advance subPacketSize chunks at a time through each block.
    for (int i = 0; i < numChunksInBlock; i += subPacketSize) {
      // Pretend that we're reading input chunks from input blocks.
      for (int k = 0; k < subPacketSize; ++k) {
        for (int j = 0; j < inputBlocks.length; ++j) {
          inputChunks[k * inputBlocks.length + j] = ((TestBlock)
              inputBlocks[j]).chunks[i + k];
        }

        // Pretend that we allocate and will write output results to the blocks.
        for (int j = 0; j < outputBlocks.length; ++j) {
          outputChunks[k * outputBlocks.length + j] = allocateOutputChunk();
          ((TestBlock) outputBlocks[j]).chunks[i + k] =
              outputChunks[k * outputBlocks.length + j];
        }
      }

      // Given the input chunks and output chunk buffers, just call it !
      codingStep.performCoding(inputChunks, outputChunks);
    }

    codingStep.finish();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestRSErasureCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestRSErasureCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.coder;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.smartdata.erasurecode.CodecUtil;
import org.smartdata.erasurecode.rawcoder.RSRawErasureCoderFactory;
/**
* Test Reed-Solomon encoding and decoding.
*/
/**
 * Test Reed-Solomon encoding and decoding.
 *
 * Test names encode the scheme and the erased units, e.g.
 * "10x4_erasing_d0_p0" = 10 data + 4 parity units, erasing data unit 0
 * and parity unit 0.
 */
public class TestRSErasureCoder extends TestErasureCoderBase {
  @Rule
  // Safety net: no individual test may run longer than 5 minutes.
  public Timeout globalTimeout = new Timeout(300000);

  @Before
  public void setup() {
    this.encoderClass = RSErasureEncoder.class;
    this.decoderClass = RSErasureDecoder.class;
    // Use fewer chunks per block than the base default to keep tests fast.
    this.numChunksInBlock = 10;
  }

  @Test
  public void testCodingNoDirectBuffer_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    /**
     * Doing twice to test if the coders can be repeatedly reused. This matters
     * as the underlying coding buffers are shared, which may have bugs.
     */
    testCoding(false);
    testCoding(false);
  }

  @Test
  public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
    /**
     * This tests if the configuration items work or not.
     */
    Configuration conf = new Configuration();
    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
        RSRawErasureCoderFactory.class.getCanonicalName());

    prepare(conf, 10, 4, new int[]{0}, new int[0]);

    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_p1() {
    // Parity-only erasure.
    prepare(null, 10, 4, new int[]{}, new int[]{1});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d2() {
    // Data-only erasure.
    prepare(null, 10, 4, new int[] {2}, new int[] {});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingBothBuffers_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});

    /**
     * Doing in mixed buffer usage model to test if the coders can be repeatedly
     * reused with different buffer usage model. This matters as the underlying
     * coding buffers are shared, which may have bugs.
     */
    testCoding(true);
    testCoding(false);
    testCoding(true);
    testCoding(false);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasure_of_d2_d4_p0() {
    // Three erasures: still within the 4-parity recovery limit.
    prepare(null, 10, 4, new int[] {2, 4}, new int[] {0});
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_d1_p0_p1() {
    // Maximum recoverable erasures (4) for a 10x4 scheme.
    prepare(null, 10, 4, new int[] {0, 1}, new int[] {0, 1});
    testCoding(true);
  }

  @Test
  public void testCodingNoDirectBuffer_3x3_erasing_d0_p0() {
    // Smaller scheme with heap buffers.
    prepare(null, 3, 3, new int[] {0}, new int[] {0});
    testCoding(false);
  }

  @Test
  public void testCodingDirectBuffer_6x3_erasing_d0_p0() {
    prepare(null, 6, 3, new int[] {0}, new int[] {0});
    testCoding(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestHHXORErasureCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestHHXORErasureCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.coder;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.erasurecode.CodecUtil;
import org.smartdata.erasurecode.rawcoder.RSRawErasureCoderFactory;
/**
 * Test Hitchhiker-XOR encoding and decoding.
 *
 * <p>Method names encode the fixture: "10x4" means 10 data units and 4 parity
 * units; "erasing_d4" means data unit 4 is erased before decoding.
 * {@code prepare()} and {@code testCoding()} come from
 * {@code TestHHErasureCoderBase}; the boolean selects direct (true) vs.
 * on-heap (false) buffers.
 */
public class TestHHXORErasureCoder extends TestHHErasureCoderBase {
  @Before
  public void setup() {
    // Coder implementations under test; subPacketSize is the Hitchhiker
    // sub-packetization factor used by the base class.
    this.encoderClass = HHXORErasureEncoder.class;
    this.decoderClass = HHXORErasureDecoder.class;
    this.numChunksInBlock = 10;
    this.subPacketSize = 2;
  }

  @Test
  public void testCodingNoDirectBuffer_10x4_erasing_d0() {
    prepare(null, 10, 4, new int[]{0}, new int[0]);
    /**
     * Doing twice to test if the coders can be repeatedly reused. This matters
     * as the underlying coding buffers are shared, which may have bugs.
     */
    testCoding(false);
    testCoding(false);
  }

  @Test
  public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
    /**
     * This tests if the configuration items work or not.
     */
    // The RS rawcoder key is set because HHXOR builds on an underlying RS
    // coder — presumably picked up through that path; confirm against
    // HHXORErasureEncoder/Decoder.
    Configuration conf = new Configuration();
    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
        RSRawErasureCoderFactory.class.getCanonicalName());
    prepare(conf, 10, 4, new int[]{0}, new int[0]);
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_p1() {
    prepare(null, 10, 4, new int[]{}, new int[]{1});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d4() {
    prepare(null, 10, 4, new int[] {4}, new int[] {});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    testCoding(true);
    testCoding(true);
  }

  @Test
  public void testCodingBothBuffers_10x4_erasing_d0_p0() {
    prepare(null, 10, 4, new int[] {0}, new int[] {0});
    /**
     * Doing in mixed buffer usage model to test if the coders can be repeatedly
     * reused with different buffer usage model. This matters as the underlying
     * coding buffers are shared, which may have bugs.
     */
    testCoding(true);
    testCoding(false);
    testCoding(true);
    testCoding(false);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasure_of_d2_d4_p0() {
    prepare(null, 10, 4, new int[] {2, 4}, new int[] {0});
    testCoding(true);
  }

  @Test
  public void testCodingDirectBuffer_10x4_erasing_d0_d1_p0_p1() {
    prepare(null, 10, 4, new int[] {0, 1}, new int[] {0, 1});
    testCoding(true);
  }

  // NOTE(review): disabled test kept as-is — likely the 3x3 layout is not
  // supported by the HHXOR coder, but that is not verifiable from this file;
  // confirm before deleting or re-enabling.
  // @Test
  // public void testCodingNoDirectBuffer_3x3_erasing_d0_p0() {
  //   prepare(null, 3, 3, new int[] {0}, new int[] {0});
  //   testCoding(false);
  // }

  @Test
  public void testCodingDirectBuffer_6x3_erasing_d0_p0() {
    prepare(null, 6, 3, new int[] {0}, new int[] {0});
    testCoding(true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestXORCoder.java | smart-hadoop-support/smart-erasurecodec/src/test/java/org/smartdata/erasurecode/coder/TestXORCoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.coder;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
/**
 * Test XOR encoding and decoding.
 *
 * <p>XOR tolerates exactly one erasure, so every fixture here erases a single
 * unit (one data unit or the single parity unit). {@code prepare()} and
 * {@code testCoding()} come from {@code TestErasureCoderBase}; the boolean
 * selects direct (true) vs. on-heap (false) buffers.
 */
public class TestXORCoder extends TestErasureCoderBase {
  // Fail any single test that runs longer than 5 minutes (300000 ms).
  @Rule
  public Timeout globalTimeout = new Timeout(300000);

  @Before
  public void setup() {
    // XOR layout is fixed at 10 data units + 1 parity unit for these tests.
    this.encoderClass = XORErasureEncoder.class;
    this.decoderClass = XORErasureDecoder.class;
    this.numDataUnits = 10;
    this.numParityUnits = 1;
    this.numChunksInBlock = 10;
  }

  @Test
  public void testCodingNoDirectBuffer_erasing_p0() {
    prepare(null, 10, 1, new int[0], new int[] {0});
    /**
     * Doing twice to test if the coders can be repeatedly reused. This matters
     * as the underlying coding buffers are shared, which may have bugs.
     */
    testCoding(false);
    testCoding(false);
  }

  @Test
  public void testCodingBothBuffers_erasing_d5() {
    prepare(null, 10, 1, new int[]{5}, new int[0]);
    /**
     * Doing in mixed buffer usage model to test if the coders can be repeatedly
     * reused with different buffer usage model. This matters as the underlying
     * coding buffers are shared, which may have bugs.
     */
    testCoding(true);
    testCoding(false);
    testCoding(true);
    testCoding(false);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCoderOptions.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCoderOptions.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * Immutable configuration shared by erasure coders: the data/parity layout
 * plus a couple of behavioral flags.
 */
@InterfaceAudience.Private
public final class ErasureCoderOptions {

  private final int numDataUnits;
  private final int numParityUnits;
  private final boolean allowChangeInputs;
  private final boolean allowVerboseDump;

  /**
   * Creates options with both behavioral flags disabled.
   * @param numDataUnits count of data units in the coding group
   * @param numParityUnits count of parity units in the coding group
   */
  public ErasureCoderOptions(int numDataUnits, int numParityUnits) {
    this(numDataUnits, numParityUnits, false, false);
  }

  /**
   * Creates options with explicit behavioral flags.
   * @param numDataUnits count of data units in the coding group
   * @param numParityUnits count of parity units in the coding group
   * @param allowChangeInputs whether a coder may modify input buffer contents
   * @param allowVerboseDump whether verbose debug dumping is desired
   */
  public ErasureCoderOptions(int numDataUnits, int numParityUnits,
      boolean allowChangeInputs, boolean allowVerboseDump) {
    this.numDataUnits = numDataUnits;
    this.numParityUnits = numParityUnits;
    this.allowChangeInputs = allowChangeInputs;
    this.allowVerboseDump = allowVerboseDump;
  }

  /**
   * The number of data input units for the coding. A unit can be a byte,
   * chunk or buffer or even a block.
   * @return count of data input units
   */
  public int getNumDataUnits() {
    return numDataUnits;
  }

  /**
   * The number of parity output units for the coding. A unit can be a byte,
   * chunk, buffer or even a block.
   * @return count of parity output units
   */
  public int getNumParityUnits() {
    return numParityUnits;
  }

  /**
   * The number of all the involved units in the coding.
   * @return count of all the data units and parity units
   */
  public int getNumAllUnits() {
    // Derived on demand rather than cached; plain int addition as before.
    return numDataUnits + numParityUnits;
  }

  /**
   * Allow changing input buffer content (not positions). Maybe better
   * performance if not allowed.
   * @return true if allowing input content to be changed, false otherwise
   */
  public boolean allowChangeInputs() {
    return allowChangeInputs;
  }

  /**
   * Allow dump verbose debug info or not.
   * @return true if verbose debug info is desired, false otherwise
   */
  public boolean allowVerboseDump() {
    return allowVerboseDump;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECBlock.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECBlock.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * A wrapper of block level data source/output that {@link ECChunk}s can be
 * extracted from. For HDFS, it can be an HDFS block (250MB). Note it only cares
 * about erasure coding specific logic thus avoids coupling with any HDFS block
 * details. We can have something like HdfsBlock extend it.
 */
@InterfaceAudience.Private
public class ECBlock {

  // Whether this block carries parity (true) or source data (false).
  private boolean isParity;
  // Whether this block is missing/erased.
  private boolean isErased;

  /**
   * Default constructor: a data block that is present (both flags false,
   * relying on Java field defaults).
   */
  public ECBlock() {
  }

  /**
   * Constructor specifying both flags.
   * @param isParity is a parity block
   * @param isErased is erased or not
   */
  public ECBlock(boolean isParity, boolean isErased) {
    this.isParity = isParity;
    this.isErased = isErased;
  }

  /**
   * @return true if it's parity block, otherwise false
   */
  public boolean isParity() {
    return isParity;
  }

  /**
   * @return true if it's erased due to erasure, otherwise false
   */
  public boolean isErased() {
    return isErased;
  }

  /**
   * Set true if it's for a parity block.
   * @param isParity is parity or not
   */
  public void setParity(boolean isParity) {
    this.isParity = isParity;
  }

  /**
   * Set true if the block is missing.
   * @param isErased is erased or not
   */
  public void setErased(boolean isErased) {
    this.isErased = isErased;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECBlockGroup.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECBlockGroup.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * A group of blocks or {@link ECBlock} incurred in an erasure coding task.
 */
@InterfaceAudience.Private
public class ECBlockGroup {

  private final ECBlock[] dataBlocks;
  private final ECBlock[] parityBlocks;

  /**
   * A constructor specifying data blocks and parity blocks.
   * @param dataBlocks data blocks in the group
   * @param parityBlocks parity blocks in the group
   */
  public ECBlockGroup(ECBlock[] dataBlocks, ECBlock[] parityBlocks) {
    this.dataBlocks = dataBlocks;
    this.parityBlocks = parityBlocks;
  }

  /**
   * Get data blocks.
   * @return data blocks
   */
  public ECBlock[] getDataBlocks() {
    return dataBlocks;
  }

  /**
   * Get parity blocks.
   * @return parity blocks
   */
  public ECBlock[] getParityBlocks() {
    return parityBlocks;
  }

  /**
   * Any erased data block?
   * @return true if any erased data block, false otherwise
   */
  public boolean anyErasedDataBlock() {
    for (ECBlock block : dataBlocks) {
      if (block.isErased()) {
        return true;
      }
    }
    return false;
  }

  /**
   * Any erased parity block?
   * @return true if any erased parity block, false otherwise
   */
  public boolean anyErasedParityBlock() {
    for (ECBlock block : parityBlocks) {
      if (block.isErased()) {
        return true;
      }
    }
    return false;
  }

  /**
   * Get erased blocks count across both data and parity blocks.
   * @return erased count of blocks
   */
  public int getErasedCount() {
    return countErased(dataBlocks) + countErased(parityBlocks);
  }

  /** Counts the erased blocks within one array. */
  private static int countErased(ECBlock[] blocks) {
    int erased = 0;
    for (ECBlock block : blocks) {
      if (block.isErased()) {
        erased++;
      }
    }
    return erased;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/CodecUtil.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/CodecUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.erasurecode.rawcoder.*;
import org.smartdata.erasurecode.codec.ErasureCodec;
import org.smartdata.erasurecode.codec.HHXORErasureCodec;
import org.smartdata.erasurecode.codec.RSErasureCodec;
import org.smartdata.erasurecode.codec.XORErasureCodec;
import org.smartdata.erasurecode.coder.ErasureDecoder;
import org.smartdata.erasurecode.coder.ErasureEncoder;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
/**
* A codec & coder utility to help create coders conveniently.
*
* {@link CodecUtil} includes erasure coder configurations key and default
* values such as coder class name and erasure codec option values included
* by {@link ErasureCodecOptions}. {@link ErasureEncoder} and
* {@link ErasureDecoder} are created by createEncoder and createDecoder
* respectively.{@link RawErasureEncoder} and {@link RawErasureDecoder} are
* are created by createRawEncoder and createRawDecoder.
*/
@InterfaceAudience.Private
public final class CodecUtil {
/** Erasure coder XOR codec. */
public static final String IO_ERASURECODE_CODEC_XOR_KEY =
"io.erasurecode.codec.xor";
public static final String IO_ERASURECODE_CODEC_XOR =
XORErasureCodec.class.getCanonicalName();
/** Erasure coder Reed-Solomon codec. */
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_KEY =
"io.erasurecode.codec.rs";
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT =
RSErasureCodec.class.getCanonicalName();
/** Erasure coder hitch hiker XOR codec. */
public static final String IO_ERASURECODE_CODEC_HHXOR_KEY =
"io.erasurecode.codec.hhxor";
public static final String IO_ERASURECODE_CODEC_HHXOR =
HHXORErasureCodec.class.getCanonicalName();
/** Supported erasure codec classes. */
/** Raw coder factory for the RS default codec. */
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY =
"io.erasurecode.codec.rs-default.rawcoder";
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT =
RSRawErasureCoderFactory.class.getCanonicalName();
/** Raw coder factory for the RS legacy codec. */
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY =
"io.erasurecode.codec.rs-legacy.rawcoder";
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_DEFAULT =
RSRawErasureCoderFactoryLegacy.class.getCanonicalName();
/** Raw coder factory for the XOR codec. */
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY =
"io.erasurecode.codec.xor.rawcoder";
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODER_DEFAULT =
XORRawErasureCoderFactory.class.getCanonicalName();
private CodecUtil() { }
/**
* Create encoder corresponding to given codec.
* @param options Erasure codec options
* @return erasure encoder
*/
public static ErasureEncoder createEncoder(Configuration conf,
ErasureCodecOptions options) {
Preconditions.checkNotNull(conf);
Preconditions.checkNotNull(options);
String codecKey = getCodecClassName(conf,
options.getSchema().getCodecName());
ErasureCodec codec = createCodec(conf, codecKey, options);
return codec.createEncoder();
}
/**
* Create decoder corresponding to given codec.
* @param options Erasure codec options
* @return erasure decoder
*/
public static ErasureDecoder createDecoder(Configuration conf,
ErasureCodecOptions options) {
Preconditions.checkNotNull(conf);
Preconditions.checkNotNull(options);
String codecKey = getCodecClassName(conf,
options.getSchema().getCodecName());
ErasureCodec codec = createCodec(conf, codecKey, options);
return codec.createDecoder();
}
/**
* Create RS raw encoder according to configuration.
* @param conf configuration
* @param coderOptions coder options that's used to create the coder
* @param codec the codec to use. If null, will use the default codec
* @return raw encoder
*/
public static RawErasureEncoder createRawEncoder(
Configuration conf, String codec, ErasureCoderOptions coderOptions) {
Preconditions.checkNotNull(conf);
Preconditions.checkNotNull(codec);
String rawCoderFactoryKey = getRawCoderFactNameFromCodec(conf, codec);
RawErasureCoderFactory fact = createRawCoderFactory(conf,
rawCoderFactoryKey);
return fact.createEncoder(coderOptions);
}
/**
* Create RS raw decoder according to configuration.
* @param conf configuration
* @param coderOptions coder options that's used to create the coder
* @param codec the codec to use. If null, will use the default codec
* @return raw decoder
*/
public static RawErasureDecoder createRawDecoder(
Configuration conf, String codec, ErasureCoderOptions coderOptions) {
Preconditions.checkNotNull(conf);
Preconditions.checkNotNull(codec);
String rawCoderFactoryKey = getRawCoderFactNameFromCodec(conf, codec);
RawErasureCoderFactory fact = createRawCoderFactory(conf,
rawCoderFactoryKey);
return fact.createDecoder(coderOptions);
}
private static RawErasureCoderFactory createRawCoderFactory(
Configuration conf, String rawCoderFactoryKey) {
RawErasureCoderFactory fact;
try {
Class<? extends RawErasureCoderFactory> factClass = conf.getClassByName(
rawCoderFactoryKey).asSubclass(RawErasureCoderFactory.class);
fact = factClass.newInstance();
} catch (ClassNotFoundException | InstantiationException |
IllegalAccessException e) {
throw new RuntimeException("Failed to create raw coder factory", e);
}
if (fact == null) {
throw new RuntimeException("Failed to create raw coder factory");
}
return fact;
}
private static String getRawCoderFactNameFromCodec(Configuration conf,
String codec) {
switch (codec) {
case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
return conf.get(
IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT);
case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
return conf.get(
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY,
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_DEFAULT);
case ErasureCodeConstants.XOR_CODEC_NAME:
return conf.get(
IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY,
IO_ERASURECODE_CODEC_XOR_RAWCODER_DEFAULT);
default:
// For custom codec, we throw exception if the factory is not configured
String rawCoderKey = "io.erasurecode.codec." + codec + ".rawcoder";
String factName = conf.get(rawCoderKey);
if (factName == null) {
throw new IllegalArgumentException("Raw coder factory not configured " +
"for custom codec " + codec);
}
return factName;
}
}
private static ErasureCodec createCodec(Configuration conf,
String codecClassName, ErasureCodecOptions options) {
ErasureCodec codec = null;
try {
Class<? extends ErasureCodec> codecClass =
conf.getClassByName(codecClassName)
.asSubclass(ErasureCodec.class);
Constructor<? extends ErasureCodec> constructor
= codecClass.getConstructor(Configuration.class,
ErasureCodecOptions.class);
codec = constructor.newInstance(conf, options);
} catch (ClassNotFoundException | InstantiationException |
IllegalAccessException | NoSuchMethodException |
InvocationTargetException e) {
throw new RuntimeException("Failed to create erasure codec", e);
}
if (codec == null) {
throw new RuntimeException("Failed to create erasure codec");
}
return codec;
}
private static String getCodecClassName(Configuration conf, String codec) {
switch (codec) {
case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
return conf.get(
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
//TODO:rs-legacy should be handled differently.
return conf.get(
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
case ErasureCodeConstants.XOR_CODEC_NAME:
return conf.get(
CodecUtil.IO_ERASURECODE_CODEC_XOR_KEY,
CodecUtil.IO_ERASURECODE_CODEC_XOR);
case ErasureCodeConstants.HHXOR_CODEC_NAME:
return conf.get(
CodecUtil.IO_ERASURECODE_CODEC_HHXOR_KEY,
CodecUtil.IO_ERASURECODE_CODEC_HHXOR);
default:
// For custom codec, we throw exception if the factory is not configured
String codecKey = "io.erasurecode.codec." + codec + ".coder";
String codecClass = conf.get(codecKey);
if (codecClass == null) {
throw new IllegalArgumentException("Codec not configured " +
"for custom codec " + codec);
}
return codecClass;
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCodecOptions.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCodecOptions.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * Erasure codec options: a thin immutable holder for the {@link ECSchema}
 * an erasure codec is built against.
 */
@InterfaceAudience.Private
public class ErasureCodecOptions {

  // The schema describing data/parity layout and codec-specific settings.
  private final ECSchema schema;

  /**
   * @param schema the erasure coding schema to wrap
   */
  public ErasureCodecOptions(ECSchema schema) {
    this.schema = schema;
  }

  /**
   * @return the wrapped erasure coding schema
   */
  public ECSchema getSchema() {
    return schema;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECSchema.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECSchema.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
* Erasure coding schema to housekeeper relevant information.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class ECSchema {
public static final String NUM_DATA_UNITS_KEY = "numDataUnits";
public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
public static final String CODEC_NAME_KEY = "org/smartdata/erasurecode/codec";
/**
* The erasure codec name associated.
*/
private final String codecName;
/**
* Number of source data units coded
*/
private final int numDataUnits;
/**
* Number of parity units generated in a coding
*/
private final int numParityUnits;
/*
* An erasure code can have its own specific advanced parameters, subject to
* itself to interpret these key-value settings.
*/
private final Map<String, String> extraOptions;
/**
* Constructor with schema name and provided all options. Note the options may
* contain additional information for the erasure codec to interpret further.
* @param allOptions all schema options
*/
public ECSchema(Map<String, String> allOptions) {
if (allOptions == null || allOptions.isEmpty()) {
throw new IllegalArgumentException("No schema options are provided");
}
this.codecName = allOptions.get(CODEC_NAME_KEY);
if (codecName == null || codecName.isEmpty()) {
throw new IllegalArgumentException("No codec option is provided");
}
int tmpNumDataUnits = extractIntOption(NUM_DATA_UNITS_KEY, allOptions);
int tmpNumParityUnits = extractIntOption(NUM_PARITY_UNITS_KEY, allOptions);
if (tmpNumDataUnits < 0 || tmpNumParityUnits < 0) {
throw new IllegalArgumentException(
"No good option for numDataUnits or numParityUnits found ");
}
this.numDataUnits = tmpNumDataUnits;
this.numParityUnits = tmpNumParityUnits;
allOptions.remove(CODEC_NAME_KEY);
allOptions.remove(NUM_DATA_UNITS_KEY);
allOptions.remove(NUM_PARITY_UNITS_KEY);
// After some cleanup
this.extraOptions = Collections.unmodifiableMap(allOptions);
}
/**
* Constructor with key parameters provided.
* @param codecName codec name
* @param numDataUnits number of data units used in the schema
* @param numParityUnits number os parity units used in the schema
*/
public ECSchema(String codecName, int numDataUnits, int numParityUnits) {
this(codecName, numDataUnits, numParityUnits, null);
}
/**
* Constructor with key parameters provided. Note the extraOptions may contain
* additional information for the erasure codec to interpret further.
* @param codecName codec name
* @param numDataUnits number of data units used in the schema
* @param numParityUnits number os parity units used in the schema
* @param extraOptions extra options to configure the codec
*/
public ECSchema(String codecName, int numDataUnits, int numParityUnits,
Map<String, String> extraOptions) {
assert (codecName != null && ! codecName.isEmpty());
assert (numDataUnits > 0 && numParityUnits > 0);
this.codecName = codecName;
this.numDataUnits = numDataUnits;
this.numParityUnits = numParityUnits;
if (extraOptions == null) {
extraOptions = new HashMap<>();
}
// After some cleanup
this.extraOptions = Collections.unmodifiableMap(extraOptions);
}
private int extractIntOption(String optionKey, Map<String, String> options) {
int result = -1;
try {
if (options.containsKey(optionKey)) {
result = Integer.parseInt(options.get(optionKey));
if (result <= 0) {
throw new IllegalArgumentException("Bad option value " + result +
" found for " + optionKey);
}
}
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Option value " +
options.get(optionKey) + " for " + optionKey +
" is found. It should be an integer");
}
return result;
}
/**
* Get the codec name
* @return codec name
*/
public String getCodecName() {
return codecName;
}
/**
 * Get extra options specific to an erasure code. The returned map is the
 * unmodifiable view installed by the constructor, so callers cannot
 * mutate it.
 * @return extra options
 */
public Map<String, String> getExtraOptions() {
return extraOptions;
}
/**
 * Get required data units count in a coding group.
 * @return count of data units
 */
public int getNumDataUnits() {
return numDataUnits;
}
/**
 * Get required parity units count in a coding group.
 * @return count of parity units
 */
public int getNumParityUnits() {
return numParityUnits;
}
/**
 * Make a meaningful string representation for log output, e.g.
 * {@code ECSchema=[Codec=rs-default, numDataUnits=6, numParityUnits=3]}.
 * @return string representation
 */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("ECSchema=[")
      .append("Codec=").append(codecName).append(", ")
      .append(NUM_DATA_UNITS_KEY).append("=").append(numDataUnits).append(", ")
      .append(NUM_PARITY_UNITS_KEY).append("=").append(numParityUnits);
  if (!extraOptions.isEmpty()) {
    sb.append(", ");
  }
  // Comma-separate the extra options; no trailing separator.
  int remaining = extraOptions.size();
  for (Map.Entry<String, String> entry : extraOptions.entrySet()) {
    sb.append(entry.getKey()).append("=").append(entry.getValue());
    if (--remaining > 0) {
      sb.append(", ");
    }
  }
  return sb.append("]").toString();
}
/**
 * Two schemas are equal iff the codec name, both unit counts and all
 * extra options match.
 */
@Override
public boolean equals(Object o) {
  if (this == o) {
    return true;
  }
  if (o == null || getClass() != o.getClass()) {
    return false;
  }
  ECSchema that = (ECSchema) o;
  return numDataUnits == that.numDataUnits
      && numParityUnits == that.numParityUnits
      && codecName.equals(that.codecName)
      && extraOptions.equals(that.extraOptions);
}
/**
 * Hash consistent with {@link #equals(Object)}: the same base-31
 * accumulation over codecName, extraOptions, numDataUnits and
 * numParityUnits (in that order) as before, folded into one expression.
 */
@Override
public int hashCode() {
  return ((codecName.hashCode() * 31 + extraOptions.hashCode()) * 31
      + numDataUnits) * 31 + numParityUnits;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/NativeCodeLoader.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/NativeCodeLoader.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * A helper to load the native smart code i.e. libsmart.so.
 * This handles the fallback to either the bundled libsmart-Linux-i386-32.so
 * or the default java implementations where appropriate.
 *
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class NativeCodeLoader {

private static final Log LOG =
LogFactory.getLog(org.smartdata.erasurecode.NativeCodeLoader.class);

// Set exactly once by the static initializer below at class-load time.
private static boolean nativeCodeLoaded = false;

static {
// Try to load native smart library and set fallback flag appropriately.
if(LOG.isDebugEnabled()) {
LOG.debug("Trying to load the custom-built native-smart library...");
}
try {
System.loadLibrary("smart");
LOG.debug("Loaded the native-smart library");
nativeCodeLoaded = true;
} catch (Throwable t) {
// Ignore failure to load: callers check isNativeCodeLoaded() and fall
// back to pure-Java implementations.
if(LOG.isDebugEnabled()) {
LOG.debug("Failed to load native-smart with error: " + t);
LOG.debug("java.library.path=" +
System.getProperty("java.library.path"));
}
}
if (!nativeCodeLoaded) {
LOG.warn("Unable to load native-smart library for your platform... " +
"using builtin-java classes where applicable");
}
}

// Utility class; not instantiable.
private NativeCodeLoader() {}

/**
 * Check if native-smart code is loaded for this platform.
 *
 * @return <code>true</code> if native-smart is loaded,
 * else <code>false</code>
 */
public static boolean isNativeCodeLoaded() {
return nativeCodeLoaded;
}

/**
 * Returns true only if this build was compiled with support for snappy.
 * Native method: only callable after the library loaded successfully.
 */
public static native boolean buildSupportsSnappy();

/**
 * Returns true only if this build was compiled with support for ISA-L.
 * Native method: only callable after the library loaded successfully.
 */
public static native boolean buildSupportsIsal();

/**
 * Returns true only if this build was compiled with support for ZStandard.
 * Native method: only callable after the library loaded successfully.
 */
public static native boolean buildSupportsZstd();

/**
 * Returns true only if this build was compiled with support for openssl.
 * Native method: only callable after the library loaded successfully.
 */
public static native boolean buildSupportsOpenssl();

/** Name of the loaded native library (implemented natively). */
public static native String getLibraryName();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECChunk.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ECChunk.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.hadoop.classification.InterfaceAudience;
import java.nio.ByteBuffer;
/**
 * A wrapper for a ByteBuffer or a bytes array holding one erasure code chunk.
 */
@InterfaceAudience.Private
public class ECChunk {

  private ByteBuffer chunkBuffer;

  // TODO: should be in a more general flags
  private boolean allZero = false;

  /**
   * Wrap an existing ByteBuffer.
   * @param buffer buffer to be wrapped by the chunk
   */
  public ECChunk(ByteBuffer buffer) {
    chunkBuffer = buffer;
  }

  /**
   * Wrap a window of an existing ByteBuffer without copying its bytes.
   * @param buffer backing buffer
   * @param offset start position of the window
   * @param len length of the window
   */
  public ECChunk(ByteBuffer buffer, int offset, int len) {
    ByteBuffer view = buffer.duplicate();
    view.position(offset);
    view.limit(offset + len);
    chunkBuffer = view.slice();
  }

  /**
   * Wrap a whole bytes array.
   * @param buffer buffer to be wrapped by the chunk
   */
  public ECChunk(byte[] buffer) {
    chunkBuffer = ByteBuffer.wrap(buffer);
  }

  /**
   * Wrap a region of a bytes array.
   * @param buffer backing array
   * @param offset start offset of the region
   * @param len length of the region
   */
  public ECChunk(byte[] buffer, int offset, int len) {
    chunkBuffer = ByteBuffer.wrap(buffer, offset, len);
  }

  public boolean isAllZero() {
    return allZero;
  }

  public void setAllZero(boolean allZero) {
    this.allZero = allZero;
  }

  /**
   * Convert to ByteBuffer.
   * @return the wrapped ByteBuffer
   */
  public ByteBuffer getBuffer() {
    return chunkBuffer;
  }

  /**
   * Convert an array of chunks to an array of ByteBuffers; null chunks map
   * to null entries.
   * @param chunks chunks to convert into buffers
   * @return an array of ByteBuffers
   */
  public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
    ByteBuffer[] buffers = new ByteBuffer[chunks.length];
    for (int i = 0; i < chunks.length; i++) {
      ECChunk chunk = chunks[i];
      buffers[i] = (chunk == null) ? null : chunk.getBuffer();
    }
    return buffers;
  }

  /**
   * Convert to a bytes array, just for test usage. The buffer's position is
   * restored via mark/reset, so the read position is unchanged (though any
   * previously set mark is overwritten).
   * @return bytes array
   */
  public byte[] toBytesArray() {
    byte[] bytes = new byte[chunkBuffer.remaining()];
    chunkBuffer.mark();
    chunkBuffer.get(bytes);
    chunkBuffer.reset();
    return bytes;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCodeNative.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCodeNative.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.smartdata.erasurecode.NativeCodeLoader;
/**
 * Erasure code native libraries (for now, Intel ISA-L) related utilities.
 */
public final class ErasureCodeNative {

private static final Log LOG =
LogFactory.getLog(ErasureCodeNative.class.getName());

/**
 * The reason why ISA-L library is not available, or null if it is available.
 */
private static final String LOADING_FAILURE_REASON;

static {
if (!NativeCodeLoader.isNativeCodeLoaded()) {
LOADING_FAILURE_REASON = "smart native library cannot be loaded.";
}// else if (!NativeCodeLoader.buildSupportsIsal()) {
//LOADING_FAILURE_REASON = "libsmart was built without ISA-L support";
else {
// The base native library is loaded; now try to bind ISA-L itself.
String problem = null;
try {
loadLibrary();
} catch (Throwable t) {
problem = "Loading ISA-L failed: " + t.getMessage();
LOG.error("Loading ISA-L failed", t);
}
// Remains null on success, which isNativeCodeLoaded() relies on.
LOADING_FAILURE_REASON = problem;
}
}

// Utility class; not instantiable.
private ErasureCodeNative() {}

/**
 * Are native libraries loaded?
 */
public static boolean isNativeCodeLoaded() {
return LOADING_FAILURE_REASON == null;
}

/**
 * Is the native ISA-L library loaded and initialized? Throw exception if not.
 */
public static void checkNativeCodeLoaded() {
if (LOADING_FAILURE_REASON != null) {
throw new RuntimeException(LOADING_FAILURE_REASON);
}
}

/**
 * Load native library available or supported (implemented natively).
 */
public static native void loadLibrary();

/**
 * Get the native library name that's available or supported.
 */
public static native String getLibraryName();

/** @return failure reason, or null if the native code loaded fine */
public static String getLoadingFailureReason() {
return LOADING_FAILURE_REASON;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCodeConstants.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/ErasureCodeConstants.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode;
/**
 * Constants related to the erasure code feature: codec names and the
 * pre-defined schemas built from them.
 */
public final class ErasureCodeConstants {

// Utility holder; not instantiable.
private ErasureCodeConstants() {
}

public static final String RS_DEFAULT_CODEC_NAME = "rs-default";
public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
public static final String XOR_CODEC_NAME = "xor";
public static final String HHXOR_CODEC_NAME = "hhxor";

// Schemas below are ECSchema(codecName, numDataUnits, numParityUnits).
/** Default RS with 6 data units and 3 parity units. */
public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
RS_DEFAULT_CODEC_NAME, 6, 3);

/** Default RS with 3 data units and 2 parity units. */
public static final ECSchema RS_3_2_SCHEMA = new ECSchema(
RS_DEFAULT_CODEC_NAME, 3, 2);

/** Legacy RS with 6 data units and 3 parity units. */
public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
RS_LEGACY_CODEC_NAME, 6, 3);

/** XOR with 2 data units and 1 parity unit. */
public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
XOR_CODEC_NAME, 2, 1);

/** Default RS with 10 data units and 4 parity units. */
public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
RS_DEFAULT_CODEC_NAME, 10, 4);
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/package-info.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/package-info.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Erasure codec framework.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.smartdata.erasurecode.codec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/ErasureCodec.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/ErasureCodec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.codec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.erasurecode.ECSchema;
import org.smartdata.erasurecode.ErasureCodecOptions;
import org.smartdata.erasurecode.ErasureCoderOptions;
import org.smartdata.erasurecode.coder.ErasureDecoder;
import org.smartdata.erasurecode.coder.ErasureEncoder;
import org.smartdata.erasurecode.grouper.BlockGrouper;
/**
 * Abstract Erasure Codec defines the interface of each actual erasure
 * codec class: it carries the schema plus codec/coder options and creates
 * the matching encoder, decoder and block grouper.
 */
@InterfaceAudience.Private
public abstract class ErasureCodec {

  private ECSchema schema;
  private ErasureCodecOptions codecOptions;
  private ErasureCoderOptions coderOptions;

  /**
   * @param conf configuration (currently unused here, kept for subclasses)
   * @param options codec options carrying the schema
   */
  public ErasureCodec(Configuration conf,
                      ErasureCodecOptions options) {
    this.codecOptions = options;
    this.schema = options.getSchema();
    // Coders built for this codec neither modify their inputs nor use
    // direct buffers (both flags false).
    this.coderOptions = new ErasureCoderOptions(
        schema.getNumDataUnits(), schema.getNumParityUnits(), false, false);
  }

  /** @return the codec name taken from the schema */
  public String getName() {
    return schema.getCodecName();
  }

  /** @return the schema this codec operates on */
  public ECSchema getSchema() {
    return schema;
  }

  /**
   * Get a {@link ErasureCodecOptions}.
   * @return erasure codec options
   */
  public ErasureCodecOptions getCodecOptions() {
    return codecOptions;
  }

  /** Replace the codec options; also refreshes the cached schema. */
  protected void setCodecOptions(ErasureCodecOptions options) {
    this.codecOptions = options;
    this.schema = options.getSchema();
  }

  /**
   * Get a {@link ErasureCoderOptions}.
   * @return erasure coder options
   */
  public ErasureCoderOptions getCoderOptions() {
    return coderOptions;
  }

  protected void setCoderOptions(ErasureCoderOptions options) {
    this.coderOptions = options;
  }

  /** @return a new encoder for this codec */
  public abstract ErasureEncoder createEncoder();

  /** @return a new decoder for this codec */
  public abstract ErasureDecoder createDecoder();

  /** @return a new block grouper configured with this codec's schema */
  public BlockGrouper createBlockGrouper() {
    BlockGrouper grouper = new BlockGrouper();
    grouper.setSchema(getSchema());
    return grouper;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/DummyErasureCodec.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/DummyErasureCodec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.codec;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.erasurecode.ErasureCodecOptions;
import org.smartdata.erasurecode.coder.DummyErasureDecoder;
import org.smartdata.erasurecode.coder.DummyErasureEncoder;
import org.smartdata.erasurecode.coder.ErasureDecoder;
import org.smartdata.erasurecode.coder.ErasureEncoder;
/**
 * Dummy erasure codec does no real coding computing. This is used only for
 * test or performance comparison with other erasure coders.
 */
public class DummyErasureCodec extends ErasureCodec {
// Delegates everything to the parent; only the coder factories differ.
public DummyErasureCodec(Configuration conf, ErasureCodecOptions options) {
super(conf, options);
}
@Override
public ErasureEncoder createEncoder() {
return new DummyErasureEncoder(getCoderOptions());
}
@Override
public ErasureDecoder createDecoder() {
return new DummyErasureDecoder(getCoderOptions());
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/RSErasureCodec.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/RSErasureCodec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.codec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.erasurecode.ErasureCodecOptions;
import org.smartdata.erasurecode.coder.ErasureDecoder;
import org.smartdata.erasurecode.coder.ErasureEncoder;
import org.smartdata.erasurecode.coder.RSErasureDecoder;
import org.smartdata.erasurecode.coder.RSErasureEncoder;
/**
 * A Reed-Solomon erasure codec: produces RS encoder/decoder pairs
 * configured with this codec's coder options.
 */
@InterfaceAudience.Private
public class RSErasureCodec extends ErasureCodec {
public RSErasureCodec(Configuration conf, ErasureCodecOptions options) {
super(conf, options);
}
@Override
public ErasureEncoder createEncoder() {
return new RSErasureEncoder(getCoderOptions());
}
@Override
public ErasureDecoder createDecoder() {
return new RSErasureDecoder(getCoderOptions());
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/HHXORErasureCodec.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/HHXORErasureCodec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.codec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.erasurecode.ErasureCodecOptions;
import org.smartdata.erasurecode.coder.ErasureDecoder;
import org.smartdata.erasurecode.coder.ErasureEncoder;
import org.smartdata.erasurecode.coder.HHXORErasureDecoder;
import org.smartdata.erasurecode.coder.HHXORErasureEncoder;
/**
 * A Hitchhiker-XOR erasure codec: produces HHXOR encoder/decoder pairs
 * configured with this codec's coder options.
 */
@InterfaceAudience.Private
public class HHXORErasureCodec extends ErasureCodec {
public HHXORErasureCodec(Configuration conf, ErasureCodecOptions options) {
super(conf, options);
}
@Override
public ErasureEncoder createEncoder() {
return new HHXORErasureEncoder(getCoderOptions());
}
@Override
public ErasureDecoder createDecoder() {
return new HHXORErasureDecoder(getCoderOptions());
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/XORErasureCodec.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/codec/XORErasureCodec.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.codec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.erasurecode.ErasureCodecOptions;
import org.smartdata.erasurecode.coder.ErasureDecoder;
import org.smartdata.erasurecode.coder.ErasureEncoder;
import org.smartdata.erasurecode.coder.XORErasureDecoder;
import org.smartdata.erasurecode.coder.XORErasureEncoder;
/**
 * A XOR erasure codec.
 *
 * <p>XOR can reconstruct at most one erased unit, so the schema must declare
 * exactly one parity unit.
 */
@InterfaceAudience.Private
public class XORErasureCodec extends ErasureCodec {

  /**
   * @param conf configuration passed through to the parent codec
   * @param options codec options; the schema must have exactly 1 parity unit
   * @throws IllegalArgumentException if the schema's parity-unit count is
   *         not 1
   */
  public XORErasureCodec(Configuration conf, ErasureCodecOptions options) {
    super(conf, options);
    // Was a bare `assert`, which is a no-op unless the JVM runs with -ea;
    // validate explicitly so an invalid schema fails fast in production.
    if (options.getSchema().getNumParityUnits() != 1) {
      throw new IllegalArgumentException(
          "XOR codec requires exactly 1 parity unit, but got "
              + options.getSchema().getNumParityUnits());
    }
  }

  @Override
  public ErasureEncoder createEncoder() {
    return new XORErasureEncoder(getCoderOptions());
  }

  @Override
  public ErasureDecoder createDecoder() {
    return new XORErasureDecoder(getCoderOptions());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/XORRawDecoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/XORRawDecoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
 * A raw decoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
 *
 * XOR code is an important primitive code scheme in erasure coding and often
 * used in advanced codes, like HitchHiker and LRC, though itself is rarely
 * deployed independently.
 */
@InterfaceAudience.Private
public class XORRawDecoder extends RawErasureDecoder {

  public XORRawDecoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
  }

  /**
   * Recover the single erased unit by XOR-folding every surviving input
   * into outputs[0], using absolute buffer access so positions are
   * untouched.
   */
  @Override
  protected void doDecode(ByteBufferDecodingState decodingState) {
    CoderUtil.resetOutputBuffers(decodingState.outputs,
        decodingState.decodeLength);
    final ByteBuffer output = decodingState.outputs[0];
    final int erasedIdx = decodingState.erasedIndexes[0];

    for (int i = 0; i < decodingState.inputs.length; i++) {
      if (i == erasedIdx) {
        // The erased unit contributes nothing; it is what we reconstruct.
        continue;
      }
      ByteBuffer input = decodingState.inputs[i];
      int outPos = output.position();
      for (int inPos = input.position(); inPos < input.limit();
           inPos++, outPos++) {
        output.put(outPos, (byte) (output.get(outPos) ^ input.get(inPos)));
      }
    }
  }

  /**
   * Byte-array variant of {@link #doDecode}: XOR-folds each surviving
   * input region into the single output region.
   */
  @Override
  protected void doDecode(ByteArrayDecodingState decodingState) {
    byte[] output = decodingState.outputs[0];
    int dataLen = decodingState.decodeLength;
    CoderUtil.resetOutputBuffers(decodingState.outputs,
        decodingState.outputOffsets, dataLen);
    final int erasedIdx = decodingState.erasedIndexes[0];

    for (int i = 0; i < decodingState.inputs.length; i++) {
      if (i == erasedIdx) {
        continue;
      }
      byte[] input = decodingState.inputs[i];
      int inOff = decodingState.inputOffsets[i];
      int outOff = decodingState.outputOffsets[0];
      for (int j = 0; j < dataLen; j++) {
        output[outOff + j] ^= input[inOff + j];
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/DummyRawErasureCoderFactory.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/DummyRawErasureCoderFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
/**
 * A raw erasure coder factory for dummy raw coders, used for tests and
 * performance baselines.
 */
@InterfaceAudience.Private
public class DummyRawErasureCoderFactory implements RawErasureCoderFactory {
@Override
public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
return new DummyRawEncoder(coderOptions);
}
@Override
public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
return new DummyRawDecoder(coderOptions);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/ByteArrayEncodingState.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/ByteArrayEncodingState.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import java.nio.ByteBuffer;
/**
* A utility class that maintains encoding state during an encode call using
* byte array inputs.
*/
@InterfaceAudience.Private
class ByteArrayEncodingState extends EncodingState {
  byte[][] inputs;
  byte[][] outputs;
  int[] inputOffsets;
  int[] outputOffsets;

  /**
   * Creates an encoding state over whole arrays: every buffer is consumed
   * from index 0, so both offset arrays are left zero-filled.
   */
  ByteArrayEncodingState(RawErasureEncoder encoder,
                         byte[][] inputs, byte[][] outputs) {
    this.encoder = encoder;
    // Derive the encode length from the first non-null input buffer.
    byte[] firstValid = CoderUtil.findFirstValidInput(inputs);
    this.encodeLength = firstValid.length;
    this.inputs = inputs;
    this.outputs = outputs;
    checkParameters(inputs, outputs);
    checkBuffers(inputs);
    checkBuffers(outputs);
    // A fresh int[] is zero-initialized: all buffers start at offset 0.
    this.inputOffsets = new int[inputs.length];
    this.outputOffsets = new int[outputs.length];
  }

  /**
   * Creates an encoding state with caller-supplied offsets. No validation is
   * performed on this trusted internal path, matching the original contract.
   */
  ByteArrayEncodingState(RawErasureEncoder encoder,
                         int encodeLength,
                         byte[][] inputs,
                         int[] inputOffsets,
                         byte[][] outputs,
                         int[] outputOffsets) {
    this.encoder = encoder;
    this.encodeLength = encodeLength;
    this.inputs = inputs;
    this.outputs = outputs;
    this.inputOffsets = inputOffsets;
    this.outputOffsets = outputOffsets;
  }

  /**
   * Convert to a ByteBufferEncodingState when it's backed by on-heap arrays.
   * Inputs are copied into fresh direct buffers; outputs are only allocated,
   * since the encoder overwrites them anyway.
   */
  ByteBufferEncodingState convertToByteBufferState() {
    ByteBuffer[] directInputs = new ByteBuffer[inputs.length];
    ByteBuffer[] directOutputs = new ByteBuffer[outputs.length];
    for (int idx = 0; idx < inputs.length; idx++) {
      directInputs[idx] = CoderUtil.cloneAsDirectByteBuffer(inputs[idx],
          inputOffsets[idx], encodeLength);
    }
    for (int idx = 0; idx < outputs.length; idx++) {
      directOutputs[idx] = ByteBuffer.allocateDirect(encodeLength);
    }
    return new ByteBufferEncodingState(encoder, encodeLength,
        directInputs, directOutputs);
  }

  /**
   * Check and ensure the buffers are of the desired length.
   * @param buffers the buffers to check
   */
  void checkBuffers(byte[][] buffers) {
    for (byte[] buffer : buffers) {
      if (buffer == null) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer found, not allowing null");
      }
      if (buffer.length != encodeLength) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer not of length " + encodeLength);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/XORRawEncoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/XORRawEncoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
* A raw encoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
*
* XOR code is an important primitive code scheme in erasure coding and often
* used in advanced codes, like HitchHiker and LRC, though itself is rarely
* deployed independently.
*/
@InterfaceAudience.Private
public class XORRawEncoder extends RawErasureEncoder {

  public XORRawEncoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
  }

  /**
   * Encodes using ByteBuffers: the single parity output is the XOR of all
   * input buffers. Positions of the buffers are not advanced; absolute
   * get/put is used throughout.
   */
  @Override  // was missing; the byte-array overload below already had it
  protected void doEncode(ByteBufferEncodingState encodingState) {
    CoderUtil.resetOutputBuffers(encodingState.outputs,
        encodingState.encodeLength);
    ByteBuffer output = encodingState.outputs[0];

    // Seed the output with the first input's data.
    int iIdx, oIdx;
    for (iIdx = encodingState.inputs[0].position(), oIdx = output.position();
         iIdx < encodingState.inputs[0].limit(); iIdx++, oIdx++) {
      output.put(oIdx, encodingState.inputs[0].get(iIdx));
    }

    // XOR with everything else.
    for (int i = 1; i < encodingState.inputs.length; i++) {
      for (iIdx = encodingState.inputs[i].position(), oIdx = output.position();
           iIdx < encodingState.inputs[i].limit();
           iIdx++, oIdx++) {
        output.put(oIdx, (byte) (output.get(oIdx) ^
            encodingState.inputs[i].get(iIdx)));
      }
    }
  }

  /**
   * Encodes using on-heap byte arrays, honoring the per-buffer offsets
   * carried by the encoding state.
   */
  @Override
  protected void doEncode(ByteArrayEncodingState encodingState) {
    int dataLen = encodingState.encodeLength;
    CoderUtil.resetOutputBuffers(encodingState.outputs,
        encodingState.outputOffsets, dataLen);
    byte[] output = encodingState.outputs[0];

    // Seed the output with the first input's data. System.arraycopy replaces
    // the original manual byte-by-byte loop; same result, clearer and faster.
    System.arraycopy(encodingState.inputs[0], encodingState.inputOffsets[0],
        output, encodingState.outputOffsets[0], dataLen);

    // XOR with everything else.
    for (int i = 1; i < encodingState.inputs.length; i++) {
      for (int iIdx = encodingState.inputOffsets[i],
           oIdx = encodingState.outputOffsets[0];
           iIdx < encodingState.inputOffsets[i] + dataLen; iIdx++, oIdx++) {
        output[oIdx] ^= encodingState.inputs[i][iIdx];
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/AbstractNativeRawEncoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/AbstractNativeRawEncoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
* Abstract native raw encoder for all native coders to extend with.
*/
@InterfaceAudience.Private
abstract class AbstractNativeRawEncoder extends RawErasureEncoder {
  // final added: the logger is never reassigned; kept public so existing
  // readers elsewhere stay compatible.
  public static final Logger LOG =
      LoggerFactory.getLogger(AbstractNativeRawEncoder.class);

  public AbstractNativeRawEncoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
  }

  /**
   * Snapshots the position of every input/output buffer into offset arrays
   * and delegates the actual math to the native implementation.
   */
  @Override
  protected void doEncode(ByteBufferEncodingState encodingState) {
    int[] inputOffsets = new int[encodingState.inputs.length];
    int[] outputOffsets = new int[encodingState.outputs.length];
    // All inputs share one encode length; take it from the first buffer.
    int dataLen = encodingState.inputs[0].remaining();
    ByteBuffer buffer;
    for (int i = 0; i < encodingState.inputs.length; ++i) {
      buffer = encodingState.inputs[i];
      inputOffsets[i] = buffer.position();
    }
    for (int i = 0; i < encodingState.outputs.length; ++i) {
      buffer = encodingState.outputs[i];
      outputOffsets[i] = buffer.position();
    }
    performEncodeImpl(encodingState.inputs, inputOffsets, dataLen,
        encodingState.outputs, outputOffsets);
  }

  /**
   * Subclasses implement the codec-specific native encode here.
   */
  protected abstract void performEncodeImpl(
      ByteBuffer[] inputs, int[] inputOffsets,
      int dataLen, ByteBuffer[] outputs, int[] outputOffsets);

  /**
   * Byte-array fallback: converts to direct ByteBuffers, encodes, then
   * copies the parity back out. Inefficient by design — direct ByteBuffer
   * inputs/outputs are preferred for native coders.
   */
  @Override
  protected void doEncode(ByteArrayEncodingState encodingState) {
    LOG.warn("convertToByteBufferState is invoked, " +
        "not efficiently. Please use direct ByteBuffer inputs/outputs");
    ByteBufferEncodingState bbeState = encodingState.convertToByteBufferState();
    doEncode(bbeState);
    // Copy the computed parity back into the caller's byte arrays.
    for (int i = 0; i < encodingState.outputs.length; i++) {
      bbeState.outputs[i].get(encodingState.outputs[i],
          encodingState.outputOffsets[i], encodingState.encodeLength);
    }
  }

  // To link with the underlying data structure in the native layer.
  // No get/set as only used by native codes; do not rename (JNI binding).
  private long nativeCoder;
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/package-info.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/package-info.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
* Raw erasure coders.
*
* Raw erasure coder is part of erasure codec framework, where erasure coder is
* used to encode/decode a group of blocks (BlockGroup) according to the codec
* specific BlockGroup layout and logic. An erasure coder extracts chunks of
* data from the blocks and can employ various low level raw erasure coders to
* perform encoding/decoding against the chunks.
*
* To distinguish from erasure coder, here raw erasure coder is used to mean the
* low level constructs, since it only takes care of the math calculation with
* a group of byte buffers.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawErasureCoderFactory.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawErasureCoderFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
/**
* A raw coder factory for the new raw Reed-Solomon coder in Java.
*/
@InterfaceAudience.Private
public class RSRawErasureCoderFactory implements RawErasureCoderFactory {

  /** Instantiates the new pure-Java Reed-Solomon encoder. */
  @Override
  public RawErasureEncoder createEncoder(ErasureCoderOptions options) {
    return new RSRawEncoder(options);
  }

  /** Instantiates the new pure-Java Reed-Solomon decoder. */
  @Override
  public RawErasureDecoder createDecoder(ErasureCoderOptions options) {
    return new RSRawDecoder(options);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawErasureCoderFactoryLegacy.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawErasureCoderFactoryLegacy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
/**
* A raw coder factory for the legacy raw Reed-Solomon coder in Java.
*/
@InterfaceAudience.Private
public class RSRawErasureCoderFactoryLegacy implements RawErasureCoderFactory {

  /** Instantiates the legacy (HDFS-RAID-derived) Reed-Solomon encoder. */
  @Override
  public RawErasureEncoder createEncoder(ErasureCoderOptions options) {
    return new RSRawEncoderLegacy(options);
  }

  /** Instantiates the legacy (HDFS-RAID-derived) Reed-Solomon decoder. */
  @Override
  public RawErasureDecoder createDecoder(ErasureCoderOptions options) {
    return new RSRawDecoderLegacy(options);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawEncoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawEncoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
import org.smartdata.erasurecode.rawcoder.util.DumpUtil;
import org.smartdata.erasurecode.rawcoder.util.RSUtil;
/**
* A raw erasure encoder in RS code scheme in pure Java in case native one
* isn't available in some environment. Please always use native implementations
* when possible. This new Java coder is about 5X faster than the one originated
* from HDFS-RAID, and also compatible with the native/ISA-L coder.
*/
@InterfaceAudience.Private
public class RSRawEncoder extends RawErasureEncoder {
  // Encode matrix derived from the schema; relevant to schema and won't
  // change during encode calls.
  private byte[] encodeMatrix;
  /**
   * Array of input tables generated from coding coefficients previously.
   * Must be of size 32*k*rows
   */
  private byte[] gfTables;

  /**
   * Builds the Cauchy encode matrix and the GF lookup tables for the given
   * (data, parity) layout.
   *
   * @param coderOptions the data/parity unit counts and dump options
   * @throws HadoopIllegalArgumentException if data+parity units reach the
   *         GF field size (the code cannot be constructed then)
   */
  public RSRawEncoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
    // A (k+m, k) RS code only exists while k+m < |GF|.
    if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
      throw new HadoopIllegalArgumentException(
          "Invalid numDataUnits and numParityUnits");
    }
    encodeMatrix = new byte[getNumAllUnits() * getNumDataUnits()];
    RSUtil.genCauchyMatrix(encodeMatrix, getNumAllUnits(), getNumDataUnits());
    if (allowVerboseDump()) {
      DumpUtil.dumpMatrix(encodeMatrix, getNumDataUnits(), getNumAllUnits());
    }
    // 32 bytes of table per matrix coefficient (ISA-L compatible layout).
    gfTables = new byte[getNumAllUnits() * getNumDataUnits() * 32];
    // Offset k*k skips the identity part of the matrix; only the parity
    // rows feed the tables.
    RSUtil.initTables(getNumDataUnits(), getNumParityUnits(), encodeMatrix,
        getNumDataUnits() * getNumDataUnits(), gfTables);
    if (allowVerboseDump()) {
      System.out.println(DumpUtil.bytesToHex(gfTables, -1));
    }
  }

  /** Encodes ByteBuffer inputs using the precomputed GF tables. */
  @Override
  protected void doEncode(ByteBufferEncodingState encodingState) {
    CoderUtil.resetOutputBuffers(encodingState.outputs,
        encodingState.encodeLength);
    RSUtil.encodeData(gfTables, encodingState.inputs, encodingState.outputs);
  }

  /** Encodes byte-array inputs (with offsets) using the precomputed tables. */
  @Override
  protected void doEncode(ByteArrayEncodingState encodingState) {
    CoderUtil.resetOutputBuffers(encodingState.outputs,
        encodingState.outputOffsets,
        encodingState.encodeLength);
    RSUtil.encodeData(gfTables, encodingState.encodeLength,
        encodingState.inputs,
        encodingState.inputOffsets, encodingState.outputs,
        encodingState.outputOffsets);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeRSRawEncoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeRSRawEncoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCodeNative;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
* A Reed-Solomon raw encoder using Intel ISA-L library.
*/
@InterfaceAudience.Private
public class NativeRSRawEncoder extends AbstractNativeRawEncoder {

  static {
    // Fail fast at class-load time if the ISA-L native library is missing.
    ErasureCodeNative.checkNativeCodeLoaded();
  }

  public NativeRSRawEncoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
    // Allocates the native coder state; released via release()/destroyImpl().
    initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
  }

  /** Delegates straight to the native ISA-L encode routine. */
  @Override
  protected void performEncodeImpl(
      ByteBuffer[] inputs, int[] inputOffsets, int dataLen,
      ByteBuffer[] outputs, int[] outputOffsets) {
    encodeImpl(inputs, inputOffsets, dataLen, outputs, outputOffsets);
  }

  /** Frees the native coder state. Must not encode after this. */
  @Override
  public void release() {
    destroyImpl();
  }

  /** Native coders work on direct buffers without a copy, so prefer them. */
  @Override
  public boolean preferDirectBuffer() {
    return true;
  }

  // JNI bindings — names and signatures must match the native library;
  // do not rename.
  private native void initImpl(int numDataUnits, int numParityUnits);
  private native void encodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
                                 int dataLen, ByteBuffer[] outputs,
                                 int[] outputOffsets);
  private native void destroyImpl();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RawErasureCoderFactory.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RawErasureCoderFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
/**
* Raw erasure coder factory that can be used to create raw encoder and decoder.
* It helps in configuration since only one factory class is needed to be
* configured.
*/
@InterfaceAudience.Private
public interface RawErasureCoderFactory {
  /**
   * Create raw erasure encoder.
   * @param coderOptions the options used to create the encoder
   * @return raw erasure encoder
   */
  RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions);
  /**
   * Create raw erasure decoder.
   * @param coderOptions the options used to create the decoder
   * @return raw erasure decoder
   */
  RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions);
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawDecoderLegacy.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RSRawDecoderLegacy.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
import org.smartdata.erasurecode.rawcoder.util.RSUtil;
import java.nio.ByteBuffer;
/**
* A raw erasure decoder in RS code scheme in pure Java in case native one
* isn't available in some environment. Please always use native implementations
* when possible.
*
* Currently this implementation will compute and decode not to read units
* unnecessarily due to the underlying implementation limit in GF. This will be
* addressed in HADOOP-11871.
*/
@InterfaceAudience.Private
public class RSRawDecoderLegacy extends RawErasureDecoder {
  // To describe and calculate the needed Vandermonde matrix
  private int[] errSignature;
  private int[] primitivePower;

  /**
   * @param coderOptions the data/parity unit counts
   * @throws HadoopIllegalArgumentException if data+parity units reach the
   *         GF field size
   */
  public RSRawDecoderLegacy(ErasureCoderOptions coderOptions) {
    super(coderOptions);
    // A (k+m, k) RS code only exists while k+m < |GF|.
    if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
      throw new HadoopIllegalArgumentException(
          "Invalid numDataUnits and numParityUnits");
    }
    this.errSignature = new int[getNumParityUnits()];
    this.primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
        getNumParityUnits());
  }

  /**
   * Decodes after reordering buffers: the legacy implementation expects
   * parity units first, while callers pass data units first.
   */
  @Override
  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
                     ByteBuffer[] outputs) {
    // Make copies avoiding affecting original ones;
    ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
    int[] newErasedIndexes = new int[erasedIndexes.length];
    ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
    // Adjust the order to match with underlying requirements.
    adjustOrder(inputs, newInputs,
        erasedIndexes, newErasedIndexes, outputs, newOutputs);
    super.decode(newInputs, newErasedIndexes, newOutputs);
  }

  /**
   * Byte-array variant of {@link #decode(ByteBuffer[], int[], ByteBuffer[])};
   * same reordering before delegating to the parent.
   */
  @Override
  public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs) {
    // Make copies avoiding affecting original ones;
    byte[][] newInputs = new byte[inputs.length][];
    int[] newErasedIndexes = new int[erasedIndexes.length];
    byte[][] newOutputs = new byte[outputs.length][];
    // Adjust the order to match with underlying requirements.
    adjustOrder(inputs, newInputs,
        erasedIndexes, newErasedIndexes, outputs, newOutputs);
    super.decode(newInputs, newErasedIndexes, newOutputs);
  }

  // Core GF decode for ByteBuffers: substitute each erased position's
  // primitive power, then solve the Vandermonde system in place.
  private void doDecodeImpl(ByteBuffer[] inputs, int[] erasedIndexes,
                          ByteBuffer[] outputs) {
    ByteBuffer valid = CoderUtil.findFirstValidInput(inputs);
    int dataLen = valid.remaining();
    for (int i = 0; i < erasedIndexes.length; i++) {
      errSignature[i] = primitivePower[erasedIndexes[i]];
      RSUtil.GF.substitute(inputs, dataLen, outputs[i], primitivePower[i]);
    }
    RSUtil.GF.solveVandermondeSystem(errSignature,
        outputs, erasedIndexes.length);
  }

  // Core GF decode for byte arrays with explicit offsets; mirrors the
  // ByteBuffer variant above.
  private void doDecodeImpl(byte[][] inputs, int[] inputOffsets,
                            int dataLen, int[] erasedIndexes,
                            byte[][] outputs, int[] outputOffsets) {
    for (int i = 0; i < erasedIndexes.length; i++) {
      errSignature[i] = primitivePower[erasedIndexes[i]];
      RSUtil.GF.substitute(inputs, inputOffsets, dataLen, outputs[i],
          outputOffsets[i], primitivePower[i]);
    }
    RSUtil.GF.solveVandermondeSystem(errSignature, outputs, outputOffsets,
        erasedIndexes.length, dataLen);
  }

  /**
   * Byte-array decode: builds an output array covering EVERY erased or
   * not-to-read position (the legacy GF code requires it), reusing caller
   * buffers where requested and temporary buffers elsewhere.
   */
  @Override
  protected void doDecode(ByteArrayDecodingState decodingState) {
    int dataLen = decodingState.decodeLength;
    CoderUtil.resetOutputBuffers(decodingState.outputs,
        decodingState.outputOffsets, dataLen);
    /**
     * As passed parameters are friendly to callers but not to the underlying
     * implementations, so we have to adjust them before calling doDecodeImpl.
     */
    byte[][] bytesArrayBuffers = new byte[getNumParityUnits()][];
    byte[][] adjustedByteArrayOutputsParameter =
        new byte[getNumParityUnits()][];
    int[] adjustedOutputOffsets = new int[getNumParityUnits()];
    int[] erasedOrNotToReadIndexes =
        CoderUtil.getNullIndexes(decodingState.inputs);
    // Use the caller passed buffers in erasedIndexes positions
    for (int outputIdx = 0, i = 0;
         i < decodingState.erasedIndexes.length; i++) {
      boolean found = false;
      for (int j = 0; j < erasedOrNotToReadIndexes.length; j++) {
        // If this index is one requested by the caller via erasedIndexes, then
        // we use the passed output buffer to avoid copying data thereafter.
        if (decodingState.erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
          found = true;
          adjustedByteArrayOutputsParameter[j] = CoderUtil.resetBuffer(
              decodingState.outputs[outputIdx],
              decodingState.outputOffsets[outputIdx], dataLen);
          adjustedOutputOffsets[j] = decodingState.outputOffsets[outputIdx];
          outputIdx++;
        }
      }
      if (!found) {
        throw new HadoopIllegalArgumentException(
            "Inputs not fully corresponding to erasedIndexes in null places");
      }
    }
    // Use shared buffers for other positions (not set yet)
    for (int bufferIdx = 0, i = 0; i < erasedOrNotToReadIndexes.length; i++) {
      if (adjustedByteArrayOutputsParameter[i] == null) {
        adjustedByteArrayOutputsParameter[i] = CoderUtil.resetBuffer(
            checkGetBytesArrayBuffer(bytesArrayBuffers, bufferIdx, dataLen),
            0, dataLen);
        adjustedOutputOffsets[i] = 0; // Always 0 for such temp output
        bufferIdx++;
      }
    }
    doDecodeImpl(decodingState.inputs, decodingState.inputOffsets,
        dataLen, erasedOrNotToReadIndexes,
        adjustedByteArrayOutputsParameter, adjustedOutputOffsets);
  }

  /**
   * ByteBuffer decode: same output-adjustment scheme as the byte-array
   * variant, but temp buffers are direct ByteBuffers.
   */
  @Override
  protected void doDecode(ByteBufferDecodingState decodingState) {
    int dataLen = decodingState.decodeLength;
    CoderUtil.resetOutputBuffers(decodingState.outputs, dataLen);
    /**
     * As passed parameters are friendly to callers but not to the underlying
     * implementations, so we have to adjust them before calling doDecodeImpl.
     */
    int[] erasedOrNotToReadIndexes =
        CoderUtil.getNullIndexes(decodingState.inputs);
    ByteBuffer[] directBuffers = new ByteBuffer[getNumParityUnits()];
    ByteBuffer[] adjustedDirectBufferOutputsParameter =
        new ByteBuffer[getNumParityUnits()];
    // Use the caller passed buffers in erasedIndexes positions
    for (int outputIdx = 0, i = 0;
         i < decodingState.erasedIndexes.length; i++) {
      boolean found = false;
      for (int j = 0; j < erasedOrNotToReadIndexes.length; j++) {
        // If this index is one requested by the caller via erasedIndexes, then
        // we use the passed output buffer to avoid copying data thereafter.
        if (decodingState.erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
          found = true;
          adjustedDirectBufferOutputsParameter[j] = CoderUtil.resetBuffer(
              decodingState.outputs[outputIdx++], dataLen);
        }
      }
      if (!found) {
        throw new HadoopIllegalArgumentException(
            "Inputs not fully corresponding to erasedIndexes in null places");
      }
    }
    // Use shared buffers for other positions (not set yet)
    for (int bufferIdx = 0, i = 0; i < erasedOrNotToReadIndexes.length; i++) {
      if (adjustedDirectBufferOutputsParameter[i] == null) {
        ByteBuffer buffer = checkGetDirectBuffer(
            directBuffers, bufferIdx, dataLen);
        buffer.position(0);
        buffer.limit(dataLen);
        adjustedDirectBufferOutputsParameter[i] =
            CoderUtil.resetBuffer(buffer, dataLen);
        bufferIdx++;
      }
    }
    doDecodeImpl(decodingState.inputs, erasedOrNotToReadIndexes,
        adjustedDirectBufferOutputsParameter);
  }

  /*
   * Convert data units first order to parity units first order.
   */
  private <T> void adjustOrder(T[] inputs, T[] inputs2,
                               int[] erasedIndexes, int[] erasedIndexes2,
                               T[] outputs, T[] outputs2) {
    // Example:
    // d0 d1 d2 d3 d4 d5 : p0 p1 p2 => p0 p1 p2 : d0 d1 d2 d3 d4 d5
    System.arraycopy(inputs, getNumDataUnits(), inputs2,
        0, getNumParityUnits());
    System.arraycopy(inputs, 0, inputs2,
        getNumParityUnits(), getNumDataUnits());
    int numErasedDataUnits = 0, numErasedParityUnits = 0;
    int idx = 0;
    // Parity erasures first, remapped into the new (parity-first) index space.
    for (int i = 0; i < erasedIndexes.length; i++) {
      if (erasedIndexes[i] >= getNumDataUnits()) {
        erasedIndexes2[idx++] = erasedIndexes[i] - getNumDataUnits();
        numErasedParityUnits++;
      }
    }
    // Then data erasures, shifted past the parity positions.
    for (int i = 0; i < erasedIndexes.length; i++) {
      if (erasedIndexes[i] < getNumDataUnits()) {
        erasedIndexes2[idx++] = erasedIndexes[i] + getNumParityUnits();
        numErasedDataUnits++;
      }
    }
    // Copy for data units
    System.arraycopy(outputs, numErasedDataUnits, outputs2,
        0, numErasedParityUnits);
    // Copy for parity units
    System.arraycopy(outputs, 0, outputs2,
        numErasedParityUnits, numErasedDataUnits);
  }

  // Lazily allocates (or grows) the temp byte array at idx.
  private static byte[] checkGetBytesArrayBuffer(byte[][] bytesArrayBuffers,
                                                 int idx, int bufferLen) {
    if (bytesArrayBuffers[idx] == null ||
        bytesArrayBuffers[idx].length < bufferLen) {
      bytesArrayBuffers[idx] = new byte[bufferLen];
    }
    return bytesArrayBuffers[idx];
  }

  // Lazily allocates (or grows) the temp direct ByteBuffer at idx.
  private static ByteBuffer checkGetDirectBuffer(ByteBuffer[] directBuffers,
                                                 int idx, int bufferLen) {
    if (directBuffers[idx] == null ||
        directBuffers[idx].capacity() < bufferLen) {
      directBuffers[idx] = ByteBuffer.allocateDirect(bufferLen);
    }
    return directBuffers[idx];
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeXORRawDecoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeXORRawDecoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCodeNative;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
 * A XOR raw decoder using Intel ISA-L library.
 */
@InterfaceAudience.Private
public class NativeXORRawDecoder extends AbstractNativeRawDecoder {

  static {
    // Fail fast if the native ISA-L bridge is unavailable.
    ErasureCodeNative.checkNativeCodeLoaded();
  }

  public NativeXORRawDecoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
    // Allocate native decoder state for this (data, parity) schema.
    initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
  }

  /** Delegates the actual decode to the native ISA-L implementation. */
  @Override
  protected void performDecodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
      int dataLen, int[] erased, ByteBuffer[] outputs, int[] outputOffsets) {
    decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets);
  }

  /** Frees the native decoder state allocated by initImpl(). */
  @Override
  public void release() {
    destroyImpl();
  }

  /**
   * The native implementation operates on direct buffers; advertising that
   * preference lets callers allocate direct buffers up front instead of
   * paying a heap-to-direct conversion on every decode call.  This also
   * makes the class consistent with {@link NativeRSRawDecoder}, which
   * already declares the same preference.
   */
  @Override
  public boolean preferDirectBuffer() {
    return true;
  }

  private native void initImpl(int numDataUnits, int numParityUnits);

  private native void decodeImpl(
      ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased,
      ByteBuffer[] outputs, int[] outputOffsets);

  private native void destroyImpl();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
/**
 * A raw coder factory for raw Reed-Solomon coder in native using Intel ISA-L.
 */
@InterfaceAudience.Private
public class NativeRSRawErasureCoderFactory implements RawErasureCoderFactory {

  @Override
  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
    // Each call hands back a fresh native RS encoder for the given schema.
    final NativeRSRawEncoder encoder = new NativeRSRawEncoder(coderOptions);
    return encoder;
  }

  @Override
  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
    // Each call hands back a fresh native RS decoder for the given schema.
    final NativeRSRawDecoder decoder = new NativeRSRawDecoder(coderOptions);
    return decoder;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/ByteBufferEncodingState.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/ByteBufferEncodingState.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import java.nio.ByteBuffer;
/**
 * A utility class that maintains encoding state during an encode call using
 * ByteBuffer inputs.
 */
@InterfaceAudience.Private
class ByteBufferEncodingState extends EncodingState {
  // Data-unit buffers to read from, one per data unit.
  ByteBuffer[] inputs;
  // Parity-unit buffers to write into, one per parity unit.
  ByteBuffer[] outputs;
  // Whether the buffers are direct; derived from the first non-null input.
  boolean usingDirectBuffer;

  /**
   * Build and validate state from caller-supplied buffers.  The encode
   * length and buffer kind (direct vs. on-heap) are taken from the first
   * non-null input, then every input and output is checked against them.
   */
  ByteBufferEncodingState(RawErasureEncoder encoder,
      ByteBuffer[] inputs, ByteBuffer[] outputs) {
    this.encoder = encoder;
    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
    this.encodeLength = validInput.remaining();
    this.usingDirectBuffer = validInput.isDirect();
    this.inputs = inputs;
    this.outputs = outputs;
    checkParameters(inputs, outputs);
    checkBuffers(inputs);
    checkBuffers(outputs);
  }

  /**
   * Build state without validation.  NOTE(review): this constructor leaves
   * usingDirectBuffer at its default (false) and performs no buffer checks;
   * callers are expected to supply consistent, pre-validated buffers.
   */
  ByteBufferEncodingState(RawErasureEncoder encoder,
      int encodeLength,
      ByteBuffer[] inputs,
      ByteBuffer[] outputs) {
    this.encoder = encoder;
    this.encodeLength = encodeLength;
    this.inputs = inputs;
    this.outputs = outputs;
  }

  /**
   * Convert to a ByteArrayEncodingState when it's backed by on-heap arrays.
   * The backing arrays are shared, not copied; offsets account for each
   * buffer's arrayOffset() and current position.  Calling this on direct
   * buffers would fail, since they have no accessible backing array.
   */
  ByteArrayEncodingState convertToByteArrayState() {
    int[] inputOffsets = new int[inputs.length];
    int[] outputOffsets = new int[outputs.length];
    byte[][] newInputs = new byte[inputs.length][];
    byte[][] newOutputs = new byte[outputs.length][];

    ByteBuffer buffer;
    for (int i = 0; i < inputs.length; ++i) {
      buffer = inputs[i];
      inputOffsets[i] = buffer.arrayOffset() + buffer.position();
      newInputs[i] = buffer.array();
    }

    for (int i = 0; i < outputs.length; ++i) {
      buffer = outputs[i];
      outputOffsets[i] = buffer.arrayOffset() + buffer.position();
      newOutputs[i] = buffer.array();
    }

    ByteArrayEncodingState baeState = new ByteArrayEncodingState(encoder,
        encodeLength, newInputs, inputOffsets, newOutputs, outputOffsets);
    return baeState;
  }

  /**
   * Check and ensure the buffers are of the desired length and type, direct
   * buffers or not.
   * @param buffers the buffers to check
   */
  void checkBuffers(ByteBuffer[] buffers) {
    for (ByteBuffer buffer : buffers) {
      if (buffer == null) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer found, not allowing null");
      }

      if (buffer.remaining() != encodeLength) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer, not of length " + encodeLength);
      }
      if (buffer.isDirect() != usingDirectBuffer) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer, isDirect should be " + usingDirectBuffer);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/CoderUtil.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/CoderUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ECChunk;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
 * Helpful utilities for implementing some raw erasure coders.
 */
@InterfaceAudience.Private
public final class CoderUtil {

  private CoderUtil() {
    // Static utility class; not to be instantiated.
  }

  // Shared zero-filled scratch chunk, grown on demand.  Callers must only
  // read from it; writing would corrupt every user of the shared array.
  private static byte[] emptyChunk = new byte[4096];

  /**
   * Make sure to return an empty chunk buffer for the desired length.
   * @param leastLength
   * @return empty chunk of zero bytes
   */
  static byte[] getEmptyChunk(int leastLength) {
    if (emptyChunk.length >= leastLength) {
      return emptyChunk; // In most time
    }

    // Replace with a larger zero-filled array.  The unsynchronized read
    // above is a benign race: a stale reference is still fully zeroed, and
    // a too-small one just leads here to reallocate.
    synchronized (CoderUtil.class) {
      emptyChunk = new byte[leastLength];
    }

    return emptyChunk;
  }

  /**
   * Ensure a buffer filled with ZERO bytes from current readable/writable
   * position.  Requires at least {@code len} bytes remaining, otherwise the
   * underlying put throws BufferOverflowException.
   * @param buffer a buffer ready to read / write certain size bytes
   * @return the buffer itself, with ZERO bytes written, the position and limit
   *         are not changed after the call
   */
  static ByteBuffer resetBuffer(ByteBuffer buffer, int len) {
    int pos = buffer.position();
    // Bulk-put zeros (advances position), then rewind to where we started.
    buffer.put(getEmptyChunk(len), 0, len);
    buffer.position(pos);

    return buffer;
  }

  /**
   * Ensure the buffer (either input or output) ready to read or write with ZERO
   * bytes fully in specified length of len.
   * @param buffer bytes array buffer
   * @return the buffer itself
   */
  static byte[] resetBuffer(byte[] buffer, int offset, int len) {
    byte[] empty = getEmptyChunk(len);
    System.arraycopy(empty, 0, buffer, offset, len);

    return buffer;
  }

  /**
   * Initialize the output buffers with ZERO bytes.
   */
  static void resetOutputBuffers(ByteBuffer[] buffers, int dataLen) {
    for (ByteBuffer buffer : buffers) {
      resetBuffer(buffer, dataLen);
    }
  }

  /**
   * Initialize the output buffers with ZERO bytes.
   */
  static void resetOutputBuffers(byte[][] buffers, int[] offsets,
                                 int dataLen) {
    for (int i = 0; i < buffers.length; i++) {
      resetBuffer(buffers[i], offsets[i], dataLen);
    }
  }

  /**
   * Convert an array of this chunks to an array of ByteBuffers.  Null chunks
   * stay null; chunks flagged all-zero have their buffers zeroed in place.
   * @param chunks chunks to convertToByteArrayState into buffers
   * @return an array of ByteBuffers
   */
  static ByteBuffer[] toBuffers(ECChunk[] chunks) {
    ByteBuffer[] buffers = new ByteBuffer[chunks.length];

    ECChunk chunk;
    for (int i = 0; i < chunks.length; i++) {
      chunk = chunks[i];
      if (chunk == null) {
        buffers[i] = null;
      } else {
        buffers[i] = chunk.getBuffer();
        if (chunk.isAllZero()) {
          CoderUtil.resetBuffer(buffers[i], buffers[i].remaining());
        }
      }
    }

    return buffers;
  }

  /**
   * Clone an input bytes array as direct ByteBuffer.  The returned buffer is
   * flipped, i.e. ready to read from position 0.
   */
  static ByteBuffer cloneAsDirectByteBuffer(byte[] input, int offset, int len) {
    if (input == null) { // an input can be null, if erased or not to read
      return null;
    }

    ByteBuffer directBuffer = ByteBuffer.allocateDirect(len);
    directBuffer.put(input, offset, len);
    directBuffer.flip();

    return directBuffer;
  }

  /**
   * Get indexes array for items marked as null, either erased or
   * not to read.
   * @return indexes array, trimmed to the number of nulls found
   */
  static <T> int[] getNullIndexes(T[] inputs) {
    int[] nullIndexes = new int[inputs.length];
    int idx = 0;
    for (int i = 0; i < inputs.length; i++) {
      if (inputs[i] == null) {
        nullIndexes[idx++] = i;
      }
    }

    return Arrays.copyOf(nullIndexes, idx);
  }

  /**
   * Find the valid input from all the inputs.
   * @param inputs input buffers to look for valid input
   * @return the first valid input
   * @throws HadoopIllegalArgumentException if every input is null
   */
  static <T> T findFirstValidInput(T[] inputs) {
    for (T input : inputs) {
      if (input != null) {
        return input;
      }
    }

    throw new HadoopIllegalArgumentException(
        "Invalid inputs are found, all being null");
  }

  /**
   * Picking up indexes of valid (non-null) inputs.
   * @param inputs decoding input buffers
   * @param <T>
   * @return indexes array, trimmed to the number of non-nulls found
   */
  static <T> int[] getValidIndexes(T[] inputs) {
    int[] validIndexes = new int[inputs.length];
    int idx = 0;
    for (int i = 0; i < inputs.length; i++) {
      if (inputs[i] != null) {
        validIndexes[idx++] = i;
      }
    }

    return Arrays.copyOf(validIndexes, idx);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RawErasureEncoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RawErasureEncoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ECChunk;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
 * An abstract raw erasure encoder that's to be inherited by new encoders.
 *
 * Raw erasure coder is part of erasure codec framework, where erasure coder is
 * used to encode/decode a group of blocks (BlockGroup) according to the codec
 * specific BlockGroup layout and logic. An erasure coder extracts chunks of
 * data from the blocks and can employ various low level raw erasure coders to
 * perform encoding/decoding against the chunks.
 *
 * To distinguish from erasure coder, here raw erasure coder is used to mean the
 * low level constructs, since it only takes care of the math calculation with
 * a group of byte buffers.
 *
 * Note it mainly provides encode() calls, which should be stateless and may be
 * made thread-safe in future.
 */
@InterfaceAudience.Private
public abstract class RawErasureEncoder {

  // Coder schema (numbers of data/parity units) and behavior options.
  private final ErasureCoderOptions coderOptions;

  public RawErasureEncoder(ErasureCoderOptions coderOptions) {
    this.coderOptions = coderOptions;
  }

  /**
   * Encode with inputs and generates outputs.
   *
   * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
   * buffers are allowed.
   *
   * If the coder option ALLOW_CHANGE_INPUTS is set true (false by default), the
   * content of input buffers may change after the call, subject to concrete
   * implementation. Anyway the positions of input buffers will move forward.
   *
   * @param inputs input buffers to read data from. The buffers' remaining will
   *               be 0 after encoding
   * @param outputs output buffers to put the encoded data into, ready to read
   *                after the call
   */
  public void encode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
    ByteBufferEncodingState bbeState = new ByteBufferEncodingState(
        this, inputs, outputs);

    boolean usingDirectBuffer = bbeState.usingDirectBuffer;
    int dataLen = bbeState.encodeLength;
    if (dataLen == 0) {
      return;
    }

    // Remember input positions so they can be advanced by exactly dataLen
    // afterwards, regardless of what the concrete implementation does.
    int[] inputPositions = new int[inputs.length];
    for (int i = 0; i < inputPositions.length; i++) {
      if (inputs[i] != null) {
        inputPositions[i] = inputs[i].position();
      }
    }

    if (usingDirectBuffer) {
      doEncode(bbeState);
    } else {
      // On-heap buffers: encode through the shared backing arrays.
      ByteArrayEncodingState baeState = bbeState.convertToByteArrayState();
      doEncode(baeState);
    }

    for (int i = 0; i < inputs.length; i++) {
      if (inputs[i] != null) {
        // dataLen bytes consumed
        inputs[i].position(inputPositions[i] + dataLen);
      }
    }
  }

  /**
   * Perform the real encoding work using direct ByteBuffer.
   * @param encodingState the encoding state
   */
  protected abstract void doEncode(ByteBufferEncodingState encodingState);

  /**
   * Encode with inputs and generates outputs. More see above.
   *
   * @param inputs input buffers to read data from
   * @param outputs output buffers to put the encoded data into, ready to read
   *                after the call
   */
  public void encode(byte[][] inputs, byte[][] outputs) {
    ByteArrayEncodingState baeState = new ByteArrayEncodingState(
        this, inputs, outputs);

    int dataLen = baeState.encodeLength;
    if (dataLen == 0) {
      return;
    }

    doEncode(baeState);
  }

  /**
   * Perform the real encoding work using bytes array, supporting offsets
   * and lengths.
   * @param encodingState the encoding state
   */
  protected abstract void doEncode(ByteArrayEncodingState encodingState);

  /**
   * Encode with inputs and generates outputs. More see above.
   *
   * @param inputs input buffers to read data from
   * @param outputs output buffers to put the encoded data into, ready to read
   *                after the call
   */
  public void encode(ECChunk[] inputs, ECChunk[] outputs) {
    ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
    ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
    encode(newInputs, newOutputs);
  }

  public int getNumDataUnits() {
    return coderOptions.getNumDataUnits();
  }

  public int getNumParityUnits() {
    return coderOptions.getNumParityUnits();
  }

  public int getNumAllUnits() {
    return coderOptions.getNumAllUnits();
  }

  /**
   * Tell if direct buffer is preferred or not. It's for callers to
   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
   * bytes array. It will return false by default.
   * @return true if native buffer is preferred for performance consideration,
   *         otherwise false.
   */
  public boolean preferDirectBuffer() {
    return false;
  }

  /**
   * Allow change into input buffers or not while perform encoding/decoding.
   * @return true if it's allowed to change inputs, false otherwise
   */
  public boolean allowChangeInputs() {
    return coderOptions.allowChangeInputs();
  }

  /**
   * Allow to dump verbose info during encoding/decoding.
   * @return true if it's allowed to do verbose dump, false otherwise.
   */
  public boolean allowVerboseDump() {
    return coderOptions.allowVerboseDump();
  }

  /**
   * Should be called when release this coder. Good chance to release encoding
   * or decoding buffers
   */
  public void release() {
    // Nothing to do here.
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/ByteArrayDecodingState.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/ByteArrayDecodingState.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import java.nio.ByteBuffer;
/**
 * A utility class that maintains decoding state during a decode call using
 * byte array inputs.
 */
@InterfaceAudience.Private
class ByteArrayDecodingState extends DecodingState {
  // Input unit buffers; a null entry marks a unit erased or not to read.
  byte[][] inputs;
  // Per-input start offsets into the corresponding arrays.
  int[] inputOffsets;
  // Indexes of the erased units to reconstruct.
  int[] erasedIndexes;
  // Output buffers receiving reconstructed units, one per erased index.
  byte[][] outputs;
  // Per-output start offsets into the corresponding arrays.
  int[] outputOffsets;

  /**
   * Build and validate state from caller-supplied whole-array buffers.
   * The decode length is taken from the first non-null input; all offsets
   * are implicitly zero.
   */
  ByteArrayDecodingState(RawErasureDecoder decoder, byte[][] inputs,
                         int[] erasedIndexes, byte[][] outputs) {
    this.decoder = decoder;
    this.inputs = inputs;
    this.outputs = outputs;
    this.erasedIndexes = erasedIndexes;
    byte[] validInput = CoderUtil.findFirstValidInput(inputs);
    this.decodeLength = validInput.length;

    checkParameters(inputs, erasedIndexes, outputs);
    checkInputBuffers(inputs);
    checkOutputBuffers(outputs);

    this.inputOffsets = new int[inputs.length]; // ALL ZERO
    this.outputOffsets = new int[outputs.length]; // ALL ZERO
  }

  /**
   * Build state without validation, with explicit offsets.  Callers are
   * expected to supply consistent, pre-validated buffers.
   */
  ByteArrayDecodingState(RawErasureDecoder decoder,
                         int decodeLength,
                         int[] erasedIndexes,
                         byte[][] inputs,
                         int[] inputOffsets,
                         byte[][] outputs,
                         int[] outputOffsets) {
    this.decoder = decoder;
    this.decodeLength = decodeLength;
    this.erasedIndexes = erasedIndexes;
    this.inputs = inputs;
    this.outputs = outputs;
    this.inputOffsets = inputOffsets;
    this.outputOffsets = outputOffsets;
  }

  /**
   * Convert to a ByteBufferDecodingState when it's backed by on-heap arrays.
   * Inputs are deep-copied into direct buffers (nulls preserved); outputs are
   * freshly allocated direct buffers.  NOTE(review): decoded bytes land in
   * the new direct outputs, not in the original arrays — presumably the
   * calling decoder copies them back; verify against the caller.
   */
  ByteBufferDecodingState convertToByteBufferState() {
    ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
    ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];

    for (int i = 0; i < inputs.length; i++) {
      newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(inputs[i],
          inputOffsets[i], decodeLength);
    }

    for (int i = 0; i < outputs.length; i++) {
      newOutputs[i] = ByteBuffer.allocateDirect(decodeLength);
    }

    ByteBufferDecodingState bbdState = new ByteBufferDecodingState(decoder,
        decodeLength, erasedIndexes, newInputs, newOutputs);
    return bbdState;
  }

  /**
   * Check and ensure the buffers are of the desired length.  Also requires
   * at least numDataUnits non-null inputs, otherwise reconstruction is
   * impossible.
   * @param buffers the buffers to check
   */
  void checkInputBuffers(byte[][] buffers) {
    int validInputs = 0;

    for (byte[] buffer : buffers) {
      if (buffer == null) {
        continue;
      }

      if (buffer.length != decodeLength) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer, not of length " + decodeLength);
      }

      validInputs++;
    }

    if (validInputs < decoder.getNumDataUnits()) {
      throw new HadoopIllegalArgumentException(
          "No enough valid inputs are provided, not recoverable");
    }
  }

  /**
   * Check and ensure the buffers are of the desired length.
   * @param buffers the buffers to check
   */
  void checkOutputBuffers(byte[][] buffers) {
    for (byte[] buffer : buffers) {
      if (buffer == null) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer found, not allowing null");
      }

      if (buffer.length != decodeLength) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer not of length " + decodeLength);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/DummyRawDecoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/DummyRawDecoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
/**
 * A dummy raw decoder that does no real computation.
 * Instead, it just returns zero bytes.
 * This decoder can be used to isolate the performance issue to HDFS side logic
 * instead of codec, and is intended for test only.
 */
@InterfaceAudience.Private
public class DummyRawDecoder extends RawErasureDecoder {

  public DummyRawDecoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
  }

  /** No-op: the framework zeroes output buffers before this is invoked. */
  @Override
  protected void doDecode(ByteBufferDecodingState decodingState) {
    // Nothing to do. Output buffers have already been reset
  }

  /** No-op: the framework zeroes output buffers before this is invoked. */
  @Override
  protected void doDecode(ByteArrayDecodingState decodingState) {
    // Nothing to do. Output buffers have already been reset
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/EncodingState.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/EncodingState.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * A utility class that maintains encoding state during an encode call.
 */
@InterfaceAudience.Private
abstract class EncodingState {
  RawErasureEncoder encoder;
  int encodeLength;

  /**
   * Check and validate encoding parameters, throwing when the number of
   * input or output buffers does not match the coder's schema.
   * @param inputs input buffers to check
   * @param outputs output buffers to check
   */
  <T> void checkParameters(T[] inputs, T[] outputs) {
    final int expectedInputs = encoder.getNumDataUnits();
    if (inputs.length != expectedInputs) {
      throw new HadoopIllegalArgumentException("Invalid inputs length");
    }
    final int expectedOutputs = encoder.getNumParityUnits();
    if (outputs.length != expectedOutputs) {
      throw new HadoopIllegalArgumentException("Invalid outputs length");
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeRSRawDecoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/NativeRSRawDecoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCodeNative;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
 * A Reed-Solomon raw decoder using Intel ISA-L library.
 */
@InterfaceAudience.Private
public class NativeRSRawDecoder extends AbstractNativeRawDecoder {

  static {
    // Fail fast if the native ISA-L bridge is unavailable.
    ErasureCodeNative.checkNativeCodeLoaded();
  }

  public NativeRSRawDecoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
    // Allocate native decoder state for this (data, parity) schema.
    initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
  }

  /** Delegates the actual decode to the native ISA-L implementation. */
  @Override
  protected void performDecodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
                                   int dataLen, int[] erased,
                                   ByteBuffer[] outputs, int[] outputOffsets) {
    decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets);
  }

  /** Frees the native decoder state allocated by initImpl(). */
  @Override
  public void release() {
    destroyImpl();
  }

  /** Direct buffers avoid a heap-to-direct copy before each native call. */
  @Override
  public boolean preferDirectBuffer() {
    return true;
  }

  private native void initImpl(int numDataUnits, int numParityUnits);

  private native void decodeImpl(
      ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased,
      ByteBuffer[] outputs, int[] outputOffsets);

  private native void destroyImpl();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/XORRawErasureCoderFactory.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/XORRawErasureCoderFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ErasureCoderOptions;
/**
 * A raw coder factory for raw XOR coder.
 */
@InterfaceAudience.Private
public class XORRawErasureCoderFactory implements RawErasureCoderFactory {

  @Override
  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
    // Each call hands back a fresh pure-Java XOR encoder for the schema.
    final XORRawEncoder encoder = new XORRawEncoder(coderOptions);
    return encoder;
  }

  @Override
  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
    // Each call hands back a fresh pure-Java XOR decoder for the schema.
    final XORRawDecoder decoder = new XORRawDecoder(coderOptions);
    return decoder;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/AbstractNativeRawDecoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/AbstractNativeRawDecoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
 * Abstract native raw decoder for all native (JNI-backed) coders to extend.
 *
 * <p>Subclasses implement {@link #performDecodeImpl} which hands the buffers
 * to the native library. The native library keeps its per-instance state
 * behind the {@code nativeCoder} handle declared at the bottom of this class,
 * so decode calls on the same instance must not run concurrently; the
 * direct-buffer decode path is therefore synchronized (the same race was
 * fixed upstream in HADOOP-15499).
 */
@InterfaceAudience.Private
abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
  // final: shared logger, never reassigned.
  public static final Logger LOG =
      LoggerFactory.getLogger(AbstractNativeRawDecoder.class);

  public AbstractNativeRawDecoder(ErasureCoderOptions coderOptions) {
    super(coderOptions);
  }

  /**
   * Direct-ByteBuffer decode path.
   *
   * <p>Synchronized because the underlying native handle is mutable shared
   * state and the native implementations are not thread-safe.
   */
  @Override
  protected synchronized void doDecode(ByteBufferDecodingState decodingState) {
    int[] inputOffsets = new int[decodingState.inputs.length];
    int[] outputOffsets = new int[decodingState.outputs.length];

    // Capture the current position of every buffer so the native side knows
    // where each readable/writable region starts. Erased inputs arrive as
    // null and keep an offset of 0.
    ByteBuffer buffer;
    for (int i = 0; i < decodingState.inputs.length; ++i) {
      buffer = decodingState.inputs[i];
      if (buffer != null) {
        inputOffsets[i] = buffer.position();
      }
    }

    for (int i = 0; i < decodingState.outputs.length; ++i) {
      buffer = decodingState.outputs[i];
      outputOffsets[i] = buffer.position();
    }

    performDecodeImpl(decodingState.inputs, inputOffsets,
        decodingState.decodeLength, decodingState.erasedIndexes,
        decodingState.outputs, outputOffsets);
  }

  /**
   * Native decode hook implemented by concrete coders.
   *
   * @param inputs input buffers; erased units are null
   * @param inputOffsets starting position of each non-null input
   * @param dataLen number of bytes to decode per unit
   * @param erased indexes of the erased units within {@code inputs}
   * @param outputs buffers receiving the recovered units
   * @param outputOffsets starting position of each output
   */
  protected abstract void performDecodeImpl(ByteBuffer[] inputs,
                                            int[] inputOffsets, int dataLen,
                                            int[] erased, ByteBuffer[] outputs,
                                            int[] outputOffsets);

  /**
   * Heap byte-array fallback: converts through ByteBuffers and copies the
   * results back, which is inefficient — hence the warning pointing callers
   * at the direct-buffer API (see {@code preferDirectBuffer()}).
   */
  @Override
  protected void doDecode(ByteArrayDecodingState decodingState) {
    LOG.warn("convertToByteBufferState is invoked, " +
        "not efficiently. Please use direct ByteBuffer inputs/outputs");

    ByteBufferDecodingState bbdState = decodingState.convertToByteBufferState();
    // The synchronized direct-buffer path above does the real work.
    doDecode(bbdState);

    // Copy each recovered unit back into the caller's byte arrays.
    for (int i = 0; i < decodingState.outputs.length; i++) {
      bbdState.outputs[i].get(decodingState.outputs[i],
          decodingState.outputOffsets[i], decodingState.decodeLength);
    }
  }

  // To link with the underlying data structure in the native layer.
  // No get/set as only used by native codes.
  private long nativeCoder;
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RawErasureDecoder.java | smart-hadoop-support/smart-erasurecodec/src/main/java/org/smartdata/erasurecode/rawcoder/RawErasureDecoder.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.erasurecode.rawcoder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.smartdata.erasurecode.ECChunk;
import org.smartdata.erasurecode.ErasureCoderOptions;
import java.nio.ByteBuffer;
/**
 * An abstract raw erasure decoder that's to be inherited by new decoders.
 *
 * Raw erasure coder is part of erasure codec framework, where erasure coder is
 * used to encode/decode a group of blocks (BlockGroup) according to the codec
 * specific BlockGroup layout and logic. An erasure coder extracts chunks of
 * data from the blocks and can employ various low level raw erasure coders to
 * perform encoding/decoding against the chunks.
 *
 * To distinguish from erasure coder, here raw erasure coder is used to mean the
 * low level constructs, since it only takes care of the math calculation with
 * a group of byte buffers.
 *
 * Note it mainly provides decode() calls, which should be stateless and may be
 * made thread-safe in future.
 */
@InterfaceAudience.Private
public abstract class RawErasureDecoder {
  // Immutable coder configuration (data/parity unit counts plus the
  // allowChangeInputs / allowVerboseDump flags) shared by all decode calls
  // and exposed piecemeal through the accessors below.
  private final ErasureCoderOptions coderOptions;

  public RawErasureDecoder(ErasureCoderOptions coderOptions) {
    this.coderOptions = coderOptions;
  }

  /**
   * Decode with inputs and erasedIndexes, generates outputs.
   * How to prepare for inputs:
   * 1. Create an array containing data units + parity units. Please note the
   *    data units should be first or before the parity units.
   * 2. Set null in the array locations specified via erasedIndexes to indicate
   *    they're erased and no data are to read from;
   * 3. Set null in the array locations for extra redundant items, as they're
   *    not necessary to read when decoding. For example in RS-6-3, if only 1
   *    unit is really erased, then we have 2 extra items as redundant. They can
   *    be set as null to indicate no data will be used from them.
   *
   * For an example using RS (6, 3), assuming sources (d0, d1, d2, d3, d4, d5)
   * and parities (p0, p1, p2), d2 being erased. We can and may want to use only
   * 6 units like (d1, d3, d4, d5, p0, p2) to recover d2. We will have:
   *     inputs = [null(d0), d1, null(d2), d3, d4, d5, p0, null(p1), p2]
   *     erasedIndexes = [2] // index of d2 into inputs array
   *     outputs = [a-writable-buffer]
   *
   * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
   * buffers are allowed.
   *
   * If the coder option ALLOW_CHANGE_INPUTS is set true (false by default), the
   * content of input buffers may change after the call, subject to concrete
   * implementation.
   *
   * @param inputs input buffers to read data from. The buffers' remaining will
   *               be 0 after decoding
   * @param erasedIndexes indexes of erased units in the inputs array
   * @param outputs output buffers to put decoded data into according to
   *                erasedIndexes, ready for read after the call
   */
  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
                     ByteBuffer[] outputs) {
    // Validates buffer compatibility and derives decodeLength plus whether
    // all buffers are direct.
    ByteBufferDecodingState decodingState = new ByteBufferDecodingState(this,
        inputs, erasedIndexes, outputs);

    boolean usingDirectBuffer = decodingState.usingDirectBuffer;
    int dataLen = decodingState.decodeLength;
    if (dataLen == 0) {
      // Nothing to decode; buffer positions are left untouched.
      return;
    }

    // Snapshot input positions before decoding: concrete implementations may
    // or may not advance them, so the loop at the bottom sets each input to
    // a uniform "dataLen consumed" position afterwards.
    int[] inputPositions = new int[inputs.length];
    for (int i = 0; i < inputPositions.length; i++) {
      if (inputs[i] != null) {
        inputPositions[i] = inputs[i].position();
      }
    }

    if (usingDirectBuffer) {
      doDecode(decodingState);
    } else {
      // On-heap buffers: decode against the backing byte arrays instead.
      ByteArrayDecodingState badState = decodingState.convertToByteArrayState();
      doDecode(badState);
    }

    for (int i = 0; i < inputs.length; i++) {
      if (inputs[i] != null) {
        // dataLen bytes consumed
        inputs[i].position(inputPositions[i] + dataLen);
      }
    }
  }

  /**
   * Perform the real decoding using Direct ByteBuffer.
   * @param decodingState the decoding state
   */
  protected abstract void doDecode(ByteBufferDecodingState decodingState);

  /**
   * Decode with inputs and erasedIndexes, generates outputs. More see above.
   *
   * @param inputs input buffers to read data from
   * @param erasedIndexes indexes of erased units in the inputs array
   * @param outputs output buffers to put decoded data into according to
   *                erasedIndexes, ready for read after the call
   */
  public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs) {
    ByteArrayDecodingState decodingState = new ByteArrayDecodingState(this,
        inputs, erasedIndexes, outputs);

    if (decodingState.decodeLength == 0) {
      // Empty units: nothing to recover.
      return;
    }

    doDecode(decodingState);
  }

  /**
   * Perform the real decoding using bytes array, supporting offsets and
   * lengths.
   * @param decodingState the decoding state
   */
  protected abstract void doDecode(ByteArrayDecodingState decodingState);

  /**
   * Decode with inputs and erasedIndexes, generates outputs. More see above.
   *
   * Note, for both input and output ECChunks, no mixing of on-heap buffers and
   * direct buffers are allowed.
   *
   * @param inputs input buffers to read data from
   * @param erasedIndexes indexes of erased units in the inputs array
   * @param outputs output buffers to put decoded data into according to
   *                erasedIndexes, ready for read after the call
   */
  public void decode(ECChunk[] inputs, int[] erasedIndexes,
                     ECChunk[] outputs) {
    // Unwrap the chunks into ByteBuffers and reuse the buffer-based path.
    ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs);
    ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs);
    decode(newInputs, erasedIndexes, newOutputs);
  }

  /** @return the number of data units in the coding scheme. */
  public int getNumDataUnits() {
    return coderOptions.getNumDataUnits();
  }

  /** @return the number of parity units in the coding scheme. */
  public int getNumParityUnits() {
    return coderOptions.getNumParityUnits();
  }

  /** @return total units: data units + parity units. */
  protected int getNumAllUnits() {
    return coderOptions.getNumAllUnits();
  }

  /**
   * Tell if direct buffer is preferred or not. It's for callers to
   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
   * bytes array. It will return false by default.
   * @return true if native buffer is preferred for performance consideration,
   * otherwise false.
   */
  public boolean preferDirectBuffer() {
    return false;
  }

  /**
   * Allow change into input buffers or not while perform encoding/decoding.
   * @return true if it's allowed to change inputs, false otherwise
   */
  public boolean allowChangeInputs() {
    return coderOptions.allowChangeInputs();
  }

  /**
   * Allow to dump verbose info during encoding/decoding.
   * @return true if it's allowed to do verbose dump, false otherwise.
   */
  public boolean allowVerboseDump() {
    return coderOptions.allowVerboseDump();
  }

  /**
   * Should be called when release this coder. Good chance to release encoding
   * or decoding buffers
   */
  public void release() {
    // Nothing to do here.
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.