repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestSmartServer.java | smart-server/src/test/java/org/smartdata/server/TestSmartServer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.TestDBUtil;
import org.smartdata.metastore.utils.MetaStoreUtils;
/**
 * Base harness that boots a standalone {@link SmartServer} backed by a fresh
 * SQLite metastore for each test. Subclasses inherit the lifecycle and can
 * extend {@code initConf} behavior by shadowing fields before launch.
 */
public class TestSmartServer {
// Shared with subclasses so they can inspect/override server state.
protected SmartConf conf;
protected SmartServer ssm;
protected String dbFile;
protected String dbUrl;
// Intentionally tiny block size so multi-block files are cheap to create.
private static final int DEFAULT_BLOCK_SIZE = 100;
static {
// One-time JVM-wide hook required by the HDFS balancer test utilities.
TestBalancer.initTestSetup();
}
@Before
public void setUp() throws Exception {
conf = new SmartConf();
initConf(conf);
// Set db used
// Each run gets a unique, empty SQLite file so tests cannot interfere.
dbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
dbUrl = MetaStoreUtils.SQLITE_URL_PREFIX + dbFile;
conf.set(SmartConfKeys.SMART_METASTORE_DB_URL_KEY, dbUrl);
// rpcServer start in SmartServer
ssm = SmartServer.launchWith(conf);
}
// Hook point for configuration tweaks; deliberately empty in the base class.
private void initConf(Configuration conf) {
}
@Test
public void test() throws InterruptedException {
// Smoke test: passing means setUp launched the server without throwing.
//Thread.sleep(1000000);
}
@After
public void cleanUp() {
// Guard against setUp having failed before the server was assigned.
if (ssm != null) {
ssm.shutdown();
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestCopy2S3Scheduler.java | smart-server/src/test/java/org/smartdata/server/TestCopy2S3Scheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.RuleState;
import org.smartdata.model.S3FileState;
import java.util.ArrayList;
import java.util.List;
public class TestCopy2S3Scheduler extends MiniSmartClusterHarness {
@Test(timeout = 45000)
public void testDir() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
dfs.mkdirs(new Path(srcPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
1024, (short) 1, 1);
}
long ruleId = admin.submitRule(
"file: path matches \"/src/*\"| copy2s3 -dest s3a://xxxctest/dest/",
RuleState.ACTIVE);
List<ActionInfo> actions;
do {
actions = metaStore.getActions(ruleId, 0);
Thread.sleep(1000);
} while (actions.size() < 3);
}
@Test(timeout = 45000)
public void testZeroLength() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
dfs.mkdirs(new Path(srcPath));
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
0, (short) 1, 1);
}
long ruleId = admin.submitRule(
"file: path matches \"/src/*\"| copy2s3 -dest s3a://xxxctest/dest/",
RuleState.ACTIVE);
Thread.sleep(2500);
List<ActionInfo> actions = metaStore.getActions(ruleId, 0);
Assert.assertEquals(actions.size(), 0);
}
@Test(timeout = 45000)
public void testOnS3() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
DistributedFileSystem dfs = cluster.getFileSystem();
final String srcPath = "/src/";
dfs.mkdirs(new Path(srcPath));
List<String> sps = new ArrayList<>();
// Write to src
for (int i = 0; i < 3; i++) {
// Create test files
// Not 0 because this file may be not be truncated yet
sps.add(srcPath + i);
DFSTestUtil.createFile(dfs, new Path(srcPath + i),
10, (short) 1, 1);
}
do {
Thread.sleep(1000);
if (metaStore.getFilesByPaths(sps).size() == sps.size()) {
break;
}
} while (true);
for (String p : sps) {
metaStore.insertUpdateFileState(new S3FileState(p));
}
long ruleId = admin.submitRule(
"file: path matches \"/src/*\"| copy2s3 -dest s3a://xxxctest/dest/",
RuleState.ACTIVE);
Thread.sleep(2500);
List<ActionInfo> actions = metaStore.getActions(ruleId, 0);
Assert.assertEquals(0, actions.size());
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestSmartClient.java | smart-server/src/test/java/org/smartdata/server/TestSmartClient.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.client.SmartClient;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.FileState;
import org.smartdata.model.NormalFileState;
/**
 * Tests for {@link SmartClient}: file-state queries and the
 * ignore/cover directory filters.
 */
public class TestSmartClient extends MiniSmartClusterHarness {

  @Test
  public void testGetFileState() throws Exception {
    waitTillSSMExitSafeMode();
    MetaStore metaStore = ssm.getMetaStore();
    String path = "/file1";
    FileState expected = new NormalFileState(path);
    SmartClient client = new SmartClient(smartContext.getConf());
    // With no row in the file_state table the client falls back to the
    // Normal state by default.
    Assert.assertEquals(expected, client.getFileState(path));
    // After persisting the state explicitly the answer is unchanged.
    metaStore.insertUpdateFileState(expected);
    Assert.assertEquals(expected, client.getFileState(path));
  }

  @Test
  public void testDataIgnoreAndCover() throws Exception {
    waitTillSSMExitSafeMode();
    // Configuration can also be used for initializing SmartClient.
    Configuration conf = new Configuration();
    conf.set(SmartConfKeys.SMART_IGNORE_DIRS_KEY, "/test1");
    conf.set(SmartConfKeys.SMART_COVER_DIRS_KEY, "/test2");
    SmartClient client = new SmartClient(conf);
    boolean ignored = client.shouldIgnore("/test1/a.txt");
    Assert.assertTrue("This test file should be ignored", ignored);
    boolean covered = client.shouldIgnore("/test2/b.txt");
    Assert.assertFalse("This test file should not be ignored", covered);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestSmartServerReConfig.java | smart-server/src/test/java/org/smartdata/server/TestSmartServerReConfig.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.SmartServiceState;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.TestDBUtil;
import org.smartdata.metastore.utils.MetaStoreUtils;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
/**
 * Verifies that a SmartServer launched with DFS support disabled starts in
 * DISABLED state and can be reconfigured and enabled at runtime, reaching
 * ACTIVE state without a restart.
 */
public class TestSmartServerReConfig {
  protected SmartConf conf;
  protected MiniDFSCluster cluster;
  protected SmartServer ssm;
  protected String dbFile;
  protected String dbUrl;
  // Tiny block size keeps the mini cluster fast.
  private static final int DEFAULT_BLOCK_SIZE = 100;

  // NOTE: despite the name this is the test itself, not a @Before hook;
  // the name is kept for compatibility with existing run configurations.
  @Test
  public void setUp() throws Exception {
    try {
      conf = new SmartConf();
      initConf(conf);
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(3)
          .build();
      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
      List<URI> uriList = new ArrayList<>(namenodes);
      conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, uriList.get(0).toString());
      conf.set(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY,
          uriList.get(0).toString());
      // Set db used
      dbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
      dbUrl = MetaStoreUtils.SQLITE_URL_PREFIX + dbFile;
      conf.set(SmartConfKeys.SMART_METASTORE_DB_URL_KEY, dbUrl);
      // Launch the server with DFS support turned off so it comes up DISABLED.
      SmartConf serverConf = new SmartConf();
      serverConf.set(SmartConfKeys.SMART_DFS_ENABLED, "false");
      serverConf.set(SmartConfKeys.SMART_METASTORE_DB_URL_KEY, dbUrl);
      // rpcServer start in SmartServer
      ssm = SmartServer.launchWith(serverConf);
      Assert.assertNotNull(ssm);
      Thread.sleep(2000);
      // assertEquals gives an informative message on failure (the original
      // assertTrue(x == y) reported only "expected true").
      Assert.assertEquals(SmartServiceState.DISABLED, ssm.getSSMServiceState());
      // Point the running server at the namenode, then enable it.
      serverConf.set(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY,
          uriList.get(0).toString());
      ssm.enable();
      Thread.sleep(2000);
      Assert.assertEquals(SmartServiceState.ACTIVE, ssm.getSSMServiceState());
    } finally {
      // Always release the server and mini cluster, even on assertion failure.
      if (ssm != null) {
        ssm.shutdown();
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  /** Configure the mini DFS cluster with small blocks and fast heartbeats. */
  private void initConf(Configuration conf) {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestSmartAdmin.java | smart-server/src/test/java/org/smartdata/server/TestSmartAdmin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.model.ActionDescriptor;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import java.io.IOException;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
/**
 * End-to-end exercise of the {@link SmartAdmin} client API: rule CRUD and
 * state transitions, cmdlet/action queries, and behavior after close.
 */
public class TestSmartAdmin extends MiniSmartClusterHarness {

  @Test
  public void test() throws Exception {
    waitTillSSMExitSafeMode();
    SmartAdmin admin = null;
    try {
      admin = new SmartAdmin(smartContext.getConf());

      // test listRulesInfo and submitRule: submitting one rule grows the
      // listing by exactly one.
      List<RuleInfo> ruleInfos = admin.listRulesInfo();
      int ruleCounts0 = ruleInfos.size();
      long ruleId = admin.submitRule(
          "file: every 5s | path matches \"/foo*\"| cache",
          RuleState.DRYRUN);
      ruleInfos = admin.listRulesInfo();
      int ruleCounts1 = ruleInfos.size();
      assertEquals(1, ruleCounts1 - ruleCounts0);

      // test checkRule: a valid rule passes silently ...
      admin.checkRule("file: every 5s | path matches \"/foo*\"| cache");
      // ... and a malformed one raises IOException.
      boolean caughtException = false;
      try {
        admin.checkRule("file.path");
      } catch (IOException e) {
        caughtException = true;
      }
      assertTrue(caughtException);

      // test getRuleInfo
      RuleInfo ruleInfo = admin.getRuleInfo(ruleId);
      assertNotEquals(null, ruleInfo);

      // test disableRule
      admin.disableRule(ruleId, true);
      assertEquals(RuleState.DISABLED, admin.getRuleInfo(ruleId).getState());

      // test activateRule
      admin.activateRule(ruleId);
      assertEquals(RuleState.ACTIVE, admin.getRuleInfo(ruleId).getState());

      // test deleteRule (soft delete: the rule remains queryable as DELETED)
      admin.deleteRule(ruleId, true);
      assertEquals(RuleState.DELETED, admin.getRuleInfo(ruleId).getState());

      // test cmdletInfo: submitted parameters round-trip unchanged.
      long id = admin.submitCmdlet("cache -file /foo*");
      CmdletInfo cmdletInfo = admin.getCmdletInfo(id);
      // assertEquals over assertTrue(equals) for a diagnostic failure message
      assertEquals("cache -file /foo*", cmdletInfo.getParameters());

      // test actioninfo: the cmdlet spawned at least one action linked back
      // to it.
      List<Long> aidlist = cmdletInfo.getAids();
      assertNotEquals(0, aidlist.size());
      ActionInfo actionInfo = admin.getActionInfo(aidlist.get(0));
      assertEquals(id, actionInfo.getCmdletId());

      // test listActionInfoOfLastActions
      admin.listActionInfoOfLastActions(2);
      List<ActionDescriptor> actions = admin.listActionsSupported();
      assertTrue(actions.size() > 0);

      // test client close: further calls must fail.
      admin.close();
      try {
        admin.getRuleInfo(ruleId);
        Assert.fail("Should fail because admin has closed.");
      } catch (IOException e) {
        // expected: the connection is gone
      }
      admin = null;
    } finally {
      if (admin != null) {
        admin.close();
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/TestSmartServerCli.java | smart-server/src/test/java/org/smartdata/server/TestSmartServerCli.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.hadoop.hdfs.DFSUtil;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.MiniClusterHarness;
import org.smartdata.metastore.TestDBUtil;
import org.smartdata.metastore.utils.MetaStoreUtils;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * Command-line launch tests for SmartServer: starting without an explicit
 * namenode RPC address, supplying one via {@code -D}, and printing help.
 */
public class TestSmartServerCli extends MiniClusterHarness {

  @Test
  public void testConfNameNodeRPCAddr() throws Exception {
    try {
      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(smartContext.getConf());
      List<URI> uriList = new ArrayList<>(namenodes);
      String nnUri = uriList.get(0).toString();

      SmartConf conf = new SmartConf();
      // Back the server with a unique empty SQLite metastore.
      String dbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
      String dbUrl = MetaStoreUtils.SQLITE_URL_PREFIX + dbFile;
      conf.set(SmartConfKeys.SMART_METASTORE_DB_URL_KEY, dbUrl);

      // Launch with no namenode configured: this must succeed.
      SmartServer ssm = null;
      try {
        ssm = SmartServer.launchWith(conf);
        Thread.sleep(2000);
      } catch (Exception e) {
        Assert.fail("Should work without specifying NN");
      } finally {
        if (ssm != null) {
          ssm.shutdown();
        }
      }

      conf.set(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY, nnUri);
      // Launch again, this time passing the namenode via a -D CLI override.
      String[] args = {
          "-D",
          SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY + "=" + nnUri
      };
      SmartServer regServer = SmartServer.launchWith(args, conf);
      Assert.assertNotNull(regServer);
      Thread.sleep(1000);
      regServer.shutdown();

      // "-h" prints usage and returns without starting a server.
      SmartServer.launchWith(new String[] {"-h"}, conf);
    } finally {
      cluster.shutdown();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/util/TestJsonUtil.java | smart-server/src/test/java/org/smartdata/server/util/TestJsonUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.util;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
/**
 * Round-trip test for serializing a {@code Map<String, String>} to JSON and
 * back via Gson, including keys/values containing JSON metacharacters.
 */
public class TestJsonUtil {

  @Test
  public void testTransitionBetweenMapAndString() throws Exception {
    Map<String, String> mapParams = new HashMap<>();
    // Values/keys deliberately contain quotes, braces, and escapes to stress
    // Gson's escaping on the way out and parsing on the way back.
    mapParams.put("id", "avcde@#$%^^&~!@#$%^&*()3,./;'[]\\<>?:\"{}|\"");
    mapParams.put("k:[{'", "1024");
    String jsonString = toJsonString(mapParams);
    Map<String, String> mapRevert = toStringStringMap(jsonString);
    // Map.equals checks sizes, keys, and values in one assertion and reports
    // the full diff on failure (the original compared size + each key by hand).
    Assert.assertEquals(mapParams, mapRevert);
  }

  /** Serializes the map to a JSON object string. */
  public String toJsonString(Map<String, String> map) {
    Gson gson = new Gson();
    return gson.toJson(map);
  }

  /** Parses a JSON object string back into a string-to-string map. */
  public Map<String, String> toStringStringMap(String jsonString) {
    Gson gson = new Gson();
    Map<String, String> res = gson.fromJson(jsonString,
        new TypeToken<Map<String, String>>(){}.getType());
    return res;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/TestCmdletManager.java | smart-server/src/test/java/org/smartdata/server/engine/TestCmdletManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.smartdata.action.ActionRegistry;
import org.smartdata.conf.SmartConf;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.CmdletStatusUpdate;
import org.smartdata.protocol.message.StatusReport;
import org.smartdata.server.MiniSmartClusterHarness;
import org.smartdata.server.engine.cmdlet.CmdletDispatcher;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Tests for {@code CmdletManager}: creating actions from descriptors,
 * submitting/listing/deleting cmdlets against a mini cluster, driving the
 * manager with a mocked MetaStore and dispatcher, and reloading persisted
 * cmdlets from the DB on init. Timing here relies on sleeps/polling, so the
 * exact statement order matters; do not reorder casually.
 */
public class TestCmdletManager extends MiniSmartClusterHarness {
@Rule public ExpectedException thrown = ExpectedException.none();
// Builds a descriptor for the given cmdlet string, tagged with rule id 1.
private CmdletDescriptor generateCmdletDescriptor(String cmd) throws Exception {
CmdletDescriptor cmdletDescriptor = new CmdletDescriptor(cmd);
cmdletDescriptor.setRuleId(1);
return cmdletDescriptor;
}
@Test
public void testCreateFromDescriptor() throws Exception {
waitTillSSMExitSafeMode();
// A three-action cmdlet string must yield exactly three ActionInfos.
String cmd =
"allssd -file /testMoveFile/file1 ; cache -file /testCacheFile ; "
+ "write -file /test -length 1024";
CmdletDescriptor cmdletDescriptor = generateCmdletDescriptor(cmd);
List<ActionInfo> actionInfos = ssm.getCmdletManager().createActionInfos(cmdletDescriptor, 0);
Assert.assertTrue(cmdletDescriptor.getActionSize() == actionInfos.size());
}
@Test
public void testSubmitAPI() throws Exception {
waitTillSSMExitSafeMode();
DistributedFileSystem dfs = cluster.getFileSystem();
Path dir = new Path("/testMoveFile");
dfs.mkdirs(dir);
// Move to SSD
dfs.setStoragePolicy(dir, "HOT");
FSDataOutputStream out1 = dfs.create(new Path("/testMoveFile/file1"), true, 1024);
out1.writeChars("/testMoveFile/file1");
out1.close();
Path dir3 = new Path("/testCacheFile");
dfs.mkdirs(dir3);
Assert.assertTrue(ActionRegistry.supportedActions().size() > 0);
CmdletManager cmdletManager = ssm.getCmdletManager();
// Submit a three-action cmdlet and wait for it to run to completion.
long cmdId = cmdletManager.submitCmdlet(
"allssd -file /testMoveFile/file1 ; cache -file /testCacheFile ; "
+ "write -file /test -length 1024");
Thread.sleep(1200);
List<ActionInfo> actionInfos = cmdletManager.listNewCreatedActions(10);
Assert.assertTrue(actionInfos.size() > 0);
// Poll until DONE; any other terminal state (FAILED, etc.) fails the test.
while (true) {
CmdletState state = cmdletManager.getCmdletInfo(cmdId).getState();
if (state == CmdletState.DONE) {
break;
}
Assert.assertFalse(CmdletState.isTerminalState(state));
System.out.printf("Cmdlet still running.\n");
Thread.sleep(1000);
}
// The completed cmdlet and its three actions must be persisted.
List<CmdletInfo> com = ssm.getMetaStore().getCmdlets(null, null, CmdletState.DONE);
Assert.assertTrue(com.size() >= 1);
List<ActionInfo> result = ssm.getMetaStore().getActions(null, null);
Assert.assertTrue(result.size() == 3);
}
@Test
public void wrongCmdlet() throws Exception {
waitTillSSMExitSafeMode();
CmdletManager cmdletManager = ssm.getCmdletManager();
// A cmdlet containing an unknown action ("bug") must be rejected up front
// and create no actions.
try {
cmdletManager.submitCmdlet(
"allssd -file /testMoveFile/file1 ; cache -file /testCacheFile ; bug /bug bug bug");
} catch (IOException e) {
System.out.println("Wrong cmdlet is detected!");
Assert.assertTrue(true);
}
Thread.sleep(1200);
List<ActionInfo> actionInfos = cmdletManager.listNewCreatedActions(10);
Assert.assertTrue(actionInfos.size() == 0);
}
@Test
public void testGetListDeleteCmdlet() throws Exception {
waitTillSSMExitSafeMode();
MetaStore metaStore = ssm.getMetaStore();
String cmd =
"allssd -file /testMoveFile/file1 ; cache -file /testCacheFile ; "
+ "write -file /test -length 1024";
CmdletDescriptor cmdletDescriptor = generateCmdletDescriptor(cmd);
// Insert one PENDING cmdlet directly into the metastore, then verify the
// manager can list it, fetch it, and delete it.
CmdletInfo cmdletInfo =
new CmdletInfo(
0,
cmdletDescriptor.getRuleId(),
CmdletState.PENDING,
cmdletDescriptor.getCmdletString(),
123178333L,
232444994L);
CmdletInfo[] cmdlets = {cmdletInfo};
metaStore.insertCmdlets(cmdlets);
CmdletManager cmdletManager = ssm.getCmdletManager();
Assert.assertTrue(cmdletManager.listCmdletsInfo(1, null).size() == 1);
Assert.assertTrue(cmdletManager.getCmdletInfo(0) != null);
cmdletManager.deleteCmdlet(0);
Assert.assertTrue(cmdletManager.listCmdletsInfo(1, null).size() == 0);
}
@Test
public void testWithoutCluster() throws MetaStoreException, IOException, InterruptedException {
// Drives CmdletManager with a mocked MetaStore and dispatcher only — no
// HDFS cluster. Walks one cmdlet ("echo") through its status lifecycle:
// submit -> running action -> EXECUTING -> finished action -> DONE.
long cmdletId = 10;
long actionId = 101;
MetaStore metaStore = mock(MetaStore.class);
Assert.assertNotNull(metaStore);
// Manager seeds its id counters from these max values.
when(metaStore.getMaxCmdletId()).thenReturn(cmdletId);
when(metaStore.getMaxActionId()).thenReturn(actionId);
CmdletDispatcher dispatcher = mock(CmdletDispatcher.class);
Assert.assertNotNull(dispatcher);
when(dispatcher.canDispatchMore()).thenReturn(true);
ServerContext serverContext = new ServerContext(new SmartConf(), metaStore);
serverContext.setServiceMode(ServiceMode.HDFS);
CmdletManager cmdletManager = new CmdletManager(serverContext);
cmdletManager.init();
cmdletManager.setDispatcher(dispatcher);
cmdletManager.start();
cmdletManager.submitCmdlet("echo");
// Give the background flusher time to persist the new cmdlet/action once.
Thread.sleep(500);
verify(metaStore, times(1)).insertCmdlets(any(CmdletInfo[].class));
verify(metaStore, times(1)).insertActions(any(ActionInfo[].class));
Thread.sleep(500);
long startTime = System.currentTimeMillis();
// Report the action as started (not yet finished).
ActionStatus actionStatus = new ActionStatus(cmdletId, true, actionId, startTime, null);
StatusReport statusReport = new StatusReport(Arrays.asList(actionStatus));
cmdletManager.updateStatus(statusReport);
ActionInfo actionInfo = cmdletManager.getActionInfo(actionId);
CmdletInfo cmdletInfo = cmdletManager.getCmdletInfo(cmdletId);
Assert.assertNotNull(actionInfo);
cmdletManager.updateStatus(
new CmdletStatusUpdate(cmdletId, System.currentTimeMillis(), CmdletState.EXECUTING));
CmdletInfo info = cmdletManager.getCmdletInfo(cmdletId);
Assert.assertNotNull(info);
Assert.assertEquals(info.getParameters(), "echo");
Assert.assertEquals(info.getAids().size(), 1);
Assert.assertTrue(info.getAids().get(0) == actionId);
Assert.assertEquals(info.getState(), CmdletState.EXECUTING);
// Now report the action finished successfully; the cmdlet should
// transition to DONE and the records be flushed a second time.
long finishTime = System.currentTimeMillis();
actionStatus = new ActionStatus(cmdletId, true, actionId, null, startTime,
finishTime, null, true);
statusReport = new StatusReport(Arrays.asList(actionStatus));
cmdletManager.updateStatus(statusReport);
Assert.assertTrue(actionInfo.isFinished());
Assert.assertTrue(actionInfo.isSuccessful());
Assert.assertEquals(actionInfo.getCreateTime(), startTime);
Assert.assertEquals(actionInfo.getFinishTime(), finishTime);
Assert.assertEquals(cmdletInfo.getState(), CmdletState.DONE);
cmdletManager.updateStatus(
new CmdletStatusUpdate(cmdletId, System.currentTimeMillis(), CmdletState.DONE));
Assert.assertEquals(info.getState(), CmdletState.DONE);
Thread.sleep(500);
verify(metaStore, times(2)).insertCmdlets(any(CmdletInfo[].class));
verify(metaStore, times(2)).insertActions(any(ActionInfo[].class));
cmdletManager.stop();
}
@Test(timeout = 40000)
public void testReloadCmdletsInDB() throws Exception {
waitTillSSMExitSafeMode();
CmdletManager cmdletManager = ssm.getCmdletManager();
// Stop cmdletmanager
// cmdletManager.stop();
// Short timeout so the stale DISPATCHED cmdlet is failed quickly on reload.
cmdletManager.setTimeout(1000);
MetaStore metaStore = ssm.getMetaStore();
String cmd = "write -file /test -length 1024; read -file /test";
CmdletDescriptor cmdletDescriptor = generateCmdletDescriptor(cmd);
long submitTime = System.currentTimeMillis();
// Cmdlet 0 simulates one that was mid-dispatch when the server stopped;
// on reload it should time out and be marked FAILED.
CmdletInfo cmdletInfo0 =
new CmdletInfo(
0,
cmdletDescriptor.getRuleId(),
CmdletState.DISPATCHED,
cmdletDescriptor.getCmdletString(),
submitTime,
submitTime);
// Cmdlet 1 is still PENDING; on reload it should be executed to DONE.
CmdletInfo cmdletInfo1 =
new CmdletInfo(
1,
cmdletDescriptor.getRuleId(),
CmdletState.PENDING,
cmdletDescriptor.getCmdletString(),
submitTime,
submitTime);
List<ActionInfo> actionInfos0 =
cmdletManager.createActionInfos(cmdletDescriptor, cmdletInfo0.getCid());
flushToDB(metaStore, actionInfos0, cmdletInfo0);
List<ActionInfo> actionInfos1 =
cmdletManager.createActionInfos(cmdletDescriptor, cmdletInfo1.getCid());
flushToDB(metaStore, actionInfos1, cmdletInfo1);
// init cmdletmanager
cmdletManager.init();
// cmdletManager.start();
CmdletInfo cmdlet0 = cmdletManager.getCmdletInfo(cmdletInfo0.getCid());
CmdletInfo cmdlet1 = cmdletManager.getCmdletInfo(cmdletInfo1.getCid());
// Wait until both reach their expected terminal states (test timeout
// bounds this loop).
while (cmdlet0.getState() != CmdletState.FAILED && cmdlet1.getState() != CmdletState.DONE) {
Thread.sleep(100);
}
}
// Links each action to the cmdlet, then persists the cmdlet and its actions.
public void flushToDB(MetaStore metaStore,
List<ActionInfo> actionInfos, CmdletInfo cmdletInfo) throws Exception{
for (ActionInfo actionInfo: actionInfos) {
cmdletInfo.addAction(actionInfo.getActionId());
}
metaStore.insertCmdlet(cmdletInfo);
metaStore.insertActions(actionInfos.toArray(new ActionInfo[actionInfos.size()]));
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/rule/TestGetRuleInfo.java | smart-server/src/test/java/org/smartdata/server/engine/rule/TestGetRuleInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.server.MiniSmartClusterHarness;
import java.io.IOException;
import java.util.List;
public class TestGetRuleInfo extends MiniSmartClusterHarness {

  /**
   * Submits a single rule and verifies that {@code getRuleInfo} returns it,
   * that its checked counter advances while ACTIVE, and that an unknown rule
   * id raises an {@link IOException}.
   */
  @Test
  public void testGetSingleRuleInfo() throws Exception {
    waitTillSSMExitSafeMode();
    String rule = "file: every 1s \n | length > 10 | cache";
    SmartAdmin client = new SmartAdmin(smartContext.getConf());
    long ruleId = client.submitRule(rule, RuleState.ACTIVE);
    RuleInfo info1 = client.getRuleInfo(ruleId);
    System.out.println(info1);
    Assert.assertEquals(rule, info1.getRuleText());
    RuleInfo infoTemp = info1;
    // An ACTIVE "every 1s" rule should be re-checked roughly once per second.
    for (int i = 0; i < 3; i++) {
      Thread.sleep(1000);
      infoTemp = client.getRuleInfo(ruleId);
      System.out.println(infoTemp);
    }
    Assert.assertTrue(infoTemp.getNumChecked() >= info1.getNumChecked() + 2);

    long fakeRuleId = 10999999999L;
    try {
      client.getRuleInfo(fakeRuleId);
      Assert.fail("Should raise an exception when using an invalid rule id");
    } catch (IOException e) {
      // Expected: no rule with this id exists.
    }
  }

  /** Submits the same rule several times and verifies all are listed. */
  @Test
  public void testMultiRules() throws Exception {
    waitTillSSMExitSafeMode();
    String rule = "file: every 1s \n | length > 10 | cache";
    SmartAdmin client = new SmartAdmin(smartContext.getConf());
    int nRules = 10;
    for (int i = 0; i < nRules; i++) {
      client.submitRule(rule, RuleState.ACTIVE);
    }
    List<RuleInfo> ruleInfos = client.listRulesInfo();
    for (RuleInfo info : ruleInfos) {
      System.out.println(info);
    }
    Assert.assertEquals(nRules, ruleInfos.size());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleManager.java | smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.conf.SmartConf;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.FileInfo;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.server.engine.RuleManager;
import org.smartdata.server.engine.ServerContext;
import org.smartdata.server.engine.ServiceMode;
import java.io.IOException;
import java.util.List;
import java.util.Random;
/**
 * Tests for the {@code RuleManager} service: rule submission in different
 * initial states, enable/disable/delete transitions, auto-finishing rules,
 * and concurrent counter updates / state changes.
 */
public class TestRuleManager extends TestDaoUtil {
  private RuleManager ruleManager;
  private MetaStore metaStore;
  private SmartConf smartConf;

  @Before
  public void init() throws Exception {
    initDao();
    smartConf = new SmartConf();
    metaStore = new MetaStore(druidPool);
    ServerContext serverContext = new ServerContext(smartConf, metaStore);
    serverContext.setServiceMode(ServiceMode.HDFS);
    ruleManager = new RuleManager(serverContext, null, null);
    ruleManager.init();
    ruleManager.start();
  }

  @After
  public void close() throws Exception {
    ruleManager.stop();
    ruleManager = null;
    metaStore = null;
    closeDao();
  }

  /** An ACTIVE "every 1s" rule should keep being checked while it runs. */
  @Test
  public void testSubmitNewActiveRule() throws Exception {
    String rule = "file: every 1s \n | accessCount(5s) > 3 | cache";
    long id = ruleManager.submitRule(rule, RuleState.ACTIVE);
    RuleInfo ruleInfo = ruleManager.getRuleInfo(id);
    Assert.assertTrue(ruleInfo.getRuleText().equals(rule));
    RuleInfo info = ruleInfo;
    for (int i = 0; i < 5; i++) {
      Thread.sleep(1000);
      info = ruleManager.getRuleInfo(id);
      System.out.println(info);
    }
    Assert.assertTrue(info.getNumChecked()
        - ruleInfo.getNumChecked() > 3);
  }

  /** Submitting a rule whose initial state is DELETED must be rejected. */
  @Test
  public void testSubmitDeletedRule() throws Exception {
    String rule = "file: every 1s \n | length > 300 | cache";
    try {
      ruleManager.submitRule(rule, RuleState.DELETED);
      // Bug fix: previously the test passed silently when no exception was
      // thrown, so a regression in initState validation went undetected.
      Assert.fail("Should not accept a rule with initial state DELETED");
    } catch (IOException e) {
      Assert.assertTrue(e.getMessage().contains("Invalid initState"));
    }
  }

  /** A DISABLED rule must not be checked at all. */
  @Test
  public void testSubmitNewDisabledRule() throws Exception {
    String rule = "file: every 1s \n | length > 300 | cache";
    long id = ruleManager.submitRule(rule, RuleState.DISABLED);
    RuleInfo ruleInfo = ruleManager.getRuleInfo(id);
    Assert.assertTrue(ruleInfo.getRuleText().equals(rule));
    RuleInfo info = ruleInfo;
    for (int i = 0; i < 5; i++) {
      Thread.sleep(1000);
      info = ruleManager.getRuleInfo(id);
      System.out.println(info);
    }
    Assert.assertTrue(info.getNumChecked()
        - ruleInfo.getNumChecked() == 0);
  }

  /** A rule with a bounded time range ("now to now + 2s") finishes itself. */
  @Test
  public void testSubmitAutoEndsRule() throws Exception {
    String rule = "file: every 1s from now to now + 2s \n | "
        + "length > 300 | cache";
    long id = ruleManager.submitRule(rule, RuleState.ACTIVE);
    RuleInfo ruleInfo = ruleManager.getRuleInfo(id);
    Assert.assertTrue(ruleInfo.getRuleText().equals(rule));
    RuleInfo info = ruleInfo;
    for (int i = 0; i < 5; i++) {
      Thread.sleep(1000);
      info = ruleManager.getRuleInfo(id);
      System.out.println(info);
    }
    Assert.assertTrue(info.getState() == RuleState.FINISHED);
    Assert.assertTrue(info.getNumChecked()
        - ruleInfo.getNumChecked() <= 3);
  }

  /** Deleting an active rule stops further checks and marks it DELETED. */
  @Test
  public void testStopRule() throws Exception {
    String rule = "file: every 1s from now to now + 100s \n | "
        + "length > 300 | cache";
    long id = ruleManager.submitRule(rule, RuleState.ACTIVE);
    RuleInfo ruleInfo = ruleManager.getRuleInfo(id);
    Assert.assertTrue(ruleInfo.getRuleText().equals(rule));
    RuleInfo info = ruleInfo;
    for (int i = 0; i < 2; i++) {
      Thread.sleep(1000);
      info = ruleManager.getRuleInfo(id);
      System.out.println(info);
    }
    ruleManager.deleteRule(ruleInfo.getId(), true);
    Thread.sleep(3000);
    RuleInfo endInfo = ruleManager.getRuleInfo(info.getId());
    System.out.println(endInfo);
    Assert.assertTrue(endInfo.getState() == RuleState.DELETED);
    Assert.assertTrue(endInfo.getNumChecked()
        - info.getNumChecked() <= 1);
  }

  /** Disabling pauses checking; re-activating resumes it. */
  @Test
  public void testResumeRule() throws Exception {
    String rule = "file: every 1s from now to now + 100s \n | "
        + "length > 300 | cache";
    long id = ruleManager.submitRule(rule, RuleState.ACTIVE);
    RuleInfo ruleInfo = ruleManager.getRuleInfo(id);
    Assert.assertTrue(ruleInfo.getRuleText().equals(rule));
    RuleInfo info = ruleInfo;
    for (int i = 0; i < 2; i++) {
      Thread.sleep(1000);
      info = ruleManager.getRuleInfo(id);
      System.out.println(info);
    }
    Assert.assertTrue(info.getNumChecked()
        > ruleInfo.getNumChecked());
    ruleManager.disableRule(ruleInfo.getId(), true);
    Thread.sleep(1000);
    RuleInfo info2 = ruleManager.getRuleInfo(id);
    // While disabled, the checked counter must not advance.
    for (int i = 0; i < 3; i++) {
      Thread.sleep(1000);
      info = ruleManager.getRuleInfo(id);
      System.out.println(info);
    }
    Assert.assertTrue(info.getNumChecked()
        == info2.getNumChecked());
    RuleInfo info3 = info;
    ruleManager.activateRule(ruleInfo.getId());
    // After re-activation, checking resumes.
    for (int i = 0; i < 3; i++) {
      Thread.sleep(1000);
      info = ruleManager.getRuleInfo(id);
      System.out.println(info);
    }
    Assert.assertTrue(info.getNumChecked()
        > info3.getNumChecked());
  }

  /** Rule ids must increase monotonically, also across deletions. */
  @Test
  public void testSubmitNewMultiRules() throws Exception {
    String rule = "file: every 1s \n | length > 300 | cache";
    // id increasing
    int nRules = 3;
    long[] ids = new long[nRules];
    for (int i = 0; i < nRules; i++) {
      ids[i] = ruleManager.submitRule(rule, RuleState.DISABLED);
      System.out.println(ruleManager.getRuleInfo(ids[i]));
      if (i > 0) {
        Assert.assertTrue(ids[i] - ids[i - 1] == 1);
      }
    }
    for (int i = 0; i < nRules; i++) {
      ruleManager.deleteRule(ids[i], true);
      RuleInfo info = ruleManager.getRuleInfo(ids[i]);
      Assert.assertTrue(info.getState() == RuleState.DELETED);
    }
    // New submissions must get ids strictly beyond the deleted ones.
    long[] ids2 = new long[nRules];
    for (int i = 0; i < nRules; i++) {
      ids2[i] = ruleManager.submitRule(rule, RuleState.DISABLED);
      System.out.println(ruleManager.getRuleInfo(ids2[i]));
      if (i > 0) {
        Assert.assertTrue(ids2[i] - ids2[i - 1] == 1);
      }
      Assert.assertTrue(ids2[i] > ids[nRules - 1]);
    }
    System.out.println("\nFinal state:");
    List<RuleInfo> allRules = ruleManager.listRulesInfo();
    // Deleted rules are not included in the list
    Assert.assertTrue(allRules.size() == nRules);
    for (RuleInfo info : allRules) {
      System.out.println(info);
    }
  }

  /** Concurrent updaters must observe a consistent checked/generated count. */
  @Test
  public void testMultiThreadUpdate() throws Exception {
    String rule = "file: every 1s \n | length > 10 | cache";
    long now = System.currentTimeMillis();
    long rid = ruleManager.submitRule(rule, RuleState.DISABLED);
    ruleManager.updateRuleInfo(rid, null, now, 1, 1);
    long start = System.currentTimeMillis();
    Thread[] threads = new Thread[] {
        new Thread(new RuleInfoUpdater(rid, 3)),
        // new Thread(new RuleInfoUpdater(rid, 7)),
        // new Thread(new RuleInfoUpdater(rid, 11)),
        new Thread(new RuleInfoUpdater(rid, 17))};
    for (Thread t : threads) {
      t.start();
    }
    for (Thread t : threads) {
      t.join();
    }
    long end = System.currentTimeMillis();
    System.out.println("Time used = " + (end - start) + " ms");
    RuleInfo res = ruleManager.getRuleInfo(rid);
    System.out.println(res);
  }

  /**
   * Repeatedly reads and bumps a rule's counters by a fixed increment,
   * asserting the two counters always stay equal (non-static: needs the
   * enclosing test's ruleManager).
   */
  private class RuleInfoUpdater implements Runnable {
    private long ruleid;
    private int index;

    public RuleInfoUpdater(long ruleid, int index) {
      this.ruleid = ruleid;
      this.index = index;
    }

    @Override
    public void run() {
      long lastCheckTime;
      long checkedCount;
      int cmdletsGen;
      try {
        for (int i = 0; i < 200; i++) {
          RuleInfo info = ruleManager.getRuleInfo(ruleid);
          lastCheckTime = System.currentTimeMillis();
          checkedCount = info.getNumChecked();
          cmdletsGen = (int) info.getNumCmdsGen();
          //System.out.println("" + index + ": " + lastCheckTime + " "
          //    + checkedCount + " " + cmdletsGen);
          // Both counters are always updated together, so they must agree.
          Assert.assertTrue(checkedCount == cmdletsGen);
          ruleManager.updateRuleInfo(ruleid, null,
              lastCheckTime, index, index);
        }
      } catch (Exception e) {
        Assert.fail("Can not have exception here.");
      }
    }
  }

  /** Random concurrent activate/disable flips must leave the rule coherent. */
  @Test
  public void testMultiThreadChangeState() throws Exception {
    String rule = "file: every 1s \n | length > 10 | cache";
    long now = System.currentTimeMillis();
    long length = 100;
    long fid = 10000;
    FileInfo[] files = { new FileInfo("/tmp/testfile", fid, length, false, (short) 3,
        1024, now, now, (short) 1, null, null, (byte) 3, (byte) 0) };
    metaStore.insertFiles(files);
    long rid = ruleManager.submitRule(rule, RuleState.ACTIVE);
    long start = System.currentTimeMillis();
    int nThreads = 2;
    Thread[] threads = new Thread[nThreads];
    for (int i = 0; i < nThreads; i++) {
      threads[i] = new Thread(new StateChangeWorker(rid));
    }
    for (Thread t : threads) {
      t.start();
    }
    for (Thread t : threads) {
      t.join();
    }
    long end = System.currentTimeMillis();
    System.out.println("Time used = " + (end - start) + " ms");
    Thread.sleep(1000); // This is needed due to async threads
    RuleInfo res = ruleManager.getRuleInfo(rid);
    System.out.println(res);
    Thread.sleep(5000);
    RuleInfo after = ruleManager.getRuleInfo(rid);
    System.out.println(after);
    if (res.getState() == RuleState.ACTIVE) {
      Assert.assertTrue(after.getNumCmdsGen() - res.getNumCmdsGen() <= 6);
    } else {
      Assert.assertTrue(after.getNumCmdsGen() == res.getNumCmdsGen());
    }
  }

  /**
   * Randomly flips a rule between ACTIVE and DISABLED (non-static: needs the
   * enclosing test's ruleManager).
   */
  private class StateChangeWorker implements Runnable {
    private long ruleId;

    public StateChangeWorker(long ruleId) {
      this.ruleId = ruleId;
    }

    @Override
    public void run() {
      Random r = new Random();
      try {
        for (int i = 0; i < 200; i++) {
          // Bug fix: r.nextInt() % 2 could yield -1, which matched no case and
          // silently skipped the flip; nextInt(2) is uniform over {0, 1}.
          int rand = r.nextInt(2);
          //System.out.println(rand == 0 ? "Active" : "Disable");
          switch (rand) {
            case 0:
              ruleManager.activateRule(ruleId);
              break;
            case 1:
              ruleManager.disableRule(ruleId, true);
              break;
          }
        }
      } catch (Exception e) {
        Assert.fail("Should not happen!");
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleExecutorPlugin.java | smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleExecutorPlugin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.model.rule.RuleExecutorPlugin;
import org.smartdata.model.rule.RuleExecutorPluginManager;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.server.MiniSmartClusterHarness;
import java.util.List;
public class TestRuleExecutorPlugin extends MiniSmartClusterHarness {
@Test
public void testPlugin() throws Exception {
waitTillSSMExitSafeMode();
TestPlugin plugin = new TestPlugin();
try {
RuleExecutorPluginManager.addPlugin(plugin);
RuleExecutorPluginManager.addPlugin(plugin);
String rule = "file: every 1s \n | length > 10 | cache";
SmartAdmin client = new SmartAdmin(smartContext.getConf());
long ruleId = client.submitRule(rule, RuleState.ACTIVE);
Assert.assertEquals(plugin.getNumOnNewRuleExecutor(), 1);
Thread.sleep(3000);
Assert.assertEquals(plugin.getNumOnNewRuleExecutor(), 1);
Assert.assertTrue(plugin.getNumPreExecution() >= 2);
Assert.assertTrue(plugin.getNumPreSubmitCmdlet() >= 2);
Assert.assertTrue(plugin.getNumOnRuleExecutorExit() == 0);
client.disableRule(ruleId, true);
Thread.sleep(1100);
int numPreExecution = plugin.getNumPreExecution();
int numPreSubmitCmdlet = plugin.getNumPreSubmitCmdlet();
Thread.sleep(2500);
Assert.assertTrue(plugin.getNumOnNewRuleExecutor() == 1);
Assert.assertTrue(plugin.getNumPreExecution() == numPreExecution);
Assert.assertTrue(plugin.getNumPreSubmitCmdlet() == numPreSubmitCmdlet);
Assert.assertTrue(plugin.getNumOnRuleExecutorExit() == 1);
RuleExecutorPluginManager.deletePlugin(plugin);
client.activateRule(ruleId);
Thread.sleep(500);
Assert.assertTrue(plugin.getNumOnNewRuleExecutor() == 1);
Assert.assertTrue(plugin.getNumPreExecution() == numPreExecution);
} finally {
RuleExecutorPluginManager.deletePlugin(plugin);
}
}
private class TestPlugin implements RuleExecutorPlugin {
private int numOnNewRuleExecutor = 0;
private int numPreExecution = 0;
private int numPreSubmitCmdlet = 0;
private int numOnRuleExecutorExit = 0;
public TestPlugin() {
}
public void onNewRuleExecutor(final RuleInfo ruleInfo, TranslateResult tResult) {
numOnNewRuleExecutor++;
}
public boolean preExecution(final RuleInfo ruleInfo, TranslateResult tResult) {
numPreExecution++;
return true;
}
public List<String> preSubmitCmdlet(final RuleInfo ruleInfo, List<String> objects) {
numPreSubmitCmdlet++;
return objects;
}
public CmdletDescriptor preSubmitCmdletDescriptor(
final RuleInfo ruleInfo, TranslateResult tResult, CmdletDescriptor descriptor) {
return descriptor;
}
public void onRuleExecutorExit(final RuleInfo ruleInfo) {
numOnRuleExecutorExit++;
}
public int getNumOnNewRuleExecutor() {
return numOnNewRuleExecutor;
}
public int getNumPreExecution() {
return numPreExecution;
}
public int getNumPreSubmitCmdlet() {
return numPreSubmitCmdlet;
}
public int getNumOnRuleExecutorExit() {
return numOnRuleExecutorExit;
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/rule/TestSubmitRule.java | smart-server/src/test/java/org/smartdata/server/engine/rule/TestSubmitRule.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.model.RuleState;
import org.smartdata.server.MiniSmartClusterHarness;
import java.io.IOException;
public class TestSubmitRule extends MiniSmartClusterHarness {

  /**
   * Checks that rule ids are assigned sequentially, and that malformed rule
   * text is rejected by both {@code submitRule} and {@code checkRule}.
   */
  @Test
  public void testSubmitRule() throws Exception {
    waitTillSSMExitSafeMode();
    String rule = "file: every 1s \n | length > 10 | cache";
    SmartAdmin client = new SmartAdmin(smartContext.getConf());
    long ruleId = client.submitRule(rule, RuleState.ACTIVE);
    // Each subsequent submission must get the next sequential id.
    for (int i = 0; i < 10; i++) {
      long id = client.submitRule(rule, RuleState.ACTIVE);
      Assert.assertEquals(ruleId + i + 1, id);
    }
    String badRule = "something else";
    try {
      client.submitRule(badRule, RuleState.ACTIVE);
      Assert.fail("Should have an exception here");
    } catch (IOException e) {
      // Expected: unparsable rule text.
    }
    try {
      client.checkRule(badRule);
      Assert.fail("Should have an exception here");
    } catch (IOException e) {
      // Expected: unparsable rule text.
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/rule/TestMoveRule.java | smart-server/src/test/java/org/smartdata/server/engine/rule/TestMoveRule.java | ///**
// * Licensed to the Apache Software Foundation (ASF) under one
// * or more contributor license agreements. See the NOTICE file
// * distributed with this work for additional information
// * regarding copyright ownership. The ASF licenses this file
// * to you under the Apache License, Version 2.0 (the
// * "License"); you may not use this file except in compliance
// * with the License. You may obtain a copy of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS,
// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// * See the License for the specific language governing permissions and
// * limitations under the License.
// */
//package org.smartdata.server.engine.rule;
//
//import org.apache.hadoop.fs.Path;
//import org.apache.hadoop.hdfs.DFSTestUtil;
//import org.junit.Test;
//import org.smartdata.admin.SmartAdmin;
//import org.smartdata.model.ActionInfo;
//import org.smartdata.model.RuleState;
//import org.smartdata.server.MiniSmartClusterHarness;
//
//import java.util.List;
//
//public class TestMoveRule extends MiniSmartClusterHarness {
//
// @Test
// public void testMoveDir() throws Exception {
// waitTillSSMExitSafeMode();
//
// dfs.mkdirs(new Path("/test"));
// dfs.setStoragePolicy(new Path("/test"), "HOT");
// dfs.mkdirs(new Path("/test/dir1"));
// DFSTestUtil.createFile(dfs, new Path("/test/dir1/f1"), DEFAULT_BLOCK_SIZE * 3, (short) 3, 0);
//
// String rule = "file: path matches \"/test/*\" | allssd";
// SmartAdmin admin = new SmartAdmin(smartContext.getConf());
//
// long ruleId = admin.submitRule(rule, RuleState.ACTIVE);
//
// int idx = 0;
// while (idx++ < 6) {
// Thread.sleep(1000);
// List<ActionInfo> infos = admin.listActionInfoOfLastActions(100);
// System.out.println(idx + " round:");
// for (ActionInfo info : infos) {
// System.out.println("\t" + info);
// }
// System.out.println();
// }
// }
//}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/rule/TestSmallFileRule.java | smart-server/src/test/java/org/smartdata/server/engine/rule/TestSmallFileRule.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.server.MiniSmartClusterHarness;
import java.util.List;
import java.util.Random;
public class TestSmallFileRule extends MiniSmartClusterHarness {

  @Before
  @Override
  public void init() throws Exception {
    super.init();
    createTestFiles();
  }

  /** Creates a few small (5-15 byte) files for the compact rule to pick up. */
  private void createTestFiles() throws Exception {
    Path path = new Path("/test/small_files/");
    dfs.mkdirs(path);
    for (int i = 0; i < 3; i++) {
      String fileName = "/test/small_files/file_" + i;
      // Bug fix: try-with-resources so the stream is closed even if a write
      // throws (the original leaked the stream on failure).
      try (FSDataOutputStream out = dfs.create(new Path(fileName), (short) 1)) {
        long fileLen = 5 + (int) (Math.random() * 11);
        byte[] buf = new byte[20];
        Random rb = new Random(2018);
        int bytesRemaining = (int) fileLen;
        while (bytesRemaining > 0) {
          rb.nextBytes(buf);
          int bytesToWrite = Math.min(bytesRemaining, buf.length);
          out.write(buf, 0, bytesToWrite);
          bytesRemaining -= bytesToWrite;
        }
      }
    }
  }

  /** Submits a compact rule over the small files and checks it is listed. */
  @Test(timeout = 180000)
  public void testRule() throws Exception {
    waitTillSSMExitSafeMode();
    String rule = "file: path matches \"/test/small_files/file*\" and length < 20KB"
        + " | compact -containerFile \"/test/small_files/container_file_1\"";
    SmartAdmin admin = new SmartAdmin(smartContext.getConf());
    admin.submitRule(rule, RuleState.ACTIVE);
    Thread.sleep(6000);
    List<RuleInfo> ruleInfoList = admin.listRulesInfo();
    for (RuleInfo info : ruleInfoList) {
      System.out.println(info);
    }
    Assert.assertEquals(1, ruleInfoList.size());
  }

  @After
  public void tearDown() throws Exception {
    dfs.getClient().delete("/test", true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/rule/TestRulePlugin.java | smart-server/src/test/java/org/smartdata/server/engine/rule/TestRulePlugin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.model.rule.RulePlugin;
import org.smartdata.model.rule.RulePluginManager;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.server.MiniSmartClusterHarness;
import java.io.IOException;
public class TestRulePlugin extends MiniSmartClusterHarness {
@Test
public void testPlugin() throws Exception {
waitTillSSMExitSafeMode();
SimplePlugin plugin = new SimplePlugin();
try {
RulePluginManager.addPlugin(plugin);
int adding = plugin.getAdding();
int added = plugin.getAdded();
SmartAdmin admin = new SmartAdmin(smartContext.getConf());
admin.submitRule("file : path matches \"/home/*\" | cache", RuleState.ACTIVE);
Assert.assertTrue(adding + 1 == plugin.getAdding());
Assert.assertTrue(added + 1 == plugin.getAdded());
try {
admin.submitRule("file : path matches \"/user/*\" | cache", RuleState.DISABLED);
Assert.fail("Should not success.");
} catch (Exception e) {
Assert.assertTrue(e.getMessage().contains("MUST ACTIVE"));
}
Assert.assertTrue(adding + 1 == plugin.getAdding());
Assert.assertTrue(added + 1 == plugin.getAdded());
} finally {
RulePluginManager.deletePlugin(plugin);
}
}
private class SimplePlugin implements RulePlugin {
private int adding = 0;
private int added = 0;
public SimplePlugin() {
}
public int getAdding() {
return adding;
}
public int getAdded() {
return added;
}
public void onAddingNewRule(RuleInfo ruleInfo, TranslateResult tr)
throws IOException {
if (ruleInfo.getState() == RuleState.ACTIVE) {
adding++;
} else {
throw new IOException("MUST ACTIVE");
}
}
public void onNewRuleAdded(RuleInfo ruleInfo, TranslateResult tr) {
added++;
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestCompressDecompress.java | smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestCompressDecompress.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.CompressionCodec;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.hadoop.filesystem.SmartFileSystem;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.client.SmartDFSClient;
import org.smartdata.hdfs.scheduler.CompressionScheduler;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.FileState;
import org.smartdata.model.action.ActionScheduler;
import org.smartdata.server.MiniSmartClusterHarness;
import org.smartdata.server.engine.CmdletManager;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
public class TestCompressDecompress extends MiniSmartClusterHarness {
private DFSClient smartDFSClient;
private String codec;
  // Uses a larger block size than the harness default so multi-block files
  // are exercised by the compression tests.
  @Override
  @Before
  public void init() throws Exception {
    DEFAULT_BLOCK_SIZE = 1024 * 1024;
    super.init();
    // Alternative codecs tried previously; ZLIB is the one currently tested.
    // this.compressionImpl = "snappy";
    // this.compressionImpl = "Lz4";
    // this.compressionImpl = "Bzip2";
    this.codec = CompressionCodec.ZLIB;
    // Client used below to read files back after SSM has compressed them.
    smartDFSClient = new SmartDFSClient(ssm.getContext().getConf());
  }
@Test
public void testSubmitCompressionAction() throws Exception {
// if (!loadedNative()) {
// return;
// }
waitTillSSMExitSafeMode();
// initDB();
int arraySize = 1024 * 1024 * 80;
String fileName = "/ssm/compression/file1";
byte[] bytes = prepareFile(fileName, arraySize);
MetaStore metaStore = ssm.getMetaStore();
int bufSize = 1024 * 1024 * 10;
CmdletManager cmdletManager = ssm.getCmdletManager();
long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName
+ " -bufSize " + bufSize + " -codec " + codec);
waitTillActionDone(cmdId);
FileState fileState = null;
// metastore test
int n = 0;
while (true) {
fileState = metaStore.getFileState(fileName);
if (FileState.FileType.COMPRESSION.equals(fileState.getFileType())) {
break;
}
Thread.sleep(1000);
if (n++ >= 20) {
throw new Exception("Time out in waiting for getting expect file state.");
}
}
Assert.assertEquals(FileState.FileStage.DONE, fileState.getFileStage());
Assert.assertTrue(fileState instanceof CompressionFileState);
CompressionFileState compressionFileState = (CompressionFileState) fileState;
Assert.assertEquals(fileName, compressionFileState.getPath());
Assert.assertEquals(bufSize, compressionFileState.getBufferSize());
Assert.assertEquals(codec, compressionFileState.getCompressionImpl());
Assert.assertEquals(arraySize, compressionFileState.getOriginalLength());
Assert.assertTrue(compressionFileState.getCompressedLength() > 0);
Assert.assertTrue(compressionFileState.getCompressedLength()
< compressionFileState.getOriginalLength());
// data accuracy test
byte[] input = new byte[arraySize];
DFSInputStream dfsInputStream = smartDFSClient.open(fileName);
int offset = 0;
while (true) {
int len = dfsInputStream.read(input, offset, arraySize - offset);
if (len <= 0) {
break;
}
offset += len;
}
Assert.assertArrayEquals(
"original array not equals compress/decompressed array", input, bytes);
}
// @Test(timeout = 90000)
// public void testCompressEmptyFile() throws Exception {
// waitTillSSMExitSafeMode();
//
// // initDB();
// String fileName = "/ssm/compression/file2";
// prepareFile(fileName, 0);
// MetaStore metaStore = ssm.getMetaStore();
//
// int bufSize = 1024 * 1024;
// CmdletManager cmdletManager = ssm.getCmdletManager();
// long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName
// + " -bufSize " + bufSize + " -compressImpl " + compressionImpl);
//
// waitTillActionDone(cmdId);
// FileState fileState = metaStore.getFileState(fileName);
// while (!fileState.getFileType().equals(FileState.FileType.COMPRESSION)) {
// Thread.sleep(200);
// fileState = metaStore.getFileState(fileName);
// }
//
// // metastore test
//// Assert.assertEquals(FileState.FileType.COMPRESSION, fileState.getFileType());
// Assert.assertEquals(FileState.FileStage.DONE, fileState.getFileStage());
// Assert.assertTrue(fileState instanceof CompressionFileState);
// CompressionFileState compressionFileState = (CompressionFileState) fileState;
// Assert.assertEquals(fileName, compressionFileState.getPath());
// Assert.assertEquals(bufSize, compressionFileState.getBufferSize());
// Assert.assertEquals(compressionImpl, compressionFileState.getCompressionImpl());
// Assert.assertEquals(0, compressionFileState.getOriginalLength());
// Assert.assertEquals(0, compressionFileState.getCompressedLength());
//
// // File length test
// Assert.assertEquals(0, dfsClient.getFileInfo(fileName).getLen());
// }
@Test
public void testCompressedFileRandomRead() throws Exception {
// if (!loadedNative()) {
// return;
// }
waitTillSSMExitSafeMode();
// initDB();
int arraySize = 1024 * 1024 * 8;
String fileName = "/ssm/compression/file3";
byte[] bytes = prepareFile(fileName, arraySize);
int bufSize = 1024 * 1024;
CmdletManager cmdletManager = ssm.getCmdletManager();
long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName
+ " -bufSize " + bufSize + " -codec " + codec);
waitTillActionDone(cmdId);
// Test random read
Random rnd = new Random(System.currentTimeMillis());
DFSInputStream dfsInputStream = smartDFSClient.open(fileName);
int randomReadSize = 500;
byte[] randomReadBuffer = new byte[randomReadSize];
for (int i = 0; i < 5; i++) {
int pos = rnd.nextInt(arraySize - 500);
byte[] subBytes = Arrays.copyOfRange(bytes, pos, pos + 500);
dfsInputStream.seek(pos);
Assert.assertEquals(pos, dfsInputStream.getPos());
int off = 0;
while (off < randomReadSize) {
int len = dfsInputStream.read(randomReadBuffer, off, randomReadSize - off);
off += len;
}
Assert.assertArrayEquals(subBytes, randomReadBuffer);
Assert.assertEquals(pos + 500, dfsInputStream.getPos());
}
}
@Test
public void testDecompress() throws Exception {
int arraySize = 1024 * 1024 * 8;
String filePath = "/ssm/compression/file4";
prepareFile(filePath, arraySize);
dfsClient.setStoragePolicy(filePath, "COLD");
HdfsFileStatus fileStatusBefore = dfsClient.getFileInfo(filePath);
CmdletManager cmdletManager = ssm.getCmdletManager();
// Expect that a common file cannot be decompressed.
List<ActionScheduler> schedulers = cmdletManager.getSchedulers("decompress");
Assert.assertTrue(schedulers.size() == 1);
ActionScheduler scheduler = schedulers.get(0);
Assert.assertTrue(scheduler instanceof CompressionScheduler);
Assert.assertFalse(((CompressionScheduler) scheduler).supportDecompression(filePath));
// Compress the given file
long cmdId = cmdletManager.submitCmdlet(
"compress -file " + filePath + " -codec " + codec);
waitTillActionDone(cmdId);
FileState fileState = HadoopUtil.getFileState(dfsClient, filePath);
Assert.assertTrue(fileState instanceof CompressionFileState);
// The storage policy should not be changed
HdfsFileStatus fileStatusAfterCompress = dfsClient.getFileInfo(filePath);
if (fileStatusBefore.getStoragePolicy() != 0) {
// To make sure the consistency of storage policy
Assert.assertEquals(fileStatusBefore.getStoragePolicy(),
fileStatusAfterCompress.getStoragePolicy());
}
// Try to decompress a compressed file
cmdId = cmdletManager.submitCmdlet("decompress -file " + filePath);
waitTillActionDone(cmdId);
fileState = HadoopUtil.getFileState(dfsClient, filePath);
Assert.assertFalse(fileState instanceof CompressionFileState);
// The storage policy should not be changed.
HdfsFileStatus fileStatusAfterDeCompress = dfsClient.getFileInfo(filePath);
if (fileStatusBefore.getStoragePolicy() != 0) {
// To make sure the consistency of storage policy
Assert.assertEquals(fileStatusBefore.getStoragePolicy(),
fileStatusAfterDeCompress.getStoragePolicy());
}
}
@Test
public void testCompressDecompressDir() throws Exception {
String dir = "/ssm/compression";
dfsClient.mkdirs(dir, null, true);
CmdletManager cmdletManager = ssm.getCmdletManager();
List<ActionScheduler> schedulers = cmdletManager.getSchedulers(
"decompress");
Assert.assertTrue(schedulers.size() == 1);
ActionScheduler scheduler = schedulers.get(0);
Assert.assertTrue(scheduler instanceof CompressionScheduler);
// Expect that a dir cannot be compressed.
Assert.assertFalse(((
CompressionScheduler) scheduler).supportCompression(dir));
// Expect that a dir cannot be decompressed.
Assert.assertFalse(((
CompressionScheduler) scheduler).supportDecompression(dir));
}
@Test
public void testCheckCompressAction() throws Exception {
int arraySize = 1024 * 1024 * 8;
String fileDir = "/ssm/compression/";
String fileName = "file5";
String filePath = fileDir + fileName;
prepareFile(filePath, arraySize);
CmdletManager cmdletManager = ssm.getCmdletManager();
long cmdId = cmdletManager.submitCmdlet(
"checkcompress -file " + filePath);
waitTillActionDone(cmdId);
// Test directory case.
cmdId = cmdletManager.submitCmdlet("checkcompress -file " + fileDir);
waitTillActionDone(cmdId);
}
@Test
public void testListLocatedStatus() throws Exception {
// if (!loadedNative()) {
// return;
// }
waitTillSSMExitSafeMode();
// initDB();
SmartFileSystem smartDfs = new SmartFileSystem();
smartDfs.initialize(dfs.getUri(), ssm.getContext().getConf());
int arraySize = 1024 * 1024 * 8;
String fileName = "/ssm/compression/file4";
byte[] bytes = prepareFile(fileName, arraySize);
// For uncompressed file, SmartFileSystem and DistributedFileSystem behave exactly the same
RemoteIterator<LocatedFileStatus> iter1 = dfs.listLocatedStatus(new Path(fileName));
LocatedFileStatus stat1 = iter1.next();
RemoteIterator<LocatedFileStatus> iter2 = smartDfs.listLocatedStatus(new Path(fileName));
LocatedFileStatus stat2 = iter2.next();
Assert.assertEquals(stat1.getPath(), stat2.getPath());
Assert.assertEquals(stat1.getBlockSize(), stat2.getBlockSize());
Assert.assertEquals(stat1.getLen(), stat2.getLen());
BlockLocation[] blockLocations1 = stat1.getBlockLocations();
BlockLocation[] blockLocations2 = stat2.getBlockLocations();
Assert.assertEquals(blockLocations1.length, blockLocations2.length);
for (int i = 0; i < blockLocations1.length; i++) {
Assert.assertEquals(blockLocations1[i].getLength(), blockLocations2[i].getLength());
Assert.assertEquals(blockLocations1[i].getOffset(), blockLocations2[i].getOffset());
}
// Test compressed file
int bufSize = 1024 * 1024;
CmdletManager cmdletManager = ssm.getCmdletManager();
long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName
+ " -bufSize " + bufSize + " -codec " + codec);
waitTillActionDone(cmdId);
RemoteIterator<LocatedFileStatus> iter3 = dfs.listLocatedStatus(new Path(fileName));
LocatedFileStatus stat3 = iter3.next();
BlockLocation[] blockLocations3 = stat3.getBlockLocations();
RemoteIterator<LocatedFileStatus> iter4 = smartDfs.listLocatedStatus(new Path(fileName));
LocatedFileStatus stat4 = iter4.next();
BlockLocation[] blockLocations4 = stat4.getBlockLocations();
Assert.assertEquals(stat1.getPath(), stat4.getPath());
Assert.assertEquals(stat1.getBlockSize(), stat4.getBlockSize());
Assert.assertEquals(stat1.getLen(), stat4.getLen());
}
@Test
public void testRename() throws Exception {
// Create raw file
Path path = new Path("/test/compress_files/");
dfs.mkdirs(path);
int rawLength = 1024 * 1024 * 8;
String fileName = "/test/compress_files/file_0";
DFSTestUtil.createFile(dfs, new Path(fileName),
rawLength, (short) 1, 1);
int bufSize = 1024 * 1024;
waitTillSSMExitSafeMode();
CmdletManager cmdletManager = ssm.getCmdletManager();
// Compress files
long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName
+ " -bufSize " + bufSize + " -codec " + codec);
waitTillActionDone(cmdId);
SmartDFSClient smartDFSClient = new SmartDFSClient(smartContext.getConf());
smartDFSClient.rename("/test/compress_files/file_0",
"/test/compress_files/file_4");
Assert.assertTrue(smartDFSClient.exists("/test/compress_files/file_4"));
HdfsFileStatus fileStatus =
smartDFSClient.getFileInfo("/test/compress_files/file_4");
Assert.assertEquals(rawLength, fileStatus.getLen());
}
@Test
public void testUnsupportedMethod() throws Exception {
// Concat, truncate and append are not supported
// Create raw file
Path path = new Path("/test/compress_files/");
dfs.mkdirs(path);
int rawLength = 1024 * 1024 * 8;
String fileName = "/test/compress_files/file_0";
DFSTestUtil.createFile(dfs, new Path(fileName),
rawLength, (short) 1, 1);
int bufSize = 1024 * 1024;
waitTillSSMExitSafeMode();
CmdletManager cmdletManager = ssm.getCmdletManager();
// Compress files
long cmdId = cmdletManager.submitCmdlet("compress -file " + fileName
+ " -bufSize " + bufSize + " -codec " + codec);
waitTillActionDone(cmdId);
SmartDFSClient smartDFSClient = new SmartDFSClient(smartContext.getConf());
// Test unsupported methods on compressed file
try {
smartDFSClient.concat(fileName + "target", new String[]{fileName});
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("Compressed"));
}
/*try {
smartDFSClient.truncate(fileName, 100L);
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("Compressed"));
}*/
}
private void waitTillActionDone(long cmdId) throws Exception {
int n = 0;
while (true) {
Thread.sleep(1000);
CmdletManager cmdletManager = ssm.getCmdletManager();
CmdletInfo info = cmdletManager.getCmdletInfo(cmdId);
if (info == null) {
continue;
}
CmdletState state = info.getState();
if (state == CmdletState.DONE) {
return;
} else if (state == CmdletState.FAILED) {
// Reasonably assume that there is only one action wrapped by a given cmdlet.
long aid = cmdletManager.getCmdletInfo(cmdId).getAids().get(0);
Assert.fail(
"Action failed. " + cmdletManager.getActionInfo(aid).getLog());
} else {
System.out.println(state);
}
// Wait for 20s.
if (++n == 20) {
throw new Exception("Time out in waiting for cmdlet: " + cmdletManager.
getCmdletInfo(cmdId).toString());
}
}
}
private byte[] prepareFile(String fileName, int fileSize) throws IOException {
byte[] bytes = TestCompressDecompress.BytesGenerator.get(fileSize);
// Create HDFS file
OutputStream outputStream = dfsClient.create(fileName, true);
outputStream.write(bytes);
outputStream.close();
return bytes;
}
static final class BytesGenerator {
private static final byte[] CACHE = new byte[]{0x0, 0x1, 0x2, 0x3, 0x4,
0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF};
private static final Random rnd = new Random(12345L);
private BytesGenerator() {
}
public static byte[] get(int size) {
byte[] array = (byte[]) Array.newInstance(byte.class, size);
for (int i = 0; i < size; i++) {
array[i] = CACHE[rnd.nextInt(CACHE.length - 1)];
}
return array;
}
}
private boolean loadedNative() {
return CompressionCodec.getNativeCodeLoaded();
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestSmallFileScheduler.java | smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestSmallFileScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.model.CmdletState;
import org.smartdata.server.MiniSmartClusterHarness;
import org.smartdata.server.engine.CmdletManager;
import java.util.Random;
/**
 * Verifies that the small-file "compact" scheduler merges a set of small
 * files into a single container file on a mini smart cluster.
 */
public class TestSmallFileScheduler extends MiniSmartClusterHarness {
  // Total number of bytes written across all generated small files.
  private long sumFileLen;

  @Before
  @Override
  public void init() throws Exception {
    super.init();
    sumFileLen = 0L;
    createTestFiles();
  }

  /** Creates two small files (5-15 bytes each) under /test/small_files/. */
  private void createTestFiles() throws Exception {
    dfs.mkdirs(new Path("/test/small_files/"));
    for (int idx = 0; idx < 2; idx++) {
      FSDataOutputStream stream =
          dfs.create(new Path("/test/small_files/file_" + idx), (short) 1);
      long length = 5 + (int) (Math.random() * 11);
      Random dataSource = new Random(2018);
      byte[] chunk = new byte[20];
      int remaining = (int) length;
      while (remaining > 0) {
        dataSource.nextBytes(chunk);
        int toWrite = Math.min(remaining, chunk.length);
        stream.write(chunk, 0, toWrite);
        remaining -= toWrite;
      }
      stream.close();
      sumFileLen += length;
    }
  }

  @Test(timeout = 180000)
  public void testScheduler() throws Exception {
    waitTillSSMExitSafeMode();
    Thread.sleep(2000);
    CmdletManager manager = ssm.getCmdletManager();
    long cid = manager.submitCmdlet("compact -file "
        + "['/test/small_files/file_0','/test/small_files/file_1'] "
        + "-containerFile /test/small_files/container_file_2");
    while (true) {
      Thread.sleep(3000);
      CmdletState state = manager.getCmdletInfo(cid).getState();
      if (state == CmdletState.FAILED) {
        Assert.fail("Compact failed.");
      }
      if (state != CmdletState.DONE) {
        System.out.println(state);
        continue;
      }
      // Compact finished: the container holds every byte written, and the
      // compacted originals have been truncated to zero length.
      long containerLen = dfsClient.getFileInfo(
          "/test/small_files/container_file_2").getLen();
      Assert.assertEquals(sumFileLen, containerLen);
      Assert.assertEquals(0,
          dfsClient.getFileInfo("/test/small_files/file_1").getLen());
      return;
    }
  }

  @After
  public void tearDown() throws Exception {
    dfs.getClient().delete("/test", true);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestActionRpc.java | smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestActionRpc.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.server.MiniSmartClusterHarness;
/**
 * Checks over the admin RPC interface that an action's progress is
 * observable as a value strictly between 0 and 1 while it is still running.
 */
public class TestActionRpc extends MiniSmartClusterHarness {
  @Test
  public void testActionProgress() throws Exception {
    waitTillSSMExitSafeMode();
    SmartAdmin admin = new SmartAdmin(smartContext.getConf());
    long cmdId = admin.submitCmdlet("sleep -ms 6000");
    try {
      // A sleep cmdlet wraps exactly one action; track that action's id.
      CmdletInfo cmdletInfo = admin.getCmdletInfo(cmdId);
      long actionId = cmdletInfo.getAids().get(0);
      while (true) {
        ActionInfo snapshot = admin.getActionInfo(actionId);
        // If the action completes before any partial progress was seen,
        // progress reporting is broken.
        if (snapshot.isFinished()) {
          Assert.fail("No intermediate progress observed.");
        }
        if (snapshot.getProgress() > 0 && snapshot.getProgress() < 1.0) {
          return;
        }
        Thread.sleep(500);
      }
    } finally {
      admin.close();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestCacheScheduler.java | smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestCacheScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.junit.Test;
import org.smartdata.hdfs.scheduler.CacheScheduler;
import org.smartdata.model.CmdletState;
import org.smartdata.model.action.ActionScheduler;
import org.smartdata.server.MiniSmartClusterHarness;
import org.smartdata.server.engine.CmdletManager;
import java.util.Set;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Verifies cache/uncache scheduling: SSM must create its dedicated cache
 * pool, execute cache and uncache actions, and hold no file locks once the
 * actions have finished.
 *
 * <p>Fixes over the previous version: the pool check used to fail on the
 * first listed pool that was not {@code SSM_POOL} (wrong if another pool is
 * listed first) and returned early on success, skipping the file-lock and
 * uncache assertions; the state-polling loops also busy-spun without sleeping.
 */
public class TestCacheScheduler extends MiniSmartClusterHarness {

  @Test(timeout = 100000)
  public void testCacheUncacheFile() throws Exception {
    waitTillSSMExitSafeMode();
    String filePath = "/testFile";
    FSDataOutputStream out = dfs.create(new Path(filePath));
    out.writeChars("test content");
    out.close();

    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cid = cmdletManager.submitCmdlet("cache -file " + filePath);
    waitUntilDone(cmdletManager, cid);

    // SSM must have created its dedicated cache pool. The pool is not
    // necessarily the first entry returned, so scan the whole listing.
    boolean ssmPoolFound = false;
    RemoteIterator<CachePoolEntry> poolEntries = dfsClient.listCachePools();
    while (poolEntries.hasNext()) {
      CachePoolEntry poolEntry = poolEntries.next();
      if (poolEntry.getInfo().getPoolName().equals(CacheScheduler.SSM_POOL)) {
        ssmPoolFound = true;
        break;
      }
    }
    if (!ssmPoolFound) {
      fail("A cache pool should be created by SSM: " + CacheScheduler.SSM_POOL);
    }

    // Currently, there is only one scheduler for cache action.
    ActionScheduler actionScheduler = cmdletManager.getSchedulers("cache").get(0);
    assertTrue(actionScheduler instanceof CacheScheduler);
    Set<String> fileLock = ((CacheScheduler) actionScheduler).getFileLock();
    // There is no file locked after the action is finished.
    assertTrue(fileLock.isEmpty());

    long cid1 = cmdletManager.submitCmdlet("uncache -file " + filePath);
    waitUntilDone(cmdletManager, cid1);
    // There is no file locked after the action is finished.
    assertTrue(fileLock.isEmpty());
  }

  /** Polls the cmdlet state with a short sleep instead of busy-spinning. */
  private void waitUntilDone(CmdletManager cmdletManager, long cid)
      throws Exception {
    while (!cmdletManager.getCmdletInfo(cid).getState().equals(CmdletState.DONE)) {
      Thread.sleep(100);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestMoverScheduler.java | smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestMoverScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.model.CmdletState;
import org.smartdata.server.MiniSmartClusterHarness;
import org.smartdata.server.engine.CmdletManager;
/**
 * Verifies that an "allssd" cmdlet completes for a file whose storage
 * policy has been set to ALL_SSD on a mini smart cluster.
 */
public class TestMoverScheduler extends MiniSmartClusterHarness {
  @Test(timeout = 40000)
  public void testScheduler() throws Exception {
    waitTillSSMExitSafeMode();
    Path filePath = new Path("/testfile");
    DistributedFileSystem fs = cluster.getFileSystem();
    // Two full blocks, replication factor 3, fixed seed for reproducibility.
    int numBlocks = 2;
    DFSTestUtil.createFile(fs, filePath, numBlocks * DEFAULT_BLOCK_SIZE, (short) 3, 100);
    fs.setStoragePolicy(filePath, "ALL_SSD");

    CmdletManager cmdletManager = ssm.getCmdletManager();
    long cmdId = cmdletManager.submitCmdlet("allssd -file /testfile");
    // Poll once a second until the cmdlet reaches a terminal state.
    while (true) {
      Thread.sleep(1000);
      CmdletState state = cmdletManager.getCmdletInfo(cmdId).getState();
      if (state == CmdletState.DONE) {
        return;
      }
      if (state == CmdletState.FAILED) {
        Assert.fail("Mover failed.");
      }
      System.out.println(state);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdlet.java | smart-server/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdlet.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.smartdata.action.SmartAction;
import org.smartdata.hdfs.MiniClusterHarness;
import org.smartdata.hdfs.action.CacheFileAction;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.model.CmdletState;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
 * Cmdlet unit test: builds a cmdlet wrapping a single cache action and runs
 * it against a mini HDFS cluster.
 */
public class TestCmdlet extends MiniClusterHarness {

  @Test
  public void testRunCmdlet() throws Exception {
    generateTestFiles();
    Cmdlet cmd = runHelper();
    cmd.run();
    // Poll until the cmdlet reports completion.
    while (!cmd.isFinished()) {
      Thread.sleep(1000);
    }
  }

  /** Creates the directories and files the cmdlet's actions operate on. */
  private void generateTestFiles() throws IOException {
    // New dir
    Path dir = new Path("/testMoveFile");
    dfs.mkdirs(dir);
    // Move to SSD
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out1 = dfs.create(new Path("/testMoveFile/file1"),
        true, 1024);
    out1.writeChars("/testMoveFile/file1");
    out1.close();
    // Move to Archive
    final FSDataOutputStream out2 = dfs.create(new Path("/testMoveFile/file2"),
        true, 1024);
    out2.writeChars("/testMoveFile/file2");
    out2.close();
    // Move to CacheObject
    Path dir3 = new Path("/testCacheFile");
    dfs.mkdirs(dir3);
  }

  /**
   * Builds a pending cmdlet containing a single cache action.
   *
   * <p>The previous version passed a {@code SmartAction[4]} with three null
   * slots (leftovers of commented-out move actions) into the cmdlet; only
   * the real action is passed now, and the raw {@code HashMap} is typed.
   */
  private Cmdlet runHelper() throws IOException {
    CacheFileAction cacheAction = new CacheFileAction();
    cacheAction.setDfsClient(dfsClient);
    cacheAction.setContext(smartContext);
    Map<String, String> args = new HashMap<>();
    args.put(CacheFileAction.FILE_PATH, "/testCacheFile");
    cacheAction.init(args);
    // New Cmdlet
    Cmdlet cmd = new Cmdlet(Arrays.<SmartAction>asList(cacheAction));
    cmd.setId(1);
    cmd.setRuleId(1);
    cmd.setState(CmdletState.PENDING);
    return cmd;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/main/java/org/smartdata/server/SmartRpcServer.java | smart-server/src/main/java/org/smartdata/server/SmartRpcServer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RetriableException;
import org.smartdata.SmartPolicyProvider;
import org.smartdata.SmartServiceState;
import org.smartdata.action.ActionRegistry;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.ActionDescriptor;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.smartdata.model.FileState;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.protocol.AdminServerProto;
import org.smartdata.protocol.ClientServerProto;
import org.smartdata.protocol.SmartServerProtocols;
import org.smartdata.protocol.protobuffer.AdminProtocolProtoBuffer;
import org.smartdata.protocol.protobuffer.ClientProtocolProtoBuffer;
import org.smartdata.protocol.protobuffer.ServerProtocolsServerSideTranslator;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
/**
* Implements the rpc calls.
* TODO: Implement statistics for SSM rpc server
*/
public class SmartRpcServer implements SmartServerProtocols {
protected SmartServer ssm;
protected Configuration conf;
protected final InetSocketAddress clientRpcAddress;
protected int serviceHandlerCount;
protected final RPC.Server clientRpcServer;
private final boolean serviceAuthEnabled;
  /**
   * Builds the Hadoop RPC server that exposes both the admin and the client
   * protobuf protocols of SSM on a single port.
   *
   * @param ssm  owning server; all RPC calls are delegated to its managers
   * @param conf configuration providing the bind address and handler count
   * @throws IOException if the underlying Hadoop RPC server cannot be built
   */
  public SmartRpcServer(SmartServer ssm, Configuration conf) throws IOException {
    this.ssm = ssm;
    this.conf = conf;
    // TODO: implement ssm SmartAdminProtocol
    InetSocketAddress rpcAddr = getRpcServerAddress();
    RPC.setProtocolEngine(conf, AdminProtocolProtoBuffer.class, ProtobufRpcEngine.class);
    // A single translator instance backs both protobuf blocking services.
    ServerProtocolsServerSideTranslator clientSSMProtocolServerSideTranslatorPB =
        new ServerProtocolsServerSideTranslator(this);
    BlockingService adminSmartPbService = AdminServerProto.protoService
        .newReflectiveBlockingService(clientSSMProtocolServerSideTranslatorPB);
    BlockingService clientSmartPbService = ClientServerProto.protoService
        .newReflectiveBlockingService(clientSSMProtocolServerSideTranslatorPB);
    serviceHandlerCount = conf.getInt(
        SmartConfKeys.SMART_SERVER_RPC_HANDLER_COUNT_KEY,
        SmartConfKeys.SMART_SERVER_RPC_HANDLER_COUNT_DEFAULT);
    // TODO: provide service for SmartClientProtocol and SmartAdminProtocol
    // TODO: in different port and server
    clientRpcServer = new RPC.Builder(conf)
        .setProtocol(AdminProtocolProtoBuffer.class)
        .setInstance(adminSmartPbService)
        .setBindAddress(rpcAddr.getHostName())
        .setPort(rpcAddr.getPort())
        .setNumHandlers(serviceHandlerCount)
        .setVerbose(true)
        .build();
    // Record the port actually bound by the listener (the configured port
    // may have been 0 / ephemeral).
    InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
    clientRpcAddress = new InetSocketAddress(
        rpcAddr.getHostName(), listenAddr.getPort());
    // Register both protocols on the same server instance.
    DFSUtil.addPBProtocol(conf, AdminProtocolProtoBuffer.class,
        adminSmartPbService, clientRpcServer);
    DFSUtil.addPBProtocol(conf, ClientProtocolProtoBuffer.class,
        clientSmartPbService, clientRpcServer);
    // set service-level authorization security policy
    // NOTE: intentional assignment (not comparison) inside the condition.
    if (serviceAuthEnabled = conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
      if (clientRpcServer != null) {
        clientRpcServer.refreshServiceAcl(conf, new SmartPolicyProvider());
      }
    }
  }
private InetSocketAddress getRpcServerAddress() {
String[] strings = conf.get(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY,
SmartConfKeys.SMART_SERVER_RPC_ADDRESS_DEFAULT).split(":");
return new InetSocketAddress(strings[strings.length - 2]
, Integer.parseInt(strings[strings.length - 1]));
}
/**
* Start SSM RPC service.
*/
public void start() {
if (clientRpcServer != null) {
clientRpcServer.start();
}
}
/**
* Stop SSM RPC service.
*/
public void stop() {
if (clientRpcServer != null) {
clientRpcServer.stop();
}
}
/*
* Waiting for RPC threads to exit.
*/
public void join() throws InterruptedException {
if (clientRpcServer != null) {
clientRpcServer.join();
}
}
@Override
public SmartServiceState getServiceState() {
return ssm.getSSMServiceState();
}
private void checkIfActive() throws IOException {
if (!ssm.isActive()) {
throw new RetriableException("SSM services not ready...");
}
}
@Override
public long submitRule(String rule, RuleState initState) throws IOException {
checkIfActive();
return ssm.getRuleManager().submitRule(rule, initState);
}
@Override
public void checkRule(String rule) throws IOException {
checkIfActive();
ssm.getRuleManager().checkRule(rule);
}
/**
 * Fetches the info of a single rule by id; delegates to the RuleManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public RuleInfo getRuleInfo(long ruleId) throws IOException {
  checkIfActive();
  return ssm.getRuleManager().getRuleInfo(ruleId);
}
/**
 * Lists info for all known rules; delegates to the RuleManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public List<RuleInfo> listRulesInfo() throws IOException {
  checkIfActive();
  return ssm.getRuleManager().listRulesInfo();
}
/**
 * Deletes a rule; when {@code dropPendingCmdlets} is true, cmdlets already
 * queued by the rule are dropped as well. Requires SSM to be ACTIVE.
 */
@Override
public void deleteRule(long ruleID, boolean dropPendingCmdlets)
    throws IOException {
  checkIfActive();
  ssm.getRuleManager().deleteRule(ruleID, dropPendingCmdlets);
}
/**
 * Activates a previously disabled rule; delegates to the RuleManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public void activateRule(long ruleID) throws IOException {
  checkIfActive();
  ssm.getRuleManager().activateRule(ruleID);
}
/**
 * Disables a rule; when {@code dropPendingCmdlets} is true, its pending
 * cmdlets are dropped as well. Requires SSM to be ACTIVE.
 */
@Override
public void disableRule(long ruleID, boolean dropPendingCmdlets)
    throws IOException {
  checkIfActive();
  ssm.getRuleManager().disableRule(ruleID, dropPendingCmdlets);
}
/**
 * Fetches info for a single cmdlet by id; delegates to the CmdletManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public CmdletInfo getCmdletInfo(long cmdletID) throws IOException {
  checkIfActive();
  return ssm.getCmdletManager().getCmdletInfo(cmdletID);
}
/**
 * Lists cmdlets belonging to rule {@code rid} that are in the given state;
 * delegates to the CmdletManager. Requires SSM to be ACTIVE.
 */
@Override
public List<CmdletInfo> listCmdletInfo(long rid, CmdletState cmdletState)
    throws IOException {
  checkIfActive();
  return ssm.getCmdletManager().listCmdletsInfo(rid, cmdletState);
}
/**
 * Activates a cmdlet by id; delegates to the CmdletManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public void activateCmdlet(long cmdletID) throws IOException {
  checkIfActive();
  ssm.getCmdletManager().activateCmdlet(cmdletID);
}
/**
 * Disables a cmdlet by id; delegates to the CmdletManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public void disableCmdlet(long cmdletID) throws IOException {
  checkIfActive();
  ssm.getCmdletManager().disableCmdlet(cmdletID);
}
/**
 * Deletes a cmdlet by id; delegates to the CmdletManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public void deleteCmdlet(long cmdletID) throws IOException {
  checkIfActive();
  ssm.getCmdletManager().deleteCmdlet(cmdletID);
}
/**
 * Fetches info for a single action by id; delegates to the CmdletManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public ActionInfo getActionInfo(long actionID) throws IOException {
  checkIfActive();
  return ssm.getCmdletManager().getActionInfo(actionID);
}
/**
 * Lists the most recently created actions, at most {@code maxNumActions};
 * delegates to the CmdletManager. Requires SSM to be ACTIVE.
 */
@Override
public List<ActionInfo> listActionInfoOfLastActions(int maxNumActions)
    throws IOException {
  checkIfActive();
  return ssm.getCmdletManager().listNewCreatedActions(maxNumActions);
}
/**
 * Forwards a client-reported file access event to the StatesManager.
 * Requires SSM to be ACTIVE.
 */
@Override
public void reportFileAccessEvent(FileAccessEvent event)
    throws IOException {
  checkIfActive();
  ssm.getStatesManager().reportFileAccessEvent(event);
}
/**
 * Submits a cmdlet described by a cmdlet string; delegates to the
 * CmdletManager. Requires SSM to be ACTIVE.
 *
 * @return the id assigned to the new cmdlet
 */
@Override
public long submitCmdlet(String cmd) throws IOException {
  checkIfActive();
  return ssm.getCmdletManager().submitCmdlet(cmd);
}
/**
 * Lists the action types supported by this server.
 * Note: unlike the other handlers this does not call checkIfActive(), so it
 * answers even before the engine is ACTIVE.
 */
@Override
public List<ActionDescriptor> listActionsSupported() throws IOException {
  return ActionRegistry.supportedActions();
}
/**
 * Looks up the state of a file in the metastore. MetaStoreException is
 * wrapped in IOException so RPC callers see a single exception type.
 * Requires SSM to be ACTIVE.
 */
@Override
public FileState getFileState(String filePath) throws IOException {
  checkIfActive();
  try {
    return ssm.getMetaStore().getFileState(filePath);
  } catch (MetaStoreException e) {
    throw new IOException(e);
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/main/java/org/smartdata/server/SmartDaemon.java | smart-server/src/main/java/org/smartdata/server/SmartDaemon.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import com.hazelcast.core.HazelcastInstance;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.server.cluster.ClusterMembershipListener;
import org.smartdata.server.cluster.HazelcastInstanceProvider;
import org.smartdata.server.cluster.HazelcastWorker;
import org.smartdata.server.cluster.ServerDaemon;
import org.smartdata.server.utils.HazelcastUtil;
import org.smartdata.utils.SecurityUtil;
import java.io.IOException;
/**
 * Cluster-aware entry point: the Hazelcast master member runs a full
 * SmartServer while the other members run a HazelcastWorker that talks to the
 * master's RPC address.
 */
public class SmartDaemon implements ServerDaemon {
  private static final Logger LOG = LoggerFactory.getLogger(SmartDaemon.class);

  private final String[] args;
  //Todo: maybe we can make worker as an interface
  private HazelcastWorker hazelcastWorker;

  public SmartDaemon(String[] args) {
    this.args = args;
  }

  /**
   * Starts this daemon. Performs Kerberos login (when enabled), then either
   * launches SmartServer (master member) or a HazelcastWorker (other members).
   */
  public void start() throws IOException, InterruptedException {
    SmartConf conf = new SmartConf();
    authentication(conf);
    HazelcastInstance instance = HazelcastInstanceProvider.getInstance();
    if (HazelcastUtil.isMaster(instance)) {
      SmartServer.main(args);
    } else {
      HadoopUtil.setSmartConfByHadoop(conf);
      // Point the RPC address at the current master's host while keeping the
      // locally configured port.
      String rpcHost = HazelcastUtil
          .getMasterMember(HazelcastInstanceProvider.getInstance())
          .getAddress()
          .getHost();
      String rpcPort = conf
          .get(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY,
              SmartConfKeys.SMART_SERVER_RPC_ADDRESS_DEFAULT)
          .split(":")[1];
      conf.set(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY, rpcHost + ":" + rpcPort);
      instance.getCluster().addMembershipListener(new ClusterMembershipListener(this, conf));
      this.hazelcastWorker = new HazelcastWorker(new SmartContext(conf));
      this.hazelcastWorker.start();
    }
  }

  /**
   * Promotes this member to master: stops the worker (if running) and starts
   * a full SmartServer. Invoked by ClusterMembershipListener on failover.
   */
  @Override
  public void becomeActive() {
    if (this.hazelcastWorker != null) {
      this.hazelcastWorker.stop();
    }
    SmartServer.main(args);
  }

  /**
   * Performs Kerberos keytab login when security is enabled; no-op otherwise.
   * Falls back to explicit kerberos settings when the Hadoop configuration
   * files cannot be located.
   */
  private void authentication(SmartConf conf) throws IOException {
    if (!SecurityUtil.isSecurityEnabled(conf)) {
      return;
    }
    // Load Hadoop configuration files
    try {
      HadoopUtil.loadHadoopConf(conf);
    } catch (IOException e) {
      LOG.info("Running in secure mode, but cannot find Hadoop configuration file. "
          + "Please config smart.hadoop.conf.path property in smart-site.xml.");
      conf.set("hadoop.security.authentication", "kerberos");
      conf.set("hadoop.security.authorization", "true");
    }
    UserGroupInformation.setConfiguration(conf);
    String keytabFilename = conf.get(SmartConfKeys.SMART_SERVER_KEYTAB_FILE_KEY);
    String principalConfig = conf.get(SmartConfKeys.SMART_SERVER_KERBEROS_PRINCIPAL_KEY);
    String principal =
        org.apache.hadoop.security.SecurityUtil.getServerPrincipal(principalConfig, (String) null);
    SecurityUtil.loginUsingKeytab(keytabFilename, principal);
  }

  public static void main(String[] args) {
    SmartDaemon daemon = new SmartDaemon(args);
    try {
      daemon.start();
    } catch (Exception e) {
      // Log through SLF4J instead of printStackTrace so startup failures
      // reach the server log files.
      LOG.error("Failed to start SmartDaemon", e);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-server/src/main/java/org/smartdata/server/SmartServer.java | smart-server/src/main/java/org/smartdata/server/SmartServer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.zeppelin.server.SmartZeppelinServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.bridge.SLF4JBridgeHandler;
import org.smartdata.SmartServiceState;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.utils.MetaStoreUtils;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.ConfManager;
import org.smartdata.server.engine.RuleManager;
import org.smartdata.server.engine.ServerContext;
import org.smartdata.server.engine.ServiceMode;
import org.smartdata.server.engine.StatesManager;
import org.smartdata.server.engine.cmdlet.agent.AgentMaster;
import org.smartdata.server.utils.GenericOptionsParser;
import static org.smartdata.SmartConstants.NUMBER_OF_SMART_AGENT;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
/**
* From this Smart Storage Management begins.
*/
public class SmartServer {
public static final Logger LOG = LoggerFactory.getLogger(SmartServer.class);
private ConfManager confMgr;
private final SmartConf conf;
private SmartEngine engine;
private ServerContext context;
private boolean enabled;
private SmartRpcServer rpcServer;
private SmartZeppelinServer zeppelinServer;
static {
SLF4JBridgeHandler.removeHandlersForRootLogger();
SLF4JBridgeHandler.install();
}
public SmartServer(SmartConf conf) {
this.conf = conf;
this.confMgr = new ConfManager(conf);
this.enabled = false;
}
public void initWith() throws Exception {
LOG.info("Start Init Smart Server");
HadoopUtil.setSmartConfByHadoop(conf);
MetaStore metaStore = MetaStoreUtils.getDBAdapter(conf);
context = new ServerContext(conf, metaStore);
initServiceMode(conf);
engine = new SmartEngine(context);
rpcServer = new SmartRpcServer(this, conf);
zeppelinServer = new SmartZeppelinServer(conf, engine);
LOG.info("Finish Init Smart Server");
}
public StatesManager getStatesManager() {
return engine.getStatesManager();
}
public RuleManager getRuleManager() {
return engine.getRuleManager();
}
public CmdletManager getCmdletManager() {
return engine.getCmdletManager();
}
public MetaStore getMetaStore() {
return this.context.getMetaStore();
}
public ServerContext getContext() {
return this.context;
}
public static StartupOption processArgs(String[] args, SmartConf conf) throws Exception {
if (args == null) {
args = new String[0];
}
StartupOption startOpt = StartupOption.REGULAR;
List<String> list = new ArrayList<>();
for (String arg : args) {
if (StartupOption.FORMAT.getName().equalsIgnoreCase(arg)) {
startOpt = StartupOption.FORMAT;
} else if (StartupOption.REGULAR.getName().equalsIgnoreCase(arg)) {
startOpt = StartupOption.REGULAR;
} else if (arg.equals("-h") || arg.equals("-help")) {
if (parseHelpArgument(new String[]{arg}, USAGE, System.out, true)) {
return null;
}
} else {
list.add(arg);
}
}
if (list != null) {
String remainArgs[] = list.toArray(new String[list.size()]);
new GenericOptionsParser(conf, remainArgs);
}
return startOpt;
}
public static void setAgentNum(SmartConf conf) {
String agentConfFile = conf.get(SmartConfKeys.SMART_CONF_DIR_KEY,
SmartConfKeys.SMART_CONF_DIR_DEFAULT) + "/agents";
Scanner sc = null;
try {
sc = new Scanner(new File(agentConfFile));
} catch (FileNotFoundException ex) {
LOG.error("Cannot find the config file: {}!", agentConfFile);
}
int num = 0;
while (sc.hasNextLine()) {
String host = sc.nextLine().trim();
if (!host.startsWith("#") && !host.isEmpty()) {
num++;
}
}
conf.setInt(NUMBER_OF_SMART_AGENT, num);
}
static SmartServer processWith(StartupOption startOption, SmartConf conf) throws Exception {
// New AgentMaster
AgentMaster.getAgentMaster(conf);
if (startOption == StartupOption.FORMAT) {
LOG.info("Formatting DataBase ...");
MetaStoreUtils.formatDatabase(conf);
LOG.info("Formatting DataBase finished successfully!");
} else {
MetaStoreUtils.checkTables(conf);
}
SmartServer ssm = new SmartServer(conf);
try {
ssm.initWith();
ssm.run();
return ssm;
} catch (Exception e) {
ssm.shutdown();
throw e;
}
}
private static final String USAGE =
"Usage: ssm [options]\n"
+ " -h\n\tShow this usage information.\n\n"
+ " -format\n\tFormat the configured database.\n\n"
+ " -D property=value\n"
+ "\tSpecify or overwrite an configure option.\n"
+ "\tE.g. -D smart.dfs.namenode.rpcserver=hdfs://localhost:43543\n";
private static final Options helpOptions = new Options();
private static final Option helpOpt = new Option("h", "help", false,
"get help information");
static {
helpOptions.addOption(helpOpt);
}
private static boolean parseHelpArgument(String[] args,
String helpDescription, PrintStream out, boolean printGenericCmdletUsage) {
try {
CommandLineParser parser = new PosixParser();
CommandLine cmdLine = parser.parse(helpOptions, args);
if (cmdLine.hasOption(helpOpt.getOpt())
|| cmdLine.hasOption(helpOpt.getLongOpt())) {
// should print out the help information
out.println(helpDescription + "\n");
return true;
}
} catch (ParseException pe) {
//LOG.warn("Parse help exception", pe);
return false;
}
return false;
}
/**
* Bring up all the daemon threads needed.
*
* @throws Exception
*/
private void run() throws Exception {
boolean enabled = conf.getBoolean(SmartConfKeys.SMART_DFS_ENABLED,
SmartConfKeys.SMART_DFS_ENABLED_DEFAULT);
if (enabled) {
startEngines();
}
rpcServer.start();
if (zeppelinServer != null) {
zeppelinServer.start();
}
}
private void startEngines() throws Exception {
enabled = true;
engine.init();
engine.start();
}
public void enable() throws IOException {
if (getSSMServiceState() == SmartServiceState.DISABLED) {
try {
startEngines();
} catch (Exception e) {
throw new IOException(e);
}
}
}
public SmartServiceState getSSMServiceState() {
if (!enabled) {
return SmartServiceState.DISABLED;
} else if (!engine.inSafeMode()) {
return SmartServiceState.ACTIVE;
} else {
return SmartServiceState.SAFEMODE;
}
}
public boolean isActive() {
return getSSMServiceState() == SmartServiceState.ACTIVE;
}
private void stop() throws Exception {
if (engine != null) {
engine.stop();
}
if (zeppelinServer != null) {
zeppelinServer.stop();
}
try {
if (rpcServer != null) {
rpcServer.stop();
}
} catch (Exception e) {
}
}
public void shutdown() {
try {
stop();
//join();
} catch (Exception e) {
LOG.error("SmartServer shutdown error", e);
}
}
private enum StartupOption {
FORMAT("-format"),
REGULAR("-regular");
private String name;
StartupOption(String arg) {
this.name = arg;
}
public String getName() {
return name;
}
}
private void initServiceMode(SmartConf conf) {
String serviceModeStr = conf.get(SmartConfKeys.SMART_SERVICE_MODE_KEY,
SmartConfKeys.SMART_SERVICE_MODE_DEFAULT);
try {
context.setServiceMode(ServiceMode.valueOf(serviceModeStr.trim().toUpperCase()));
} catch (IllegalStateException e) {
String errorMsg =
"Illegal service mode '"
+ serviceModeStr
+ "' set in property: "
+ SmartConfKeys.SMART_SERVICE_MODE_KEY
+ "!";
LOG.error(errorMsg);
throw e;
}
LOG.info("Initialized service mode: " + context.getServiceMode().getName() + ".");
}
public static SmartServer launchWith(SmartConf conf) throws Exception {
return launchWith(null, conf);
}
public static SmartServer launchWith(String[] args, SmartConf conf) throws Exception {
if (conf == null) {
conf = new SmartConf();
}
StartupOption startOption = processArgs(args, conf);
if (startOption == null) {
return null;
}
return processWith(startOption, conf);
}
public static void main(String[] args) {
int errorCode = 0; // if SSM exit normally then the errorCode is 0
try {
final SmartServer inst = launchWith(args, null);
if (inst != null) {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
LOG.info("Shutting down SmartServer ... ");
try {
inst.shutdown();
} catch (Exception e) {
LOG.error("Error while stopping servlet container", e);
}
LOG.info("SmartServer was down.");
}
});
//Todo: when to break
while (true) {
Thread.sleep(1000);
}
}
} catch (Exception e) {
LOG.error("Failed to create SmartServer", e);
errorCode = 1;
} finally {
System.exit(errorCode);
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/test/java/org/smartdata/server/engine/rule/TestRuleExecutor.java | smart-engine/src/test/java/org/smartdata/server/engine/rule/TestRuleExecutor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.metastore.dao.MetaStoreHelper;
import java.util.ArrayList;
import java.util.List;
/**
 * Verifies that RuleExecutor.generateSQL produces SQL that actually executes
 * against the metastore, for one and for multiple source tables.
 */
public class TestRuleExecutor extends TestDaoUtil {
  private MetaStoreHelper metaStoreHelper;
  private MetaStore adapter;

  @Before
  public void initActionDao() throws Exception {
    initDao();
    metaStoreHelper = new MetaStoreHelper(druidPool.getDataSource());
    adapter = new MetaStore(druidPool);
  }

  @After
  public void closeActionDao() throws Exception {
    closeDao();
    metaStoreHelper = null;
  }

  @Test
  public void generateSQL() throws Exception {
    String countFilter = "";
    String newTable = "test";
    List<String> tableNames = new ArrayList<>();
    tableNames.add("blank_access_count_info");
    String sql;
    /*sql = "CREATE TABLE actual as SELECT fid, SUM(count)" +
        " as count FROM (SELECT * FROM blank_access_count_info " +
        "UNION ALL SELECT * FROM blank_access_count_info " +
        "UNION ALL SELECT * FROM blank_access_count_info) as tmp GROUP BY fid";
    metaStoreHelper.execute(sql);
    metaStoreHelper.dropTable("actual");*/
    // Test single element
    sql = RuleExecutor.generateSQL(tableNames, newTable, countFilter, adapter);
    try {
      metaStoreHelper.execute(sql);
      metaStoreHelper.dropTable(newTable);
    } catch (Exception e) {
      // Fail with the underlying cause instead of a bare assertTrue(false),
      // which discarded all diagnostic information.
      Assert.fail("Generated SQL failed to execute: " + e.getMessage());
    }
    // Test multiple elements
    tableNames.add("blank_access_count_info");
    sql = RuleExecutor.generateSQL(tableNames, newTable, countFilter, adapter);
    try {
      metaStoreHelper.execute(sql);
      metaStoreHelper.dropTable(newTable);
    } catch (Exception e) {
      Assert.fail("Generated SQL failed to execute: " + e.getMessage());
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdletExecutor.java | smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdletExecutor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.action.EchoAction;
import org.smartdata.action.SmartAction;
import org.smartdata.conf.SmartConf;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.StatusMessage;
import org.smartdata.protocol.message.StatusReport;
import org.smartdata.protocol.message.StatusReporter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * Tests CmdletExecutor end-to-end: a cmdlet runs to completion and a running
 * cmdlet can be stopped, with status observed through a StatusReporter.
 */
public class TestCmdletExecutor {
  @Test
  public void testCmdletExecutor() throws InterruptedException {
    // Vector: the reporter is invoked from the scheduled-executor thread while
    // the test thread reads the list, so it must be thread-safe (the original
    // used an unsynchronized ArrayList here but Vector in testStop).
    final List<StatusMessage> statusMessages = new Vector<>();
    StatusReporter reporter =
        new StatusReporter() {
          @Override
          public void report(StatusMessage status) {
            statusMessages.add(status);
          }
        };
    CmdletExecutor executor = new CmdletExecutor(new SmartConf());
    StatusReportTask statusReportTask = new StatusReportTask(reporter, executor, new SmartConf());
    ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(
        statusReportTask, 100, 10, TimeUnit.MILLISECONDS);
    SmartAction action = new EchoAction();
    Map<String, String> args = new HashMap<>();
    args.put(EchoAction.PRINT_MESSAGE, "message success");
    action.setArguments(args);
    action.setActionId(101);
    Cmdlet cmdlet = new Cmdlet(Arrays.asList(action));
    executor.execute(cmdlet);
    Thread.sleep(2000);
    try {
      StatusReport lastReport = (StatusReport) statusMessages.get(statusMessages.size() - 1);
      ActionStatus status = lastReport.getActionStatuses().get(0);
      Assert.assertTrue(status.isFinished());
      Assert.assertNull(status.getThrowable());
    } finally {
      // Stop the reporting thread; the original leaked the scheduled executor.
      executorService.shutdownNow();
      executor.shutdown();
    }
  }

  /** Action that sleeps long enough to still be running when stopped. */
  private static class HangingAction extends SmartAction {
    @Override
    protected void execute() throws Exception {
      Thread.sleep(10000);
    }
  }

  @Test
  public void testStop() throws InterruptedException {
    final List<StatusMessage> statusMessages = new Vector<>();
    StatusReporter reporter =
        new StatusReporter() {
          @Override
          public void report(StatusMessage status) {
            statusMessages.add(status);
          }
        };
    CmdletExecutor executor = new CmdletExecutor(new SmartConf());
    StatusReportTask statusReportTask = new StatusReportTask(reporter, executor, new SmartConf());
    ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(
        statusReportTask, 100, 10, TimeUnit.MILLISECONDS);
    SmartAction action = new HangingAction();
    action.setActionId(101);
    Cmdlet cmdlet = new Cmdlet(Arrays.asList(action));
    cmdlet.setId(10);
    executor.execute(cmdlet);
    Thread.sleep(1000);
    executor.stop(10L);
    Thread.sleep(2000);
    try {
      // Stopping the cmdlet should finish the action with a throwable recorded.
      StatusReport lastReport = (StatusReport) statusMessages.get(statusMessages.size() - 1);
      ActionStatus status = lastReport.getActionStatuses().get(0);
      Assert.assertTrue(status.isFinished());
      Assert.assertNotNull(status.getThrowable());
    } finally {
      executorService.shutdownNow();
      executor.shutdown();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdletDescriptor.java | smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdletDescriptor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.model.CmdletDescriptor;
import java.util.HashMap;
import java.util.Map;
/**
* The tests is only about the cmdlet string translation.
*/
/**
 * The tests is only about the cmdlet string translation.
 * Uses assertEquals (expected, actual) rather than assertTrue on comparisons,
 * so failures report both values.
 */
public class TestCmdletDescriptor {
  @Test
  public void testStringToDescriptor() throws Exception {
    String cmd = "someaction -arg1 -arg2 /dir/foo ; cache -file /testFile; action3";
    CmdletDescriptor des = CmdletDescriptor.fromCmdletString(cmd);
    Assert.assertEquals(3, des.getActionSize());
    Assert.assertEquals("action3", des.getActionName(2));
    Assert.assertEquals(0, des.getActionArgs(2).size());
  }

  @Test
  public void testTrans() throws Exception {
    CmdletDescriptor des = new CmdletDescriptor();
    Map<String, String> args1 = new HashMap<>();
    args1.put("-filePath", "/dir/foo x");
    args1.put("-len", "100");
    Map<String, String> args2 = new HashMap<>();
    args2.put("-version", "");
    Map<String, String> args3 = new HashMap<>();
    args3.put("-storage", "ONE_SSD");
    args3.put("-time", "2016-03-19 19:42:00");
    des.addAction("action1", args1);
    des.addAction("action2", args2);
    des.addAction("action3", args3);

    // Round-trip: descriptor -> cmdlet string -> descriptor must be lossless.
    String cmdString = des.getCmdletString();
    CmdletDescriptor transDes = new CmdletDescriptor(cmdString);
    Assert.assertEquals(des.getActionSize(), transDes.getActionSize());
    Assert.assertEquals(des, transDes);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdletFactory.java | smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/TestCmdletFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.smartdata.SmartContext;
import org.smartdata.action.ActionException;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.model.LaunchAction;
import java.util.HashMap;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
// NOTE(review): the real assertions of this test are commented out below, so
// it currently only verifies that CmdletFactory can be constructed from a
// mocked SmartContext without throwing. Re-enable or remove — TODO confirm why
// the checks were disabled.
public class TestCmdletFactory {
  @Rule
  public ExpectedException expectedException = ExpectedException.none();

  @Test
  public void testCreateAction() throws ActionException {
    SmartContext smartContext = mock(SmartContext.class);
    SmartConf conf = new SmartConf();
    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "http://0.0.0.0:8088");
    conf.set(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY, "hdfs://0.0.0.0:8089");
    when(smartContext.getConf()).thenReturn(conf);
    CmdletFactory cmdletFactory = new CmdletFactory(smartContext);
    // launchAction1 is currently unused because the createAction checks below
    // are disabled.
    LaunchAction launchAction1 = new LaunchAction(10, "allssd", new HashMap<String, String>());
    // SmartAction action = cmdletFactory.createAction(launchAction1);
    // Assert.assertNotNull(action);
    // Assert.assertEquals(10, action.getActionId());
    //
    // LaunchAction launchAction = new LaunchAction(10, "test", new HashMap<String, String>());
    // expectedException.expect(ActionException.class);
    // Assert.assertNull(cmdletFactory.createAction(launchAction));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/agent/ActorSystemHarness.java | smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/agent/ActorSystemHarness.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import akka.actor.ActorSystem;
import com.typesafe.config.ConfigFactory;
import org.junit.After;
import org.junit.Before;
public abstract class ActorSystemHarness {
private ActorSystem system;
@Before
public void startActorSystem() {
system = ActorSystem.apply("Test", ConfigFactory.load(AgentConstants.AKKA_CONF_FILE));
}
@After
public void stopActorSystem() {
system.shutdown();
}
public ActorSystem getActorSystem() {
return system;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/agent/TestAgentMaster.java | smart-engine/src/test/java/org/smartdata/server/engine/cmdlet/agent/TestAgentMaster.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import org.junit.Test;
import org.smartdata.server.engine.cmdlet.agent.messages.AgentToMaster;
import org.smartdata.server.engine.cmdlet.agent.messages.MasterToAgent;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Verifies that an agent can register with the AgentMaster and gets back its
 * own instance id.
 */
public class TestAgentMaster {
  @Test
  public void testAgentMaster() throws Exception {
    AgentMaster master = AgentMaster.getAgentMaster();
    // Bounded, non-busy wait for the master actor to come up. The original
    // spun in a tight loop with no sleep and no timeout, burning a CPU and
    // hanging forever if startup failed.
    long deadlineMs = System.currentTimeMillis() + 30000;
    while (master.getMasterActor() == null) {
      if (System.currentTimeMillis() > deadlineMs) {
        throw new AssertionError("Timed out waiting for the master actor to start");
      }
      Thread.sleep(50);
    }
    String instId = "instance-0";
    Object answer = master.askMaster(AgentToMaster.RegisterNewAgent.getInstance(instId));
    assertTrue(answer instanceof MasterToAgent.AgentRegistered);
    MasterToAgent.AgentRegistered registered = (MasterToAgent.AgentRegistered) answer;
    assertEquals(instId, registered.getAgentId().getId());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/SmartEngine.java | smart-engine/src/main/java/org/smartdata/server/SmartEngine.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.AbstractService;
import org.smartdata.conf.SmartConf;
import org.smartdata.model.StorageCapacity;
import org.smartdata.model.Utilization;
import org.smartdata.server.cluster.NodeInfo;
import org.smartdata.server.engine.ActiveServerInfo;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.ConfManager;
import org.smartdata.server.engine.RuleManager;
import org.smartdata.server.engine.ServerContext;
import org.smartdata.server.engine.StandbyServerInfo;
import org.smartdata.server.engine.StatesManager;
import org.smartdata.server.engine.cmdlet.HazelcastExecutorService;
import org.smartdata.server.engine.cmdlet.agent.AgentExecutorService;
import org.smartdata.server.engine.cmdlet.agent.AgentInfo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.Set;
/**
 * Core engine of the Smart Server. Wires together the internal services
 * (state management, cmdlet management, rule management) and drives their
 * shared lifecycle: services are initialized and started in registration
 * order and stopped in reverse order.
 */
public class SmartEngine extends AbstractService {
  public static final Logger LOG = LoggerFactory.getLogger(SmartEngine.class);

  private ConfManager confMgr;
  private SmartConf conf;
  private ServerContext serverContext;
  private StatesManager statesMgr;
  private RuleManager ruleMgr;
  private CmdletManager cmdletManager;
  private AgentExecutorService agentService;
  private HazelcastExecutorService hazelcastService;
  // Services in init/start order; stop() walks this list backwards.
  private List<AbstractService> services = new ArrayList<>();

  public SmartEngine(ServerContext context) {
    super(context);
    this.serverContext = context;
    this.conf = serverContext.getConf();
  }

  /**
   * Creates and initializes the sub-services. The agent and hazelcast
   * executor services are registered with the cmdlet manager so cmdlets can
   * be dispatched to agents or standby servers.
   *
   * @throws IOException if any sub-service fails to initialize
   */
  @Override
  public void init() throws IOException {
    statesMgr = new StatesManager(serverContext);
    services.add(statesMgr);
    cmdletManager = new CmdletManager(serverContext);
    services.add(cmdletManager);
    agentService = new AgentExecutorService(conf, cmdletManager);
    hazelcastService = new HazelcastExecutorService(cmdletManager);
    cmdletManager.registerExecutorService(agentService);
    cmdletManager.registerExecutorService(hazelcastService);
    ruleMgr = new RuleManager(serverContext, statesMgr, cmdletManager);
    services.add(ruleMgr);
    for (AbstractService s : services) {
      s.init();
    }
  }

  /**
   * The engine is in safe mode before {@link #init()} has populated the
   * service list, and while any sub-service reports safe mode.
   */
  @Override
  public boolean inSafeMode() {
    if (services.isEmpty()) { // Not initiated
      return true;
    }
    for (AbstractService service : services) {
      if (service.inSafeMode()) {
        return true;
      }
    }
    return false;
  }

  @Override
  public void start() throws IOException {
    for (AbstractService s : services) {
      s.start();
    }
  }

  /** Stops sub-services in reverse registration order; errors are logged, not rethrown. */
  @Override
  public void stop() throws IOException {
    for (int i = services.size() - 1; i >= 0; i--) {
      stopEngineService(services.get(i));
    }
  }

  private void stopEngineService(AbstractService service) {
    try {
      if (service != null) {
        service.stop();
      }
    } catch (IOException e) {
      LOG.error("Error while stopping "
          + service.getClass().getCanonicalName(), e);
    }
  }

  public List<StandbyServerInfo> getStandbyServers() {
    return hazelcastService.getStandbyServers();
  }

  public Set<String> getAgentHosts() {
    return conf.getAgentHosts();
  }

  public Set<String> getServerHosts() {
    return conf.getServerHosts();
  }

  public List<AgentInfo> getAgents() {
    return agentService.getAgentInfos();
  }

  public ConfManager getConfMgr() {
    return confMgr;
  }

  public SmartConf getConf() {
    return serverContext.getConf();
  }

  public StatesManager getStatesManager() {
    return statesMgr;
  }

  public RuleManager getRuleManager() {
    return ruleMgr;
  }

  public CmdletManager getCmdletManager() {
    return cmdletManager;
  }

  /** Current utilization of the given storage resource. */
  public Utilization getUtilization(String resourceName) throws IOException {
    return getStatesManager().getStorageUtilization(resourceName);
  }

  /**
   * Historical utilization of a storage resource within [begin, end] at the
   * given granularity (milliseconds). If begin == end and the instant is
   * within 5 ms of "now", the live value is returned instead of hitting the
   * metastore.
   *
   * @throws IOException if the metastore query or live lookup fails
   */
  public List<Utilization> getHistUtilization(String resourceName, long granularity,
      long begin, long end) throws IOException {
    long now = System.currentTimeMillis();
    if (begin == end && Math.abs(begin - now) <= 5) {
      return Arrays.asList(getUtilization(resourceName));
    }
    List<StorageCapacity> cs = serverContext.getMetaStore().getStorageHistoryData(
        resourceName, granularity, begin, end);
    List<Utilization> us = new ArrayList<>(cs.size());
    for (StorageCapacity c : cs) {
      us.add(new Utilization(c.getTimeStamp(), c.getCapacity(), c.getUsed()));
    }
    return us;
  }

  // NOTE: the unused private helper getFackData (fake random utilization data,
  // never referenced in this class) was removed as dead code.

  /** All SSM nodes: the active server, standby servers and agents. */
  public List<NodeInfo> getSsmNodesInfo() {
    List<NodeInfo> ret = new LinkedList<>();
    ret.add(ActiveServerInfo.getInstance());
    ret.addAll(getStandbyServers());
    ret.addAll(getAgents());
    return ret;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/utils/HazelcastUtil.java | smart-engine/src/main/java/org/smartdata/server/utils/HazelcastUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.utils;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.Member;
import java.util.ArrayList;
import java.util.List;
/**
 * Static helpers for reasoning about the Hazelcast cluster topology.
 * The "master" is taken to be the first member in the cluster's member set
 * (Hazelcast orders members by join time, oldest first).
 */
public class HazelcastUtil {

  /** Utility class — not instantiable. */
  private HazelcastUtil() {
  }

  // Todo: find a better way to determine whether instance is the master node
  public static boolean isMaster(HazelcastInstance instance) {
    Member master = getMasterMember(instance);
    return master.getSocketAddress().equals(instance.getLocalEndpoint().getSocketAddress());
  }

  /** @return the oldest cluster member, treated as master */
  public static Member getMasterMember(HazelcastInstance instance) {
    return instance.getCluster().getMembers().iterator().next();
  }

  /** @return every cluster member except the master */
  public static List<Member> getWorkerMembers(HazelcastInstance instance) {
    List<Member> members = new ArrayList<>();
    Member master = getMasterMember(instance);
    for (Member member : instance.getCluster().getMembers()) {
      if (!master.equals(member)) {
        members.add(member);
      }
    }
    return members;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/utils/GenericOptionsParser.java | smart-engine/src/main/java/org/smartdata/server/utils/GenericOptionsParser.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.utils;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * Parses generic command-line options (currently only {@code -D property=value})
 * and applies them to a Hadoop {@link Configuration}. Non-generic arguments are
 * preserved and can be retrieved via {@link #getRemainingArgs()}.
 */
public class GenericOptionsParser {
  // Configuration updated in place with any -D key=value pairs.
  private Configuration conf;
  // Parsed command line; null if parsing failed (help is printed instead).
  private CommandLine cmdLine;

  /** Parses {@code args} with no extra application-specific options. */
  public GenericOptionsParser(Configuration conf, String[] args) throws IOException {
    this(conf, new Options(), args);
  }

  /**
   * Parses {@code args} against {@code options} plus the generic -D option.
   *
   * @param conf    configuration to receive -D overrides
   * @param options caller-supplied options, extended with the generic ones
   * @param args    raw command-line arguments
   */
  public GenericOptionsParser(Configuration conf, Options options, String[] args)
      throws IOException {
    this.conf = conf;
    parseGeneralOptions(options, args);
  }

  /** @return arguments left over after option parsing; empty if parsing failed */
  public String[] getRemainingArgs() {
    return (cmdLine == null) ? new String[] {} : cmdLine.getArgs();
  }

  public Configuration getConfiguration() {
    return conf;
  }

  public CommandLine getCommandLine() {
    return cmdLine;
  }

  /** Adds the generic {@code -D property=value} option to the option set. */
  private Options buildGeneralOptions(Options opts) {
    Option property =
        OptionBuilder.withArgName("property=value")
            .hasArg()
            .withDescription("use value for given property")
            .create('D');
    opts.addOption(property);
    return opts;
  }

  /** Applies every parsed -D key=value pair to the configuration. */
  private void processGeneralOptions(CommandLine line) throws IOException {
    if (line.hasOption('D')) {
      String[] property = line.getOptionValues('D');
      for (String prop : property) {
        // Split on the first '=' only, so values may themselves contain '='.
        String[] keyval = prop.split("=", 2);
        if (keyval.length == 2) {
          conf.set(keyval[0], keyval[1], "from command line");
        }
      }
    }
  }

  /**
   * On Windows, re-joins -D arguments that the shell may have split apart
   * (e.g. {@code -D key =value} or {@code -Dkey value}) back into the
   * {@code -Dkey=value} form the parser expects. Returns args unchanged on
   * other platforms.
   */
  private String[] preProcessForWindows(String[] args) {
    boolean isWindows = false;
    String osName = System.getProperty("os.name");
    if (osName.startsWith("Windows")) {
      isWindows = true;
    }
    if (!isWindows) {
      return args;
    }
    if (args == null) {
      return null;
    }
    List<String> newArgs = new ArrayList<String>(args.length);
    for (int i = 0; i < args.length; i++) {
      String prop = null;
      if (args[i].equals("-D")) {
        // Bare "-D": keep it and treat the next token as the property.
        newArgs.add(args[i]);
        if (i < args.length - 1) {
          prop = args[++i];
        }
      } else if (args[i].startsWith("-D")) {
        prop = args[i];
      } else {
        newArgs.add(args[i]);
      }
      if (prop != null) {
        if (prop.contains("=")) {
          // everything good
        } else {
          // Property and value were split into two tokens; rejoin them.
          if (i < args.length - 1) {
            prop += "=" + args[++i];
          }
        }
        newArgs.add(prop);
      }
    }
    return newArgs.toArray(new String[newArgs.size()]);
  }

  /**
   * Runs the actual parse. On a parse error the usage help is printed and
   * cmdLine stays null — no exception is propagated to the caller.
   */
  private void parseGeneralOptions(Options opts, String[] args) throws IOException {
    opts = buildGeneralOptions(opts);
    CommandLineParser parser = new GnuParser();
    try {
      cmdLine = parser.parse(opts, preProcessForWindows(args), true);
      processGeneralOptions(cmdLine);
    } catch (ParseException e) {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp("general options are: ", opts);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/utils/tools/GetSsmVersionInfo.java | smart-engine/src/main/java/org/smartdata/server/utils/tools/GetSsmVersionInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.utils.tools;
import org.smartdata.versioninfo.SsmVersionInfo;
/**
 * Command-line tool that prints the SSM version info string to stdout and
 * exits with status 0. Intended to be invoked from launch scripts.
 */
public class GetSsmVersionInfo {
  public static void main(String[] args) {
    System.out.println(SsmVersionInfo.infoString());
    System.exit(0);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/utils/tools/GetConf.java | smart-engine/src/main/java/org/smartdata/server/utils/tools/GetConf.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.utils.tools;
import com.hazelcast.config.ClasspathXmlConfig;
import java.io.PrintStream;
import java.util.List;
/**
 * Command-line tool that prints SSM cluster configuration. Supported
 * options are listed in {@link #USAGE}. Exit code 0 on success, 1 on
 * missing/unknown option or error.
 */
public class GetConf {
  public static final String USAGE =
      "USAGE: GetConf <Option>\n"
          + "\t'Option' can be:\n"
          + "\t\tHelp Show this help message\n"
          + "\t\tSmartServers List SmartServers for the cluster(defined in hazelcast.xml)\n";

  /** Utility class — not instantiable. */
  private GetConf() {
  }

  /**
   * Prints the TCP/IP cluster members declared in hazelcast.xml, one per line.
   *
   * @param p stream to print to
   * @return 0 (reserved for future error signalling)
   */
  public static int getSmartServers(PrintStream p) {
    ClasspathXmlConfig conf = new ClasspathXmlConfig("hazelcast.xml");
    List<String> ret = conf.getNetworkConfig().getJoin().getTcpIpConfig().getMembers();
    for (String s : ret) {
      p.println(s);
    }
    return 0;
  }

  public static void main(String[] args) {
    int exitCode = 0;
    if (args == null || args.length == 0) {
      System.err.println(USAGE);
      System.exit(1);
    }
    try {
      if (args[0].equalsIgnoreCase("SmartServers")) {
        exitCode = getSmartServers(System.out);
      } else if (args[0].equalsIgnoreCase("Help")) {
        System.out.println(USAGE);
      } else {
        // Previously an unrecognized option silently exited 0; report it.
        System.err.println("Unknown option: " + args[0]);
        System.err.println(USAGE);
        exitCode = 1;
      }
    } catch (Throwable t) {
      // Print the throwable itself: getMessage() may be null for some errors.
      System.err.println(t);
      exitCode = 1;
    }
    System.exit(exitCode);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/cluster/ActiveServerNodeCmdletMetrics.java | smart-engine/src/main/java/org/smartdata/server/cluster/ActiveServerNodeCmdletMetrics.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.cluster;
/**
* Contains metrics specific for active SSM server related with cmdlet execution.
*
*/
/**
 * Cmdlet-execution metrics specific to the active SSM server: queue depths
 * and capacities for the schedule, dispatch and execution stages. Plain
 * bean; values are set by the engine and read for reporting.
 */
public class ActiveServerNodeCmdletMetrics extends NodeCmdletMetrics {
  // Schedule stage: cmdlets waiting to be scheduled, and the queue capacity.
  private int maxPendingSchedule;
  private int numPendingSchedule;
  // Dispatch stage: cmdlets waiting to be dispatched, and the queue capacity.
  private int maxPendingDispatch;
  private int numPendingDispatch;
  // Execution stage: cmdlets currently running, and the concurrency limit.
  private int numInExecution;
  private int maxInExecution;

  public int getMaxPendingSchedule() {
    return maxPendingSchedule;
  }

  public void setMaxPendingSchedule(int maxPendingSchedule) {
    this.maxPendingSchedule = maxPendingSchedule;
  }

  public int getNumPendingSchedule() {
    return numPendingSchedule;
  }

  public void setNumPendingSchedule(int numPendingSchedule) {
    this.numPendingSchedule = numPendingSchedule;
  }

  public int getMaxPendingDispatch() {
    return maxPendingDispatch;
  }

  public void setMaxPendingDispatch(int maxPendingDispatch) {
    this.maxPendingDispatch = maxPendingDispatch;
  }

  public int getNumPendingDispatch() {
    return numPendingDispatch;
  }

  public void setNumPendingDispatch(int numPendingDispatch) {
    this.numPendingDispatch = numPendingDispatch;
  }

  public int getNumInExecution() {
    return numInExecution;
  }

  public void setNumInExecution(int numInExecution) {
    this.numInExecution = numInExecution;
  }

  public int getMaxInExecution() {
    return maxInExecution;
  }

  public void setMaxInExecution(int maxInExecution) {
    this.maxInExecution = maxInExecution;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastInstanceProvider.java | smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastInstanceProvider.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.cluster;
import com.hazelcast.config.ClasspathXmlConfig;
import com.hazelcast.config.JoinConfig;
import com.hazelcast.config.NetworkConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConfKeys;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;
/**
 * Lazily creates and caches the process-wide {@link HazelcastInstance},
 * configured from hazelcast.xml plus the hosts listed in the SSM "servers"
 * config file. A JVM shutdown hook tears the instance down.
 */
public class HazelcastInstanceProvider {
  private static final String CONFIG_FILE = "hazelcast.xml";
  private static HazelcastInstance instance;
  private static final Logger LOG = LoggerFactory.getLogger(HazelcastInstanceProvider.class);

  static {
    // Route hazelcast's own logging through slf4j unless the user already
    // chose a logging backend.
    String typeKey = "hazelcast.logging.type";
    String loggerType = System.getProperty(typeKey);
    if (loggerType == null) {
      System.setProperty(typeKey, "slf4j");
    }
  }

  private HazelcastInstanceProvider() {}

  /**
   * Adds every non-comment, non-empty host line from the "servers" file under
   * the SSM conf dir as a TCP/IP cluster member. A missing file is logged and
   * otherwise ignored.
   */
  public static void addMemberConfig(ClasspathXmlConfig config) {
    NetworkConfig network = config.getNetworkConfig();
    JoinConfig join = network.getJoin();
    String serverConfFile = new Configuration().get(SmartConfKeys.SMART_CONF_DIR_KEY,
        SmartConfKeys.SMART_CONF_DIR_DEFAULT) + "/servers";
    // try-with-resources: the original leaked the Scanner (never closed).
    try (Scanner sc = new Scanner(new File(serverConfFile))) {
      while (sc.hasNextLine()) {
        String host = sc.nextLine().trim();
        if (!host.startsWith("#") && !host.isEmpty()) {
          join.getTcpIpConfig().addMember(host);
        }
      }
    } catch (FileNotFoundException ex) {
      LOG.error("Cannot find the config file: {}!", serverConfFile);
    }
  }

  /**
   * Returns the shared instance, creating it on first use. Synchronized so
   * concurrent first calls cannot create two hazelcast instances.
   */
  public static synchronized HazelcastInstance getInstance() {
    if (instance == null) {
      ClasspathXmlConfig config = new ClasspathXmlConfig(CONFIG_FILE);
      addMemberConfig(config);
      instance = Hazelcast.newHazelcastInstance(config);
      Runtime.getRuntime().addShutdownHook(new Thread(){
        @Override public void run() {
          instance.getLifecycleService().shutdown();
        }
      });
    }
    return instance;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastWorker.java | smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastWorker.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.cluster;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.ITopic;
import com.hazelcast.core.Message;
import com.hazelcast.core.MessageListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.action.ActionException;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.model.CmdletState;
import org.smartdata.protocol.message.CmdletStatusUpdate;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.protocol.message.StatusMessage;
import org.smartdata.protocol.message.StatusReporter;
import org.smartdata.protocol.message.StopCmdlet;
import org.smartdata.server.engine.cmdlet.CmdletExecutor;
import org.smartdata.server.engine.cmdlet.CmdletFactory;
import org.smartdata.server.engine.cmdlet.HazelcastExecutorService;
import org.smartdata.server.engine.cmdlet.StatusReportTask;
import java.io.Serializable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
// Todo: recover and reconnect when master is offline
/**
 * Worker-side counterpart of {@code HazelcastExecutorService}: runs on a
 * standby server, listens on its per-member hazelcast topic for launch/stop
 * commands from the master, executes cmdlets locally and publishes status
 * updates back on the shared status topic.
 */
// Todo: recover and reconnect when master is offline
public class HazelcastWorker implements StatusReporter {
  private static final Logger LOG = LoggerFactory.getLogger(HazelcastWorker.class);
  private final HazelcastInstance instance;
  // Drives the periodic status-report task; single-threaded.
  private ScheduledExecutorService executorService;
  // Topic on which the master addresses this specific worker (suffixed with our uuid).
  private ITopic<Serializable> masterMessages;
  // Shared topic for publishing status back to the master.
  private ITopic<StatusMessage> statusTopic;
  private CmdletExecutor cmdletExecutor;
  private CmdletFactory factory;
  private SmartConf smartConf;

  /**
   * Wires the worker into the hazelcast cluster: creates the local executor
   * and factory, then subscribes to this member's command topic.
   */
  public HazelcastWorker(SmartContext smartContext) {
    this.smartConf = smartContext.getConf();
    this.factory = new CmdletFactory(smartContext, this);
    this.cmdletExecutor = new CmdletExecutor(smartContext.getConf());
    this.executorService = Executors.newSingleThreadScheduledExecutor();
    this.instance = HazelcastInstanceProvider.getInstance();
    this.statusTopic = instance.getTopic(HazelcastExecutorService.STATUS_TOPIC);
    String instanceId = instance.getCluster().getLocalMember().getUuid();
    this.masterMessages =
        instance.getTopic(HazelcastExecutorService.WORKER_TOPIC_PREFIX + instanceId);
    this.masterMessages.addMessageListener(new MasterMessageListener());
  }

  /** Starts periodic status reporting at the configured interval (ms). */
  public void start() {
    int reportPeriod = smartConf.getInt(SmartConfKeys.SMART_STATUS_REPORT_PERIOD_KEY,
        SmartConfKeys.SMART_STATUS_REPORT_PERIOD_DEFAULT);
    StatusReportTask statusReportTask = new StatusReportTask(this, cmdletExecutor, smartConf);
    executorService.scheduleAtFixedRate(
        statusReportTask, 1000, reportPeriod, TimeUnit.MILLISECONDS);
  }

  /** Stops status reporting and the local cmdlet executor. */
  public void stop() {
    executorService.shutdown();
    cmdletExecutor.shutdown();
  }

  /** Publishes a status message to the master via the shared status topic. */
  @Override
  public void report(StatusMessage status) {
    statusTopic.publish(status);
  }

  /**
   * Handles commands from the master: launches cmdlets (reporting FAILED if
   * construction fails) and stops running ones.
   */
  private class MasterMessageListener implements MessageListener<Serializable> {
    @Override
    public void onMessage(Message<Serializable> message) {
      Serializable msg = message.getMessageObject();
      if (msg instanceof LaunchCmdlet) {
        LaunchCmdlet launchCmdlet = (LaunchCmdlet) msg;
        try {
          cmdletExecutor.execute(factory.createCmdlet(launchCmdlet));
        } catch (ActionException e) {
          LOG.error("Failed to create cmdlet from {}", launchCmdlet, e);
          report(
              new CmdletStatusUpdate(
                  launchCmdlet.getCmdletId(), System.currentTimeMillis(), CmdletState.FAILED));
        }
      } else if (msg instanceof StopCmdlet) {
        StopCmdlet stopCmdlet = (StopCmdlet) msg;
        cmdletExecutor.stop(stopCmdlet.getCmdletId());
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/cluster/NodeCmdletMetrics.java | smart-engine/src/main/java/org/smartdata/server/cluster/NodeCmdletMetrics.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.cluster;
/**
* Contains metrics for SSM nodes related with cmdlet execution.
* These metrics are not persisted. So after cluster restarts,
* they will be re-counted.
*/
/**
 * Contains metrics for SSM nodes related with cmdlet execution.
 * These metrics are not persisted, so after a cluster restart they are
 * re-counted. The execution counters are mutated from executor threads;
 * their accessors are synchronized on this instance so readers always see
 * the latest increments (the inc/finish methods were already synchronized,
 * but the original getters/setters were not, allowing stale reads).
 */
public class NodeCmdletMetrics {
  // Identity of the node these metrics describe.
  private NodeInfo nodeInfo;
  // Registration timestamp (epoch millis) — set once at registration.
  private long registTime;
  // Number of executor slots on the node — set once at registration.
  private int numExecutors;
  // Running counters, guarded by 'this'.
  private long cmdletsExecuted;
  private int cmdletsInExecution;

  public NodeInfo getNodeInfo() {
    return nodeInfo;
  }

  public void setNodeInfo(NodeInfo nodeInfo) {
    this.nodeInfo = nodeInfo;
  }

  public long getRegistTime() {
    return registTime;
  }

  public void setRegistTime(long registTime) {
    this.registTime = registTime;
  }

  public int getNumExecutors() {
    return numExecutors;
  }

  public void setNumExecutors(int numExecutors) {
    this.numExecutors = numExecutors;
  }

  public synchronized long getCmdletsExecuted() {
    return cmdletsExecuted;
  }

  public synchronized void setCmdletsExecuted(long cmdletsExecuted) {
    this.cmdletsExecuted = cmdletsExecuted;
  }

  public synchronized void incCmdletsExecuted() {
    cmdletsExecuted++;
  }

  public synchronized int getCmdletsInExecution() {
    return cmdletsInExecution;
  }

  public synchronized void setCmdletsInExecution(int cmdletsInExecution) {
    this.cmdletsInExecution = cmdletsInExecution;
  }

  public synchronized void incCmdletsInExecution() {
    cmdletsInExecution++;
  }

  /** Marks one cmdlet finished: bumps the executed count and drains one in-execution slot. */
  public synchronized void finishCmdlet() {
    cmdletsExecuted++;
    if (cmdletsInExecution > 0) { // TODO: restore
      cmdletsInExecution--;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/cluster/NodeInfo.java | smart-engine/src/main/java/org/smartdata/server/cluster/NodeInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.cluster;
import org.smartdata.model.ExecutorType;
/**
* Represent each nodes that SSM services (SmartServers and SmartAgents) running on.
*
*/
/**
 * Represents a node that an SSM service (SmartServer or SmartAgent) runs on,
 * identified by an id and a "host:port" location string.
 */
public class NodeInfo {
  private String id;
  private String host;
  private int port;
  private ExecutorType executorType;

  /**
   * @param id           unique node identifier
   * @param location     "host" or "host:port"; port defaults to 0 when absent
   * @param executorType kind of executor running on the node
   */
  public NodeInfo(String id, String location, ExecutorType executorType) {
    this.id = id;
    this.executorType = executorType;
    doSetLocation(location);
  }

  public String getId() {
    return id;
  }

  public void setId(String id) {
    this.id = id;
  }

  /** @return the location rendered as "host:port" */
  public String getLocation() {
    return host + ":" + port;
  }

  public void setLocation(String location) {
    doSetLocation(location);
  }

  /** Parses "host[:port]" into the host/port fields; null resets both. */
  private void doSetLocation(String location) {
    host = null;
    port = 0;
    if (location == null) {
      return;
    }
    String[] parts = location.split(":");
    if (parts.length > 1) {
      port = Integer.valueOf(parts[1]);
    }
    host = parts[0];
  }

  public String getHost() {
    return host;
  }

  public int getPort() {
    return port;
  }

  public ExecutorType getExecutorType() {
    return executorType;
  }

  @Override
  public String toString() {
    return String.format("{id=%s, location=%s, executorType=%s}", id, getLocation(), executorType);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/cluster/ClusterMembershipListener.java | smart-engine/src/main/java/org/smartdata/server/cluster/ClusterMembershipListener.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.cluster;
import com.hazelcast.core.MemberAttributeEvent;
import com.hazelcast.core.MembershipEvent;
import com.hazelcast.core.MembershipListener;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.server.utils.HazelcastUtil;
/**
 * Reacts to Hazelcast cluster membership changes for an SSM server:
 * keeps the configured RPC address pointed at the current master and
 * promotes this node to active if it becomes the master.
 */
public class ClusterMembershipListener implements MembershipListener {
  private final ServerDaemon daemon;
  // final: the reference never changes; the configuration object itself is mutated.
  private final SmartConf conf;

  public ClusterMembershipListener(ServerDaemon daemon, SmartConf conf) {
    this.daemon = daemon;
    this.conf = conf;
  }

  @Override
  public void memberAdded(MembershipEvent membershipEvent) {
    // No action needed when a member joins.
  }

  /**
   * Every time get the member remove event, need reset smart server rpc address
   * with master host's name (even though master host doesn't change in some cases).
   *
   * @param membershipEvent the Hazelcast event describing the removed member
   */
  @Override
  public void memberRemoved(MembershipEvent membershipEvent) {
    // Take the host from the current master, keep the configured port.
    String rpcHost = HazelcastUtil
        .getMasterMember(HazelcastInstanceProvider.getInstance())
        .getAddress()
        .getHost();
    String rpcPort = conf
        .get(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY,
            SmartConfKeys.SMART_SERVER_RPC_ADDRESS_DEFAULT)
        .split(":")[1];
    conf.set(SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY, rpcHost + ":" + rpcPort);
    if (HazelcastUtil.isMaster(HazelcastInstanceProvider.getInstance())) {
      // This node is now the master: promote it to the active role.
      this.daemon.becomeActive();
    }
  }

  @Override
  public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
    // Attribute changes do not affect master election; nothing to do.
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/cluster/ServerDaemon.java | smart-engine/src/main/java/org/smartdata/server/cluster/ServerDaemon.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.cluster;
/** Callback surface for a server process that can be promoted to the active role. */
public interface ServerDaemon {
  /** Invoked when this daemon should take over as the active SSM server. */
  void becomeActive();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/RuleManager.java | smart-engine/src/main/java/org/smartdata/server/engine/RuleManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.AbstractService;
import org.smartdata.action.ActionRegistry;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.DetailedRuleInfo;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.model.WhitelistHelper;
import org.smartdata.model.rule.RuleExecutorPluginManager;
import org.smartdata.model.rule.RulePluginManager;
import org.smartdata.model.rule.TimeBasedScheduleInfo;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.rule.parser.SmartRuleStringParser;
import org.smartdata.rule.parser.TranslationContext;
import org.smartdata.server.engine.rule.ErasureCodingPlugin;
import org.smartdata.server.engine.rule.ExecutorScheduler;
import org.smartdata.server.engine.rule.FileCopy2S3Plugin;
import org.smartdata.server.engine.rule.FileCopyDrPlugin;
import org.smartdata.server.engine.rule.RuleExecutor;
import org.smartdata.server.engine.rule.RuleInfoRepo;
import org.smartdata.server.engine.rule.SmallFilePlugin;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
/**
* Manage and execute rules. We can have 'cache' here to decrease the needs to execute a SQL query.
*/
public class RuleManager extends AbstractService {
  private ServerContext serverContext;
  private StatesManager statesManager;
  private CmdletManager cmdletManager;
  private MetaStore metaStore;
  // Set by stop() and read by isClosed() from rule-executor threads; volatile for visibility.
  private volatile boolean isClosed = false;
  public static final Logger LOG = LoggerFactory.getLogger(RuleManager.class.getName());

  // All known rules keyed by rule id; deleted rules may linger in state DELETED.
  private ConcurrentHashMap<Long, RuleInfoRepo> mapRules = new ConcurrentHashMap<>();

  // Schedules periodic execution of rule executors.
  public ExecutorScheduler execScheduler;

  public RuleManager(
      ServerContext context, StatesManager statesManager, CmdletManager cmdletManager) {
    super(context);
    int numExecutors =
        context
            .getConf()
            .getInt(
                SmartConfKeys.SMART_RULE_EXECUTORS_KEY, SmartConfKeys.SMART_RULE_EXECUTORS_DEFAULT);
    execScheduler = new ExecutorScheduler(numExecutors);
    this.statesManager = statesManager;
    this.cmdletManager = cmdletManager;
    this.serverContext = context;
    this.metaStore = context.getMetaStore();
    if (serverContext.getServiceMode() == ServiceMode.HDFS) {
      // These rule plugins only apply when SSM manages an HDFS cluster.
      RuleExecutorPluginManager.addPlugin(new FileCopyDrPlugin(context.getMetaStore()));
      RuleExecutorPluginManager.addPlugin(new FileCopy2S3Plugin());
      RuleExecutorPluginManager.addPlugin(new SmallFilePlugin(context, cmdletManager));
      RuleExecutorPluginManager.addPlugin(new ErasureCodingPlugin(context));
    }
  }

  /**
   * Submits a rule to RuleManager.
   *
   * @param rule the rule text
   * @param initState initial state; must be ACTIVE, DRYRUN or DISABLED
   * @return the id assigned to the new rule
   * @throws IOException if the state is invalid, the rule fails validation,
   *     a path is outside the whitelist, or the metastore insert fails
   */
  public long submitRule(String rule, RuleState initState) throws IOException {
    LOG.debug("Received Rule -> [" + rule + "]");
    if (initState != RuleState.ACTIVE
        && initState != RuleState.DISABLED
        && initState != RuleState.DRYRUN) {
      throw new IOException(
          "Invalid initState = "
              + initState
              + ", it MUST be one of ["
              + RuleState.ACTIVE
              + ", "
              + RuleState.DRYRUN
              + ", "
              + RuleState.DISABLED
              + "]");
    }
    TranslateResult tr = doCheckRule(rule, null);
    doCheckActions(tr.getCmdDescriptor());
    // Every path matched by the rule must be in the whitelist, if one is enabled.
    if (WhitelistHelper.isEnabled(serverContext.getConf())) {
      for (String path : tr.getGlobPathCheck()) {
        if (!WhitelistHelper.isInWhitelist(path, serverContext.getConf())) {
          throw new IOException("Path " + path + " is not in the whitelist.");
        }
      }
    }
    RuleInfo.Builder builder = RuleInfo.newBuilder();
    builder.setRuleText(rule).setState(initState);
    RuleInfo ruleInfo = builder.build();
    RulePluginManager.onAddingNewRule(ruleInfo, tr);
    try {
      metaStore.insertNewRule(ruleInfo);
    } catch (MetaStoreException e) {
      throw new IOException("RuleText = " + rule, e);
    }
    RuleInfoRepo infoRepo = new RuleInfoRepo(ruleInfo, metaStore, serverContext.getConf());
    mapRules.put(ruleInfo.getId(), infoRepo);
    submitRuleToScheduler(infoRepo.launchExecutor(this));
    RulePluginManager.onNewRuleAdded(ruleInfo, tr);
    return ruleInfo.getId();
  }

  /** Verifies every action used by the cmdlet descriptor is a registered action. */
  private void doCheckActions(CmdletDescriptor cd) throws IOException {
    // Collect all unsupported actions so the caller sees every problem at once.
    StringBuilder error = new StringBuilder();
    for (int i = 0; i < cd.getActionSize(); i++) {
      if (!ActionRegistry.registeredAction(cd.getActionName(i))) {
        error.append("Action '").append(cd.getActionName(i)).append("' not supported.\n");
      }
    }
    if (error.length() > 0) {
      throw new IOException(error.toString());
    }
  }

  /** Parses the rule text; throws IOException if it is not a valid rule. */
  private TranslateResult doCheckRule(String rule, TranslationContext ctx) throws IOException {
    SmartRuleStringParser parser = new SmartRuleStringParser(rule, ctx, serverContext.getConf());
    return parser.translate();
  }

  /** Validates the rule text without submitting it. */
  public void checkRule(String rule) throws IOException {
    doCheckRule(rule, null);
  }

  public MetaStore getMetaStore() {
    return metaStore;
  }

  /**
   * Delete a rule in SSM. if dropPendingCmdlets equals false then the rule record will still be
   * kept in Table 'rules', the record will be deleted sometime later.
   *
   * @param ruleID id of the rule to delete
   * @param dropPendingCmdlets pending cmdlets triggered by the rule will be discarded if true.
   * @throws IOException if the rule does not exist
   */
  public void deleteRule(long ruleID, boolean dropPendingCmdlets) throws IOException {
    RuleInfoRepo infoRepo = checkIfExists(ruleID);
    try {
      if (dropPendingCmdlets && getCmdletManager() != null) {
        getCmdletManager().deleteCmdletByRule(ruleID);
      }
    } finally {
      // Always mark the rule deleted, even if dropping its cmdlets failed.
      infoRepo.delete();
    }
  }

  /** Activates a disabled/dry-run rule and resubmits it for scheduling. */
  public void activateRule(long ruleID) throws IOException {
    RuleInfoRepo infoRepo = checkIfExists(ruleID);
    submitRuleToScheduler(infoRepo.activate(this));
  }

  /** Disables a rule, optionally dropping cmdlets it has already generated. */
  public void disableRule(long ruleID, boolean dropPendingCmdlets) throws IOException {
    RuleInfoRepo infoRepo = checkIfExists(ruleID);
    infoRepo.disable();
    if (dropPendingCmdlets && getCmdletManager() != null) {
      getCmdletManager().dropRuleCmdlets(ruleID);
    }
  }

  /** Looks up the repo for a rule id, throwing if the rule is unknown. */
  private RuleInfoRepo checkIfExists(long ruleID) throws IOException {
    RuleInfoRepo infoRepo = mapRules.get(ruleID);
    if (infoRepo == null) {
      throw new IOException("Rule with ID = " + ruleID + " not found");
    }
    return infoRepo;
  }

  public RuleInfo getRuleInfo(long ruleID) throws IOException {
    RuleInfoRepo infoRepo = checkIfExists(ruleID);
    return infoRepo.getRuleInfo();
  }

  public List<DetailedRuleInfo> listRulesMoveInfo() throws IOException {
    try {
      return metaStore.listMoveRules();
    } catch (MetaStoreException e) {
      throw new IOException(e);
    }
  }

  public List<DetailedRuleInfo> listRulesSyncInfo() throws IOException {
    try {
      return metaStore.listSyncRules();
    } catch (MetaStoreException e) {
      throw new IOException(e);
    }
  }

  /** Returns info for all rules that are not in state DELETED. */
  public List<RuleInfo> listRulesInfo() throws IOException {
    Collection<RuleInfoRepo> infoRepos = mapRules.values();
    List<RuleInfo> retInfos = new ArrayList<>();
    for (RuleInfoRepo infoRepo : infoRepos) {
      RuleInfo info = infoRepo.getRuleInfo();
      if (info.getState() != RuleState.DELETED) {
        retInfos.add(info);
      }
    }
    return retInfos;
  }

  public void updateRuleInfo(
      long ruleId, RuleState rs, long lastCheckTime, long checkedCount, int cmdletsGen)
      throws IOException {
    RuleInfoRepo infoRepo = checkIfExists(ruleId);
    infoRepo.updateRuleInfo(rs, lastCheckTime, checkedCount, cmdletsGen);
  }

  public boolean isClosed() {
    return isClosed;
  }

  public StatesManager getStatesManager() {
    return statesManager;
  }

  public CmdletManager getCmdletManager() {
    return cmdletManager;
  }

  /**
   * Init RuleManager: loads all rules from the metastore into memory.
   *
   * @throws IOException if the rules cannot be loaded from the database
   */
  @Override
  public void init() throws IOException {
    LOG.info("Initializing ...");
    // Load rules table
    List<RuleInfo> rules;
    try {
      rules = metaStore.getRuleInfo();
    } catch (MetaStoreException e) {
      LOG.error("Can not load rules from database:\n" + e.getMessage());
      // Previously this fell through with a null list and crashed with an NPE;
      // surface the failure through the declared exception instead.
      throw new IOException("Can not load rules from database", e);
    }
    for (RuleInfo rule : rules) {
      mapRules.put(rule.getId(), new RuleInfoRepo(rule, metaStore, serverContext.getConf()));
    }
    LOG.info("Initialized. Totally " + rules.size() + " rules loaded from DataBase.");
    if (LOG.isDebugEnabled()) {
      for (RuleInfo info : rules) {
        LOG.debug("\t" + info);
      }
    }
  }

  /** Hands a launched executor to the scheduler; returns false if it already exited. */
  private boolean submitRuleToScheduler(RuleExecutor executor) throws IOException {
    if (executor == null || executor.isExited()) {
      return false;
    }
    execScheduler.addPeriodicityTask(executor);
    return true;
  }

  /** Start services. */
  @Override
  public void start() throws IOException {
    LOG.info("Starting ...");
    // after StateManager be ready
    int numLaunched = 0;
    // Submit runnable rules to scheduler
    for (RuleInfoRepo infoRepo : mapRules.values()) {
      RuleInfo rule = infoRepo.getRuleInfoRef();
      if (rule.getState() == RuleState.ACTIVE || rule.getState() == RuleState.DRYRUN) {
        RuleExecutor ruleExecutor = infoRepo.launchExecutor(this);
        TranslateResult tr = ruleExecutor.getTranslateResult();
        TimeBasedScheduleInfo si = tr.getTbScheduleInfo();
        if (rule.getLastCheckTime() != 0) {
          // Resume checking from where we left off before the restart.
          si.setFirstCheckTime(rule.getLastCheckTime());
        }
        boolean sub = submitRuleToScheduler(ruleExecutor);
        numLaunched += sub ? 1 : 0;
      }
    }
    LOG.info("Started. " + numLaunched + " rules launched for execution.");
  }

  /** Stop services. */
  @Override
  public void stop() throws IOException {
    LOG.info("Stopping ...");
    isClosed = true;
    if (execScheduler != null) {
      execScheduler.shutdown();
    }
    LOG.info("Stopped.");
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/StandbyServerInfo.java | smart-engine/src/main/java/org/smartdata/server/engine/StandbyServerInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.smartdata.model.ExecutorType;
import org.smartdata.server.cluster.NodeInfo;
/** Describes a standby SSM server node, which executes cmdlets remotely. */
public class StandbyServerInfo extends NodeInfo {
  public StandbyServerInfo(String id, String location) {
    super(id, location, ExecutorType.REMOTE_SSM);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/ServerContext.java | smart-engine/src/main/java/org/smartdata/server/engine/ServerContext.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.smartdata.SmartContext;
import org.smartdata.conf.SmartConf;
import org.smartdata.metaservice.MetaService;
import org.smartdata.metastore.MetaStore;
/**
 * Server-side context: the shared configuration (via {@link SmartContext})
 * plus the metastore and the service mode (HDFS/Alluxio) the server runs in.
 */
public class ServerContext extends SmartContext {
  // final: assigned once in every constructor and never replaced.
  private final MetaStore metaStore;
  private ServiceMode serviceMode;

  public ServerContext(MetaStore metaStore) {
    this.metaStore = metaStore;
  }

  public ServerContext(SmartConf conf, MetaStore metaStore) {
    super(conf);
    this.metaStore = metaStore;
  }

  public MetaStore getMetaStore() {
    return metaStore;
  }

  /** Returns the same metastore viewed through its {@code MetaService} interface. */
  public MetaService getMetaService() {
    return metaStore;
  }

  public ServiceMode getServiceMode() {
    return serviceMode;
  }

  public void setServiceMode(ServiceMode serviceMode) {
    this.serviceMode = serviceMode;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/ServiceMode.java | smart-engine/src/main/java/org/smartdata/server/engine/ServiceMode.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
/** The kind of storage system SSM manages, with a stable numeric code per mode. */
public enum ServiceMode {
  HDFS(1),
  ALLUXIO(2);

  // final: enum constant values never change after construction.
  private final int value;

  ServiceMode(int value) {
    this.value = value;
  }

  /**
   * Returns the mode whose numeric code equals {@code v},
   * or {@code null} if no mode matches.
   */
  public static ServiceMode fromValue(int v) {
    for (ServiceMode s : values()) {
      if (s.getValue() == v) {
        return s;
      }
    }
    return null;
  }

  /** Returns the numeric code of this mode. */
  public int getValue() {
    return value;
  }

  /** Returns the constant's name, e.g. "HDFS". */
  public String getName() {
    return toString();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/StatesManager.java | smart-engine/src/main/java/org/smartdata/server/engine/StatesManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.AbstractService;
import org.smartdata.conf.Reconfigurable;
import org.smartdata.conf.ReconfigurableRegistry;
import org.smartdata.conf.ReconfigureException;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metastore.dao.AccessCountTable;
import org.smartdata.metastore.dao.AccessCountTableManager;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.metrics.FileAccessEventSource;
import org.smartdata.metrics.impl.MetricsFactory;
import org.smartdata.model.CachedFileStatus;
import org.smartdata.model.FileAccessInfo;
import org.smartdata.model.FileInfo;
import org.smartdata.model.StorageCapacity;
import org.smartdata.model.Utilization;
import org.smartdata.model.WhitelistHelper;
import org.smartdata.server.engine.data.AccessEventFetcher;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
/**
* Polls metrics and events from NameNode.
*/
public class StatesManager extends AbstractService implements Reconfigurable {
  private ServerContext serverContext;
  private ScheduledExecutorService executorService;
  private AccessCountTableManager accessCountTableManager;
  private AccessEventFetcher accessEventFetcher;
  private FileAccessEventSource fileAccessEventSource;
  private AbstractService statesUpdaterService;
  private volatile boolean working = false;
  // Access events under these directory prefixes are dropped.
  private List<String> ignoreDirs = new ArrayList<String>();
  public static final Logger LOG = LoggerFactory.getLogger(StatesManager.class);

  public StatesManager(ServerContext context) {
    super(context);
    this.serverContext = context;
  }

  /**
   * Load configure/data to initialize.
   *
   * @throws IOException if initialization fails
   */
  @Override
  public void init() throws IOException {
    LOG.info("Initializing ...");
    this.executorService = Executors.newScheduledThreadPool(4);
    this.accessCountTableManager = new AccessCountTableManager(
        serverContext.getMetaStore(), executorService);
    this.fileAccessEventSource = MetricsFactory.createAccessEventSource(serverContext.getConf());
    this.accessEventFetcher =
        new AccessEventFetcher(
            serverContext.getConf(), accessCountTableManager,
            executorService, fileAccessEventSource.getCollector());
    initStatesUpdaterService();
    if (statesUpdaterService == null) {
      // Creation failed; allow a later reconfiguration to retry it.
      ReconfigurableRegistry.registReconfigurableProperty(
          getReconfigurableProperties(), this);
    }
    ignoreDirs = serverContext.getConf().getIgnoreDir();
    LOG.info("Initialized.");
  }

  @Override
  public boolean inSafeMode() {
    // Without a states updater we cannot know cluster state; stay in safe mode.
    if (statesUpdaterService == null) {
      return true;
    }
    return statesUpdaterService.inSafeMode();
  }

  /**
   * Start daemon threads in StatesManager for function.
   */
  @Override
  public void start() throws IOException {
    LOG.info("Starting ...");
    accessEventFetcher.start();
    if (statesUpdaterService != null) {
      statesUpdaterService.start();
    }
    working = true;
    LOG.info("Started. ");
  }

  @Override
  public void stop() throws IOException {
    working = false;
    LOG.info("Stopping ...");
    if (accessEventFetcher != null) {
      this.accessEventFetcher.stop();
    }
    if (this.fileAccessEventSource != null) {
      this.fileAccessEventSource.close();
    }
    if (statesUpdaterService != null) {
      statesUpdaterService.stop();
    }
    LOG.info("Stopped.");
  }

  public List<CachedFileStatus> getCachedList() throws MetaStoreException {
    return serverContext.getMetaStore().getCachedFileStatus();
  }

  /** Returns the access-count tables that cover the last {@code timeInMills} ms. */
  public List<AccessCountTable> getTablesInLast(long timeInMills) throws MetaStoreException {
    return this.accessCountTableManager.getTables(timeInMills);
  }

  /**
   * Records a file access event, unless the path is in an ignored directory
   * or (when whitelisting is enabled) outside the whitelist.
   */
  public void reportFileAccessEvent(FileAccessEvent event) throws IOException {
    String path = event.getPath();
    // Normalize with a trailing slash so prefix checks match whole path segments.
    path = path + (path.endsWith("/") ? "" : "/");
    for (String s : ignoreDirs) {
      if (path.startsWith(s)) {
        return;
      }
    }
    if (WhitelistHelper.isEnabled(serverContext.getConf())) {
      if (!WhitelistHelper.isInWhitelist(path, serverContext.getConf())) {
        LOG.debug("Path " + path + " is not in the whitelist. "
            + "Report file access event failed.");
        return;
      }
    }
    event.setTimeStamp(System.currentTimeMillis());
    this.fileAccessEventSource.insertEventFromSmartClient(event);
  }

  /**
   * Returns the hottest files from the given access-count tables.
   *
   * @param topNum number of files to return; 0 means "use the configured default"
   */
  public List<FileAccessInfo> getHotFiles(List<AccessCountTable> tables,
      int topNum) throws IOException {
    try {
      if (topNum == 0) {
        topNum = serverContext.getConf().getInt(SmartConfKeys.SMART_TOP_HOT_FILES_NUM_KEY,
            SmartConfKeys.SMART_TOP_HOT_FILES_NUM_DEFAULT);
      }
      // Single exit point; the original duplicated this call in both branches.
      return serverContext.getMetaStore().getHotFiles(tables, topNum);
    } catch (MetaStoreException e) {
      throw new IOException(e);
    }
  }

  public List<CachedFileStatus> getCachedFileStatus() throws IOException {
    try {
      return serverContext.getMetaStore().getCachedFileStatus();
    } catch (MetaStoreException e) {
      throw new IOException(e);
    }
  }

  /**
   * Returns utilization (capacity and usage) for a storage type,
   * or for the cache when {@code resourceName} is "cache".
   */
  public Utilization getStorageUtilization(String resourceName) throws IOException {
    try {
      long now = System.currentTimeMillis();
      if (!resourceName.equals("cache")) {
        long capacity =
            serverContext.getMetaStore().getStoreCapacityOfDifferentStorageType(resourceName);
        long free = serverContext.getMetaStore().getStoreFreeOfDifferentStorageType(resourceName);
        return new Utilization(now, capacity, capacity - free);
      } else {
        StorageCapacity storageCapacity = serverContext.getMetaStore().getStorageCapacity("cache");
        return new Utilization(now,
            storageCapacity.getCapacity(),
            storageCapacity.getCapacity() - storageCapacity.getFree());
      }
    } catch (MetaStoreException e) {
      throw new IOException(e);
    }
  }

  public FileInfo getFileInfo(String path) throws IOException {
    try {
      return serverContext.getMetaStore().getFile(path);
    } catch (MetaStoreException e) {
      throw new IOException(e);
    }
  }

  /** Retries creating the states updater when the namenode RPC address is (re)configured. */
  public void reconfigureProperty(String property, String newVal)
      throws ReconfigureException {
    LOG.debug("Received reconfig event: property={} newVal={}",
        property, newVal);
    if (SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY.equals(property)) {
      if (statesUpdaterService != null) {
        throw new ReconfigureException(
            "States update service already been initialized.");
      }
      if (working) {
        initStatesUpdaterService();
      }
    }
  }

  public List<String> getReconfigurableProperties() {
    return Arrays.asList(
        SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY);
  }

  /**
   * Best-effort creation (and start, if already working) of the states updater.
   * On failure, {@code statesUpdaterService} is left {@code null} and SSM keeps
   * running, since other features do not depend on it.
   */
  private synchronized void initStatesUpdaterService() {
    try {
      try {
        statesUpdaterService = AbstractServiceFactory
            .createStatesUpdaterService(getContext().getConf(),
                serverContext, serverContext.getMetaStore());
        statesUpdaterService.init();
      } catch (IOException e) {
        statesUpdaterService = null;
        LOG.warn("================================================================");
        LOG.warn(" Failed to create states updater service for: " + e.getMessage());
        LOG.warn(" This may leads to rule/action execution error. The reason why SSM "
            + "does not exit under this condition is some other feature depends on this.");
        LOG.warn("================================================================");
      }
      // Guard against the creation failure above; the original called start()
      // on a null reference and relied on the outer Throwable catch to hide the NPE.
      if (working && statesUpdaterService != null) {
        try {
          statesUpdaterService.start();
        } catch (IOException e) {
          LOG.error("Failed to start states updater service.", e);
          statesUpdaterService = null;
        }
      }
    } catch (Throwable t) {
      LOG.info("", t);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/ActiveServerInfo.java | smart-engine/src/main/java/org/smartdata/server/engine/ActiveServerInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.smartdata.model.ExecutorType;
import org.smartdata.server.cluster.NodeInfo;
/** Singleton descriptor of the currently active SSM server. */
public class ActiveServerInfo extends NodeInfo {
  private static final String ACTIVE_SERVER_ID = "ActiveSSMServer@";
  // volatile: the instance may be swapped by one thread (failover) and read by others.
  private static volatile ActiveServerInfo inst;

  private ActiveServerInfo(String id, String location) {
    super(id, location, ExecutorType.LOCAL);
  }

  /**
   * Returns the active server descriptor.
   * {@link #setInstance(String)} must have been called first.
   */
  public static ActiveServerInfo getInstance() {
    assert inst != null;
    return inst;
  }

  /** Records {@code location} ("host:port") as the active server. */
  public static void setInstance(String location) {
    inst = new ActiveServerInfo(ACTIVE_SERVER_ID + location, location);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/ConfManager.java | smart-engine/src/main/java/org/smartdata/server/engine/ConfManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.smartdata.conf.ReconfigurableBase;
import org.smartdata.conf.ReconfigureException;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import java.util.Arrays;
import java.util.List;
/** Applies runtime reconfiguration of selected properties to the live SmartConf. */
public class ConfManager extends ReconfigurableBase {
  // final: the reference never changes; the configuration object itself is mutated.
  private final SmartConf conf;

  public ConfManager(SmartConf conf) {
    this.conf = conf;
  }

  /**
   * Writes the new value into the live configuration for supported properties;
   * unsupported properties are silently ignored.
   */
  @Override
  public void reconfigureProperty(String property, String newVal)
      throws ReconfigureException {
    if (property.equals(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY)) {
      conf.set(property, newVal);
    }
  }

  @Override
  public List<String> getReconfigurableProperties() {
    return Arrays.asList(
        SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY
    );
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/AbstractServiceFactory.java | smart-engine/src/main/java/org/smartdata/server/engine/AbstractServiceFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.AbstractService;
import org.smartdata.SmartConstants;
import org.smartdata.SmartContext;
import org.smartdata.hdfs.scheduler.ActionSchedulerService;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.StatesUpdateService;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;
/**
 * Reflectively instantiates mode-specific (HDFS/Alluxio) service implementations,
 * resolving implementation class names from {@code SmartConstants} fields.
 */
public class AbstractServiceFactory {
  private static final Logger LOG = LoggerFactory.getLogger(AbstractServiceFactory.class);

  private AbstractServiceFactory() {
    // Static utility class; not meant to be instantiated.
  }

  /** Instantiates the states updater implementation for the context's service mode. */
  public static AbstractService createStatesUpdaterService(Configuration conf,
      ServerContext context, MetaStore metaStore) throws IOException {
    String source = getStatesUpdaterName(context.getServiceMode());
    try {
      Class<?> clazz = Class.forName(source);
      Constructor<?> c = clazz.getConstructor(SmartContext.class, MetaStore.class);
      return (StatesUpdateService) c.newInstance(context, metaStore);
    } catch (ClassNotFoundException | IllegalAccessException
        | InstantiationException | NoSuchMethodException
        | InvocationTargetException | NullPointerException e) {
      throw new IOException(e);
    }
  }

  /** Resolves the states-updater implementation class name for the given mode. */
  public static String getStatesUpdaterName(ServiceMode mode)
      throws IOException {
    String template = "SMART_@@_STATES_UPDATE_SERVICE_IMPL";
    try {
      return getConstantValue(mode, template);
    } catch (Exception e) {
      throw new IOException("Can not get value of SmartConstants."
          + getFieldName(mode, template), e);
    }
  }

  /**
   * Instantiates the action scheduler services for the context's service mode.
   *
   * @param allMustSuccess if true, any creation failure aborts with IOException;
   *     otherwise failures are logged and skipped
   */
  public static List<ActionSchedulerService> createActionSchedulerServices(Configuration conf,
      ServerContext context, MetaStore metaStore, boolean allMustSuccess) throws IOException {
    List<ActionSchedulerService> services = new ArrayList<>();
    String[] serviceNames = getActionSchedulerNames(context.getServiceMode());
    for (String name : serviceNames) {
      try {
        Class<?> clazz = Class.forName(name);
        Constructor<?> c = clazz.getConstructor(SmartContext.class, MetaStore.class);
        services.add((ActionSchedulerService) c.newInstance(context, metaStore));
      } catch (ClassNotFoundException | IllegalAccessException
          | InstantiationException | NoSuchMethodException
          | InvocationTargetException | NullPointerException e) {
        if (allMustSuccess) {
          throw new IOException(e);
        } else {
          LOG.warn("Error while create action scheduler service '" + name + "'.", e);
        }
      }
    }
    return services;
  }

  /** Resolves the comma-separated scheduler class names for the given mode. */
  public static String[] getActionSchedulerNames(ServiceMode mode) {
    String template = "SMART_@@_ACTION_SCHEDULER_SERVICE_IMPL";
    try {
      return getConstantValue(mode, template).trim().split("\\s*,\\s*");
    } catch (Exception e) {
      LOG.warn("Can not get value of SmartConstants."
          + getFieldName(mode, template), e);
    }
    return new String[0];
  }

  /** Reads the SmartConstants static String field named by the mode-filled template. */
  public static String getConstantValue(ServiceMode serviceMode, String template)
      throws NoSuchFieldException, SecurityException,
      IllegalArgumentException, IllegalAccessException {
    String fieldName = getFieldName(serviceMode, template);
    Field field = SmartConstants.class.getField(fieldName);
    return (String) field.get(SmartConstants.class);
  }

  /** Substitutes the mode name into the "@@" placeholder of the field-name template. */
  public static String getFieldName(ServiceMode serviceMode, String template) {
    // "@@" is a literal token, so plain replace() is correct; replaceAll()
    // would needlessly treat the pattern as a regex.
    return template.replace("@@", serviceMode.getName());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/EngineEventBus.java | smart-engine/src/main/java/org/smartdata/server/engine/EngineEventBus.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import com.google.common.eventbus.EventBus;
/**
 * Process-wide event bus for engine components: a thin static facade over a
 * single Guava {@link EventBus} instance.
 */
public class EngineEventBus {
  // Never reassigned, so declare it final.
  private static final EventBus eventBus = new EventBus("EngineEventBus");

  /** Posts an event to all registered listeners. */
  public static void post(Object event) {
    eventBus.post(event);
  }

  /** Registers a listener's subscriber methods with the bus. */
  public static void register(Object listener) {
    eventBus.register(listener);
  }

  /** Removes a previously registered listener. */
  public static void unregister(Object listener) {
    eventBus.unregister(listener);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/CmdletManager.java | smart-engine/src/main/java/org/smartdata/server/engine/CmdletManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.AbstractService;
import org.smartdata.action.ActionException;
import org.smartdata.action.ActionRegistry;
import org.smartdata.action.SmartAction;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.exception.QueueFullException;
import org.smartdata.hdfs.action.move.AbstractMoveFileAction;
import org.smartdata.hdfs.scheduler.ActionSchedulerService;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.smartdata.model.DetailedFileAction;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.UserInfo;
import org.smartdata.model.WhitelistHelper;
import org.smartdata.model.action.ActionScheduler;
import org.smartdata.model.action.ScheduleResult;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.ActionStatusFactory;
import org.smartdata.protocol.message.CmdletStatus;
import org.smartdata.protocol.message.CmdletStatusUpdate;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.protocol.message.StatusMessage;
import org.smartdata.protocol.message.StatusReport;
import org.smartdata.server.cluster.ActiveServerNodeCmdletMetrics;
import org.smartdata.server.cluster.NodeCmdletMetrics;
import org.smartdata.server.engine.cmdlet.CmdletDispatcher;
import org.smartdata.server.engine.cmdlet.CmdletExecutorService;
import org.smartdata.server.engine.cmdlet.TaskTracker;
import org.smartdata.utils.StringUtil;
import java.io.IOException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
* When a Cmdlet is submitted, it's string descriptor will be stored into set submittedCmdlets
* to avoid duplicated Cmdlet, then enqueue into pendingCmdlet. When the Cmdlet is scheduled it
* will be remove out of the queue and marked in the runningCmdlets.
*
* <p>The map idToCmdlets stores all the recent CmdletInfos, including pending and running Cmdlets.
* After the Cmdlet is finished or cancelled or failed, it's status will be flush to DB.
*/
public class CmdletManager extends AbstractService {
  private static final Logger LOG = LoggerFactory.getLogger(CmdletManager.class);
  public static final int TIMEOUT_MULTIPLIER = 100;
  public static final int TIMEOUT_MIN_MILLISECOND = 30000;
  private ScheduledExecutorService executorService;
  private CmdletDispatcher dispatcher;
  private MetaStore metaStore;
  // Monotonic id generators, seeded from the metastore in init().
  private AtomicLong maxActionId;
  private AtomicLong maxCmdletId;
  // cache sync threshold: max cmdlets flushed to DB per batch
  private int cacheCmdTh;
  private int maxNumPendingCmdlets;
  // Scheduling pipeline: pending -> scheduling -> scheduled -> running.
  private List<Long> pendingCmdlet;
  private List<Long> schedulingCmdlet;
  private Queue<Long> scheduledCmdlet;
  private Map<Long, LaunchCmdlet> idToLaunchCmdlet;
  private List<Long> runningCmdlets;
  // All live (pending and running) cmdlets by id.
  private Map<Long, CmdletInfo> idToCmdlets;
  // Track a CmdletDescriptor from the submission to
  // the finish.
  private TaskTracker tracker;
  private Map<Long, ActionInfo> idToActions;
  // Cmdlets awaiting a batched DB flush, and ids queued for deletion.
  private Map<Long, CmdletInfo> cacheCmd;
  private List<Long> tobeDeletedCmd;
  // Action name -> schedulers registered for it.
  private ListMultimap<String, ActionScheduler> schedulers = ArrayListMultimap.create();
  private List<ActionSchedulerService> schedulerServices = new ArrayList<>();
  private AtomicLong numCmdletsGen = new AtomicLong(0);
  private AtomicLong numCmdletsFinished = new AtomicLong(0);
  private long totalScheduled = 0;
  private ActionGroup tmpActions = new ActionGroup();
  // Action-failure detection timeout in ms, derived from the report period.
  private long timeout;
  private ActionGroup cache;
/**
 * Builds the manager, wiring queues, caches and the dispatcher from the
 * server context configuration.
 *
 * @param context server context supplying configuration and the metastore
 * @throws IOException declared for construction failures in the call chain
 */
public CmdletManager(ServerContext context) throws IOException {
  super(context);
  this.metaStore = context.getMetaStore();
  this.executorService = Executors.newScheduledThreadPool(4);
  this.runningCmdlets = new ArrayList<>();
  this.pendingCmdlet = new LinkedList<>();
  this.schedulingCmdlet = new LinkedList<>();
  this.scheduledCmdlet = new LinkedBlockingQueue<>();
  this.idToLaunchCmdlet = new ConcurrentHashMap<>();
  this.idToCmdlets = new ConcurrentHashMap<>();
  this.tracker = new TaskTracker();
  this.idToActions = new ConcurrentHashMap<>();
  this.cacheCmd = new ConcurrentHashMap<>();
  this.tobeDeletedCmd = new LinkedList<>();
  // Dispatcher shares the scheduled queue and running set with this class.
  this.dispatcher = new CmdletDispatcher(context, this, scheduledCmdlet,
      idToLaunchCmdlet, runningCmdlets, schedulers);
  maxNumPendingCmdlets = context.getConf()
      .getInt(SmartConfKeys.SMART_CMDLET_MAX_NUM_PENDING_KEY,
          SmartConfKeys.SMART_CMDLET_MAX_NUM_PENDING_DEFAULT);
  cacheCmdTh = context.getConf()
      .getInt(SmartConfKeys.SMART_CMDLET_CACHE_BATCH,
          SmartConfKeys.SMART_CMDLET_CACHE_BATCH_DEFAULT);
  int reportPeriod = context.getConf().getInt(SmartConfKeys.SMART_STATUS_REPORT_PERIOD_KEY,
      SmartConfKeys.SMART_STATUS_REPORT_PERIOD_DEFAULT);
  // Max interval of status report, by default 500ms.
  int maxInterval = reportPeriod * context.getConf().getInt(
      SmartConfKeys.SMART_STATUS_REPORT_PERIOD_MULTIPLIER_KEY,
      SmartConfKeys.SMART_STATUS_REPORT_PERIOD_MULTIPLIER_DEFAULT);
  // TIMEOUT_MULTIPLIER * maxInterval, 50s by default, is a potential timeout
  // value. And the least timeout value is 30s according to the below code.
  this.timeout = TIMEOUT_MULTIPLIER * maxInterval < TIMEOUT_MIN_MILLISECOND
      ? TIMEOUT_MIN_MILLISECOND : TIMEOUT_MULTIPLIER * maxInterval;
}
/** Overrides the action-failure detection timeout; for tests only. */
@VisibleForTesting
public void setTimeout(long timeout) {
  this.timeout = timeout;
}

/** Replaces the dispatcher; for tests only. */
@VisibleForTesting
void setDispatcher(CmdletDispatcher dispatcher) {
  this.dispatcher = dispatcher;
}
/**
 * Seeds id generators and the finished-cmdlet counter from the metastore,
 * creates and initializes the scheduler services, registers each scheduler
 * under its supported action names, then recovers unfinished cmdlets.
 *
 * @throws IOException on DB access failure or any initialization error
 */
@Override
public void init() throws IOException {
  LOG.info("Initializing ...");
  try {
    maxActionId = new AtomicLong(metaStore.getMaxActionId());
    maxCmdletId = new AtomicLong(metaStore.getMaxCmdletId());
    numCmdletsFinished.addAndGet(metaStore.getNumCmdletsInTerminiatedStates());
    // allMustSuccess=false: individual scheduler failures are logged, not fatal.
    schedulerServices = AbstractServiceFactory.createActionSchedulerServices(
        getContext().getConf(), (ServerContext) getContext(), metaStore, false);
    for (ActionSchedulerService s : schedulerServices) {
      s.init();
      List<String> actions = s.getSupportedActions();
      for (String a : actions) {
        schedulers.put(a, s);
      }
    }
    recover();
    LOG.info("Initialized.");
  } catch (MetaStoreException e) {
    LOG.error("DB Connection error! Failed to get Max CmdletId/ActionId!", e);
    throw new IOException(e);
  } catch (IOException e) {
    // Rethrow as-is so the Throwable clause below does not double-wrap it.
    throw e;
  } catch (Throwable t) {
    throw new IOException(t);
  }
}
/** Restores in-memory state from cmdlets persisted in the metastore. */
private void recover() throws IOException {
  reloadCmdletsInDB();
}

/** Returns the schedulers registered for the given action name. */
@VisibleForTesting
public List<ActionScheduler> getSchedulers(String actionName) {
  return schedulers.get(actionName);
}
/**
 * Reloads DISPATCHED and PENDING cmdlets from the metastore into the
 * in-memory maps after a restart, recovering scheduler state for the
 * dispatched ones. DB connection errors abort the reload quietly.
 *
 * @throws IOException propagated from downstream calls
 */
private void reloadCmdletsInDB() throws IOException {
  LOG.info("reloading the dispatched and pending cmdlets in DB.");
  List<CmdletInfo> cmdletInfos;
  try {
    cmdletInfos = metaStore.getCmdlets(CmdletState.DISPATCHED);
    if (cmdletInfos != null && !cmdletInfos.isEmpty()) {
      for (CmdletInfo cmdletInfo : cmdletInfos) {
        // Track reloaded cmdlets so duplicate submissions are refused.
        CmdletDescriptor cmdletDescriptor =
            CmdletDescriptor.fromCmdletString(cmdletInfo.getParameters());
        cmdletDescriptor.setRuleId(cmdletInfo.getRid());
        tracker.track(cmdletInfo.getCid(), cmdletDescriptor);
        List<ActionInfo> actionInfos = getActions(cmdletInfo.getAids());
        for (ActionInfo actionInfo : actionInfos) {
          actionInfo.setCreateTime(cmdletInfo.getGenerateTime());
          actionInfo.setFinishTime(System.currentTimeMillis());
          // Recover scheduler status according to dispatched action.
          recoverSchedulerStatus(actionInfo);
        }
        syncCmdAction(cmdletInfo, actionInfos);
      }
    }
    cmdletInfos = metaStore.getCmdlets(CmdletState.PENDING);
    if (cmdletInfos != null && !cmdletInfos.isEmpty()) {
      for (CmdletInfo cmdletInfo : cmdletInfos) {
        CmdletDescriptor cmdletDescriptor =
            CmdletDescriptor.fromCmdletString(cmdletInfo.getParameters());
        cmdletDescriptor.setRuleId(cmdletInfo.getRid());
        // Pending task also needs to be tracked.
        tracker.track(cmdletInfo.getCid(), cmdletDescriptor);
        // Bug fix: the original mixed String.format with an SLF4J "{}"
        // placeholder, so the cmdlet was never substituted into the message.
        LOG.debug("Reload pending cmdlet: {}", cmdletInfo);
        List<ActionInfo> actionInfos = getActions(cmdletInfo.getAids());
        syncCmdAction(cmdletInfo, actionInfos);
      }
    }
  } catch (MetaStoreException e) {
    // Bug fix: include the exception so the failure cause is not lost.
    LOG.error("DB connection error occurs when ssm is reloading cmdlets!", e);
    return;
  } catch (ParseException pe) {
    LOG.error("Failed to parse cmdlet string for tracking task", pe);
  }
}
/**
 * Only recover scheduler status according to dispatched task.
 * No-op unless the action's owning cmdlet is currently in DISPATCHED state
 * or the cmdlet lookup fails.
 */
public void recoverSchedulerStatus(ActionInfo actionInfo) {
  try {
    CmdletInfo cmdletInfo = getCmdletInfo(actionInfo.getCmdletId());
    if (cmdletInfo.getState() != CmdletState.DISPATCHED) {
      return;
    }
  } catch (IOException e) {
    // Cmdlet lookup failed; nothing to recover.
    return;
  }
  for (ActionScheduler p : schedulers.get(actionInfo.getActionName())) {
    p.recover(actionInfo);
  }
}
/**
 * Verifies that every action name in the descriptor is registered with the
 * {@link ActionRegistry}.
 *
 * @param cmdletDescriptor descriptor whose action names are validated
 * @throws IOException if any action name is unknown
 */
private void checkActionNames(
    CmdletDescriptor cmdletDescriptor) throws IOException {
  final int actionCount = cmdletDescriptor.getActionSize();
  for (int i = 0; i < actionCount; i++) {
    // Guard style: skip known actions, fail fast on the first unknown one.
    if (ActionRegistry.registeredAction(cmdletDescriptor.getActionName(i))) {
      continue;
    }
    throw new IOException(
        String.format(
            "Submit Cmdlet %s error! Action names are not correct!",
            cmdletDescriptor));
  }
}
/**
 * Registers each action id on the cmdlet, then lets every scheduler of each
 * action vet it via {@code onSubmit}; a single rejection aborts submission.
 * TODO: remove useless actionIndex.
 *
 * @param cmdletInfo the cmdlet being submitted
 * @param actionInfos the cmdlet's actions
 * @throws IOException if any scheduler rejects an action
 */
private void checkActionsOnSubmit(CmdletInfo cmdletInfo,
    List<ActionInfo> actionInfos) throws IOException {
  for (ActionInfo actionInfo : actionInfos) {
    cmdletInfo.addAction(actionInfo.getActionId());
  }
  int actionIndex = 0;
  for (ActionInfo actionInfo : actionInfos) {
    for (ActionScheduler p : schedulers.get(actionInfo.getActionName())) {
      if (!p.onSubmit(cmdletInfo, actionInfo, actionIndex)) {
        // Bug fix: the original format string had no %s specifier, so the
        // rejected action was silently dropped from the message.
        throw new IOException(
            String.format("Action %s rejected by scheduler", actionInfo));
      }
    }
    actionIndex++;
  }
}
/**
 * Starts the periodic background tasks (purge, schedule, cache flush and
 * failed-action detection), then all scheduler services and the dispatcher.
 *
 * @throws IOException propagated from scheduler/dispatcher startup
 */
@Override
public void start() throws IOException {
  LOG.info("Starting ...");
  executorService.scheduleAtFixedRate(new CmdletPurgeTask(getContext().getConf()),
      10, 5000, TimeUnit.MILLISECONDS);
  executorService.scheduleAtFixedRate(new ScheduleTask(), 100, 50, TimeUnit.MILLISECONDS);
  executorService.scheduleAtFixedRate(new FlushCachedCmdletsTask(), 200, 50,
      TimeUnit.MILLISECONDS);
  executorService.scheduleAtFixedRate(new DetectFailedActionTask(), 1000, 5000,
      TimeUnit.MILLISECONDS);
  for (ActionSchedulerService s : schedulerServices) {
    s.start();
  }
  dispatcher.start();
  LOG.info("Started.");
}
/**
 * Stops the dispatcher and the scheduler services (in reverse start order),
 * shuts down background tasks, then flushes every cached cmdlet to the
 * metastore before releasing dispatcher executors.
 *
 * @throws IOException if the final flush fails
 */
@Override
public void stop() throws IOException {
  LOG.info("Stopping ...");
  dispatcher.stop();
  for (int i = schedulerServices.size() - 1; i >= 0; i--) {
    schedulerServices.get(i).stop();
  }
  executorService.shutdown();
  // Raise the batch threshold so the final sync writes everything at once.
  cacheCmdTh = Integer.MAX_VALUE;
  try {
    batchSyncCmdAction();
  } catch (Exception e) {
    throw new IOException(e);
  }
  dispatcher.shutDownExcutorServices();
  LOG.info("Stopped.");
}
/**
 * Register agentExecutorService & hazelcastExecutorService.
 */
public void registerExecutorService(CmdletExecutorService executorService) {
  dispatcher.registerExecutorService(executorService);
}

/** Persists a newly created user account in the metastore. */
public void addNewUser(UserInfo userInfo) throws MetaStoreException {
  metaStore.insertUserInfo(userInfo);
}

/**
 * Updates the stored password for an existing user, wrapping any failure
 * as a MetaStoreException.
 */
public void newPassword(
    UserInfo userInfo) throws MetaStoreException {
  try {
    metaStore.newPassword(userInfo);
  } catch (Exception e) {
    throw new MetaStoreException(e);
  }
}
/**
 * Compare userInfo(userName, password) with the one recorded in metastore.
 *
 * @param userInfo its password should be encrypted by SHA512.
 * @return true if the given user info equals the one recorded in metastore.
 * @throws MetaStoreException on lookup failure
 */
public boolean authentic(UserInfo userInfo) throws MetaStoreException {
  try {
    UserInfo origin = metaStore.getUserInfoByUserName(userInfo.getUserName());
    if (origin == null) {
      // Parameterized SLF4J logging instead of string concatenation.
      LOG.warn("The given user is not registered: {}", userInfo.getUserName());
      return false;
    }
    return origin.equals(userInfo);
  } catch (Exception e) {
    throw new MetaStoreException(e);
  }
}
/**
 * Parses the cmdlet string and submits it for execution.
 *
 * @param cmdlet non-blank cmdlet string
 * @return the new cmdlet id, or -1 if refused as a duplicate
 * @throws IOException if the string is blank or malformed
 */
public long submitCmdlet(String cmdlet) throws IOException {
  // Parameterized logging: no formatting cost when DEBUG is disabled.
  LOG.debug("Received Cmdlet -> [ {} ]", cmdlet);
  try {
    if (StringUtils.isBlank(cmdlet)) {
      throw new IOException("Cannot submit an empty action!");
    }
    CmdletDescriptor cmdletDescriptor = CmdletDescriptor.fromCmdletString(cmdlet);
    return submitCmdlet(cmdletDescriptor);
  } catch (ParseException e) {
    LOG.debug("Cmdlet -> [ {} ], format is not correct", cmdlet, e);
    throw new IOException(e);
  }
}
/**
 * Submits a cmdlet descriptor: refuses tracked duplicates, enforces the
 * pending-queue limit and the path whitelist, lets schedulers vet the
 * actions, then persists, caches and tracks the new cmdlet.
 *
 * @return the new cmdlet id, or -1 if refused as a duplicate
 * @throws IOException on validation or persistence failure
 */
public long submitCmdlet(CmdletDescriptor cmdletDescriptor) throws IOException {
  // To avoid repeatedly submitting task. If tracker contains one CmdletDescriptor
  // with the same rule id and cmdlet string, return -1.
  if (tracker.contains(cmdletDescriptor)) {
    LOG.debug("Refuse to repeatedly submit Cmdlet for {}", cmdletDescriptor);
    return -1;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("Received Cmdlet -> [ %s ]", cmdletDescriptor.getCmdletString()));
  }
  // Back-pressure: reject when the scheduling backlog is at its cap.
  if (maxNumPendingCmdlets <= pendingCmdlet.size() + schedulingCmdlet.size()) {
    throw new QueueFullException("Pending cmdlets exceeds value specified by key '"
        + SmartConfKeys.SMART_CMDLET_MAX_NUM_PENDING_KEY + "' = " + maxNumPendingCmdlets);
  }
  long submitTime = System.currentTimeMillis();
  CmdletInfo cmdletInfo =
      new CmdletInfo(
          maxCmdletId.getAndIncrement(),
          cmdletDescriptor.getRuleId(),
          CmdletState.PENDING,
          cmdletDescriptor.getCmdletString(),
          submitTime,
          submitTime,
          submitTime + cmdletDescriptor.getDeferIntervalMs());
  List<ActionInfo> actionInfos =
      createActionInfos(cmdletDescriptor, cmdletInfo.getCid());
  // Check action names
  checkActionNames(cmdletDescriptor);
  // Check if action path is in whitelist
  if (WhitelistHelper.isEnabled(getContext().getConf())) {
    if (!WhitelistHelper.isCmdletInWhitelist(cmdletDescriptor)) {
      throw new IOException("This path is not in the whitelist.");
    }
  }
  // Let Scheduler check actioninfo onsubmit and add them to cmdletinfo
  checkActionsOnSubmit(cmdletInfo, actionInfos);
  // Insert cmdletinfo and actionInfos to metastore and cache.
  syncCmdAction(cmdletInfo, actionInfos);
  // Track in the submission portal. For cmdlets recovered from DB
  // (see #recover), they will be not be tracked.
  tracker.track(cmdletInfo.getCid(), cmdletDescriptor);
  return cmdletInfo.getCid();
}
/**
 * Insert cmdletinfo and actions to metastore and cache.
 * PENDING cmdlets are queued for scheduling and batched into cacheCmd for a
 * later DB flush; DISPATCHED ones (recovery path) go straight to the
 * running set with a rebuilt LaunchCmdlet.
 *
 * @param cmdletInfo cmdlet to register
 * @param actionInfos its actions
 * @throws IOException declared for callers; not thrown in this body
 */
private void syncCmdAction(CmdletInfo cmdletInfo,
    List<ActionInfo> actionInfos) throws IOException {
  LOG.debug("Cache cmd {}", cmdletInfo);
  for (ActionInfo actionInfo : actionInfos) {
    idToActions.put(actionInfo.getActionId(), actionInfo);
  }
  idToCmdlets.put(cmdletInfo.getCid(), cmdletInfo);
  if (cmdletInfo.getState() == CmdletState.PENDING) {
    numCmdletsGen.incrementAndGet();
    cacheCmd.put(cmdletInfo.getCid(), cmdletInfo);
    synchronized (pendingCmdlet) {
      pendingCmdlet.add(cmdletInfo.getCid());
    }
  } else if (cmdletInfo.getState() == CmdletState.DISPATCHED) {
    runningCmdlets.add(cmdletInfo.getCid());
    LaunchCmdlet launchCmdlet = createLaunchCmdlet(cmdletInfo);
    idToLaunchCmdlet.put(cmdletInfo.getCid(), launchCmdlet);
  }
}
/**
 * Flushes cached cmdlets and their actions to the metastore in batches of
 * at most cacheCmdTh, applies queued deletions, and evicts/untracks
 * cmdlets that reached a terminal state.
 *
 * @throws Exception from untracking; metastore errors are logged, not thrown
 */
private void batchSyncCmdAction() throws Exception {
  if (cacheCmd.size() == 0 && tobeDeletedCmd.size() == 0) {
    return;
  }
  List<CmdletInfo> cmdletInfos = new ArrayList<>();
  List<ActionInfo> actionInfos = new ArrayList<>();
  List<CmdletInfo> cmdletFinished = new ArrayList<>();
  LOG.debug("Number of cached cmds {}", cacheCmd.size());
  int todelSize;
  synchronized (cacheCmd) {
    synchronized (tobeDeletedCmd) {
      // Cmdlets queued for deletion must not be flushed to the DB.
      todelSize = tobeDeletedCmd.size();
      for (Long cid : tobeDeletedCmd) {
        cacheCmd.remove(cid);
      }
    }
    // ConcurrentHashMap tolerates removal during keySet iteration.
    for (Long cid : cacheCmd.keySet()) {
      CmdletInfo cmdletInfo = cacheCmd.remove(cid);
      if (cmdletInfo.getState() != CmdletState.DISABLED) {
        cmdletInfos.add(cmdletInfo);
        for (Long aid : cmdletInfo.getAids()) {
          ActionInfo actionInfo = idToActions.get(aid);
          if (actionInfo != null) {
            actionInfos.add(actionInfo);
          }
        }
      }
      if (CmdletState.isTerminalState(cmdletInfo.getState())) {
        cmdletFinished.add(cmdletInfo);
      }
      // Cap the batch size; the remainder is flushed on the next run.
      if (cmdletInfos.size() >= cacheCmdTh) {
        break;
      }
    }
    // Evict terminal cmdlets from the in-memory maps and stop tracking them.
    for (CmdletInfo cmdletInfo : cmdletFinished) {
      idToCmdlets.remove(cmdletInfo.getCid());
      try {
        tracker.untrack(cmdletInfo.getCid());
      } catch (Exception e) {
        LOG.warn("Failed to untrack task!", e);
      }
      for (Long aid : cmdletInfo.getAids()) {
        idToActions.remove(aid);
      }
    }
  }
  if (cmdletInfos.size() > 0) {
    LOG.debug("Number of cmds {} to submit", cmdletInfos.size());
    try {
      metaStore.insertActions(
          actionInfos.toArray(new ActionInfo[actionInfos.size()]));
      metaStore.insertCmdlets(
          cmdletInfos.toArray(new CmdletInfo[cmdletInfos.size()]));
    } catch (MetaStoreException e) {
      LOG.error("CmdletIds -> [ {} ], submit to DB error", cmdletInfos, e);
    }
  }
  if (todelSize > 0) {
    // Delete at most cacheCmdTh queued ids per run.
    List<Long> del = new LinkedList<>();
    synchronized (tobeDeletedCmd) {
      del.addAll(tobeDeletedCmd.subList(0, todelSize > cacheCmdTh ? cacheCmdTh : todelSize));
      tobeDeletedCmd.removeAll(del);
    }
    if (del.size() > 0) {
      LOG.debug("Number of cmds {} to delete", del.size());
      try {
        metaStore.batchDeleteCmdlet(del);
        metaStore.batchDeleteCmdletActions(del);
      } catch (MetaStoreException e) {
        LOG.error("CmdletIds -> [ {} ], delete from DB error", del, e);
      }
    }
  }
}
/**
 * Whether cmdlet scheduling should pause: true once the scheduled queue
 * already covers the free dispatcher slots plus a 20% margin of all slots.
 */
private boolean shouldStopSchedule() {
  int left = dispatcher.getTotalSlotsLeft();
  int total = dispatcher.getTotalSlots();
  // Return the condition directly instead of if (...) return true/false.
  return scheduledCmdlet.size() >= left + total * 0.2;
}
/** Number of cmdlets still waiting to be scheduled (pending + scheduling). */
private int getNumPendingScheduleCmdlets() {
  return pendingCmdlet.size() + schedulingCmdlet.size();
}

/** Fills scheduling-backlog gauges into the active server's metrics. */
public void updateNodeCmdletMetrics(ActiveServerNodeCmdletMetrics metrics) {
  metrics.setMaxPendingSchedule(maxNumPendingCmdlets);
  metrics.setNumPendingSchedule(getNumPendingScheduleCmdlets());
}

/** Returns per-node cmdlet metrics collected by the dispatcher. */
public Collection<NodeCmdletMetrics> getAllNodeCmdletMetrics() {
  return dispatcher.getNodeCmdletMetrics();
}
/**
 * Drains pendingCmdlet into schedulingCmdlet, then schedules cmdlets until
 * the dispatcher backlog fills. Per cmdlet: SUCCESS queues it for dispatch,
 * FAIL cancels it, SUCCESS_NO_EXECUTION completes it immediately, RETRY
 * leaves it in the scheduling list for the next pass.
 *
 * @return number of cmdlets moved to the scheduled queue
 * @throws IOException declared for callers; errors here are caught and logged
 */
private int scheduleCmdlet() throws IOException {
  int nScheduled = 0;
  synchronized (pendingCmdlet) {
    if (pendingCmdlet.size() > 0) {
      schedulingCmdlet.addAll(pendingCmdlet);
      pendingCmdlet.clear();
    }
  }
  long curr = System.currentTimeMillis();
  Iterator<Long> it = schedulingCmdlet.iterator();
  while (it.hasNext() && !shouldStopSchedule()) {
    long id = it.next();
    // Refresh the clock only every 20 cmdlets to limit syscall overhead.
    if (nScheduled % 20 == 0) {
      curr = System.currentTimeMillis();
    }
    CmdletInfo cmdlet = idToCmdlets.get(id);
    if (cmdlet == null) {
      it.remove();
      continue;
    }
    synchronized (cmdlet) {
      switch (cmdlet.getState()) {
        case CANCELLED:
        case DISABLED:
          it.remove();
          break;
        case PENDING:
          // Deferred cmdlets wait until their defer time has passed.
          if (cmdlet.getDeferedToTime() > curr) {
            break;
          }
          LaunchCmdlet launchCmdlet = createLaunchCmdlet(cmdlet);
          ScheduleResult result;
          try {
            result = scheduleCmdletActions(cmdlet, launchCmdlet);
          } catch (Throwable t) {
            LOG.error("Schedule " + cmdlet + " failed.", t);
            result = ScheduleResult.FAIL;
          }
          if (result != ScheduleResult.RETRY) {
            it.remove();
          } else {
            // RETRY: keep it in schedulingCmdlet and move to the next one.
            continue;
          }
          try {
            if (result == ScheduleResult.SUCCESS) {
              idToLaunchCmdlet.put(cmdlet.getCid(), launchCmdlet);
              cmdlet.setState(CmdletState.SCHEDULED);
              cmdlet.setStateChangedTime(System.currentTimeMillis());
              scheduledCmdlet.add(id);
              nScheduled++;
            } else if (result == ScheduleResult.FAIL) {
              cmdlet.updateState(CmdletState.CANCELLED);
              CmdletStatus cmdletStatus = new CmdletStatus(
                  cmdlet.getCid(), cmdlet.getStateChangedTime(), cmdlet.getState());
              // Mark all actions as finished
              cmdletFinishedInternal(cmdlet, false);
              onCmdletStatusUpdate(cmdletStatus);
            } else if (result == ScheduleResult.SUCCESS_NO_EXECUTION) {
              cmdlet.updateState(CmdletState.DONE);
              cmdletFinishedInternal(cmdlet, true);
              CmdletStatus cmdletStatus = new CmdletStatus(
                  cmdlet.getCid(), cmdlet.getStateChangedTime(), cmdlet.getState());
              onCmdletStatusUpdate(cmdletStatus);
            }
          } catch (Throwable t) {
            LOG.error("Post schedule cmdlet " + cmdlet + " error.", t);
          }
          break;
      }
    }
  }
  return nScheduled;
}
/**
 * Runs every scheduler of every action in order and aggregates the result:
 * any non-success result aborts the loops and wins; SUCCESS outranks
 * SUCCESS_NO_EXECUTION; actions with no registered scheduler are skipped
 * but force an overall SUCCESS. Already-run schedulers are then unwound via
 * postscheduleCmdletActions.
 *
 * @return the aggregated ScheduleResult for the whole cmdlet
 */
private ScheduleResult scheduleCmdletActions(CmdletInfo info,
    LaunchCmdlet launchCmdlet) {
  List<Long> actIds = info.getAids();
  int idx = 0;
  int schIdx = 0;
  ActionInfo actionInfo;
  LaunchAction launchAction;
  List<ActionScheduler> actSchedulers;
  boolean skipped = false;
  ScheduleResult scheduleResult = ScheduleResult.SUCCESS_NO_EXECUTION;
  ScheduleResult resultTmp;
  for (idx = 0; idx < actIds.size(); idx++) {
    actionInfo = idToActions.get(actIds.get(idx));
    launchAction = launchCmdlet.getLaunchActions().get(idx);
    actSchedulers = schedulers.get(actionInfo.getActionName());
    if (actSchedulers == null || actSchedulers.size() == 0) {
      // No scheduler for this action; remember so the final result is SUCCESS.
      skipped = true;
      continue;
    }
    for (schIdx = 0; schIdx < actSchedulers.size(); schIdx++) {
      ActionScheduler s = actSchedulers.get(schIdx);
      try {
        resultTmp = s.onSchedule(info, actionInfo, launchCmdlet, launchAction, idx);
      } catch (Throwable t) {
        actionInfo.appendLogLine("\nOnSchedule exception: " + t);
        resultTmp = ScheduleResult.FAIL;
      }
      // A non-success result always takes over; SUCCESS upgrades the
      // initial SUCCESS_NO_EXECUTION.
      if (resultTmp != ScheduleResult.SUCCESS
          && resultTmp != ScheduleResult.SUCCESS_NO_EXECUTION) {
        scheduleResult = resultTmp;
      } else {
        if (scheduleResult == ScheduleResult.SUCCESS_NO_EXECUTION) {
          scheduleResult = resultTmp;
        }
      }
      if (scheduleResult != ScheduleResult.SUCCESS
          && scheduleResult != ScheduleResult.SUCCESS_NO_EXECUTION) {
        break;
      }
    }
    if (scheduleResult != ScheduleResult.SUCCESS
        && scheduleResult != ScheduleResult.SUCCESS_NO_EXECUTION) {
      break;
    }
  }
  if (scheduleResult == ScheduleResult.SUCCESS
      || scheduleResult == ScheduleResult.SUCCESS_NO_EXECUTION) {
    // On success, point idx/schIdx at the last completed action/scheduler
    // so the unwind below covers the whole chain.
    idx--;
    schIdx--;
    if (skipped) {
      scheduleResult = ScheduleResult.SUCCESS;
    }
  }
  postscheduleCmdletActions(info, actIds, scheduleResult, idx, schIdx);
  return scheduleResult;
}
/**
 * Notifies schedulers in reverse order after a scheduling pass, starting
 * from the last action/scheduler reached. A negative lastScheduler means
 * the current action's full scheduler chain ran.
 */
private void postscheduleCmdletActions(CmdletInfo cmdletInfo,
    List<Long> actions, ScheduleResult result,
    int lastAction, int lastScheduler) {
  List<ActionScheduler> actSchedulers;
  for (int aidx = lastAction; aidx >= 0; aidx--) {
    ActionInfo info = idToActions.get(actions.get(aidx));
    actSchedulers = schedulers.get(info.getActionName());
    if (actSchedulers == null || actSchedulers.size() == 0) {
      continue;
    }
    if (lastScheduler < 0) {
      lastScheduler = actSchedulers.size() - 1;
    }
    for (int sidx = lastScheduler; sidx >= 0; sidx--) {
      try {
        actSchedulers.get(sidx).postSchedule(cmdletInfo, info, sidx, result);
      } catch (Throwable t) {
        info.setLog((info.getLog() == null ? "" : info.getLog())
            + "\nPostSchedule exception: " + t);
      }
    }
    // Every earlier action ran its full scheduler chain.
    lastScheduler = -1;
  }
}
/**
 * Builds a LaunchCmdlet from the cached ActionInfos of the given cmdlet,
 * copying each action's args map. Actions missing from the cache are
 * silently skipped.
 *
 * @return the launch representation, or null when cmdletInfo is null
 */
private LaunchCmdlet createLaunchCmdlet(CmdletInfo cmdletInfo) {
  if (cmdletInfo == null) {
    return null;
  }
  List<LaunchAction> launchActions = new ArrayList<>();
  for (Long aid : cmdletInfo.getAids()) {
    // Single get() instead of containsKey()+get(): one lookup and no
    // check-then-act race on the concurrent map.
    ActionInfo toLaunch = idToActions.get(aid);
    if (toLaunch != null) {
      Map<String, String> args = new HashMap<>(toLaunch.getArgs());
      launchActions.add(
          new LaunchAction(toLaunch.getActionId(), toLaunch.getActionName(), args));
    }
  }
  return new LaunchCmdlet(cmdletInfo.getCid(), launchActions);
}
/**
 * Returns the cmdlet from the in-memory cache, falling back to the
 * metastore for cmdlets already flushed out.
 *
 * @throws IOException on metastore failure
 */
public CmdletInfo getCmdletInfo(long cid) throws IOException {
  // Single get() avoids the containsKey()/get() race on the concurrent map,
  // which could return null if the entry was removed between the two calls.
  CmdletInfo cached = idToCmdlets.get(cid);
  if (cached != null) {
    return cached;
  }
  try {
    return metaStore.getCmdletById(cid);
  } catch (MetaStoreException e) {
    LOG.error("CmdletId -> [ {} ], get CmdletInfo from DB error", cid, e);
    throw new IOException(e);
  }
}
/**
 * Returns one page of cmdlets for a rule plus the rule's total cmdlet count.
 *
 * @param pageIndex 1-based page index
 */
public CmdletGroup listCmdletsInfo(long rid, long pageIndex, long numPerPage,
    List<String> orderBy,
    List<Boolean> isDesc) throws IOException, MetaStoreException {
  List<CmdletInfo> cmdlets = metaStore.listPageCmdlets(rid,
      (pageIndex - 1) * numPerPage, numPerPage, orderBy, isDesc);
  return new CmdletGroup(cmdlets, metaStore.getNumCmdletsByRid(rid));
}
/**
 * Lists cmdlets of a rule in a given state, merging DB rows with the
 * in-memory cache. rid == -1 matches any rule in the DB query.
 */
public List<CmdletInfo> listCmdletsInfo(long rid, CmdletState cmdletState) throws IOException {
  List<CmdletInfo> result = new ArrayList<>();
  try {
    if (rid == -1) {
      result.addAll(metaStore.getCmdlets(null, null, cmdletState));
    } else {
      result.addAll(metaStore.getCmdlets(null, String.format("= %d", rid), cmdletState));
    }
  } catch (MetaStoreException e) {
    LOG.error("RuleId -> [ {} ], List CmdletInfo from DB error", rid, e);
    throw new IOException(e);
  }
  // NOTE(review): when rid == -1 the DB branch returns all rules, but this
  // loop matches rid == -1 literally — confirm whether in-memory cmdlets of
  // every rule should be included in that case.
  for (CmdletInfo info : idToCmdlets.values()) {
    if (info.getRid() == rid && info.getState().equals(cmdletState)) {
      result.add(info);
    }
  }
  return result;
}

/**
 * Lists all cmdlets of a rule (any state), de-duplicated by cmdlet id with
 * in-memory entries overriding DB rows.
 */
public List<CmdletInfo> listCmdletsInfo(long rid) throws IOException {
  Map<Long, CmdletInfo> result = new HashMap<>();
  try {
    String ridCondition = rid == -1 ? null : String.format("= %d", rid);
    for (CmdletInfo info : metaStore.getCmdlets(null, ridCondition, null)) {
      result.put(info.getCid(), info);
    }
  } catch (MetaStoreException e) {
    LOG.error("RuleId -> [ {} ], List CmdletInfo from DB error", rid, e);
    throw new IOException(e);
  }
  // NOTE(review): same rid == -1 asymmetry as above — verify intent.
  for (CmdletInfo info : idToCmdlets.values()) {
    if (info.getRid() == rid) {
      result.put(info.getCid(), info);
    }
  }
  return Lists.newArrayList(result.values());
}

/** No-op: newly submitted cmdlets are already active (PENDING). */
public void activateCmdlet(long cid) throws IOException {
  // Currently the default cmdlet status is pending, do nothing here
}
/**
 * Marks a cmdlet DISABLED, removes it from every scheduling queue and, if
 * it is already running, asks the dispatcher to stop it. Unknown ids are
 * ignored.
 */
public void disableCmdlet(long cid) throws IOException {
  // Single get() replaces containsKey()+get() (one lookup, no race).
  CmdletInfo info = idToCmdlets.get(cid);
  if (info == null) {
    return;
  }
  onCmdletStatusUpdate(
      new CmdletStatus(info.getCid(), System.currentTimeMillis(), CmdletState.DISABLED));
  synchronized (pendingCmdlet) {
    // remove(Object) is a no-op when absent; no contains() pre-check needed.
    pendingCmdlet.remove(cid);
  }
  schedulingCmdlet.remove(cid);
  scheduledCmdlet.remove(cid);
  // Wait status update from status reporter, so need to update to MetaStore
  if (runningCmdlets.contains(cid)) {
    dispatcher.stopCmdlet(cid);
  }
}
/**
 * Drop all unfinished cmdlets.
 *
 * @param ruleId rule whose non-terminal cmdlets are deleted
 * @throws IOException on deletion failure
 */
public void dropRuleCmdlets(long ruleId) throws IOException {
  List<Long> cids = new ArrayList<>();
  for (CmdletInfo info : idToCmdlets.values()) {
    if (info.getRid() == ruleId && !CmdletState.isTerminalState(info.getState())) {
      cids.add(info.getCid());
    }
  }
  batchDeleteCmdlet(cids);
}

//Todo: optimize this function.
/**
 * Bookkeeping when a cmdlet reaches a terminal state: notifies the
 * dispatcher, drops it from the running/launch maps and queues its info
 * for flushing to the metastore.
 */
private void cmdletFinished(long cmdletId) throws IOException {
  numCmdletsFinished.incrementAndGet();
  CmdletInfo cmdletInfo = idToCmdlets.get(cmdletId);
  if (cmdletInfo == null) {
    LOG.debug("CmdletInfo [id={}] does not exist in idToCmdlets.", cmdletId);
    return;
  }
  dispatcher.onCmdletFinished(cmdletInfo.getCid());
  runningCmdlets.remove(cmdletId);
  idToLaunchCmdlet.remove(cmdletId);
  flushCmdletInfo(cmdletInfo);
}
/**
 * Marks every action of the cmdlet as finished with the given success flag,
 * stamping create/finish times from the cmdlet's last state change.
 *
 * @param success whether the actions are reported as successful
 */
private void cmdletFinishedInternal(CmdletInfo cmdletInfo, boolean success) throws IOException {
  numCmdletsFinished.incrementAndGet();
  for (Long aid : cmdletInfo.getAids()) {
    ActionInfo actionInfo = idToActions.get(aid);
    if (actionInfo == null) {
      // Robustness fix: a missing (already evicted) action previously caused
      // an NPE when synchronizing on null; skip it instead.
      continue;
    }
    synchronized (actionInfo) {
      // Set all action as finished
      actionInfo.setProgress(1.0F);
      actionInfo.setFinished(true);
      actionInfo.setCreateTime(cmdletInfo.getStateChangedTime());
      actionInfo.setFinishTime(cmdletInfo.getStateChangedTime());
      actionInfo.setExecHost(ActiveServerInfo.getInstance().getId());
      actionInfo.setSuccessful(success);
    }
  }
}
public void deleteCmdlet(long cid) throws IOException {
this.disableCmdlet(cid);
try {
metaStore.deleteCmdlet(cid);
metaStore.deleteCmdletActions(cid);
} catch (MetaStoreException e) {
LOG.error("CmdletId -> [ {} ], delete from DB error", cid, e);
throw new IOException(e);
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | true |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/rule/FileCopyDrPlugin.java | smart-engine/src/main/java/org/smartdata/server/engine/rule/FileCopyDrPlugin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.SyncAction;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.BackUpInfo;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.FileDiff;
import org.smartdata.model.FileDiffType;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.rule.RuleExecutorPlugin;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.utils.StringUtil;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
public class FileCopyDrPlugin implements RuleExecutorPlugin {
  private static final Logger LOG =
      LoggerFactory.getLogger(FileCopyDrPlugin.class.getName());

  private MetaStore metaStore;
  // Rule id -> backup infos for that rule. The map is guarded by itself;
  // each per-rule list is guarded by itself.
  private Map<Long, List<BackUpInfo>> backups = new HashMap<>();

  public FileCopyDrPlugin(MetaStore metaStore) {
    this.metaStore = metaStore;
  }

  /**
   * For a new "sync" rule executor: extend the rule's final SQL statement so
   * it also selects files with pending create/rename diffs, normalize the
   * destination path to end with '/', and persist a BackUpInfo plus a
   * BASESYNC marker diff.
   *
   * @param ruleInfo the rule being activated
   * @param tResult  the translated rule (mutated in place)
   */
  public void onNewRuleExecutor(final RuleInfo ruleInfo, TranslateResult tResult) {
    long ruleId = ruleInfo.getId();
    List<String> pathsCheckGlob = tResult.getGlobPathCheck();
    if (pathsCheckGlob.isEmpty()) {
      // No path constraint in the rule: match everything.
      pathsCheckGlob = Arrays.asList("/*");
    }
    List<String> pathsCheck = getPathMatchesList(pathsCheckGlob);
    String dirs = StringUtil.join(",", pathsCheck);
    CmdletDescriptor des = tResult.getCmdDescriptor();
    for (int i = 0; i < des.getActionSize(); i++) {
      if (!des.getActionName(i).equals("sync")) {
        continue;
      }
      // Union the last statement with files that still have pending diffs.
      List<String> statements = tResult.getSqlStatements();
      String before = statements.get(statements.size() - 1);
      String after = before.replace(";", " UNION " + referenceNonExists(tResult, pathsCheck));
      statements.set(statements.size() - 1, after);

      BackUpInfo backUpInfo = new BackUpInfo();
      backUpInfo.setRid(ruleId);
      backUpInfo.setSrc(dirs);
      String dest = des.getActionArgs(i).get(SyncAction.DEST);
      if (!dest.endsWith("/")) {
        // Normalize so dest is always treated as a directory.
        dest += "/";
        des.addActionArg(i, SyncAction.DEST, dest);
      }
      backUpInfo.setDest(dest);
      backUpInfo.setPeriod(tResult.getTbScheduleInfo().getMinimalEvery());
      des.addActionArg(i, SyncAction.SRC, dirs);
      LOG.debug("Rule executor added for sync rule {} src={} dest={}", ruleInfo, dirs, dest);

      List<BackUpInfo> infos;
      synchronized (backups) {
        // Fix: fetch/create the list inside the lock so a concurrent
        // onRuleExecutorExit cannot remove the entry between put and get.
        infos = backups.get(ruleId);
        if (infos == null) {
          infos = new LinkedList<BackUpInfo>();
          backups.put(ruleId, infos);
        }
      }
      synchronized (infos) {
        try {
          metaStore.deleteBackUpInfo(ruleId);
          // Add base Sync tag
          FileDiff fileDiff = new FileDiff(FileDiffType.BASESYNC);
          fileDiff.setSrc(backUpInfo.getSrc());
          fileDiff.getParameters().put("-dest", backUpInfo.getDest());
          metaStore.insertFileDiff(fileDiff);
          metaStore.insertBackUpInfo(backUpInfo);
          infos.add(backUpInfo);
        } catch (MetaStoreException e) {
          LOG.error("Insert backup info error:" + backUpInfo, e);
        }
      }
      break;
    }
  }

  /**
   * Map each glob to its base directory, skipping globs with no resolvable
   * base.
   */
  private List<String> getPathMatchesList(List<String> paths) {
    List<String> ret = new ArrayList<>();
    for (String p : paths) {
      String dir = StringUtil.getBaseDir(p);
      if (dir == null) {
        continue;
      }
      ret.add(dir);
    }
    return ret;
  }

  /**
   * Build a sub-query selecting sources with pending (state = 0)
   * create/rename (diff_type 1 or 2) file diffs under any of the dirs.
   *
   * <p>NOTE(review): dir names are concatenated directly into the SQL; a
   * path containing a single quote would break the statement. Paths come
   * from rule text rather than end users, but parameterizing would be safer.
   */
  private String referenceNonExists(TranslateResult tr, List<String> dirs) {
    String temp = "SELECT src FROM file_diff WHERE "
        + "state = 0 AND diff_type IN (1,2) AND (%s);";
    StringBuilder srcs = new StringBuilder("src LIKE '" + dirs.get(0) + "%'");
    for (int i = 1; i < dirs.size(); i++) {
      srcs.append(" OR src LIKE '").append(dirs.get(i)).append("%'");
    }
    return String.format(temp, srcs.toString());
  }

  /** Always allow execution; this plugin only reacts to executor lifecycle. */
  public boolean preExecution(final RuleInfo ruleInfo, TranslateResult tResult) {
    return true;
  }

  /** Pass-through: no filtering of the selected objects. */
  public List<String> preSubmitCmdlet(final RuleInfo ruleInfo, List<String> objects) {
    return objects;
  }

  /** Pass-through: the descriptor was already adjusted in onNewRuleExecutor. */
  public CmdletDescriptor preSubmitCmdletDescriptor(
      final RuleInfo ruleInfo, TranslateResult tResult, CmdletDescriptor descriptor) {
    return descriptor;
  }

  /**
   * On executor exit, drop one backup entry for the rule; when the last one
   * is gone, forget the rule and delete its persisted backup info.
   */
  public void onRuleExecutorExit(final RuleInfo ruleInfo) {
    long ruleId = ruleInfo.getId();
    List<BackUpInfo> infos = backups.get(ruleId);
    if (infos == null) {
      return;
    }
    synchronized (infos) {
      try {
        if (infos.size() != 0) {
          infos.remove(0);
        }
        if (infos.size() == 0) {
          backups.remove(ruleId);
          metaStore.deleteBackUpInfo(ruleId);
        }
      } catch (MetaStoreException e) {
        LOG.error("Remove backup info error:" + ruleInfo, e);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/rule/ErasureCodingPlugin.java | smart-engine/src/main/java/org/smartdata/server/engine/rule/ErasureCodingPlugin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.apache.hadoop.hdfs.DFSClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.hdfs.CompatibilityHelperLoader;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.metastore.MetaStore;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.ErasureCodingPolicyInfo;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.rule.RuleExecutorPlugin;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.server.engine.ServerContext;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class ErasureCodingPlugin implements RuleExecutorPlugin {
  private static final Logger LOG =
      LoggerFactory.getLogger(ErasureCodingPlugin.class);

  private ServerContext context;
  private MetaStore metaStore;
  // Rule id -> EC policy names referenced by that rule's "ec" actions.
  private final Map<Long, List<String>> ecPolicies = new ConcurrentHashMap<>();
  // Known EC policies; all reads/writes after construction happen under
  // synchronized (ecInfos).
  private final List<ErasureCodingPolicyInfo> ecInfos = new ArrayList<>();
  // Last refresh timestamp; guarded by the ecInfos lock.
  private long lastUpdateTime = 0;
  private URI nnUri = null;
  private DFSClient client = null;

  public ErasureCodingPlugin(ServerContext context) {
    this.context = context;
    metaStore = context.getMetaStore();
    try {
      // Seed the cache from the MetaStore; refreshed lazily in preExecution.
      ecInfos.addAll(metaStore.getAllEcPolicies());
    } catch (Exception e) {
      // Best effort only — fix: include the cause instead of swallowing it.
      LOG.warn("Load ErasureCoding Policy failed!", e);
    }
  }

  /**
   * Remember every "-policy" argument of the rule's "ec" actions so
   * preExecution can verify they exist.
   */
  @Override
  public void onNewRuleExecutor(RuleInfo ruleInfo,
      TranslateResult tResult) {
    long ruleId = ruleInfo.getId();
    CmdletDescriptor des = tResult.getCmdDescriptor();
    for (int i = 0; i < des.getActionSize(); i++) {
      if (!des.getActionName(i).equals("ec")) {
        continue;
      }
      String policy = des.getActionArgs(i).get("-policy");
      if (policy == null) {
        continue;
      }
      // Fix: atomic insertion on the ConcurrentHashMap instead of the
      // racy containsKey-then-put.
      List<String> policies = ecPolicies.get(ruleId);
      if (policies == null) {
        List<String> fresh = new ArrayList<String>();
        List<String> existing = ecPolicies.putIfAbsent(ruleId, fresh);
        policies = existing != null ? existing : fresh;
      }
      policies.add(policy);
    }
  }

  /** Lazily resolve the NameNode URI and open a DFS client. */
  private void initClient() {
    try {
      if (nnUri == null) {
        nnUri = HadoopUtil.getNameNodeUri(context.getConf());
      }
      if (nnUri != null && client == null) {
        client = HadoopUtil.getDFSClient(nnUri, context.getConf());
      }
    } catch (Exception e) {
      LOG.error("Init client connection failed: " + e.getLocalizedMessage());
    }
  }

  /**
   * Refresh the EC policy cache from HDFS and persist it to the MetaStore.
   * Caller must hold the ecInfos lock.
   */
  private void updateErasureCodingPolices() {
    try {
      initClient();
      if (client == null) {
        LOG.error("Failed to refresh EC policies due to can not setup connection to HDFS!");
        return;
      }
      Map<Byte, String> idToPolicyName =
          CompatibilityHelperLoader.getHelper().getErasureCodingPolicies(client);
      if (idToPolicyName != null) {
        ecInfos.clear();
        // Fix: iterate entries instead of keySet + per-key lookup.
        for (Map.Entry<Byte, String> entry : idToPolicyName.entrySet()) {
          ecInfos.add(new ErasureCodingPolicyInfo(entry.getKey(), entry.getValue()));
        }
        metaStore.deleteAllEcPolicies();
        metaStore.insertEcPolicies(ecInfos);
      }
    } catch (Exception e) {
      // Fix: include the cause instead of swallowing it.
      LOG.warn("Failed to refresh EC policies!", e);
    }
  }

  /**
   * Before each rule run, check that every policy the rule references is
   * known; if one is missing, refresh the cache from HDFS (throttled to at
   * most once per 5 seconds). Never blocks execution.
   */
  @Override
  public boolean preExecution(RuleInfo ruleInfo,
      TranslateResult tResult) {
    if (!ecPolicies.containsKey(ruleInfo.getId())) {
      return true;
    }
    List<String> polices = ecPolicies.get(ruleInfo.getId());
    // After the loop, notIn holds the first policy not found in the cache.
    String notIn = null;
    synchronized (ecInfos) {
      for (String policy : polices) {
        notIn = policy;
        for (ErasureCodingPolicyInfo info : ecInfos) {
          if (info.getEcPolicyName().equals(policy)) {
            notIn = null;
            break;
          }
        }
        if (notIn != null) {
          break;
        }
      }
    }
    if (notIn != null) {
      synchronized (ecInfos) {
        long curr = System.currentTimeMillis();
        if (curr - lastUpdateTime >= 5000) {
          LOG.info("Refresh EC policies for policy: " + notIn);
          updateErasureCodingPolices();
          lastUpdateTime = curr;
        }
      }
    }
    return true;
  }

  /** Pass-through: no filtering of selected objects. */
  @Override
  public List<String> preSubmitCmdlet(RuleInfo ruleInfo,
      List<String> objects) {
    return objects;
  }

  /** Pass-through: the descriptor is not modified by this plugin. */
  @Override
  public CmdletDescriptor preSubmitCmdletDescriptor(RuleInfo ruleInfo,
      TranslateResult tResult, CmdletDescriptor descriptor) {
    return descriptor;
  }

  /** Forget the rule's policy list when its executor exits. */
  @Override
  public void onRuleExecutorExit(RuleInfo ruleInfo) {
    ecPolicies.remove(ruleInfo.getId());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleInfoRepo.java | smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleInfoRepo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.smartdata.conf.SmartConf;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.model.rule.RuleExecutorPlugin;
import org.smartdata.model.rule.RuleExecutorPluginManager;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.rule.parser.SmartRuleStringParser;
import org.smartdata.rule.parser.TranslationContext;
import org.smartdata.server.engine.RuleManager;
import org.smartdata.server.engine.data.ExecutionContext;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Contains detailed info about a rule.
*/
public class RuleInfoRepo {
  private RuleInfo ruleInfo = null;
  private RuleExecutor executor = null;
  private MetaStore metaStore = null;
  private SmartConf conf = null;
  // Guards ruleInfo and executor state transitions.
  private ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();

  public RuleInfoRepo(RuleInfo ruleInfo, MetaStore metaStore, SmartConf conf) {
    this.ruleInfo = ruleInfo;
    this.metaStore = metaStore;
    this.conf = conf;
  }

  /**
   * Return a defensive copy of the rule info, taken under the read lock.
   */
  public RuleInfo getRuleInfo() {
    lockRead();
    try {
      // Fix: release the lock in finally so it is not leaked if newCopy()
      // throws.
      return ruleInfo.newCopy();
    } finally {
      unlockRead();
    }
  }

  /** Return the live (mutable, shared) rule info reference. */
  public RuleInfo getRuleInfoRef() {
    return ruleInfo;
  }

  /** Transition the rule to DISABLED and stop its executor. */
  public void disable() throws IOException {
    lockWrite();
    try {
      changeRuleState(RuleState.DISABLED);
    } finally {
      unlockWrite();
    }
  }

  /** Transition the rule to DELETED and stop its executor. */
  public void delete() throws IOException {
    lockWrite();
    try {
      changeRuleState(RuleState.DELETED);
    } finally {
      unlockWrite();
    }
  }

  /**
   * Transition the rule to ACTIVE and launch a new executor for it.
   *
   * @return the new executor, or null if one is already running or the
   *         state does not allow launching
   */
  public RuleExecutor activate(RuleManager ruleManager)
      throws IOException {
    lockWrite();
    try {
      changeRuleState(RuleState.ACTIVE);
      return doLaunchExecutor(ruleManager);
    } finally {
      unlockWrite();
    }
  }

  /**
   * Launch an executor for the rule without changing its state.
   *
   * @return the new executor, or null if not launchable
   */
  public RuleExecutor launchExecutor(RuleManager ruleManager)
      throws IOException {
    lockWrite();
    try {
      return doLaunchExecutor(ruleManager);
    } finally {
      unlockWrite();
    }
  }

  /**
   * Update the in-memory rule info (state, check time, counters) and, when
   * a MetaStore is attached, persist the change.
   *
   * @return the MetaStore update result, or true if no MetaStore is set
   */
  public boolean updateRuleInfo(RuleState rs, long lastCheckTime,
      long checkedCount, int cmdletsGen) throws IOException {
    lockWrite();
    try {
      boolean ret = true;
      // State change is applied in memory only; persistence happens below.
      changeRuleState(rs, false);
      ruleInfo.updateRuleInfo(rs, lastCheckTime, checkedCount, cmdletsGen);
      if (metaStore != null) {
        try {
          ret = metaStore.updateRuleInfo(ruleInfo.getId(),
              rs, lastCheckTime, ruleInfo.getNumChecked(), (int) ruleInfo.getNumCmdsGen());
        } catch (MetaStoreException e) {
          throw new IOException(ruleInfo.toString(), e);
        }
      }
      return ret;
    } finally {
      unlockWrite();
    }
  }

  /**
   * Create a new RuleExecutor when the rule is ACTIVE/DRYRUN and no live
   * executor exists. Reuses the previous executor's translate result when
   * available; otherwise re-parses the rule text. Caller holds write lock.
   */
  private RuleExecutor doLaunchExecutor(RuleManager ruleManager)
      throws IOException {
    RuleState state = ruleInfo.getState();
    if (state == RuleState.ACTIVE || state == RuleState.DRYRUN) {
      if (executor != null && !executor.isExited()) {
        return null;
      }
      ExecutionContext ctx = new ExecutionContext();
      ctx.setRuleId(ruleInfo.getId());
      TranslationContext transCtx = new TranslationContext(ruleInfo.getId(),
          ruleInfo.getSubmitTime());
      TranslateResult tr = executor != null ? executor.getTranslateResult() :
          new SmartRuleStringParser(ruleInfo.getRuleText(), transCtx, conf).translate();
      List<RuleExecutorPlugin> plugins = RuleExecutorPluginManager.getPlugins();
      for (RuleExecutorPlugin plugin : plugins) {
        plugin.onNewRuleExecutor(ruleInfo, tr);
      }
      executor = new RuleExecutor(
          ruleManager, ctx, tr, ruleManager.getMetaStore());
      return executor;
    }
    return null;
  }

  /** Mark the current executor exited and notify plugins. */
  private void markWorkExit() {
    if (executor != null) {
      executor.setExited();
      notifyRuleExecutorExit();
    }
  }

  private void notifyRuleExecutorExit() {
    List<RuleExecutorPlugin> plugins = RuleExecutorPluginManager.getPlugins();
    for (RuleExecutorPlugin plugin : plugins) {
      plugin.onRuleExecutorExit(ruleInfo);
    }
  }

  private boolean changeRuleState(RuleState newState)
      throws IOException {
    return changeRuleState(newState, true);
  }

  /**
   * Apply a rule state transition, enforcing the allowed transitions:
   * ACTIVE from DISABLED/DRYRUN; DISABLED and FINISHED from ACTIVE/DRYRUN;
   * DELETED from any state. Optionally persists to the MetaStore.
   *
   * @return true if the state changed; false if newState is null or equal
   *         to the current state
   * @throws IOException on an unsupported transition or a MetaStore error
   */
  private boolean changeRuleState(RuleState newState,
      boolean updateDb) throws IOException {
    RuleState oldState = ruleInfo.getState();
    if (newState == null || oldState == newState) {
      return false;
    }
    try {
      switch (newState) {
        case ACTIVE:
          if (oldState == RuleState.DISABLED || oldState == RuleState.DRYRUN) {
            ruleInfo.setState(newState);
            if (updateDb && metaStore != null) {
              metaStore.updateRuleState(ruleInfo.getId(), newState);
            }
            return true;
          }
          break;
        case DISABLED:
          if (oldState == RuleState.ACTIVE || oldState == RuleState.DRYRUN) {
            ruleInfo.setState(newState);
            markWorkExit();
            if (updateDb && metaStore != null) {
              metaStore.updateRuleState(ruleInfo.getId(), newState);
            }
            return true;
          }
          break;
        case DELETED:
          ruleInfo.setState(newState);
          markWorkExit();
          if (updateDb && metaStore != null) {
            metaStore.updateRuleState(ruleInfo.getId(), newState);
          }
          return true;
        case FINISHED:
          if (oldState == RuleState.ACTIVE || oldState == RuleState.DRYRUN) {
            ruleInfo.setState(newState);
            if (updateDb && metaStore != null) {
              metaStore.updateRuleState(ruleInfo.getId(), newState);
            }
            return true;
          }
          break;
      }
    } catch (MetaStoreException e) {
      throw new IOException(ruleInfo.toString(), e);
    }
    throw new IOException("This rule state transition is not supported: "
        + oldState.name() + " -> " + newState.name()); // TODO: unsupported
  }

  private void lockWrite() {
    rwl.writeLock().lock();
  }

  private void unlockWrite() {
    rwl.writeLock().unlock();
  }

  private void lockRead() {
    rwl.readLock().lock();
  }

  private void unlockRead() {
    rwl.readLock().unlock();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleExecutor.java | smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleExecutor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.exception.QueueFullException;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metastore.dao.AccessCountTable;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.model.rule.RuleExecutorPlugin;
import org.smartdata.model.rule.RuleExecutorPluginManager;
import org.smartdata.model.rule.TimeBasedScheduleInfo;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.server.engine.RuleManager;
import org.smartdata.server.engine.data.ExecutionContext;
import java.io.IOException;
import java.lang.reflect.Method;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Stack;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/** Execute rule queries and return result. */
public class RuleExecutor implements Runnable {
private RuleManager ruleManager;
private TranslateResult tr;
private ExecutionContext ctx;
private MetaStore adapter;
private volatile boolean exited = false;
private long exitTime;
private Stack<String> dynamicCleanups = new Stack<>();
private static final Logger LOG = LoggerFactory.getLogger(RuleExecutor.class.getName());
private static Pattern varPattern = Pattern.compile("\\$([a-zA-Z_]+[a-zA-Z0-9_]*)");
private static Pattern callPattern =
Pattern.compile("\\$@([a-zA-Z_]+[a-zA-Z0-9_]*)\\(([a-zA-Z_][a-zA-Z0-9_]*)?\\)");
public RuleExecutor(
RuleManager ruleManager, ExecutionContext ctx, TranslateResult tr, MetaStore adapter) {
this.ruleManager = ruleManager;
this.ctx = ctx;
this.tr = tr;
this.adapter = adapter;
}
public TranslateResult getTranslateResult() {
return tr;
}
private String unfoldSqlStatement(String sql) {
return unfoldVariables(unfoldFunctionCalls(sql));
}
private String unfoldVariables(String sql) {
String ret = sql;
ctx.setProperty("NOW", System.currentTimeMillis());
Matcher m = varPattern.matcher(sql);
while (m.find()) {
String rep = m.group();
String varName = m.group(1);
String value = ctx.getString(varName);
ret = ret.replace(rep, value);
}
return ret;
}
private String unfoldFunctionCalls(String sql) {
String ret = sql;
Matcher m = callPattern.matcher(sql);
while (m.find()) {
String rep = m.group();
String funcName = m.group(1);
String paraName = m.groupCount() == 2 ? m.group(2) : null;
List<Object> params = tr.getParameter(paraName);
String value = callFunction(funcName, params);
ret = ret.replace(rep, value == null ? "" : value);
}
return ret;
}
public List<String> executeFileRuleQuery() {
int index = 0;
List<String> ret = new ArrayList<>();
for (String sql : tr.getSqlStatements()) {
sql = unfoldSqlStatement(sql);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Rule " + ctx.getRuleId() + " --> " + sql);
}
if (index == tr.getRetSqlIndex()) {
ret = adapter.executeFilesPathQuery(sql);
} else {
sql = sql.trim();
if (sql.length() > 5) {
adapter.execute(sql);
}
}
index++;
} catch (MetaStoreException e) {
LOG.error("Rule " + ctx.getRuleId() + " exception", e);
return ret;
}
}
while (!dynamicCleanups.empty()) {
String sql = dynamicCleanups.pop();
try {
adapter.execute(sql);
} catch (MetaStoreException e) {
LOG.error("Rule " + ctx.getRuleId() + " exception", e);
}
}
return ret;
}
public String callFunction(String funcName, List<Object> parameters) {
try {
Method m = getClass().getMethod(funcName, List.class);
String ret = (String) (m.invoke(this, parameters));
return ret;
} catch (Exception e) {
LOG.error("Rule " + ctx.getRuleId() + " exception when call " + funcName, e);
return null;
}
}
public String genVirtualAccessCountTableTopValue(List<Object> parameters) {
genVirtualAccessCountTableValue(parameters, true);
return null;
}
public String genVirtualAccessCountTableBottomValue(List<Object> parameters) {
genVirtualAccessCountTableValue(parameters, false);
return null;
}
private void genVirtualAccessCountTableValue(List<Object> parameters, boolean top) {
List<Object> paraList = (List<Object>) parameters.get(0);
String table = (String) parameters.get(1);
String var = (String) parameters.get(2);
Long num = (Long) paraList.get(1);
String sql0 = String.format(
"SELECT %s(count) FROM ( SELECT * FROM %s ORDER BY count %sLIMIT %d ) AS %s_TMP;",
top ? "min" : "max", table, top ? "DESC " : "", num, table);
Long count = null;
try {
count = adapter.queryForLong(sql0);
} catch (MetaStoreException e) {
LOG.error("Get " + (top ? "top" : "bottom") + " access count from table '"
+ table + "' error.", e);
}
ctx.setProperty(var, count == null ? 0L : count);
}
public String genVirtualAccessCountTableTopValueOnStoragePolicy(List<Object> parameters) {
genVirtualAccessCountTableValueOnStoragePolicy(parameters, true);
return null;
}
public String genVirtualAccessCountTableBottomValueOnStoragePolicy(List<Object> parameters) {
genVirtualAccessCountTableValueOnStoragePolicy(parameters, false);
return null;
}
private void genVirtualAccessCountTableValueOnStoragePolicy(List<Object> parameters,
boolean top) {
List<Object> paraList = (List<Object>) parameters.get(0);
String table = (String) parameters.get(1);
String var = (String) parameters.get(2);
Long num = (Long) paraList.get(1);
String storage = ((String) paraList.get(2)).toUpperCase();
String sqlsub;
if (storage.equals("CACHE")) {
sqlsub = String.format("SELECT %s.fid, %s.count FROM %s LEFT JOIN cached_file ON "
+ "(%s.fid = cached_file.fid)", table, table, table, table);
} else {
Integer id = null;
try {
id = adapter.getStoragePolicyID(storage);
} catch (Exception e) {
// Ignore
}
if (id == null) {
id = -1; // safe return
}
sqlsub = String.format("SELECT %s.fid, %s.count FROM %s LEFT JOIN file ON "
+ "(%s.fid = file.fid) WHERE file.sid = %d",
table, table, table, table, id);
}
String sql0 = String.format(
"SELECT %s(count) FROM ( SELECT * FROM (%s) AS %s ORDER BY count %sLIMIT %d ) AS %s;",
top ? "min" : "max",
sqlsub,
table + "_AL1_TMP",
top ? "DESC " : "",
num,
table + "_AL2_TMP");
Long count = null;
try {
count = adapter.queryForLong(sql0);
} catch (MetaStoreException e) {
LOG.error(String.format("Get %s access count on storage [%s] from table '%s' error [%s].",
top ? "top" : "bottom", storage, table, sql0), e);
}
ctx.setProperty(var, count == null ? 0L : count);
}
public String genVirtualAccessCountTable(List<Object> parameters) {
List<Object> paraList = (List<Object>) parameters.get(0);
String newTable = (String) parameters.get(1);
Long interval = (Long) paraList.get(0);
String countFilter = "";
List<String> tableNames = getAccessCountTablesDuringLast(interval);
return generateSQL(tableNames, newTable, countFilter, adapter);
}
@VisibleForTesting
static String generateSQL(
List<String> tableNames, String newTable, String countFilter, MetaStore adapter) {
String sqlFinal, sqlCreate;
if (tableNames.size() <= 1) {
String tableName = tableNames.size() == 0 ? "blank_access_count_info" : tableNames.get(0);
sqlCreate = "CREATE TABLE " + newTable + "(fid INTEGER NOT NULL, count INTEGER NOT NULL);";
try {
adapter.execute(sqlCreate);
} catch (MetaStoreException e) {
LOG.error("Cannot create table " + newTable, e);
}
sqlFinal = "INSERT INTO " + newTable + " SELECT * FROM " + tableName + ";";
} else {
String sqlPrefix = "SELECT fid, SUM(count) AS count FROM (\n";
String sqlUnion = "SELECT fid, count FROM " + tableNames.get(0) + " \n";
for (int i = 1; i < tableNames.size(); i++) {
sqlUnion += "UNION ALL\n" + "SELECT fid, count FROM " + tableNames.get(i) + " \n";
}
String sqlSufix = ") as tmp GROUP BY fid ";
String sqlCountFilter =
(countFilter == null || countFilter.length() == 0)
? ""
: "HAVING SUM(count) " + countFilter;
String sqlRe = sqlPrefix + sqlUnion + sqlSufix + sqlCountFilter;
sqlCreate = "CREATE TABLE " + newTable + "(fid INTEGER NOT NULL, count INTEGER NOT NULL);";
try {
adapter.execute(sqlCreate);
} catch (MetaStoreException e) {
LOG.error("Cannot create table " + newTable, e);
}
sqlFinal = "INSERT INTO " + newTable + " SELECT * FROM (" + sqlRe + ") temp;";
}
return sqlFinal;
}
/**
* @param lastInterval
* @return
*/
private List<String> getAccessCountTablesDuringLast(long lastInterval) {
List<String> tableNames = new ArrayList<>();
if (ruleManager == null || ruleManager.getStatesManager() == null) {
return tableNames;
}
List<AccessCountTable> accTables = null;
try {
accTables = ruleManager.getStatesManager().getTablesInLast(lastInterval);
} catch (MetaStoreException e) {
LOG.error("Rule " + ctx.getRuleId() + " get access info tables exception", e);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Rule " + ctx.getRuleId() + " got " + accTables.size() + " tables:");
int idx = 1;
for (AccessCountTable t : accTables) {
LOG.debug(
idx + ". " + (t.isEphemeral() ? " [TABLE] " : " ") + t.getTableName() + " ");
}
}
if (accTables == null || accTables.size() == 0) {
return tableNames;
}
for (AccessCountTable t : accTables) {
tableNames.add(t.getTableName());
if (t.isEphemeral()) {
dynamicCleanups.push("DROP TABLE IF EXISTS " + t.getTableName() + ";");
}
}
return tableNames;
}
@Override
public void run() {
long startCheckTime = System.currentTimeMillis();
if (exited) {
exitSchedule();
}
if (!tr.getTbScheduleInfo().isExecutable(startCheckTime)) {
return;
}
List<RuleExecutorPlugin> plugins = RuleExecutorPluginManager.getPlugins();
long rid = ctx.getRuleId();
try {
if (ruleManager.isClosed()) {
exitSchedule();
}
long endCheckTime;
int numCmdSubmitted = 0;
List<String> files = new ArrayList<>();
RuleInfo info = ruleManager.getRuleInfo(rid);
boolean doExec = true;
for (RuleExecutorPlugin plugin : plugins) {
doExec &= plugin.preExecution(info, tr);
if (!doExec) {
break;
}
}
RuleState state = info.getState();
if (exited
|| state == RuleState.DELETED
|| state == RuleState.FINISHED
|| state == RuleState.DISABLED) {
exitSchedule();
}
TimeBasedScheduleInfo scheduleInfo = tr.getTbScheduleInfo();
if (!scheduleInfo.isOnce() && scheduleInfo.getEndTime() != TimeBasedScheduleInfo.FOR_EVER) {
boolean befExit = false;
if (scheduleInfo.isOneShot()) {
// The subScheduleTime is set in triggering time.
if (scheduleInfo.getSubScheduleTime() > scheduleInfo.getEndTime()) {
befExit = true;
}
} else if (startCheckTime - scheduleInfo.getEndTime() > 0) {
befExit = true;
}
if (befExit) {
LOG.info("Rule " + ctx.getRuleId() + " exit rule executor due to time passed");
ruleManager.updateRuleInfo(rid, RuleState.FINISHED, startCheckTime, 0, 0);
exitSchedule();
}
}
if (doExec) {
files = executeFileRuleQuery();
if (exited) {
exitSchedule();
}
}
endCheckTime = System.currentTimeMillis();
if (doExec) {
for (RuleExecutorPlugin plugin : plugins) {
files = plugin.preSubmitCmdlet(info, files);
}
numCmdSubmitted = submitCmdlets(info, files);
}
ruleManager.updateRuleInfo(rid, null, startCheckTime, 1, numCmdSubmitted);
long endProcessTime = System.currentTimeMillis();
if (endProcessTime - startCheckTime > 2000 || LOG.isDebugEnabled()) {
LOG.warn(
"Rule "
+ ctx.getRuleId()
+ " execution took "
+ (endProcessTime - startCheckTime)
+ "ms. QueryTime = "
+ (endCheckTime - startCheckTime)
+ "ms, SubmitTime = "
+ (endProcessTime - endCheckTime)
+ "ms, fileNum = "
+ numCmdSubmitted
+ ".");
}
if (scheduleInfo.isOneShot()) {
ruleManager.updateRuleInfo(rid, RuleState.FINISHED, startCheckTime, 0, 0);
exitSchedule();
}
if (endProcessTime + scheduleInfo.getBaseEvery() > scheduleInfo.getEndTime()) {
LOG.info("Rule " + ctx.getRuleId() + " exit rule executor due to finished");
ruleManager.updateRuleInfo(rid, RuleState.FINISHED, startCheckTime, 0, 0);
exitSchedule();
}
if (exited) {
exitSchedule();
}
} catch (IOException e) {
LOG.error("Rule " + ctx.getRuleId() + " exception", e);
}
}
private void exitSchedule() {
// throw an exception
exitTime = System.currentTimeMillis();
exited = true;
if (LOG.isDebugEnabled()) {
LOG.debug("Rule " + ctx.getRuleId() + " exit rule executor.");
}
String[] temp = new String[1];
temp[1] += "The exception is created deliberately";
}
private int submitCmdlets(RuleInfo ruleInfo, List<String> files) {
long ruleId = ruleInfo.getId();
if (files == null || files.size() == 0 || ruleManager.getCmdletManager() == null) {
return 0;
}
int nSubmitted = 0;
List<RuleExecutorPlugin> plugins = RuleExecutorPluginManager.getPlugins();
String template = tr.getCmdDescriptor().toCmdletString();
for (String file : files) {
if (!exited) {
try {
CmdletDescriptor cmd = new CmdletDescriptor(template, ruleId);
cmd.setCmdletParameter(CmdletDescriptor.HDFS_FILE_PATH, file);
for (RuleExecutorPlugin plugin : plugins) {
cmd = plugin.preSubmitCmdletDescriptor(ruleInfo, tr, cmd);
}
long cid = ruleManager.getCmdletManager().submitCmdlet(cmd);
// Not really submitted if cid is -1.
if (cid != -1) {
nSubmitted++;
}
} catch (QueueFullException e) {
break;
} catch (IOException e) {
// it's common here, ignore this and continue submit
LOG.debug("Failed to submit cmdlet for file: " + file, e);
} catch (ParseException e) {
LOG.error("Failed to submit cmdlet for file: " + file, e);
}
} else {
break;
}
}
return nSubmitted;
}
public boolean isExited() {
return exited;
}
public void setExited() {
exitTime = System.currentTimeMillis();
exited = true;
}
  /** @return wall-clock time (ms since epoch) recorded when the executor exited. */
  public long getExitTime() {
    return exitTime;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/rule/ExecutorScheduler.java | smart-engine/src/main/java/org/smartdata/server/engine/rule/ExecutorScheduler.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.smartdata.model.rule.TimeBasedScheduleInfo;
import org.smartdata.rule.ScheduleInfo;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * Schedules rule executors and other periodic work on a shared
 * {@link ScheduledExecutorService}.
 */
public class ExecutorScheduler {
  private ScheduledExecutorService service;

  public ExecutorScheduler(int numThreads) {
    service = Executors.newScheduledThreadPool(numThreads);
  }

  /**
   * Schedules a rule executor to run periodically according to its
   * time based schedule info.
   *
   * @param re rule executor to run
   */
  public void addPeriodicityTask(RuleExecutor re) {
    TimeBasedScheduleInfo si = re.getTranslateResult().getTbScheduleInfo();
    long now = System.currentTimeMillis();
    si.setSubScheduleTime(now);
    if (si.getStartTime() == -1L && si.getEndTime() == -1L) {
      // No explicit time window configured: treat it as starting (and
      // ending) right now.
      si.setStartTime(now);
      si.setEndTime(now);
    }
    // The start time should not be earlier than the subSchedule time.
    long startDelay = si.getStartTime() - now;
    if (startDelay < 0) {
      startDelay = 0;
    }
    if (si.getFirstCheckTime() == 0) {
      si.setFirstCheckTime(now + startDelay);
    }
    long every = si.getMinimalEvery();
    if (every <= 0) {
      // Fall back to a 5 second period when the rule gives no valid interval.
      every = 5000;
    }
    service.scheduleAtFixedRate(re, startDelay, every, TimeUnit.MILLISECONDS);
  }

  /** Schedules arbitrary periodic work described by the schedule info. */
  public void addPeriodicityTask(ScheduleInfo schInfo, Runnable work) {
    long now = System.currentTimeMillis();
    service.scheduleAtFixedRate(work, schInfo.getStartTime() - now,
        schInfo.getRate(), TimeUnit.MILLISECONDS);
  }

  // TODO: to be defined
  public void addEventTask() {
  }

  /**
   * Shuts down the underlying executor, waiting up to 3 seconds for
   * running tasks before forcing termination.
   */
  public void shutdown() {
    try {
      service.shutdown();
      if (!service.awaitTermination(3000, TimeUnit.MILLISECONDS)) {
        service.shutdownNow();
      }
    } catch (InterruptedException e) {
      service.shutdownNow();
      // Restore the interrupt status for callers instead of swallowing it.
      Thread.currentThread().interrupt();
    }
  }

  /**
   * This will be used for extension: a full event based scheduler.
   */
  private class EventGenTask implements Runnable {
    private long id;
    private ScheduleInfo scheduleInfo;
    private int triggered;

    public EventGenTask(long id) {
      this.id = id;
    }

    @Override
    public void run() {
      triggered++;
      if (triggered <= scheduleInfo.getRounds()) {
        // Event generation for this round is not implemented yet.
      } else {
        exitSchduler();
      }
    }

    private void exitSchduler() {
      // Deliberately throw an unchecked exception: a periodic task that
      // throws from scheduleAtFixedRate() has its subsequent executions
      // suppressed, which cancels this task. This replaces the former
      // out-of-bounds array write with an explicit throw of the same
      // exception type.
      throw new ArrayIndexOutOfBoundsException(
          "The exception is created deliberately");
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/rule/FileCopy2S3Plugin.java | smart-engine/src/main/java/org/smartdata/server/engine/rule/FileCopy2S3Plugin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.hdfs.action.Copy2S3Action;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.rule.RuleExecutorPlugin;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.utils.StringUtil;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class FileCopy2S3Plugin implements RuleExecutorPlugin {
private static final Logger LOG =
LoggerFactory.getLogger(FileCopy2S3Plugin.class.getName());
private List<String> srcBases;
public FileCopy2S3Plugin() {
srcBases = null;
}
@Override
public void onNewRuleExecutor(RuleInfo ruleInfo,
TranslateResult tResult) {
srcBases = new ArrayList<>();
List<String> pathsCheckGlob = tResult.getGlobPathCheck();
if (pathsCheckGlob.size() == 0) {
pathsCheckGlob = Collections.singletonList("/*");
}
// Get src base list
srcBases = getPathMatchesList(pathsCheckGlob);
LOG.debug("Source base list = {}", srcBases);
}
private List<String> getPathMatchesList(List<String> paths) {
List<String> ret = new ArrayList<>();
for (String p : paths) {
String dir = StringUtil.getBaseDir(p);
if (dir == null) {
continue;
}
ret.add(dir);
}
return ret;
}
@Override
public boolean preExecution(RuleInfo ruleInfo,
TranslateResult tResult) {
return true;
}
@Override
public List<String> preSubmitCmdlet(RuleInfo ruleInfo,
List<String> objects) {
return objects;
}
@Override
public CmdletDescriptor preSubmitCmdletDescriptor(RuleInfo ruleInfo,
TranslateResult tResult, CmdletDescriptor descriptor) {
for (int i = 0; i < descriptor.getActionSize(); i++) {
// O(n)
if (descriptor.getActionName(i).equals("copy2s3")) {
String srcPath = descriptor.getActionArgs(i).get(Copy2S3Action.SRC);
String destBase = descriptor.getActionArgs(i).get(Copy2S3Action.DEST);
String workPath = null;
// O(n)
for (String srcBase : srcBases) {
if (srcPath.startsWith(srcBase)) {
workPath = srcPath.replaceFirst(srcBase, "");
break;
}
}
if (workPath == null) {
LOG.error("Rule {} CmdletDescriptor {} Working Path is empty!", ruleInfo, descriptor);
}
// Update dest path
// dest base + work path = dest full path
descriptor.addActionArg(i, Copy2S3Action.DEST, destBase + workPath);
}
}
return descriptor;
}
@Override
public void onRuleExecutorExit(RuleInfo ruleInfo) {
srcBases = null;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/rule/SmallFilePlugin.java | smart-engine/src/main/java/org/smartdata/server/engine/rule/SmallFilePlugin.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.rule;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartFilePermission;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.hdfs.action.SmallFileCompactAction;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.CmdletDescriptor;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.smartdata.model.FileInfo;
import org.smartdata.model.FileState;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.rule.RuleExecutorPlugin;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.ServerContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
public class SmallFilePlugin implements RuleExecutorPlugin {
private int batchSize;
private MetaStore metaStore;
private CmdletManager cmdletManager;
private long containerFileSizeThreshold;
private Map<String, FileInfo> firstFileInfoCache;
private Map<RuleInfo, Map<String, FileInfo>> containerFileInfoCache;
private static final String COMPACT_ACTION_NAME = "compact";
private static final String CONTAINER_FILE_PREFIX = "_container_file_";
private static final Logger LOG = LoggerFactory.getLogger(SmallFilePlugin.class);
public SmallFilePlugin(ServerContext context, CmdletManager cmdletManager) {
this.metaStore = context.getMetaStore();
this.batchSize = context.getConf().getInt(
SmartConfKeys.SMART_COMPACT_BATCH_SIZE_KEY,
SmartConfKeys.SMART_COMPACT_BATCH_SIZE_DEFAULT);
this.cmdletManager = cmdletManager;
long containerFileThresholdMB = context.getConf().getLong(
SmartConfKeys.SMART_COMPACT_CONTAINER_FILE_THRESHOLD_MB_KEY,
SmartConfKeys.SMART_COMPACT_CONTAINER_FILE_THRESHOLD_MB_DEFAULT);
this.containerFileSizeThreshold = containerFileThresholdMB * 1024 * 1024;
this.firstFileInfoCache = new ConcurrentHashMap<>();
this.containerFileInfoCache = new ConcurrentHashMap<>();
}
@Override
public void onNewRuleExecutor(final RuleInfo ruleInfo, TranslateResult tResult) {
}
@Override
public boolean preExecution(final RuleInfo ruleInfo, TranslateResult tResult) {
return true;
}
@Override
public List<String> preSubmitCmdlet(final RuleInfo ruleInfo, List<String> objects) {
if (ruleInfo.getRuleText().contains(COMPACT_ACTION_NAME)) {
if (objects == null || objects.isEmpty()) {
LOG.debug("Objects is null or empty.");
return objects;
}
// Split valid small files according to the file permission
Map<String, FileInfo> containerFileInfoMap = getContainerFileInfos();
Map<SmallFileStatus, List<String>> smallFileStateMap = new HashMap<>();
for (String object : objects) {
LOG.debug("Start handling the file: {}.", object);
// Check if the file is container file
if (!object.endsWith("/")) {
String fileName = object.substring(
object.lastIndexOf("/") + 1, object.length());
if (fileName.startsWith(CONTAINER_FILE_PREFIX)
|| containerFileInfoMap.containsKey(object)) {
LOG.debug("{} is container file.", object);
continue;
}
}
// Check file info and state
try {
FileInfo fileInfo = metaStore.getFile(object);
FileState fileState = metaStore.getFileState(object);
if (fileInfo != null
&& fileInfo.getLength() > 0
&& fileInfo.getLength() < containerFileSizeThreshold
&& fileState.getFileType().equals(FileState.FileType.NORMAL)
&& fileState.getFileStage().equals(FileState.FileStage.DONE)) {
SmallFileStatus smallFileStatus = new SmallFileStatus(fileInfo);
if (smallFileStateMap.containsKey(smallFileStatus)) {
smallFileStateMap.get(smallFileStatus).add(object);
} else {
firstFileInfoCache.put(object, fileInfo);
List<String> list = new ArrayList<>();
list.add(object);
smallFileStateMap.put(smallFileStatus, list);
}
} else {
LOG.debug("Invalid file {} for small file compact.", object);
}
} catch (MetaStoreException e) {
LOG.error(String.format("Failed to get file info of %s.", object), e);
}
}
// Split small files according to the batch size
List<String> smallFileList = new ArrayList<>();
for (List<String> listElement : smallFileStateMap.values()) {
int size = listElement.size();
for (int i = 0; i < size; i += batchSize) {
int toIndex = (i + batchSize <= size) ? i + batchSize : size;
String smallFiles = new Gson().toJson(listElement.subList(i, toIndex));
smallFileList.add(smallFiles);
}
}
// Update container file info cache for preSubmitCmdletDescriptor
updateContainerFileInfoCache(ruleInfo, containerFileInfoMap);
return smallFileList;
} else {
return objects;
}
}
/**
* Get container file info map from meta store.
*/
private Map<String, FileInfo> getContainerFileInfos() {
Map<String, FileInfo> ret = new LinkedHashMap<>();
try {
List<String> containerFiles = metaStore.getAllContainerFiles();
if (!containerFiles.isEmpty()) {
List<FileInfo> fileInfos = metaStore.getFilesByPaths(containerFiles);
// Sort file infos based on the file length
Collections.sort(fileInfos, new Comparator<FileInfo>(){
@Override
public int compare(FileInfo a, FileInfo b) {
return Long.compare(a.getLength(), b.getLength());
}
});
for (FileInfo fileInfo : fileInfos) {
ret.put(fileInfo.getPath(), fileInfo);
}
}
} catch (MetaStoreException e) {
LOG.error("Failed to get file info of all the container files.", e);
}
return ret;
}
/**
* Update container file info cache based on containerFileSizeThreshold
* and cmdlet.
*/
private void updateContainerFileInfoCache(RuleInfo ruleInfo,
Map<String, FileInfo> containerFileInfoMap) {
if (!containerFileInfoMap.isEmpty()) {
// Remove container file whose size is greater than containerFileSizeThreshold
for (Map.Entry<String, FileInfo> entry : containerFileInfoMap.entrySet()) {
if (entry.getValue().getLength() >= containerFileSizeThreshold) {
containerFileInfoMap.remove(entry.getKey());
}
}
// Remove container file which is being used
try {
List<Long> aids = new ArrayList<>();
List<CmdletInfo> list = cmdletManager.listCmdletsInfo(ruleInfo.getId());
for (CmdletInfo cmdletInfo : list) {
if (!CmdletState.isTerminalState(cmdletInfo.getState())) {
aids.addAll(cmdletInfo.getAids());
}
}
List<ActionInfo> actionInfos = cmdletManager.getActions(aids);
for (ActionInfo actionInfo : actionInfos) {
Map<String, String> args = actionInfo.getArgs();
if (args.containsKey(SmallFileCompactAction.CONTAINER_FILE)) {
containerFileInfoMap.remove(
args.get(SmallFileCompactAction.CONTAINER_FILE));
}
}
} catch (IOException e) {
LOG.error("Failed to get cmdlet and action info.", e);
}
}
containerFileInfoCache.put(ruleInfo, containerFileInfoMap);
}
@Override
public CmdletDescriptor preSubmitCmdletDescriptor(
final RuleInfo ruleInfo, TranslateResult tResult, CmdletDescriptor descriptor) {
for (int i = 0; i < descriptor.getActionSize(); i++) {
if (COMPACT_ACTION_NAME.equals(descriptor.getActionName(i))) {
String smallFiles = descriptor.getActionArgs(i).get(HdfsAction.FILE_PATH);
if (smallFiles != null && !smallFiles.isEmpty()) {
// Check if small file list is empty
ArrayList<String> smallFileList = new Gson().fromJson(
smallFiles, new TypeToken<ArrayList<String>>() {
}.getType());
if (smallFileList == null || smallFileList.isEmpty()) {
continue;
}
// Get the first small file info
String firstFile = smallFileList.get(0);
FileInfo firstFileInfo;
if (firstFileInfoCache.containsKey(firstFile)) {
firstFileInfo = firstFileInfoCache.get(firstFile);
} else {
try {
firstFileInfo = metaStore.getFile(firstFile);
if (firstFileInfo == null) {
LOG.debug("{} is not exist!!!", firstFile);
continue;
}
} catch (MetaStoreException e) {
LOG.error(String.format("Failed to get file info of: %s.", firstFile), e);
continue;
}
}
// Get valid compact action arguments
SmartFilePermission firstFilePermission = new SmartFilePermission(
firstFileInfo);
String firstFileDir = firstFile.substring(0, firstFile.lastIndexOf("/") + 1);
CompactActionArgs args = getCompactActionArgs(ruleInfo, firstFileDir,
firstFilePermission, smallFileList);
// Set container file path and its permission, file path of this action
descriptor.addActionArg(
i, SmallFileCompactAction.CONTAINER_FILE, args.containerFile);
descriptor.addActionArg(
i, SmallFileCompactAction.CONTAINER_FILE_PERMISSION,
new Gson().toJson(args.containerFilePermission));
descriptor.addActionArg(
i, HdfsAction.FILE_PATH, new Gson().toJson(args.smartFiles));
}
}
}
return descriptor;
}
/**
* Construct compact action arguments.
*/
private class CompactActionArgs {
private String containerFile;
private SmartFilePermission containerFilePermission;
private List<String> smartFiles;
private CompactActionArgs(String containerFile,
SmartFilePermission containerFilePermission, List<String> smartFiles) {
this.containerFile = containerFile;
this.containerFilePermission = containerFilePermission;
this.smartFiles = smartFiles;
}
}
/**
* Get valid compact action arguments.
*/
private CompactActionArgs getCompactActionArgs(RuleInfo ruleInfo, String firstFileDir,
SmartFilePermission firstFilePermission, List<String> smallFileList) {
Map<String, FileInfo> containerFileMap = containerFileInfoCache.get(ruleInfo);
for (Iterator<Map.Entry<String, FileInfo>> iter =
containerFileMap.entrySet().iterator(); iter.hasNext();) {
Map.Entry<String, FileInfo> entry = iter.next();
String containerFilePath = entry.getKey();
FileInfo containerFileInfo = entry.getValue();
iter.remove();
// Get compact action arguments
String containerFileDir = containerFilePath.substring(
0, containerFilePath.lastIndexOf("/") + 1);
if (firstFileDir.equals(containerFileDir)
&& firstFilePermission.equals(
new SmartFilePermission(containerFileInfo))) {
List<String> validSmallFiles;
try {
validSmallFiles = getValidSmallFiles(containerFileInfo, smallFileList);
} catch (MetaStoreException e) {
LOG.error("Failed to get file info of small files.", e);
continue;
}
if (validSmallFiles != null) {
return new CompactActionArgs(containerFilePath, null, validSmallFiles);
}
}
}
return genCompactActionArgs(firstFileDir, firstFilePermission, smallFileList);
}
/**
* Generate new compact action arguments based on first file info.
*/
private CompactActionArgs genCompactActionArgs(String firstFileDir,
SmartFilePermission firstFilePermission, List<String> smallFileList) {
// Generate new container file
String containerFilePath = firstFileDir + CONTAINER_FILE_PREFIX
+ UUID.randomUUID().toString().replace("-", "");
return new CompactActionArgs(containerFilePath,
firstFilePermission, smallFileList);
}
/**
* Get valid small files according to container file.
*/
private List<String> getValidSmallFiles(FileInfo containerFileInfo,
List<String> smallFileList) throws MetaStoreException {
// Get container file len
long containerFileLen = containerFileInfo.getLength();
// Sort small file list for getting most eligible small files
List<String> ret = new ArrayList<>();
List<FileInfo> smallFileInfos = metaStore.getFilesByPaths(smallFileList);
Collections.sort(smallFileInfos, new Comparator<FileInfo>(){
@Override
public int compare(FileInfo a, FileInfo b) {
return Long.compare(a.getLength(), b.getLength());
}
});
// Get small files can be compacted to container file
for (FileInfo fileInfo : smallFileInfos) {
long fileLen = fileInfo.getLength();
if (fileLen > 0) {
containerFileLen += fileLen;
if (containerFileLen < containerFileSizeThreshold * 1.2) {
ret.add(fileInfo.getPath());
}
if (containerFileLen >= containerFileSizeThreshold) {
break;
}
}
}
if (!ret.isEmpty()) {
return ret;
} else {
return null;
}
}
/**
* Handle small file status.
*/
private class SmallFileStatus {
private String dir;
private SmartFilePermission smartFilePermission;
private SmallFileStatus(FileInfo fileInfo) {
String path = fileInfo.getPath();
this.dir = path.substring(0, path.lastIndexOf("/") + 1);
this.smartFilePermission = new SmartFilePermission(fileInfo);
}
@Override
public int hashCode() {
return dir.hashCode() ^ smartFilePermission.hashCode();
}
@Override
public boolean equals(Object smallFileStatus) {
if (this == smallFileStatus) {
return true;
}
if (smallFileStatus instanceof SmallFileStatus) {
SmallFileStatus anSmallFileStatus = (SmallFileStatus) smallFileStatus;
return (this.dir.equals(anSmallFileStatus.dir))
&& this.smartFilePermission.equals(anSmallFileStatus.smartFilePermission);
}
return false;
}
}
@Override
public void onRuleExecutorExit(final RuleInfo ruleInfo) {
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletDispatcherHelper.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletDispatcherHelper.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import com.google.common.eventbus.Subscribe;
import org.smartdata.server.engine.EngineEventBus;
import org.smartdata.server.engine.message.AddNodeMessage;
import org.smartdata.server.engine.message.NodeMessage;
import org.smartdata.server.engine.message.RemoveNodeMessage;
import java.util.LinkedList;
import java.util.List;
public class CmdletDispatcherHelper {
  private static CmdletDispatcherHelper inst;
  // Node events (and their add/remove flags) buffered while no dispatcher
  // is registered; replayed on register(). Both lists are guarded by msgs.
  private List<NodeMessage> msgs = new LinkedList<>();
  private List<Boolean> opers = new LinkedList<>();
  private CmdletDispatcher dispatcher = null;

  /**
   * Attaches the dispatcher and replays any node events buffered while no
   * dispatcher was registered, in arrival order.
   */
  public void register(CmdletDispatcher dispatcher) {
    synchronized (msgs) {
      this.dispatcher = dispatcher;
      // Copy the flags into an array first: LinkedList#get(i) is O(i), so
      // the previous index-based loop over both lists was quadratic in the
      // number of buffered messages.
      Boolean[] flags = opers.toArray(new Boolean[0]);
      int i = 0;
      for (NodeMessage msg : msgs) {
        dispatcher.onNodeMessage(msg, flags[i++]);
      }
      msgs.clear();
      opers.clear();
    }
  }

  public void unregister() {
    synchronized (msgs) {
      dispatcher = null;
    }
  }

  public static void init() {
    inst = new CmdletDispatcherHelper();
    EngineEventBus.register(inst);
  }

  /**
   * The instance will be registered by EngineEventBus.
   * Node add/remove event will be posted by SmartServer,
   * standby server and agent master.
   */
  public static CmdletDispatcherHelper getInst() {
    return inst;
  }

  @Subscribe
  public void onAddNodeMessage(AddNodeMessage msg) {
    onNodeMessage(msg, true);
  }

  @Subscribe
  public void onRemoveNodeMessage(RemoveNodeMessage msg) {
    onNodeMessage(msg, false);
  }

  /** Forwards the event, or buffers it when no dispatcher is attached. */
  private void onNodeMessage(NodeMessage msg, boolean add) {
    synchronized (msgs) {
      if (dispatcher == null) {
        // Dispatcher is not registered, but we need to keep message
        // in msgs and ask dispatcher to tackle in #register later.
        msgs.add(msg);
        opers.add(add);
      } else {
        dispatcher.onNodeMessage(msg, add);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/StatusReportTask.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/StatusReportTask.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.StatusReport;
import org.smartdata.protocol.message.StatusReporter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class StatusReportTask implements Runnable {
  private StatusReporter statusReporter;
  private CmdletExecutor cmdletExecutor;
  // Timestamp (ms) at which the last report was actually sent.
  private long lastReportTime;
  // Max time (ms) between reports: period * multiplier.
  private int interval;
  // Fraction of tracked actions that must be finished to trigger a
  // report before the interval elapses.
  public double ratio;
  // Latest known status per action id, accumulated between reports.
  private Map<Long, ActionStatus> idToActionStatus;

  public StatusReportTask(
      StatusReporter statusReporter, CmdletExecutor cmdletExecutor, SmartConf conf) {
    this.statusReporter = statusReporter;
    this.cmdletExecutor = cmdletExecutor;
    this.lastReportTime = System.currentTimeMillis();
    int period = conf.getInt(SmartConfKeys.SMART_STATUS_REPORT_PERIOD_KEY,
        SmartConfKeys.SMART_STATUS_REPORT_PERIOD_DEFAULT);
    int multiplier = conf.getInt(SmartConfKeys.SMART_STATUS_REPORT_PERIOD_MULTIPLIER_KEY,
        SmartConfKeys.SMART_STATUS_REPORT_PERIOD_MULTIPLIER_DEFAULT);
    this.interval = period * multiplier;
    this.ratio = conf.getDouble(SmartConfKeys.SMART_STATUS_REPORT_RATIO_KEY,
        SmartConfKeys.SMART_STATUS_REPORT_RATIO_DEFAULT);
    this.idToActionStatus = new HashMap<>();
  }

  /**
   * Collects the executor's latest action statuses and forwards an
   * aggregated report when either the report interval has elapsed or the
   * finished-action ratio reaches the configured threshold.
   */
  @Override
  public void run() {
    StatusReport statusReport = cmdletExecutor.getStatusReport();
    if (statusReport != null) {
      List<ActionStatus> actionStatuses = statusReport.getActionStatuses();
      for (ActionStatus actionStatus : actionStatuses) {
        // Later statuses overwrite earlier ones for the same action id.
        idToActionStatus.put(actionStatus.getActionId(), actionStatus);
      }
      if (!idToActionStatus.isEmpty()) {
        int finishedNum = 0;
        for (ActionStatus actionStatus : idToActionStatus.values()) {
          if (actionStatus.isFinished()) {
            finishedNum++;
          }
        }
        long currentTime = System.currentTimeMillis();
        if (currentTime - lastReportTime >= interval
            || (double) finishedNum / idToActionStatus.size() >= ratio) {
          // Typed copy (was a raw ArrayList); snapshot before clearing.
          statusReporter.report(
              new StatusReport(new ArrayList<>(idToActionStatus.values())));
          idToActionStatus.clear();
          lastReportTime = currentTime;
        }
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletExecutor.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletExecutor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.model.CmdletState;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.StatusReport;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
//Todo: 1. make this a interface so that we could have different executor implementation
// 2. add api providing available resource
public class CmdletExecutor {
  static final Logger LOG = LoggerFactory.getLogger(CmdletExecutor.class);
  private final SmartConf smartConf;
  // Futures of submitted cmdlets, keyed by cmdlet id, kept for cancellation.
  private Map<Long, Future> listenableFutures;
  // Cmdlets currently submitted and not yet completed.
  private Map<Long, Cmdlet> runningCmdlets;
  // Cmdlets whose statuses still need reporting; entries are dropped in
  // getStatusReport() once a cmdlet stops producing statuses.
  private Map<Long, Cmdlet> idToReportCmdlet;
  private ListeningExecutorService executorService;

  public CmdletExecutor(SmartConf smartConf) {
    this.smartConf = smartConf;
    this.listenableFutures = new ConcurrentHashMap<>();
    this.runningCmdlets = new ConcurrentHashMap<>();
    this.idToReportCmdlet = new ConcurrentHashMap<>();
    int nThreads =
        smartConf.getInt(
            SmartConfKeys.SMART_CMDLET_EXECUTORS_KEY,
            SmartConfKeys.SMART_CMDLET_EXECUTORS_DEFAULT);
    this.executorService = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(nThreads));
  }

  /** Submits the cmdlet for asynchronous execution and starts tracking it. */
  public void execute(Cmdlet cmdlet) {
    ListenableFuture<?> future = this.executorService.submit(cmdlet);
    Futures.addCallback(future, new CmdletCallBack(cmdlet), executorService);
    this.listenableFutures.put(cmdlet.getId(), future);
    this.runningCmdlets.put(cmdlet.getId(), cmdlet);
    idToReportCmdlet.put(cmdlet.getId(), cmdlet);
  }

  /**
   * Cancels the cmdlet if it is still running and stops tracking it.
   * Safe to call with ids that are unknown or already completed.
   */
  public void stop(Long cmdletId) {
    Future future = listenableFutures.get(cmdletId);
    if (future != null) {
      // The completion callback may remove map entries concurrently, so
      // the cmdlet can already be gone even though its future was found;
      // the old containsKey-then-get sequence could NPE here.
      Cmdlet cmdlet = runningCmdlets.get(cmdletId);
      if (cmdlet != null) {
        cmdlet.setState(CmdletState.FAILED);
      }
      future.cancel(true);
    }
    removeCmdlet(cmdletId);
  }

  public void shutdown() {
    this.executorService.shutdown();
  }

  /**
   * Drains the statuses of all tracked cmdlets into one report. Cmdlets
   * that no longer produce statuses are dropped from the tracking set.
   *
   * @return aggregated report, or null when no cmdlet is tracked
   */
  public StatusReport getStatusReport() {
    if (idToReportCmdlet.isEmpty()) {
      return null;
    }
    List<ActionStatus> actionStatusList = new ArrayList<>();
    Iterator<Cmdlet> iter = idToReportCmdlet.values().iterator();
    while (iter.hasNext()) {
      Cmdlet cmdlet = iter.next();
      try {
        List<ActionStatus> statuses = cmdlet.getActionStatuses();
        if (statuses != null) {
          actionStatusList.addAll(statuses);
        } else {
          iter.remove();
        }
      } catch (UnsupportedEncodingException e) {
        LOG.error("Get actionStatus for cmdlet [id={}] error", cmdlet.getId(), e);
      }
    }
    return new StatusReport(actionStatusList);
  }

  private void removeCmdlet(long cmdletId) {
    this.runningCmdlets.remove(cmdletId);
    this.listenableFutures.remove(cmdletId);
  }

  /** Removes the cmdlet from the tracking maps once its future completes. */
  private class CmdletCallBack implements FutureCallback<Object> {
    private final Cmdlet cmdlet;

    public CmdletCallBack(Cmdlet cmdlet) {
      this.cmdlet = cmdlet;
    }

    @Override
    public void onSuccess(Object result) {
      removeCmdlet(cmdlet.getId());
    }

    @Override
    public void onFailure(Throwable t) {
      removeCmdlet(cmdlet.getId());
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/Cmdlet.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/Cmdlet.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.SmartAction;
import org.smartdata.model.CmdletState;
import org.smartdata.protocol.message.ActionStatus;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
/**
 * Action is the minimum unit of execution. A cmdlet can contain more than one
 * actions. Different cmdlets can be executed at the same time, but actions
 * belonging to a cmdlet can only be executed in sequence.
 *
 * <p>The cmdlet gets executed when rule conditions are fulfilled.
 *
 * <p>Threading: run() executes on an executor thread while
 * getActionStatuses() is polled from a reporting thread; the shared
 * actionReportList is guarded by synchronized(this).
 */
// Todo: Cmdlet's state should be maintained by itself
public class Cmdlet implements Runnable {
  static final Logger LOG = LoggerFactory.getLogger(Cmdlet.class);

  private long ruleId; // id of the rule that this cmdlet comes from
  private long id;
  private CmdletState state = CmdletState.NOTINITED;
  // Millisecond timestamp of the most recent state transition.
  private long stateUpdateTime;
  // Actions executed sequentially by run().
  private final List<SmartAction> actions;
  // Actions whose status has not been fully reported yet, held in reverse
  // (descending action id) order; finished entries are pruned by
  // getActionStatuses(). Guarded by synchronized(this).
  private List<SmartAction> actionReportList;

  public Cmdlet(List<SmartAction> actions) {
    this.actions = actions;
    this.actionReportList = new ArrayList<>();
    // Copy the actions into the report list in reverse order.
    ListIterator<SmartAction> iter = actions.listIterator(actions.size());
    while (iter.hasPrevious()) {
      this.actionReportList.add(iter.previous());
    }
  }

  public long getRuleId() {
    return ruleId;
  }

  public void setId(long id) {
    this.id = id;
  }

  public void setRuleId(long ruleId) {
    this.ruleId = ruleId;
  }

  public long getId() {
    return id;
  }

  public CmdletState getState() {
    return state;
  }

  //Todo: remove this method
  public void setState(CmdletState state) {
    this.state = state;
  }

  public String toString() {
    return "Rule-" + ruleId + "-Cmd-" + id;
  }

  /** Whether the cmdlet has reached a terminal state. */
  public boolean isFinished() {
    return CmdletState.isTerminalState(state);
  }

  // Runs every action in order and stops at the first failure. On failure
  // the remaining never-run actions are removed from the report list so
  // they are not reported as pending forever.
  private void runAllActions() {
    state = CmdletState.EXECUTING;
    stateUpdateTime = System.currentTimeMillis();
    Iterator<SmartAction> iter = actions.iterator();
    while (iter.hasNext()) {
      SmartAction act = iter.next();
      if (act == null) {
        continue;
      }
      // Init Action
      // TODO: this statement maybe can be removed.
      act.init(act.getArguments());
      act.run();
      if (!act.isSuccessful()) {
        // Drain the iterator, dropping unreached actions from reporting.
        while (iter.hasNext()) {
          SmartAction nextAct = iter.next();
          synchronized (this) {
            actionReportList.remove(nextAct);
          }
        }
        state = CmdletState.FAILED;
        stateUpdateTime = System.currentTimeMillis();
        LOG.error("Executing Cmdlet [id={}] meets failed.", getId());
        return;
      }
    }
    state = CmdletState.DONE;
    stateUpdateTime = System.currentTimeMillis();
    // TODO catch MetaStoreException and handle
  }

  @Override
  public void run() {
    runAllActions();
  }

  /**
   * Returns statuses of not-yet-fully-reported actions (in ascending action
   * id order after the final reverse), or null when nothing remains.
   * Finished actions are removed so each terminal status is reported once.
   */
  public synchronized List<ActionStatus> getActionStatuses() throws UnsupportedEncodingException {
    if (actionReportList.isEmpty()) {
      return null;
    }
    // get status in the order of the descend action id.
    // The cmdletmanager should update action status in the ascend order.
    List<ActionStatus> statuses = new ArrayList<>();
    Iterator<SmartAction> iter = actionReportList.iterator();
    while (iter.hasNext()) {
      SmartAction action = iter.next();
      ActionStatus status = action.getActionStatus();
      statuses.add(status);
      if (status.isFinished()) {
        iter.remove();
      }
    }
    return Lists.reverse(statuses);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletDispatcherStat.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletDispatcherStat.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
/**
 * Mutable bundle of counters describing dispatcher activity over an
 * interval: rounds run, failed/successful dispatches, empty-queue rounds and
 * full-slot rounds. Instances from several dispatch tasks are merged with
 * {@link #add}. Not thread-safe.
 */
public class CmdletDispatcherStat {
  private int statRound = 0;        // dispatch rounds executed
  private int statFail = 0;         // dispatch attempts that failed
  private int statDispatched = 0;   // cmdlets successfully dispatched
  private int statNoMoreCmdlet = 0; // rounds that found the queue empty
  private int statFull = 0;         // rounds skipped because no slot was free

  /** Creates a stat object with all counters at zero. */
  public CmdletDispatcherStat() {
  }

  /** Creates a stat object pre-loaded with the given counter values. */
  public CmdletDispatcherStat(int statRound, int statFail, int statDispatched,
      int statNoMoreCmdlet, int statFull) {
    this.statRound = statRound;
    this.statFail = statFail;
    this.statDispatched = statDispatched;
    this.statNoMoreCmdlet = statNoMoreCmdlet;
    this.statFull = statFull;
  }

  public int getStatRound() { return statRound; }

  public void setStatRound(int statRound) { this.statRound = statRound; }

  public void addStatRound(int val) { statRound += val; }

  public int getStatFail() { return statFail; }

  public void setStatFail(int statFail) { this.statFail = statFail; }

  public void addStatFail(int val) { statFail += val; }

  public int getStatDispatched() { return statDispatched; }

  public void setStatDispatched(int statDispatched) { this.statDispatched = statDispatched; }

  public void addStatDispatched(int val) { statDispatched += val; }

  public int getStatNoMoreCmdlet() { return statNoMoreCmdlet; }

  public void setStatNoMoreCmdlet(int statNoMoreCmdlet) { this.statNoMoreCmdlet = statNoMoreCmdlet; }

  public void addStatNoMoreCmdlet(int val) { statNoMoreCmdlet += val; }

  public int getStatFull() { return statFull; }

  public void setStatFull(int statFull) { this.statFull = statFull; }

  public void addStatFull(int val) { statFull += val; }

  /** Accumulates every counter of {@code stat} into this instance. */
  public void add(CmdletDispatcherStat stat) {
    statRound += stat.statRound;
    statFail += stat.statFail;
    statDispatched += stat.statDispatched;
    statFull += stat.statFull;
    statNoMoreCmdlet += stat.statNoMoreCmdlet;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletDispatcher.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletDispatcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import com.google.common.collect.ListMultimap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.action.ActionException;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.model.CmdletState;
import org.smartdata.model.ExecutorType;
import org.smartdata.model.LaunchAction;
import org.smartdata.model.action.ActionScheduler;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.CmdletStatus;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.server.cluster.ActiveServerNodeCmdletMetrics;
import org.smartdata.server.cluster.NodeCmdletMetrics;
import org.smartdata.server.engine.ActiveServerInfo;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.message.NodeMessage;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Dispatches scheduled cmdlets to registered executor services (local
 * server, standby servers, agents). Flow control is slot based: every
 * registered node contributes {@code defaultSlots} slots; per-type and
 * total slot counters are kept in atomics and a dispatch proceeds only
 * after a slot has been reserved.
 */
public class CmdletDispatcher {
  private static final Logger LOG = LoggerFactory.getLogger(CmdletDispatcher.class);
  // Cmdlets scheduled but not yet dispatched; queue is shared with CmdletManager.
  private Queue<Long> pendingCmdlets;
  private final CmdletManager cmdletManager;
  private final List<Long> runningCmdlets;
  private final Map<Long, LaunchCmdlet> idToLaunchCmdlet;
  private final ListMultimap<String, ActionScheduler> schedulers;
  private final ScheduledExecutorService schExecService;
  // Indexed by ExecutorType ordinal; entry is null until registered.
  private CmdletExecutorService[] cmdExecServices;
  // Number of registered nodes per ExecutorType ordinal.
  private int[] cmdExecSrvInsts;
  private int cmdExecSrvTotalInsts;
  // Remaining dispatch slots per ExecutorType, and overall.
  private AtomicInteger[] execSrvSlotsLeft;
  private AtomicInteger totalSlotsLeft = new AtomicInteger();
  // Cmdlet id -> executor type it was dispatched to.
  private Map<Long, ExecutorType> dispatchedToSrvs;
  private boolean disableLocalExec;
  private boolean logDispResult;
  private DispatchTask[] dispatchTasks;
  private int outputDispMetricsInterval; // 0 means no output
  // TODO: to be refined
  private final int defaultSlots;
  private final int executorsNum;
  // Round-robin cursor used by dispatch() when choosing an executor type.
  private AtomicInteger index = new AtomicInteger(0);
  // Node id -> remaining per-node slots.
  private Map<String, AtomicInteger> regNodes = new HashMap<>();
  private Map<String, NodeCmdletMetrics> regNodeInfos = new HashMap<>();
  // Per ExecutorType ordinal: ids of registered nodes of that type.
  private List<List<String>> cmdExecSrvNodeIds = new ArrayList<>();
  // Per ExecutorType ordinal: node that most recently finished a cmdlet,
  // used as a locality hint when scanning for a free node.
  private String[] completeOn = new String[ExecutorType.values().length];
  private SmartConf conf;

  public CmdletDispatcher(SmartContext smartContext, CmdletManager cmdletManager,
      Queue<Long> scheduledCmdlets, Map<Long, LaunchCmdlet> idToLaunchCmdlet,
      List<Long> runningCmdlets, ListMultimap<String, ActionScheduler> schedulers) {
    this.conf = smartContext.getConf();
    this.cmdletManager = cmdletManager;
    this.pendingCmdlets = scheduledCmdlets;
    this.runningCmdlets = runningCmdlets;
    this.idToLaunchCmdlet = idToLaunchCmdlet;
    this.schedulers = schedulers;
    this.executorsNum = conf.getInt(SmartConfKeys.SMART_CMDLET_EXECUTORS_KEY,
        SmartConfKeys.SMART_CMDLET_EXECUTORS_DEFAULT);
    int delta = conf.getInt(SmartConfKeys.SMART_DISPATCH_CMDLETS_EXTRA_NUM_KEY,
        SmartConfKeys.SMART_DISPATCH_CMDLETS_EXTRA_NUM_DEFAULT);
    // Allow a few more in-flight cmdlets per node than executor threads.
    this.defaultSlots = executorsNum + delta;
    this.cmdExecServices = new CmdletExecutorService[ExecutorType.values().length];
    this.cmdExecSrvInsts = new int[ExecutorType.values().length];
    this.execSrvSlotsLeft = new AtomicInteger[ExecutorType.values().length];
    for (int i = 0; i < execSrvSlotsLeft.length; i++) {
      execSrvSlotsLeft[i] = new AtomicInteger(0);
      cmdExecSrvNodeIds.add(new ArrayList<String>());
    }
    this.cmdExecSrvTotalInsts = 0;
    this.dispatchedToSrvs = new ConcurrentHashMap<>();
    this.disableLocalExec = conf.getBoolean(
        SmartConfKeys.SMART_ACTION_LOCAL_EXECUTION_DISABLED_KEY,
        SmartConfKeys.SMART_ACTION_LOCAL_EXECUTION_DISABLED_DEFAULT);
    this.logDispResult = conf.getBoolean(
        SmartConfKeys.SMART_CMDLET_DISPATCHER_LOG_DISP_RESULT_KEY,
        SmartConfKeys.SMART_CMDLET_DISPATCHER_LOG_DISP_RESULT_DEFAULT);
    int numDisp = conf.getInt(SmartConfKeys.SMART_CMDLET_DISPATCHERS_KEY,
        SmartConfKeys.SMART_CMDLET_DISPATCHERS_DEFAULT);
    if (numDisp <= 0) {
      numDisp = 1;
    }
    this.dispatchTasks = new DispatchTask[numDisp];
    for (int i = 0; i < numDisp; i++) {
      dispatchTasks[i] = new DispatchTask(this, i);
    }
    // One thread per dispatch task plus one for the stats logger.
    this.schExecService = Executors.newScheduledThreadPool(numDisp + 1);
    this.outputDispMetricsInterval = conf.getInt(
        SmartConfKeys.SMART_CMDLET_DISPATCHER_LOG_DISP_METRICS_INTERVAL_KEY,
        SmartConfKeys.SMART_CMDLET_DISPATCHER_LOG_DISP_METRICS_INTERVAL_DEFAULT);
  }

  /** Registers an executor service; LOCAL is skipped when disabled by config. */
  public void registerExecutorService(CmdletExecutorService executorService) {
    // No need to register for disabled local executor service.
    if (executorService.getExecutorType() == ExecutorType.LOCAL && disableLocalExec) {
      return;
    }
    this.cmdExecServices[executorService.getExecutorType().ordinal()] = executorService;
  }

  /** True while at least one dispatch slot remains across all services. */
  public boolean canDispatchMore() {
    return getTotalSlotsLeft() > 0;
  }

  /** Stops a dispatched cmdlet on its executor service and updates node metrics. */
  public void stopCmdlet(long cmdletId) {
    ExecutorType t = dispatchedToSrvs.get(cmdletId);
    if (t != null) {
      cmdExecServices[t.ordinal()].stop(cmdletId);
    }
    synchronized (dispatchedToSrvs) {
      // NOTE(review): assumes idToLaunchCmdlet still contains cmdletId; a
      // missing entry would NPE here — confirm callers guarantee presence.
      NodeCmdletMetrics metrics = regNodeInfos.get(idToLaunchCmdlet.get(cmdletId).getNodeId());
      if (metrics != null) {
        metrics.finishCmdlet();
      }
    }
  }

  //Todo: move this function to a proper place
  public void shutDownExcutorServices() {
    for (CmdletExecutorService service : cmdExecServices) {
      if (service != null) {
        service.shutdown();
      }
    }
  }

  /**
   * Polls the next scheduled cmdlet and marks it running.
   *
   * @return the cmdlet to launch, or null when the pending queue is empty
   */
  public LaunchCmdlet getNextCmdletToRun() throws IOException {
    Long cmdletId = pendingCmdlets.poll();
    if (cmdletId == null) {
      return null;
    }
    LaunchCmdlet launchCmdlet = idToLaunchCmdlet.get(cmdletId);
    runningCmdlets.add(cmdletId);
    return launchCmdlet;
  }

  // Records the execution host and pushes a DISPATCHED cmdlet status plus a
  // creation-time status for each action; status-update failures are logged
  // and otherwise ignored.
  private void updateCmdActionStatus(LaunchCmdlet cmdlet, String host) {
    if (cmdletManager != null) {
      try {
        cmdletManager.updateCmdletExecHost(cmdlet.getCmdletId(), host);
      } catch (IOException e) {
        // Ignore this
      }
    }
    try {
      LaunchAction action;
      ActionStatus actionStatus;
      for (int i = 0; i < cmdlet.getLaunchActions().size(); i++) {
        action = cmdlet.getLaunchActions().get(i);
        actionStatus = new ActionStatus(cmdlet.getCmdletId(),
            i == cmdlet.getLaunchActions().size() - 1,
            action.getActionId(), System.currentTimeMillis());
        cmdletManager.onActionStatusUpdate(actionStatus);
      }
      CmdletStatus cmdletStatus = new CmdletStatus(cmdlet.getCmdletId(),
          System.currentTimeMillis(), CmdletState.DISPATCHED);
      cmdletManager.onCmdletStatusUpdate(cmdletStatus);
    } catch (IOException e) {
      LOG.info("update status failed.", e);
    } catch (ActionException e) {
      LOG.info("update action status failed.", e);
    }
  }

  /** Periodic task that drains pending cmdlets into executor services. */
  private class DispatchTask implements Runnable {
    private final CmdletDispatcher dispatcher;
    private final int taskId;
    // Counters accumulated since the last getStat() call.
    private int statRound = 0;
    private int statFail = 0;
    private int statDispatched = 0;
    private int statNoMoreCmdlet = 0;
    private int statFull = 0;
    // Cmdlet carried over after a failed dispatch; retried next round
    // without re-running its pre-dispatch hooks.
    private LaunchCmdlet launchCmdlet = null;
    // Per-ExecutorType round-robin cursor over that type's node list.
    private int[] dispInstIdxs = new int[ExecutorType.values().length];

    public DispatchTask(CmdletDispatcher dispatcher, int taskId) {
      this.dispatcher = dispatcher;
      this.taskId = taskId;
    }

    /** Returns the accumulated counters and resets them to zero. */
    public CmdletDispatcherStat getStat() {
      CmdletDispatcherStat stat = new CmdletDispatcherStat(statRound, statFail,
          statDispatched, statNoMoreCmdlet, statFull);
      statRound = 0;
      statFail = 0;
      statDispatched = 0;
      statFull = 0;
      statNoMoreCmdlet = 0;
      return stat;
    }

    @Override
    public void run() {
      statRound++;
      if (cmdExecSrvTotalInsts == 0) {
        LOG.warn("No available executor service to execute action! "
            + "This can happen when only one smart server is running and "
            + "`smart.action.local.execution.disabled` is set to true.");
        return;
      }
      if (!dispatcher.canDispatchMore()) {
        statFull++;
        return;
      }
      // True when a cmdlet from the previous round is being retried; its
      // pre-dispatch processing already ran.
      boolean redisp = launchCmdlet != null;
      boolean disped;
      // Keep dispatching while a global slot can be reserved.
      while (resvExecSlot()) {
        disped = false;
        try {
          if (launchCmdlet == null) {
            launchCmdlet = getNextCmdletToRun();
          }
          if (launchCmdlet == null) {
            statNoMoreCmdlet++;
            break;
          } else {
            if (!redisp) {
              cmdletPreExecutionProcess(launchCmdlet);
            } else {
              redisp = false;
            }
            if (!dispatch(launchCmdlet)) {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Stop this round dispatch due : " + launchCmdlet);
              }
              statFail++;
              break;
            }
            disped = true;
            statDispatched++;
          }
        } catch (Throwable t) {
          LOG.error("Cmdlet dispatcher error", t);
        } finally {
          if (!disped) {
            // Give the reserved slot back; the held cmdlet (if any) is
            // retried next round.
            freeExecSlot();
          } else {
            launchCmdlet = null;
          }
        }
      }
    }

    // Chooses an executor type by weighted round robin (index mod total
    // instances mapped through per-type instance counts), then CAS-reserves
    // a per-type slot and a per-node slot, and hands the cmdlet over.
    // Returns false when no service could be selected.
    private boolean dispatch(LaunchCmdlet cmdlet) {
      int mod = index.incrementAndGet() % cmdExecSrvTotalInsts;
      int idx = 0;
      // NOTE(review): this loop sleeps 10ms per pass even after idx has been
      // resolved on the first pass (mod < 0 only exits at the loop head), so
      // every dispatch pays at least one sleep — looks like a deliberate
      // throttle/workaround; confirm before changing.
      for (int nround = 0; nround < 2 && mod >= 0; nround++) {
        for (idx = 0; idx < cmdExecSrvInsts.length; idx++) {
          mod -= cmdExecSrvInsts[idx];
          if (mod < 0) {
            break;
          }
        }
        try {
          Thread.sleep(10);
        } catch (InterruptedException e) {
          // ignore
        }
      }
      if (mod >= 0) {
        return false;
      }
      // Starting at the chosen type, scan all types for one with a free
      // slot; the CAS loop avoids over-reserving under contention.
      CmdletExecutorService selected = null;
      for (int i = 0; i < ExecutorType.values().length; i++) {
        idx = idx % ExecutorType.values().length;
        int left;
        do {
          left = execSrvSlotsLeft[idx].get();
          if (left > 0) {
            if (execSrvSlotsLeft[idx].compareAndSet(left, left - 1)) {
              selected = cmdExecServices[idx];
              break;
            }
          }
        } while (left > 0);
        if (selected != null) {
          break;
        }
        idx++;
      }
      if (selected == null) {
        LOG.error("No cmdlet executor service available. " + cmdlet);
        return false;
      }
      int srvId = selected.getExecutorType().ordinal();
      boolean sFlag = true;
      String nodeId;
      AtomicInteger counter;
      // Walk this type's nodes round robin until one yields a per-node
      // slot; on the first miss, jump once to the node that last completed
      // a cmdlet (locality hint). NOTE(review): if no node of this type
      // ever frees a slot this loop spins — presumably prevented by the
      // per-type slot reserved above; confirm.
      do {
        dispInstIdxs[srvId] = (dispInstIdxs[srvId] + 1) % cmdExecSrvNodeIds.get(srvId).size();
        nodeId = cmdExecSrvNodeIds.get(srvId).get(dispInstIdxs[srvId]);
        counter = regNodes.get(nodeId);
        int left = counter.get();
        if (left > 0) {
          if (counter.compareAndSet(left, left - 1)) {
            break;
          }
        }
        if (sFlag && completeOn[srvId] != null) {
          dispInstIdxs[srvId] = cmdExecSrvNodeIds.get(srvId).indexOf(completeOn[srvId]);
          sFlag = false;
        }
      } while (true);
      cmdlet.setNodeId(nodeId);
      boolean dispSucc = false;
      try {
        selected.execute(cmdlet);
        dispSucc = true;
      } finally {
        if (!dispSucc) {
          // execute() threw: return both reserved slots.
          counter.incrementAndGet();
          execSrvSlotsLeft[idx].incrementAndGet();
        }
      }
      // NOTE(review): effectively dead — if execute() throws, the exception
      // propagates past the finally block and this return is never reached.
      if (!dispSucc) {
        return false;
      }
      NodeCmdletMetrics metrics = regNodeInfos.get(nodeId);
      if (metrics != null) {
        metrics.incCmdletsInExecution();
      }
      updateCmdActionStatus(cmdlet, nodeId);
      dispatchedToSrvs.put(cmdlet.getCmdletId(), selected.getExecutorType());
      if (logDispResult) {
        LOG.info(String.format("Dispatching cmdlet->[%s] to executor: %s",
            cmdlet.getCmdletId(), nodeId));
      }
      return true;
    }
  }

  /** Periodically aggregates the dispatch tasks' counters and logs them. */
  private class LogStatTask implements Runnable {
    public DispatchTask[] tasks;
    private long lastReportNoExecutor = 0;
    private long lastInfo = System.currentTimeMillis();

    public LogStatTask(DispatchTask[] tasks) {
      this.tasks = tasks;
    }

    @Override
    public void run() {
      long curr = System.currentTimeMillis();
      CmdletDispatcherStat stat = new CmdletDispatcherStat();
      for (DispatchTask task : tasks) {
        stat.add(task.getStat());
      }
      // Skip logging for idle intervals (nothing dispatched and every round
      // simply found the queue empty).
      if (!(stat.getStatDispatched() == 0 && stat.getStatRound() == stat.getStatNoMoreCmdlet())) {
        if (cmdExecSrvTotalInsts != 0 || stat.getStatFull() != 0) {
          LOG.info("timeInterval={} statRound={} statFail={} statDispatched={} "
              + "statNoMoreCmdlet={} statFull={} pendingCmdlets={} numExecutor={}",
              curr - lastInfo, stat.getStatRound(), stat.getStatFail(), stat.getStatDispatched(),
              stat.getStatNoMoreCmdlet(), stat.getStatFull(), pendingCmdlets.size(),
              cmdExecSrvTotalInsts);
        } else {
          // Throttle the "no executor" message to once every 10 minutes.
          if (curr - lastReportNoExecutor >= 600 * 1000L) {
            LOG.info("No cmdlet executor. pendingCmdlets={}", pendingCmdlets.size());
            lastReportNoExecutor = curr;
          }
        }
      }
      lastInfo = System.currentTimeMillis();
    }
  }

  /** Runs every matching scheduler's onPreDispatch hook for each action. */
  public void cmdletPreExecutionProcess(LaunchCmdlet cmdlet) {
    int actionIndex = 0;
    for (LaunchAction action : cmdlet.getLaunchActions()) {
      for (ActionScheduler p : schedulers.get(action.getActionType())) {
        p.onPreDispatch(cmdlet, action, actionIndex);
      }
      actionIndex++;
    }
  }

  /** Releases the node and type slots held by a finished dispatched cmdlet. */
  public void onCmdletFinished(long cmdletId) {
    synchronized (dispatchedToSrvs) {
      if (dispatchedToSrvs.containsKey(cmdletId)) {
        LaunchCmdlet cmdlet = idToLaunchCmdlet.get(cmdletId);
        if (cmdlet == null) {
          return;
        }
        if (regNodes.get(cmdlet.getNodeId()) != null) {
          regNodes.get(cmdlet.getNodeId()).incrementAndGet();
        }
        NodeCmdletMetrics metrics = regNodeInfos.get(cmdlet.getNodeId());
        if (metrics != null) {
          metrics.finishCmdlet();
        }
        ExecutorType t = dispatchedToSrvs.remove(cmdletId);
        updateSlotsLeft(t.ordinal(), 1);
        // Remember the node for the dispatch locality hint.
        completeOn[t.ordinal()] = cmdlet.getNodeId();
      }
    }
  }

  /**
   * Maintain SSM cluster nodes. Add the node if {@code isAdd} is true.
   * Otherwise, remove the node.
   * If local executor is disabled, we will not tackle the node message
   * for active server. And the metrics for it will be set at {@link
   * #start start}
   */
  public void onNodeMessage(NodeMessage msg, boolean isAdd) {
    // New standby server can be added to an active SSM cluster by
    // executing start-standby-server.sh.
    if (msg.getNodeInfo().getExecutorType() == ExecutorType.REMOTE_SSM) {
      conf.addServerHosts(msg.getNodeInfo().getHost());
    }
    // New agent can be added to an active SSM cluster by executing
    // start-agent.sh.
    if (msg.getNodeInfo().getExecutorType() == ExecutorType.AGENT) {
      conf.addAgentHost(msg.getNodeInfo().getHost());
    }
    // Lock guards the node registries and slot counters during membership
    // changes (synchronizes on the array object itself).
    synchronized (cmdExecSrvInsts) {
      String nodeId = msg.getNodeInfo().getId();
      if (isAdd) {
        if (regNodes.containsKey(nodeId)) {
          LOG.warn("Skip duplicate add node for {}", msg.getNodeInfo());
          return;
        }
        regNodes.put(nodeId, new AtomicInteger(defaultSlots));
        NodeCmdletMetrics metrics =
            msg.getNodeInfo().getExecutorType() == ExecutorType.LOCAL
                ? new ActiveServerNodeCmdletMetrics() : new NodeCmdletMetrics();
        // Here, we consider all nodes have same configuration for executorsNum.
        int actualExecutorsNum =
            metrics instanceof ActiveServerNodeCmdletMetrics && disableLocalExec
                ? 0 : executorsNum;
        metrics.setNumExecutors(actualExecutorsNum);
        metrics.setRegistTime(System.currentTimeMillis());
        metrics.setNodeInfo(msg.getNodeInfo());
        regNodeInfos.put(nodeId, metrics);
      } else {
        if (!regNodes.containsKey(nodeId)) {
          LOG.warn("Skip duplicate remove node for {}", msg.getNodeInfo());
          return;
        }
        regNodes.remove(nodeId);
        regNodeInfos.remove(nodeId);
      }
      // Ignore local executor if it is disabled.
      if (disableLocalExec && msg.getNodeInfo().getExecutorType()
          == ExecutorType.LOCAL) {
        return;
      }
      // Maintain executor service in the below code.
      if (isAdd) {
        cmdExecSrvNodeIds.get(
            msg.getNodeInfo().getExecutorType().ordinal()).add(nodeId);
      } else {
        cmdExecSrvNodeIds.get(
            msg.getNodeInfo().getExecutorType().ordinal()).remove(nodeId);
      }
      int v = isAdd ? 1 : -1;
      int idx = msg.getNodeInfo().getExecutorType().ordinal();
      cmdExecSrvInsts[idx] += v;
      cmdExecSrvTotalInsts += v;
      updateSlotsLeft(idx, v * defaultSlots);
    }
    LOG.info(String.format("Node "
        + msg.getNodeInfo() + (isAdd ? " added." : " removed.")));
  }

  // Adjusts both the per-type and the global slot counters by delta.
  private void updateSlotsLeft(int idx, int delta) {
    execSrvSlotsLeft[idx].addAndGet(delta);
    totalSlotsLeft.addAndGet(delta);
  }

  public int getTotalSlotsLeft() {
    return totalSlotsLeft.get();
  }

  /** Atomically reserves one global slot; false when none remain. */
  public boolean resvExecSlot() {
    if (totalSlotsLeft.decrementAndGet() >= 0) {
      return true;
    }
    totalSlotsLeft.incrementAndGet();
    return false;
  }

  /** Returns one previously reserved global slot. */
  public void freeExecSlot() {
    totalSlotsLeft.incrementAndGet();
  }

  public int getTotalSlots() {
    return cmdExecSrvTotalInsts * defaultSlots;
  }

  /**
   * Returns metrics for all registered nodes, ordered by executor type then
   * node id, refreshing the active server's dispatch-related figures first.
   */
  public Collection<NodeCmdletMetrics> getNodeCmdletMetrics() {
    ActiveServerNodeCmdletMetrics metrics = (ActiveServerNodeCmdletMetrics) regNodeInfos.get(
        ActiveServerInfo.getInstance().getId());
    if (metrics != null) {
      metrics.setNumPendingDispatch(pendingCmdlets.size());
      metrics.setMaxPendingDispatch(getTotalSlotsLeft() + (int) (getTotalSlots() * 0.2));
      metrics.setMaxInExecution(getTotalSlots());
      metrics.setNumInExecution(getTotalSlots() - getTotalSlotsLeft());
      cmdletManager.updateNodeCmdletMetrics(metrics);
    }
    // TODO: temp implementation
    List<NodeCmdletMetrics> ret = new LinkedList<>();
    ret.addAll(regNodeInfos.values());
    Collections.sort(ret, new Comparator<NodeCmdletMetrics>() {
      @Override
      public int compare(NodeCmdletMetrics a, NodeCmdletMetrics b) {
        int tp = a.getNodeInfo().getExecutorType().ordinal()
            - b.getNodeInfo().getExecutorType().ordinal();
        return tp == 0 ? a.getNodeInfo().getId().compareToIgnoreCase(b.getNodeInfo().getId()) : tp;
      }
    });
    return ret;
  }

  /** Starts the local executor service, the dispatch tasks and (optionally) stats logging. */
  public void start() {
    // Instantiate and register LocalCmdletExecutorService.
    CmdletExecutorService exe =
        new LocalCmdletExecutorService(conf, cmdletManager);
    exe.start();
    registerExecutorService(exe);
    CmdletDispatcherHelper.getInst().register(this);
    int idx = 0;
    // Stagger task start times evenly across a 200ms window.
    for (DispatchTask task : dispatchTasks) {
      schExecService.scheduleAtFixedRate(task, idx * 200 / dispatchTasks.length,
          100, TimeUnit.MILLISECONDS);
      idx++;
    }
    if (outputDispMetricsInterval > 0) {
      schExecService.scheduleAtFixedRate(new LogStatTask(dispatchTasks),
          5000, outputDispMetricsInterval, TimeUnit.MILLISECONDS);
    }
  }

  /** Unregisters from the dispatcher helper and stops all scheduled tasks. */
  public void stop() {
    CmdletDispatcherHelper.getInst().unregister();
    schExecService.shutdown();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletFactory.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.action.ActionException;
import org.smartdata.action.ActionRegistry;
import org.smartdata.action.SmartAction;
//import org.smartdata.alluxio.AlluxioUtil;
//import org.smartdata.alluxio.action.AlluxioAction;
//import alluxio.client.file.FileSystem;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.hdfs.HadoopUtil;
import org.smartdata.hdfs.action.HdfsAction;
import org.smartdata.hdfs.client.SmartDFSClient;
import org.smartdata.model.LaunchAction;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.protocol.message.StatusReporter;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
/**
 * Builds executable {@link Cmdlet} instances (and their constituent
 * {@link SmartAction}s) from {@link LaunchCmdlet} descriptions.
 */
public class CmdletFactory {
  static final Logger LOG = LoggerFactory.getLogger(CmdletFactory.class);
  private final SmartContext smartContext;
  private final StatusReporter reporter;

  public CmdletFactory(SmartContext smartContext) {
    this(smartContext, null);
  }

  public CmdletFactory(SmartContext smartContext, StatusReporter reporter) {
    this.smartContext = smartContext;
    this.reporter = reporter;
  }

  /**
   * Materializes every launch action of the cmdlet, flagging the final one
   * so cmdlet completion can be detected.
   *
   * @throws ActionException if any action cannot be created
   */
  public Cmdlet createCmdlet(LaunchCmdlet launchCmdlet) throws ActionException {
    int total = launchCmdlet.getLaunchActions().size();
    List<SmartAction> actions = new ArrayList<>();
    int position = 0;
    for (LaunchAction launchAction : launchCmdlet.getLaunchActions()) {
      position++;
      actions.add(
          createAction(launchCmdlet.getCmdletId(), position == total, launchAction));
    }
    Cmdlet cmdlet = new Cmdlet(actions);
    cmdlet.setId(launchCmdlet.getCmdletId());
    return cmdlet;
  }

  /**
   * Creates and initializes a single action. HDFS actions additionally get
   * a {@link SmartDFSClient} wired to this server's RPC address.
   *
   * @throws ActionException if the DFS client cannot be constructed
   */
  public SmartAction createAction(long cmdletId, boolean isLastAction, LaunchAction launchAction)
      throws ActionException {
    SmartAction smartAction = ActionRegistry.createAction(launchAction.getActionType());
    smartAction.setContext(smartContext);
    smartAction.setCmdletId(cmdletId);
    smartAction.setLastAction(isLastAction);
    smartAction.init(launchAction.getArgs());
    smartAction.setActionId(launchAction.getActionId());
    if (smartAction instanceof HdfsAction) {
      try {
        SmartDFSClient client = new SmartDFSClient(
            HadoopUtil.getNameNodeUri(smartContext.getConf()),
            smartContext.getConf(),
            getRpcServerAddress());
        ((HdfsAction) smartAction).setDfsClient(client);
      } catch (IOException e) {
        LOG.error("smartAction aid={} setDfsClient error", launchAction.getActionId(), e);
        throw new ActionException(e);
      }
    }
    return smartAction;
  }

  /**
   * Parses the configured RPC address, taking host and port from the last
   * two colon-separated segments.
   */
  private InetSocketAddress getRpcServerAddress() {
    String address = smartContext.getConf().get(
        SmartConfKeys.SMART_SERVER_RPC_ADDRESS_KEY,
        SmartConfKeys.SMART_SERVER_RPC_ADDRESS_DEFAULT);
    String[] parts = address.split(":");
    String host = parts[parts.length - 2];
    int port = Integer.parseInt(parts[parts.length - 1]);
    return new InetSocketAddress(host, port);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletExecutorService.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletExecutorService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.smartdata.model.ExecutorType;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.server.cluster.NodeInfo;
import org.smartdata.server.engine.CmdletManager;
import java.util.List;
/**
 * Base type for services able to run {@link LaunchCmdlet}s on a set of
 * nodes (the local server, remote SSM servers, or agents). Each concrete
 * service is identified by its {@link ExecutorType}.
 */
public abstract class CmdletExecutorService {
  protected CmdletManager cmdletManager;
  private ExecutorType executorType;

  public CmdletExecutorService(CmdletManager manager, ExecutorType type) {
    this.cmdletManager = manager;
    this.executorType = type;
  }

  /** The executor type this service handles. */
  public ExecutorType getExecutorType() {
    return executorType;
  }

  /** Startup hook; the default implementation does nothing. */
  public void start() {
  }

  /** Whether the service has capacity for additional cmdlets. */
  public abstract boolean canAcceptMore();

  // TODO: to be refined
  /**
   * Send cmdlet to end executor for execution.
   *
   * @param cmdlet the cmdlet to run
   * @return Node ID that the cmdlet been dispatched to, null if failed
   */
  public abstract String execute(LaunchCmdlet cmdlet);

  /** Stops the given cmdlet on whichever node is running it. */
  public abstract void stop(long cmdletId);

  /** Shuts the service down, releasing its resources. */
  public abstract void shutdown();

  /** @return number of nodes contained in this service */
  public abstract int getNumNodes();

  /** @return information about every node managed by this service */
  public abstract List<NodeInfo> getNodesInfo();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/TaskTracker.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/TaskTracker.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.smartdata.model.CmdletDescriptor;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
* Track CmdletDescriptor from the submission of the corresponding Cmdlet to
* the finish of that Cmdlet. CmdletDescriptor defines a task and it is wrapped
* by Cmdlet or CmdletInfo to
*/
/**
 * Track CmdletDescriptor from the submission of the corresponding Cmdlet to
 * the finish of that Cmdlet. CmdletDescriptor defines a task and it is wrapped
 * by Cmdlet or CmdletInfo to
 */
public class TaskTracker {
  // Descriptors whose cmdlets are currently in flight.
  private final Set<CmdletDescriptor> tacklingCmdDesptors =
      ConcurrentHashMap.newKeySet();
  // ID of each submitted cmdlet mapped to the descriptor it wraps.
  private final Map<Long, CmdletDescriptor> cidToCmdDesptor =
      new ConcurrentHashMap<>();

  /**
   * Start tracking the CmdletDescriptor which is wrapped by a executable
   * cmdlet whose ID is cid.
   **/
  public void track(long cid, CmdletDescriptor cmdDesptor) {
    tacklingCmdDesptors.add(cmdDesptor);
    cidToCmdDesptor.put(cid, cmdDesptor);
  }

  /**
   * Untrack the CmdletDescriptor when the corresponding Cmdlet is finished.
   * @param cid the ID of the finished Cmdlet.
   */
  public void untrack(long cid) {
    CmdletDescriptor descriptor = cidToCmdDesptor.remove(cid);
    if (descriptor != null) {
      tacklingCmdDesptors.remove(descriptor);
    }
  }

  /**
   * Used to avoid repeatedly submitting cmdlet for same CmdletDescriptor.
   */
  public boolean contains(CmdletDescriptor cmdDesptor) {
    return tacklingCmdDesptors.contains(cmdDesptor);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/HazelcastExecutorService.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/HazelcastExecutorService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.ITopic;
import com.hazelcast.core.Member;
import com.hazelcast.core.MemberAttributeEvent;
import com.hazelcast.core.MembershipEvent;
import com.hazelcast.core.MembershipListener;
import com.hazelcast.core.Message;
import com.hazelcast.core.MessageListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.model.ExecutorType;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.protocol.message.StatusMessage;
import org.smartdata.protocol.message.StatusReport;
import org.smartdata.protocol.message.StopCmdlet;
import org.smartdata.server.cluster.HazelcastInstanceProvider;
import org.smartdata.server.cluster.NodeInfo;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.EngineEventBus;
import org.smartdata.server.engine.StandbyServerInfo;
import org.smartdata.server.engine.message.AddNodeMessage;
import org.smartdata.server.engine.message.RemoveNodeMessage;
import org.smartdata.server.utils.HazelcastUtil;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class HazelcastExecutorService extends CmdletExecutorService {
  private static final Logger LOG = LoggerFactory.getLogger(HazelcastExecutorService.class);
  public static final String WORKER_TOPIC_PREFIX = "worker_";
  public static final String STATUS_TOPIC = "status_topic";
  private final HazelcastInstance instance;
  // The maps below are mutated from Hazelcast membership/message-listener
  // threads and read from the dispatcher thread, so concurrent maps are used.
  private Map<String, ITopic<Serializable>> masterToWorkers;
  // Cmdlet ID -> node ID of the worker it was dispatched to.
  private Map<Long, String> executingCmdlets;
  private Map<String, Member> members;
  private ITopic<StatusMessage> statusTopic;

  public HazelcastExecutorService(CmdletManager cmdletManager) {
    super(cmdletManager, ExecutorType.REMOTE_SSM);
    this.executingCmdlets = new ConcurrentHashMap<>();
    this.masterToWorkers = new ConcurrentHashMap<>();
    this.members = new ConcurrentHashMap<>();
    this.instance = HazelcastInstanceProvider.getInstance();
    this.statusTopic = instance.getTopic(STATUS_TOPIC);
    this.statusTopic.addMessageListener(new StatusMessageListener());
    initChannels();
    instance.getCluster().addMembershipListener(new ClusterMembershipListener(instance));
  }

  /**
   * Suppose there are three Smart Server. After one server is down, one of
   * the remaining server will be elected as master and the other will
   * continue serve as standby. Obviously, the new master server will not
   * receive message for adding standby node (i.e., trigger #memberAdded),
   * so the new master will just know the standby server is a hazelcast
   * member, but not realize that standby node is serving as remote executor.
   * Thus, we need to call the below method during the start of new active
   * server to deliver the message about standby node to CmdletDispatcherHelper.
   */
  private void initChannels() {
    for (Member worker : HazelcastUtil.getWorkerMembers(instance)) {
      addMember(worker);
    }
  }

  /**
   * Keep the new hazelcast member in maps and post the add-member event to
   * CmdletDispatcherHelper. See #removeMember.
   * The id is firstly checked to avoid repeated message delivery.
   * It is supposed that #addMember & #removeMember will be called by only
   * one thread.
   *
   * @param member the newly joined hazelcast member
   */
  public void addMember(Member member) {
    String id = getMemberNodeId(member);
    if (!masterToWorkers.containsKey(id)) {
      ITopic<Serializable> topic =
          instance.getTopic(WORKER_TOPIC_PREFIX + member.getUuid());
      this.masterToWorkers.put(id, topic);
      members.put(id, member);
      EngineEventBus.post(new AddNodeMessage(memberToNodeInfo(member)));
    } else {
      LOG.warn("The member is already added: id = " + id);
    }
  }

  /**
   * Remove the member and post the remove-member event to
   * CmdletDispatcherHelper. See #addMember.
   *
   * @param member the hazelcast member that left the cluster
   */
  public void removeMember(Member member) {
    String id = getMemberNodeId(member);
    if (masterToWorkers.containsKey(id)) {
      masterToWorkers.get(id).destroy();
      // Consider a case: standby server crashed and then it was launched again.
      // If this server is not removed from masterToWorkers, the AddNodeMessage
      // will not be posted in #addMember.
      masterToWorkers.remove(id);
      members.remove(id);
      EngineEventBus.post(new RemoveNodeMessage(memberToNodeInfo(member)));
    } else {
      // Use a "{}" placeholder; the previous message had none, so the id
      // argument was silently dropped from the log output.
      LOG.warn("It is supposed that the member was not added, "
          + "maybe no need to remove it: id = {}", id);
      // Todo: recover
    }
  }

  /** List the standby servers currently registered as remote executors. */
  public List<StandbyServerInfo> getStandbyServers() {
    List<StandbyServerInfo> infos = new ArrayList<>();
    for (Member worker : HazelcastUtil.getWorkerMembers(instance)) {
      infos.add(new StandbyServerInfo(getMemberNodeId(worker),
          worker.getAddress().getHost() + ":" + worker.getAddress().getPort()));
    }
    return infos;
  }

  public int getNumNodes() {
    return masterToWorkers.size();
  }

  public List<NodeInfo> getNodesInfo() {
    // StandbyServerInfo implements NodeInfo, so a plain copy suffices.
    return new ArrayList<NodeInfo>(getStandbyServers());
  }

  private NodeInfo memberToNodeInfo(Member member) {
    return new StandbyServerInfo(getMemberNodeId(member),
        member.getAddress().getHost() + ":" + member.getAddress().getPort());
  }

  private String getMemberNodeId(Member member) {
    return "StandbySSMServer@" + member.getAddress().getHost();
  }

  @Override
  public boolean canAcceptMore() {
    return !HazelcastUtil.getWorkerMembers(instance).isEmpty();
  }

  @Override
  public String execute(LaunchCmdlet cmdlet) {
    String member = cmdlet.getNodeId();
    masterToWorkers.get(member).publish(cmdlet);
    executingCmdlets.put(cmdlet.getCmdletId(), member);
    LOG.debug("Executing cmdlet {} on worker {}", cmdlet.getCmdletId(), member);
    return member;
  }

  @Override
  public void stop(long cmdletId) {
    if (executingCmdlets.containsKey(cmdletId)) {
      String member = executingCmdlets.get(cmdletId);
      if (member != null) {
        masterToWorkers.get(member).publish(new StopCmdlet(cmdletId));
      }
    }
  }

  @Override
  public void shutdown() {
  }

  /**
   * Handle a status message from a worker: drop bookkeeping for finished
   * cmdlets, then forward the message to the CmdletManager.
   */
  public void onStatusMessage(StatusMessage message) {
    if (message instanceof StatusReport) {
      StatusReport report = (StatusReport) message;
      for (ActionStatus s : report.getActionStatuses()) {
        if (s.isFinished() && s.isLastAction()) {
          executingCmdlets.remove(s.getCmdletId());
        }
      }
    }
    cmdletManager.updateStatus(message);
  }

  /** Translates hazelcast cluster membership events into add/remove calls. */
  private class ClusterMembershipListener implements MembershipListener {
    private final HazelcastInstance instance;

    public ClusterMembershipListener(HazelcastInstance instance) {
      this.instance = instance;
    }

    @Override
    public void memberAdded(MembershipEvent membershipEvent) {
      Member member = membershipEvent.getMember();
      addMember(member);
    }

    @Override
    public void memberRemoved(MembershipEvent membershipEvent) {
      Member member = membershipEvent.getMember();
      removeMember(member);
    }

    @Override
    public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
    }
  }

  /** Forwards worker status messages published on the status topic. */
  private class StatusMessageListener implements MessageListener<StatusMessage> {
    @Override
    public void onMessage(Message<StatusMessage> message) {
      onStatusMessage(message.getMessageObject());
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/LocalCmdletExecutorService.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/LocalCmdletExecutorService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.ActionException;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.model.ExecutorType;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.protocol.message.StatusMessage;
import org.smartdata.protocol.message.StatusReporter;
import org.smartdata.server.cluster.NodeInfo;
import org.smartdata.server.engine.ActiveServerInfo;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.EngineEventBus;
import org.smartdata.server.engine.message.AddNodeMessage;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
public class LocalCmdletExecutorService extends CmdletExecutorService implements StatusReporter {
  private static final Logger LOG = LoggerFactory.getLogger(LocalCmdletExecutorService.class);
  private SmartConf conf;
  // The three fields below remain null when local execution is disabled
  // (the constructor returns early), so all uses must be guarded.
  private CmdletFactory cmdletFactory;
  private CmdletExecutor cmdletExecutor;
  private ScheduledExecutorService executorService;
  private boolean disableLocalExec;

  public LocalCmdletExecutorService(SmartConf smartConf, CmdletManager cmdletManager) {
    super(cmdletManager, ExecutorType.LOCAL);
    this.conf = smartConf;
    // If local executor is disabled, there is no need to execute the remain code in the
    // instantiation.
    this.disableLocalExec = conf.getBoolean(
        SmartConfKeys.SMART_ACTION_LOCAL_EXECUTION_DISABLED_KEY,
        SmartConfKeys.SMART_ACTION_LOCAL_EXECUTION_DISABLED_DEFAULT);
    if (disableLocalExec) {
      return;
    }
    this.cmdletFactory = new CmdletFactory(cmdletManager.getContext(), this);
    this.cmdletExecutor = new CmdletExecutor(smartConf);
    this.executorService = Executors.newSingleThreadScheduledExecutor();
  }

  /**
   * Publish the active server as an execution node and, unless local
   * execution is disabled, schedule the periodic status report task.
   */
  @Override
  public void start() {
    ActiveServerInfo.setInstance(getActiveServerAddress());
    EngineEventBus.post(new AddNodeMessage(ActiveServerInfo.getInstance()));
    if (disableLocalExec) {
      return;
    }
    int reportPeriod = conf.getInt(SmartConfKeys.SMART_STATUS_REPORT_PERIOD_KEY,
        SmartConfKeys.SMART_STATUS_REPORT_PERIOD_DEFAULT);
    StatusReportTask statusReportTask = new StatusReportTask(this, cmdletExecutor, conf);
    this.executorService.scheduleAtFixedRate(
        statusReportTask, 1000, reportPeriod, TimeUnit.MILLISECONDS);
  }

  @Override
  public boolean canAcceptMore() {
    // NOTE(review): returns true even when local execution is disabled;
    // presumably dispatching to this service is prevented elsewhere — confirm.
    return true;
  }

  public int getNumNodes() {
    return 1;
  }

  public List<NodeInfo> getNodesInfo() {
    // TODO: to be refined
    List<NodeInfo> ret = new ArrayList<>(1);
    ret.add(ActiveServerInfo.getInstance());
    return ret;
  }

  @Override
  public String execute(LaunchCmdlet cmdlet) {
    if (disableLocalExec) {
      // Guard: cmdletFactory/cmdletExecutor were never created, a call here
      // would otherwise throw NullPointerException.
      LOG.error("Cannot execute cmdlet {}: local execution is disabled.",
          cmdlet.getCmdletId());
      return null;
    }
    try {
      this.cmdletExecutor.execute(cmdletFactory.createCmdlet(cmdlet));
      return ActiveServerInfo.getInstance().getId();
    } catch (ActionException e) {
      LOG.error("Failed to execute cmdlet {}" , cmdlet.getCmdletId(), e);
      return null;
    }
  }

  @Override
  public void stop(long cmdletId) {
    // Null when local execution is disabled; nothing can be running then.
    if (cmdletExecutor != null) {
      this.cmdletExecutor.stop(cmdletId);
    }
  }

  @Override
  public void shutdown() {
    // Both fields are null when local execution is disabled; the previous
    // code threw NullPointerException on shutdown in that configuration.
    if (executorService != null) {
      this.executorService.shutdown();
    }
    if (cmdletExecutor != null) {
      this.cmdletExecutor.shutdown();
    }
  }

  /** Forward a locally produced status message to the CmdletManager. */
  @Override
  public void report(StatusMessage status) {
    LOG.debug("Reporting status message " + status);
    cmdletManager.updateStatus(status);
  }

  /**
   * Resolve the address to register the active server under: the configured
   * agent-master address, falling back to the local hostname, then loopback.
   */
  private String getActiveServerAddress() {
    String srv = conf.get(SmartConfKeys.SMART_AGENT_MASTER_ADDRESS_KEY);
    if (srv == null || srv.length() == 0) {
      try {
        srv = InetAddress.getLocalHost().getHostName();
      } catch (UnknownHostException e) {
        srv = "127.0.0.1";
      }
    }
    return srv;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentManager.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import akka.actor.ActorRef;
import akka.actor.Address;
import org.smartdata.server.cluster.NodeInfo;
import org.smartdata.server.engine.EngineEventBus;
import org.smartdata.server.engine.cmdlet.agent.messages.MasterToAgent.AgentId;
import org.smartdata.server.engine.message.AddNodeMessage;
import org.smartdata.server.engine.message.RemoveNodeMessage;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
 * Bookkeeping for connected SmartAgents: tracks their actor refs, IDs and
 * node info, and posts add/remove events to the engine event bus.
 */
public class AgentManager {
  private final Map<ActorRef, AgentId> agents = new HashMap<>();
  private final Map<ActorRef, NodeInfo> agentNodeInfos = new HashMap<>();
  private final Map<String, ActorRef> agentActorRefs = new HashMap<>();
  // Agents currently available for dispatch.
  private List<ActorRef> resources = new ArrayList<>();
  private List<NodeInfo> nodeInfos = new LinkedList<>();

  /** Register a newly connected agent and announce it to the engine. */
  void addAgent(ActorRef agent, AgentId id) {
    agents.put(agent, id);
    resources.add(agent);
    String location = AgentUtils.getHostPort(agent);
    NodeInfo info = new AgentInfo(String.valueOf(id.getId()), location);
    nodeInfos.add(info);
    agentNodeInfos.put(agent, info);
    agentActorRefs.put(info.getId(), agent);
    EngineEventBus.post(new AddNodeMessage(info));
  }

  /**
   * Deregister an agent and announce its removal.
   *
   * @return the agent's ID, or null if it was not (or no longer) registered
   */
  AgentId removeAgent(ActorRef agent) {
    AgentId id = agents.remove(agent);
    resources.remove(agent);
    NodeInfo info = agentNodeInfos.remove(agent);
    // The same agent can be removed twice: a DisassociatedEvent may be
    // followed by a Terminated message for the same actor. The previous code
    // threw NullPointerException on info.getId() in that case.
    if (info != null) {
      nodeInfos.remove(info);
      agentActorRefs.remove(info.getId());
      EngineEventBus.post(new RemoveNodeMessage(info));
    }
    return id;
  }

  boolean hasFreeAgent() {
    return !resources.isEmpty();
  }

  /** Resolve the actor serving the given node ID, or null if unknown. */
  ActorRef dispatch(String nodeId) {
    return agentActorRefs.get(nodeId);
  }

  Map<ActorRef, AgentId> getAgents() {
    return agents;
  }

  List<NodeInfo> getNodeInfos() {
    return nodeInfos;
  }

  AgentId getAgentId(ActorRef agentActorRef) {
    return agents.get(agentActorRef);
  }

  /** Linear scan for the agent whose actor path matches the given address. */
  ActorRef getAgentActorByAddress(Address addr) {
    for (ActorRef agent : agents.keySet()) {
      if (agent.path().address().equals(addr)) {
        return agent;
      }
    }
    return null;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentMaster.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentMaster.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.Terminated;
import akka.actor.UntypedActor;
import akka.pattern.Patterns;
import akka.remote.AssociationEvent;
import akka.remote.DisassociatedEvent;
import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConf;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.protocol.message.StatusMessage;
import org.smartdata.protocol.message.StopCmdlet;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.cmdlet.CmdletDispatcherHelper;
import org.smartdata.server.engine.cmdlet.agent.messages.AgentToMaster.RegisterAgent;
import org.smartdata.server.engine.cmdlet.agent.messages.AgentToMaster.RegisterNewAgent;
import org.smartdata.server.engine.cmdlet.agent.messages.MasterToAgent.AgentId;
import org.smartdata.server.engine.cmdlet.agent.messages.MasterToAgent.AgentRegistered;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
/**
 * Master endpoint for SmartAgents: hosts the akka actor system that agents
 * register with, and offers a synchronous facade (launch/stop cmdlet) built
 * on ask/await against the master actor.
 */
public class AgentMaster {
  private static final Logger LOG = LoggerFactory.getLogger(AgentMaster.class);
  // Maximum time to wait for an answer from the master actor.
  public static final Timeout TIMEOUT =
      new Timeout(Duration.create(5, TimeUnit.SECONDS));
  private ActorSystem system;
  private ActorRef master;
  private AgentManager agentManager;
  private static CmdletManager statusUpdater;
  private static AgentMaster agentMaster = null;

  private AgentMaster(SmartConf conf) throws IOException {
    String[] addresses = AgentUtils.getMasterAddress(conf);
    if (addresses == null) {
      throw new IOException("AgentMaster address not configured!");
    }
    // Only the first configured address is used by this master instance.
    String address = addresses[0];
    LOG.info("Agent master: " + address);
    Config config = AgentUtils.overrideRemoteAddress(
        ConfigFactory.load(AgentConstants.AKKA_CONF_FILE), address);
    CmdletDispatcherHelper.init();
    this.agentManager = new AgentManager();
    Props props = Props.create(MasterActor.class, null, agentManager);
    ActorSystemLauncher launcher = new ActorSystemLauncher(config, props);
    launcher.start();
  }

  public static AgentMaster getAgentMaster() throws IOException {
    return getAgentMaster(new SmartConf());
  }

  /**
   * Lazily create the singleton. Synchronized so that concurrent callers
   * cannot create two instances (and two actor systems).
   */
  public static synchronized AgentMaster getAgentMaster(SmartConf conf)
      throws IOException {
    if (agentMaster == null) {
      agentMaster = new AgentMaster(conf);
    }
    return agentMaster;
  }

  public static void setCmdletManager(CmdletManager statusUpdater) {
    AgentMaster.statusUpdater = statusUpdater;
  }

  /** Whether at least one agent is available for dispatch. */
  public boolean canAcceptMore() {
    return agentManager.hasFreeAgent();
  }

  /**
   * Dispatch a cmdlet to an agent via the master actor.
   *
   * @return ID of the agent that took the cmdlet, or null on failure
   */
  public String launchCmdlet(LaunchCmdlet launch) {
    try {
      AgentId agentId = (AgentId) askMaster(launch);
      return agentId.getId();
    } catch (Exception e) {
      // Pass the exception as the last argument so the stack trace is kept
      // (logging only e.getMessage() discarded it).
      LOG.error("Failed to launch Cmdlet {}", launch, e);
      return null;
    }
  }

  /** Ask the master actor to stop the given cmdlet; best effort. */
  public void stopCmdlet(long cmdletId) {
    try {
      askMaster(new StopCmdlet(cmdletId));
    } catch (Exception e) {
      LOG.error("Failed to stop Cmdlet {}", cmdletId, e);
    }
  }

  /** Stop the master actor and shut the actor system down. */
  public void shutdown() {
    if (system != null && !system.isTerminated()) {
      if (master != null && !master.isTerminated()) {
        LOG.info("Shutting down master {}...",
            AgentUtils.getFullPath(system, master.path()));
        system.stop(master);
      }
      LOG.info("Shutting down system {}...",
          AgentUtils.getSystemAddres(system));
      system.shutdown();
    }
  }

  /** Snapshot of the currently registered agents. */
  public List<AgentInfo> getAgentInfos() {
    List<AgentInfo> infos = new ArrayList<>();
    for (Map.Entry<ActorRef, AgentId> entry :
        agentManager.getAgents().entrySet()) {
      String location = AgentUtils.getHostPort(entry.getKey());
      infos.add(new AgentInfo(String.valueOf(
          entry.getValue().getId()), location));
    }
    return infos;
  }

  public int getNumAgents() {
    return agentManager.getAgents().size();
  }

  @VisibleForTesting
  ActorRef getMasterActor() {
    return master;
  }

  /** Synchronous ask against the master actor, bounded by TIMEOUT. */
  Object askMaster(Object message) throws Exception {
    Future<Object> answer = Patterns.ask(master, message, TIMEOUT);
    return Await.result(answer, TIMEOUT.duration());
  }

  /** Runs the actor system on its own thread until termination. */
  class ActorSystemLauncher extends Thread {
    private final Props masterProps;
    private final Config config;

    public ActorSystemLauncher(Config config, Props masterProps) {
      this.config = config;
      this.masterProps = masterProps;
    }

    @Override
    public void run() {
      system = ActorSystem.apply(
          AgentConstants.MASTER_ACTOR_SYSTEM_NAME, config);
      master = system.actorOf(masterProps, AgentConstants.MASTER_ACTOR_NAME);
      LOG.info("MasterActor created at {}",
          AgentUtils.getFullPath(system, master.path()));
      final Thread currentThread = Thread.currentThread();
      Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
          shutdown();
          try {
            currentThread.join();
          } catch (InterruptedException e) {
            // Ignore
          }
        }
      });
      system.awaitTermination();
    }
  }

  /** Actor handling agent registration, dispatch and failure events. */
  static class MasterActor extends UntypedActor {
    // Cmdlet ID -> the agent actor it was dispatched to.
    private final Map<Long, ActorRef> dispatches = new HashMap<>();
    private AgentManager agentManager;

    public MasterActor(CmdletManager statusUpdater,
        AgentManager agentManager) {
      this(agentManager);
      if (statusUpdater != null) {
        setCmdletManager(statusUpdater);
      }
    }

    public MasterActor(AgentManager agentManager) {
      this.agentManager = agentManager;
    }

    /**
     * Subscribe an event: {@code DisassociatedEvent}. It will be
     * handled by {@link #handleDisassociatedEvent method}.
     */
    @Override
    public void preStart() {
      this.context().system().eventStream().subscribe(
          self(), DisassociatedEvent.class);
    }

    @Override
    public void onReceive(Object message) throws Exception {
      boolean handled =
          handleAgentMessage(message)
              || handleClientMessage(message)
              || handleTerminatedMessage(message)
              || handleDisassociatedEvent(message);
      if (!handled) {
        unhandled(message);
      }
    }

    /** Registration and status messages coming from agents. */
    private boolean handleAgentMessage(Object message) {
      if (message instanceof RegisterNewAgent) {
        getSelf().forward(new RegisterAgent(
            ((RegisterNewAgent) message).getId()), getContext());
        return true;
      } else if (message instanceof RegisterAgent) {
        RegisterAgent register = (RegisterAgent) message;
        ActorRef agent = getSender();
        // Watch this agent to listen messages delivered from it.
        getContext().watch(agent);
        AgentId id = register.getId();
        AgentRegistered registered = new AgentRegistered(id);
        this.agentManager.addAgent(agent, id);
        agent.tell(registered, getSelf());
        LOG.info("Register SmartAgent {} from {}", id, agent);
        return true;
      } else if (message instanceof StatusMessage) {
        AgentMaster.statusUpdater.updateStatus((StatusMessage) message);
        return true;
      } else {
        return false;
      }
    }

    /** Launch/stop requests coming from the AgentMaster facade. */
    private boolean handleClientMessage(Object message) {
      if (message instanceof LaunchCmdlet) {
        if (agentManager.hasFreeAgent()) {
          LaunchCmdlet launch = (LaunchCmdlet) message;
          ActorRef agent = this.agentManager.dispatch(launch.getNodeId());
          AgentId agentId = this.agentManager.getAgentId(agent);
          agent.tell(launch, getSelf());
          dispatches.put(launch.getCmdletId(), agent);
          getSender().tell(agentId, getSelf());
        }
        // NOTE(review): when no agent is free, no reply is sent and the
        // caller's ask simply times out — confirm this is intended.
        return true;
      } else if (message instanceof StopCmdlet) {
        long cmdletId = ((StopCmdlet) message).getCmdletId();
        if (dispatches.containsKey(cmdletId)) {
          dispatches.get(cmdletId).tell(message, getSelf());
          getSender().tell("Succeed", getSelf());
        } else {
          getSender().tell("NotFound", getSelf());
        }
        return true;
      } else {
        return false;
      }
    }

    private boolean handleTerminatedMessage(Object message) {
      if (message instanceof Terminated) {
        Terminated terminated = (Terminated) message;
        ActorRef agent = terminated.actor();
        AgentId id = this.agentManager.removeAgent(agent);
        // Unwatch this agent to avoid trying re-association.
        this.context().unwatch(agent);
        // Fixed malformed format string "SmartAgent ({} {} down".
        LOG.warn("SmartAgent {} ({}) down", id, agent);
        return true;
      } else {
        return false;
      }
    }

    /**
     * Remove agent if {@code DisassociatedEvent} is received.
     */
    private boolean handleDisassociatedEvent(Object message) {
      if (!(message instanceof DisassociatedEvent)) {
        return false;
      }
      AssociationEvent associEvent = (AssociationEvent) message;
      ActorRef agent = agentManager.getAgentActorByAddress(
          associEvent.getRemoteAddress());
      // The agent may be already removed. Return true to indicate
      // the message has been handled.
      if (agent == null) {
        return true;
      }
      LOG.warn("Received event: {}, details: {}",
          associEvent.eventName(), associEvent.toString());
      LOG.warn("Removing the disassociated agent: " + agent.path().address());
      agentManager.removeAgent(agent);
      // Unwatch this agent to avoid trying re-association.
      this.context().unwatch(agent);
      return true;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentExecutorService.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentExecutorService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import org.smartdata.conf.SmartConf;
import org.smartdata.model.ExecutorType;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.server.cluster.NodeInfo;
import org.smartdata.server.engine.CmdletManager;
import org.smartdata.server.engine.cmdlet.CmdletExecutorService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * CmdletExecutorService backed by remote SmartAgents; all work is delegated
 * to the singleton {@link AgentMaster}.
 */
public class AgentExecutorService extends CmdletExecutorService {
  // Singleton master coordinating all connected SmartAgents.
  private final AgentMaster master;

  public AgentExecutorService(SmartConf conf, CmdletManager cmdletManager) throws IOException {
    super(cmdletManager, ExecutorType.AGENT);
    this.master = AgentMaster.getAgentMaster(conf);
    AgentMaster.setCmdletManager(cmdletManager);
  }

  /** Can take work only while at least one agent is free. */
  @Override
  public boolean canAcceptMore() {
    return master.canAcceptMore();
  }

  /** Dispatch the cmdlet through the agent master. */
  @Override
  public String execute(LaunchCmdlet cmdlet) {
    return master.launchCmdlet(cmdlet);
  }

  @Override
  public void stop(long cmdletId) {
    master.stopCmdlet(cmdletId);
  }

  @Override
  public void shutdown() {
    master.shutdown();
  }

  public List<AgentInfo> getAgentInfos() {
    return master.getAgentInfos();
  }

  public int getNumNodes() {
    return master.getNumAgents();
  }

  public List<NodeInfo> getNodesInfo() {
    // AgentInfo implements NodeInfo, so a straight copy is sufficient.
    return new ArrayList<NodeInfo>(getAgentInfos());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/SmartAgentContext.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/SmartAgentContext.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import org.smartdata.SmartContext;
import org.smartdata.conf.SmartConf;
import org.smartdata.protocol.message.StatusReporter;
/**
 * Execution context for a SmartAgent: couples the agent's configuration
 * (held by {@link SmartContext}) with the {@link StatusReporter} associated
 * with this agent.
 */
public class SmartAgentContext extends SmartContext {
// Reporter handed to agent services; set once at construction.
private final StatusReporter reporter;
public SmartAgentContext(SmartConf conf, StatusReporter reporter) {
super(conf);
this.reporter = reporter;
}
/** @return the status reporter supplied at construction; never reassigned */
public StatusReporter getStatusReporter() {
return reporter;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentConstants.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentConstants.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
/**
 * String constants shared by the agent master and agents: actor-system and
 * actor names, plus the Akka configuration keys used to wire up remoting.
 */
public class AgentConstants {
  /** Name of the Akka actor system hosting the agent master. */
  public static final String MASTER_ACTOR_SYSTEM_NAME = "AgentMaster";
  /** Name of the master actor inside that system. */
  public static final String MASTER_ACTOR_NAME = "master";
  /** Akka config key for the remoting bind host. */
  public static final String AKKA_REMOTE_HOST_KEY = "akka.remote.netty.tcp.hostname";
  /** Akka config key for the remoting bind port. */
  public static final String AKKA_REMOTE_PORT_KEY = "akka.remote.netty.tcp.port";
  /** Classpath resource holding the agent's Akka configuration. */
  public static final String AKKA_CONF_FILE = "agent.conf";

  private AgentConstants() {
    // Constants holder; not instantiable.
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentUtils.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentUtils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.Cancellable;
import akka.actor.ExtendedActorSystem;
import akka.actor.Scheduler;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigValueFactory;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import java.io.IOException;
import java.net.InetAddress;
import java.util.Objects;
import scala.concurrent.ExecutionContextExecutor;
import scala.concurrent.duration.FiniteDuration;
/**
 * Static helpers shared by the agent master and SmartAgents: Akka
 * address/path formatting, repeated-action scheduling, and address lookups
 * from the Smart configuration.
 */
public class AgentUtils {
  /** Returns the default remote address of the given actor system. */
  public static Address getSystemAddress(ActorSystem system) {
    return ((ExtendedActorSystem) system).provider().getDefaultAddress();
  }

  /**
   * @deprecated misspelled; kept for source compatibility. Use
   *     {@link #getSystemAddress(ActorSystem)}.
   */
  @Deprecated
  public static Address getSystemAddres(ActorSystem system) {
    return getSystemAddress(system);
  }

  /** Renders {@code path} as a full actor path anchored at the system's default address. */
  public static String getFullPath(ActorSystem system, ActorPath path) {
    return path.toStringWithAddress(getSystemAddress(system));
  }

  /** Extracts the "host:port" part of the actor's address (drops "proto://system@"). */
  public static String getHostPort(ActorRef ref) {
    return ref.path().address().hostPort().replaceFirst("^.*@", "");
  }

  /**
   * Schedules {@code action} to run repeatedly until {@code timeout} elapses,
   * then stops the repetition and runs {@code onTimeout}. Cancelling the
   * returned handle cancels all three underlying scheduled tasks.
   *
   * @param initialDelay delay before the first run of {@code action}
   * @param interval     period between runs of {@code action}
   * @param timeout      when to stop repeating and fire {@code onTimeout}
   */
  public static Cancellable repeatActionUntil(ActorSystem system,
      FiniteDuration initialDelay, FiniteDuration interval, FiniteDuration timeout,
      Runnable action, Runnable onTimeout) {
    final Scheduler scheduler = system.scheduler();
    final ExecutionContextExecutor dispatcher = system.dispatcher();
    final Cancellable run =
        scheduler.schedule(initialDelay, interval, action,
            dispatcher);
    // Stops the repetition when the timeout elapses.
    final Cancellable cancelRun = scheduler.scheduleOnce(timeout, new Runnable() {
      @Override
      public void run() {
        run.cancel();
      }
    }, dispatcher);
    // Fires the caller's timeout handler at the same deadline.
    final Cancellable fail = scheduler.scheduleOnce(timeout, onTimeout, dispatcher);
    return new Cancellable() {
      @Override
      public boolean cancel() {
        // Evaluate every cancel() call: the original used short-circuit '&&',
        // so one failed cancellation skipped cancelling the remaining tasks
        // and left them scheduled.
        boolean runCancelled = run.cancel();
        boolean stopCancelled = cancelRun.cancel();
        boolean timeoutCancelled = fail.cancel();
        return runCancelled && stopCancelled && timeoutCancelled;
      }

      @Override
      public boolean isCancelled() {
        return run.isCancelled() && cancelRun.isCancelled() && fail.isCancelled();
      }
    };
  }

  /** Maps each "host[:port]" master address to its full master actor path. */
  public static String[] getMasterActorPaths(String[] masters) {
    String[] paths = new String[masters.length];
    for (int i = 0; i < masters.length; i++) {
      paths[i] = getMasterActorPath(masters[i]);
    }
    return paths;
  }

  private static String getMasterActorPath(String masterAddress) {
    HostPort hostPort = new HostPort(masterAddress);
    return String.format("akka.tcp://%s@%s:%s/user/%s",
        AgentConstants.MASTER_ACTOR_SYSTEM_NAME,
        hostPort.getHost(), hostPort.getPort(),
        AgentConstants.MASTER_ACTOR_NAME);
  }

  /** Overrides the Akka remoting bind host/port in {@code config} with {@code address}. */
  public static Config overrideRemoteAddress(Config config, String address) {
    AgentUtils.HostPort hostPort = new AgentUtils.HostPort(address);
    return config.withValue(AgentConstants.AKKA_REMOTE_HOST_KEY,
        ConfigValueFactory.fromAnyRef(hostPort.getHost()))
        .withValue(AgentConstants.AKKA_REMOTE_PORT_KEY,
            ConfigValueFactory.fromAnyRef(hostPort.getPort()));
  }

  /**
   * Returns the configured master address list, appending the default master
   * port to any entry that lacks an explicit port.
   *
   * @param conf the Smart configuration to read addresses from
   * @return address array if valid address found, else null
   */
  public static String[] getMasterAddress(SmartConf conf) {
    String[] masters = conf.getStrings(SmartConfKeys.SMART_AGENT_MASTER_ADDRESS_KEY);
    int masterDefPort = conf.getInt(SmartConfKeys.SMART_AGENT_MASTER_PORT_KEY,
        SmartConfKeys.SMART_AGENT_MASTER_PORT_DEFAULT);
    if (masters == null || masters.length == 0) {
      return null;
    }
    for (int i = 0; i < masters.length; i++) {
      if (!masters[i].contains(":")) {
        masters[i] += ":" + masterDefPort;
      }
    }
    return masters;
  }

  /**
   * Returns this agent's address, defaulting the host to the local hostname
   * and the port to the configured default when not explicitly set.
   *
   * @param conf the Smart configuration to read the address from
   * @return "host:port" form agent address
   * @throws IOException if the local hostname cannot be resolved
   */
  public static String getAgentAddress(SmartConf conf) throws IOException {
    String agentAddress = conf.get(SmartConfKeys.SMART_AGENT_ADDRESS_KEY);
    if (agentAddress == null) {
      agentAddress = InetAddress.getLocalHost().getHostName();
    }
    int agentDefPort =
        conf.getInt(SmartConfKeys.SMART_AGENT_PORT_KEY, SmartConfKeys.SMART_AGENT_PORT_DEFAULT);
    if (!agentAddress.contains(":")) {
      agentAddress += ":" + agentDefPort;
    }
    return agentAddress;
  }

  /** Immutable "host:port" value object with structural equality. */
  public static class HostPort {
    private final String host;
    private final String port;

    /**
     * @param address an address of the form "host:port"
     * @throws IllegalArgumentException if no port separator is present
     *     (the original threw a bare ArrayIndexOutOfBoundsException)
     */
    public HostPort(String address) {
      String[] hostPort = address.split(":");
      if (hostPort.length < 2) {
        throw new IllegalArgumentException(
            "Address must be of the form host:port, but was: " + address);
      }
      host = hostPort[0];
      port = hostPort[1];
    }

    public String getHost() {
      return host;
    }

    public String getPort() {
      return port;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      HostPort hostPort = (HostPort) o;
      return Objects.equals(host, hostPort.host) && Objects.equals(port, hostPort.port);
    }

    @Override
    public int hashCode() {
      return Objects.hash(host, port);
    }

    @Override
    public String toString() {
      return "HostPort{ host='" + host + '\'' + ", port='" + port + '\'' + '}';
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentCmdletService.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentCmdletService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import org.smartdata.AgentService;
import org.smartdata.SmartConstants;
import org.smartdata.conf.SmartConf;
import org.smartdata.protocol.message.LaunchCmdlet;
import org.smartdata.protocol.message.StopCmdlet;
import org.smartdata.server.engine.cmdlet.CmdletExecutor;
import org.smartdata.server.engine.cmdlet.CmdletFactory;
import java.io.IOException;
/**
 * Agent-side service that turns cmdlet messages from the master into local
 * cmdlet executions.
 */
public class AgentCmdletService extends AgentService {
  private CmdletExecutor executor;
  private CmdletFactory factory;

  public AgentCmdletService() {
  }

  /** Builds the executor and factory from the agent context. */
  @Override
  public void init() throws IOException {
    SmartAgentContext agentContext = (SmartAgentContext) getContext();
    this.executor = new CmdletExecutor(agentContext.getConf());
    this.factory = new CmdletFactory(agentContext, agentContext.getStatusReporter());
  }

  /** No eager startup work; execution is driven by incoming messages. */
  @Override
  public void start() throws IOException {
  }

  /** Shuts down the underlying cmdlet executor. */
  @Override
  public void stop() throws IOException {
    executor.shutdown();
  }

  /**
   * Dispatches a message: launches a cmdlet, stops a running one, or
   * rejects unknown message types.
   */
  @Override
  public void execute(Message message) throws Exception {
    if (message instanceof StopCmdlet) {
      executor.stop(((StopCmdlet) message).getCmdletId());
      return;
    }
    if (message instanceof LaunchCmdlet) {
      executor.execute(factory.createCmdlet((LaunchCmdlet) message));
      return;
    }
    throw new IllegalArgumentException("unknown message " + message);
  }

  @Override
  public String getServiceName() {
    return SmartConstants.AGENT_CMDLET_SERVICE_NAME;
  }

  public CmdletExecutor getCmdletExecutor() {
    return executor;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentInfo.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent;
import org.smartdata.model.ExecutorType;
import org.smartdata.server.cluster.NodeInfo;
/**
 * Node descriptor for a SmartAgent instance; fixes the executor type of the
 * underlying {@link NodeInfo} to {@link ExecutorType#AGENT}.
 */
public class AgentInfo extends NodeInfo {
public AgentInfo(String id, String location) {
super(id, location, ExecutorType.AGENT);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/messages/AgentToMaster.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/messages/AgentToMaster.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent.messages;
import org.smartdata.server.engine.cmdlet.agent.AgentMaster;
import org.smartdata.server.engine.cmdlet.agent.messages.MasterToAgent.AgentId;
import java.io.Serializable;
/**
* Messages sent from SmartAgent to {@link AgentMaster}.
*/
public class AgentToMaster {
/**
 * Request to register an agent under a given id.
 *
 * NOTE(review): this keeps a single mutable static instance that
 * getInstance(String) replaces on every call -- not thread-safe and the
 * "singleton" is really a last-created holder. Confirm callers only touch
 * it from one thread before relying on it.
 */
public static class RegisterNewAgent implements Serializable {
private static final long serialVersionUID = -2967492906579132942L;
// Shared instance; replaced each time getInstance(String) is called.
private static RegisterNewAgent instance = new RegisterNewAgent();
private MasterToAgent.AgentId id;
private RegisterNewAgent() {
id = new AgentId("Default");
}
/** @return the current shared instance (its id may still be the "Default" placeholder) */
public static RegisterNewAgent getInstance() {
return instance;
}
/** Replaces the shared instance with one carrying the given id and returns it. */
public static RegisterNewAgent getInstance(String id) {
instance = new RegisterNewAgent();
instance.setId(new AgentId(id));
return instance;
}
public AgentId getId() {
return id;
}
public void setId(AgentId id) {
this.id = id;
}
}
/**
 * Registration message carrying an immutable agent id; equality and hash
 * are delegated to that id.
 */
public static class RegisterAgent implements Serializable {
private static final long serialVersionUID = 5566241875786339983L;
private final MasterToAgent.AgentId id;
public RegisterAgent(MasterToAgent.AgentId id) {
this.id = id;
}
public MasterToAgent.AgentId getId() {
return id;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RegisterAgent that = (RegisterAgent) o;
return id.equals(that.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
@Override
public String toString() {
return "RegisterAgent{ id=" + id + "}";
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/messages/MasterToAgent.java | smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/messages/MasterToAgent.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.cmdlet.agent.messages;
import java.io.Serializable;
/**
 * Messages sent from the agent master to SmartAgents.
 */
public class MasterToAgent {
  /**
   * Identity assigned to an agent. Travels in serialized messages, so the
   * {@code serialVersionUID} must stay stable.
   */
  public static class AgentId implements scala.Serializable {
    private static final long serialVersionUID = -4032231012646281770L;

    private final String id;

    public AgentId(String id) {
      this.id = id;
    }

    public String getId() {
      return id;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      AgentId agentId = (AgentId) o;
      // Null-safe comparison keeps equals consistent with hashCode below.
      return id == null ? agentId.id == null : id.equals(agentId.id);
    }

    @Override
    public int hashCode() {
      return id == null ? 0 : id.hashCode();
    }

    @Override
    public String toString() {
      return "AgentId{id=" + id + "}";
    }
  }

  /**
   * Acknowledgement that an agent was registered under the given id.
   */
  public static class AgentRegistered implements Serializable {
    private static final long serialVersionUID = -7212238600261028430L;

    private final AgentId id;

    public AgentRegistered(AgentId id) {
      this.id = id;
    }

    public AgentId getAgentId() {
      return id;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      AgentRegistered that = (AgentRegistered) o;
      // The original called id.equals(that.id) here while hashCode
      // null-guarded, so a null id made equals throw NPE and broke the
      // equals/hashCode contract; compare null-safely instead.
      return id == null ? that.id == null : id.equals(that.id);
    }

    @Override
    public int hashCode() {
      return id != null ? id.hashCode() : 0;
    }

    @Override
    public String toString() {
      return "AgentRegistered{id=" + id + "}";
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/message/AddNodeMessage.java | smart-engine/src/main/java/org/smartdata/server/engine/message/AddNodeMessage.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.message;
import org.smartdata.server.cluster.NodeInfo;
/**
 * Notification that a node has been added; the node payload is carried by
 * the {@link NodeMessage} base class.
 */
public class AddNodeMessage extends NodeMessage {
public AddNodeMessage(NodeInfo nodeInfo) {
super(nodeInfo);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/message/NodeMessage.java | smart-engine/src/main/java/org/smartdata/server/engine/message/NodeMessage.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.message;
import org.smartdata.server.cluster.NodeInfo;
/**
 * Base class for node membership notifications; carries the affected node.
 */
public class NodeMessage {
  // Set once at construction; final enforces the message's immutability.
  private final NodeInfo nodeInfo;

  public NodeMessage(NodeInfo nodeInfo) {
    this.nodeInfo = nodeInfo;
  }

  /** @return the node this message refers to */
  public NodeInfo getNodeInfo() {
    return nodeInfo;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/message/RemoveNodeMessage.java | smart-engine/src/main/java/org/smartdata/server/engine/message/RemoveNodeMessage.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.message;
import org.smartdata.server.cluster.NodeInfo;
/**
 * Notification that a node has been removed; the node payload is carried by
 * the {@link NodeMessage} base class.
 */
public class RemoveNodeMessage extends NodeMessage {
public RemoveNodeMessage(NodeInfo nodeInfo) {
super(nodeInfo);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/data/AccessEventFetcher.java | smart-engine/src/main/java/org/smartdata/server/engine/data/AccessEventFetcher.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.data;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.dao.AccessCountTableManager;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.metrics.FileAccessEventCollector;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
/**
 * Periodically pulls file access events from a {@code FileAccessEventCollector}
 * and forwards non-empty batches to the {@code AccessCountTableManager}.
 */
public class AccessEventFetcher {
  static final Logger LOG = LoggerFactory.getLogger(AccessEventFetcher.class);

  // Default polling period: one second, in milliseconds.
  private static final Long DEFAULT_INTERVAL = 1000L;

  private final ScheduledExecutorService scheduledExecutorService;
  private final Long fetchInterval;
  // Handle of the periodic task; null until start() has been called.
  private ScheduledFuture<?> scheduledFuture;
  private final FetchTask fetchTask;

  public AccessEventFetcher(
      Configuration conf,
      AccessCountTableManager manager,
      ScheduledExecutorService service,
      FileAccessEventCollector collector) {
    this(DEFAULT_INTERVAL, conf, manager, service, collector);
  }

  public AccessEventFetcher(
      Long fetchInterval,
      Configuration conf,
      AccessCountTableManager manager,
      FileAccessEventCollector collector) {
    this(fetchInterval, conf, manager, Executors.newSingleThreadScheduledExecutor(), collector);
  }

  public AccessEventFetcher(
      Long fetchInterval,
      Configuration conf,
      AccessCountTableManager manager,
      ScheduledExecutorService service,
      FileAccessEventCollector collector) {
    this.fetchInterval = fetchInterval;
    this.fetchTask = new FetchTask(conf, manager, collector);
    this.scheduledExecutorService = service;
  }

  /** Starts periodic fetching, aligning the first run to an interval boundary. */
  public void start() {
    long current = System.currentTimeMillis();
    long toWait = fetchInterval - (current % fetchInterval);
    this.scheduledFuture = scheduledExecutorService.scheduleAtFixedRate(
        fetchTask, toWait, fetchInterval, TimeUnit.MILLISECONDS);
  }

  /** Stops further fetching; an in-flight run is allowed to finish. */
  public void stop() {
    if (scheduledFuture != null) {
      this.scheduledFuture.cancel(false);
    }
  }

  /** One poll: collect pending events and hand them to the table manager. */
  private static class FetchTask implements Runnable {
    private final Configuration conf;
    private final AccessCountTableManager manager;
    private final FileAccessEventCollector collector;

    public FetchTask(
        Configuration conf, AccessCountTableManager manager, FileAccessEventCollector collector) {
      this.conf = conf;
      this.manager = manager;
      this.collector = collector;
    }

    @Override
    public void run() {
      try {
        List<FileAccessEvent> events = this.collector.collect();
        if (!events.isEmpty()) {
          this.manager.onAccessEventsArrived(events);
        }
      } catch (IOException e) {
        // Log and swallow: an exception escaping run() would cancel the
        // periodic schedule.
        LOG.error("IngestionTask onAccessEventsArrived error", e);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-engine/src/main/java/org/smartdata/server/engine/data/ExecutionContext.java | smart-engine/src/main/java/org/smartdata/server/engine/data/ExecutionContext.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.server.engine.data;
import java.util.HashMap;
import java.util.Map;
/**
* Abstract of rule execution environment.
*/
/**
 * Abstract of rule execution environment: a property bag of named variables
 * available while a rule executes.
 */
public class ExecutionContext {
  /** Property key under which the owning rule's id is stored. */
  public static final String RULE_ID = "RuleId";

  private Map<String, Object> envVariables = new HashMap<>();

  /**
   * Returns the rule id previously stored via {@link #setRuleId(long)}.
   * Unboxing throws NullPointerException if no rule id has been set.
   */
  public long getRuleId() {
    return getLong(RULE_ID);
  }

  public void setRuleId(long ruleId) {
    envVariables.put(RULE_ID, ruleId);
  }

  /**
   * Replaces all environment variables. A null map clears the context
   * instead of installing a null reference.
   */
  public void setProperties(Map<String, Object> properties) {
    if (properties == null) {
      envVariables.clear();
    } else {
      envVariables = properties;
    }
  }

  public void setProperty(String property, Object value) {
    envVariables.put(property, value);
  }

  /**
   * Returns the property rendered via toString, or null if absent.
   */
  public String getString(String property) {
    Object val = envVariables.get(property);
    if (val == null) {
      return null;
    }
    return val.toString();
  }

  /**
   * Returns the property as a Long. Accepts any {@link Number} (widened via
   * {@code longValue()} -- generalizes the original Integer/Long-only
   * handling) or a decimal string; returns null if absent or unparsable.
   */
  public Long getLong(String property) {
    Object val = envVariables.get(property);
    if (val == null) {
      return null;
    }
    if (val instanceof Number) {
      return ((Number) val).longValue();
    }
    if (val instanceof String) {
      try {
        return Long.parseLong((String) val);
      } catch (NumberFormatException e) {
        return null;
      }
    }
    return null;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleStringParser.java | smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleStringParser.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule;
import org.junit.Test;
import org.smartdata.conf.SmartConf;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.rule.parser.SmartRuleStringParser;
import org.smartdata.rule.parser.TranslationContext;
import java.util.LinkedList;
import java.util.List;
/**
 * Smoke tests for rule-string translation: each rule must parse and
 * translate into SQL without throwing.
 */
public class TestSmartRuleStringParser {
  @Test
  public void testRuleTranslate() throws Exception {
    String[] rules = {
        "file : path matches \"/src/*\" | sync -dest \"hdfs://remotecluster:port/dest\"",
        "file : accessCount(10min) > accessCountTop(10min, 10) | sleep -ms 0",
        "file : ac(10min) > acTop(10min, 10) | sleep -ms 0",
        "file : accessCount(10min) > accessCountBottom(10min, 10) | sleep -ms 0",
        "file : ac(10min) > acBot(10min, 10) | sleep -ms 0",
        "file : ac(10min) > accessCountTopOnStoragePolicy(10min, 10, \"ALL_SSD\") | sleep -ms 0",
        "file : ac(10min) > acTopSp(10min, 10, \"ALL_SSD\") | sleep -ms 0",
        "file : ac(10min) > accessCountBottomOnStoragePolicy(10min, 10, \"CACHE\") | sleep -ms 0",
        "file : ac(10min) > acBotSp(10min, 10, \"CACHE\") | sleep -ms 0",
        "file : ac(10min) > acBotSp(10min, 10, \"HOT\") and acBotSp(10min, 10, \"HOT\") > 0 | sleep -ms 0",
        "file : every 5h / 1h/ 20min | length > 19 | sleep -ms 10",
    };
    for (String rule : rules) {
      parseRule(rule);
    }
  }

  // Translates one rule and echoes the generated SQL statements, numbered.
  private void parseRule(String rule) throws Exception {
    TranslationContext context = new TranslationContext(1, System.currentTimeMillis());
    SmartRuleStringParser parser = new SmartRuleStringParser(rule, context, new SmartConf());
    TranslateResult result = parser.translate();
    System.out.println("\n" + rule);
    int number = 1;
    for (String sql : result.getSqlStatements()) {
      System.out.println("\t" + number + ". " + sql);
      number++;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleParser.java | smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleParser.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
import org.antlr.v4.runtime.tree.ParseTree;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.model.rule.TranslateResult;
import org.smartdata.rule.parser.SmartRuleLexer;
import org.smartdata.rule.parser.SmartRuleParser;
import org.smartdata.rule.parser.SmartRuleVisitTranslator;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
public class TestSmartRuleParser {
  // Syntax errors collected by the listener during the most recent parse.
  // NOTE: shared across calls to parseAndExecuteRule, so it must be cleared
  // before each parse (see below).
  List<RecognitionException> parseErrors = new ArrayList<>();

  /**
   * Error listener that records every syntax error into {@link #parseErrors}
   * (in addition to printing diagnostics) so tests can detect parse failures.
   */
  public class SSMRuleErrorListener extends BaseErrorListener {
    @Override
    public void syntaxError(Recognizer<?, ?> recognizer,
        Object offendingSymbol, int line, int charPositionInLine,
        String msg, RecognitionException e) {
      List<String> stack = ((Parser) recognizer).getRuleInvocationStack();
      Collections.reverse(stack);
      System.err.println("rule stack: " + stack);
      System.err.println("line " + line + ":" + charPositionInLine + " at "
          + offendingSymbol + ": " + msg);
      parseErrors.add(e);
    }
  }

  /** Every rule here is expected to parse and translate without error. */
  @Test
  public void testValidRule() throws Exception {
    List<String> rules = new LinkedList<>();
    rules.add("file : accessCount(10m) > 10 and accessCount(10m) < 20 "
        + "| cache");
    rules.add("file with path matches \"/a/b*.dat\" : "
        + "every 5s from \"2013-07-09 19:21:34\" to now + (7d + 4s ) | "
        + "inCache or accessCount(10m) > 10 and 10d > 20s | cache");
    rules.add("file with length > 1GB : "
        + "blocksize > 1 + 3 and accessCount(30s) > 3 "
        + "and storage.free(\"SSD\") > 100 | cache");
    rules.add("file with length > 3 : "
        + "storage.free(\"SSD\") > 100 and not inCache | cache");
    rules.add("file : accessCount(10min) > 20 | cache");
    rules.add("file: every 5s from now to now + 10d | length > 3 | cache");
    rules.add("file: every 5s | length > 100mb | onessd");
    rules.add("file: every 50ms | length > 100mb | onessd");
    rules.add("file : every 1s | age > 100day | cache");
    rules.add("file : every 1s | mtime > \"2016-09-13 12:05:06\" | cache");
    rules.add("file : every 1s | mtime > now - 70day | cache");
    rules.add("file : every 1s | storagePolicy == \"ALL_SSD\" | cache");
    rules.add("file : accessCount(10min) < 20 | uncache");
    rules.add("file : accessCount(10min) == 0 | uncache");
    rules.add("file : accessCount(10min) <= 1 | uncache");
    rules.add("file : accessCount(1min) > 5 | cache -replica 2");
    rules.add("file : age <= 1 | echo -msg \"crul world\"");
    rules.add("file : age <= 1 | read ; read");
    rules.add("file : age <= 1 | read ; sync -dest hdfs://{}[]@&$=?!");
    for (String rule : rules) {
      parseAndExecuteRule(rule);
    }
  }

  /** Every rule here is expected to fail either in the parser or the translator. */
  @Test
  public void testInvalidRule() throws Exception {
    List<String> rules = new LinkedList<>();
    rules.add("someobject: length > 3mb | cache");
    rules.add("file : length > 3day | cache");
    rules.add("file : length() > 3tb | cache");
    rules.add("file : accessCount(10m) > 2 and length() > 3 | cache");
    rules.add("file : every 1s | mtime > 100s | cache");
    for (String rule : rules) {
      try {
        parseAndExecuteRule(rule);
        Assert.fail("Should have exception here!");
      } catch (Exception e) {
        // expected: invalid rules must raise an exception
      }
    }
  }

  /**
   * Parses the given rule text, translates it to SQL statements and throws
   * if any syntax error was reported by the parser.
   *
   * @param rule rule text to parse
   * @throws Exception if the rule fails to parse or translate
   */
  private void parseAndExecuteRule(String rule) throws Exception {
    // Reset state from any previous invocation. parseErrors is a shared
    // field: without this, errors left over from an earlier rule would make
    // every subsequent rule in the same test appear to fail.
    parseErrors.clear();
    System.out.println("--> " + rule);
    InputStream input = new ByteArrayInputStream(rule.getBytes());
    ANTLRInputStream antlrInput = new ANTLRInputStream(input);
    SmartRuleLexer lexer = new SmartRuleLexer(antlrInput);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    SmartRuleParser parser = new SmartRuleParser(tokens);
    parser.removeErrorListeners();
    parser.addErrorListener(new SSMRuleErrorListener());
    ParseTree tree = parser.ssmrule();
    System.out.println("Parser tree: " + tree.toStringTree(parser));
    System.out.println("Total number of errors: " + parseErrors.size());
    SmartRuleVisitTranslator visitor = new SmartRuleVisitTranslator();
    visitor.visit(tree);
    System.out.println("\nQuery:");
    TranslateResult result = visitor.generateSql();
    int index = 1;
    for (String sql : result.getSqlStatements()) {
      System.out.println("" + index + ". " + sql);
      index++;
    }
    if (parseErrors.size() > 0) {
      throw new IOException("Error while parse rule");
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/test/java/org/smartdata/rule/objects/TestProperty.java | smart-rule/src/test/java/org/smartdata/rule/objects/TestProperty.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.rule.parser.ValueType;
import java.util.Arrays;
/**
* Tests to test Property.
*/
public class TestProperty {
  /**
   * Verifies Property.equals distinguishes differing parameter-type lists and
   * return types, and treats structurally identical properties as equal.
   */
  @Test
  public void testEqual() {
    Property p1 = new Property("test", ValueType.LONG,
        Arrays.asList(ValueType.TIMEINTVAL),
        "access_count_table", "", false);
    // Same as p1 except the parameter type list.
    Property p2 = new Property("test", ValueType.LONG,
        Arrays.asList(ValueType.LONG),
        "access_count_table", "", false);
    // Structurally identical to p1.
    Property p3 = new Property("test", ValueType.LONG,
        Arrays.asList(ValueType.TIMEINTVAL),
        "access_count_table", "", false);
    // Same as p1 except the return type.
    Property p4 = new Property("test", ValueType.TIMEINTVAL,
        Arrays.asList(ValueType.TIMEINTVAL),
        "access_count_table", "", false);
    // Same as p1 except no parameter types at all.
    Property p5 = new Property("test", ValueType.LONG,
        null,
        "access_count_table", "", false);
    // assertFalse/assertTrue instead of assertTrue(!...) — clearer intent
    // and better failure messages.
    Assert.assertFalse(p1.equals(p2));
    Assert.assertTrue(p1.equals(p3));
    Assert.assertFalse(p1.equals(p4));
    Assert.assertFalse(p1.equals(p5));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/test/java/org/smartdata/rule/objects/TestPropertyRealParas.java | smart-rule/src/test/java/org/smartdata/rule/objects/TestPropertyRealParas.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.rule.parser.ValueType;
import java.util.Arrays;
import java.util.List;
/**
* Tests to test PropertyRealParas.
*/
public class TestPropertyRealParas {
  /**
   * Verifies PropertyRealParas.equals compares the actual parameter values:
   * equal values are equal, different values/types/null are not.
   */
  @Test
  public void testEqual() {
    Property p = new Property("test", ValueType.LONG,
        Arrays.asList(ValueType.LONG),
        "test", "", false);
    // Boxed literals instead of the deprecated Long/Integer/String
    // constructors (deprecated since Java 9); behavior under equals()
    // is identical.
    List<Object> v1 = Arrays.asList((Object) 1L);
    List<Object> v2 = Arrays.asList((Object) 1);
    List<Object> v3 = Arrays.asList((Object) "1");
    List<Object> v4 = Arrays.asList((Object) 100L);
    List<Object> v5 = Arrays.asList((Object) 1L);
    PropertyRealParas rp1 = new PropertyRealParas(p, v1);
    PropertyRealParas rp2 = new PropertyRealParas(p, v2);
    PropertyRealParas rp3 = new PropertyRealParas(p, v3);
    PropertyRealParas rp4 = new PropertyRealParas(p, v4);
    PropertyRealParas rp5 = new PropertyRealParas(p, v5);
    PropertyRealParas rp6 = new PropertyRealParas(p, null);
    Assert.assertFalse(rp1.equals(rp2));
    Assert.assertFalse(rp1.equals(rp3));
    Assert.assertFalse(rp1.equals(rp4));
    // assertTrue(x) instead of the double negative assertFalse(!x).
    Assert.assertTrue(rp1.equals(rp5));
    Assert.assertFalse(rp1.equals(rp6));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/ScheduleInfo.java | smart-rule/src/main/java/org/smartdata/rule/ScheduleInfo.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule;
public interface ScheduleInfo {
  /** Time at which the schedule starts — presumably epoch millis; TODO confirm unit against callers. */
  long getStartTime();
  /** Execution rate/interval — unit not visible here; NOTE(review): verify against implementations. */
  int getRate();
  /** Number of rounds (executions) to perform. */
  int getRounds();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/exceptions/RuleParserException.java | smart-rule/src/main/java/org/smartdata/rule/exceptions/RuleParserException.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.exceptions;
/**
* Represent an error in rule parser.
*/
public class RuleParserException extends RuntimeException {
  /**
   * @param info human-readable description of the parse failure
   */
  public RuleParserException(String info) {
    super(info);
  }

  /**
   * Preserves the underlying cause so callers can see the full exception
   * chain instead of only a flattened message.
   *
   * @param info human-readable description of the parse failure
   * @param cause lower-level exception that triggered this error
   */
  public RuleParserException(String info, Throwable cause) {
    super(info, cause);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/exceptions/RuleParameterException.java | smart-rule/src/main/java/org/smartdata/rule/exceptions/RuleParameterException.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.exceptions;
public class RuleParameterException extends RuntimeException {
  /**
   * @param e human-readable description of the invalid rule parameter
   */
  public RuleParameterException(String e) {
    super(e);
  }

  /**
   * Preserves the underlying cause so callers can see the full exception
   * chain instead of only a flattened message.
   *
   * @param e human-readable description of the invalid rule parameter
   * @param cause lower-level exception that triggered this error
   */
  public RuleParameterException(String e, Throwable cause) {
    super(e, cause);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/triggers/TriggerBase.java | smart-rule/src/main/java/org/smartdata/rule/triggers/TriggerBase.java | package org.smartdata.rule.triggers;
/**
* Base of rule triggers supported.
*/
public class TriggerBase {
  // Placeholder with no behavior yet — presumably intended as the common
  // supertype for concrete trigger implementations (none visible here).
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/objects/DirectoryObject.java | smart-rule/src/main/java/org/smartdata/rule/objects/DirectoryObject.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
/**
* Definition of rule object 'Directory'.
*/
public class DirectoryObject {
  // Placeholder only. NOTE(review): unlike StorageObject it does not extend
  // SmartObject or define any properties yet — presumably unimplemented.
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/objects/Property.java | smart-rule/src/main/java/org/smartdata/rule/objects/Property.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
import org.smartdata.rule.parser.ValueType;
import java.util.List;
import java.util.Objects;
/**
* Property of SSM object.
*/
public class Property {
  private String propertyName;
  // Value type this property evaluates to.
  private ValueType retType;
  // Types of the property's parameters; null means the property takes none.
  private List<ValueType> paramsTypes;
  private String tableName;
  private String tableItemName;
  // Template with $0, $1, ... placeholders substituted by formatParameters().
  private String formatTemplate;
  private boolean isGlobal;

  public Property(String propertyName, ValueType retType, List<ValueType> paramsTypes,
      String tableName, String tableItemName, boolean isGlobal) {
    this.propertyName = propertyName;
    this.retType = retType;
    this.paramsTypes = paramsTypes;
    this.tableName = tableName;
    this.tableItemName = tableItemName;
    this.isGlobal = isGlobal;
  }

  // TODO: re-arch to couple paramsTypes and formatTemplate
  public Property(String propertyName, ValueType retType,
      List<ValueType> paramsTypes, String tableName,
      String tableItemName, boolean isGlobal,
      String formatTemplate) {
    this.propertyName = propertyName;
    this.retType = retType;
    this.paramsTypes = paramsTypes;
    this.tableName = tableName;
    this.tableItemName = tableItemName;
    this.formatTemplate = formatTemplate;
    this.isGlobal = isGlobal;
  }

  public String getPropertyName() {
    return propertyName;
  }

  public ValueType getValueType() {
    return retType;
  }

  public List<ValueType> getParamsTypes() {
    return paramsTypes;
  }

  public String getTableName() {
    return tableName;
  }

  public String getTableItemName() {
    return tableItemName;
  }

  public boolean isGlobal() {
    return isGlobal;
  }

  public boolean hasParameters() {
    return paramsTypes != null;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    Property property = (Property) o;
    return isGlobal == property.isGlobal
        && Objects.equals(propertyName, property.propertyName)
        && retType == property.retType
        && Objects.equals(paramsTypes, property.paramsTypes)
        && Objects.equals(tableName, property.tableName)
        && Objects.equals(tableItemName, property.tableItemName)
        && Objects.equals(formatTemplate, property.formatTemplate);
  }

  @Override
  public int hashCode() {
    return Objects.hash(
        propertyName, retType, paramsTypes, tableName, tableItemName, formatTemplate, isGlobal);
  }

  /**
   * Builds a unique instance id for this property with the given parameter
   * values, e.g. {@code accessCount_600000}.
   *
   * @param values actual parameter values, one per entry of paramsTypes
   * @return the property name suffixed with each sanitized value
   */
  public String instId(List<Object> values) {
    if (getParamsTypes() == null) {
      return propertyName;
    }
    String ret = propertyName;
    assert (values.size() == getParamsTypes().size());
    for (int i = 0; i < values.size(); i++) {
      // BUGFIX: previously switched on getValueType() — the property's RETURN
      // type — instead of the type of the i-th parameter. For a property with
      // a LONG return type and STRING parameters (e.g. storage.free("SSD"))
      // that cast the String value to Long and threw ClassCastException.
      // formatParameters() below already does this correctly.
      switch (getParamsTypes().get(i)) {
        case TIMEINTVAL:
        case LONG:
          ret += "_" + ((Long) values.get(i));
          break;
        case STRING:
          // Sanitize separators/quotes so the id stays a single token.
          ret += "_" + ((String) values.get(i)).replaceAll("[\t -\"']+", "_");
          break;
        default:
          assert (false); // TODO: throw exception
      }
    }
    return ret;
  }

  /**
   * Renders the SQL fragment for this property: substitutes each parameter
   * value into the $i placeholders of formatTemplate, or falls back to the
   * plain table item name when no template is configured.
   *
   * @param values actual parameter values; may be null to return the raw template
   * @return the SQL fragment representing this property instance
   */
  public String formatParameters(List<Object> values) {
    if (formatTemplate == null) {
      return tableItemName;
    }
    if (values == null) {
      return formatTemplate;
    }
    String ret = formatTemplate;
    // TODO: need more checks to ensure replace correctly
    for (int i = 0; i < values.size(); i++) {
      if (ret.contains("$" + i)) {
        String v;
        switch (paramsTypes.get(i)) {
          case TIMEINTVAL:
          case LONG:
            v = "" + ((Long) values.get(i));
            break;
          case STRING:
            // Quote strings for SQL. NOTE(review): values are not escaped;
            // only safe because they originate from the parsed rule text.
            v = "'" + ((String) values.get(i)) + "'";
            break;
          default:
            v = null; // TODO: throw exception
        }
        if (v != null) {
          ret = ret.replaceAll("\\$" + i, v);
        }
      }
    }
    return ret;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/objects/CacheObject.java | smart-rule/src/main/java/org/smartdata/rule/objects/CacheObject.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
/**
* Definition of rule object 'Cache'.
*/
public class CacheObject {
  // Placeholder only. NOTE(review): unlike StorageObject it does not extend
  // SmartObject or define any properties yet — presumably unimplemented.
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/objects/ObjectType.java | smart-rule/src/main/java/org/smartdata/rule/objects/ObjectType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
/**
* Type of this object.
*/
public enum ObjectType {
  FILE("file"), CACHE("cache"), STORAGE("storage"), DIRECTORY("directory");

  /** Lower-case rule-text name of this object type. */
  private String name;

  ObjectType(String name) {
    this.name = name;
  }

  /** Returns the name used for this object type in rule text. */
  public String getName() {
    return name;
  }

  /**
   * Looks up the object type whose rule-text name equals the argument.
   * Returns null when no type matches.
   */
  public static ObjectType fromName(String name) {
    for (ObjectType candidate : values()) {
      if (candidate.name.equals(name)) {
        return candidate;
      }
    }
    return null;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObject.java | smart-rule/src/main/java/org/smartdata/rule/objects/SmartObject.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
import java.util.List;
import java.util.Map;
/**
* Acts as base of SSM objects.
*/
public abstract class SmartObject {
  private ObjectType type;
  // Properties a rule requires from this object; set by the translator.
  private List<Property> requiredProperties;

  public SmartObject(ObjectType type) {
    this.type = type;
  }

  public ObjectType getType() {
    return type;
  }

  /**
   * Factory mapping a type name from rule text to a concrete object.
   * Returns null for unrecognized names.
   */
  public static SmartObject getInstance(String typeName) {
    // TODO: create through class name
    if (typeName.equals("file")) {
      return new FileObject();
    }
    if (typeName.equals("storage")) {
      return new StorageObject();
    }
    return null;
  }

  /**
   * Records which PROPERTIES of this object the rule requires.
   * @param properties
   */
  public void setRequiredProperties(List<Property> properties) {
    requiredProperties = properties;
  }

  public List<Property> getPropertyRequired() {
    return requiredProperties;
  }

  public boolean containsProperty(String propertyName) {
    return getProperty(propertyName) != null;
  }

  public Property getProperty(String propertyName) {
    return getProperties().get(propertyName);
  }

  /** Name-to-property table for the concrete object type. */
  public abstract Map<String, Property> getProperties();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/objects/StorageObject.java | smart-rule/src/main/java/org/smartdata/rule/objects/StorageObject.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
import org.smartdata.rule.parser.ValueType;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Definition of rule object 'Storage'.
*/
public class StorageObject extends SmartObject {
public static final Map<String, Property> PROPERTIES;
static {
PROPERTIES = new HashMap<>();
PROPERTIES.put("capacity", new Property("capacity", ValueType.LONG,
Arrays.asList(ValueType.STRING), "storage", "capacity", true,
"type = $0 AND capacity"));
PROPERTIES.put("free", new Property("free", ValueType.LONG,
Arrays.asList(ValueType.STRING), "storage", "free", true,
"type = $0 AND free"));
PROPERTIES.put("utilization", new Property("utilization", ValueType.LONG,
Arrays.asList(ValueType.STRING), "storage", "free", true,
"type = $0 AND (capacity - free) * 100.0 / capacity"));
}
public StorageObject() {
super(ObjectType.STORAGE);
}
public Map<String, Property> getProperties() {
return PROPERTIES;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-rule/src/main/java/org/smartdata/rule/objects/PropertyRealParas.java | smart-rule/src/main/java/org/smartdata/rule/objects/PropertyRealParas.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.rule.objects;
import java.util.List;
import java.util.Objects;
/**
* Log parameters for a property.
*/
public class PropertyRealParas {
  // The property definition these actual parameter values belong to.
  private Property property;
  // Actual parameter values, aligned with property.getParamsTypes().
  private List<Object> values;

  public PropertyRealParas(Property p, List<Object> values) {
    this.property = p;
    this.values = values;
  }

  public Property getProperty() {
    return property;
  }

  public List<Object> getValues() {
    return values;
  }

  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    PropertyRealParas other = (PropertyRealParas) o;
    return Objects.equals(values, other.values)
        && Objects.equals(property, other.property);
  }

  @Override
  public int hashCode() {
    return Objects.hash(property, values);
  }

  /** Renders the SQL fragment of the property with these values substituted. */
  public String formatParameters() {
    return property.formatParameters(values);
  }

  /** Unique instance id for the property with all values applied. */
  public String instId() {
    return property.instId(values);
  }

  /** Unique instance id using only the values in the index range [s, e). */
  public String instId(int s, int e) {
    return property.instId(values.subList(s, e));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.