repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestXattrDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestXattrDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
/**
 * Unit-test scaffold for {@link XattrDao}. Currently only verifies that the
 * DAO can be constructed against the embedded test database; the list-query
 * test body is still a placeholder.
 */
public class TestXattrDao extends TestDaoUtil {
  private XattrDao xattrDao;

  @Before
  public void initOtherDao() throws Exception {
    initDao();
    xattrDao = new XattrDao(druidPool.getDataSource());
  }

  @After
  public void closeOtherDao() throws Exception {
    closeDao();
    xattrDao = null;
  }

  /**
   * Placeholder test (renamed from {@code testgetXattrList} to follow
   * lowerCamelCase). TODO(review): add real assertions against
   * {@code XattrDao} once fixtures are available — as written this only
   * proves setup/teardown do not throw.
   */
  @Test
  public void testGetXattrList() {
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestErasureCodingPolicyDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestErasureCodingPolicyDao.java | /**
* Created by qwc on 18-9-29.
* <p>
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.ErasureCodingPolicyInfo;
import java.util.ArrayList;
import java.util.List;
/**
 * Tests for {@link ErasureCodingPolicyDao}: single insert with lookup by
 * name/id, and batch insert with full-table retrieval.
 */
public class TestErasureCodingPolicyDao extends TestDaoUtil {
  private ErasureCodingPolicyDao ecPolicyDao;

  @Before
  public void initErasureCodingPolicyDao() throws Exception {
    initDao();
    ecPolicyDao = new ErasureCodingPolicyDao(druidPool.getDataSource());
  }

  @Test
  public void testInsert() throws Exception {
    ErasureCodingPolicyInfo ecPolicyInfo = new ErasureCodingPolicyInfo((byte) 2, "PolicyInfo1");
    ecPolicyDao.insert(ecPolicyInfo);
    // assertEquals (expected, actual) gives a diff-style failure message,
    // unlike the original assertTrue(a.equals(b)) which only reports "false".
    Assert.assertEquals(ecPolicyInfo, ecPolicyDao.getEcPolicyByName("PolicyInfo1"));
    Assert.assertEquals(ecPolicyInfo, ecPolicyDao.getEcPolicyById((byte) 2));
  }

  @Test
  public void testInsertAll() throws Exception {
    ecPolicyDao.deleteAll();
    List<ErasureCodingPolicyInfo> list = new ArrayList<>();
    list.add(new ErasureCodingPolicyInfo((byte) 1, "PolicyInfo1"));
    list.add(new ErasureCodingPolicyInfo((byte) 3, "PolicyInfo3"));
    ecPolicyDao.insert(list);
    List<ErasureCodingPolicyInfo> getList = ecPolicyDao.getAllEcPolicies();
    // Split the original &&-combined assertion so a failure pinpoints which
    // element mismatched; pass/fail outcome is unchanged.
    Assert.assertEquals(list.get(0), getList.get(0));
    Assert.assertEquals(list.get(1), getList.get(1));
  }

  @After
  public void closeErasureCodingPolicyDao() throws Exception {
    closeDao();
    ecPolicyDao = null;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestFileStateDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestFileStateDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.FileState;
import org.springframework.dao.EmptyResultDataAccessException;
import java.util.List;
/**
 * Tests for {@link FileStateDao}: upsert semantics (insertUpdate), lookup by
 * path, and single/bulk deletion.
 */
public class TestFileStateDao extends TestDaoUtil {
  private FileStateDao fileStateDao;

  @Before
  public void initFileDao() throws Exception {
    initDao();
    fileStateDao = new FileStateDao(druidPool.getDataSource());
  }

  /**
   * Releases the shared database between tests. Every sibling DAO test class
   * does this in an {@code @After}; it was missing here, leaving the pool
   * open and state leaking across tests.
   */
  @After
  public void closeFileDao() throws Exception {
    closeDao();
    fileStateDao = null;
  }

  @Test
  public void testInsertUpdate() throws Exception {
    FileState fileState1 = new FileState("/file1", FileState.FileType.COMPACT,
        FileState.FileStage.PROCESSING);
    FileState fileState2 = new FileState("/file2", FileState.FileType.COMPRESSION,
        FileState.FileStage.DONE);
    fileStateDao.insertUpdate(fileState1);
    fileStateDao.insertUpdate(fileState2);
    List<FileState> fileStates = fileStateDao.getAll();
    Assert.assertEquals(2, fileStates.size());
    Assert.assertEquals(fileState1, fileStateDao.getByPath("/file1"));
    Assert.assertEquals(fileState2, fileStateDao.getByPath("/file2"));
    // Re-inserting the same path must update in place, not add a row.
    fileState1 = new FileState("/file1", FileState.FileType.COMPACT,
        FileState.FileStage.DONE);
    fileStateDao.insertUpdate(fileState1);
    fileStates = fileStateDao.getAll();
    Assert.assertEquals(2, fileStates.size());
    Assert.assertEquals(fileState1, fileStateDao.getByPath("/file1"));
    Assert.assertEquals(fileState2, fileStateDao.getByPath("/file2"));
  }

  @Test
  public void testDelete() throws Exception {
    FileState fileState1 = new FileState("/file1", FileState.FileType.COMPACT,
        FileState.FileStage.PROCESSING);
    FileState fileState2 = new FileState("/file2", FileState.FileType.COMPRESSION,
        FileState.FileStage.DONE);
    FileState fileState3 = new FileState("/file3", FileState.FileType.S3,
        FileState.FileStage.DONE);
    fileStateDao.insertUpdate(fileState1);
    fileStateDao.insertUpdate(fileState2);
    fileStateDao.insertUpdate(fileState3);
    fileStateDao.deleteByPath(fileState1.getPath(), false);
    List<FileState> fileStates = fileStateDao.getAll();
    Assert.assertEquals(2, fileStates.size());
    try {
      fileStateDao.getByPath(fileState1.getPath());
      Assert.fail();
    } catch (EmptyResultDataAccessException e) {
      // It is correct if no entry found
    }
    fileStateDao.deleteAll();
    fileStates = fileStateDao.getAll();
    Assert.assertEquals(0, fileStates.size());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestClusterConfigDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestClusterConfigDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.ClusterConfig;
/**
 * Tests for {@link ClusterConfigDao}: insert/lookup by id and name, update,
 * count-by-name, and batch insert with auto-assigned ids.
 */
public class TestClusterConfigDao extends TestDaoUtil {
  private ClusterConfigDao clusterConfigDao;

  @Before
  public void initClusterConfigDao() throws Exception {
    initDao();
    clusterConfigDao = new ClusterConfigDao(druidPool.getDataSource());
  }

  @After
  public void closeClusterConfigDAO() throws Exception {
    closeDao();
    clusterConfigDao = null;
  }

  @Test
  public void testInsertAndGet() {
    ClusterConfig clusterConfig = new ClusterConfig(1, "test", "test");
    clusterConfigDao.insert(clusterConfig);
    // assertEquals reports both values on failure; assertTrue(equals) does not.
    Assert.assertEquals(clusterConfig, clusterConfigDao.getById(1));
    Assert.assertEquals(clusterConfig, clusterConfigDao.getByName("test"));
  }

  @Test
  public void testUpdate() {
    ClusterConfig clusterConfig = new ClusterConfig(1, "test", "test1");
    clusterConfigDao.insert(clusterConfig);
    clusterConfigDao.updateById(1, "test2");
    // Mirror the DB-side update on the local object before comparing.
    clusterConfig.setConfig_path("test2");
    Assert.assertEquals(clusterConfig, clusterConfigDao.getById(1));
  }

  @Test
  public void testgetCountByName() {
    Assert.assertEquals(0, clusterConfigDao.getCountByName("test"));
    ClusterConfig clusterConfig = new ClusterConfig(1, "test", "test1");
    clusterConfigDao.insert(clusterConfig);
    Assert.assertEquals(1, clusterConfigDao.getCountByName("test"));
  }

  @Test
  public void testBatchInsert() {
    // Cid 0 means "unassigned"; the DAO assigns ids 1 and 2 on insert.
    ClusterConfig[] clusterConfigs = new ClusterConfig[2];
    clusterConfigs[0] = new ClusterConfig(0, "test1", "test1");
    clusterConfigs[1] = new ClusterConfig(0, "test2", "test2");
    // (removed an unused local ClusterConfig that was never inserted or read)
    clusterConfigDao.insert(clusterConfigs);
    clusterConfigs[0].setCid(1);
    clusterConfigs[1].setCid(2);
    Assert.assertEquals(clusterConfigs[0], clusterConfigDao.getByName("test1"));
    Assert.assertEquals(clusterConfigs[1], clusterConfigDao.getByName("test2"));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestStorageDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestStorageDao.java | /**
* Created by cy on 17-6-19.
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.StorageCapacity;
import org.smartdata.model.StoragePolicy;
import java.util.Map;
/**
 * Tests for {@link StorageDao}: upsert/lookup of storage capacities and the
 * storage-policy id-to-name mapping.
 */
public class TestStorageDao extends TestDaoUtil {
  private StorageDao storageDao;

  @Before
  public void initStorageDao() throws Exception {
    initDao();
    storageDao = new StorageDao(druidPool.getDataSource());
  }

  @After
  public void closeStorageDao() throws Exception {
    closeDao();
    storageDao = null;
  }

  @Test
  public void testInsertGetStorageTable() throws Exception {
    StorageCapacity[] storageCapacities = new StorageCapacity[2];
    storageCapacities[0] = new StorageCapacity("type1", 1L, 1L);
    storageCapacities[1] = new StorageCapacity("type2", 2L, 2L);
    storageDao.insertUpdateStoragesTable(storageCapacities);
    // assertEquals gives expected/actual in the failure message, unlike
    // the original assertTrue(a.equals(b)).
    Assert.assertEquals(storageCapacities[0], storageDao.getStorageCapacity("type1"));
    Map<String, StorageCapacity> map = storageDao.getStorageTablesItem();
    Assert.assertEquals(storageCapacities[1], map.get("type2"));
  }

  @Test
  public void testInsertGetStorage_policyTable() throws Exception {
    StoragePolicy storagePolicy = new StoragePolicy((byte) 1, "pName");
    storageDao.insertStoragePolicyTable(storagePolicy);
    Assert.assertEquals("pName", storageDao.getStoragePolicyName(1));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestTableEvictor.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestTableEvictor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.metastore.MetaStore;
import static org.mockito.Mockito.mock;
/**
 * Tests for {@link TableEvictor} policies driving an
 * {@link AccessCountTableDeque}: CountEvictor caps the number of retained
 * tables; DurationEvictor caps the total covered time span.
 */
public class TestTableEvictor {
  // Mocked MetaStore — evictors under test never touch a real database.
  private final MetaStore adapter = mock(MetaStore.class);

  @Test
  public void testCountEvictor() {
    // Keep at most 3 tables; oldest is dropped on overflow.
    TableEvictor evictor = new CountEvictor(adapter, 3);
    AccessCountTableDeque deque = new AccessCountTableDeque(evictor);
    AccessCountTable first = new AccessCountTable(0L, 1L);
    deque.addAndNotifyListener(first);
    Assert.assertEquals(1, deque.size());
    AccessCountTable second = new AccessCountTable(1L, 2L);
    deque.addAndNotifyListener(second);
    Assert.assertEquals(2, deque.size());
    deque.addAndNotifyListener(new AccessCountTable(2L, 3L));
    Assert.assertEquals(3, deque.size());
    deque.addAndNotifyListener(new AccessCountTable(3L, 4L));
    Assert.assertEquals(3, deque.size());
    Assert.assertFalse(deque.contains(first));
    deque.addAndNotifyListener(new AccessCountTable(4L, 5L));
    Assert.assertEquals(3, deque.size());
    Assert.assertFalse(deque.contains(second));
  }

  @Test
  public void testDurationEvictor() {
    // Keep tables whose combined interval fits within a span of 10.
    TableEvictor evictor = new DurationEvictor(adapter, 10);
    AccessCountTableDeque deque = new AccessCountTableDeque(evictor);
    AccessCountTable first = new AccessCountTable(0L, 3L);
    deque.addAndNotifyListener(first);
    Assert.assertEquals(1, deque.size());
    AccessCountTable second = new AccessCountTable(3L, 7L);
    deque.addAndNotifyListener(second);
    Assert.assertEquals(2, deque.size());
    deque.addAndNotifyListener(new AccessCountTable(7L, 10L));
    Assert.assertEquals(3, deque.size());
    deque.addAndNotifyListener(new AccessCountTable(11L, 12L));
    Assert.assertEquals(3, deque.size());
    Assert.assertFalse(deque.contains(first));
    deque.addAndNotifyListener(new AccessCountTable(12L, 13L));
    Assert.assertEquals(4, deque.size());
    Assert.assertTrue(deque.contains(second));
    // A wide table (13..22) pushes the span past 10, evicting older entries.
    deque.addAndNotifyListener(new AccessCountTable(13L, 22L));
    Assert.assertEquals(2, deque.size());
    Assert.assertFalse(deque.contains(second));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestSystemInfoDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestSystemInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.SystemInfo;
import java.util.List;
/**
 * Tests for {@link SystemInfoDao}: insert/lookup by property, batch insert,
 * and update of an existing property's value.
 */
public class TestSystemInfoDao extends TestDaoUtil {
  private SystemInfoDao systemInfoDao;

  @Before
  public void initSystemInfoDao() throws Exception {
    initDao();
    systemInfoDao = new SystemInfoDao(druidPool.getDataSource());
  }

  @After
  public void closeSystemInfoDao() throws Exception {
    closeDao();
    systemInfoDao = null;
  }

  @Test
  public void testInsertAndGet() {
    SystemInfo systemInfo = new SystemInfo("test", "test");
    systemInfoDao.insert(systemInfo);
    // assertEquals reports both values on failure; assertTrue(equals) does not.
    Assert.assertEquals(systemInfo, systemInfoDao.getByProperty("test"));
  }

  @Test
  public void testBatchInsertAndQuery() {
    SystemInfo[] systemInfos = new SystemInfo[2];
    systemInfos[0] = new SystemInfo("test", "test");
    systemInfos[1] = new SystemInfo("test1", "test1");
    systemInfoDao.insert(systemInfos);
    List<SystemInfo> systemInfoList = systemInfoDao.getAll();
    for (int i = 0; i < 2; i++) {
      Assert.assertEquals(systemInfos[i], systemInfoList.get(i));
    }
  }

  @Test
  public void testUpdate() {
    SystemInfo systemInfo = new SystemInfo("test", "test");
    systemInfoDao.insert(systemInfo);
    SystemInfo newSystemInfo = new SystemInfo("test", "test1");
    systemInfoDao.update(newSystemInfo);
    Assert.assertEquals(newSystemInfo, systemInfoDao.getByProperty("test"));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestDataNodeInfoDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestDataNodeInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.DataNodeInfo;
import java.util.List;
/**
 * Tests for {@link DataNodeInfoDao}: insert, lookup by UUID, full listing,
 * and single/bulk deletion.
 */
public class TestDataNodeInfoDao extends TestDaoUtil {
  private DataNodeInfoDao dataNodeInfoDao;

  @Before
  public void initDataNodeInfoDao() throws Exception {
    initDao();
    dataNodeInfoDao = new DataNodeInfoDao(druidPool.getDataSource());
  }

  @After
  public void closeDataNodeInfoDao() throws Exception {
    closeDao();
    dataNodeInfoDao = null;
  }

  @Test
  public void testInsertGetDataInfo() throws Exception {
    DataNodeInfo insertInfo1 = new DataNodeInfo(
        "UUID1", "hostname", "www.ssm.com", 10000, 50, "lab");
    dataNodeInfoDao.insert(insertInfo1);
    List<DataNodeInfo> getInfo1 = dataNodeInfoDao.getByUuid("UUID1");
    Assert.assertEquals(1, getInfo1.size());
    // assertEquals gives expected/actual in the failure message.
    Assert.assertEquals(insertInfo1, getInfo1.get(0));
    // A row with zeroed numeric fields and a null location must round-trip too.
    DataNodeInfo insertInfo2 = new DataNodeInfo(
        "UUID2", "HOSTNAME", "www.ssm.com", 0, 0, null);
    dataNodeInfoDao.insert(insertInfo2);
    List<DataNodeInfo> getInfo2 = dataNodeInfoDao.getByUuid("UUID2");
    Assert.assertEquals(1, getInfo2.size());
    Assert.assertEquals(insertInfo2, getInfo2.get(0));
    List<DataNodeInfo> infos = dataNodeInfoDao.getAll();
    Assert.assertEquals(2, infos.size());
    dataNodeInfoDao.delete(insertInfo1.getUuid());
    infos = dataNodeInfoDao.getAll();
    Assert.assertEquals(1, infos.size());
    dataNodeInfoDao.deleteAll();
    infos = dataNodeInfoDao.getAll();
    Assert.assertEquals(0, infos.size());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestActionDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestActionDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.ActionInfo;
import org.springframework.dao.EmptyResultDataAccessException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Tests for {@link ActionDao}: paging, insert/lookup, update, deletion,
 * the getLatestActions overloads, and max-id generation.
 */
public class TestActionDao extends TestDaoUtil {
  @Rule
  public ExpectedException expectedException = ExpectedException.none();
  private ActionDao actionDao;

  @Before
  public void initActionDao() throws Exception {
    initDao();
    actionDao = new ActionDao(druidPool.getDataSource());
  }

  @After
  public void closeActionDao() throws Exception {
    actionDao = null;
    closeDao();
  }

  /**
   * Builds a finished "cache" action with the fixed timestamps the original
   * tests used everywhere; factored out to remove eight near-identical
   * constructor calls.
   */
  private static ActionInfo newCacheAction(long actionId, String result, String log) {
    Map<String, String> args = new HashMap<>();
    return new ActionInfo(actionId, 1,
        "cache", args, result,
        log, false, 123213213L, true, 123123L,
        100);
  }

  @Test
  public void testGetAPageOfAction() {
    // ActionInfo's result and log queried from Metastore will be discarded
    // and they are set by "".
    ActionInfo actionInfo = newCacheAction(1, "", "");
    ActionInfo actionInfo1 = newCacheAction(2, "", "");
    ActionInfo actionInfo2 = newCacheAction(3, "", "");
    ActionInfo actionInfo3 = newCacheAction(4, "", "");
    actionDao.insert(new ActionInfo[]{actionInfo, actionInfo1, actionInfo2, actionInfo3});
    List<String> order = new ArrayList<>();
    order.add("aid");
    List<Boolean> desc = new ArrayList<>();
    desc.add(false);
    // Page of size 1 starting at offset 2, ascending by aid -> third action.
    Assert.assertEquals(actionInfo2, actionDao.getAPageOfAction(2, 1, order, desc).get(0));
  }

  @Test
  public void testInsertGetAction() throws Exception {
    ActionInfo actionInfo = newCacheAction(1, "Test", "Test");
    actionDao.insert(new ActionInfo[]{actionInfo});
    ActionInfo dbActionInfo = actionDao.getById(1L);
    Assert.assertEquals(actionInfo, dbActionInfo);
    // Get wrong id
    expectedException.expect(EmptyResultDataAccessException.class);
    actionDao.getById(100L);
  }

  @Test
  public void testUpdateAction() throws Exception {
    ActionInfo actionInfo = newCacheAction(1, "Test", "Test");
    actionDao.insert(actionInfo);
    actionInfo.setSuccessful(true);
    actionDao.update(actionInfo);
    ActionInfo dbActionInfo = actionDao.getById(actionInfo.getActionId());
    Assert.assertEquals(actionInfo, dbActionInfo);
  }

  @Test
  public void testGetNewDeleteAction() throws Exception {
    ActionInfo actionInfo = newCacheAction(1, "Test", "Test");
    List<ActionInfo> actionInfoList = actionDao.getLatestActions(0);
    // Get from empty table
    Assert.assertEquals(0, actionInfoList.size());
    actionDao.insert(actionInfo);
    actionInfo.setActionId(2);
    actionDao.insert(actionInfo);
    actionInfoList = actionDao.getLatestActions(0);
    Assert.assertEquals(2, actionInfoList.size());
    actionInfoList = actionDao.getByIds(Arrays.asList(1L, 2L));
    Assert.assertEquals(2, actionInfoList.size());
    actionDao.delete(actionInfo.getActionId());
    actionInfoList = actionDao.getAll();
    Assert.assertEquals(1, actionInfoList.size());
  }

  @Test
  public void testGetLatestActionListByFinishAndSuccess() {
    ActionInfo actionInfo = newCacheAction(1, "Test", "Test");
    List<ActionInfo> actionInfoList =
        actionDao.getLatestActions("cache", 0, false, true);
    // Get from empty table
    Assert.assertEquals(0, actionInfoList.size());
    actionDao.insert(actionInfo);
    actionInfo.setActionId(2);
    actionDao.insert(actionInfo);
    actionInfoList = actionDao.getLatestActions("cache", 0, false, true);
    Assert.assertEquals(2, actionInfoList.size());
    actionInfoList = actionDao.getByIds(Arrays.asList(1L, 2L));
    Assert.assertEquals(2, actionInfoList.size());
    // Size limit of 1 caps the result.
    actionInfoList = actionDao.getLatestActions("cache", 1, false, true);
    Assert.assertEquals(1, actionInfoList.size());
  }

  @Test
  public void testGetLatestActionListByFinish() {
    ActionInfo actionInfo = newCacheAction(1, "Test", "Test");
    List<ActionInfo> actionInfoList =
        actionDao.getLatestActions("cache", 0);
    // Get from empty table
    Assert.assertEquals(0, actionInfoList.size());
    actionDao.insert(actionInfo);
    actionInfo.setActionId(2);
    actionDao.insert(actionInfo);
    actionInfoList = actionDao.getLatestActions("cache", 0, true);
    Assert.assertEquals(2, actionInfoList.size());
    actionInfoList = actionDao.getByIds(Arrays.asList(1L, 2L));
    Assert.assertEquals(2, actionInfoList.size());
    actionInfoList = actionDao.getLatestActions("cache", 1, true);
    Assert.assertEquals(1, actionInfoList.size());
  }

  @Test
  public void testGetLatestActionListBySuccess() {
    ActionInfo actionInfo = newCacheAction(1, "Test", "Test");
    List<ActionInfo> actionInfoList =
        actionDao.getLatestActions("cache", false, 0);
    // Get from empty table
    Assert.assertEquals(0, actionInfoList.size());
    actionDao.insert(actionInfo);
    actionInfo.setActionId(2);
    actionDao.insert(actionInfo);
    actionInfoList = actionDao.getLatestActions("cache", false, 0);
    Assert.assertEquals(2, actionInfoList.size());
    actionInfoList = actionDao.getByIds(Arrays.asList(1L, 2L));
    Assert.assertEquals(2, actionInfoList.size());
    actionInfoList = actionDao.getLatestActions("cache", false, 1);
    Assert.assertEquals(1, actionInfoList.size());
  }

  @Test
  public void testMaxId() throws Exception {
    ActionInfo actionInfo = newCacheAction(1, "Test", "Test");
    Assert.assertEquals(0, actionDao.getMaxId());
    actionDao.insert(actionInfo);
    // getMaxId returns the next usable id (max stored id + 1).
    Assert.assertEquals(2, actionDao.getMaxId());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestRuleDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestRuleDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import java.util.List;
/**
 * Tests for {@link RuleDao}: insert with lookup by id, deletion (single and
 * all), and the two update overloads (state-only, and checked-counters).
 */
public class TestRuleDao extends TestDaoUtil {
  private RuleDao ruleDao;

  @Before
  public void initRuleDao() throws Exception {
    initDao();
    ruleDao = new RuleDao(druidPool.getDataSource());
  }

  @After
  public void closeRuleDao() throws Exception {
    closeDao();
    ruleDao = null;
  }

  @Test
  public void testInsertGetRule() throws Exception {
    String rule = "file : accessCount(10m) > 20 \n\n"
        + "and length() > 3 | cache";
    long submitTime = System.currentTimeMillis();
    RuleInfo info1 = new RuleInfo(0, submitTime,
        rule, RuleState.ACTIVE, 0, 0, 0);
    ruleDao.insert(info1);
    RuleInfo info11 = ruleDao.getById(info1.getId());
    // assertEquals reports both values on failure; assertTrue(equals) does not.
    Assert.assertEquals(info1, info11);
    RuleInfo info2 = new RuleInfo(1, submitTime,
        rule, RuleState.ACTIVE, 0, 0, 0);
    ruleDao.insert(info2);
    RuleInfo info21 = ruleDao.getById(info2.getId());
    Assert.assertNotEquals(info11, info21);
    List<RuleInfo> infos = ruleDao.getAll();
    Assert.assertEquals(2, infos.size());
    ruleDao.delete(info1.getId());
    infos = ruleDao.getAll();
    Assert.assertEquals(1, infos.size());
    ruleDao.deleteAll();
    infos = ruleDao.getAll();
    Assert.assertEquals(0, infos.size());
  }

  @Test
  public void testUpdateRule() throws Exception {
    String rule = "file : accessCount(10m) > 20 \n\n"
        + "and length() > 3 | cache";
    long submitTime = System.currentTimeMillis();
    RuleInfo info1 = new RuleInfo(20L, submitTime,
        rule, RuleState.ACTIVE,
        12, 12, 12);
    ruleDao.insert(info1);
    // State-only update must not touch the counters.
    long rid = ruleDao.update(info1.getId(),
        RuleState.DISABLED.getValue());
    Assert.assertEquals(info1.getId(), rid);
    info1 = ruleDao.getById(info1.getId());
    Assert.assertEquals(12L, info1.getNumChecked());
    // Counter update overload replaces the checked/cmdlet counts.
    ruleDao.update(rid, System.currentTimeMillis(), 100, 200);
    RuleInfo info2 = ruleDao.getById(rid);
    Assert.assertEquals(100L, info2.getNumChecked());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestCmdletDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestCmdletDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.springframework.dao.EmptyResultDataAccessException;
import java.util.ArrayList;
import java.util.List;
public class TestCmdletDao extends TestDaoUtil {
private CmdletDao cmdletDao;
@Before
public void initCmdletDao() throws Exception {
initDao();
cmdletDao = new CmdletDao(druidPool.getDataSource());
}
@After
public void closeCmdletDao() throws Exception {
closeDao();
cmdletDao = null;
}
@Test
public void testInsertGetCmdlet() throws Exception {
CmdletInfo cmdlet1 = new CmdletInfo(0, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet2 = new CmdletInfo(1, 78,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
cmdletDao.insert(new CmdletInfo[]{cmdlet1, cmdlet2});
List<CmdletInfo> cmdlets = cmdletDao.getAll();
Assert.assertTrue(cmdlets.size() == 2);
}
@Test
public void testGetAPageOfAction() {
CmdletInfo cmdlet1 = new CmdletInfo(0, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet2 = new CmdletInfo(1, 78,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
cmdletDao.insert(new CmdletInfo[]{cmdlet1, cmdlet2});
List<String> order = new ArrayList<>();
order.add("cid");
List<Boolean> desc = new ArrayList<>();
desc.add(false);
Assert.assertTrue(cmdletDao.getAPageOfCmdlet(1, 1,
order, desc).get(0).equals(cmdlet2));
}
@Test
public void testUpdateCmdlet() throws Exception {
CmdletInfo cmdlet1 = new CmdletInfo(0, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet2 = new CmdletInfo(1, 78,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
cmdletDao.insert(new CmdletInfo[]{cmdlet1, cmdlet2});
cmdlet1.setState(CmdletState.DONE);
cmdletDao.update(cmdlet1);
CmdletInfo dbcmdlet1 = cmdletDao.getById(cmdlet1.getCid());
Assert.assertTrue(dbcmdlet1.equals(cmdlet1));
try {
cmdletDao.getById(2000L);
} catch (EmptyResultDataAccessException e) {
Assert.assertTrue(true);
}
}
@Test
public void testGetByCondition() throws Exception {
CmdletInfo command1 = new CmdletInfo(0, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo command2 = new CmdletInfo(1, 78,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
cmdletDao.insert(new CmdletInfo[]{command1, command2});
List<CmdletInfo> commandInfos = cmdletDao
.getByCondition(null, null, null);
Assert.assertTrue(commandInfos.size() == 2);
commandInfos = cmdletDao
.getByCondition(null, null, CmdletState.PAUSED);
Assert.assertTrue(commandInfos.size() == 1);
}
@Test
public void testDeleteACmdlet() throws Exception {
CmdletInfo cmdlet1 = new CmdletInfo(0, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet2 = new CmdletInfo(1, 78,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
cmdletDao.insert(new CmdletInfo[]{cmdlet1, cmdlet2});
cmdletDao.delete(1);
List<CmdletInfo> cmdlets = cmdletDao.getAll();
Assert.assertTrue(cmdlets.size() == 1);
}
@Test
public void testMaxId() throws Exception {
CmdletInfo cmdlet1 = new CmdletInfo(0, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet2 = new CmdletInfo(1, 78,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
Assert.assertTrue(cmdletDao.getMaxId() == 0);
cmdletDao.insert(new CmdletInfo[]{cmdlet1, cmdlet2});
Assert.assertTrue(cmdletDao.getMaxId() == 2);
}
@Test
public void testgetByRid() throws Exception{
CmdletInfo cmdlet1 = new CmdletInfo(0, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet2 = new CmdletInfo(1, 1,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
CmdletInfo cmdlet3 = new CmdletInfo(2, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet4 = new CmdletInfo(3, 1,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
CmdletInfo cmdlet5 = new CmdletInfo(4, 1,
CmdletState.EXECUTING, "test", 123123333L, 232444444L);
CmdletInfo cmdlet6 = new CmdletInfo(5, 1,
CmdletState.PAUSED, "tt", 123178333L, 232444994L);
cmdletDao.insert(new CmdletInfo[]{cmdlet1, cmdlet2, cmdlet3, cmdlet4, cmdlet5, cmdlet6});
List<CmdletInfo> cmdlets = cmdletDao.getByRid(1, 1, 2);
List<String> order = new ArrayList<>();
order.add("cid");
List<Boolean> desc = new ArrayList<>();
desc.add(false);
Assert.assertTrue(cmdlets.size() == 2);
cmdlets = cmdletDao.getByRid(1, 1, 2, order, desc);
Assert.assertTrue(cmdlets.size() == 2);
Assert.assertTrue(cmdlets.get(0).equals(cmdlet2));
Assert.assertTrue(cmdletDao.getNumByRid(1) == 6);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestClusterInfoDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestClusterInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.ClusterInfo;
import java.util.List;
/**
 * Tests for {@link ClusterInfoDao}: single/batch insert, lookup, and field updates.
 * Database lifecycle is handled by {@link TestDaoUtil} (initDao/closeDao).
 */
public class TestClusterInfoDao extends TestDaoUtil {
  private ClusterInfoDao clusterInfoDao;

  @Before
  public void initClusterDao() throws Exception {
    initDao();
    clusterInfoDao = new ClusterInfoDao(druidPool.getDataSource());
  }

  @After
  public void closeClusterDao() throws Exception {
    closeDao();
    clusterInfoDao = null;
  }

  /** Builds a ClusterInfo whose string fields are all set to {@code value}. */
  private static ClusterInfo newClusterInfo(int cid, String value) {
    ClusterInfo clusterInfo = new ClusterInfo();
    clusterInfo.setCid(cid);
    clusterInfo.setType(value);
    clusterInfo.setState(value);
    clusterInfo.setConfPath(value);
    clusterInfo.setUrl(value);
    clusterInfo.setName(value);
    return clusterInfo;
  }

  @Test
  public void testInsertAndGetSingleRecord() {
    ClusterInfo clusterInfo = newClusterInfo(1, "test");
    clusterInfoDao.insert(clusterInfo);
    Assert.assertEquals(clusterInfo, clusterInfoDao.getById(1));
  }

  @Test
  public void testBatchInssertAndQuery() {
    ClusterInfo[] clusterInfos = new ClusterInfo[2];
    clusterInfos[0] = newClusterInfo(1, "test");
    clusterInfos[1] = newClusterInfo(1, "test1");
    clusterInfoDao.insert(clusterInfos);
    // cid is assigned by the database; the second inserted row is expected to get cid 2.
    clusterInfos[1].setCid(2);
    List<ClusterInfo> clusterInfoList = clusterInfoDao.getAll();
    for (int i = 0; i < 2; i++) {
      Assert.assertEquals(clusterInfos[i], clusterInfoList.get(i));
    }
  }

  @Test
  public void testUpdate() {
    ClusterInfo clusterInfo = newClusterInfo(1, "test");
    clusterInfoDao.insert(clusterInfo);
    // Mutate the local copy to the expected post-update values.
    clusterInfo.setState("test1");
    clusterInfo.setType("test1");
    clusterInfoDao.updateState(1, "test1");
    clusterInfoDao.updateType(1, "test1");
    Assert.assertEquals(clusterInfo, clusterInfoDao.getById(1));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestAddTableOpListener.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestAddTableOpListener.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.metastore.MetaStore;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import static org.mockito.Mockito.mock;
/**
 * Tests that adding access-count tables to a finer-grained deque triggers
 * aggregation of a coarser-grained table (second -> minute, minute -> hour,
 * hour -> day) via the corresponding {@link TableAddOpListener}.
 */
public class TestAddTableOpListener {
  MetaStore adapter = mock(MetaStore.class);
  ExecutorService executorService = Executors.newFixedThreadPool(4);
  AccessCountTableAggregator aggregator = new AccessCountTableAggregator(
      mock(MetaStore.class));

  @Test
  public void testMinuteTableListener() throws InterruptedException {
    Long oneSec = 1000L;
    TableEvictor tableEvictor = new CountEvictor(adapter, 10);
    AccessCountTableDeque minuteTableDeque = new AccessCountTableDeque(tableEvictor);
    TableAddOpListener minuteTableListener =
        new TableAddOpListener.MinuteTableListener(minuteTableDeque, aggregator, executorService);
    AccessCountTableDeque secondTableDeque =
        new AccessCountTableDeque(tableEvictor, minuteTableListener);

    AccessCountTable table1 =
        new AccessCountTable(45 * oneSec, 50 * oneSec);
    AccessCountTable table2 =
        new AccessCountTable(50 * oneSec, 55 * oneSec);
    AccessCountTable table3 =
        new AccessCountTable(55 * oneSec, 60 * oneSec);

    secondTableDeque.addAndNotifyListener(table1);
    Assert.assertEquals(0, minuteTableDeque.size());
    secondTableDeque.addAndNotifyListener(table2);
    Assert.assertEquals(0, minuteTableDeque.size());
    // The third table completes the minute boundary; the listener runs on the
    // executor, so wait briefly before asserting.
    secondTableDeque.addAndNotifyListener(table3);
    Thread.sleep(1000);

    Assert.assertEquals(1, minuteTableDeque.size());
    AccessCountTable expected = new AccessCountTable(0L, 60 * oneSec);
    // JUnit's assertEquals takes (expected, actual); the original order was inverted.
    Assert.assertEquals(expected, minuteTableDeque.poll());
  }

  @Test
  public void testHourTableListener() throws InterruptedException {
    Long oneMin = 60 * 1000L;
    TableEvictor tableEvictor = new CountEvictor(adapter, 10);
    AccessCountTableDeque hourTableDeque = new AccessCountTableDeque(tableEvictor);
    TableAddOpListener hourTableListener =
        new TableAddOpListener.HourTableListener(hourTableDeque, aggregator, executorService);
    AccessCountTableDeque minuteTableDeque =
        new AccessCountTableDeque(tableEvictor, hourTableListener);

    AccessCountTable table1 =
        new AccessCountTable(57 * oneMin, 58 * oneMin);
    AccessCountTable table2 =
        new AccessCountTable(58 * oneMin, 59 * oneMin);
    AccessCountTable table3 =
        new AccessCountTable(59 * oneMin, 60 * oneMin);

    minuteTableDeque.addAndNotifyListener(table1);
    Assert.assertEquals(0, hourTableDeque.size());
    minuteTableDeque.addAndNotifyListener(table2);
    Assert.assertEquals(0, hourTableDeque.size());
    // The third table completes the hour boundary; wait for the async listener.
    minuteTableDeque.addAndNotifyListener(table3);
    Thread.sleep(1000);

    Assert.assertEquals(1, hourTableDeque.size());
    AccessCountTable expected = new AccessCountTable(0L, 60 * oneMin);
    Assert.assertEquals(expected, hourTableDeque.poll());
  }

  @Test
  public void testDayTableListener() throws InterruptedException {
    Long oneHour = 60 * 60 * 1000L;
    TableEvictor tableEvictor = new CountEvictor(adapter, 10);
    AccessCountTableDeque dayTableDeque = new AccessCountTableDeque(tableEvictor);
    TableAddOpListener dayTableListener =
        new TableAddOpListener.DayTableListener(dayTableDeque, aggregator, executorService);
    AccessCountTableDeque hourTableDeque =
        new AccessCountTableDeque(tableEvictor, dayTableListener);

    AccessCountTable table1 =
        new AccessCountTable(21 * oneHour, 22 * oneHour);
    AccessCountTable table2 =
        new AccessCountTable(22 * oneHour, 23 * oneHour);
    AccessCountTable table3 =
        new AccessCountTable(23 * oneHour, 24 * oneHour);

    hourTableDeque.addAndNotifyListener(table1);
    Assert.assertEquals(0, dayTableDeque.size());
    hourTableDeque.addAndNotifyListener(table2);
    Assert.assertEquals(0, dayTableDeque.size());
    // The third table completes the day boundary; wait for the async listener.
    hourTableDeque.addAndNotifyListener(table3);
    Thread.sleep(1000);

    Assert.assertEquals(1, dayTableDeque.size());
    AccessCountTable today = new AccessCountTable(0L, 24 * oneHour);
    Assert.assertEquals(today, dayTableDeque.poll());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestDataNodeStorageInfoDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestDataNodeStorageInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.DataNodeStorageInfo;
import java.util.List;
/**
 * Tests for {@link DataNodeStorageInfoDao}: insert, lookup by uuid, and delete.
 * Database lifecycle is handled by {@link TestDaoUtil} (initDao/closeDao).
 */
public class TestDataNodeStorageInfoDao extends TestDaoUtil {
  private DataNodeStorageInfoDao dataNodeStorageInfoDao;

  @Before
  public void initDataNodeInfoDao() throws Exception {
    initDao();
    dataNodeStorageInfoDao = new DataNodeStorageInfoDao(druidPool.getDataSource());
  }

  @After
  public void closeDataNodeInfoDao() throws Exception {
    closeDao();
    dataNodeStorageInfoDao = null;
  }

  @Test
  public void testInsertGetDataInfo() throws Exception {
    DataNodeStorageInfo insertInfo1 = new DataNodeStorageInfo("uuid", 10, 10,
        "storage_id", 0, 0, 0, 0, 0);
    dataNodeStorageInfoDao.insert(insertInfo1);
    List<DataNodeStorageInfo> getInfo1 = dataNodeStorageInfoDao.getByUuid("uuid");
    Assert.assertEquals(insertInfo1, getInfo1.get(0));

    // uuid lookup is case-sensitive: "UUID" is a distinct row from "uuid".
    DataNodeStorageInfo insertInfo2 = new DataNodeStorageInfo("UUID", 10, 10,
        "STORAGE_ID", 1, 1, 1, 1, 1);
    dataNodeStorageInfoDao.insert(insertInfo2);
    List<DataNodeStorageInfo> getInfo2 = dataNodeStorageInfoDao.getByUuid("UUID");
    Assert.assertEquals(insertInfo2, getInfo2.get(0));

    List<DataNodeStorageInfo> infos = dataNodeStorageInfoDao.getAll();
    Assert.assertEquals(2, infos.size());
    dataNodeStorageInfoDao.delete(insertInfo1.getUuid());
    infos = dataNodeStorageInfoDao.getAll();
    Assert.assertEquals(1, infos.size());
    dataNodeStorageInfoDao.deleteAll();
    infos = dataNodeStorageInfoDao.getAll();
    Assert.assertEquals(0, infos.size());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestStorageHistoryDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestStorageHistoryDao.java | /**
* Created by cy on 17-6-19.
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.StorageCapacity;
import java.util.List;
/**
 * Tests for {@link StorageHistoryDao}: inserting capacity history samples,
 * range queries, counting, and pruning old records.
 * Database lifecycle is handled by {@link TestDaoUtil} (initDao/closeDao).
 */
public class TestStorageHistoryDao extends TestDaoUtil {
  private StorageHistoryDao storageHistDao;

  @Before
  public void initStorageDao() throws Exception {
    initDao();
    storageHistDao = new StorageHistoryDao(druidPool.getDataSource());
  }

  @After
  public void closeStorageDao() throws Exception {
    closeDao();
    storageHistDao = null;
  }

  @Test
  public void testInsertGetStorageTable() throws Exception {
    // Three samples of the same storage type at timestamps 1000/2000/3000.
    StorageCapacity[] storageCapacities = new StorageCapacity[3];
    storageCapacities[0] = new StorageCapacity("type1", 1000L, 10L, 1L);
    storageCapacities[1] = new StorageCapacity("type1", 2000L, 10L, 2L);
    storageCapacities[2] = new StorageCapacity("type1", 3000L, 10L, 3L);
    storageHistDao.insertStorageHistTable(storageCapacities, 1000);

    List<StorageCapacity> capacities = storageHistDao.getStorageHistoryData(
        "type1", 1000, 1000, 3000);
    Assert.assertEquals(storageCapacities.length, capacities.size());
    Assert.assertEquals(storageCapacities.length,
        storageHistDao.getNumberOfStorageHistoryData("type1", 1000));

    // Pruning records older than timestamp 1000 should drop exactly one sample.
    storageHistDao.deleteOldRecords("type1", 1000, 1000L);
    Assert.assertEquals(storageCapacities.length - 1,
        storageHistDao.getNumberOfStorageHistoryData("type1", 1000));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestFileInfoDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestFileInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.FileInfo;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Tests for {@link FileInfoDao}: insert, lookup by path/id/prefix, delete,
 * and storage-policy updates.
 * Database lifecycle is handled by {@link TestDaoUtil} (initDao/closeDao).
 */
public class TestFileInfoDao extends TestDaoUtil {
  private FileInfoDao fileInfoDao;

  @Before
  public void initFileDao() throws Exception {
    initDao();
    fileInfoDao = new FileInfoDao(druidPool.getDataSource());
  }

  @After
  public void closeFileDao() throws Exception {
    closeDao();
    fileInfoDao = null;
  }

  @Test
  public void testInsetGetDeleteFiles() throws Exception {
    String path = "/testFile";
    long length = 123L;
    boolean isDir = false;
    short blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    short permission = 1;
    String owner = "root";
    String group = "admin";
    long fileId = 312321L;
    byte storagePolicy = 0;
    byte erasureCodingPolicy = 0;
    FileInfo fileInfo = new FileInfo(path, fileId, length, isDir, blockReplication, blockSize,
        modTime, accessTime, permission, owner, group, storagePolicy, erasureCodingPolicy);
    fileInfoDao.insert(fileInfo);
    // The same row must be reachable by path and by file id.
    FileInfo file1 = fileInfoDao.getByPath("/testFile");
    Assert.assertEquals(fileInfo, file1);
    FileInfo file2 = fileInfoDao.getById(fileId);
    Assert.assertEquals(fileInfo, file2);

    FileInfo fileInfo1 = new FileInfo(path, fileId + 1, length, isDir, blockReplication, blockSize,
        modTime, accessTime, permission, owner, group, storagePolicy, erasureCodingPolicy);
    fileInfoDao.insert(fileInfo1);
    List<FileInfo> fileInfos = fileInfoDao.getFilesByPrefix("/testaaFile");
    Assert.assertEquals(0, fileInfos.size());
    fileInfos = fileInfoDao.getFilesByPrefix("/testFile");
    Assert.assertEquals(2, fileInfos.size());

    fileInfoDao.deleteById(fileId);
    fileInfos = fileInfoDao.getAll();
    Assert.assertEquals(1, fileInfos.size());
    fileInfoDao.deleteAll();
    fileInfos = fileInfoDao.getAll();
    Assert.assertEquals(0, fileInfos.size());
  }

  @Test
  public void testInsertUpdateFiles() throws Exception {
    String path = "/testFile";
    long length = 123L;
    boolean isDir = false;
    short blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    short permission = 1;
    String owner = "root";
    String group = "admin";
    long fileId = 312321L;
    byte storagePolicy = 0;
    byte erasureCodingPolicy = 0;
    // Note: the original version built owner/group id-name maps here that were
    // never used; they have been removed.
    FileInfo fileInfo = new FileInfo(path, fileId, length, isDir, blockReplication, blockSize,
        modTime, accessTime, permission, owner, group, storagePolicy, erasureCodingPolicy);
    fileInfoDao.insert(fileInfo);
    // update(path, policy) should persist the new storage policy for that path.
    fileInfoDao.update(path, 10);
    FileInfo file = fileInfoDao.getById(fileId);
    fileInfo.setStoragePolicy((byte) 10);
    Assert.assertEquals(fileInfo, file);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestAccessCountTableManager.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestAccessCountTableManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.dbunit.Assertion;
import org.dbunit.dataset.IDataSet;
import org.dbunit.dataset.ITable;
import org.dbunit.dataset.SortedTable;
import org.dbunit.dataset.xml.XmlDataSet;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.metastore.DBTest;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metastore.utils.Constants;
import org.smartdata.metastore.utils.TimeGranularity;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.FileInfo;
import java.sql.Connection;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.Mockito.mock;
public class TestAccessCountTableManager extends DBTest {
@Test
public void testAccessCountTableManager() throws InterruptedException {
  MetaStore adapter = mock(MetaStore.class);
  // Used by AccessCountTableAggregator
  AccessCountTableManager manager = new AccessCountTableManager(adapter);
  Long firstDayEnd = 24 * 60 * 60 * 1000L;
  // A table ending exactly on the first day boundary should cascade into
  // second/minute/hour/day deques.
  AccessCountTable accessCountTable =
      new AccessCountTable(firstDayEnd - 5 * 1000, firstDayEnd);
  manager.addTable(accessCountTable);
  // Aggregation is asynchronous; allow it time to finish before asserting.
  Thread.sleep(5000);

  Map<TimeGranularity, AccessCountTableDeque> map = manager.getTableDeques();
  AccessCountTableDeque second = map.get(TimeGranularity.SECOND);
  Assert.assertEquals(1, second.size());
  Assert.assertEquals(accessCountTable, second.peek());

  AccessCountTableDeque minute = map.get(TimeGranularity.MINUTE);
  AccessCountTable minuteTable =
      new AccessCountTable(firstDayEnd - 60 * 1000, firstDayEnd);
  Assert.assertEquals(1, minute.size());
  Assert.assertEquals(minuteTable, minute.peek());

  AccessCountTableDeque hour = map.get(TimeGranularity.HOUR);
  AccessCountTable hourTable =
      new AccessCountTable(firstDayEnd - 60 * 60 * 1000, firstDayEnd);
  Assert.assertEquals(1, hour.size());
  Assert.assertEquals(hourTable, hour.peek());

  AccessCountTableDeque day = map.get(TimeGranularity.DAY);
  AccessCountTable dayTable =
      new AccessCountTable(firstDayEnd - 24 * 60 * 60 * 1000, firstDayEnd);
  Assert.assertEquals(1, day.size());
  Assert.assertEquals(dayTable, day.peek());
}
/**
 * Creates the three expected-result access-count tables used by the dbunit
 * comparisons. Uses try-with-resources so the Statement is closed even if a
 * create fails (the original leaked it on exception).
 */
private void createTables(Connection connection) throws Exception {
  try (Statement statement = connection.createStatement()) {
    statement.execute(AccessCountDao.createAccessCountTableSQL("expect1"));
    statement.execute(AccessCountDao.createAccessCountTableSQL("expect2"));
    statement.execute(AccessCountDao.createAccessCountTableSQL("expect3"));
  }
}
@Test
public void testAddAccessCountInfo() throws Exception {
  AccessCountTableManager manager = initTestEnvironment();
  // Access pattern within [0, 5000): file1 once, file2 twice, file3 three times;
  // the event at t=5000 closes the interval.
  String[] paths = {"file1", "file2", "file2", "file3", "file3", "file3", "file3"};
  long[] times = {0, 1, 2, 2, 3, 4, 5000};
  List<FileAccessEvent> accessEvents = new ArrayList<>();
  for (int i = 0; i < paths.length; i++) {
    accessEvents.add(new FileAccessEvent(paths[i], times[i]));
  }
  manager.onAccessEventsArrived(accessEvents);
  assertTableEquals(new AccessCountTable(0L, 5000L).getTableName(), "expect1");
}
@Test
public void testAccessFileNotInNamespace() throws Exception {
  AccessCountTableManager manager = initTestEnvironment();
  // Phase 1: file1..file3 exist in the namespace (see prepareFiles); file4 does
  // not yet, so its access at t=5 should be dropped from the aggregated tables.
  List<FileAccessEvent> accessEvents = new ArrayList<>();
  accessEvents.add(new FileAccessEvent("file1", 0));
  accessEvents.add(new FileAccessEvent("file2", 1));
  accessEvents.add(new FileAccessEvent("file2", 2));
  accessEvents.add(new FileAccessEvent("file3", 2));
  accessEvents.add(new FileAccessEvent("file3", 3));
  accessEvents.add(new FileAccessEvent("file3", 4));
  accessEvents.add(new FileAccessEvent("file4", 5));
  accessEvents.add(new FileAccessEvent("file3", 5000));
  accessEvents.add(new FileAccessEvent("file3", 10000));
  manager.onAccessEventsArrived(accessEvents);
  assertTableEquals(new AccessCountTable(0L, 5000L).getTableName(), "expect1");
  assertTableEquals(new AccessCountTable(5000L, 10000L).getTableName(), "expect2");
  // Phase 2: file4 is still unknown, so only file3 accesses land in the tables.
  accessEvents.clear();
  accessEvents.add(new FileAccessEvent("file4", 10001));
  accessEvents.add(new FileAccessEvent("file4", 10002));
  accessEvents.add(new FileAccessEvent("file3", 15000));
  accessEvents.add(new FileAccessEvent("file4", 15001));
  accessEvents.add(new FileAccessEvent("file3", 20000));
  manager.onAccessEventsArrived(accessEvents);
  assertTableEquals(new AccessCountTable(10000L, 15000L).getTableName(), "expect2");
  assertTableEquals(new AccessCountTable(15000L, 20000L).getTableName(), "expect2");
  // Phase 3: add file4 to the namespace; from now on its accesses must be counted
  // (expect3 is the dataset that includes file4).
  insertNewFile(new MetaStore(druidPool), "file4", 4L);
  accessEvents.clear();
  accessEvents.add(new FileAccessEvent("file4", 25000));
  manager.onAccessEventsArrived(accessEvents);
  assertTableEquals(new AccessCountTable(20000L, 25000L).getTableName(), "expect3");
}
/**
 * Sets up a MetaStore-backed AccessCountTableManager for the access-event tests.
 * Order matters: the expected-result tables must be created before the dbunit
 * dataset is loaded, and files must exist in the namespace before events arrive.
 */
private AccessCountTableManager initTestEnvironment() throws Exception {
  MetaStore metaStore = new MetaStore(druidPool);
  createTables(databaseTester.getConnection().getConnection());
  // files.xml holds the expected contents of the expect1/expect2/expect3 tables.
  IDataSet dataSet = new XmlDataSet(getClass().getClassLoader().getResourceAsStream("files.xml"));
  databaseTester.setDataSet(dataSet);
  databaseTester.onSetup();
  prepareFiles(metaStore);
  return new AccessCountTableManager(metaStore);
}
/**
 * Asserts that the database table {@code actualTableName}, sorted by fid,
 * matches the expected dataset table {@code expectedDataSet} from files.xml.
 */
private void assertTableEquals(String actualTableName, String expectedDataSet) throws Exception {
  ITable actualTable = databaseTester.getConnection().createTable(actualTableName);
  ITable expectedTable = databaseTester.getDataSet().getTable(expectedDataSet);
  // Sort the actual rows by fid so the comparison is order-independent.
  SortedTable sortedByFid = new SortedTable(actualTable, new String[] {"fid"});
  sortedByFid.setUseComparable(true);
  Assertion.assertEquals(expectedTable, sortedByFid);
}
private void prepareFiles(MetaStore metaStore) throws MetaStoreException {
List<FileInfo> statusInternals = new ArrayList<>();
for (int id = 1; id < 4; id++) {
statusInternals.add(
new FileInfo("file" + id,
id,
123L,
false,
(short) 1,
128 * 1024L,
123123123L,
123123120L,
(short) 1,
"root",
"admin",
(byte) 0,
(byte) 0));
}
metaStore.insertFiles(statusInternals.toArray(new FileInfo[0]));
}
private void insertNewFile(MetaStore metaStore, String file, Long fid)
throws MetaStoreException {
FileInfo finfo = new FileInfo(file,
fid,
123L,
false,
(short) 1,
128 * 1024L,
123123123L,
123123120L,
(short) 1,
"root",
"admin",
(byte) 0,
(byte) 0);
metaStore.insertFile(finfo);
}
@Test
public void testGetTables() throws MetaStoreException {
MetaStore adapter = mock(MetaStore.class);
TableEvictor tableEvictor = new CountEvictor(adapter, 20);
Map<TimeGranularity, AccessCountTableDeque> map = new HashMap<>();
AccessCountTableDeque dayDeque = new AccessCountTableDeque(tableEvictor);
AccessCountTable firstDay = new AccessCountTable(0L, Constants.ONE_DAY_IN_MILLIS);
dayDeque.addAndNotifyListener(firstDay);
map.put(TimeGranularity.DAY, dayDeque);
AccessCountTableDeque hourDeque = new AccessCountTableDeque(tableEvictor);
AccessCountTable firstHour =
new AccessCountTable(23 * Constants.ONE_HOUR_IN_MILLIS, 24 * Constants.ONE_HOUR_IN_MILLIS);
AccessCountTable secondHour =
new AccessCountTable(24 * Constants.ONE_HOUR_IN_MILLIS, 25 * Constants.ONE_HOUR_IN_MILLIS);
hourDeque.addAndNotifyListener(firstHour);
hourDeque.addAndNotifyListener(secondHour);
map.put(TimeGranularity.HOUR, hourDeque);
AccessCountTableDeque minuteDeque = new AccessCountTableDeque(tableEvictor);
Integer numMins = 25 * 60;
AccessCountTable firstMin =
new AccessCountTable(
(numMins - 1) * Constants.ONE_MINUTE_IN_MILLIS,
numMins * Constants.ONE_MINUTE_IN_MILLIS);
AccessCountTable secondMin =
new AccessCountTable(
numMins * Constants.ONE_MINUTE_IN_MILLIS,
(numMins + 1) * Constants.ONE_MINUTE_IN_MILLIS);
minuteDeque.addAndNotifyListener(firstMin);
minuteDeque.addAndNotifyListener(secondMin);
map.put(TimeGranularity.MINUTE, minuteDeque);
AccessCountTableDeque secondDeque = new AccessCountTableDeque(tableEvictor);
Integer numSeconds = (25 * 60 + 1) * 60;
AccessCountTable firstFiveSeconds =
new AccessCountTable(
(numSeconds - 5) * Constants.ONE_SECOND_IN_MILLIS,
numSeconds * Constants.ONE_SECOND_IN_MILLIS);
AccessCountTable secondFiveSeconds =
new AccessCountTable(
numSeconds * Constants.ONE_SECOND_IN_MILLIS,
(numSeconds + 5) * Constants.ONE_SECOND_IN_MILLIS);
secondDeque.addAndNotifyListener(firstFiveSeconds);
secondDeque.addAndNotifyListener(secondFiveSeconds);
map.put(TimeGranularity.SECOND, secondDeque);
List<AccessCountTable> firstResult =
AccessCountTableManager.getTables(
map, adapter, (numSeconds + 5) * Constants.ONE_SECOND_IN_MILLIS);
Assert.assertTrue(firstResult.size() == 4);
Assert.assertEquals(firstResult.get(0), firstDay);
Assert.assertEquals(firstResult.get(1), secondHour);
Assert.assertEquals(firstResult.get(2), secondMin);
Assert.assertEquals(firstResult.get(3), secondFiveSeconds);
List<AccessCountTable> secondResult =
AccessCountTableManager.getTables(
map, adapter, numSeconds * Constants.ONE_SECOND_IN_MILLIS);
Assert.assertTrue(secondResult.size() == 4);
AccessCountTable expectDay =
new AccessCountTable(5 * Constants.ONE_SECOND_IN_MILLIS, Constants.ONE_DAY_IN_MILLIS);
Assert.assertEquals(expectDay, secondResult.get(0));
List<AccessCountTable> thirdResult =
AccessCountTableManager.getTables(
map, adapter, secondFiveSeconds.getEndTime() - 23 * Constants.ONE_HOUR_IN_MILLIS);
Assert.assertTrue(thirdResult.size() == 4);
Assert.assertEquals(thirdResult.get(0), firstHour);
List<AccessCountTable> fourthResult =
AccessCountTableManager.getTables(
map, adapter, secondFiveSeconds.getEndTime() - 24 * Constants.ONE_HOUR_IN_MILLIS);
Assert.assertTrue(fourthResult.size() == 3);
Assert.assertEquals(fourthResult.get(0), secondHour);
}
@Test
public void testGetTablesCornerCase() throws MetaStoreException {
MetaStore adapter = mock(MetaStore.class);
TableEvictor tableEvictor = new CountEvictor(adapter, 20);
Map<TimeGranularity, AccessCountTableDeque> map = new HashMap<>();
AccessCountTableDeque minute = new AccessCountTableDeque(tableEvictor);
map.put(TimeGranularity.MINUTE, minute);
AccessCountTableDeque secondDeque = new AccessCountTableDeque(tableEvictor);
AccessCountTable firstFiveSeconds =
new AccessCountTable(0L, 5 * Constants.ONE_SECOND_IN_MILLIS);
AccessCountTable secondFiveSeconds =
new AccessCountTable(5 * Constants.ONE_SECOND_IN_MILLIS,
10 * Constants.ONE_SECOND_IN_MILLIS);
secondDeque.addAndNotifyListener(firstFiveSeconds);
secondDeque.addAndNotifyListener(secondFiveSeconds);
map.put(TimeGranularity.SECOND, secondDeque);
List<AccessCountTable> result = AccessCountTableManager.getTables(map, adapter,
2 * Constants.ONE_MINUTE_IN_MILLIS);
Assert.assertTrue(result.size() == 2);
Assert.assertTrue(result.get(0).equals(firstFiveSeconds));
Assert.assertTrue(result.get(1).equals(secondFiveSeconds));
}
@Test
public void testGetTablesCornerCase2() throws MetaStoreException {
MetaStore adapter = mock(MetaStore.class);
TableEvictor tableEvictor = new CountEvictor(adapter, 20);
Map<TimeGranularity, AccessCountTableDeque> map = new HashMap<>();
AccessCountTableDeque minute = new AccessCountTableDeque(tableEvictor);
AccessCountTable firstMinute =
new AccessCountTable(0L, Constants.ONE_MINUTE_IN_MILLIS);
minute.addAndNotifyListener(firstMinute);
map.put(TimeGranularity.MINUTE, minute);
AccessCountTableDeque secondDeque = new AccessCountTableDeque(tableEvictor);
AccessCountTable firstFiveSeconds =
new AccessCountTable(
55 * Constants.ONE_SECOND_IN_MILLIS, 60 * Constants.ONE_SECOND_IN_MILLIS);
AccessCountTable secondFiveSeconds =
new AccessCountTable(60 * Constants.ONE_SECOND_IN_MILLIS,
65 * Constants.ONE_SECOND_IN_MILLIS);
AccessCountTable thirdFiveSeconds =
new AccessCountTable(110 * Constants.ONE_SECOND_IN_MILLIS,
115 * Constants.ONE_SECOND_IN_MILLIS);
secondDeque.addAndNotifyListener(firstFiveSeconds);
secondDeque.addAndNotifyListener(secondFiveSeconds);
secondDeque.addAndNotifyListener(thirdFiveSeconds);
map.put(TimeGranularity.SECOND, secondDeque);
List<AccessCountTable> result = AccessCountTableManager.getTables(map, adapter,
Constants.ONE_MINUTE_IN_MILLIS);
Assert.assertTrue(result.size() == 3);
Assert.assertTrue(result.get(0).equals(firstFiveSeconds));
Assert.assertFalse(result.get(0).isEphemeral());
Assert.assertTrue(result.get(1).equals(secondFiveSeconds));
Assert.assertTrue(result.get(2).equals(thirdFiveSeconds));
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestTableAggregator.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestTableAggregator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import com.google.common.collect.Lists;
import org.dbunit.Assertion;
import org.dbunit.database.IDatabaseConnection;
import org.dbunit.dataset.IDataSet;
import org.dbunit.dataset.ITable;
import org.dbunit.dataset.xml.XmlDataSet;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.metastore.DBTest;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.FileAccessInfo;
import org.smartdata.model.FileInfo;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class TestTableAggregator extends DBTest {

  /**
   * Creates the three source access-count tables plus the expected-result
   * table. try-with-resources closes the Statement even if an execute fails
   * (the original leaked it on exception).
   */
  private void createTables(IDatabaseConnection connection) throws Exception {
    try (Statement statement = connection.getConnection().createStatement()) {
      statement.execute(AccessCountDao.createAccessCountTableSQL("table1"));
      statement.execute(AccessCountDao.createAccessCountTableSQL("table2"));
      statement.execute(AccessCountDao.createAccessCountTableSQL("table3"));
      statement.execute(AccessCountDao.createAccessCountTableSQL("expect"));
    }
  }

  /** Aggregating table1..table3 into one table must match the "expect" data set. */
  @Test
  public void testAggregate() throws Exception {
    createTables(databaseTester.getConnection());
    IDataSet dataSet =
        new XmlDataSet(getClass().getClassLoader().getResourceAsStream("accessCountTable.xml"));
    databaseTester.setDataSet(dataSet);
    databaseTester.onSetup();
    MetaStore metaStore = new MetaStore(druidPool);
    prepareFiles(metaStore);

    AccessCountTable result = new AccessCountTable("actual", 0L, 0L, false);
    AccessCountTable table1 = new AccessCountTable("table1", 0L, 0L, false);
    AccessCountTable table2 = new AccessCountTable("table2", 0L, 0L, false);
    AccessCountTable table3 = new AccessCountTable("table3", 0L, 0L, false);
    metaStore.aggregateTables(result, Lists.newArrayList(table1, table2, table3));

    ITable actual = databaseTester.getConnection().createTable(result.getTableName());
    ITable expect = databaseTester.getDataSet().getTable("expect");
    Assertion.assertEquals(expect, actual);
  }

  /** getHotFiles must return the most-accessed files across the given tables. */
  @Test
  public void testGetTopN() throws Exception {
    createTables(databaseTester.getConnection());
    IDataSet dataSet =
        new XmlDataSet(getClass().getClassLoader().getResourceAsStream("accessCountTable.xml"));
    databaseTester.setDataSet(dataSet);
    databaseTester.onSetup();
    MetaStore metaStore = new MetaStore(druidPool);
    prepareFiles(metaStore);

    AccessCountTable table1 = new AccessCountTable("table1", 0L, 0L, false);
    AccessCountTable table2 = new AccessCountTable("table2", 0L, 0L, false);
    AccessCountTable table3 = new AccessCountTable("table3", 0L, 0L, false);

    // Top 1: /file3 with 7 accesses in total.
    List<FileAccessInfo> accessInfos =
        metaStore.getHotFiles(Arrays.asList(table1, table2, table3), 1);
    Assert.assertEquals(1, accessInfos.size());
    FileAccessInfo expected1 = new FileAccessInfo(103L, "/file3", 7);
    Assert.assertEquals(expected1, accessInfos.get(0));

    // Top 2 adds /file2 with 6 accesses; ordering of the two is not asserted.
    List<FileAccessInfo> accessInfos2 =
        metaStore.getHotFiles(Arrays.asList(table1, table2, table3), 2);
    List<FileAccessInfo> expected2 =
        Arrays.asList(expected1, new FileAccessInfo(102L, "/file2", 6));
    Assert.assertEquals(expected2.size(), accessInfos2.size());
    Assert.assertTrue(accessInfos2.containsAll(expected2));
  }

  /** Registers /file1../file5 (fid 101..105) so access counts can be joined to paths. */
  private void prepareFiles(MetaStore metaStore) throws MetaStoreException {
    List<FileInfo> statusInternals = new ArrayList<>();
    for (int id = 1; id < 6; id++) {
      statusInternals.add(
          new FileInfo(
              "/file" + id,
              id + 100,
              123L,
              false,
              (short) 1,
              128 * 1024L,
              123123123L,
              123123120L,
              (short) 1,
              "root",
              "admin",
              (byte) 0,
              (byte) 0));
    }
    metaStore.insertFiles(statusInternals.toArray(new FileInfo[0]));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestAccessEventAggregator.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestAccessEventAggregator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import com.google.common.collect.Lists;
import org.junit.Test;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metrics.FileAccessEvent;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
public class TestAccessEventAggregator {

  /**
   * Feeds access events into the aggregator and verifies when aggregation
   * results are flushed to the meta store and registered with the manager.
   */
  @Test
  public void testAccessEventAggregator() throws MetaStoreException {
    MetaStore metaStore = mock(MetaStore.class);
    AccessCountTableManager tableManager = mock(AccessCountTableManager.class);
    AccessEventAggregator aggregator = new AccessEventAggregator(metaStore, tableManager);

    // One early event: nothing has been written to the meta store yet.
    aggregator.addAccessEvents(Lists.newArrayList(new FileAccessEvent("", 3000)));
    verify(metaStore, never()).execute(anyString());

    // A later event triggers exactly one flush and one new table.
    aggregator.addAccessEvents(Lists.newArrayList(new FileAccessEvent("", 6000)));
    verify(metaStore, times(1)).execute(anyString());
    verify(tableManager, times(1)).addTable(any(AccessCountTable.class));

    // A batch spanning a wider time range brings the totals to three each.
    aggregator.addAccessEvents(
        Lists.newArrayList(
            new FileAccessEvent("abc", 8000),
            new FileAccessEvent("def", 14000),
            new FileAccessEvent("", 18000)));
    verify(metaStore, times(3)).execute(anyString());
    verify(tableManager, times(3)).addTable(any(AccessCountTable.class));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestGlobalConfigDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestGlobalConfigDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.GlobalConfig;
public class TestGlobalConfigDao extends TestDaoUtil {
  private GlobalConfigDao globalConfigDao;

  @Before
  public void initGlobalConfigDao() throws Exception {
    initDao();
    globalConfigDao = new GlobalConfigDao(druidPool.getDataSource());
  }

  @After
  public void closeGlobalConfigDao() throws Exception {
    closeDao();
    globalConfigDao = null;
  }

  /** A config inserted with an explicit cid must be readable back unchanged. */
  @Test
  public void testInsertAndGetSingleRecord() {
    GlobalConfig globalConfig = new GlobalConfig();
    globalConfig.setCid(1);
    globalConfig.setPropertyName("test");
    globalConfig.setPropertyValue("test1");
    globalConfigDao.insert(globalConfig);
    Assert.assertEquals(globalConfig, globalConfigDao.getById(1));
  }

  /** Batch insert lets the DAO assign sequential cids starting from 1. */
  @Test
  public void testBatchInsert() {
    GlobalConfig[] globalConfigs = new GlobalConfig[2];
    globalConfigs[0] = new GlobalConfig(0, "test1", "test1");
    globalConfigs[1] = new GlobalConfig(0, "test2", "test2");
    globalConfigDao.insert(globalConfigs);
    // Mirror the generated ids on the expectations before comparing.
    globalConfigs[0].setCid(1);
    globalConfigs[1].setCid(2);
    Assert.assertEquals(globalConfigs[0], globalConfigDao.getById(1));
    Assert.assertEquals(globalConfigs[1], globalConfigDao.getById(2));
  }

  /** Updating a property value by name must be visible on subsequent reads. */
  @Test
  public void testUpdate() {
    GlobalConfig globalConfig = new GlobalConfig();
    globalConfig.setCid(1);
    globalConfig.setPropertyName("test");
    globalConfig.setPropertyValue("test1");
    globalConfigDao.insert(globalConfig);
    globalConfigDao.update("test", "test2");
    globalConfig.setPropertyValue("test2");
    Assert.assertEquals(globalConfig, globalConfigDao.getById(1));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestBackUpInfoDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestBackUpInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.model.BackUpInfo;
import org.springframework.dao.EmptyResultDataAccessException;
import java.util.List;
public class TestBackUpInfoDao extends TestDaoUtil {
  private BackUpInfoDao backUpInfoDao;

  @Before
  public void initBackUpInfoDao() throws Exception {
    initDao();
    backUpInfoDao = new BackUpInfoDao(druidPool.getDataSource());
  }

  @After
  public void closeBackUpInfoDao() throws Exception {
    closeDao();
    backUpInfoDao = null;
  }

  /** A record inserted by rid must be readable back unchanged. */
  @Test
  public void testInsertAndGetSingleRecord() {
    BackUpInfo backUpInfo = new BackUpInfo();
    backUpInfo.setRid(1);
    backUpInfo.setPeriod(1);
    backUpInfo.setDest("");
    backUpInfo.setSrc("");
    backUpInfoDao.insert(backUpInfo);
    Assert.assertEquals(backUpInfo, backUpInfoDao.getByRid(1));
  }

  /** Deleting by rid removes only that record and leaves others intact. */
  @Test
  public void testDelete() {
    // Deleting a non-existent rid must be a no-op.
    backUpInfoDao.delete(1L);
    BackUpInfo[] backUpInfos = new BackUpInfo[2];
    backUpInfos[0] = new BackUpInfo(1, "test", "test", 1);
    backUpInfos[1] = new BackUpInfo(2, "test", "test", 1);
    backUpInfoDao.insert(backUpInfos);
    backUpInfoDao.delete(1L);
    Assert.assertEquals(backUpInfos[1], backUpInfoDao.getByRid(2));
    try {
      backUpInfoDao.getByRid(1);
      // The original test passed silently even if the row survived deletion.
      Assert.fail("Expected EmptyResultDataAccessException for deleted rid 1");
    } catch (EmptyResultDataAccessException e) {
      // Expected: rid 1 was deleted.
    }
  }

  /** Batch insert stores every record retrievable by its rid. */
  @Test
  public void testBatchInsert() {
    BackUpInfo[] backUpInfos = new BackUpInfo[2];
    backUpInfos[0] = new BackUpInfo(1, "test", "test", 1);
    backUpInfos[1] = new BackUpInfo(2, "test", "test", 1);
    backUpInfoDao.insert(backUpInfos);
    Assert.assertEquals(backUpInfos[0], backUpInfoDao.getByRid(1));
    Assert.assertEquals(backUpInfos[1], backUpInfoDao.getByRid(2));
  }

  /** Updating the period by rid must be visible on subsequent reads. */
  @Test
  public void testUpdate() {
    BackUpInfo backUpInfo = new BackUpInfo();
    backUpInfo.setRid(1);
    backUpInfo.setSrc("test");
    backUpInfo.setDest("test");
    backUpInfo.setPeriod(1);
    backUpInfoDao.insert(backUpInfo);
    backUpInfoDao.update(1, 2);
    backUpInfo.setPeriod(2);
    Assert.assertEquals(backUpInfo, backUpInfoDao.getByRid(1));
  }

  /** getBySrc returns all records with the matching source, in insertion order. */
  @Test
  public void testgetBySrc() {
    Assert.assertEquals(0, backUpInfoDao.getByDest("1").size());
    BackUpInfo[] backUpInfos = new BackUpInfo[2];
    backUpInfos[0] = new BackUpInfo(1, "test", "test", 1);
    backUpInfos[1] = new BackUpInfo(2, "test", "test", 1);
    backUpInfoDao.insert(backUpInfos);
    List<BackUpInfo> list = backUpInfoDao.getBySrc("test");
    Assert.assertEquals(2, list.size());
    Assert.assertEquals(backUpInfos[0], list.get(0));
    Assert.assertEquals(backUpInfos[1], list.get(1));
    Assert.assertEquals(0, backUpInfoDao.getCountByRid(1));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestCacheFileDao.java | smart-metastore/src/test/java/org/smartdata/metastore/dao/TestCacheFileDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.metastore.TestDaoUtil;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.CachedFileStatus;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class TestCacheFileDao extends TestDaoUtil {
  private CacheFileDao cacheFileDao;

  @Before
  public void initCacheFileDao() throws Exception {
    initDao();
    cacheFileDao = new CacheFileDao(druidPool.getDataSource());
  }

  @After
  public void closeCacheFileDao() throws Exception {
    closeDao();
    cacheFileDao = null;
  }

  /** Access events update last-access time and access count of cached files only. */
  @Test
  public void testUpdateCachedFiles() throws Exception {
    CachedFileStatus first = new CachedFileStatus(80L,
        "testPath", 1000L, 2000L, 100);
    cacheFileDao.insert(first);
    CachedFileStatus second = new CachedFileStatus(90L,
        "testPath2", 2000L, 3000L, 200);
    cacheFileDao.insert(second);
    Map<String, Long> pathToId = new HashMap<>();
    pathToId.put("testPath", 80L);
    pathToId.put("testPath2", 90L);
    pathToId.put("testPath3", 100L);
    List<FileAccessEvent> events = new ArrayList<>();
    events.add(new FileAccessEvent("testPath", 3000L));
    events.add(new FileAccessEvent("testPath", 4000L));
    events.add(new FileAccessEvent("testPath2", 4000L));
    events.add(new FileAccessEvent("testPath2", 5000L));
    events.add(new FileAccessEvent("testPath3", 8000L));
    events.add(new FileAccessEvent("testPath3", 9000L));
    // Sync the in-memory expectations with what the update should produce.
    first.setLastAccessTime(4000L);
    first.setNumAccessed(first.getNumAccessed() + 2);
    second.setLastAccessTime(5000L);
    second.setNumAccessed(second.getNumAccessed() + 2);
    cacheFileDao.update(pathToId, events);
    List<CachedFileStatus> statuses = cacheFileDao.getAll();
    // testPath3 was never inserted as a cached file, so only two rows exist.
    Assert.assertEquals(2, statuses.size());
    Map<Long, CachedFileStatus> statusMap = new HashMap<>();
    for (CachedFileStatus status : statuses) {
      statusMap.put(status.getFid(), status);
    }
    Assert.assertEquals(first, statusMap.get(80L));
    Assert.assertEquals(second, statusMap.get(90L));
  }

  /** Insert, update, delete-by-id and delete-all round trips. */
  @Test
  public void testInsertDeleteCachedFiles() throws Exception {
    cacheFileDao
        .insert(80L,
            "testPath", 123456L, 234567L, 456);
    Assert.assertEquals(123456L, cacheFileDao.getById(80L).getFromTime());
    // Update record with fid 80.
    cacheFileDao.update(80L,
        123455L, 460);
    Assert.assertEquals(123455L, cacheFileDao.getAll().get(0).getLastAccessTime());
    CachedFileStatus[] cachedFileStatuses = new CachedFileStatus[] {
        new CachedFileStatus(321L, "testPath",
            113334L, 222222L, 222)};
    cacheFileDao.insert(cachedFileStatuses);
    Assert.assertEquals(cachedFileStatuses[0], cacheFileDao.getById(321L));
    Assert.assertEquals(2, cacheFileDao.getAll().size());
    // Delete one record.
    cacheFileDao.deleteById(321L);
    Assert.assertEquals(1, cacheFileDao.getAll().size());
    // Clear all records.
    cacheFileDao.deleteAll();
    Assert.assertEquals(0, cacheFileDao.getAll().size());
  }

  /** getById finds a record; getAll/getFids enumerate all records by fid. */
  @Test
  public void testGetCachedFileStatus() throws Exception {
    cacheFileDao.insert(6L, "testPath", 1490918400000L,
        234567L, 456);
    CachedFileStatus cachedFileStatus = new CachedFileStatus(6L, "testPath", 1490918400000L,
        234567L, 456);
    cacheFileDao.insert(19L, "testPath", 1490918400000L,
        234567L, 456);
    cacheFileDao.insert(23L, "testPath", 1490918400000L,
        234567L, 456);
    Assert.assertEquals(cachedFileStatus, cacheFileDao.getById(6));
    List<CachedFileStatus> cachedFileList = cacheFileDao.getAll();
    List<Long> fids = cacheFileDao.getFids();
    Assert.assertEquals(3, fids.size());
    Assert.assertEquals(6L, cachedFileList.get(0).getFid());
    Assert.assertEquals(19L, cachedFileList.get(1).getFid());
    Assert.assertEquals(23L, cachedFileList.get(2).getFid());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/DBType.java | smart-metastore/src/main/java/org/smartdata/metastore/DBType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore;
/**
* Type of database.
*/
public enum DBType {
  /** Embedded SQLite database. */
  SQLITE,
  /** MySQL database. */
  MYSQL
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/DruidPool.java | smart-metastore/src/main/java/org/smartdata/metastore/DruidPool.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidDataSourceFactory;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;
public class DruidPool implements DBPool {
private final DruidDataSource ds;
public DruidPool(Properties properties) throws MetaStoreException {
try {
ds =
(DruidDataSource) DruidDataSourceFactory.createDataSource(properties);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
public DataSource getDataSource() {
return ds;
}
public Connection getConnection() throws SQLException {
return ds.getConnection();
}
public void closeConnection(Connection conn) throws SQLException {
conn.close();
}
public void close() {
ds.close();
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/DBPool.java | smart-metastore/src/main/java/org/smartdata/metastore/DBPool.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
/**
 * Minimal abstraction over a pooled JDBC {@link DataSource} used by the meta store.
 */
public interface DBPool {
  /** Borrows a connection from the pool. */
  Connection getConnection() throws SQLException;
  /** Returns the underlying {@link DataSource}. */
  DataSource getDataSource();
  /** Releases the given connection (implementations typically close it back into the pool). */
  void closeConnection(Connection conn) throws SQLException;
  /** Shuts down the pool and releases its resources. */
  void close();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/StatesUpdateService.java | smart-metastore/src/main/java/org/smartdata/metastore/StatesUpdateService.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore;
import org.smartdata.AbstractService;
import org.smartdata.SmartContext;
/**
 * Base class for services that write state updates into the {@link MetaStore}.
 * Concrete update behavior is defined by subclasses; this class only holds the
 * shared meta store reference.
 */
public abstract class StatesUpdateService extends AbstractService {
  // Meta store handle shared with subclasses.
  protected MetaStore metaStore;

  /**
   * @param context runtime context forwarded to {@link AbstractService}
   * @param metaStore the meta store this service writes updates to
   */
  public StatesUpdateService(SmartContext context, MetaStore metaStore) {
    super(context);
    this.metaStore = metaStore;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/MetaStore.java | smart-metastore/src/main/java/org/smartdata/metastore/MetaStore.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metaservice.BackupMetaService;
import org.smartdata.metaservice.CmdletMetaService;
import org.smartdata.metaservice.CopyMetaService;
import org.smartdata.metastore.dao.AccessCountDao;
import org.smartdata.metastore.dao.AccessCountTable;
import org.smartdata.metastore.dao.ActionDao;
import org.smartdata.metastore.dao.BackUpInfoDao;
import org.smartdata.metastore.dao.CacheFileDao;
import org.smartdata.metastore.dao.ClusterConfigDao;
import org.smartdata.metastore.dao.ClusterInfoDao;
import org.smartdata.metastore.dao.CmdletDao;
import org.smartdata.metastore.dao.CompressionFileDao;
import org.smartdata.metastore.dao.DataNodeInfoDao;
import org.smartdata.metastore.dao.DataNodeStorageInfoDao;
import org.smartdata.metastore.dao.ErasureCodingPolicyDao;
import org.smartdata.metastore.dao.FileDiffDao;
import org.smartdata.metastore.dao.FileInfoDao;
import org.smartdata.metastore.dao.FileStateDao;
import org.smartdata.metastore.dao.GeneralDao;
import org.smartdata.metastore.dao.GlobalConfigDao;
import org.smartdata.metastore.dao.MetaStoreHelper;
import org.smartdata.metastore.dao.RuleDao;
import org.smartdata.metastore.dao.SmallFileDao;
import org.smartdata.metastore.dao.StorageDao;
import org.smartdata.metastore.dao.StorageHistoryDao;
import org.smartdata.metastore.dao.SystemInfoDao;
import org.smartdata.metastore.dao.UserInfoDao;
import org.smartdata.metastore.dao.WhitelistDao;
import org.smartdata.metastore.dao.XattrDao;
import org.smartdata.metastore.utils.MetaStoreUtils;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.ActionInfo;
import org.smartdata.model.BackUpInfo;
import org.smartdata.model.CachedFileStatus;
import org.smartdata.model.ClusterConfig;
import org.smartdata.model.ClusterInfo;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.CompressionFileState;
import org.smartdata.model.DataNodeInfo;
import org.smartdata.model.DataNodeStorageInfo;
import org.smartdata.model.DetailedFileAction;
import org.smartdata.model.DetailedRuleInfo;
import org.smartdata.model.ErasureCodingPolicyInfo;
import org.smartdata.model.FileAccessInfo;
import org.smartdata.model.FileDiff;
import org.smartdata.model.FileDiffState;
import org.smartdata.model.FileInfo;
import org.smartdata.model.FileState;
import org.smartdata.model.GlobalConfig;
import org.smartdata.model.NormalFileState;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.smartdata.model.S3FileState;
import org.smartdata.model.StorageCapacity;
import org.smartdata.model.StoragePolicy;
import org.smartdata.model.SystemInfo;
import org.smartdata.model.UserInfo;
import org.smartdata.model.XAttribute;
import org.springframework.dao.EmptyResultDataAccessException;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;
/**
* Operations supported for upper functions.
*/
public class MetaStore implements CopyMetaService, CmdletMetaService, BackupMetaService {
static final Logger LOG = LoggerFactory.getLogger(MetaStore.class);
private DBPool pool = null;
// Detected once at construction time (SQLITE or MYSQL).
private DBType dbType;
// Lazily built caches; any of them set to null forces a refresh in
// updateCache() on the next read.
private Map<Integer, String> mapStoragePolicyIdName = null;
private Map<String, Integer> mapStoragePolicyNameId = null;
private Map<String, StorageCapacity> mapStorageCapacity = null;
private Set<String> setBackSrc = null;
// One DAO per metastore table; all share the pool's data source.
private RuleDao ruleDao;
private CmdletDao cmdletDao;
private ActionDao actionDao;
private FileInfoDao fileInfoDao;
private CacheFileDao cacheFileDao;
private StorageDao storageDao;
private StorageHistoryDao storageHistoryDao;
private XattrDao xattrDao;
private FileDiffDao fileDiffDao;
private AccessCountDao accessCountDao;
private MetaStoreHelper metaStoreHelper;
private ClusterConfigDao clusterConfigDao;
private GlobalConfigDao globalConfigDao;
private DataNodeInfoDao dataNodeInfoDao;
private DataNodeStorageInfoDao dataNodeStorageInfoDao;
private BackUpInfoDao backUpInfoDao;
private ClusterInfoDao clusterInfoDao;
private SystemInfoDao systemInfoDao;
private UserInfoDao userInfoDao;
private FileStateDao fileStateDao;
private CompressionFileDao compressionFileDao;
private GeneralDao generalDao;
private SmallFileDao smallFileDao;
private ErasureCodingPolicyDao ecDao;
private WhitelistDao whitelistDao;
// Serializes mutations of the access-count tables (see getAccessCountLock()).
private final ReentrantLock accessCountLock;
/**
 * Creates a metastore on top of the given connection pool, detects the
 * backing database type, and wires up one DAO per table.
 *
 * @param pool connection pool all DAOs will draw from
 * @throws MetaStoreException if the database type cannot be determined
 */
public MetaStore(DBPool pool) throws MetaStoreException {
this.pool = pool;
initDbInfo();
ruleDao = new RuleDao(pool.getDataSource());
cmdletDao = new CmdletDao(pool.getDataSource());
actionDao = new ActionDao(pool.getDataSource());
fileInfoDao = new FileInfoDao(pool.getDataSource());
xattrDao = new XattrDao(pool.getDataSource());
cacheFileDao = new CacheFileDao(pool.getDataSource());
storageDao = new StorageDao(pool.getDataSource());
storageHistoryDao = new StorageHistoryDao(pool.getDataSource());
accessCountDao = new AccessCountDao(pool.getDataSource());
fileDiffDao = new FileDiffDao(pool.getDataSource());
metaStoreHelper = new MetaStoreHelper(pool.getDataSource());
clusterConfigDao = new ClusterConfigDao(pool.getDataSource());
globalConfigDao = new GlobalConfigDao(pool.getDataSource());
dataNodeInfoDao = new DataNodeInfoDao(pool.getDataSource());
dataNodeStorageInfoDao = new DataNodeStorageInfoDao(pool.getDataSource());
backUpInfoDao = new BackUpInfoDao(pool.getDataSource());
clusterInfoDao = new ClusterInfoDao(pool.getDataSource());
systemInfoDao = new SystemInfoDao(pool.getDataSource());
userInfoDao = new UserInfoDao(pool.getDataSource());
fileStateDao = new FileStateDao(pool.getDataSource());
compressionFileDao = new CompressionFileDao(pool.getDataSource());
generalDao = new GeneralDao(pool.getDataSource());
smallFileDao = new SmallFileDao(pool.getDataSource());
ecDao = new ErasureCodingPolicyDao(pool.getDataSource());
whitelistDao = new WhitelistDao(pool.getDataSource());
accessCountLock = new ReentrantLock();
}
/**
 * Detects the backing database type (SQLite or MySQL) by probing the JDBC
 * driver name of a freshly borrowed connection. The probe connection is
 * always returned to the pool.
 *
 * @throws MetaStoreException if the driver is unrecognized or the probe fails
 */
private void initDbInfo() throws MetaStoreException {
  Connection probe = null;
  try {
    try {
      probe = getConnection();
      String driverName = probe.getMetaData().getDriverName().toLowerCase();
      if (driverName.contains("sqlite")) {
        dbType = DBType.SQLITE;
      } else if (driverName.contains("mysql")) {
        dbType = DBType.MYSQL;
      } else {
        throw new MetaStoreException("Unknown database: " + driverName);
      }
    } finally {
      // Release the probe connection before mapping SQL errors.
      if (probe != null) {
        closeConnection(probe);
      }
    }
  } catch (SQLException e) {
    throw new MetaStoreException(e);
  }
}
/**
 * Borrows a JDBC connection from the pool.
 *
 * @return a pooled connection, or {@code null} when no pool is configured
 * @throws MetaStoreException if the pool fails to hand out a connection
 */
public Connection getConnection() throws MetaStoreException {
  if (pool == null) {
    return null;
  }
  try {
    return pool.getConnection();
  } catch (SQLException e) {
    throw new MetaStoreException(e);
  }
}
/**
 * Returns a borrowed connection to the pool; no-op when no pool is set.
 *
 * @throws MetaStoreException if returning the connection fails
 */
private void closeConnection(Connection conn) throws MetaStoreException {
  if (pool == null) {
    return;
  }
  try {
    pool.closeConnection(conn);
  } catch (SQLException e) {
    throw new MetaStoreException(e);
  }
}
/** @return the database type detected at construction time. */
public DBType getDbType() {
  return dbType;
}
/**
 * Runs an arbitrary single-value query and returns the result as a Long.
 *
 * @param sql query expected to yield one numeric value
 * @throws MetaStoreException if the query fails
 */
public Long queryForLong(String sql) throws MetaStoreException {
try {
return generalDao.queryForLong(sql);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Stores a single file info record into the database.
 *
 * @param file the record to insert
 */
public void insertFile(FileInfo file)
throws MetaStoreException {
updateCache();
fileInfoDao.insert(file);
}
/**
 * Stores a batch of file info records into the database.
 *
 * @param files the records to insert
 */
public void insertFiles(FileInfo[] files)
throws MetaStoreException {
updateCache();
fileInfoDao.insert(files);
}
/**
 * Updates the storage policy recorded for a file.
 *
 * @param path file path whose policy is updated
 * @param policyName policy name; must exist in the cached policy map
 * @return number of rows updated
 * @throws MetaStoreException if the policy name is unknown or the update fails
 */
public int updateFileStoragePolicy(String path, String policyName)
throws MetaStoreException {
// Lazily populate the policy id <-> name caches on first use.
if (mapStoragePolicyIdName == null) {
updateCache();
}
if (!mapStoragePolicyNameId.containsKey(policyName)) {
throw new MetaStoreException("Unknown storage policy name '"
+ policyName + "'");
}
try {
return storageDao.updateFileStoragePolicy(path, mapStoragePolicyNameId.get(policyName));
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return file info for the given file id, or {@code null} if absent. */
public FileInfo getFile(long fid) throws MetaStoreException {
updateCache();
try {
return fileInfoDao.getById(fid);
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return file info for the given path, or {@code null} if absent. */
public FileInfo getFile(String path) throws MetaStoreException {
updateCache();
try {
return fileInfoDao.getByPath(path);
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return every file info record; empty list when the table is empty. */
public List<FileInfo> getFile() throws MetaStoreException {
updateCache();
try {
return fileInfoDao.getAll();
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return files whose path starts with the given prefix. */
public List<FileInfo> getFilesByPrefix(String path) throws MetaStoreException {
updateCache();
try {
return fileInfoDao.getFilesByPrefix(path);
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return files whose path starts with the given prefix, in DAO-defined order. */
public List<FileInfo> getFilesByPrefixInOrder(String path) throws MetaStoreException {
updateCache();
try {
return fileInfoDao.getFilesByPrefixInOrder(path);
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return file info for each of the given paths that exists. */
public List<FileInfo> getFilesByPaths(Collection<String> paths)
throws MetaStoreException {
try {
return fileInfoDao.getFilesByPaths(paths);
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return a path -> file id map for the given paths. */
public Map<String, Long> getFileIDs(Collection<String> paths)
throws MetaStoreException {
try {
return fileInfoDao.getPathFids(paths);
} catch (EmptyResultDataAccessException e) {
return new HashMap<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return a file id -> path map for the given ids. */
public Map<Long, String> getFilePaths(Collection<Long> ids)
throws MetaStoreException {
try {
return fileInfoDao.getFidPaths(ids);
} catch (EmptyResultDataAccessException e) {
return new HashMap<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Returns up to {@code topNum} most-accessed files aggregated over the
 * given access-count tables, joined with their current paths. Ephemeral
 * tables in {@code tables} are dropped afterwards regardless of outcome.
 */
public List<FileAccessInfo> getHotFiles(
List<AccessCountTable> tables,
int topNum) throws MetaStoreException {
Iterator<AccessCountTable> tableIterator = tables.iterator();
if (tableIterator.hasNext()) {
try {
Map<Long, Integer> accessCounts =
accessCountDao.getHotFiles(tables, topNum);
if (accessCounts.size() == 0) {
return new ArrayList<>();
}
Map<Long, String> idToPath = getFilePaths(accessCounts.keySet());
List<FileAccessInfo> result = new ArrayList<>();
for (Map.Entry<Long, Integer> entry : accessCounts.entrySet()) {
Long fid = entry.getKey();
// Skip counts for files no longer present in the namespace and
// entries with a non-positive count.
if (idToPath.containsKey(fid) && entry.getValue() > 0) {
result.add(
new FileAccessInfo(fid, idToPath.get(fid), entry.getValue()));
}
}
return result;
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
} finally {
// Ephemeral (per-query temporary) tables must not outlive this call.
for (AccessCountTable accessCountTable : tables) {
if (accessCountTable.isEphemeral()) {
this.dropTable(accessCountTable.getTableName());
}
}
}
} else {
return new ArrayList<>();
}
}
/** @return the lock that serializes access-count table mutations. */
public ReentrantLock getAccessCountLock() {
return accessCountLock;
}
/**
 * Moves the access-count records of an old file onto its replacement.
 *
 * @param fidSrc the fid of the old file.
 * @param fidDest the fid of the new file that will take over the access
 * count of the old file.
 * @throws MetaStoreException if the update fails
 */
public void updateAccessCountTableFid(long fidSrc, long fidDest)
throws MetaStoreException {
if (fidSrc == fidDest) {
LOG.warn("No need to update fid for access count table "
+ "with same fid: " + fidDest);
return;
}
// Serialize with other access-count table mutations.
accessCountLock.lock();
try {
accessCountDao.updateFid(fidSrc, fidDest);
} catch (Exception e) {
throw new MetaStoreException(e);
} finally {
accessCountLock.unlock();
}
}
/** Removes every file info record. */
public void deleteAllFileInfo() throws MetaStoreException {
try {
fileInfoDao.deleteAll();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Removes every stored erasure coding policy. */
public void deleteAllEcPolicies() throws MetaStoreException {
try {
ecDao.deleteAll();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Stores a batch of erasure coding policies. */
public void insertEcPolicies(List<ErasureCodingPolicyInfo> ecInfos) throws MetaStoreException {
try {
ecDao.insert(ecInfos);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return all stored erasure coding policies. */
public List<ErasureCodingPolicyInfo> getAllEcPolicies() throws MetaStoreException {
try {
return ecDao.getAllEcPolicies();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Removes the file info record with the given path. */
public void deleteFileByPath(String path) throws MetaStoreException {
try {
fileInfoDao.deleteByPath(path);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return all access-count tables in DAO-defined sort order. */
public List<AccessCountTable> getAllSortedTables() throws MetaStoreException {
try {
return accessCountDao.getAllSortedTables();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Removes an access-count table record under the access-count lock. */
public void deleteAccessCountTable(
AccessCountTable table) throws MetaStoreException {
accessCountLock.lock();
try {
accessCountDao.delete(table);
} catch (Exception e) {
throw new MetaStoreException(e);
} finally {
accessCountLock.unlock();
}
}
/**
 * Inserts the table record if one with the same name does not exist.
 * NOTE(review): check-then-insert here is not atomic; confirm callers
 * hold {@link #getAccessCountLock()} when races are possible.
 */
public void insertAccessCountTable(
AccessCountTable accessCountTable) throws MetaStoreException {
try {
if (accessCountDao.getAccessCountTableByName(accessCountTable.getTableName()).isEmpty()) {
accessCountDao.insert(accessCountTable);
}
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Upserts storage capacity rows and invalidates the in-memory capacity
 * cache so the next read refreshes it.
 */
public void insertUpdateStoragesTable(StorageCapacity[] storages)
throws MetaStoreException {
mapStorageCapacity = null;
try {
storageDao.insertUpdateStoragesTable(storages);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** List-based convenience overload of the array variant above. */
public void insertUpdateStoragesTable(List<StorageCapacity> storages)
throws MetaStoreException {
mapStorageCapacity = null;
try {
storageDao.insertUpdateStoragesTable(
storages.toArray(new StorageCapacity[storages.size()]));
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Single-element convenience overload. */
public void insertUpdateStoragesTable(StorageCapacity storage)
throws MetaStoreException {
insertUpdateStoragesTable(new StorageCapacity[]{storage});
}
/**
 * Returns a snapshot copy of the cached storage-type -> capacity map.
 * The returned map is a private copy; callers may mutate it freely.
 *
 * @throws MetaStoreException if refreshing the cache fails
 */
public Map<String, StorageCapacity> getStorageCapacity() throws MetaStoreException {
  updateCache();
  // Capture the reference once: the cache field may be nulled out
  // concurrently by cache-invalidating writers.
  Map<String, StorageCapacity> snapshot = mapStorageCapacity;
  if (snapshot == null) {
    return new HashMap<>();
  }
  return new HashMap<>(snapshot);
}
/** Removes a storage type row and invalidates the capacity cache. */
public void deleteStorage(String storageType) throws MetaStoreException {
try {
mapStorageCapacity = null;
storageDao.deleteStorage(storageType);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Returns the capacity info cached for the given storage type.
 *
 * <p>If the cache has been invalidated concurrently, this naps in 100 ms
 * intervals until another refresh repopulates it.
 *
 * <p>Fix: the previous version swallowed {@link InterruptedException}
 * (only logging it), losing the thread's interrupt status. The status is
 * now restored once the wait completes, so callers up the stack can still
 * observe the interruption.
 *
 * @param type storage type key
 * @return capacity for the type, or {@code null} if unknown
 * @throws MetaStoreException if the lookup fails
 */
public StorageCapacity getStorageCapacity(
    String type) throws MetaStoreException {
  updateCache();
  boolean interrupted = false;
  Map<String, StorageCapacity> currentMapStorageCapacity = mapStorageCapacity;
  while (currentMapStorageCapacity == null) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ex) {
      LOG.error(ex.getMessage());
      // Remember the interrupt; keep waiting as before so behavior for
      // the cache-refresh race is unchanged.
      interrupted = true;
    }
    currentMapStorageCapacity = mapStorageCapacity;
  }
  if (interrupted) {
    // Preserve the interrupt status instead of swallowing it.
    Thread.currentThread().interrupt();
  }
  try {
    return currentMapStorageCapacity.get(type);
  } catch (EmptyResultDataAccessException e) {
    return null;
  } catch (Exception e) {
    throw new MetaStoreException(e);
  }
}
/**
 * Updates capacity/free for a storage type, invalidating the cache.
 *
 * @return whether the DAO reports a successful update
 */
public boolean updateStoragesTable(String type,
Long capacity, Long free) throws MetaStoreException {
try {
mapStorageCapacity = null;
return storageDao.updateStoragesTable(type, capacity, free);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Appends storage capacity samples for the given aggregation interval. */
public void insertStorageHistTable(StorageCapacity[] storages, long interval)
throws MetaStoreException {
try {
storageHistoryDao.insertStorageHistTable(storages, interval);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Returns historical capacity samples for a type within a time window.
 * NOTE(review): unlike its siblings this does not wrap DAO exceptions in
 * MetaStoreException — confirm that is intentional.
 */
public List<StorageCapacity> getStorageHistoryData(String type, long interval,
long startTime, long endTime) {
return storageHistoryDao.getStorageHistoryData(type, interval, startTime, endTime);
}
/** Prunes history samples older than the given timestamp. */
public void deleteStorageHistoryOldRecords(String type, long interval, long beforTimeStamp)
throws MetaStoreException {
try {
storageHistoryDao.deleteOldRecords(type, interval, beforTimeStamp);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Refreshes the lazy caches: the storage-policy id/name maps (and their
 * inverse) and the storage capacity map. A cache field set to null marks
 * it stale; only stale caches are reloaded.
 */
private void updateCache() throws MetaStoreException {
if (mapStoragePolicyIdName == null) {
mapStoragePolicyNameId = null;
try {
mapStoragePolicyIdName = storageDao.getStoragePolicyIdNameMap();
} catch (Exception e) {
throw new MetaStoreException(e);
}
mapStoragePolicyNameId = new HashMap<>();
for (Integer key : mapStoragePolicyIdName.keySet()) {
mapStoragePolicyNameId.put(mapStoragePolicyIdName.get(key), key);
}
}
if (mapStorageCapacity == null) {
try {
mapStorageCapacity = storageDao.getStorageTablesItem();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
}
/** Records that a file entered the cache, with initial access stats. */
public void insertCachedFiles(long fid, String path,
long fromTime,
long lastAccessTime, int numAccessed) throws MetaStoreException {
try {
cacheFileDao.insert(fid, path, fromTime, lastAccessTime, numAccessed);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Batch variant of the cached-file insert above. */
public void insertCachedFiles(List<CachedFileStatus> s)
throws MetaStoreException {
try {
cacheFileDao.insert(s.toArray(new CachedFileStatus[s.size()]));
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Removes every cached-file record. */
public void deleteAllCachedFile() throws MetaStoreException {
try {
cacheFileDao.deleteAll();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Updates access stats for one cached file.
 *
 * @return true if the DAO reports a non-negative update count
 */
public boolean updateCachedFiles(Long fid,
Long lastAccessTime,
Integer numAccessed) throws MetaStoreException {
try {
return cacheFileDao.update(fid, lastAccessTime, numAccessed) >= 0;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Applies a batch of access events to cached-file stats. */
public void updateCachedFiles(Map<String, Long> pathToIds,
List<FileAccessEvent> events)
throws MetaStoreException {
try {
cacheFileDao.update(pathToIds, events);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Removes the cached-file record with the given fid. */
public void deleteCachedFile(long fid) throws MetaStoreException {
try {
cacheFileDao.deleteById(fid);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return all cached-file records; empty list when none exist. */
public List<CachedFileStatus> getCachedFileStatus() throws MetaStoreException {
try {
return cacheFileDao.getAll();
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return the fids of all cached files. */
public List<Long> getCachedFids() throws MetaStoreException {
try {
return cacheFileDao.getFids();
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return the cached-file record for a fid, or {@code null} if absent. */
public CachedFileStatus getCachedFileStatus(
long fid) throws MetaStoreException {
try {
return cacheFileDao.getById(fid);
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Creates a proportional (scaled-down) access-count table from a source table. */
public void createProportionTable(AccessCountTable dest,
AccessCountTable source)
throws MetaStoreException {
try {
accessCountDao.createProportionTable(dest, source);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Drops a table by name. Used for ephemeral access-count tables. */
public void dropTable(String tableName) throws MetaStoreException {
try {
LOG.debug("Drop table = {}", tableName);
metaStoreHelper.dropTable(tableName);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Executes an arbitrary SQL statement. */
public void execute(String sql) throws MetaStoreException {
try {
LOG.debug("Execute sql = {}", sql);
metaStoreHelper.execute(sql);
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
// TODO: optimize — statements run one by one; consider batching them in a
// single transaction.
public void execute(List<String> statements) throws MetaStoreException {
for (String statement : statements) {
execute(statement);
}
}
/** Runs a query expected to yield file paths; empty list when none match. */
public List<String> executeFilesPathQuery(
String sql) throws MetaStoreException {
try {
LOG.debug("ExecuteFilesPathQuery sql = {}", sql);
return metaStoreHelper.getFilesPath(sql);
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Lists detailed file actions for a rule.
 *
 * <p>Fix: the two overloads previously duplicated the same ~30-line
 * enrichment loop; it is now shared via {@link #toDetailedFileActions}.
 *
 * @param rid rule id
 * @param size maximum number of actions to fetch (semantics per getActions)
 */
public List<DetailedFileAction> listFileActions(long rid,
    int size) throws MetaStoreException {
  if (mapStoragePolicyIdName == null) {
    updateCache();
  }
  return toDetailedFileActions(getActions(rid, size));
}
/**
 * Paged variant: lists detailed file actions for a rule starting at
 * {@code start}, returning at most {@code offset} entries.
 */
public List<DetailedFileAction> listFileActions(long rid, long start, long offset)
    throws MetaStoreException {
  if (mapStoragePolicyIdName == null) {
    updateCache();
  }
  return toDetailedFileActions(getActions(rid, start, offset));
}
/**
 * Enriches raw action records with file length/path and src/target info.
 * Files missing from the namespace table get a placeholder record.
 */
private List<DetailedFileAction> toDetailedFileActions(List<ActionInfo> actionInfos)
    throws MetaStoreException {
  List<DetailedFileAction> detailedFileActions = new ArrayList<>();
  for (ActionInfo actionInfo : actionInfos) {
    DetailedFileAction detailedFileAction = new DetailedFileAction(actionInfo);
    String filePath = actionInfo.getArgs().get("-file");
    FileInfo fileInfo = getFile(filePath);
    if (fileInfo == null) {
      // Namespace may not be synced yet; fall back to a mock record.
      fileInfo = new FileInfo(filePath, 0L, 0L, false,
          (short) 0, 0L, 0L, 0L, (short) 0,
          "root", "root", (byte) 0, (byte) 0);
    }
    detailedFileAction.setFileLength(fileInfo.getLength());
    detailedFileAction.setFilePath(filePath);
    if (isMoverAction(actionInfo)) {
      // Mover actions: target is the action itself, source is the file's
      // current storage policy name.
      detailedFileAction.setTarget(actionInfo.getActionName());
      detailedFileAction.setSrc(mapStoragePolicyIdName.get((int) fileInfo.getStoragePolicy()));
    } else {
      detailedFileAction.setSrc(actionInfo.getArgs().get("-src"));
      detailedFileAction.setTarget(actionInfo.getArgs().get("-dest"));
    }
    detailedFileActions.add(detailedFileAction);
  }
  return detailedFileActions;
}
/** @return whether the action is one of the data-mover action types. */
private static boolean isMoverAction(ActionInfo actionInfo) {
  String name = actionInfo.getActionName();
  return name.contains("allssd")
      || name.contains("onessd")
      || name.contains("archive")
      || name.contains("alldisk")
      || name.contains("onedisk")
      || name.contains("ramdisk");
}
/**
 * Counts the file actions of a rule.
 * NOTE(review): this materializes every action row just to take its size;
 * a COUNT(*) query would be cheaper — consider optimizing.
 */
public long getNumFileAction(long rid) throws MetaStoreException {
return listFileActions(rid, 0).size();
}
/**
 * Lists non-deleted rules whose final command part is a data-mover action,
 * together with a coarse progress estimate derived from the rule's cmdlets.
 * Sync rules are excluded.
 */
public List<DetailedRuleInfo> listMoveRules() throws MetaStoreException {
List<RuleInfo> ruleInfos = getRuleInfo();
List<DetailedRuleInfo> detailedRuleInfos = new ArrayList<>();
for (RuleInfo ruleInfo : ruleInfos) {
// Inspect only the text after the last '|' (the command part).
int lastIndex = ruleInfo.getRuleText().lastIndexOf("|");
String lastPart = ruleInfo.getRuleText().substring(lastIndex + 1);
if (lastPart.contains("sync")) {
continue;
} else if (lastPart.contains("allssd")
|| lastPart.contains("onessd")
|| lastPart.contains("archive")
|| lastPart.contains("alldisk")
|| lastPart.contains("onedisk")
|| lastPart.contains("ramdisk")) {
DetailedRuleInfo detailedRuleInfo = new DetailedRuleInfo(ruleInfo);
// Add mover progress.
List<CmdletInfo> cmdletInfos = cmdletDao.getByRid(ruleInfo.getId());
// Index of the first cmdlet still in a low-valued state.
// NOTE(review): state values <= 4 appear to denote not-yet-finished
// cmdlets — confirm against CmdletState before relying on this.
int currPos = 0;
for (CmdletInfo cmdletInfo : cmdletInfos) {
if (cmdletInfo.getState().getValue() <= 4) {
break;
}
currPos += 1;
}
// Total number of cmdlets in such states.
int countRunning = 0;
for (CmdletInfo cmdletInfo : cmdletInfos) {
if (cmdletInfo.getState().getValue() <= 4) {
countRunning++;
}
}
detailedRuleInfo
.setBaseProgress(cmdletInfos.size() - currPos);
detailedRuleInfo.setRunningProgress(countRunning);
if (detailedRuleInfo.getState() != RuleState.DELETED){
detailedRuleInfos.add(detailedRuleInfo);
}
}
}
return detailedRuleInfos;
}
/**
 * Lists non-deleted sync rules with a progress estimate: base progress is
 * the number of files matching the backup source prefix, running progress
 * the number of pending/running file diffs (capped at base progress).
 */
public List<DetailedRuleInfo> listSyncRules() throws MetaStoreException {
List<RuleInfo> ruleInfos = getRuleInfo();
List<DetailedRuleInfo> detailedRuleInfos = new ArrayList<>();
for (RuleInfo ruleInfo : ruleInfos) {
if (ruleInfo.getState() == RuleState.DELETED) {
continue;
}
// Inspect only the text after the last '|' (the command part).
int lastIndex = ruleInfo.getRuleText().lastIndexOf("|");
String lastPart = ruleInfo.getRuleText().substring(lastIndex + 1);
if (lastPart.contains("sync")) {
DetailedRuleInfo detailedRuleInfo = new DetailedRuleInfo(ruleInfo);
// Add sync progress.
BackUpInfo backUpInfo = getBackUpInfo(ruleInfo.getId());
// Get total matched files.
if (backUpInfo != null) {
detailedRuleInfo
.setBaseProgress(getFilesByPrefix(backUpInfo.getSrc()).size());
long count = fileDiffDao.getPendingDiff(backUpInfo.getSrc()).size();
count += fileDiffDao.getByState(backUpInfo.getSrc(), FileDiffState.RUNNING).size();
// Cap: outstanding diffs can momentarily exceed the matched-file count.
if (count > detailedRuleInfo.baseProgress) {
count = detailedRuleInfo.baseProgress;
}
detailedRuleInfo.setRunningProgress(count);
} else {
// No backup info yet: report zero progress rather than failing.
detailedRuleInfo
.setBaseProgress(0);
detailedRuleInfo.setRunningProgress(0);
}
if (detailedRuleInfo.getState() != RuleState.DELETED){
detailedRuleInfos.add(detailedRuleInfo);
}
}
}
return detailedRuleInfos;
}
/** Persists a new rule; returns whether the DAO reports success. */
public boolean insertNewRule(RuleInfo info)
throws MetaStoreException {
try {
return ruleDao.insert(info) >= 0;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Updates a rule's bookkeeping fields; when {@code rs} is null only the
 * counters are updated, otherwise the state is updated as well.
 */
public boolean updateRuleInfo(long ruleId, RuleState rs,
long lastCheckTime, long checkedCount, int commandsGen)
throws MetaStoreException {
try {
if (rs == null) {
return ruleDao.update(ruleId,
lastCheckTime, checkedCount, commandsGen) >= 0;
}
return ruleDao.update(ruleId,
rs.getValue(), lastCheckTime, checkedCount, commandsGen) >= 0;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Updates only a rule's state; the state must be non-null. */
public boolean updateRuleState(long ruleId, RuleState rs)
throws MetaStoreException {
if (rs == null) {
throw new MetaStoreException("Rule state can not be null, ruleId = " + ruleId);
}
try {
return ruleDao.update(ruleId, rs.getValue()) >= 0;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return the rule with the given id, or {@code null} if absent. */
public RuleInfo getRuleInfo(long ruleId) throws MetaStoreException {
try {
return ruleDao.getById(ruleId);
} catch (EmptyResultDataAccessException e) {
return null;
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Returns a page of rules, optionally ordered.
 *
 * @param orderBy column names to order by; empty means DAO default order
 * @param desc per-column descending flags, parallel to {@code orderBy}
 */
public List<RuleInfo> listPageRule(long start, long offset, List<String> orderBy,
List<Boolean> desc)
throws MetaStoreException {
LOG.debug("List Rule, start {}, offset {}", start, offset);
try {
if (orderBy.size() == 0) {
return ruleDao.getAPageOfRule(start, offset);
} else {
return ruleDao.getAPageOfRule(start, offset, orderBy, desc);
}
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** @return all rules; empty list when none exist. */
public List<RuleInfo> getRuleInfo() throws MetaStoreException {
try {
return ruleDao.getAll();
} catch (EmptyResultDataAccessException e) {
return new ArrayList<>();
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/** Returns a page of cmdlets for a rule, optionally ordered. */
public List<CmdletInfo> listPageCmdlets(long rid, long start, long offset,
List<String> orderBy, List<Boolean> desc)
throws MetaStoreException {
LOG.debug("List cmdlet, start {}, offset {}", start, offset);
try {
if (orderBy.size() == 0) {
return cmdletDao.getByRid(rid, start, offset);
} else {
return cmdletDao.getByRid(rid, start, offset, orderBy, desc);
}
} catch (Exception e) {
throw new MetaStoreException(e);
}
}
/**
 * Counts the cmdlets of a rule.
 * NOTE(review): failures are silently mapped to 0 — confirm this
 * best-effort behavior is intended by callers.
 */
public long getNumCmdletsByRid(long rid) {
try {
return cmdletDao.getNumByRid(rid);
} catch (Exception e) {
return 0;
}
}
public List<CmdletInfo> listPageCmdlets(long start, long offset,
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | true |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/MetaStoreException.java | smart-metastore/src/main/java/org/smartdata/metastore/MetaStoreException.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore;
import org.smartdata.metaservice.MetaServiceException;
/**
 * Exception type for metastore failures; wraps lower-level (usually SQL)
 * errors behind the common {@code MetaServiceException} hierarchy.
 */
public class MetaStoreException extends MetaServiceException {
public MetaStoreException(String errorMsg) {
super(errorMsg);
}
public MetaStoreException(String errorMsg, Throwable throwable) {
super(errorMsg, throwable);
}
public MetaStoreException(Throwable throwable) {
super(throwable);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/ClusterConfigDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/ClusterConfigDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.smartdata.model.ClusterConfig;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the "cluster_config" table, which maps a node name to the path of
 * its configuration file. Rows are keyed by the auto-generated id "cid".
 */
public class ClusterConfigDao {
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public ClusterConfigDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every cluster config row. */
  public List<ClusterConfig> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM cluster_config", new ClusterConfigRowMapper());
  }

  /**
   * Returns the configs whose cid is in the given list; empty list for
   * null/empty input.
   *
   * <p>Fix: the previous implementation bound the whole comma-joined id list
   * as a single parameter of "IN (?)", which can never match more than one
   * row. We now emit one '?' placeholder per id.
   */
  public List<ClusterConfig> getByIds(List<Long> cids) {
    if (cids == null || cids.isEmpty()) {
      return new ArrayList<>();
    }
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    StringBuilder placeholders = new StringBuilder();
    for (int i = 0; i < cids.size(); i++) {
      placeholders.append(i == 0 ? "?" : ",?");
    }
    return jdbcTemplate.query(
        "SELECT * FROM cluster_config WHERE cid IN (" + placeholders + ")",
        cids.toArray(),
        new ClusterConfigRowMapper());
  }

  /** Returns the config with the given cid; throws if it does not exist. */
  public ClusterConfig getById(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM cluster_config WHERE cid = ?",
        new Object[]{cid}, new ClusterConfigRowMapper());
  }

  /** Returns how many rows exist for the given node name. */
  public long getCountByName(String name) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT COUNT(*) FROM cluster_config WHERE node_name = ?", Long.class, name);
  }

  /** Returns the config for the given node name; throws if it does not exist. */
  public ClusterConfig getByName(String name) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM cluster_config WHERE node_name = ?",
        new Object[]{name}, new ClusterConfigRowMapper());
  }

  /** Deletes the row with the given cid (no-op if absent). */
  public void delete(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM cluster_config WHERE cid = ?";
    jdbcTemplate.update(sql, cid);
  }

  /**
   * Inserts one config and writes the generated cid back into the bean.
   *
   * @return the generated cid
   */
  public long insert(ClusterConfig clusterConfig) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("cluster_config");
    simpleJdbcInsert.usingGeneratedKeyColumns("cid");
    long cid = simpleJdbcInsert.executeAndReturnKey(toMap(clusterConfig)).longValue();
    clusterConfig.setCid(cid);
    return cid;
  }

  // TODO solve the increment of key
  public void insert(ClusterConfig[] clusterConfigs) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("cluster_config");
    simpleJdbcInsert.usingGeneratedKeyColumns("cid");
    Map<String, Object>[] maps = new Map[clusterConfigs.length];
    for (int i = 0; i < clusterConfigs.length; i++) {
      maps[i] = toMap(clusterConfigs[i]);
    }
    // NOTE(review): executeBatch returns update counts, not generated keys;
    // the cids written back below may not be the real generated ids — confirm.
    int[] cids = simpleJdbcInsert.executeBatch(maps);
    for (int i = 0; i < clusterConfigs.length; i++) {
      clusterConfigs[i].setCid(cids[i]);
    }
  }

  /** Updates config_path by cid; returns the number of affected rows. */
  public int updateById(int cid, String configPath) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "UPDATE cluster_config SET config_path = ? WHERE cid = ?";
    return jdbcTemplate.update(sql, configPath, cid);
  }

  /** Updates config_path by node name; returns the number of affected rows. */
  public int updateByNodeName(String nodeName, String configPath) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "UPDATE cluster_config SET config_path = ? WHERE node_name = ?";
    return jdbcTemplate.update(sql, configPath, nodeName);
  }

  /** Converts a bean to the column-name -> value map used for inserts. */
  private Map<String, Object> toMap(ClusterConfig clusterConfig) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("cid", clusterConfig.getCid());
    parameters.put("config_path", clusterConfig.getConfigPath());
    parameters.put("node_name", clusterConfig.getNodeName());
    return parameters;
  }

  class ClusterConfigRowMapper implements RowMapper<ClusterConfig> {
    @Override
    public ClusterConfig mapRow(ResultSet resultSet, int i) throws SQLException {
      ClusterConfig clusterConfig = new ClusterConfig();
      clusterConfig.setCid(resultSet.getLong("cid"));
      clusterConfig.setConfig_path(resultSet.getString("config_path"));
      clusterConfig.setNodeName(resultSet.getString("node_name"));
      return clusterConfig;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTableDeque.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTableDeque.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
/**
* Use deque to accelerate remove operation.
*/
/**
 * A deque of {@link AccessCountTable}s kept in chronological order.
 * A deque (rather than a list) is used so removal from either end is cheap.
 * Adding through {@link #addAndNotifyListener} also notifies the registered
 * listener and gives the evictor a chance to drop old tables.
 */
public class AccessCountTableDeque extends ArrayDeque<AccessCountTable> {
  private TableAddOpListener listener;
  private TableEvictor tableEvictor;

  public AccessCountTableDeque(TableEvictor tableEvictor) {
    this(tableEvictor, null);
  }

  public AccessCountTableDeque(TableEvictor evictor, TableAddOpListener addListener) {
    super();
    this.tableEvictor = evictor;
    this.listener = addListener;
  }

  /**
   * Appends a table, notifies the listener (if any), then lets the evictor
   * trim the deque. Always returns {@code true}, mirroring {@code add}.
   */
  public boolean addAndNotifyListener(AccessCountTable table) {
    if (!isEmpty()) {
      // Tables must arrive in chronological order (only checked when -ea).
      assert table.getEndTime() > peekLast().getEndTime();
    }
    super.add(table);
    if (listener != null) {
      listener.tableAdded(this, table);
    }
    tableEvictor.evictTables(this, size());
    return true;
  }

  /** Returns the tables fully contained in the interval [start, end]. */
  public List<AccessCountTable> getTables(Long start, Long end) {
    List<AccessCountTable> selected = new ArrayList<>();
    for (AccessCountTable candidate : this) {
      boolean inWindow =
          candidate.getStartTime() >= start && candidate.getEndTime() <= end;
      if (inWindow) {
        selected.add(candidate);
      }
    }
    return selected;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/RuleDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/RuleDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.RuleInfo;
import org.smartdata.model.RuleState;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the "rule" table, which stores SSM rule definitions and their
 * execution bookkeeping (state, check counters, generated cmdlet count).
 */
public class RuleDao {
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public RuleDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every rule. */
  public List<RuleInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM rule",
        new RuleRowMapper());
  }

  /** Returns the rule with the given id; throws if it does not exist. */
  public RuleInfo getById(long id) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM rule WHERE id = ?",
        new Object[]{id}, new RuleRowMapper());
  }

  /**
   * Fetches one page of rules with an explicit sort order.
   *
   * @param start   row offset of the first rule of the page
   * @param offset  page size
   * @param orderBy columns to sort by; "rid" is appended as a tie-breaker when
   *                absent. NOTE(review): these names are concatenated into the
   *                SQL text, so they must come from trusted code, never from
   *                user input.
   * @param isDesc  per-column descending flags; columns without a matching
   *                entry default to ascending
   */
  public List<RuleInfo> getAPageOfRule(long start, long offset, List<String> orderBy,
      List<Boolean> isDesc) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    boolean hasRid = false;
    StringBuilder sql = new StringBuilder("SELECT * FROM rule ORDER BY ");
    for (int i = 0; i < orderBy.size(); i++) {
      String column = orderBy.get(i);
      if (column.equals("rid")) {
        hasRid = true;
      }
      sql.append(column);
      if (i < isDesc.size() && isDesc.get(i)) {
        sql.append(" desc ");
      }
      // Always separate columns with a comma. The previous code appended the
      // comma only when a matching isDesc entry existed, producing invalid
      // SQL (columns run together, and the final trim below then chopped a
      // character off a column name) whenever isDesc was shorter than orderBy.
      sql.append(",");
    }
    if (!hasRid) {
      sql.append("rid,");
    }
    // Drop the trailing comma left by the loop above.
    sql.setLength(sql.length() - 1);
    sql.append(" LIMIT ").append(start).append(",").append(offset).append(";");
    return jdbcTemplate.query(sql.toString(), new RuleRowMapper());
  }

  /** Fetches one page of rules in the table's natural order. */
  public List<RuleInfo> getAPageOfRule(long start, long offset) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM rule LIMIT " + start + "," + offset + ";";
    return jdbcTemplate.query(sql, new RuleRowMapper());
  }

  /**
   * Inserts a rule and writes the generated id back into the bean.
   *
   * @return the generated rule id
   */
  public long insert(RuleInfo ruleInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("rule");
    simpleJdbcInsert.usingGeneratedKeyColumns("id");
    long id = simpleJdbcInsert.executeAndReturnKey(toMap(ruleInfo)).longValue();
    ruleInfo.setId(id);
    return id;
  }

  /** Updates the check bookkeeping of a rule; returns affected row count. */
  public int update(long ruleId, long lastCheckTime, long checkedCount, int cmdletsGen) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql =
        "UPDATE rule SET last_check_time = ?, checked_count = ?, "
            + "generated_cmdlets = ? WHERE id = ?";
    return jdbcTemplate.update(sql, lastCheckTime, checkedCount, cmdletsGen, ruleId);
  }

  /** Updates state plus check bookkeeping; returns affected row count. */
  public int update(long ruleId, int rs, long lastCheckTime, long checkedCount, int cmdletsGen) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql =
        "UPDATE rule SET state = ?, last_check_time = ?, checked_count = ?, "
            + "generated_cmdlets = ? WHERE id = ?";
    return jdbcTemplate.update(sql, rs, lastCheckTime, checkedCount, cmdletsGen, ruleId);
  }

  /** Updates only the state of a rule; returns affected row count. */
  public int update(long ruleId, int rs) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE rule SET state = ? WHERE id = ?";
    return jdbcTemplate.update(sql, rs, ruleId);
  }

  /** Deletes the rule with the given id (no-op if absent). */
  public void delete(long id) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM rule WHERE id = ?";
    jdbcTemplate.update(sql, id);
  }

  /** Deletes all rules. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM rule";
    jdbcTemplate.update(sql);
  }

  /**
   * Converts a bean to the column map used for inserts; fills submit_time
   * with "now" when unset (0).
   */
  private Map<String, Object> toMap(RuleInfo ruleInfo) {
    Map<String, Object> parameters = new HashMap<>();
    if (ruleInfo.getSubmitTime() == 0) {
      ruleInfo.setSubmitTime(System.currentTimeMillis());
    }
    parameters.put("submit_time", ruleInfo.getSubmitTime());
    parameters.put("rule_text", ruleInfo.getRuleText());
    parameters.put("state", ruleInfo.getState().getValue());
    parameters.put("checked_count", ruleInfo.getNumChecked());
    parameters.put("generated_cmdlets", ruleInfo.getNumCmdsGen());
    parameters.put("last_check_time", ruleInfo.getLastCheckTime());
    return parameters;
  }

  class RuleRowMapper implements RowMapper<RuleInfo> {
    @Override
    public RuleInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      RuleInfo ruleInfo = new RuleInfo();
      ruleInfo.setId(resultSet.getLong("id"));
      ruleInfo.setSubmitTime(resultSet.getLong("submit_time"));
      ruleInfo.setRuleText(resultSet.getString("rule_text"));
      ruleInfo.setState(RuleState.fromValue((int) resultSet.getByte("state")));
      ruleInfo.setNumChecked(resultSet.getLong("checked_count"));
      ruleInfo.setNumCmdsGen(resultSet.getLong("generated_cmdlets"));
      ruleInfo.setLastCheckTime(resultSet.getLong("last_check_time"));
      return ruleInfo;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/FileStateDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/FileStateDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.FileState;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the "file_state" table. Each row records the type and processing
 * stage of one file path; writes go through MySQL-style {@code REPLACE INTO},
 * so inserting an existing path overwrites its previous state.
 */
public class FileStateDao {
  private static final String TABLE_NAME = "file_state";
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public FileStateDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Inserts or overwrites the state row for the given file. */
  public void insertUpdate(FileState fileState) {
    String sql = "REPLACE INTO " + TABLE_NAME + " (path, type, stage) VALUES (?,?,?)";
    new JdbcTemplate(dataSource).update(
        sql,
        fileState.getPath(),
        fileState.getFileType().getValue(),
        fileState.getFileStage().getValue());
  }

  /** Batched variant of {@link #insertUpdate}; returns per-row update counts. */
  public int[] batchInsertUpdate(final FileState[] fileStates) {
    String sql = "REPLACE INTO " + TABLE_NAME + " (path, type, stage) VALUES (?,?,?)";
    JdbcTemplate template = new JdbcTemplate(dataSource);
    return template.batchUpdate(sql, new BatchPreparedStatementSetter() {
      @Override
      public void setValues(PreparedStatement ps, int row) throws SQLException {
        FileState state = fileStates[row];
        ps.setString(1, state.getPath());
        ps.setInt(2, state.getFileType().getValue());
        ps.setInt(3, state.getFileStage().getValue());
      }

      @Override
      public int getBatchSize() {
        return fileStates.length;
      }
    });
  }

  /** Returns the state of one path; throws if no row exists. */
  public FileState getByPath(String path) {
    return new JdbcTemplate(dataSource).queryForObject(
        "SELECT * FROM " + TABLE_NAME + " WHERE path = ?",
        new Object[]{path},
        new FileStateRowMapper());
  }

  /** Returns a path -> state map for the given paths; missing paths are absent. */
  public Map<String, FileState> getByPaths(List<String> paths) {
    MapSqlParameterSource params = new MapSqlParameterSource();
    params.addValue("paths", paths);
    List<FileState> states = new NamedParameterJdbcTemplate(dataSource).query(
        "SELECT * FROM " + TABLE_NAME + " WHERE path IN (:paths)",
        params,
        new FileStateRowMapper());
    Map<String, FileState> result = new HashMap<>();
    for (FileState state : states) {
      result.put(state.getPath(), state);
    }
    return result;
  }

  /** Returns every file state row. */
  public List<FileState> getAll() {
    return new JdbcTemplate(dataSource)
        .query("SELECT * FROM " + TABLE_NAME, new FileStateRowMapper());
  }

  /**
   * Deletes the row for {@code path}; when {@code recursive}, also deletes
   * every row under the directory {@code path} (paths matching "path/%").
   */
  public void deleteByPath(String path, boolean recursive) {
    JdbcTemplate template = new JdbcTemplate(dataSource);
    template.update("DELETE FROM " + TABLE_NAME + " WHERE path = ?", path);
    if (recursive) {
      template.update("DELETE FROM " + TABLE_NAME + " WHERE path LIKE ?", path + "/%");
    }
  }

  /** Deletes the given paths one per batch entry; returns per-row counts. */
  public int[] batchDelete(final List<String> paths) {
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE path = ?";
    return new JdbcTemplate(dataSource).batchUpdate(sql, new BatchPreparedStatementSetter() {
      @Override
      public void setValues(PreparedStatement ps, int row) throws SQLException {
        ps.setString(1, paths.get(row));
      }

      @Override
      public int getBatchSize() {
        return paths.size();
      }
    });
  }

  /** Deletes every row of the table. */
  public void deleteAll() {
    new JdbcTemplate(dataSource).execute("DELETE FROM " + TABLE_NAME);
  }

  class FileStateRowMapper implements RowMapper<FileState> {
    @Override
    public FileState mapRow(ResultSet resultSet, int i)
        throws SQLException {
      return new FileState(resultSet.getString("path"),
          FileState.FileType.fromValue(resultSet.getInt("type")),
          FileState.FileStage.fromValue(resultSet.getInt("stage")));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/SystemInfoDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/SystemInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.smartdata.model.SystemInfo;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the "sys_info" table, a simple property -> value store for
 * system-wide settings.
 */
public class SystemInfoDao {
  private static final String TABLE_NAME = "sys_info";
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public SystemInfoDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every property/value pair. */
  public List<SystemInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME, new SystemInfoRowMapper());
  }

  /** Returns true iff a row exists for the given property. */
  public boolean containsProperty(String property) {
    return !list(property).isEmpty();
  }

  /** Fetches the rows (0 or 1 expected) matching the given property. */
  private List<SystemInfo> list(String property) {
    return new JdbcTemplate(dataSource)
        .query(
            "SELECT * FROM " + TABLE_NAME + " WHERE property = ?",
            new Object[] {property},
            new SystemInfoRowMapper());
  }

  /** Returns the row for the property, or null when absent. */
  public SystemInfo getByProperty(String property) {
    List<SystemInfo> infos = list(property);
    return infos.isEmpty() ? null : infos.get(0);
  }

  /**
   * Returns the rows whose property is in the given list; empty list for
   * null/empty input.
   *
   * <p>Fix: the previous implementation bound the whole comma-joined property
   * list as a single parameter of "IN (?)", which could only ever match a row
   * whose property literally equalled the joined string. We now emit one '?'
   * placeholder per property.
   */
  public List<SystemInfo> getByProperties(List<String> properties) {
    if (properties == null || properties.isEmpty()) {
      return new ArrayList<>();
    }
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    StringBuilder placeholders = new StringBuilder();
    for (int i = 0; i < properties.size(); i++) {
      placeholders.append(i == 0 ? "?" : ",?");
    }
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE property IN (" + placeholders + ")",
        properties.toArray(),
        new SystemInfoRowMapper());
  }

  /** Deletes the row for the given property (no-op if absent). */
  public void delete(String property) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE property = ?";
    jdbcTemplate.update(sql, property);
  }

  /** Inserts one property/value pair. */
  public void insert(SystemInfo systemInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(systemInfo));
  }

  /** Inserts several property/value pairs in one batch. */
  public void insert(SystemInfo[] systemInfos) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    Map<String, Object>[] maps = new Map[systemInfos.length];
    for (int i = 0; i < systemInfos.length; i++) {
      maps[i] = toMap(systemInfos[i]);
    }
    simpleJdbcInsert.executeBatch(maps);
  }

  /** Updates the value of an existing property; returns affected row count. */
  public int update(SystemInfo systemInfo) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE " + TABLE_NAME + " SET value = ? WHERE property = ?";
    return jdbcTemplate.update(sql, systemInfo.getValue(), systemInfo.getProperty());
  }

  /** Deletes every row of the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME;
    jdbcTemplate.execute(sql);
  }

  /** Converts a bean to the column map used for inserts. */
  private Map<String, Object> toMap(SystemInfo systemInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("property", systemInfo.getProperty());
    parameters.put("value", systemInfo.getValue());
    return parameters;
  }

  class SystemInfoRowMapper implements RowMapper<SystemInfo> {
    @Override
    public SystemInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      return new SystemInfo(resultSet.getString("property"), resultSet.getString("value"));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/FileDiffDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/FileDiffDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.smartdata.model.FileDiff;
import org.smartdata.model.FileDiffState;
import org.smartdata.model.FileDiffType;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class FileDiffDao {
private static final String TABLE_NAME = "file_diff";
private DataSource dataSource;
public String uselessFileDiffStates;
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
}
public FileDiffDao(DataSource dataSource) {
this.dataSource = dataSource;
this.uselessFileDiffStates = getUselessFileDiffState();
}
public String getUselessFileDiffState() {
List<String> stateValues = new ArrayList<>();
for (FileDiffState state: FileDiffState.getUselessFileDiffState()) {
stateValues.add(String.valueOf(state.getValue()));
}
return StringUtils.join(stateValues, ",");
}
public List<FileDiff> getAll() {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME, new FileDiffRowMapper());
}
public List<FileDiff> getPendingDiff() {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query(
"SELECT * FROM " + TABLE_NAME + " WHERE state = 0", new FileDiffRowMapper());
}
public List<FileDiff> getByState(FileDiffState fileDiffState) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate
.query("SELECT * FROM " + TABLE_NAME + " WHERE state = ?",
new Object[]{fileDiffState.getValue()}, new FileDiffRowMapper());
}
public List<FileDiff> getByState(String prefix, FileDiffState fileDiffState) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate
.query(
"SELECT * FROM " + TABLE_NAME + " WHERE src LIKE ? and state = ?",
new Object[]{prefix + "%", fileDiffState.getValue()},
new FileDiffRowMapper());
}
public List<FileDiff> getPendingDiff(long rid) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME + " WHERE did = ? and state = 0",
new Object[]{rid},
new FileDiffRowMapper());
}
public List<FileDiff> getPendingDiff(String prefix) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME + " WHERE src LIKE ? and state = 0",
new FileDiffRowMapper(), prefix + "%");
}
public List<FileDiff> getByIds(List<Long> dids) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME + " WHERE did IN (?)",
new Object[]{StringUtils.join(dids, ",")},
new FileDiffRowMapper());
}
public List<FileDiff> getByFileName(String fileName) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME + " WHERE src = ?",
new Object[]{fileName}, new FileDiffRowMapper());
}
public List<String> getSyncPath(int size) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
if (size != 0) {
jdbcTemplate.setMaxRows(size);
}
String sql = "SELECT DISTINCT src FROM " + TABLE_NAME + " WHERE state = ?";
return jdbcTemplate
.queryForList(sql, String.class, FileDiffState.RUNNING.getValue());
}
public FileDiff getById(long did) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.queryForObject("SELECT * FROM " + TABLE_NAME + " WHERE did = ?",
new Object[]{did}, new FileDiffRowMapper());
}
public void delete(long did) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String sql = "DELETE FROM " + TABLE_NAME + " WHERE did = ?";
jdbcTemplate.update(sql, did);
}
public int getUselessRecordsNum() {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String query = "SELECT count(*) FROM " + TABLE_NAME + " WHERE state IN ("
+ uselessFileDiffStates + ")";
return jdbcTemplate.queryForObject(query, Integer.class);
}
public int deleteUselessRecords(int num) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String queryDids = "SELECT did FROM " + TABLE_NAME + " WHERE state IN ("
+ uselessFileDiffStates + ") ORDER BY create_time DESC LIMIT 1000 OFFSET " + num;
List<Long> dids = jdbcTemplate.queryForList(queryDids, Long.class);
if (dids.isEmpty()) {
return 0;
}
String unusedDids = StringUtils.join(dids, ",");
final String deleteUnusedFileDiff = "DELETE FROM " + TABLE_NAME + " where did IN ("
+ unusedDids + ")";
jdbcTemplate.update(deleteUnusedFileDiff);
return dids.size();
}
public long insert(FileDiff fileDiff) {
SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
simpleJdbcInsert.setTableName(TABLE_NAME);
simpleJdbcInsert.usingGeneratedKeyColumns("did");
// return did
long did = simpleJdbcInsert.executeAndReturnKey(toMap(fileDiff)).longValue();
fileDiff.setDiffId(did);
return did;
}
public void insert(FileDiff[] fileDiffs) {
SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
simpleJdbcInsert.setTableName(TABLE_NAME);
Map[] maps = new Map[fileDiffs.length];
for (int i = 0; i < fileDiffs.length; i++) {
maps[i] = toMap(fileDiffs[i]);
}
simpleJdbcInsert.executeBatch(maps);
}
public Long[] insert(List<FileDiff> fileDiffs) {
List<Long> dids = new ArrayList<>();
for (FileDiff fileDiff : fileDiffs) {
dids.add(insert(fileDiff));
}
return dids.toArray(new Long[dids.size()]);
}
public int[] batchUpdate(
final List<Long> dids, final List<FileDiffState> states,
final List<String> parameters) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String sql = "UPDATE " + TABLE_NAME + " SET state = ?, "
+ "parameters = ? WHERE did = ?";
return jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
@Override
public void setValues(PreparedStatement ps, int i) throws SQLException {
ps.setShort(1, (short) states.get(i).getValue());
ps.setString(2, parameters.get(i));
ps.setLong(3, dids.get(i));
}
@Override
public int getBatchSize() {
return dids.size();
}
});
}
public int[] batchUpdate(
final List<Long> dids, final FileDiffState state) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String sql = "UPDATE " + TABLE_NAME + " SET state = ? "
+ "WHERE did = ?";
return jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
@Override
public void setValues(PreparedStatement ps, int i) throws SQLException {
ps.setShort(1, (short) state.getValue());
ps.setLong(2, dids.get(i));
}
@Override
public int getBatchSize() {
return dids.size();
}
});
}
public int update(long did, FileDiffState state) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
String sql = "UPDATE " + TABLE_NAME + " SET state = ? WHERE did = ?";
return jdbcTemplate.update(sql, state.getValue(), did);
}
public int update(long did, String src) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
String sql = "UPDATE " + TABLE_NAME + " SET src = ? WHERE did = ?";
return jdbcTemplate.update(sql, src, did);
}
public int update(long did, FileDiffState state,
String parameters) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
String sql = "UPDATE " + TABLE_NAME + " SET state = ?, "
+ "parameters = ? WHERE did = ?";
return jdbcTemplate.update(sql, state.getValue(), parameters, did);
}
public int[] update(final FileDiff[] fileDiffs) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
String sql = "UPDATE " + TABLE_NAME + " SET "
+ "rid = ?, "
+ "diff_type = ?, "
+ "src = ?, "
+ "parameters = ?, "
+ "state = ?, "
+ "create_time = ? "
+ "WHERE did = ?";
return jdbcTemplate.batchUpdate(sql,
new BatchPreparedStatementSetter() {
@Override
public void setValues(PreparedStatement ps,
int i) throws SQLException {
ps.setLong(1, fileDiffs[i].getRuleId());
ps.setInt(2, fileDiffs[i].getDiffType().getValue());
ps.setString(3, fileDiffs[i].getSrc());
ps.setString(4, fileDiffs[i].getParametersJsonString());
ps.setInt(5, fileDiffs[i].getState().getValue());
ps.setLong(6, fileDiffs[i].getCreateTime());
ps.setLong(7, fileDiffs[i].getDiffId());
}
@Override
public int getBatchSize() {
return fileDiffs.length;
}
});
}
public int update(final FileDiff fileDiff) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
String sql = "UPDATE " + TABLE_NAME + " SET "
+ "rid = ?, "
+ "diff_type = ?, "
+ "src = ?, "
+ "parameters = ?, "
+ "state = ?, "
+ "create_time = ? "
+ "WHERE did = ?";
return jdbcTemplate.update(sql, fileDiff.getRuleId(),
fileDiff.getDiffType().getValue(), fileDiff.getSrc(),
fileDiff.getParametersJsonString(), fileDiff.getState().getValue(),
fileDiff.getCreateTime(), fileDiff.getDiffId());
}
public void deleteAll() {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String sql = "DELETE FROM " + TABLE_NAME;
jdbcTemplate.execute(sql);
}
private Map<String, Object> toMap(FileDiff fileDiff) {
// System.out.println(fileDiff.getDiffType());
Map<String, Object> parameters = new HashMap<>();
parameters.put("did", fileDiff.getDiffId());
parameters.put("rid", fileDiff.getRuleId());
parameters.put("diff_type", fileDiff.getDiffType().getValue());
parameters.put("src", fileDiff.getSrc());
parameters.put("parameters", fileDiff.getParametersJsonString());
parameters.put("state", fileDiff.getState().getValue());
parameters.put("create_time", fileDiff.getCreateTime());
return parameters;
}
  /** Maps a row of the file diff table onto a {@link FileDiff} bean. */
  class FileDiffRowMapper implements RowMapper<FileDiff> {
    @Override
    public FileDiff mapRow(ResultSet resultSet, int i) throws SQLException {
      FileDiff fileDiff = new FileDiff();
      fileDiff.setDiffId(resultSet.getLong("did"));
      fileDiff.setRuleId(resultSet.getLong("rid"));
      // diff_type and state are stored as small integer codes; convert back to enums.
      fileDiff.setDiffType(FileDiffType.fromValue((int) resultSet.getByte("diff_type")));
      fileDiff.setSrc(resultSet.getString("src"));
      // parameters column holds a JSON-serialized map; the bean parses it.
      fileDiff.setParametersFromJsonString(resultSet.getString("parameters"));
      fileDiff.setState(FileDiffState.fromValue((int) resultSet.getByte("state")));
      fileDiff.setCreateTime(resultSet.getLong("create_time"));
      return fileDiff;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/FileInfoDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/FileInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.FileInfo;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the {@code file} table, which stores per-file metadata: path, file
 * id, length, block info, timestamps, ownership, permission, storage policy
 * ({@code sid}) and erasure coding policy id.
 *
 * <p>Every call builds a fresh {@link JdbcTemplate} or
 * {@link NamedParameterJdbcTemplate} on the injected {@link DataSource}; the
 * DAO keeps no other state.
 */
public class FileInfoDao {
  private DataSource dataSource;

  /** Allows the data source to be (re)injected after construction. */
  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public FileInfoDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every file record. */
  public List<FileInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM file", new FileInfoRowMapper());
  }

  /** Returns the records whose path starts with the given prefix. */
  public List<FileInfo> getFilesByPrefix(String path) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM file WHERE path LIKE ?",
        new FileInfoRowMapper(), path + "%");
  }

  /** Returns the records whose path starts with the given prefix, sorted by path. */
  public List<FileInfo> getFilesByPrefixInOrder(String path) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM file WHERE path LIKE ? ORDER BY path ASC",
        new FileInfoRowMapper(), path + "%");
  }

  /** Returns the records whose exact path is contained in {@code paths}. */
  public List<FileInfo> getFilesByPaths(Collection<String> paths) {
    NamedParameterJdbcTemplate namedParameterJdbcTemplate =
        new NamedParameterJdbcTemplate(dataSource);
    String sql = "SELECT * FROM file WHERE path IN (:paths)";
    MapSqlParameterSource parameterSource = new MapSqlParameterSource();
    parameterSource.addValue("paths", paths);
    return namedParameterJdbcTemplate.query(sql,
        parameterSource, new FileInfoRowMapper());
  }

  /** Returns the record with the given file id. */
  public FileInfo getById(long fid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM file WHERE fid = ?",
        new Object[]{fid}, new FileInfoRowMapper());
  }

  /** Returns the record with the given exact path. */
  public FileInfo getByPath(String path) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM file WHERE path = ?",
        new Object[]{path}, new FileInfoRowMapper());
  }

  /**
   * Maps each path in {@code paths} that exists in the table to its file id.
   *
   * <p>Delegates to {@link #getFilesByPaths(Collection)} instead of
   * duplicating the same named-parameter query inline, as before.
   */
  public Map<String, Long> getPathFids(Collection<String> paths)
      throws SQLException {
    Map<String, Long> pathToId = new HashMap<>();
    for (FileInfo file : getFilesByPaths(paths)) {
      pathToId.put(file.getPath(), file.getFileId());
    }
    return pathToId;
  }

  /** Maps each file id in {@code ids} that exists in the table to its path. */
  public Map<Long, String> getFidPaths(Collection<Long> ids)
      throws SQLException {
    NamedParameterJdbcTemplate namedParameterJdbcTemplate =
        new NamedParameterJdbcTemplate(dataSource);
    MapSqlParameterSource parameterSource = new MapSqlParameterSource();
    parameterSource.addValue("ids", ids);
    List<FileInfo> files = namedParameterJdbcTemplate.query(
        "SELECT * FROM file WHERE fid IN (:ids)",
        parameterSource, new FileInfoRowMapper());
    Map<Long, String> idToPath = new HashMap<>();
    for (FileInfo file : files) {
      idToPath.put(file.getFileId(), file.getPath());
    }
    return idToPath;
  }

  /** Inserts one file record. */
  public void insert(FileInfo fileInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("file");
    simpleJdbcInsert.execute(toMap(fileInfo));
  }

  /** Inserts several file records in one batch. */
  public void insert(FileInfo[] fileInfos) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("file");
    Map<String, Object>[] maps = new Map[fileInfos.length];
    for (int i = 0; i < fileInfos.length; i++) {
      maps[i] = toMap(fileInfos[i]);
    }
    simpleJdbcInsert.executeBatch(maps);
  }

  /**
   * Updates the storage policy id ({@code sid}) of the record at {@code path}.
   *
   * @return number of rows affected
   */
  public int update(String path, int storagePolicy) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "UPDATE file SET sid =? WHERE path = ?;";
    return jdbcTemplate.update(sql, storagePolicy, path);
  }

  /** Deletes the record with the given file id. */
  public void deleteById(long fid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.update("DELETE FROM file WHERE fid = ?", fid);
  }

  /** Deletes the record with the given exact path. */
  public void deleteByPath(String path) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.update("DELETE FROM file WHERE path = ?", path);
  }

  /** Removes every row from the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.execute("DELETE FROM file");
  }

  /** Converts a {@link FileInfo} into the column map used for inserts. */
  private Map<String, Object> toMap(FileInfo fileInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("path", fileInfo.getPath());
    parameters.put("fid", fileInfo.getFileId());
    parameters.put("length", fileInfo.getLength());
    parameters.put("block_replication", fileInfo.getBlockReplication());
    parameters.put("block_size", fileInfo.getBlocksize());
    parameters.put("modification_time", fileInfo.getModificationTime());
    parameters.put("access_time", fileInfo.getAccessTime());
    parameters.put("is_dir", fileInfo.isdir());
    parameters.put("sid", fileInfo.getStoragePolicy());
    parameters.put("owner", fileInfo.getOwner());
    parameters.put("owner_group", fileInfo.getGroup());
    parameters.put("permission", fileInfo.getPermission());
    parameters.put("ec_policy_id", fileInfo.getErasureCodingPolicy());
    return parameters;
  }

  /** Maps a row of the {@code file} table onto a {@link FileInfo}. */
  class FileInfoRowMapper implements RowMapper<FileInfo> {
    @Override
    public FileInfo mapRow(ResultSet resultSet, int i)
        throws SQLException {
      return new FileInfo(resultSet.getString("path"),
          resultSet.getLong("fid"),
          resultSet.getLong("length"),
          resultSet.getBoolean("is_dir"),
          resultSet.getShort("block_replication"),
          resultSet.getLong("block_size"),
          resultSet.getLong("modification_time"),
          resultSet.getLong("access_time"),
          resultSet.getShort("permission"),
          resultSet.getString("owner"),
          resultSet.getString("owner_group"),
          resultSet.getByte("sid"),
          resultSet.getByte("ec_policy_id"));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/GeneralDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/GeneralDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
public class GeneralDao {
private DataSource dataSource;
public GeneralDao(DataSource dataSource) {
this.dataSource = dataSource;
}
public Long queryForLong(String sql) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.queryForObject(sql, Long.class);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/ErasureCodingPolicyDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/ErasureCodingPolicyDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.ErasureCodingPolicyInfo;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the {@code ec_policy} table, which maps erasure coding policy ids
 * to policy names.
 *
 * <p>Consistency fix: the column-name constants {@code ID} and {@code NAME}
 * were declared but the queries and row mapper used raw string literals;
 * everything now goes through the constants.
 */
public class ErasureCodingPolicyDao {
  private static final String TABLE_NAME = "ec_policy";
  private static final String ID = "id";
  private static final String NAME = "policy_name";

  private DataSource dataSource;

  public ErasureCodingPolicyDao(DataSource dataSource) throws MetaStoreException {
    this.dataSource = dataSource;
  }

  /** Returns the policy with the given id. */
  public ErasureCodingPolicyInfo getEcPolicyById(byte id) throws MetaStoreException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT * FROM " + TABLE_NAME + " WHERE " + ID + "=?",
        new Object[]{id}, new EcPolicyRowMapper());
  }

  /** Returns the policy with the given name. */
  public ErasureCodingPolicyInfo getEcPolicyByName(String policyName) throws MetaStoreException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT * FROM " + TABLE_NAME + " WHERE " + NAME + "=?",
        new Object[]{policyName}, new EcPolicyRowMapper());
  }

  /** Returns every stored policy. */
  public List<ErasureCodingPolicyInfo> getAllEcPolicies() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME, new EcPolicyRowMapper());
  }

  /** Inserts one policy. */
  public void insert(ErasureCodingPolicyInfo ecPolicy) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(ecPolicy));
  }

  /** Inserts several policies in one batch. */
  public void insert(List<ErasureCodingPolicyInfo> ecInfos) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    Map<String, Object>[] maps = new Map[ecInfos.size()];
    for (int i = 0; i < ecInfos.size(); i++) {
      maps[i] = toMap(ecInfos.get(i));
    }
    simpleJdbcInsert.executeBatch(maps);
  }

  /** Removes every row from the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.execute("DELETE FROM " + TABLE_NAME);
  }

  /** Converts a policy into the column map used for inserts. */
  private Map<String, Object> toMap(ErasureCodingPolicyInfo ecPolicy) {
    Map<String, Object> map = new HashMap<>();
    map.put(ID, ecPolicy.getID());
    map.put(NAME, ecPolicy.getEcPolicyName());
    return map;
  }

  /** Maps an {@code ec_policy} row onto an {@link ErasureCodingPolicyInfo}. */
  class EcPolicyRowMapper implements RowMapper<ErasureCodingPolicyInfo> {
    @Override
    public ErasureCodingPolicyInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      // Read through the shared column constants so the mapper stays in
      // sync with toMap() and the query helpers above.
      return new ErasureCodingPolicyInfo(resultSet.getByte(ID), resultSet.getString(NAME));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/CompressionFileDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/CompressionFileDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.smartdata.model.CompressionFileState;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* CompressionFileDao.
*/
/**
 * DAO for the {@code compression_file} table. A row records how one file was
 * compressed: codec implementation, buffer size, original/compressed lengths,
 * and the original/compressed position arrays serialized as JSON.
 */
public class CompressionFileDao {
  private static final String TABLE_NAME = "compression_file";
  // Gson is stateless as used here, so one shared instance replaces the
  // previous per-call allocations.
  private static final Gson GSON = new Gson();

  private DataSource dataSource;

  /** Allows the data source to be (re)injected after construction. */
  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public CompressionFileDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Inserts a new compression record. */
  public void insert(CompressionFileState compressionInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(compressionInfo));
  }

  /**
   * Inserts or overwrites the compression record for a path.
   * NOTE(review): REPLACE INTO is a MySQL-style upsert; presumably the target
   * databases support it — confirm against the supported metastore backends.
   */
  public void insertUpdate(CompressionFileState compressionInfo) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "REPLACE INTO " + TABLE_NAME
        + "(path, buffer_size, compression_impl, "
        + "original_length, compressed_length, originalPos, compressedPos)"
        + " VALUES(?,?,?,?,?,?,?);";
    jdbcTemplate.update(sql, compressionInfo.getPath(),
        compressionInfo.getBufferSize(),
        compressionInfo.getCompressionImpl(),
        compressionInfo.getOriginalLength(),
        compressionInfo.getCompressedLength(),
        GSON.toJson(compressionInfo.getOriginalPos()),
        GSON.toJson(compressionInfo.getCompressedPos()));
  }

  /** Deletes the record for the given exact path. */
  public void deleteByPath(String filePath) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.update("DELETE FROM " + TABLE_NAME + " WHERE path = ?", filePath);
  }

  /** Removes every row from the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.execute("DELETE FROM " + TABLE_NAME);
  }

  /** Returns every compression record. */
  public List<CompressionFileState> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME,
        new CompressFileRowMapper());
  }

  /** Returns the compression record for the given exact path. */
  public CompressionFileState getInfoByPath(String filePath) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM " + TABLE_NAME + " WHERE path = ?",
        new Object[]{filePath}, new CompressFileRowMapper());
  }

  /** Converts a {@link CompressionFileState} into the column map for inserts. */
  private Map<String, Object> toMap(CompressionFileState compressionInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("path", compressionInfo.getPath());
    parameters.put("buffer_size", compressionInfo.getBufferSize());
    parameters.put("compression_impl", compressionInfo.getCompressionImpl());
    parameters.put("original_length", compressionInfo.getOriginalLength());
    parameters.put("compressed_length", compressionInfo.getCompressedLength());
    // Position arrays are persisted as JSON text columns.
    parameters.put("originalPos", GSON.toJson(compressionInfo.getOriginalPos()));
    parameters.put("compressedPos", GSON.toJson(compressionInfo.getCompressedPos()));
    return parameters;
  }

  /** Maps a row back onto a {@link CompressionFileState}, parsing the JSON arrays. */
  class CompressFileRowMapper implements RowMapper<CompressionFileState> {
    @Override
    public CompressionFileState mapRow(ResultSet resultSet, int i) throws SQLException {
      Long[] originalPos = GSON.fromJson(resultSet.getString("originalPos"),
          new TypeToken<Long[]>() { }.getType());
      Long[] compressedPos = GSON.fromJson(resultSet.getString("compressedPos"),
          new TypeToken<Long[]>() { }.getType());
      return CompressionFileState.newBuilder()
          .setFileName(resultSet.getString("path"))
          .setBufferSize(resultSet.getInt("buffer_size"))
          .setCompressImpl(resultSet.getString("compression_impl"))
          .setOriginalLength(resultSet.getLong("original_length"))
          .setCompressedLength(resultSet.getLong("compressed_length"))
          .setOriginalPos(originalPos)
          .setCompressedPos(compressedPos)
          .build();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/DataNodeInfoDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/DataNodeInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.DataNodeInfo;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the {@code datanode_info} table, which tracks datanode identity
 * (uuid, hostname, rpc address), cache capacity/usage and location.
 *
 * <p>Cleanup: the three insert overloads previously repeated the same
 * SimpleJdbcInsert setup and toMap loop; they now share a single code path.
 */
public class DataNodeInfoDao {
  private static final String TABLE_NAME = "datanode_info";

  private DataSource dataSource;

  /** Allows the data source to be (re)injected after construction. */
  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public DataNodeInfoDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every datanode record. */
  public List<DataNodeInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME,
        new DataNodeInfoRowMapper());
  }

  /** Returns the record(s) with the given uuid. */
  public List<DataNodeInfo> getByUuid(String uuid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE uuid = ?",
        new Object[]{uuid}, new DataNodeInfoRowMapper());
  }

  /** Inserts one datanode record. */
  public void insert(DataNodeInfo dataNodeInfo) {
    newInsert().execute(toMap(dataNodeInfo));
  }

  /** Inserts several datanode records in one batch. */
  public void insert(DataNodeInfo[] dataNodeInfos) {
    Map<String, Object>[] maps = new Map[dataNodeInfos.length];
    for (int i = 0; i < dataNodeInfos.length; i++) {
      maps[i] = toMap(dataNodeInfos[i]);
    }
    newInsert().executeBatch(maps);
  }

  /** Inserts several datanode records in one batch. */
  public void insert(List<DataNodeInfo> dataNodeInfos) {
    // Delegate to the array overload instead of repeating the batch logic.
    insert(dataNodeInfos.toArray(new DataNodeInfo[0]));
  }

  /** Builds a SimpleJdbcInsert bound to the datanode_info table. */
  private SimpleJdbcInsert newInsert() {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    return simpleJdbcInsert;
  }

  /** Deletes the record(s) with the given uuid. */
  public void delete(String uuid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.update("DELETE FROM " + TABLE_NAME + " WHERE uuid = ?", uuid);
  }

  /** Removes every row from the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.update("DELETE FROM " + TABLE_NAME);
  }

  /** Converts a {@link DataNodeInfo} into the column map used for inserts. */
  private Map<String, Object> toMap(DataNodeInfo dataNodeInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("uuid", dataNodeInfo.getUuid());
    parameters.put("hostname", dataNodeInfo.getHostname());
    parameters.put("rpcAddress", dataNodeInfo.getRpcAddress());
    parameters.put("cache_capacity", dataNodeInfo.getCacheCapacity());
    parameters.put("cache_used", dataNodeInfo.getCacheUsed());
    parameters.put("location", dataNodeInfo.getLocation());
    return parameters;
  }

  /** Maps a {@code datanode_info} row onto a {@link DataNodeInfo}. */
  class DataNodeInfoRowMapper implements RowMapper<DataNodeInfo> {
    @Override
    public DataNodeInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      return DataNodeInfo.newBuilder()
          .setUuid(resultSet.getString("uuid"))
          .setHostName(resultSet.getString("hostname"))
          .setRpcAddress(resultSet.getString("rpcAddress"))
          .setCacheCapacity(resultSet.getLong("cache_capacity"))
          .setCacheUsed(resultSet.getLong("cache_used"))
          .setLocation(resultSet.getString("location"))
          .build();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/SmallFileDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/SmallFileDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.CompactFileState;
import org.smartdata.model.FileContainerInfo;
import org.smartdata.model.FileState;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;
/**
 * DAO for the {@code small_file} table. Each row maps a small file's path to
 * its location inside a container file (container path, offset, length).
 */
public class SmallFileDao {
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public SmallFileDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Inserts or overwrites the compact-file record for one small file. */
  public void insertUpdate(CompactFileState compactFileState) {
    String sql = "REPLACE INTO small_file (path, container_file_path, offset, length)"
        + " VALUES (?,?,?,?)";
    FileContainerInfo info = compactFileState.getFileContainerInfo();
    new JdbcTemplate(dataSource).update(sql, compactFileState.getPath(),
        info.getContainerFilePath(), info.getOffset(), info.getLength());
  }

  /** Upserts a batch of compact-file records in one JDBC batch. */
  public int[] batchInsertUpdate(final CompactFileState[] fileStates) {
    String sql = "REPLACE INTO small_file (path, container_file_path, offset, length)"
        + " VALUES (?,?,?,?)";
    return new JdbcTemplate(dataSource).batchUpdate(sql, new BatchPreparedStatementSetter() {
      @Override
      public void setValues(PreparedStatement statement, int row) throws SQLException {
        CompactFileState state = fileStates[row];
        statement.setString(1, state.getPath());
        statement.setString(2, state.getFileContainerInfo().getContainerFilePath());
        statement.setLong(3, state.getFileContainerInfo().getOffset());
        statement.setLong(4, state.getFileContainerInfo().getLength());
      }

      @Override
      public int getBatchSize() {
        return fileStates.length;
      }
    });
  }

  /**
   * Deletes the record for {@code path}; when {@code recursive} is set, also
   * deletes every record under that directory subtree (paths matching
   * {@code path + "/%"}).
   */
  public void deleteByPath(String path, boolean recursive) {
    JdbcTemplate jdbc = new JdbcTemplate(dataSource);
    jdbc.update("DELETE FROM small_file WHERE path = ?", path);
    if (recursive) {
      jdbc.update("DELETE FROM small_file WHERE path LIKE ?", path + "/%");
    }
  }

  /** Deletes the given paths within a single JDBC batch. */
  public int[] batchDelete(final List<String> paths) {
    final String sql = "DELETE FROM small_file WHERE path = ?";
    return new JdbcTemplate(dataSource).batchUpdate(sql, new BatchPreparedStatementSetter() {
      @Override
      public void setValues(PreparedStatement statement, int row) throws SQLException {
        statement.setString(1, paths.get(row));
      }

      @Override
      public int getBatchSize() {
        return paths.size();
      }
    });
  }

  /** Loads the compact-file state stored for an exact path. */
  public FileState getFileStateByPath(String path) {
    return new JdbcTemplate(dataSource).queryForObject(
        "SELECT * FROM small_file WHERE path = ?",
        new Object[]{path}, new FileStateRowMapper());
  }

  /** Lists the small files packed into the given container file. */
  public List<String> getSmallFilesByContainerFile(String containerFilePath) {
    return new JdbcTemplate(dataSource).queryForList(
        "SELECT path FROM small_file where container_file_path = ?",
        String.class, containerFilePath);
  }

  /** Lists every distinct container file referenced by the table. */
  public List<String> getAllContainerFiles() {
    return new JdbcTemplate(dataSource).queryForList(
        "SELECT DISTINCT container_file_path FROM small_file", String.class);
  }

  /** Maps a {@code small_file} row onto a {@link CompactFileState}. */
  private class FileStateRowMapper implements RowMapper<FileState> {
    @Override
    public FileState mapRow(ResultSet rs, int rowNum) throws SQLException {
      FileContainerInfo containerInfo = new FileContainerInfo(
          rs.getString("container_file_path"),
          rs.getLong("offset"),
          rs.getLong("length"));
      return new CompactFileState(rs.getString("path"), containerInfo);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/CountEvictor.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/CountEvictor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.metastore.MetaStore;
import java.util.Iterator;
/**
 * A {@link TableEvictor} that bounds the number of retained access count
 * tables: whenever the reported size exceeds {@code maxCount}, the first
 * {@code size - maxCount} tables in the deque's iteration order are dropped
 * and removed.
 */
public class CountEvictor extends TableEvictor {
  private final int maxCount;

  public CountEvictor(MetaStore adapter, int count) {
    super(adapter);
    this.maxCount = count;
  }

  @Override
  public void evictTables(AccessCountTableDeque tables, int size) {
    // Note: uses the caller-supplied size, not tables.size().
    int excess = size - maxCount;
    if (excess <= 0) {
      return;
    }
    Iterator<AccessCountTable> iterator = tables.iterator();
    while (excess > 0 && iterator.hasNext()) {
      AccessCountTable table = iterator.next();
      this.dropTable(table);
      iterator.remove();
      excess--;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/BackUpInfoDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/BackUpInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.smartdata.model.BackUpInfo;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class BackUpInfoDao {
  // JDBC data source all queries run against.
  private DataSource dataSource;

  /** Allows the data source to be (re)injected after construction. */
  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public BackUpInfoDao(DataSource dataSource){
    this.dataSource = dataSource;
  }
public List<BackUpInfo> getAll() {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query("SELECT * FROM backup_file", new BackUpInfoRowMapper());
}
public List<BackUpInfo> getByIds(List<Long> rids) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query("SELECT * FROM backup_file WHERE rid IN (?)",
new Object[]{StringUtils.join(rids, ",")},
new BackUpInfoRowMapper());
}
public int getCountByRid(int rid){
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.queryForObject(
"SELECT COUNT(*) FROM backup_file WHERE rid = ?", new Object[rid], Integer.class);
}
public BackUpInfo getByRid(long rid) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.queryForObject("SELECT * FROM backup_file WHERE rid = ?",
new Object[]{rid}, new BackUpInfoRowMapper());
}
public List<BackUpInfo> getBySrc(String src) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query(
"SELECT * FROM backup_file WHERE src = ?", new Object[] {src}, new BackUpInfoRowMapper());
}
public List<BackUpInfo> getByDest(String dest) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query(
"SELECT * FROM backup_file WHERE dest = ?", new Object[] {dest}, new BackUpInfoRowMapper());
}
public void delete(long rid) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String sql = "DELETE FROM backup_file WHERE rid = ?";
jdbcTemplate.update(sql, rid);
}
public void insert(BackUpInfo backUpInfo) {
SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
simpleJdbcInsert.setTableName("backup_file");
simpleJdbcInsert.execute(toMap(backUpInfo));
}
public void insert(BackUpInfo[] backUpInfos) {
SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
simpleJdbcInsert.setTableName("backup_file");
Map<String, Object>[] maps = new Map[backUpInfos.length];
for (int i = 0; i < backUpInfos.length; i++) {
maps[i] = toMap(backUpInfos[i]);
}
simpleJdbcInsert.executeBatch(maps);
}
public int update(long rid, long period) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
String sql = "UPDATE backup_file SET period = ? WHERE rid = ?";
return jdbcTemplate.update(sql, period, rid);
}
public void deleteAll(){
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
final String sql = "DELETE FROM backup_file";
jdbcTemplate.execute(sql);
}
private Map<String, Object> toMap(BackUpInfo backUpInfo) {
Map<String, Object> parameters = new HashMap<>();
parameters.put("rid", backUpInfo.getRid());
parameters.put("src", backUpInfo.getSrc());
parameters.put("dest", backUpInfo.getDest());
parameters.put("period", backUpInfo.getPeriod());
return parameters;
}
class BackUpInfoRowMapper implements RowMapper<BackUpInfo> {
@Override
public BackUpInfo mapRow(ResultSet resultSet, int i) throws SQLException {
BackUpInfo backUpInfo = new BackUpInfo();
backUpInfo.setRid(resultSet.getLong("rid"));
backUpInfo.setSrc(resultSet.getString("src"));
backUpInfo.setDest(resultSet.getString("dest"));
backUpInfo.setPeriod(resultSet.getLong("period"));
return backUpInfo;
}
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/MetaStoreHelper.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/MetaStoreHelper.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.ResultSetExtractor;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
public class MetaStoreHelper {
private DataSource dataSource;
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
}
public MetaStoreHelper(DataSource dataSource) {
this.dataSource = dataSource;
}
public void execute(String sql) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
jdbcTemplate.execute(sql);
}
public void dropTable(String tableName) {
String sql = "DROP TABLE IF EXISTS " + tableName;
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
jdbcTemplate.execute(sql);
}
public void dropView(String viewName) {
String sql = "DROP VIEW IF EXISTS " + viewName;
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
jdbcTemplate.execute(sql);
}
public List<String> getFilesPath(String sql) {
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.query(sql, new ResultSetExtractor<List<String>>() {
public List<String> extractData(ResultSet rs) throws SQLException {
List<String> files = new ArrayList<>();
while (rs.next()) {
files.add(rs.getString(1));
}
return files;
}
});
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/TableAddOpListener.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/TableAddOpListener.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metastore.utils.Constants;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
/**
 * Reacts to a fine-grained access count table being added by aggregating
 * fine-grained tables into the enclosing coarse-grained table (minute, hour
 * or day) once that coarser interval has fully elapsed.
 */
public abstract class TableAddOpListener {
  static final Logger LOG = LoggerFactory.getLogger(TableAddOpListener.class);

  // Coarse-grained tables whose aggregation has been submitted but not yet
  // completed. Mutated by both the caller thread and executor threads, so
  // every access synchronizes on the set itself.
  private final Set<AccessCountTable> tablesUnderAggregating;
  AccessCountTableDeque coarseGrainedTableDeque;
  AccessCountTableAggregator tableAggregator;
  ExecutorService executorService;

  TableAddOpListener(
      AccessCountTableDeque deque,
      AccessCountTableAggregator aggregator,
      ExecutorService executorService) {
    this.coarseGrainedTableDeque = deque;
    this.tableAggregator = aggregator;
    this.executorService = executorService;
    this.tablesUnderAggregating = new HashSet<>();
  }

  /**
   * Called when {@code table} is added to the fine-grained deque. If the
   * coarse-grained table covering {@code table}'s end time does not exist
   * yet, asynchronously aggregate the relevant fine-grained tables into it.
   */
  public void tableAdded(AccessCountTableDeque fineGrainedTableDeque, AccessCountTable table) {
    final AccessCountTable lastCoarseGrainedTable = lastCoarseGrainedTableFor(table.getEndTime());
    // Todo: optimize contains
    if (coarseGrainedTableDeque.contains(lastCoarseGrainedTable)) {
      return;
    }
    final List<AccessCountTable> tablesToAggregate =
        fineGrainedTableDeque.getTables(
            lastCoarseGrainedTable.getStartTime(), lastCoarseGrainedTable.getEndTime());
    if (tablesToAggregate.isEmpty()) {
      return;
    }
    // Atomic check-then-mark so two concurrent callers cannot both submit
    // an aggregation for the same coarse-grained table.
    synchronized (tablesUnderAggregating) {
      if (!tablesUnderAggregating.add(lastCoarseGrainedTable)) {
        return;
      }
    }
    executorService.submit(
        new Runnable() {
          @Override
          public void run() {
            try {
              tableAggregator.aggregate(lastCoarseGrainedTable, tablesToAggregate);
              coarseGrainedTableDeque.addAndNotifyListener(lastCoarseGrainedTable);
            } catch (MetaStoreException e) {
              LOG.error(
                  "Add AccessCount Table {} error", lastCoarseGrainedTable.getTableName(), e);
            } finally {
              // Unmark even on failure; previously a failed aggregation left
              // the table marked forever, blocking any retry.
              synchronized (tablesUnderAggregating) {
                tablesUnderAggregating.remove(lastCoarseGrainedTable);
              }
            }
          }
        });
  }

  /** Returns the coarse-grained table whose interval ends at or before {@code endTime}. */
  public abstract AccessCountTable lastCoarseGrainedTableFor(Long endTime);

  /** Aggregates second-level tables into one-minute tables. */
  public static class MinuteTableListener extends TableAddOpListener {
    public MinuteTableListener(
        AccessCountTableDeque deque,
        AccessCountTableAggregator aggregator,
        ExecutorService service) {
      super(deque, aggregator, service);
    }

    @Override
    public AccessCountTable lastCoarseGrainedTableFor(Long endTime) {
      Long lastEnd = endTime - (endTime % Constants.ONE_MINUTE_IN_MILLIS);
      Long lastStart = lastEnd - Constants.ONE_MINUTE_IN_MILLIS;
      return new AccessCountTable(lastStart, lastEnd);
    }
  }

  /** Aggregates minute-level tables into one-hour tables. */
  public static class HourTableListener extends TableAddOpListener {
    public HourTableListener(
        AccessCountTableDeque deque,
        AccessCountTableAggregator aggregator,
        ExecutorService service) {
      super(deque, aggregator, service);
    }

    @Override
    public AccessCountTable lastCoarseGrainedTableFor(Long endTime) {
      Long lastEnd = endTime - (endTime % Constants.ONE_HOUR_IN_MILLIS);
      Long lastStart = lastEnd - Constants.ONE_HOUR_IN_MILLIS;
      return new AccessCountTable(lastStart, lastEnd);
    }
  }

  /** Aggregates hour-level tables into one-day tables. */
  public static class DayTableListener extends TableAddOpListener {
    public DayTableListener(
        AccessCountTableDeque deque,
        AccessCountTableAggregator aggregator,
        ExecutorService service) {
      super(deque, aggregator, service);
    }

    @Override
    public AccessCountTable lastCoarseGrainedTableFor(Long endTime) {
      Long lastEnd = endTime - (endTime % Constants.ONE_DAY_IN_MILLIS);
      Long lastStart = lastEnd - Constants.ONE_DAY_IN_MILLIS;
      return new AccessCountTable(lastStart, lastEnd);
    }
  }

  // Todo: WeekTableListener, MonthTableListener, YearTableListener
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/TableEvictor.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/TableEvictor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
/**
 * Base class for eviction strategies that decide which access count tables
 * to drop when the deque holds more than the allowed number of tables.
 */
public abstract class TableEvictor {
  public static final Logger LOG = LoggerFactory.getLogger(TableEvictor.class);

  private final MetaStore metaStore;

  public TableEvictor(MetaStore metaStore) {
    this.metaStore = metaStore;
  }

  /**
   * Drops the backing table and its bookkeeping record. Failures are
   * logged rather than propagated.
   */
  public void dropTable(AccessCountTable accessCountTable) {
    String name = accessCountTable.getTableName();
    try {
      metaStore.dropTable(name);
      metaStore.deleteAccessCountTable(accessCountTable);
      LOG.debug("Dropped access count table " + name);
    } catch (MetaStoreException e) {
      LOG.error("Drop access count table {} failed", name, e);
    }
  }

  /** Evicts tables from {@code tables} until at most {@code size} remain. */
  abstract void evictTables(AccessCountTableDeque tables, int size);
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTable.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTable.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import com.google.common.annotations.VisibleForTesting;
import org.smartdata.metastore.utils.TimeGranularity;
import org.smartdata.metastore.utils.TimeUtils;
import java.util.Random;
/**
 * Describes one access count table: the time interval it covers, the derived
 * granularity, and whether it is an ephemeral (temporary view) table.
 *
 * <p>Equality is defined by start time, end time and granularity only; the
 * table name and the ephemeral flag are intentionally excluded so that an
 * ephemeral view compares equal to the persistent table for the same range.
 */
public class AccessCountTable {
  // Shared RNG for ephemeral view-name suffixes; avoids allocating a new
  // Random on every name generation.
  private static final Random RANDOM = new Random();

  private final String tableName;
  private final Long startTime;
  private final Long endTime;
  private final TimeGranularity granularity;
  private final boolean isEphemeral;

  public AccessCountTable(Long startTime, Long endTime) {
    this(startTime, endTime, false);
  }

  public AccessCountTable(Long startTime, Long endTime, boolean isEphemeral) {
    this(getTableName(startTime, endTime, isEphemeral), startTime, endTime, isEphemeral);
  }

  @VisibleForTesting
  protected AccessCountTable(String name, Long startTime, Long endTime, boolean isEphemeral) {
    this.startTime = startTime;
    this.endTime = endTime;
    this.granularity = TimeUtils.getGranularity(endTime - startTime);
    this.tableName = name;
    this.isEphemeral = isEphemeral;
  }

  public String getTableName() {
    return tableName;
  }

  /**
   * Builds the table name from the interval; ephemeral views get a random
   * suffix so concurrent views over the same range never collide.
   */
  private static String getTableName(Long startTime, Long endTime, boolean isView) {
    String tableName = "accessCount_" + startTime + "_" + endTime;
    if (isView) {
      tableName += "_view_" + Math.abs(RANDOM.nextInt());
    }
    return tableName;
  }

  public Long getStartTime() {
    return startTime;
  }

  public Long getEndTime() {
    return endTime;
  }

  public TimeGranularity getGranularity() {
    return granularity;
  }

  @Override
  public boolean equals(Object o) {
    if (o == null) {
      return false;
    }
    if (o == this) {
      return true;
    }
    if (o.getClass() != getClass()) {
      return false;
    }
    AccessCountTable other = (AccessCountTable) o;
    return other.getStartTime().equals(this.startTime)
        && other.getEndTime().equals(this.endTime)
        && other.getGranularity().equals(this.granularity);
  }

  @Override
  public int hashCode() {
    // Must hash only the fields equals() compares. The previous version also
    // mixed in tableName and isEphemeral, so two equal tables (e.g. a view
    // and the real table for the same range) could land in different hash
    // buckets, breaking HashSet/HashMap lookups.
    int result = startTime.hashCode();
    result = 31 * result + endTime.hashCode();
    result = 31 * result + granularity.hashCode();
    return result;
  }

  @Override
  public String toString() {
    return String.format(
        "AccessCountTable %s starts from %s ends with %s and granularity is %s",
        this.tableName, this.startTime, this.endTime, this.granularity);
  }

  public boolean isEphemeral() {
    return isEphemeral;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/StoragePolicyDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/StoragePolicyDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.StoragePolicy;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the {@code storage_policy} table, with an in-memory cache mapping
 * storage policy id (sid) to policy name. The cache is loaded once from the
 * database when the data source is set.
 */
public class StoragePolicyDao {
  private static final String TABLE_NAME = "storage_policy";

  private DataSource dataSource;
  // Cache of sid -> policy name, kept in sync with the table by the
  // synchronized insert/delete methods below.
  private Map<Integer, String> data = null;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
    this.data = getStoragePolicyFromDB();
  }

  public StoragePolicyDao(DataSource dataSource) {
    this.dataSource = dataSource;
    this.data = getStoragePolicyFromDB();
  }

  /** Loads the whole table into a sid -> name map. */
  private Map<Integer, String> getStoragePolicyFromDB() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM " + TABLE_NAME;
    // Reuse the shared row mapper instead of duplicating it anonymously.
    List<StoragePolicy> list = jdbcTemplate.query(sql, new StoragePolicyRowMapper());
    Map<Integer, String> map = new HashMap<>();
    for (StoragePolicy s : list) {
      map.put((int) (s.getSid()), s.getPolicyName());
    }
    return map;
  }

  /** Returns the cached sid -> policy name map. */
  public Map<Integer, String> getStoragePolicyIdNameMap() {
    return this.data;
  }

  /** Returns the policy name for the given sid, or null if unknown. */
  public String getStoragePolicyName(int sid) {
    return this.data.get(sid);
  }

  /** Returns the sid for the given policy name, or -1 if unknown. */
  public Integer getStorageSid(String policyName) {
    for (Map.Entry<Integer, String> entry : this.data.entrySet()) {
      if (entry.getValue().equals(policyName)) {
        return entry.getKey();
      }
    }
    return -1;
  }

  /** Inserts the policy if its name is not already known, updating the cache. */
  public synchronized void insertStoragePolicyTable(StoragePolicy s) {
    if (!isExist(s.getPolicyName())) {
      JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
      // Parameterized statement: the previous version concatenated values
      // directly into the SQL string, which breaks on quotes in the policy
      // name and is vulnerable to SQL injection.
      jdbcTemplate.update(
          "INSERT INTO " + TABLE_NAME + " (sid, policy_name) VALUES (?, ?)",
          s.getSid(), s.getPolicyName());
      this.data.put((int) (s.getSid()), s.getPolicyName());
    }
  }

  /** Deletes the policy with the given sid if it exists, updating the cache. */
  public synchronized void deleteStoragePolicy(int sid) {
    if (isExist(sid)) {
      JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
      final String sql = "DELETE FROM " + TABLE_NAME + " WHERE sid = ?";
      jdbcTemplate.update(sql, sid);
      this.data.remove(sid);
    }
  }

  /** Deletes the policy with the given name; a no-op for unknown names. */
  public synchronized void deleteStoragePolicy(String policyName) {
    Integer sid = getStorageSid(policyName);
    deleteStoragePolicy(sid);
  }

  public boolean isExist(int sid) {
    return getStoragePolicyName(sid) != null;
  }

  public boolean isExist(String policyName) {
    return getStorageSid(policyName) != -1;
  }

  /** Maps a {@code storage_policy} row to a {@link StoragePolicy}. */
  class StoragePolicyRowMapper implements RowMapper<StoragePolicy> {
    @Override
    public StoragePolicy mapRow(ResultSet resultSet, int i) throws SQLException {
      return new StoragePolicy(resultSet.getByte("sid"), resultSet.getString("policy_name"));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTableAggregator.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTableAggregator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Aggregates several fine-grained access count tables into a single
 * coarse-grained destination table and registers the result, holding the
 * metastore's access-count lock (when one is configured) for the duration.
 */
public class AccessCountTableAggregator {
  public static final Logger LOG =
      LoggerFactory.getLogger(AccessCountTableAggregator.class);

  private final MetaStore metaStore;

  public AccessCountTableAggregator(MetaStore metaStore) {
    this.metaStore = metaStore;
  }

  /**
   * Merges {@code tablesToAggregate} into {@code destinationTable} and
   * records the destination table. A no-op when there is nothing to merge.
   */
  public void aggregate(AccessCountTable destinationTable,
      List<AccessCountTable> tablesToAggregate) throws MetaStoreException {
    if (tablesToAggregate.isEmpty()) {
      return;
    }
    ReentrantLock accessCountLock = metaStore.getAccessCountLock();
    boolean locked = accessCountLock != null;
    if (locked) {
      accessCountLock.lock();
    }
    try {
      metaStore.aggregateTables(destinationTable, tablesToAggregate);
      metaStore.insertAccessCountTable(destinationTable);
    } finally {
      if (locked) {
        accessCountLock.unlock();
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessEventAggregator.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessEventAggregator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metrics.FileAccessEvent;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
/**
 * Buffers file access events into fixed-size time windows and, when a window
 * closes, writes the per-file access counts of that window into a new access
 * count table. Paths that cannot yet be resolved to file ids are carried over
 * to the next window.
 */
public class AccessEventAggregator {
  public static final Logger LOG =
      LoggerFactory.getLogger(AccessEventAggregator.class);

  private final MetaStore adapter;
  private final long aggregationGranularity;
  private final AccessCountTableManager accessCountTableManager;
  private Window currentWindow;
  private List<FileAccessEvent> eventBuffer;
  // Counts for paths that could not be resolved to file ids in the previous
  // window; merged into the next window's counts for one more attempt.
  private Map<String, Integer> lastAccessCount = new HashMap<>();

  public AccessEventAggregator(MetaStore adapter, AccessCountTableManager manager) {
    this(adapter, manager, 5 * 1000L);
  }

  public AccessEventAggregator(MetaStore adapter,
      AccessCountTableManager manager, long aggregationGranularity) {
    this.adapter = adapter;
    this.accessCountTableManager = manager;
    this.aggregationGranularity = aggregationGranularity;
    this.eventBuffer = new ArrayList<>();
  }

  /**
   * Buffers the given events; whenever an event falls outside the current
   * window, the buffered window is flushed to a new access count table first.
   */
  public void addAccessEvents(List<FileAccessEvent> eventList) {
    if (this.currentWindow == null && !eventList.isEmpty()) {
      this.currentWindow = assignWindow(eventList.get(0).getTimestamp());
    }
    for (FileAccessEvent event : eventList) {
      if (!this.currentWindow.contains(event.getTimestamp())) {
        // New window occurs: persist the finished window, then start fresh.
        this.createTable();
        this.currentWindow = assignWindow(event.getTimestamp());
        this.eventBuffer.clear();
      }
      // Exclude watermark event (identified by an empty path).
      if (!event.getPath().isEmpty()) {
        this.eventBuffer.add(event);
      }
    }
  }

  /** Creates the table for the current window and fills it from the buffer. */
  private void createTable() {
    AccessCountTable table = new AccessCountTable(currentWindow.start, currentWindow.end);
    String createTable = AccessCountDao.createAccessCountTableSQL(table.getTableName());
    try {
      // Replace a stale table of the same name left over from a prior run.
      if (adapter.getTablesNum(new String[]{table.getTableName()}) != 0) {
        adapter.dropTable(table.getTableName());
      }
      adapter.execute(createTable);
      adapter.insertAccessCountTable(table);
    } catch (MetaStoreException e) {
      LOG.error("Create table error: " + table, e);
      return;
    }
    if (this.eventBuffer.size() > 0 || lastAccessCount.size() > 0) {
      Map<String, Integer> accessCount = this.getAccessCountMap(eventBuffer);
      // Remember which paths are new in this window before merging in the
      // unresolved leftovers from the previous window.
      Set<String> now = new HashSet<>(accessCount.keySet());
      accessCount = mergeMap(accessCount, lastAccessCount);

      final Map<String, Long> pathToIDs;
      try {
        pathToIDs = adapter.getFileIDs(accessCount.keySet());
      } catch (MetaStoreException e) {
        // TODO: dirty handle here
        LOG.error("Create Table " + table.getTableName(), e);
        return;
      }
      // This window's paths with no file id yet are carried to the next
      // window; leftovers that failed a second time are dropped (and logged).
      now.removeAll(pathToIDs.keySet());
      Map<String, Integer> tmpLast = new HashMap<>();
      for (String key : now) {
        tmpLast.put(key, accessCount.get(key));
      }

      List<String> values = new ArrayList<>();
      for (String key : pathToIDs.keySet()) {
        values.add(String.format("(%d, %d)", pathToIDs.get(key), accessCount.get(key)));
      }

      if (LOG.isDebugEnabled()) {
        if (lastAccessCount.size() != 0) {
          // Copy the key set: the previous version called removeAll() on the
          // live keySet() view, mutating lastAccessCount as a side effect of
          // debug logging.
          Set<String> non = new HashSet<>(lastAccessCount.keySet());
          non.removeAll(pathToIDs.keySet());
          if (non.size() != 0) {
            StringBuilder result = new StringBuilder("Access events ignored for file:\n");
            for (String p : non) {
              result.append(p).append(" --> ").append(lastAccessCount.get(p)).append("\n");
            }
            LOG.debug(result.toString());
          }
        }
      }
      lastAccessCount = tmpLast;

      if (values.size() != 0) {
        String insertValue = String.format(
            "INSERT INTO %s (%s, %s) VALUES %s",
            table.getTableName(),
            AccessCountDao.FILE_FIELD,
            AccessCountDao.ACCESSCOUNT_FIELD,
            StringUtils.join(values, ", "));
        try {
          this.adapter.execute(insertValue);
          this.adapter.updateCachedFiles(pathToIDs, eventBuffer);
          if (LOG.isDebugEnabled()) {
            LOG.debug("Table created: " + table);
          }
        } catch (MetaStoreException e) {
          LOG.error("Create table error: " + table, e);
        }
      }
    }
    this.accessCountTableManager.addTable(table);
  }

  /** Merges map2's counts into map1 (summing on shared keys) and returns map1. */
  private Map<String, Integer> mergeMap(Map<String, Integer> map1, Map<String, Integer> map2) {
    for (Entry<String, Integer> entry : map2.entrySet()) {
      String key = entry.getKey();
      if (map1.containsKey(key)) {
        map1.put(key, map1.get(key) + entry.getValue());
      } else {
        map1.put(key, entry.getValue());
      }
    }
    return map1;
  }

  /** Counts accesses per path over the given events. */
  private Map<String, Integer> getAccessCountMap(List<FileAccessEvent> events) {
    Map<String, Integer> map = new HashMap<>();
    for (FileAccessEvent event : events) {
      String path = event.getPath();
      if (map.containsKey(path)) {
        map.put(path, map.get(path) + 1);
      } else {
        map.put(path, 1);
      }
    }
    return map;
  }

  /** Returns the aligned window [start, start + granularity) containing {@code time}. */
  private Window assignWindow(long time) {
    long start = time - (time % aggregationGranularity);
    return new Window(start, start + aggregationGranularity);
  }

  /** A half-open time interval [start, end). */
  private class Window {
    private long start;
    private long end;

    public Window(long start, long end) {
      this.start = start;
      this.end = end;
    }

    // [start, end)
    public boolean contains(long time) {
      return this.start <= time && this.end > time;
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof Window)) {
        return false;
      } else {
        Window other = (Window) o;
        return this.start == other.start && this.end == other.end;
      }
    }

    @Override
    public int hashCode() {
      // Keep consistent with equals(); the previous version overrode equals
      // without hashCode.
      return 31 * Long.valueOf(start).hashCode() + Long.valueOf(end).hashCode();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/ActionDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/ActionDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.apache.commons.lang.StringEscapeUtils;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metastore.utils.MetaStoreUtils;
import org.smartdata.model.ActionInfo;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ActionDao {
  private static final String TABLE_NAME = "action";
  // Pseudo sort key: ordering by it is translated to (finish_time - create_time).
  private static final String RUNNING_TIME = "running_time";
  // Maximum characters of an arg value shown by ActionRowPartMapper.
  private static final int TRUNCATED_ARG_LENGTH = 50;

  private DataSource dataSource;
  // Legal sort keys: the real columns of the action table plus RUNNING_TIME.
  // User-supplied ORDER BY keys are validated against this list before they
  // are ever placed into SQL text.
  private final List<String> tableColumns;

  /**
   * Creates the DAO and caches the action table's column names so that
   * caller-supplied sort keys can be validated later.
   *
   * @param dataSource pooled JDBC data source
   * @throws MetaStoreException if the table metadata cannot be read
   */
  public ActionDao(DataSource dataSource) throws MetaStoreException {
    this.dataSource = dataSource;
    // try-with-resources returns the connection to the pool on every path.
    try (Connection conn = dataSource.getConnection()) {
      tableColumns = MetaStoreUtils.getTableColumns(conn, TABLE_NAME);
    } catch (SQLException e) {
      throw new MetaStoreException(e);
    }
    tableColumns.add(RUNNING_TIME);
  }

  /** Returns every action in the table. */
  public List<ActionInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME,
        new ActionRowMapper());
  }

  /** Returns the total number of actions. */
  public Long getCountOfAction() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT COUNT(*) FROM " + TABLE_NAME;
    return jdbcTemplate.queryForObject(sql, Long.class);
  }

  /**
   * Returns the action with the given id.
   * Throws a Spring {@code EmptyResultDataAccessException} if it is absent.
   */
  public ActionInfo getById(long aid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT * FROM " + TABLE_NAME + " WHERE aid = ?",
        new ActionRowMapper(),
        aid);
  }

  /** Returns the actions whose ids are contained in {@code aids}. */
  public List<ActionInfo> getByIds(List<Long> aids) {
    NamedParameterJdbcTemplate namedParameterJdbcTemplate =
        new NamedParameterJdbcTemplate(dataSource);
    MapSqlParameterSource parameterSource = new MapSqlParameterSource();
    parameterSource.addValue("aids", aids);
    return namedParameterJdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE aid IN (:aids)",
        parameterSource,
        new ActionRowMapper());
  }

  /** Returns all actions belonging to the given cmdlet. */
  public List<ActionInfo> getByCid(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE cid = ?",
        new ActionRowMapper(),
        cid);
  }

  /**
   * Returns actions matching the given SQL condition fragments, e.g.
   * {@code "> 10"} for {@code aidCondition}. A {@code null} fragment means
   * "no constraint". The fragments are concatenated into the statement
   * verbatim, so they must come from trusted code, never from user input.
   */
  public List<ActionInfo> getByCondition(String aidCondition,
      String cidCondition) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sqlPrefix = "SELECT * FROM " + TABLE_NAME + " WHERE ";
    String sqlAid = (aidCondition == null) ? "" : "AND aid " + aidCondition;
    String sqlCid = (cidCondition == null) ? "" : "AND cid " + cidCondition;
    String sqlFinal = "";
    if (aidCondition != null || cidCondition != null) {
      sqlFinal = sqlPrefix + sqlAid + sqlCid;
      // Strip the "AND " that precedes the first present condition.
      sqlFinal = sqlFinal.replaceFirst("AND ", "");
    } else {
      // No condition at all: drop the dangling WHERE.
      sqlFinal = sqlPrefix.replaceFirst("WHERE ", "");
    }
    return jdbcTemplate.query(sqlFinal, new ActionRowMapper());
  }

  /**
   * Returns the newest actions (descending by id).
   *
   * @param size maximum number of rows; 0 means unlimited
   */
  public List<ActionInfo> getLatestActions(int size) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    if (size != 0) {
      jdbcTemplate.setMaxRows(size);
    }
    String sql = "SELECT * FROM " + TABLE_NAME + " ORDER BY aid DESC";
    return jdbcTemplate.query(sql, new ActionRowMapper());
  }

  /**
   * Returns the newest actions with the given name.
   *
   * @param size maximum number of rows; 0 means unlimited
   */
  public List<ActionInfo> getLatestActions(String actionName, int size) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    if (size != 0) {
      jdbcTemplate.setMaxRows(size);
    }
    String sql = "SELECT * FROM " + TABLE_NAME + " WHERE action_name = ? ORDER BY aid DESC";
    return jdbcTemplate.query(sql, new ActionRowMapper(), actionName);
  }

  /**
   * Returns the newest actions with the given name and both state flags.
   *
   * @param size maximum number of rows; 0 means unlimited
   */
  public List<ActionInfo> getLatestActions(String actionName, int size,
      boolean successful, boolean finished) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    if (size != 0) {
      jdbcTemplate.setMaxRows(size);
    }
    String sql =
        "SELECT * FROM "
            + TABLE_NAME
            + " WHERE action_name = ? AND successful = ? AND finished = ? ORDER BY aid DESC";
    return jdbcTemplate.query(sql, new ActionRowMapper(), actionName, successful, finished);
  }

  /**
   * Returns the newest actions with the given name and success flag.
   *
   * @param size maximum number of rows; 0 means unlimited
   */
  public List<ActionInfo> getLatestActions(String actionName, boolean successful,
      int size) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    if (size != 0) {
      jdbcTemplate.setMaxRows(size);
    }
    String sql =
        "SELECT * FROM "
            + TABLE_NAME
            + " WHERE action_name = ? AND successful = ? ORDER BY aid DESC";
    return jdbcTemplate.query(sql, new ActionRowMapper(), actionName, successful);
  }

  /**
   * Returns one page of actions, ordered by the requested keys.
   * Unknown keys are ignored; "aid" is always appended as tie-breaker.
   * Rows are mapped with truncated args (see {@link ActionRowPartMapper}).
   */
  public List<ActionInfo> getAPageOfAction(long start, long offset, List<String> orderBy,
      List<Boolean> isDesc) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = appendOrderByAndLimit(
        "SELECT * FROM " + TABLE_NAME + " ORDER BY ", orderBy, isDesc, start, offset);
    return jdbcTemplate.query(sql, new ActionRowPartMapper());
  }

  /** Returns one page of actions in table order, with truncated args. */
  public List<ActionInfo> getAPageOfAction(long start, long offset) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM " + TABLE_NAME + " LIMIT " + start + "," + offset + ";";
    return jdbcTemplate.query(sql, new ActionRowPartMapper());
  }

  /**
   * Fuzzy-searches actions whose fields contain {@code path} and returns one
   * page of matches. The search text is bound as a JDBC parameter instead of
   * being concatenated into the statement, which prevents SQL injection
   * through the search box (the previous implementation inlined it).
   *
   * @param retTotalNumActions optional 1-element out-param receiving the
   *                           total match count; may be null
   */
  public List<ActionInfo> searchAction(String path, long start, long offset, List<String> orderBy,
      List<Boolean> isDesc, long[] retTotalNumActions) {
    List<ActionInfo> ret;
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String like = "%" + path + "%";
    // One placeholder per searched column; ESCAPE '/' kept from the original.
    String sqlFilter = TABLE_NAME + " WHERE ("
        + "aid LIKE ? ESCAPE '/' "
        + "OR cid LIKE ? ESCAPE '/' "
        + "OR args LIKE ? ESCAPE '/' "
        + "OR result LIKE ? ESCAPE '/' "
        + "OR exec_host LIKE ? ESCAPE '/' "
        + "OR progress LIKE ? ESCAPE '/' "
        + "OR log LIKE ? ESCAPE '/' "
        + "OR action_name LIKE ? ESCAPE '/')";
    Object[] params = {like, like, like, like, like, like, like, like};
    String sql = "SELECT * FROM " + sqlFilter;
    String sqlCount = "SELECT count(*) FROM " + sqlFilter + ";";
    if (orderBy.size() == 0) {
      sql += " LIMIT " + start + "," + offset + ";";
    } else {
      sql = appendOrderByAndLimit(sql + " ORDER BY ", orderBy, isDesc, start, offset);
    }
    ret = jdbcTemplate.query(sql, new ActionRowMapper(), params);
    if (retTotalNumActions != null) {
      retTotalNumActions[0] = jdbcTemplate.queryForObject(sqlCount, Long.class, params);
    }
    return ret;
  }

  /**
   * Returns the newest actions with the given name and finished flag.
   *
   * @param size maximum number of rows; 0 means unlimited
   */
  public List<ActionInfo> getLatestActions(String actionType, int size,
      boolean finished) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    if (size != 0) {
      jdbcTemplate.setMaxRows(size);
    }
    String sql =
        "SELECT * FROM " + TABLE_NAME + " WHERE action_name = ? AND finished = ? ORDER BY aid DESC";
    return jdbcTemplate.query(sql, new ActionRowMapper(), actionType, finished);
  }

  /** Deletes one action by id. */
  public void delete(long aid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE aid = ?";
    jdbcTemplate.update(sql, aid);
  }

  /** Deletes all actions belonging to the given cmdlet. */
  public void deleteCmdletActions(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE cid = ?";
    jdbcTemplate.update(sql, cid);
  }

  /**
   * Deletes the actions of each cmdlet id in one JDBC batch.
   *
   * @return per-statement update counts, as reported by the driver
   */
  public int[] batchDeleteCmdletActions(final List<Long> cids) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE cid = ?";
    return jdbcTemplate.batchUpdate(
        sql,
        new BatchPreparedStatementSetter() {
          @Override
          public void setValues(PreparedStatement ps, int i) throws SQLException {
            ps.setLong(1, cids.get(i));
          }

          @Override
          public int getBatchSize() {
            return cids.size();
          }
        });
  }

  /** Deletes every action. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME;
    jdbcTemplate.execute(sql);
  }

  /** Inserts one action; result and log are Java-escaped by {@link #toMap}. */
  public void insert(ActionInfo actionInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(actionInfo));
  }

  /** Inserts several actions in one batch. */
  public void insert(ActionInfo[] actionInfos) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    Map<String, Object>[] maps = new Map[actionInfos.length];
    for (int i = 0; i < actionInfos.length; i++) {
      maps[i] = toMap(actionInfos[i]);
    }
    simpleJdbcInsert.executeBatch(maps);
  }

  /**
   * Inserts or overwrites the given actions (MySQL {@code REPLACE INTO}).
   * Unlike {@link #insert}, result and log are stored unescaped here;
   * this mirrors the original behavior.
   */
  public int[] replace(final ActionInfo[] actionInfos) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql =
        "REPLACE INTO "
            + TABLE_NAME
            + "(aid, "
            + "cid, "
            + "action_name, "
            + "args, "
            + "result, "
            + "log, "
            + "successful, "
            + "create_time, "
            + "finished, "
            + "finish_time, "
            + "exec_host, "
            + "progress)"
            + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
    return jdbcTemplate.batchUpdate(sql,
        new BatchPreparedStatementSetter() {
          @Override
          public void setValues(PreparedStatement ps,
              int i) throws SQLException {
            ps.setLong(1, actionInfos[i].getActionId());
            ps.setLong(2, actionInfos[i].getCmdletId());
            ps.setString(3, actionInfos[i].getActionName());
            ps.setString(4, actionInfos[i].getArgsJsonString());
            ps.setString(5, actionInfos[i].getResult());
            ps.setString(6, actionInfos[i].getLog());
            ps.setBoolean(7, actionInfos[i].isSuccessful());
            ps.setLong(8, actionInfos[i].getCreateTime());
            ps.setBoolean(9, actionInfos[i].isFinished());
            ps.setLong(10, actionInfos[i].getFinishTime());
            ps.setString(11, actionInfos[i].getExecHost());
            ps.setFloat(12, actionInfos[i].getProgress());
          }

          @Override
          public int getBatchSize() {
            return actionInfos.length;
          }
        });
  }

  /** Updates one action's mutable state; returns the update count. */
  public int update(final ActionInfo actionInfo) {
    return update(new ActionInfo[]{actionInfo})[0];
  }

  /**
   * Batch-updates the mutable state (result, log, flags, times, host,
   * progress) of the given actions, keyed by action id.
   */
  public int[] update(final ActionInfo[] actionInfos) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql =
        "UPDATE "
            + TABLE_NAME
            + " SET "
            + "result = ?, "
            + "log = ?, "
            + "successful = ?, "
            + "create_time = ?, "
            + "finished = ?, "
            + "finish_time = ?, "
            + "exec_host = ?, "
            + "progress = ? "
            + "WHERE aid = ?";
    return jdbcTemplate.batchUpdate(sql,
        new BatchPreparedStatementSetter() {
          @Override
          public void setValues(PreparedStatement ps,
              int i) throws SQLException {
            ps.setString(1, actionInfos[i].getResult());
            ps.setString(2, actionInfos[i].getLog());
            ps.setBoolean(3, actionInfos[i].isSuccessful());
            ps.setLong(4, actionInfos[i].getCreateTime());
            ps.setBoolean(5, actionInfos[i].isFinished());
            ps.setLong(6, actionInfos[i].getFinishTime());
            ps.setString(7, actionInfos[i].getExecHost());
            ps.setFloat(8, actionInfos[i].getProgress());
            ps.setLong(9, actionInfos[i].getActionId());
          }

          @Override
          public int getBatchSize() {
            return actionInfos.length;
          }
        });
  }

  /**
   * Returns the next free action id: MAX(aid) + 1, or 0 when the table is
   * empty. Despite its name it does NOT return the current maximum.
   */
  public long getMaxId() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    Long ret = jdbcTemplate
        .queryForObject("SELECT MAX(aid) FROM " + TABLE_NAME, Long.class);
    if (ret == null) {
      return 0;
    } else {
      return ret + 1;
    }
  }

  /**
   * Builds the ORDER BY / LIMIT tail of a paging query. Sort keys are checked
   * against the cached column list, so only known column names (or the
   * RUNNING_TIME pseudo key) are concatenated into SQL -- this is the
   * injection guard for user-chosen ordering. "aid" is appended as the final
   * tie-breaker unless already present.
   */
  private String appendOrderByAndLimit(String sql, List<String> orderBy,
      List<Boolean> isDesc, long start, long offset) {
    boolean ifHasAid = false;
    for (int i = 0; i < orderBy.size(); i++) {
      String ob = orderBy.get(i);
      if (!tableColumns.contains(ob)) {
        // Unknown key: drop it rather than risk injecting free text.
        continue;
      }
      if (ob.equals("aid")) {
        ifHasAid = true;
      }
      if (ob.equals(RUNNING_TIME)) {
        sql = sql + "(finish_time - create_time)";
      } else {
        sql = sql + ob;
      }
      // NOTE(review): when isDesc is shorter than orderBy the separating comma
      // is skipped, producing malformed SQL; preserved as-is because callers
      // appear to pass lists of equal length -- confirm before changing.
      if (isDesc.size() > i) {
        if (isDesc.get(i)) {
          sql = sql + " desc ";
        }
        sql = sql + ",";
      }
    }
    if (!ifHasAid) {
      sql = sql + "aid,";
    }
    // Drop the trailing comma, then page the result.
    sql = sql.substring(0, sql.length() - 1);
    sql = sql + " LIMIT " + start + "," + offset + ";";
    return sql;
  }

  /** Converts an ActionInfo to the column map used by SimpleJdbcInsert. */
  private Map<String, Object> toMap(ActionInfo actionInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("aid", actionInfo.getActionId());
    parameters.put("cid", actionInfo.getCmdletId());
    parameters.put("action_name", actionInfo.getActionName());
    parameters.put("args", actionInfo.getArgsJsonString());
    // result/log may contain arbitrary characters; Java-escape them for
    // storage (ActionRowMapper unescapes on the way out).
    parameters
        .put("result", StringEscapeUtils.escapeJava(actionInfo.getResult()));
    parameters.put("log", StringEscapeUtils.escapeJava(actionInfo.getLog()));
    parameters.put("successful", actionInfo.isSuccessful());
    parameters.put("create_time", actionInfo.getCreateTime());
    parameters.put("finished", actionInfo.isFinished());
    parameters.put("finish_time", actionInfo.getFinishTime());
    parameters.put("exec_host", actionInfo.getExecHost());
    parameters.put("progress", actionInfo.getProgress());
    return parameters;
  }

  /** Maps a full action row, unescaping the stored result and log text. */
  class ActionRowMapper implements RowMapper<ActionInfo> {
    @Override
    public ActionInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      ActionInfo actionInfo = new ActionInfo();
      actionInfo.setActionId(resultSet.getLong("aid"));
      actionInfo.setCmdletId(resultSet.getLong("cid"));
      actionInfo.setActionName(resultSet.getString("action_name"));
      actionInfo.setArgsFromJsonString(resultSet.getString("args"));
      actionInfo.setResult(
          StringEscapeUtils.unescapeJava(resultSet.getString("result")));
      actionInfo
          .setLog(StringEscapeUtils.unescapeJava(resultSet.getString("log")));
      actionInfo.setSuccessful(resultSet.getBoolean("successful"));
      actionInfo.setCreateTime(resultSet.getLong("create_time"));
      actionInfo.setFinished(resultSet.getBoolean("finished"));
      actionInfo.setFinishTime(resultSet.getLong("finish_time"));
      actionInfo.setExecHost(resultSet.getString("exec_host"));
      actionInfo.setProgress(resultSet.getFloat("progress"));
      return actionInfo;
    }
  }

  /**
   * Display-oriented row mapper: skips result & log and truncates long arg
   * values so the UI does not render huge payloads.
   */
  class ActionRowPartMapper implements RowMapper<ActionInfo> {
    @Override
    public ActionInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      ActionInfo actionInfo = new ActionInfo();
      actionInfo.setActionId(resultSet.getLong("aid"));
      actionInfo.setCmdletId(resultSet.getLong("cid"));
      actionInfo.setActionName(resultSet.getString("action_name"));
      // NOTE(review): this value is immediately replaced by the truncated map
      // below; kept in case the setter derives other state -- confirm.
      actionInfo.setArgsFromJsonString(resultSet.getString("args"));
      actionInfo.setArgs(
          getTruncatedArgs(resultSet.getString("args")));
      actionInfo.setSuccessful(resultSet.getBoolean("successful"));
      actionInfo.setCreateTime(resultSet.getLong("create_time"));
      actionInfo.setFinished(resultSet.getBoolean("finished"));
      actionInfo.setFinishTime(resultSet.getLong("finish_time"));
      actionInfo.setExecHost(resultSet.getString("exec_host"));
      actionInfo.setProgress(resultSet.getFloat("progress"));
      return actionInfo;
    }

    /**
     * Parses the JSON args and shortens each value to at most
     * {@value #TRUNCATED_ARG_LENGTH} characters plus an ellipsis.
     * Returns an empty map for null/absent JSON instead of throwing NPE.
     */
    public Map<String, String> getTruncatedArgs(String jsonArgs) {
      Gson gson = new Gson();
      Map<String, String> args = gson.fromJson(jsonArgs,
          new TypeToken<Map<String, String>>() {
          }.getType());
      if (args == null) {
        return new HashMap<>();
      }
      for (Map.Entry<String, String> entry : args.entrySet()) {
        if (entry.getValue().length() > TRUNCATED_ARG_LENGTH) {
          entry.setValue(entry.getValue().substring(0, TRUNCATED_ARG_LENGTH) + "...");
        }
      }
      return args;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/CacheFileDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/CacheFileDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.collections.CollectionUtils;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.model.CachedFileStatus;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class CacheFileDao {
  private static final String TABLE_NAME = "cached_file";

  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public CacheFileDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns the status of every cached file. */
  public List<CachedFileStatus> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME,
        new CacheFileRowMapper());
  }

  /**
   * Returns the cached-file status for the given file id.
   * Throws a Spring {@code EmptyResultDataAccessException} if it is absent.
   */
  public CachedFileStatus getById(long fid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM " + TABLE_NAME + " WHERE fid = ?",
        new CacheFileRowMapper(), fid);
  }

  /** Returns the ids of all cached files. */
  public List<Long> getFids() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT fid FROM " + TABLE_NAME;
    List<Long> fids = jdbcTemplate.query(sql,
        new RowMapper<Long>() {
          @Override
          public Long mapRow(ResultSet rs, int rowNum) throws SQLException {
            return rs.getLong("fid");
          }
        });
    return fids;
  }

  /** Inserts one cached-file record. */
  public void insert(CachedFileStatus cachedFileStatus) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(cachedFileStatus));
  }

  /** Convenience overload building the record from its fields. */
  public void insert(long fid, String path, long fromTime,
      long lastAccessTime, int numAccessed) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(new CachedFileStatus(fid, path,
        fromTime, lastAccessTime, numAccessed)));
  }

  /** Inserts several cached-file records in one batch. */
  public void insert(CachedFileStatus[] cachedFileStatusList) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    Map<String, Object>[] maps = new Map[cachedFileStatusList.length];
    for (int i = 0; i < cachedFileStatusList.length; i++) {
      maps[i] = toMap(cachedFileStatusList[i]);
    }
    simpleJdbcInsert.executeBatch(maps);
  }

  /** Updates the access statistics of one cached file; returns row count. */
  public int update(Long fid, Long lastAccessTime, Integer numAccessed) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE " + TABLE_NAME
        + " SET last_access_time = ?, accessed_num = ? WHERE fid = ?";
    return jdbcTemplate.update(sql, lastAccessTime, numAccessed, fid);
  }

  /**
   * Folds a batch of access events into the cached-file statistics: for each
   * cached file touched by the batch, bumps the access count and advances the
   * last-access time to the newest event timestamp.
   *
   * @param pathToIds maps file paths to file ids; paths missing from the map
   *                  are skipped (the previous version could NPE on them)
   * @param events    the access events to fold in
   */
  public void update(Map<String, Long> pathToIds,
      List<FileAccessEvent> events) {
    Map<Long, CachedFileStatus> idToStatus = new HashMap<>();
    for (CachedFileStatus status : getAll()) {
      idToStatus.put(status.getFid(), status);
    }
    // Only files that are both cached and present in the path map matter.
    Collection<Long> needToUpdate =
        CollectionUtils.intersection(idToStatus.keySet(), pathToIds.values());
    if (needToUpdate.isEmpty()) {
      return;
    }
    Map<Long, Integer> idToCount = new HashMap<>();
    Map<Long, Long> idToLastTime = new HashMap<>();
    for (FileAccessEvent event : events) {
      Long fid = pathToIds.get(event.getPath());
      // fid is null for paths unknown to the id map; skip them explicitly.
      if (fid != null && needToUpdate.contains(fid)) {
        Integer count = idToCount.get(fid);
        idToCount.put(fid, count == null ? 1 : count + 1);
        Long last = idToLastTime.get(fid);
        idToLastTime.put(fid, last == null
            ? event.getTimestamp() : Math.max(event.getTimestamp(), last));
      }
    }
    for (Long fid : needToUpdate) {
      Integer delta = idToCount.get(fid);
      if (delta == null) {
        // Cached file with no event in this batch: nothing to update
        // (the previous version would NPE here on unboxing).
        continue;
      }
      int newAccessCount = idToStatus.get(fid).getNumAccessed() + delta;
      this.update(fid, idToLastTime.get(fid), newAccessCount);
    }
  }

  /** Removes one cached-file record by id. */
  public void deleteById(long fid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE fid = ?";
    jdbcTemplate.update(sql, fid);
  }

  /** Removes every cached-file record. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "DELETE FROM " + TABLE_NAME;
    jdbcTemplate.execute(sql);
  }

  /** Converts a status object to the column map used by SimpleJdbcInsert. */
  private Map<String, Object> toMap(CachedFileStatus cachedFileStatus) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("fid", cachedFileStatus.getFid());
    parameters.put("path", cachedFileStatus.getPath());
    parameters.put("from_time", cachedFileStatus.getFromTime());
    parameters.put("last_access_time", cachedFileStatus.getLastAccessTime());
    parameters.put("accessed_num", cachedFileStatus.getNumAccessed());
    return parameters;
  }

  /** Maps one cached_file row to a CachedFileStatus. */
  class CacheFileRowMapper implements RowMapper<CachedFileStatus> {
    @Override
    public CachedFileStatus mapRow(ResultSet resultSet, int i) throws SQLException {
      CachedFileStatus cachedFileStatus = new CachedFileStatus();
      cachedFileStatus.setFid(resultSet.getLong("fid"));
      cachedFileStatus.setPath(resultSet.getString("path"));
      cachedFileStatus.setFromTime(resultSet.getLong("from_time"));
      cachedFileStatus.setLastAccessTime(resultSet.getLong("last_access_time"));
      cachedFileStatus.setNumAccessed(resultSet.getInt("accessed_num"));
      return cachedFileStatus;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/GlobalConfigDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/GlobalConfigDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.smartdata.model.GlobalConfig;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class GlobalConfigDao {
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public GlobalConfigDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every global configuration entry. */
  public List<GlobalConfig> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM global_config", new GlobalConfigRowMapper());
  }

  /**
   * Returns the configs whose ids are in {@code cid}.
   * Each id is bound as its own value through a named IN-clause parameter.
   * The previous implementation bound the whole comma-joined id list as ONE
   * string parameter for {@code IN (?)}, so multi-id lookups never matched.
   */
  public List<GlobalConfig> getByIds(List<Long> cid) {
    if (cid == null || cid.isEmpty()) {
      // An empty "IN ()" clause is invalid SQL; short-circuit instead.
      return Collections.emptyList();
    }
    NamedParameterJdbcTemplate namedParameterJdbcTemplate =
        new NamedParameterJdbcTemplate(dataSource);
    MapSqlParameterSource parameterSource = new MapSqlParameterSource();
    parameterSource.addValue("cids", cid);
    return namedParameterJdbcTemplate.query(
        "SELECT * FROM global_config WHERE cid IN (:cids)",
        parameterSource,
        new GlobalConfigRowMapper());
  }

  /**
   * Returns the config with the given id.
   * Throws a Spring {@code EmptyResultDataAccessException} if it is absent.
   */
  public GlobalConfig getById(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT * FROM global_config WHERE cid = ?",
        new GlobalConfigRowMapper(),
        cid);
  }

  /**
   * Returns the config with the given property name.
   * Throws a Spring {@code EmptyResultDataAccessException} if it is absent.
   */
  public GlobalConfig getByPropertyName(String propertyName) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT * FROM global_config WHERE property_name = ?",
        new GlobalConfigRowMapper(),
        propertyName);
  }

  /** Deletes the config with the given property name. */
  public void deleteByName(String propertyName) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM global_config WHERE property_name = ?";
    jdbcTemplate.update(sql, propertyName);
  }

  /** Deletes the config with the given id. */
  public void delete(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM global_config WHERE cid = ?";
    jdbcTemplate.update(sql, cid);
  }

  /**
   * Inserts one config, letting the database generate the id.
   * The generated id is written back into {@code globalConfig} and returned.
   */
  public long insert(GlobalConfig globalConfig) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("global_config");
    simpleJdbcInsert.usingGeneratedKeyColumns("cid");
    long cid = simpleJdbcInsert.executeAndReturnKey(toMaps(globalConfig)).longValue();
    globalConfig.setCid(cid);
    return cid;
  }

  // TODO: solve the increment of key. executeBatch returns per-statement
  // update counts, not generated keys, so the cids written back below are
  // not guaranteed to be the real database ids.
  public void insert(GlobalConfig[] globalConfigs) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("global_config");
    simpleJdbcInsert.usingGeneratedKeyColumns("cid");
    Map<String, Object>[] maps = new Map[globalConfigs.length];
    for (int i = 0; i < globalConfigs.length; i++) {
      maps[i] = toMaps(globalConfigs[i]);
    }
    int[] cids = simpleJdbcInsert.executeBatch(maps);
    for (int i = 0; i < globalConfigs.length; i++) {
      globalConfigs[i].setCid(cids[i]);
    }
  }

  /** Returns how many configs carry the given property name (0 or 1). */
  public long getCountByName(String name) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT COUNT(*) FROM global_config WHERE property_name = ?", Long.class, name);
  }

  /** Sets the value of the named property; returns the update count. */
  public int update(String propertyName, String propertyValue) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE global_config SET property_value = ? WHERE property_name = ?";
    return jdbcTemplate.update(sql, propertyValue, propertyName);
  }

  /** Converts a config to the column map used by SimpleJdbcInsert. */
  private Map<String, Object> toMaps(GlobalConfig globalConfig) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("cid", globalConfig.getCid());
    parameters.put("property_name", globalConfig.getPropertyName());
    parameters.put("property_value", globalConfig.getPropertyValue().toString());
    return parameters;
  }

  /** Maps one global_config row to a GlobalConfig. */
  class GlobalConfigRowMapper implements RowMapper<GlobalConfig> {
    @Override
    public GlobalConfig mapRow(ResultSet resultSet, int i) throws SQLException {
      GlobalConfig globalConfig = new GlobalConfig();
      globalConfig.setCid(resultSet.getLong("cid"));
      globalConfig.setPropertyName(resultSet.getString("property_name"));
      globalConfig.setPropertyValue(resultSet.getString("property_value"));
      return globalConfig;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import org.springframework.jdbc.support.rowset.SqlRowSet;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
public class AccessCountDao {
  private DataSource dataSource;
  static final String FILE_FIELD = "fid";
  static final String ACCESSCOUNT_FIELD = "count";

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public AccessCountDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Registers one access-count table in the catalog table. */
  public void insert(AccessCountTable accessCountTable) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName("access_count_table");
    simpleJdbcInsert.execute(toMap(accessCountTable));
  }

  /** Registers several access-count tables. */
  public void insert(AccessCountTable[] accessCountTables) {
    for (AccessCountTable accessCountTable : accessCountTables) {
      insert(accessCountTable);
    }
  }

  /**
   * Returns the catalog entries with the given table name.
   * The name is bound as a JDBC parameter; the previous implementation
   * concatenated it into the SQL text (SQL injection risk).
   */
  public List<AccessCountTable> getAccessCountTableByName(String name) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM access_count_table WHERE table_name = ?";
    return jdbcTemplate.query(sql, new AccessCountRowMapper(), name);
  }

  /** Drops catalog entries fully contained in [startTime, endTime]. */
  public void delete(Long startTime, Long endTime) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql =
        "DELETE FROM access_count_table WHERE start_time >= ? AND end_time <= ?";
    jdbcTemplate.update(sql, startTime, endTime);
  }

  /** Drops the catalog entry for one access-count table. */
  public void delete(AccessCountTable table) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM access_count_table WHERE table_name = ?";
    jdbcTemplate.update(sql, table.getTableName());
  }

  /**
   * Returns the DDL creating a per-interval access-count table with
   * (fid, count) columns. Table names are generated internally from time
   * ranges, never from user input.
   */
  public static String createAccessCountTableSQL(String tableName) {
    return String.format(
        "CREATE TABLE %s (%s INTEGER NOT NULL, %s INTEGER NOT NULL)",
        tableName, FILE_FIELD, ACCESSCOUNT_FIELD);
  }

  /** Returns all catalog entries ordered by interval start time. */
  public List<AccessCountTable> getAllSortedTables() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM access_count_table ORDER BY start_time ASC";
    return jdbcTemplate.query(sql, new AccessCountRowMapper());
  }

  /**
   * Creates {@code destinationTable} and fills it with per-file sums over
   * all tables in {@code tablesToAggregate}, keeping only fids that still
   * exist in the file table.
   */
  public void aggregateTables(
      AccessCountTable destinationTable, List<AccessCountTable> tablesToAggregate) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String create = AccessCountDao.createAccessCountTableSQL(destinationTable.getTableName());
    jdbcTemplate.execute(create);
    String insert =
        String.format(
            "INSERT INTO %s SELECT tmp1.%s, tmp1.%s FROM ((SELECT %s, SUM(%s) AS %s FROM(%s) "
                + "tmp0 GROUP BY %s) AS tmp1 LEFT JOIN file ON file.fid = tmp1.fid);",
            destinationTable.getTableName(),
            AccessCountDao.FILE_FIELD,
            AccessCountDao.ACCESSCOUNT_FIELD,
            AccessCountDao.FILE_FIELD,
            AccessCountDao.ACCESSCOUNT_FIELD,
            AccessCountDao.ACCESSCOUNT_FIELD,
            getUnionStatement(tablesToAggregate),
            AccessCountDao.FILE_FIELD);
    jdbcTemplate.execute(insert);
  }

  /**
   * Returns the {@code topNum} most-accessed existing files, as a map from
   * fid to its summed access count across the given tables.
   */
  public Map<Long, Integer> getHotFiles(List<AccessCountTable> tables, int topNum)
      throws SQLException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String statement =
        String.format(
            "SELECT %s, SUM(%s) AS %s FROM (%s) tmp WHERE %s IN (SELECT fid FROM file) "
                + "GROUP BY %s ORDER BY %s DESC LIMIT %s",
            AccessCountDao.FILE_FIELD,
            AccessCountDao.ACCESSCOUNT_FIELD,
            AccessCountDao.ACCESSCOUNT_FIELD,
            getUnionStatement(tables),
            AccessCountDao.FILE_FIELD,
            AccessCountDao.FILE_FIELD,
            AccessCountDao.ACCESSCOUNT_FIELD,
            topNum);
    SqlRowSet sqlRowSet = jdbcTemplate.queryForRowSet(statement);
    Map<Long, Integer> accessCounts = new HashMap<>();
    while (sqlRowSet.next()) {
      accessCounts.put(
          sqlRowSet.getLong(AccessCountDao.FILE_FIELD),
          sqlRowSet.getInt(AccessCountDao.ACCESSCOUNT_FIELD));
    }
    return accessCounts;
  }

  /** Builds "SELECT * FROM t1 UNION ALL SELECT * FROM t2 ..." over tables. */
  private String getUnionStatement(List<AccessCountTable> tables) {
    StringBuilder union = new StringBuilder();
    Iterator<AccessCountTable> tableIterator = tables.iterator();
    while (tableIterator.hasNext()) {
      AccessCountTable table = tableIterator.next();
      union.append("SELECT * FROM ").append(table.getTableName());
      if (tableIterator.hasNext()) {
        union.append(" UNION ALL ");
      }
    }
    return union.toString();
  }

  /**
   * Creates {@code dest} holding counts from {@code source} scaled by the
   * ratio of the two tables' interval lengths.
   * NOTE(review): divides by the source interval length; a zero-length
   * source interval would yield infinity -- assumed impossible upstream,
   * confirm.
   */
  public void createProportionTable(AccessCountTable dest, AccessCountTable source)
      throws SQLException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    double percentage =
        ((double) dest.getEndTime() - dest.getStartTime())
            / (source.getEndTime() - source.getStartTime());
    jdbcTemplate.execute(AccessCountDao.createAccessCountTableSQL(dest.getTableName()));
    String sql =
        String.format(
            "INSERT INTO %s SELECT %s, ROUND(%s.%s * %s) AS %s FROM %s",
            dest.getTableName(),
            AccessCountDao.FILE_FIELD,
            source.getTableName(),
            AccessCountDao.ACCESSCOUNT_FIELD,
            percentage,
            AccessCountDao.ACCESSCOUNT_FIELD,
            source.getTableName());
    jdbcTemplate.execute(sql);
  }

  /**
   * Rewrites {@code fidSrc} to {@code fidDest} across all access-count
   * tables. Individual failures are tolerated because the table evictor may
   * drop tables concurrently; only total failure raises an exception.
   */
  public void updateFid(long fidSrc, long fidDest) throws SQLException {
    int failedNum = 0;
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    List<AccessCountTable> accessCountTables = getAllSortedTables();
    for (AccessCountTable table : accessCountTables) {
      String sql = String.format("update %s set %s=%s where %s=%s", table.getTableName(),
          AccessCountDao.FILE_FIELD, fidDest, AccessCountDao.FILE_FIELD, fidSrc);
      try {
        jdbcTemplate.execute(sql);
      } catch (Exception e) {
        // Best-effort: the table may have been evicted concurrently; there
        // is no impact on the measurement for data temperature.
        failedNum++;
      }
    }
    if (failedNum == accessCountTables.size()) {
      // Throw exception only if no table at all could be updated.
      throw new SQLException("Failed to update fid!");
    }
  }

  /** Converts a catalog entry to the column map used by SimpleJdbcInsert. */
  private Map<String, Object> toMap(AccessCountTable accessCountTable) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("table_name", accessCountTable.getTableName());
    parameters.put("start_time", accessCountTable.getStartTime());
    parameters.put("end_time", accessCountTable.getEndTime());
    return parameters;
  }

  /**
   * Maps one catalog row to an AccessCountTable; the table name is rebuilt
   * from the time range by the AccessCountTable constructor, so the stored
   * table_name column is intentionally not read.
   */
  class AccessCountRowMapper implements RowMapper<AccessCountTable> {
    @Override
    public AccessCountTable mapRow(ResultSet resultSet, int i) throws SQLException {
      AccessCountTable accessCountTable = new AccessCountTable(
          resultSet.getLong("start_time"),
          resultSet.getLong("end_time"));
      return accessCountTable;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/DurationEvictor.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/DurationEvictor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.metastore.MetaStore;
import java.util.Iterator;
/**
 * Table evictor that drops every access count table older than a fixed
 * retention duration, measured back from the newest table's end time.
 * Assumes the deque is sorted by time (oldest first), so eviction stops at
 * the first table that is still inside the retention window.
 */
public class DurationEvictor extends TableEvictor {
  // Retention window length in the same time unit as the table timestamps.
  private final long duration;

  public DurationEvictor(MetaStore adapter, long duration) {
    super(adapter);
    this.duration = duration;
  }

  /**
   * Drops and removes all tables whose start time falls before
   * (newest end time - duration). The {@code size} hint is unused here;
   * eviction is purely time-based.
   */
  @Override
  public void evictTables(AccessCountTableDeque tables, int size) {
    if (tables.peek() == null) {
      return;
    }
    // Use a primitive long: the original boxed this into a Long for no reason.
    long threshold = tables.peekLast().getEndTime() - duration;
    for (Iterator<AccessCountTable> iterator = tables.iterator(); iterator.hasNext();) {
      AccessCountTable table = iterator.next();
      if (table.getStartTime() >= threshold) {
        // Tables are time-ordered, so everything after this one is newer too.
        break;
      }
      this.dropTable(table);
      iterator.remove();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/ClusterInfoDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/ClusterInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.smartdata.model.ClusterInfo;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the {@code cluster_info} table, which stores one row per managed
 * cluster (generated cid, name, url, conf path, state and type).
 */
public class ClusterInfoDao {
  private static final String TABLE_NAME = "cluster_info";
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public ClusterInfoDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every cluster record. */
  public List<ClusterInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME, new ClusterinfoRowMapper());
  }

  /**
   * Returns the cluster with the given id. Like the original, this throws
   * IndexOutOfBoundsException when no such row exists.
   */
  public ClusterInfo getById(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME + " WHERE cid = ?",
        new Object[]{cid},
        new ClusterinfoRowMapper()).get(0);
  }

  /**
   * Returns the clusters whose ids are in {@code cids}.
   *
   * <p>The ids are inlined into the SQL text: the original bound the joined
   * string to a single {@code IN (?)} placeholder, which compares the column
   * against one string value and matches nothing for multi-id lists. Inlining
   * is safe here because the values are longs, not user-supplied strings.
   */
  public List<ClusterInfo> getByIds(List<Long> cids) {
    if (cids == null || cids.isEmpty()) {
      // "IN ()" is invalid SQL, so short-circuit on an empty id list.
      return new java.util.ArrayList<>();
    }
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE cid IN ("
            + StringUtils.join(cids, ",") + ")",
        new ClusterinfoRowMapper());
  }

  /** Deletes the cluster with the given id. */
  public void delete(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE cid = ?";
    jdbcTemplate.update(sql, cid);
  }

  /**
   * Inserts one cluster record and stores the generated key back into
   * {@code clusterInfo}.
   *
   * @return the generated cid
   */
  public long insert(ClusterInfo clusterInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.usingGeneratedKeyColumns("cid");
    long cid = simpleJdbcInsert.executeAndReturnKey(toMap(clusterInfo)).longValue();
    clusterInfo.setCid(cid);
    return cid;
  }

  /**
   * Inserts a batch of cluster records, populating each generated cid.
   *
   * <p>Rows are inserted one at a time because SimpleJdbcInsert#executeBatch
   * returns per-row UPDATE COUNTS, not generated keys — the original stored
   * those counts (always 1) into every ClusterInfo's cid.
   */
  public void insert(ClusterInfo[] clusterInfos) {
    for (ClusterInfo clusterInfo : clusterInfos) {
      insert(clusterInfo);
    }
  }

  /** Sets the state of the given cluster; returns the affected row count. */
  public int updateState(long cid, String state) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE " + TABLE_NAME + " SET state = ? WHERE cid = ?";
    return jdbcTemplate.update(sql, state, cid);
  }

  /** Sets the type of the given cluster; returns the affected row count. */
  public int updateType(long cid, String type) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE " + TABLE_NAME + " SET type = ? WHERE cid = ?";
    return jdbcTemplate.update(sql, type, cid);
  }

  /** Removes every row of the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME;
    jdbcTemplate.execute(sql);
  }

  /** Returns the number of clusters registered under the given name. */
  public int getCountByName(String name) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "SELECT COUNT(*) FROM " + TABLE_NAME + " WHERE name = ?";
    return jdbcTemplate.queryForObject(sql, Integer.class, name);
  }

  /** Maps a {@link ClusterInfo} to the column/value pairs used for inserts. */
  private Map<String, Object> toMap(ClusterInfo clusterInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("cid", clusterInfo.getCid());
    parameters.put("name", clusterInfo.getName());
    parameters.put("url", clusterInfo.getUrl());
    parameters.put("conf_path", clusterInfo.getConfPath());
    parameters.put("state", clusterInfo.getState());
    parameters.put("type", clusterInfo.getType());
    return parameters;
  }

  /** Maps a result-set row to a {@link ClusterInfo}. */
  class ClusterinfoRowMapper implements RowMapper<ClusterInfo> {
    @Override
    public ClusterInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      ClusterInfo clusterInfo = new ClusterInfo();
      clusterInfo.setCid(resultSet.getLong("cid"));
      clusterInfo.setName(resultSet.getString("name"));
      clusterInfo.setUrl(resultSet.getString("url"));
      clusterInfo.setConfPath(resultSet.getString("conf_path"));
      clusterInfo.setState(resultSet.getString("state"));
      clusterInfo.setType(resultSet.getString("type"));
      return clusterInfo;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTableManager.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/AccessCountTableManager.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.metastore.utils.TimeGranularity;
import org.smartdata.metastore.utils.TimeUtils;
import org.smartdata.metrics.FileAccessEvent;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * Maintains the hierarchy of access count tables at second, minute, hour and
 * day granularity. New per-second tables are appended to the second deque;
 * listeners aggregate filled fine-grained deques into the next coarser
 * granularity, and CountEvictors bound each deque's length. Also answers
 * "which tables cover the last N milliseconds" queries, splitting tables
 * proportionally when a request boundary falls inside one.
 */
public class AccessCountTableManager {
  // Retention limits per granularity, enforced by CountEvictor.
  private static final int NUM_DAY_TABLES_TO_KEEP = 30;
  private static final int NUM_HOUR_TABLES_TO_KEEP = 48;
  private static final int NUM_MINUTE_TABLES_TO_KEEP = 120;
  private static final int NUM_SECOND_TABLES_TO_KEEP = 30;
  private MetaStore metaStore;
  // One time-ordered deque per granularity.
  private Map<TimeGranularity, AccessCountTableDeque> tableDeques;
  // Finest-grained deque; new tables enter the hierarchy here.
  private AccessCountTableDeque secondTableDeque;
  private AccessEventAggregator accessEventAggregator;
  // Runs the aggregation work triggered by the table-add listeners.
  private ExecutorService executorService;
  public static final Logger LOG =
      LoggerFactory.getLogger(AccessCountTableManager.class);

  public AccessCountTableManager(MetaStore adapter) {
    this(adapter, Executors.newFixedThreadPool(4));
  }

  public AccessCountTableManager(MetaStore adapter, ExecutorService service) {
    this.metaStore = adapter;
    this.tableDeques = new HashMap<>();
    this.executorService = service;
    this.accessEventAggregator = new AccessEventAggregator(adapter, this);
    this.initTables();
  }

  /**
   * Wires the second -> minute -> hour -> day aggregation chain: each deque is
   * built with its evictor, and each finer deque gets a listener that rolls
   * completed spans up into the next coarser deque.
   */
  private void initTables() {
    AccessCountTableAggregator aggregator = new AccessCountTableAggregator(metaStore);
    AccessCountTableDeque dayTableDeque =
        new AccessCountTableDeque(new CountEvictor(metaStore, NUM_DAY_TABLES_TO_KEEP));
    TableAddOpListener dayTableListener =
        new TableAddOpListener.DayTableListener(dayTableDeque, aggregator, executorService);
    AccessCountTableDeque hourTableDeque =
        new AccessCountTableDeque(
            new CountEvictor(metaStore, NUM_HOUR_TABLES_TO_KEEP), dayTableListener);
    TableAddOpListener hourTableListener =
        new TableAddOpListener.HourTableListener(hourTableDeque, aggregator, executorService);
    AccessCountTableDeque minuteTableDeque =
        new AccessCountTableDeque(
            new CountEvictor(metaStore, NUM_MINUTE_TABLES_TO_KEEP), hourTableListener);
    TableAddOpListener minuteTableListener =
        new TableAddOpListener.MinuteTableListener(minuteTableDeque, aggregator, executorService);
    this.secondTableDeque =
        new AccessCountTableDeque(
            new CountEvictor(metaStore, NUM_SECOND_TABLES_TO_KEEP), minuteTableListener);
    this.tableDeques.put(TimeGranularity.SECOND, this.secondTableDeque);
    this.tableDeques.put(TimeGranularity.MINUTE, minuteTableDeque);
    this.tableDeques.put(TimeGranularity.HOUR, hourTableDeque);
    this.tableDeques.put(TimeGranularity.DAY, dayTableDeque);
    this.recoverTables();
  }

  /**
   * Reloads previously persisted tables from the metastore into the matching
   * deque, classifying each by the granularity of its time span. Failures are
   * logged and otherwise ignored.
   */
  private void recoverTables() {
    try {
      List<AccessCountTable> tables = metaStore.getAllSortedTables();
      for (AccessCountTable table : tables) {
        TimeGranularity timeGranularity =
            TimeUtils.getGranularity(table.getEndTime() - table.getStartTime());
        if (tableDeques.containsKey(timeGranularity)) {
          tableDeques.get(timeGranularity).add(table);
        }
      }
    } catch (MetaStoreException e) {
      LOG.error(e.toString());
    }
  }

  /** Adds a new per-second table, triggering listeners (and thus roll-ups). */
  public void addTable(AccessCountTable accessCountTable) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(accessCountTable.toString());
    }
    this.secondTableDeque.addAndNotifyListener(accessCountTable);
  }

  /** Forwards raw access events to the aggregator that builds second tables. */
  public void onAccessEventsArrived(List<FileAccessEvent> accessEvents) {
    this.accessEventAggregator.addAccessEvents(accessEvents);
  }

  /** Returns the tables covering the most recent {@code lengthInMillis}. */
  public List<AccessCountTable> getTables(long lengthInMillis) throws MetaStoreException {
    return AccessCountTableManager.getTables(this.tableDeques, this.metaStore, lengthInMillis);
  }

  /**
   * Static variant of {@link #getTables(long)}: "now" is taken to be the end
   * time of the newest second-granularity table; returns an empty list when
   * no tables exist yet.
   */
  public static List<AccessCountTable> getTables(
      Map<TimeGranularity, AccessCountTableDeque> tableDeques,
      MetaStore metaStore,
      long lengthInMillis)
      throws MetaStoreException {
    if (tableDeques.isEmpty()) {
      return new ArrayList<>();
    }
    AccessCountTableDeque secondTableDeque = tableDeques.get(TimeGranularity.SECOND);
    if (secondTableDeque == null || secondTableDeque.isEmpty()) {
      return new ArrayList<>();
    }
    long now = secondTableDeque.getLast().getEndTime();
    return getTablesDuring(
        tableDeques, metaStore, lengthInMillis, now, TimeUtils.getGranularity(lengthInMillis));
  }

  // Todo: multi-thread issue
  /**
   * Collects the tables covering [endTime - length, endTime), starting at the
   * given granularity and recursing into finer granularities for any tail the
   * coarser tables do not cover. A table that straddles the window start is
   * split proportionally via createProportionTable (unless an equivalent table
   * already exists). Assumes each deque is sorted by time.
   */
  private static List<AccessCountTable> getTablesDuring(
      final Map<TimeGranularity, AccessCountTableDeque> tableDeques,
      MetaStore metaStore,
      final long length,
      final long endTime,
      final TimeGranularity timeGranularity)
      throws MetaStoreException {
    long startTime = endTime - length;
    AccessCountTableDeque tables = tableDeques.get(timeGranularity);
    List<AccessCountTable> results = new ArrayList<>();
    for (Iterator<AccessCountTable> iterator = tables.iterator(); iterator.hasNext(); ) {
      // Here we assume that the tables are all sorted by time.
      AccessCountTable table = iterator.next();
      if (table.getEndTime() > startTime) {
        if (table.getStartTime() >= startTime) {
          // Fully inside the window: take it whole and advance the window start.
          results.add(table);
          startTime = table.getEndTime();
        } else if (table.getStartTime() < startTime) {
          // We got a table should be spilt here. But sometimes we will split out an
          // table that already exists, so this situation should be avoided.
          if (!tableExists(tableDeques, startTime, table.getEndTime())) {
            AccessCountTable splitTable = new AccessCountTable(startTime, table.getEndTime(), true);
            metaStore.createProportionTable(splitTable, table);
            results.add(splitTable);
            startTime = table.getEndTime();
          }
        }
      }
    }
    // Any uncovered tail is retried at the next finer granularity (seconds is
    // the finest, so recursion bottoms out there).
    if (startTime != endTime && !timeGranularity.equals(TimeGranularity.SECOND)) {
      TimeGranularity fineGrained = TimeUtils.getFineGarinedGranularity(timeGranularity);
      results.addAll(
          getTablesDuring(tableDeques, metaStore, endTime - startTime, endTime, fineGrained));
    }
    return results;
  }

  /** Returns true if a table spanning exactly [start, end) is already held. */
  private static boolean tableExists(
      final Map<TimeGranularity, AccessCountTableDeque> tableDeques, long start, long end) {
    TimeGranularity granularity = TimeUtils.getGranularity(end - start);
    AccessCountTable fakeTable = new AccessCountTable(start, end);
    return tableDeques.containsKey(granularity) && tableDeques.get(granularity).contains(fakeTable);
  }

  @VisibleForTesting
  Map<TimeGranularity, AccessCountTableDeque> getTableDeques() {
    return this.tableDeques;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/CmdletDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/CmdletDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.apache.commons.lang.StringUtils;
import org.smartdata.model.CmdletInfo;
import org.smartdata.model.CmdletState;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the {@code cmdlet} table: one row per submitted cmdlet holding its
 * id (cid), owning rule (rid), contained action ids (aids), state, parameters
 * and timestamps.
 */
public class CmdletDao {
  private DataSource dataSource;
  private static final String TABLE_NAME = "cmdlet";
  // Comma-separated values of all terminal states, precomputed for IN (...) clauses.
  private final String terminatedStates;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public CmdletDao(DataSource dataSource) {
    this.dataSource = dataSource;
    terminatedStates = getTerminatedStatesString();
  }

  /** Returns every cmdlet row. */
  public List<CmdletInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME, new CmdletRowMapper());
  }

  /**
   * Returns one page of cmdlets sorted by the given columns.
   *
   * @param start   row offset of the page
   * @param offset  page size
   * @param orderBy column names to order by (trusted input; inlined into SQL)
   * @param isDesc  per-column descending flags, parallel to orderBy
   */
  public List<CmdletInfo> getAPageOfCmdlet(long start, long offset,
      List<String> orderBy, List<Boolean> isDesc) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    boolean ifHasCid = false;
    StringBuilder sql =
        new StringBuilder("SELECT * FROM " + TABLE_NAME + " ORDER BY ");
    for (int i = 0; i < orderBy.size(); i++) {
      if (orderBy.get(i).equals("cid")) {
        ifHasCid = true;
      }
      sql.append(orderBy.get(i));
      if (isDesc.size() > i && isDesc.get(i)) {
        sql.append(" desc ");
      }
      // Always separate columns with a comma; the original appended it only
      // when isDesc covered index i, yielding invalid SQL for shorter lists.
      sql.append(",");
    }
    // Append cid as a tie-breaker so the ordering is deterministic.
    if (!ifHasCid) {
      sql.append("cid,");
    }
    // Delete the trailing comma.
    sql = new StringBuilder(sql.substring(0, sql.length() - 1));
    // Add limit.
    sql.append(" LIMIT ").append(start).append(",").append(offset).append(";");
    return jdbcTemplate.query(sql.toString(), new CmdletRowMapper());
  }

  /** Returns one page of cmdlets in table order. */
  public List<CmdletInfo> getAPageOfCmdlet(long start, long offset) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM " + TABLE_NAME + " LIMIT " + start + "," + offset + ";";
    return jdbcTemplate.query(sql, new CmdletRowMapper());
  }

  /**
   * Returns the cmdlets whose ids are in the given list.
   *
   * <p>The ids are inlined into the SQL: the original bound the joined string
   * to a single {@code IN (?)} placeholder (which compares against one value
   * and matches nothing) and referenced a nonexistent {@code aid} column
   * instead of {@code cid}. Inlining is safe because the values are longs.
   */
  public List<CmdletInfo> getByIds(List<Long> aids) {
    if (aids == null || aids.isEmpty()) {
      // "IN ()" is invalid SQL, so short-circuit on an empty id list.
      return new ArrayList<>();
    }
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE cid IN ("
            + StringUtils.join(aids, ",") + ")",
        new CmdletRowMapper());
  }

  /** Returns the cmdlet with the given id; throws if no such row exists. */
  public CmdletInfo getById(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT * FROM " + TABLE_NAME + " WHERE cid = ?",
        new Object[]{cid},
        new CmdletRowMapper());
  }

  /** Returns all cmdlets generated by the given rule. */
  public List<CmdletInfo> getByRid(long rid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE rid = ?",
        new Object[]{rid},
        new CmdletRowMapper());
  }

  /** Returns the number of cmdlets generated by the given rule. */
  public long getNumByRid(long rid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject(
        "SELECT COUNT(*) FROM " + TABLE_NAME + " WHERE rid = ?",
        new Object[]{rid},
        Long.class);
  }

  /** Returns one page of the cmdlets generated by the given rule. */
  public List<CmdletInfo> getByRid(long rid, long start, long offset) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM " + TABLE_NAME + " WHERE rid = " + rid
        + " LIMIT " + start + "," + offset + ";";
    return jdbcTemplate.query(sql, new CmdletRowMapper());
  }

  /**
   * Returns one sorted page of the cmdlets generated by the given rule.
   * Same ordering contract as {@link #getAPageOfCmdlet(long, long, List, List)}.
   */
  public List<CmdletInfo> getByRid(long rid, long start, long offset,
      List<String> orderBy, List<Boolean> isDesc) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    boolean ifHasCid = false;
    StringBuilder sql =
        new StringBuilder("SELECT * FROM " + TABLE_NAME + " WHERE rid = " + rid
            + " ORDER BY ");
    for (int i = 0; i < orderBy.size(); i++) {
      if (orderBy.get(i).equals("cid")) {
        ifHasCid = true;
      }
      sql.append(orderBy.get(i));
      if (isDesc.size() > i && isDesc.get(i)) {
        sql.append(" desc ");
      }
      // Always separate columns (see getAPageOfCmdlet for the original bug).
      sql.append(",");
    }
    if (!ifHasCid) {
      sql.append("cid,");
    }
    // Delete the trailing comma.
    sql = new StringBuilder(sql.substring(0, sql.length() - 1));
    // Add limit.
    sql.append(" LIMIT ").append(start).append(",").append(offset).append(";");
    return jdbcTemplate.query(sql.toString(), new CmdletRowMapper());
  }

  /** Returns all cmdlets currently in the given state. */
  public List<CmdletInfo> getByState(CmdletState state) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE state = ?",
        new Object[]{state.getValue()},
        new CmdletRowMapper());
  }

  /** Returns how many cmdlets are in a terminal state. (Name kept for compatibility.) */
  public int getNumCmdletsInTerminiatedStates() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String query = "SELECT count(*) FROM " + TABLE_NAME
        + " WHERE state IN (" + terminatedStates + ")";
    return jdbcTemplate.queryForObject(query, Integer.class);
  }

  /**
   * Returns cmdlets matching the given optional conditions. cidCondition and
   * ridCondition are raw SQL fragments (e.g. "> 5") appended verbatim — they
   * must come from trusted callers, never from user input. Pass null to skip
   * a condition; with all three null, every row is returned.
   */
  public List<CmdletInfo> getByCondition(
      String cidCondition, String ridCondition, CmdletState state) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sqlPrefix = "SELECT * FROM " + TABLE_NAME + " WHERE ";
    String sqlCid = (cidCondition == null) ? "" : "AND cid " + cidCondition;
    String sqlRid = (ridCondition == null) ? "" : "AND rid " + ridCondition;
    String sqlState = (state == null) ? "" : "AND state = " + state.getValue();
    String sqlFinal = "";
    if (cidCondition != null || ridCondition != null || state != null) {
      // Strip the leading "AND " of the first active condition.
      sqlFinal = sqlPrefix + sqlCid + sqlRid + sqlState;
      sqlFinal = sqlFinal.replaceFirst("AND ", "");
    } else {
      sqlFinal = sqlPrefix.replaceFirst("WHERE ", "");
    }
    return jdbcTemplate.query(sqlFinal, new CmdletRowMapper());
  }

  /** Deletes the cmdlet with the given id. */
  public void delete(long cid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE cid = ?";
    jdbcTemplate.update(sql, cid);
  }

  /** Deletes all cmdlets whose ids are listed; returns per-row update counts. */
  public int[] batchDelete(final List<Long> cids) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE cid = ?";
    return jdbcTemplate.batchUpdate(
        sql,
        new BatchPreparedStatementSetter() {
          public void setValues(PreparedStatement ps, int i) throws SQLException {
            ps.setLong(1, cids.get(i));
          }

          public int getBatchSize() {
            return cids.size();
          }
        });
  }

  /**
   * Deletes terminal-state cmdlets generated before the given timestamp, along
   * with their actions; returns the number of cmdlets removed.
   */
  public int deleteBeforeTime(long timestamp) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    // Collect the ids first so the associated actions can be deleted too.
    final String querysql = "SELECT cid FROM " + TABLE_NAME
        + " WHERE generate_time < ? AND state IN (" + terminatedStates + ")";
    List<Long> cids = jdbcTemplate.queryForList(querysql, new Object[]{timestamp}, Long.class);
    if (cids.size() == 0) {
      return 0;
    }
    final String deleteCmds = "DELETE FROM " + TABLE_NAME
        + " WHERE generate_time < ? AND state IN (" + terminatedStates + ")";
    jdbcTemplate.update(deleteCmds, timestamp);
    final String deleteActions = "DELETE FROM action WHERE cid IN ("
        + StringUtils.join(cids, ",") + ")";
    jdbcTemplate.update(deleteActions);
    return cids.size();
  }

  /**
   * Keeps the newest {@code num} terminal-state cmdlets and deletes the rest
   * (at most 100000 per call), along with their actions; returns the count
   * deleted.
   */
  public int deleteKeepNewCmd(long num) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    // MySQL requires a LIMIT before OFFSET; 100000 caps the work per call.
    final String queryCids = "SELECT cid FROM " + TABLE_NAME
        + " WHERE state IN (" + terminatedStates + ")"
        + " ORDER BY generate_time DESC LIMIT 100000 OFFSET " + num;
    List<Long> cids = jdbcTemplate.queryForList(queryCids, Long.class);
    if (cids.size() == 0) {
      return 0;
    }
    String deleteCids = StringUtils.join(cids, ",");
    final String deleteCmd = "DELETE FROM " + TABLE_NAME + " WHERE cid IN (" + deleteCids + ")";
    jdbcTemplate.update(deleteCmd);
    final String deleteActions = "DELETE FROM action WHERE cid IN (" + deleteCids + ")";
    jdbcTemplate.update(deleteActions);
    return cids.size();
  }

  /** Removes every row of the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME;
    jdbcTemplate.execute(sql);
  }

  /** Inserts a single cmdlet row. */
  public void insert(CmdletInfo cmdletInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(cmdletInfo));
  }

  /** Inserts a batch of cmdlet rows. */
  public void insert(CmdletInfo[] cmdletInfos) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    Map<String, Object>[] maps = new Map[cmdletInfos.length];
    for (int i = 0; i < cmdletInfos.length; i++) {
      maps[i] = toMap(cmdletInfos[i]);
    }
    simpleJdbcInsert.executeBatch(maps);
  }

  /** Inserts or overwrites the given rows (MySQL REPLACE semantics). */
  public int[] replace(final CmdletInfo[] cmdletInfos) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "REPLACE INTO " + TABLE_NAME
        + "(cid, "
        + "rid, "
        + "aids, "
        + "state, "
        + "parameters, "
        + "generate_time, "
        + "state_changed_time)"
        + " VALUES(?, ?, ?, ?, ?, ?, ?)";
    return jdbcTemplate.batchUpdate(
        sql,
        new BatchPreparedStatementSetter() {
          public void setValues(PreparedStatement ps, int i) throws SQLException {
            ps.setLong(1, cmdletInfos[i].getCid());
            ps.setLong(2, cmdletInfos[i].getRid());
            ps.setString(3, StringUtils.join(cmdletInfos[i].getAidsString(), ","));
            ps.setLong(4, cmdletInfos[i].getState().getValue());
            ps.setString(5, cmdletInfos[i].getParameters());
            ps.setLong(6, cmdletInfos[i].getGenerateTime());
            ps.setLong(7, cmdletInfos[i].getStateChangedTime());
          }

          public int getBatchSize() {
            return cmdletInfos.length;
          }
        });
  }

  /** Sets a cmdlet's state, stamping the change time with the current clock. */
  public int update(long cid, int state) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql =
        "UPDATE " + TABLE_NAME + " SET state = ?, state_changed_time = ? WHERE cid = ?";
    return jdbcTemplate.update(sql, state, System.currentTimeMillis(), cid);
  }

  /** Sets a cmdlet's parameters and state, stamping the change time. */
  public int update(long cid, String parameters, int state) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql =
        "UPDATE "
            + TABLE_NAME
            + " SET parameters = ?, state = ?, state_changed_time = ? WHERE cid = ?";
    return jdbcTemplate.update(sql, parameters, state, System.currentTimeMillis(), cid);
  }

  /** Updates one cmdlet's state and state-changed time from the given object. */
  public int update(final CmdletInfo cmdletInfo) {
    List<CmdletInfo> cmdletInfos = new ArrayList<>();
    cmdletInfos.add(cmdletInfo);
    return update(cmdletInfos)[0];
  }

  /** Batch-updates state and state-changed time for the given cmdlets. */
  public int[] update(final List<CmdletInfo> cmdletInfos) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE " + TABLE_NAME + " SET state = ?, state_changed_time = ? WHERE cid = ?";
    return jdbcTemplate.batchUpdate(
        sql,
        new BatchPreparedStatementSetter() {
          public void setValues(PreparedStatement ps, int i) throws SQLException {
            ps.setInt(1, cmdletInfos.get(i).getState().getValue());
            ps.setLong(2, cmdletInfos.get(i).getStateChangedTime());
            ps.setLong(3, cmdletInfos.get(i).getCid());
          }

          public int getBatchSize() {
            return cmdletInfos.size();
          }
        });
  }

  /** Returns max(cid) + 1, i.e. the next free id, or 0 for an empty table. */
  public long getMaxId() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    Long ret = jdbcTemplate.queryForObject("SELECT MAX(cid) FROM " + TABLE_NAME, Long.class);
    if (ret == null) {
      return 0;
    } else {
      return ret + 1;
    }
  }

  /** Maps a {@link CmdletInfo} to the column/value pairs used for inserts. */
  private Map<String, Object> toMap(CmdletInfo cmdletInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("cid", cmdletInfo.getCid());
    parameters.put("rid", cmdletInfo.getRid());
    parameters.put("aids", StringUtils.join(cmdletInfo.getAidsString(), ","));
    parameters.put("state", cmdletInfo.getState().getValue());
    parameters.put("parameters", cmdletInfo.getParameters());
    parameters.put("generate_time", cmdletInfo.getGenerateTime());
    parameters.put("state_changed_time", cmdletInfo.getStateChangedTime());
    return parameters;
  }

  /** Builds a comma-separated list of all terminal state values. */
  private String getTerminatedStatesString() {
    StringBuilder states = new StringBuilder();
    for (CmdletState cmdletState : CmdletState.getTerminalStates()) {
      states.append(cmdletState.getValue()).append(",");
    }
    // Drop the trailing comma.
    return states.substring(0, states.length() - 1);
  }

  /** Maps a result-set row to a {@link CmdletInfo}. */
  class CmdletRowMapper implements RowMapper<CmdletInfo> {
    @Override
    public CmdletInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      CmdletInfo.Builder builder = CmdletInfo.newBuilder();
      builder.setCid(resultSet.getLong("cid"));
      builder.setRid(resultSet.getLong("rid"));
      builder.setAids(convertStringListToLong(resultSet.getString("aids").split(",")));
      builder.setState(CmdletState.fromValue((int) resultSet.getByte("state")));
      builder.setParameters(resultSet.getString("parameters"));
      builder.setGenerateTime(resultSet.getLong("generate_time"));
      builder.setStateChangedTime(resultSet.getLong("state_changed_time"));
      return builder.build();
    }

    /** Parses the comma-separated aids column; any bad token yields an empty list. */
    private List<Long> convertStringListToLong(String[] strings) {
      List<Long> ret = new ArrayList<>();
      try {
        for (String s : strings) {
          ret.add(Long.valueOf(s));
        }
      } catch (NumberFormatException e) {
        // Return empty
        ret.clear();
      }
      return ret;
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/XattrDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/XattrDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.XAttribute;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
 * DAO for the {@code xattr} table, which stores one row per extended
 * attribute (fid, namespace, name, value) of a file.
 */
public class XattrDao {
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public XattrDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /**
   * Returns all extended attributes stored for the given file id.
   * Uses a bound parameter instead of formatting the id into the SQL text
   * as the original did.
   */
  public List<XAttribute> getXattrList(Long fid) throws SQLException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return toXAttributes(
        jdbcTemplate.queryForList("SELECT * FROM xattr WHERE fid = ?", fid));
  }

  /**
   * Returns the extended attributes selected by the given raw SQL query.
   * The query must produce namespace/name/value columns; callers are
   * responsible for the safety of the SQL they pass in.
   */
  public List<XAttribute> getXattrList(String sql) throws SQLException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return toXAttributes(jdbcTemplate.queryForList(sql));
  }

  /** Converts raw result rows into {@link XAttribute} objects. */
  private List<XAttribute> toXAttributes(List<Map<String, Object>> rows) {
    List<XAttribute> list = new LinkedList<>();
    for (Map<String, Object> row : rows) {
      list.add(new XAttribute((String) row.get("namespace"),
          (String) row.get("name"), (byte[]) row.get("value")));
    }
    return list;
  }

  /**
   * Inserts all given attributes for the file id in one batch.
   *
   * @return true when every attribute produced a batch entry
   */
  public synchronized boolean insertXattrList(final Long fid, final List<XAttribute> attributes)
      throws SQLException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "INSERT INTO xattr (fid, namespace, name, value) VALUES (?, ?, ?, ?)";
    int[] i = jdbcTemplate.batchUpdate(sql,
        new BatchPreparedStatementSetter() {
          public void setValues(PreparedStatement ps, int i) throws SQLException {
            ps.setLong(1, fid);
            ps.setString(2, attributes.get(i).getNameSpace());
            ps.setString(3, attributes.get(i).getName());
            ps.setBytes(4, attributes.get(i).getValue());
          }

          public int getBatchSize() {
            return attributes.size();
          }
        });
    return i.length == attributes.size();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/StorageDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/StorageDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.StorageCapacity;
import org.smartdata.model.StoragePolicy;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the 'storage', 'storage_policy' and (partially) 'file' tables.
 * All statements are parameterized; the original concatenated user-supplied
 * values (policy names, file paths) directly into SQL.
 */
public class StorageDao {
  private DataSource dataSource;

  /** Maps one row of the 'storage' table onto a StorageCapacity bean. */
  private static final RowMapper<StorageCapacity> STORAGE_CAPACITY_MAPPER =
      new RowMapper<StorageCapacity>() {
        public StorageCapacity mapRow(ResultSet rs, int rowNum) throws SQLException {
          return new StorageCapacity(rs.getString("type"), rs.getLong("time_stamp"),
              rs.getLong("capacity"), rs.getLong("free"));
        }
      };

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public StorageDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /**
   * Loads the whole 'storage' table.
   *
   * @return map from storage type to its capacity record
   */
  public Map<String, StorageCapacity> getStorageTablesItem() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    List<StorageCapacity> list =
        jdbcTemplate.query("SELECT * FROM storage", STORAGE_CAPACITY_MAPPER);
    Map<String, StorageCapacity> map = new HashMap<>();
    for (StorageCapacity s : list) {
      map.put(s.getType(), s);
    }
    return map;
  }

  /**
   * Loads the storage policy table.
   *
   * @return map from policy id to policy name
   */
  public Map<Integer, String> getStoragePolicyIdNameMap() throws SQLException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    List<StoragePolicy> list = jdbcTemplate.query("SELECT * FROM storage_policy",
        new RowMapper<StoragePolicy>() {
          public StoragePolicy mapRow(ResultSet rs, int rowNum) throws SQLException {
            return new StoragePolicy(rs.getByte("sid"), rs.getString("policy_name"));
          }
        });
    Map<Integer, String> map = new HashMap<>();
    for (StoragePolicy s : list) {
      map.put((int) (s.getSid()), s.getPolicyName());
    }
    return map;
  }

  /** Returns the capacity record of one storage type. */
  public StorageCapacity getStorageCapacity(String type) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.queryForObject("SELECT * FROM storage WHERE type = ?",
        new Object[]{type}, STORAGE_CAPACITY_MAPPER);
  }

  /** Returns the policy name registered under the given policy id. */
  public String getStoragePolicyName(int sid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT policy_name FROM storage_policy WHERE sid = ?";
    return jdbcTemplate.queryForObject(sql, new Object[]{sid}, String.class);
  }

  /**
   * Inserts one storage policy row. Parameterized so the policy name cannot
   * break or inject SQL (the original concatenated it into the statement).
   */
  public synchronized void insertStoragePolicyTable(StoragePolicy s) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "INSERT INTO storage_policy (sid, policy_name) VALUES (?, ?)";
    jdbcTemplate.update(sql, s.getSid(), s.getPolicyName());
  }

  /**
   * Updates the storage policy id of the file with the given path.
   * Parameterized to avoid SQL injection through the path.
   *
   * @return number of rows updated
   */
  public int updateFileStoragePolicy(String path,
      Integer policyId) throws SQLException {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.update("UPDATE file SET sid = ? WHERE path = ?", policyId, path);
  }

  /**
   * Inserts or replaces a batch of storage capacity records; entries without
   * a time stamp are stamped with "now".
   */
  public void insertUpdateStoragesTable(final StorageCapacity[] storages)
      throws SQLException {
    if (storages.length == 0) {
      return;
    }
    final Long curr = System.currentTimeMillis();
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "REPLACE INTO storage (type, time_stamp, capacity, free) VALUES (?,?,?,?);";
    jdbcTemplate.batchUpdate(sql,
        new BatchPreparedStatementSetter() {
          public void setValues(PreparedStatement ps,
              int i) throws SQLException {
            ps.setString(1, storages[i].getType());
            if (storages[i].getTimeStamp() == null) {
              ps.setLong(2, curr);
            } else {
              ps.setLong(2, storages[i].getTimeStamp());
            }
            ps.setLong(3, storages[i].getCapacity());
            ps.setLong(4, storages[i].getFree());
          }

          public int getBatchSize() {
            return storages.length;
          }
        });
  }

  /** Counts rows of the given storage type. */
  public int getCountOfStorageType(String type) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT COUNT(*) FROM storage WHERE type = ?";
    return jdbcTemplate.queryForObject(sql, Integer.class, type);
  }

  /** Deletes the capacity record of the given storage type. */
  public void deleteStorage(String storageType) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM storage WHERE type = ?";
    jdbcTemplate.update(sql, storageType);
  }

  /**
   * Updates capacity/free (and optionally the time stamp) of one storage type.
   * As before, nothing is written unless capacity or free is supplied; that
   * case now returns false instead of failing with a NullPointerException
   * (the original passed a null SQL string to JdbcTemplate).
   *
   * @return true when exactly one row was updated
   */
  public synchronized boolean updateStoragesTable(String type, Long timeStamp,
      Long capacity, Long free) throws SQLException {
    if (capacity == null && free == null) {
      // Original built no statement here and then NPE'd; keep the "no update"
      // semantics but fail gracefully.
      return false;
    }
    List<String> assignments = new ArrayList<>();
    List<Object> args = new ArrayList<>();
    if (capacity != null) {
      assignments.add("capacity = ?");
      args.add(capacity);
    }
    if (free != null) {
      assignments.add("free = ?");
      args.add(free);
    }
    if (timeStamp != null) {
      assignments.add("time_stamp = ?");
      args.add(timeStamp);
    }
    args.add(type);
    StringBuilder sql = new StringBuilder("UPDATE storage SET ");
    for (int i = 0; i < assignments.size(); i++) {
      if (i > 0) {
        sql.append(", ");
      }
      sql.append(assignments.get(i));
    }
    sql.append(" WHERE type = ?");
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.update(sql.toString(), args.toArray()) == 1;
  }

  /** Convenience overload stamping the update with the current time. */
  public synchronized boolean updateStoragesTable(String type,
      Long capacity, Long free) throws SQLException {
    return updateStoragesTable(type, System.currentTimeMillis(), capacity, free);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/StorageHistoryDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/StorageHistoryDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.StorageCapacity;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;
/**
 * DAO for the 'storage_hist' table. Samples for different aggregation
 * intervals share the table; the interval is encoded into the type column as
 * "&lt;type&gt;-&lt;interval&gt;".
 */
public class StorageHistoryDao {
  private DataSource dataSource;

  public StorageHistoryDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /**
   * Batch-inserts one history sample per storage.
   *
   * @param storages samples to store; entries without a time stamp get "now"
   * @param interval aggregation interval (ms) these samples belong to
   */
  public void insertStorageHistTable(final StorageCapacity[] storages, final long interval)
      throws SQLException {
    if (storages.length == 0) {
      return;
    }
    final Long curr = System.currentTimeMillis();
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "INSERT INTO storage_hist (type, time_stamp, capacity, free) VALUES (?,?,?,?);";
    jdbcTemplate.batchUpdate(sql,
        new BatchPreparedStatementSetter() {
          public void setValues(PreparedStatement ps,
              int i) throws SQLException {
            // Encode the interval into the type column (shared-table scheme).
            ps.setString(1, storages[i].getType() + "-" + interval);
            if (storages[i].getTimeStamp() == null) {
              ps.setLong(2, curr);
            } else {
              ps.setLong(2, storages[i].getTimeStamp());
            }
            ps.setLong(3, storages[i].getCapacity());
            ps.setLong(4, storages[i].getFree());
          }

          public int getBatchSize() {
            return storages.length;
          }
        });
  }

  /**
   * Returns history samples for one storage type and interval within
   * [startTime, endTime]. The interval suffix is stripped from the returned
   * type field before handing the records back.
   */
  public List<StorageCapacity> getStorageHistoryData(String type, long interval,
      long startTime, long endTime) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT * FROM storage_hist WHERE type = ? AND "
        + "time_stamp BETWEEN ? AND ?";
    List<StorageCapacity> data = jdbcTemplate.query(sql,
        new Object[]{type + "-" + interval, startTime, endTime},
        new StorageHistoryRowMapper());
    for (StorageCapacity sc : data) {
      sc.setType(type);
    }
    return data;
  }

  /** Counts stored samples for one storage type and interval. */
  public int getNumberOfStorageHistoryData(String type, long interval) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "SELECT COUNT(*) FROM storage_hist WHERE type = ?";
    return jdbcTemplate.queryForObject(sql, new Object[]{type + "-" + interval}, Integer.class);
  }

  /**
   * Deletes samples of the given type/interval taken at or before the given
   * time stamp. (Fixed the misspelled 'beforTimeStamp' parameter name.)
   */
  public void deleteOldRecords(String type, long interval, long beforeTimeStamp) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "DELETE FROM storage_hist WHERE type = ? AND time_stamp <= ?";
    jdbcTemplate.update(sql, type + "-" + interval, beforeTimeStamp);
  }

  /** Maps a storage_hist row; the type still carries the interval suffix. */
  class StorageHistoryRowMapper implements RowMapper<StorageCapacity> {
    @Override
    public StorageCapacity mapRow(ResultSet resultSet, int i) throws SQLException {
      return new StorageCapacity(resultSet.getString("type"),
          resultSet.getLong("time_stamp"),
          resultSet.getLong("capacity"), resultSet.getLong("free"));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/DataNodeStorageInfoDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/DataNodeStorageInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.DataNodeStorageInfo;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the 'datanode_storage_info' table.
 */
public class DataNodeStorageInfoDao {
  private DataSource dataSource;
  private static final String TABLE_NAME = "datanode_storage_info";

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public DataNodeStorageInfoDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every row of the table. */
  public List<DataNodeStorageInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME,
        new DataNodeStorageInfoRowMapper());
  }

  /** Returns the storage records of one datanode (by its uuid). */
  public List<DataNodeStorageInfo> getByUuid(String uuid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query(
        "SELECT * FROM " + TABLE_NAME + " WHERE uuid = ?",
        new Object[]{uuid}, new DataNodeStorageInfoRowMapper());
  }

  /** Returns the storage records with the given storage id (sid). */
  public List<DataNodeStorageInfo> getBySid(int sid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME + " WHERE sid = ?",
        new Object[]{sid}, new DataNodeStorageInfoRowMapper());
  }

  /** Inserts a single record. */
  public void insert(DataNodeStorageInfo dataNodeStorageInfoInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(dataNodeStorageInfoInfo));
  }

  /** Batch-inserts an array of records. */
  @SuppressWarnings("unchecked")
  public void insert(DataNodeStorageInfo[] dataNodeStorageInfos) {
    Map<String, Object>[] maps = new Map[dataNodeStorageInfos.length];
    for (int i = 0; i < dataNodeStorageInfos.length; i++) {
      maps[i] = toMap(dataNodeStorageInfos[i]);
    }
    executeBatchInsert(maps);
  }

  /** Batch-inserts a list of records. */
  @SuppressWarnings("unchecked")
  public void insert(List<DataNodeStorageInfo> dataNodeStorageInfos) {
    Map<String, Object>[] maps = new Map[dataNodeStorageInfos.size()];
    for (int i = 0; i < dataNodeStorageInfos.size(); i++) {
      maps[i] = toMap(dataNodeStorageInfos.get(i));
    }
    executeBatchInsert(maps);
  }

  /** Runs one batch insert for the prepared parameter maps (shared by both
   *  insert overloads; the original duplicated this setup). */
  private void executeBatchInsert(Map<String, Object>[] maps) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.executeBatch(maps);
  }

  /** Deletes all storage records of one datanode. */
  public void delete(String uuid) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE uuid = ?";
    jdbcTemplate.update(sql, uuid);
  }

  /** Empties the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME;
    jdbcTemplate.update(sql);
  }

  /** Converts a record into the column-name -> value map SimpleJdbcInsert expects. */
  private Map<String, Object> toMap(DataNodeStorageInfo dataNodeStorageInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("uuid", dataNodeStorageInfo.getUuid());
    parameters.put("sid", dataNodeStorageInfo.getSid());
    parameters.put("state", dataNodeStorageInfo.getState());
    parameters.put("storage_id", dataNodeStorageInfo.getStorageId());
    parameters.put("failed", dataNodeStorageInfo.getFailed());
    parameters.put("capacity", dataNodeStorageInfo.getCapacity());
    parameters.put("dfs_used", dataNodeStorageInfo.getDfsUsed());
    parameters.put("remaining", dataNodeStorageInfo.getRemaining());
    parameters.put("block_pool_used", dataNodeStorageInfo.getBlockPoolUsed());
    return parameters;
  }

  /** Maps a table row back onto a DataNodeStorageInfo. */
  class DataNodeStorageInfoRowMapper implements RowMapper<DataNodeStorageInfo> {
    @Override
    public DataNodeStorageInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      return DataNodeStorageInfo.newBuilder()
          .setUuid(resultSet.getString("uuid"))
          .setSid(resultSet.getLong("sid"))
          .setState(resultSet.getLong("state"))
          .setStorageId(resultSet.getString("storage_id"))
          .setFailed(resultSet.getLong("failed"))
          .setCapacity(resultSet.getLong("capacity"))
          .setDfsUsed(resultSet.getLong("dfs_used"))
          .setRemaining(resultSet.getLong("remaining"))
          .setBlockPoolUsed(resultSet.getLong("block_pool_used"))
          .build();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/UserInfoDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/UserInfoDao.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.dao;
import org.smartdata.model.UserInfo;
import org.smartdata.utils.StringUtil;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.jdbc.core.simple.SimpleJdbcInsert;
import javax.sql.DataSource;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * DAO for the 'user_info' table. Passwords are stored as SHA-512 digests
 * (see {@link #insert} and {@link #newPassword}).
 */
public class UserInfoDao {
  private static final String TABLE_NAME = "user_info";
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public UserInfoDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns every user record. */
  public List<UserInfo> getAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    return jdbcTemplate.query("SELECT * FROM " + TABLE_NAME, new UserInfoRowMapper());
  }

  /** Returns true when a user with the given name exists. */
  public boolean containsUserName(String name) {
    return !list(name).isEmpty();
  }

  /** Fetches all rows matching the given user name (0 or 1 expected). */
  private List<UserInfo> list(String name) {
    return new JdbcTemplate(dataSource)
        .query(
            "SELECT * FROM " + TABLE_NAME + " WHERE user_name = ?",
            new Object[] {name},
            new UserInfoRowMapper());
  }

  /** Returns the user record, or null when the name is unknown. */
  public UserInfo getByUserName(String name) {
    List<UserInfo> infos = list(name);
    return infos.isEmpty() ? null : infos.get(0);
  }

  /** Deletes the user with the given name. */
  public void delete(String name) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME + " WHERE user_name = ?";
    jdbcTemplate.update(sql, name);
  }

  /** Inserts a user, hashing the supplied plain-text password with SHA-512. */
  public void insert(UserInfo userInfo) {
    SimpleJdbcInsert simpleJdbcInsert = new SimpleJdbcInsert(dataSource);
    simpleJdbcInsert.setTableName(TABLE_NAME);
    simpleJdbcInsert.execute(toMap(new UserInfo(userInfo.getUserName(),
        StringUtil.toSHA512String(userInfo.getUserPassword()))));
  }

  /**
   * Checks the given credentials against the stored record.
   * NOTE(review): the stored password is a SHA-512 digest (see insert), so the
   * supplied UserInfo presumably carries an already-hashed password — confirm
   * against callers.
   *
   * @return true when the user exists and the credentials match
   */
  public boolean authentic(UserInfo userInfo) {
    UserInfo origin = getByUserName(userInfo.getUserName());
    // Previously this NPE'd for an unknown user name; treat it as a failed login.
    return origin != null && origin.equals(userInfo);
  }

  /** Replaces the stored password digest with the SHA-512 of the new password. */
  public int newPassword(UserInfo userInfo) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    String sql = "UPDATE " + TABLE_NAME + " SET user_password = ? WHERE user_name = ?";
    return jdbcTemplate.update(sql, StringUtil.toSHA512String(userInfo.getUserPassword()),
        userInfo.getUserName());
  }

  /** Empties the table. */
  public void deleteAll() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    final String sql = "DELETE FROM " + TABLE_NAME;
    jdbcTemplate.execute(sql);
  }

  /** Converts a user record into the column map SimpleJdbcInsert expects. */
  private Map<String, Object> toMap(UserInfo userInfo) {
    Map<String, Object> parameters = new HashMap<>();
    parameters.put("user_name", userInfo.getUserName());
    parameters.put("user_password", userInfo.getUserPassword());
    return parameters;
  }

  /** Maps a table row back onto a UserInfo. */
  class UserInfoRowMapper implements RowMapper<UserInfo> {
    @Override
    public UserInfo mapRow(ResultSet resultSet, int i) throws SQLException {
      return new UserInfo(resultSet.getString("user_name"), resultSet.getString("user_password"));
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/dao/WhitelistDao.java | smart-metastore/src/main/java/org/smartdata/metastore/dao/WhitelistDao.java | package org.smartdata.metastore.dao;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
/**
 * DAO for the single-row 'whitelist' table, which stores the directories
 * fetched during the last namespace scan.
 */
public class WhitelistDao {
  private static final String TABLE_NAME = "whitelist";
  public static final String DIRS_FIELD = "last_fetched_dirs";
  private DataSource dataSource;

  public void setDataSource(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  public WhitelistDao(DataSource dataSource) {
    this.dataSource = dataSource;
  }

  /** Returns the stored whitelist directories string. */
  public String getLastFetchedDirs() {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    // Select the column explicitly; queryForObject(String.class) requires a
    // single-column result anyway, so this is equivalent to the old SELECT *.
    String sql = "SELECT " + DIRS_FIELD + " FROM " + TABLE_NAME;
    return jdbcTemplate.queryForObject(sql, String.class);
  }

  /** Overwrites the stored whitelist with the new value. */
  public void updateTable(String newWhitelist) {
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    // Use the shared constants instead of hard-coded table/column names.
    final String sql = "UPDATE " + TABLE_NAME + " SET " + DIRS_FIELD + " = ?";
    jdbcTemplate.update(sql, newWhitelist);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/ingestion/FileStatusIngester.java | smart-metastore/src/main/java/org/smartdata/metastore/ingestion/FileStatusIngester.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.ingestion;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.model.FileInfo;
import org.smartdata.model.FileInfoBatch;
/**
 * Consumer task that drains batches of fetched file statuses into the
 * metastore. Instance 0 additionally logs overall persistence progress.
 */
public class FileStatusIngester implements Runnable {
  public static final Logger LOG = LoggerFactory.getLogger(FileStatusIngester.class);

  private final MetaStore dbAdapter;
  private long startTime = System.currentTimeMillis();
  private long lastUpdateTime = startTime;
  // NOTE(review): plain static counter; not thread-safe if ingesters are
  // constructed concurrently — confirm construction is single-threaded.
  private static int idCounter = 0;
  private final int id;

  public FileStatusIngester(MetaStore dbAdapter) {
    this.dbAdapter = dbAdapter;
    id = idCounter++;
  }

  /**
   * Pulls one batch from the shared queue (if any), inserts its populated
   * entries into the 'files' table, and — from ingester 0 only — logs the
   * persistence percentage at most every 5 seconds.
   */
  @Override
  public void run() {
    FileInfoBatch batch = IngestionTask.pollBatch();
    try {
      if (batch != null) {
        FileInfo[] statuses = batch.getFileInfos();
        if (statuses.length == batch.actualSize()) {
          this.dbAdapter.insertFiles(batch.getFileInfos());
          IngestionTask.numPersisted.addAndGet(statuses.length);
        } else {
          // Partially filled batch: persist only the populated prefix.
          FileInfo[] actual = new FileInfo[batch.actualSize()];
          System.arraycopy(statuses, 0, actual, 0, batch.actualSize());
          this.dbAdapter.insertFiles(actual);
          IngestionTask.numPersisted.addAndGet(actual.length);
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Consumer " + id + " " + batch.actualSize()
              + " files insert into table 'files'.");
        }
      }
    } catch (MetaStoreException e) {
      // TODO: handle this issue
      // Pass the exception so the stack trace is logged (the original dropped it).
      LOG.error("Consumer {} error", id, e);
    }
    if (id == 0) {
      long curr = System.currentTimeMillis();
      if (curr - lastUpdateTime >= 5000) {
        long total =
            IngestionTask.numDirectoriesFetched.get() + IngestionTask.numFilesFetched.get();
        if (total > 0) {
          LOG.info(String.format(
              "%d sec, %d%% persisted into database",
              (curr - startTime) / 1000, IngestionTask.numPersisted.get() * 100 / total));
        } else {
          LOG.info(String.format(
              "%d sec, 0%% persisted into database",
              (curr - startTime) / 1000));
        }
        lastUpdateTime = curr;
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/ingestion/IngestionTask.java | smart-metastore/src/main/java/org/smartdata/metastore/ingestion/IngestionTask.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.ingestion;
import org.smartdata.conf.SmartConf;
import org.smartdata.model.FileInfo;
import org.smartdata.model.FileInfoBatch;
import java.util.List;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Base class for namespace ingestion tasks. Producers traverse the namespace
 * (breadth-first via {@code deque}) and publish filled {@link FileInfoBatch}es
 * to {@code batches}, which consumers ({@code FileStatusIngester}) drain.
 * All shared state is static and process-wide.
 */
public abstract class IngestionTask implements Runnable {
  public static AtomicLong numFilesFetched = new AtomicLong(0);
  public static AtomicLong numDirectoriesFetched = new AtomicLong(0);
  public static AtomicLong numPersisted = new AtomicLong(0);
  protected int defaultBatchSize = 20;
  protected int maxPendingBatches = 80;
  protected static final String ROOT = "/";
  // Deque for Breadth-First-Search
  protected static LinkedBlockingDeque<String> deque = new LinkedBlockingDeque<>();
  // Queue for outer-consumer to fetch file status
  protected static LinkedBlockingDeque<FileInfoBatch> batches = new LinkedBlockingDeque<>();
  protected FileInfoBatch currentBatch;
  protected static volatile boolean isFinished = false;
  protected long lastUpdateTime = System.currentTimeMillis();
  protected long startTime = lastUpdateTime;

  /** Resets shared state and seeds the traversal with the configured cover dirs. */
  public static void init(SmartConf conf) {
    deque.clear();
    IngestionTask.isFinished = false;
    List<String> fetchDirs = conf.getCoverDir();
    if (fetchDirs.isEmpty()) {
      deque.add(ROOT);
    } else {
      for (String dir : fetchDirs) {
        deque.add(dir);
      }
    }
  }

  /** Resets shared state and seeds the traversal with a single directory. */
  public static void init(String dir) {
    deque.clear();
    IngestionTask.isFinished = false;
    deque.add(dir);
  }

  public IngestionTask() {
    this.currentBatch = new FileInfoBatch(defaultBatchSize);
  }

  /** Returns true once the traversal has completed. */
  public static boolean finished() {
    return isFinished;
  }

  /** Non-blocking fetch of the next filled batch, or null when none is ready. */
  public static FileInfoBatch pollBatch() {
    return batches.poll();
  }

  /**
   * Appends one file status to the current batch and, when the batch is full,
   * publishes it (blocking if the queue is at capacity) and starts a new one.
   */
  public void addFileStatus(FileInfo status) throws InterruptedException {
    this.currentBatch.add(status);
    if (this.currentBatch.isFull()) {
      // 'batches' is static; access it without the misleading 'this.' qualifier.
      batches.put(currentBatch);
      this.currentBatch = new FileInfoBatch(defaultBatchSize);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/utils/TimeGranularity.java | smart-metastore/src/main/java/org/smartdata/metastore/utils/TimeGranularity.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.utils;
/**
 * Time granularities used when aggregating metastore history data, ordered
 * from finest (SECOND) to coarsest (YEAR).
 */
public enum TimeGranularity {
  SECOND,
  MINUTE,
  HOUR,
  DAY,
  WEEK,
  MONTH,
  YEAR
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/utils/TimeUtils.java | smart-metastore/src/main/java/org/smartdata/metastore/utils/TimeUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.utils;
/**
 * Helpers for mapping time interval lengths onto {@link TimeGranularity}
 * values.
 */
public class TimeUtils {
  /**
   * Chooses the coarsest granularity fully covered by the given interval.
   *
   * @param length interval length in milliseconds
   * @return DAY, HOUR, MINUTE or SECOND
   */
  public static TimeGranularity getGranularity(long length) {
    if (length / Constants.ONE_DAY_IN_MILLIS > 0) {
      return TimeGranularity.DAY;
    } else if (length / Constants.ONE_HOUR_IN_MILLIS > 0) {
      return TimeGranularity.HOUR;
    } else if (length / Constants.ONE_MINUTE_IN_MILLIS > 0) {
      return TimeGranularity.MINUTE;
    } else {
      return TimeGranularity.SECOND;
    }
  }

  /**
   * Returns the next finer granularity (YEAR -> MONTH -> WEEK -> DAY -> HOUR
   * -> MINUTE -> SECOND); SECOND maps to itself.
   */
  public static TimeGranularity getFineGrainedGranularity(TimeGranularity granularity) {
    switch (granularity) {
      case YEAR:
        return TimeGranularity.MONTH;
      case MONTH:
        return TimeGranularity.WEEK;
      case WEEK:
        return TimeGranularity.DAY;
      case DAY:
        return TimeGranularity.HOUR;
      case HOUR:
        return TimeGranularity.MINUTE;
      case MINUTE:
      case SECOND:
      default:
        return TimeGranularity.SECOND;
    }
  }

  /**
   * @deprecated misspelled; use
   *     {@link #getFineGrainedGranularity(TimeGranularity)}. Kept so existing
   *     callers keep compiling.
   */
  @Deprecated
  public static TimeGranularity getFineGarinedGranularity(TimeGranularity granularity) {
    return getFineGrainedGranularity(granularity);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/utils/MetaStoreUtils.java | smart-metastore/src/main/java/org/smartdata/metastore/utils/MetaStoreUtils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.utils;
import com.mysql.jdbc.NonRegisteringDriver;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.DruidPool;
import org.smartdata.metastore.MetaStore;
import org.smartdata.metastore.MetaStoreException;
import org.smartdata.utils.StringUtil;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URL;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.InvalidPropertiesFormatException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.springframework.jdbc.support.JdbcUtils.closeConnection;
/**
* Utilities for table operations.
*/
public class MetaStoreUtils {
  public static final String SQLITE_URL_PREFIX = "jdbc:sqlite:";
  public static final String MYSQL_URL_PREFIX = "jdbc:mysql:";
  // MySQL system databases; SSM refuses to use any of these as its metastore.
  public static final String[] DB_NAME_NOT_ALLOWED =
      new String[] {
        "mysql",
        "sys",
        "information_schema",
        "INFORMATION_SCHEMA",
        "performance_schema",
        "PERFORMANCE_SCHEMA"
      };
  static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class);
  // Max bytes one character may occupy in the DB charset (e.g. 3 for utf8).
  // Used to shrink index prefix lengths below MySQL 5.6's 767-byte key limit.
  // Overwritten from configuration in getDBAdapter().
  private static int characterTakeUpBytes = 1;
  // Initial password of the built-in 'admin' account (stored SHA-512 hashed).
  private static final String defaultPassword = "ssm@123";
  // Every table SSM creates/drops when (re)initializing the metastore.
  public static final String TABLESET[] = new String[]{
      "access_count_table",
      "blank_access_count_info",
      "cached_file",
      "ec_policy",
      "file",
      "storage",
      "storage_hist",
      "storage_policy",
      "xattr",
      "datanode_info",
      "datanode_storage_info",
      "rule",
      "cmdlet",
      "action",
      "file_diff",
      "global_config",
      "cluster_config",
      "sys_info",
      "cluster_info",
      "backup_file",
      "file_state",
      "compression_file",
      "small_file",
      "user_info",
      "whitelist"
  };

  /**
   * Open a JDBC connection, loading the sqlite or mysql driver class first
   * when the url indicates one of them.
   *
   * @param url JDBC url
   * @param userName DB user, may be null for sqlite
   * @param password DB password, may be null for sqlite
   * @return an open connection; caller is responsible for closing it
   * @throws ClassNotFoundException if the driver class is missing
   * @throws SQLException on connection failure
   */
  public static Connection createConnection(String url,
      String userName, String password)
      throws ClassNotFoundException, SQLException {
    if (url.startsWith(SQLITE_URL_PREFIX)) {
      Class.forName("org.sqlite.JDBC");
    } else if (url.startsWith(MYSQL_URL_PREFIX)) {
      Class.forName("com.mysql.jdbc.Driver");
    }
    return DriverManager.getConnection(url, userName, password);
  }

  /**
   * Open a JDBC connection using an explicitly named driver class.
   *
   * @param driver fully qualified driver class name
   * @param url JDBC url
   * @param userName DB user
   * @param password DB password
   * @return an open connection; caller is responsible for closing it
   * @throws ClassNotFoundException if the driver class is missing
   * @throws SQLException on connection failure
   */
  public static Connection createConnection(String driver, String url,
      String userName,
      String password) throws ClassNotFoundException, SQLException {
    Class.forName(driver);
    return DriverManager.getConnection(url, userName, password);
  }

  /**
   * Open a connection to the sqlite database file at the given path.
   *
   * @param dbFilePath path of the sqlite db file
   * @return an open connection; caller is responsible for closing it
   * @throws MetaStoreException wrapping any driver or connection error
   */
  public static Connection createSqliteConnection(String dbFilePath)
      throws MetaStoreException {
    try {
      return createConnection("org.sqlite.JDBC", SQLITE_URL_PREFIX + dbFilePath,
          null, null);
    } catch (Exception e) {
      throw new MetaStoreException(e);
    }
  }

  /**
   * Count how many of the given tables already exist in the database behind
   * {@code conn}.
   *
   * <p>Note: the passed-in connection is always closed before this method
   * returns (existing contract relied on by callers).
   *
   * @param conn open JDBC connection (closed by this method)
   * @param tableSet table names to look for
   * @return number of tables from {@code tableSet} present in the database
   * @throws MetaStoreException on SQL errors or an unsupported JDBC url
   */
  public static int getTableSetNum(Connection conn, String tableSet[]) throws MetaStoreException {
    String tables = "('" + StringUtils.join(tableSet, "','") + "')";
    try {
      String url = conn.getMetaData().getURL();
      String query;
      if (url.startsWith(MetaStoreUtils.MYSQL_URL_PREFIX)) {
        String dbName = getMysqlDBName(url);
        query = String.format("SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES "
            + "WHERE TABLE_SCHEMA='%s' AND TABLE_NAME IN %s", dbName, tables);
      } else if (url.startsWith(MetaStoreUtils.SQLITE_URL_PREFIX)) {
        query = String.format("SELECT COUNT(*) FROM sqlite_master "
            + "WHERE TYPE='table' AND NAME IN %s", tables);
      } else {
        throw new MetaStoreException("The jdbc url is not valid for SSM use.");
      }
      int num = 0;
      // try-with-resources so the statement and result set never leak
      try (Statement s = conn.createStatement();
           ResultSet rs = s.executeQuery(query)) {
        if (rs.next()) {
          num = rs.getInt(1);
        }
      }
      return num;
    } catch (Exception e) {
      throw new MetaStoreException(e);
    } finally {
      closeConnection(conn);
    }
  }

  /**
   * Drop all SSM tables and recreate them empty, inserting the default
   * storage policies, the default admin user and the empty whitelist row.
   *
   * <p>On MySQL the schema statements are adapted for version differences
   * (see {@link #sqlCompatibility}); versions before 5.5 are rejected.
   *
   * @param conn open JDBC connection (not closed by this method)
   * @throws MetaStoreException on any SQL error or unsupported DB version
   */
  public static void initializeDataBase(
      Connection conn) throws MetaStoreException {
    ArrayList<String> tableList = new ArrayList<>();
    for (String table: TABLESET) {
      tableList.add("DROP TABLE IF EXISTS " + table);
    }
    String deleteExistingTables[] = tableList.toArray(new String[tableList.size()]);
    String password = StringUtil.toSHA512String(defaultPassword);
    String createEmptyTables[] =
        new String[] {
          "CREATE TABLE access_count_table (\n"
              + "  table_name varchar(255) PRIMARY KEY,\n"
              + "  start_time bigint(20) NOT NULL,\n"
              + "  end_time bigint(20) NOT NULL\n"
              + ") ;",
          "CREATE TABLE blank_access_count_info (\n"
              + "  fid bigint(20) NOT NULL,\n"
              + "  count bigint(20) NOT NULL\n"
              + ");",
          "CREATE TABLE cached_file (\n"
              + "  fid bigint(20) NOT NULL,\n"
              + "  path varchar(1000) NOT NULL,\n"
              + "  from_time bigint(20) NOT NULL,\n"
              + "  last_access_time bigint(20) NOT NULL,\n"
              + "  accessed_num int(11) NOT NULL\n"
              + ");",
          "CREATE INDEX cached_file_fid_idx ON cached_file (fid);",
          "CREATE INDEX cached_file_path_idx ON cached_file (path);",
          "CREATE TABLE ec_policy (\n"
              + "  id tinyint(1) NOT NULL PRIMARY KEY,\n"
              + "  policy_name varchar(255) NOT NULL\n"
              + ");",
          "CREATE TABLE file (\n"
              + "  path varchar(1000) NOT NULL,\n"
              + "  fid bigint(20) NOT NULL,\n"
              + "  length bigint(20) DEFAULT NULL,\n"
              + "  block_replication smallint(6) DEFAULT NULL,\n"
              + "  block_size bigint(20) DEFAULT NULL,\n"
              + "  modification_time bigint(20) DEFAULT NULL,\n"
              + "  access_time bigint(20) DEFAULT NULL,\n"
              + "  is_dir tinyint(1) DEFAULT NULL,\n"
              + "  sid tinyint(4) DEFAULT NULL,\n"
              + "  owner varchar(255) DEFAULT NULL,\n"
              + "  owner_group varchar(255) DEFAULT NULL,\n"
              + "  permission smallint(6) DEFAULT NULL,\n"
              + "  ec_policy_id tinyint(1) DEFAULT NULL\n"
              + ");",
          "CREATE INDEX file_fid_idx ON file (fid);",
          "CREATE INDEX file_path_idx ON file (path);",
          "CREATE TABLE storage (\n"
              + "  type varchar(32) PRIMARY KEY,\n"
              + "  time_stamp bigint(20) DEFAULT NULL,\n"
              + "  capacity bigint(20) NOT NULL,\n"
              + "  free bigint(20) NOT NULL\n"
              + ");",
          "CREATE TABLE storage_hist (\n" // Keep this compatible with Table 'storage'
              + "  type varchar(64),\n"
              + "  time_stamp bigint(20) DEFAULT NULL,\n"
              + "  capacity bigint(20) NOT NULL,\n"
              + "  free bigint(20) NOT NULL\n"
              + ");",
          "CREATE INDEX type_idx ON storage_hist (type);",
          "CREATE INDEX time_stamp_idx ON storage_hist (time_stamp);",
          "CREATE TABLE storage_policy (\n"
              + "  sid tinyint(4) PRIMARY KEY,\n"
              + "  policy_name varchar(64) DEFAULT NULL\n"
              + ");",
          "INSERT INTO storage_policy VALUES ('0', 'UNDEF');",
          "INSERT INTO storage_policy VALUES ('2', 'COLD');",
          "INSERT INTO storage_policy VALUES ('5', 'WARM');",
          "INSERT INTO storage_policy VALUES ('7', 'HOT');",
          "INSERT INTO storage_policy VALUES ('10', 'ONE_SSD');",
          "INSERT INTO storage_policy VALUES ('12', 'ALL_SSD');",
          "INSERT INTO storage_policy VALUES ('15', 'LAZY_PERSIST');",
          "CREATE TABLE xattr (\n"
              + "  fid bigint(20) NOT NULL,\n"
              + "  namespace varchar(255) NOT NULL,\n"
              + "  name varchar(255) NOT NULL,\n"
              + "  value blob NOT NULL\n"
              + ");",
          "CREATE INDEX xattr_fid_idx ON xattr (fid);",
          "CREATE TABLE datanode_info (\n"
              + "  uuid varchar(64) PRIMARY KEY,\n"
              + "  hostname varchar(255) NOT NULL,\n"
              + // DatanodeInfo
              "  rpcAddress varchar(21) DEFAULT NULL,\n"
              + "  cache_capacity bigint(20) DEFAULT NULL,\n"
              + "  cache_used bigint(20) DEFAULT NULL,\n"
              + "  location varchar(255) DEFAULT NULL\n"
              + ");",
          "CREATE TABLE datanode_storage_info (\n"
              + "  uuid varchar(64) NOT NULL,\n"
              + "  sid tinyint(4) NOT NULL,\n"
              + // storage type
              "  state tinyint(4) NOT NULL,\n"
              + // DatanodeStorage.state
              "  storage_id varchar(64) NOT NULL,\n"
              + // StorageReport ...
              "  failed tinyint(1) DEFAULT NULL,\n"
              + "  capacity bigint(20) DEFAULT NULL,\n"
              + "  dfs_used bigint(20) DEFAULT NULL,\n"
              + "  remaining bigint(20) DEFAULT NULL,\n"
              + "  block_pool_used bigint(20) DEFAULT NULL\n"
              + ");",
          "CREATE TABLE rule (\n"
              + "  id INTEGER PRIMARY KEY AUTOINCREMENT,\n"
              + "  name varchar(255) DEFAULT NULL,\n"
              + "  state tinyint(4) NOT NULL,\n"
              + "  rule_text varchar(4096) NOT NULL,\n"
              + "  submit_time bigint(20) NOT NULL,\n"
              + "  last_check_time bigint(20) DEFAULT NULL,\n"
              + "  checked_count int(11) NOT NULL,\n"
              + "  generated_cmdlets int(11) NOT NULL\n"
              + ");",
          "CREATE TABLE cmdlet (\n"
              + "  cid INTEGER PRIMARY KEY,\n"
              + "  rid INTEGER NOT NULL,\n"
              + "  aids varchar(4096) NOT NULL,\n"
              + "  state tinyint(4) NOT NULL,\n"
              + "  parameters varchar(4096) NOT NULL,\n"
              + "  generate_time bigint(20) NOT NULL,\n"
              + "  state_changed_time bigint(20) NOT NULL\n"
              + ");",
          "CREATE TABLE action (\n"
              + "  aid INTEGER PRIMARY KEY,\n"
              + "  cid INTEGER NOT NULL,\n"
              + "  action_name varchar(4096) NOT NULL,\n"
              + "  args text NOT NULL,\n"
              + "  result mediumtext NOT NULL,\n"
              + "  log longtext NOT NULL,\n"
              + "  successful tinyint(4) NOT NULL,\n"
              + "  create_time bigint(20) NOT NULL,\n"
              + "  finished tinyint(4) NOT NULL,\n"
              + "  finish_time bigint(20) NOT NULL,\n"
              + "  exec_host varchar(255),\n"
              + "  progress float NOT NULL\n"
              + ");",
          "CREATE TABLE file_diff (\n"
              + "  did INTEGER PRIMARY KEY AUTOINCREMENT,\n"
              + "  rid INTEGER NOT NULL,\n"
              + "  diff_type varchar(4096) NOT NULL,\n"
              + "  src varchar(1000) NOT NULL,\n"
              + "  parameters varchar(4096) NOT NULL,\n"
              + "  state tinyint(4) NOT NULL,\n"
              + "  create_time bigint(20) NOT NULL\n"
              + ");",
          "CREATE INDEX file_diff_idx ON file_diff (src);",
          "CREATE TABLE global_config (\n"
              + " cid INTEGER PRIMARY KEY AUTOINCREMENT,\n"
              + " property_name varchar(512) NOT NULL UNIQUE,\n"
              + " property_value varchar(3072) NOT NULL\n"
              + ");",
          "CREATE TABLE cluster_config (\n"
              + " cid INTEGER PRIMARY KEY AUTOINCREMENT,\n"
              + " node_name varchar(512) NOT NULL UNIQUE,\n"
              + " config_path varchar(3072) NOT NULL\n"
              + ");",
          "CREATE TABLE sys_info (\n"
              + "  property varchar(512) PRIMARY KEY,\n"
              + "  value varchar(4096) NOT NULL\n"
              + ");",
          "CREATE TABLE user_info (\n"
              + "  user_name varchar(20) PRIMARY KEY,\n"
              + "  user_password varchar(256) NOT NULL\n"
              + ");",
          "INSERT INTO user_info VALUES('admin','" + password + "');",
          "CREATE TABLE cluster_info (\n"
              + "  cid INTEGER PRIMARY KEY AUTOINCREMENT,\n"
              + "  name varchar(512) NOT NULL UNIQUE,\n"
              + "  url varchar(4096) NOT NULL,\n"
              + "  conf_path varchar(4096) NOT NULL,\n"
              + "  state varchar(64) NOT NULL,\n"
              + // ClusterState
              "  type varchar(64) NOT NULL\n"
              + // ClusterType
              ");",
          "CREATE TABLE backup_file (\n"
              + " rid bigint(20) NOT NULL,\n"
              + " src varchar(4096) NOT NULL,\n"
              + " dest varchar(4096) NOT NULL,\n"
              + " period bigint(20) NOT NULL\n"
              + ");",
          "CREATE INDEX backup_file_rid_idx ON backup_file (rid);",
          "CREATE TABLE file_state (\n"
              + " path varchar(512) PRIMARY KEY,\n"
              + " type tinyint(4) NOT NULL,\n"
              + " stage tinyint(4) NOT NULL\n"
              + ");",
          "CREATE TABLE compression_file (\n"
              + " path varchar(512) PRIMARY KEY,\n"
              + " buffer_size int(11) NOT NULL,\n"
              + " compression_impl varchar(64) NOT NULL,\n"
              + " original_length bigint(20) NOT NULL,\n"
              + " compressed_length bigint(20) NOT NULL,\n"
              + " originalPos text NOT NULL,\n"
              + " compressedPos text NOT NULL\n"
              + ");",
          "CREATE TABLE small_file (\n"
              + "path varchar(1000) NOT NULL PRIMARY KEY,\n"
              + "container_file_path varchar(4096) NOT NULL,\n"
              + "offset bigint(20) NOT NULL,\n"
              + "length bigint(20) NOT NULL\n"
              + ");",
          "CREATE TABLE whitelist (\n"
              + "last_fetched_dirs varchar(4096) NOT NULL\n"
              + ");",
          "INSERT INTO whitelist VALUES( '' );"
        };
    try {
      for (String s : deleteExistingTables) {
        // Drop table if exists
        LOG.debug(s);
        executeSql(conn, s);
      }
      // Handle mysql related features
      String url = conn.getMetaData().getURL();
      boolean mysql = url.startsWith(MetaStoreUtils.MYSQL_URL_PREFIX);
      boolean mysqlOldRelease = false;
      if (mysql) {
        // Mysql version number, e.g. 5 + 6 * 0.1 = 5.6
        double mysqlVersion =
            conn.getMetaData().getDatabaseMajorVersion()
                + conn.getMetaData().getDatabaseMinorVersion() * 0.1;
        LOG.debug("Mysql Version Number {}", mysqlVersion);
        if (mysqlVersion < 5.5) {
          LOG.error("Required Mysql version >= 5.5, but current is " + mysqlVersion);
          throw new MetaStoreException("Mysql version " + mysqlVersion + " is below requirement!");
        } else if (mysqlVersion < 5.7 && mysqlVersion >= 5.5) {
          mysqlOldRelease = true;
        }
      }
      if (mysqlOldRelease) {
        // Enable dynamic file format to avoid index length limit 767
        executeSql(conn, "SET GLOBAL innodb_file_format=barracuda;");
        executeSql(conn, "SET GLOBAL innodb_file_per_table=true;");
        executeSql(conn, "SET GLOBAL innodb_large_prefix = ON;");
      }
      for (String s : createEmptyTables) {
        // Solve mysql and sqlite sql difference
        s = sqlCompatibility(mysql, mysqlOldRelease, s);
        LOG.debug(s);
        executeSql(conn, s);
      }
    } catch (Exception e) {
      throw new MetaStoreException(e);
    }
  }

  /**
   * Solve SQL compatibility problems between mysql and sqlite.
   *
   * <p>MySQL 5.6 or earlier cannot support an index key longer than 767
   * bytes, so index prefixes are shortened and wide-key tables are switched
   * to the DYNAMIC row format. SQLite's AUTOINCREMENT keyword is rewritten
   * to MySQL's AUTO_INCREMENT.
   *
   * @param mysql whether the target database is mysql
   * @param mysqlOldRelease whether the mysql version is earlier than 5.7
   * @param sql the statement to convert
   * @return converted sql
   */
  private static String sqlCompatibility(boolean mysql, boolean mysqlOldRelease, String sql) {
    if (mysql) {
      // path/src index should be set to less than 767
      // to avoid "Specified key was too long" in
      // Mysql 5.6 or previous version
      if (mysqlOldRelease) {
        // Fix index size 767 in mysql 5.6 or previous version
        int maxLong = 767 / characterTakeUpBytes;
        if (sql.startsWith("CREATE INDEX")
            && (sql.contains("path") || sql.contains("src"))) {
          // Index longer than maxLong: add an index prefix length
          sql = sql.replace(");", "(" + maxLong + "));");
        } else if (sql.contains("PRIMARY KEY") || sql.contains("UNIQUE")) {
          // Primary key longer than maxLong
          Pattern p = Pattern.compile("(\\d{3,})(.{2,15})(PRIMARY|UNIQUE)");
          Matcher m = p.matcher(sql);
          if (m.find()) {
            if (Integer.valueOf(m.group(1)) > maxLong) {
              // Make this table dynamic
              sql = sql.replace(");", ") ROW_FORMAT=DYNAMIC ENGINE=INNODB;");
              LOG.debug(sql);
            }
          }
        }
      }
      // Replace AUTOINCREMENT with AUTO_INCREMENT
      if (sql.contains("AUTOINCREMENT")) {
        sql = sql.replace("AUTOINCREMENT", "AUTO_INCREMENT");
      }
    }
    return sql;
  }

  /**
   * Execute a single SQL statement on the given connection.
   *
   * @param conn open JDBC connection (not closed by this method)
   * @param sql the statement to run
   * @throws MetaStoreException wrapping any SQL error
   */
  public static void executeSql(Connection conn, String sql)
      throws MetaStoreException {
    // try-with-resources: the statement no longer leaks on either path
    try (Statement s = conn.createStatement()) {
      s.execute(sql);
    } catch (Exception e) {
      LOG.error("SQL execution error " + sql);
      throw new MetaStoreException(e);
    }
  }

  /**
   * Whether the underlying JDBC driver supports batched updates.
   * Returns false on any metadata error instead of propagating it.
   */
  public static boolean supportsBatchUpdates(Connection conn) {
    try {
      return conn.getMetaData().supportsBatchUpdates();
    } catch (Exception e) {
      return false;
    }
  }

  /** Format (re-initialize) the metastore database configured in conf. */
  public static void formatDatabase(SmartConf conf) throws MetaStoreException {
    getDBAdapter(conf).formatDataBase();
  }

  /** Verify the metastore tables exist for the database configured in conf. */
  public static void checkTables(SmartConf conf) throws MetaStoreException {
    getDBAdapter(conf).checkTables();
  }

  /**
   * Extract the database name from a mysql JDBC url.
   *
   * @param url mysql JDBC url
   * @return the database name portion of the url
   * @throws SQLException if the url cannot be parsed
   */
  public static String getMysqlDBName(String url) throws SQLException {
    NonRegisteringDriver nonRegisteringDriver = new NonRegisteringDriver();
    Properties properties = nonRegisteringDriver.parseURL(url, null);
    // DBNAME_PROPERTY_KEY is a static constant; access it through the class.
    return properties.getProperty(NonRegisteringDriver.DBNAME_PROPERTY_KEY);
  }

  /**
   * Build a {@link MetaStore} backed by a Druid connection pool.
   *
   * <p>Configuration is read from druid.xml on the classpath when present,
   * falling back to druid-template.xml. The JDBC url in SmartConf (if set)
   * overrides the one in the pool config; a password provisioned through
   * Hadoop credentials (if available) overrides the configured one. As a
   * side effect this also loads {@code characterTakeUpBytes} from conf.
   *
   * @param conf SSM configuration
   * @return a MetaStore wrapping the configured pool
   * @throws MetaStoreException on malformed config or a disallowed mysql DB
   */
  public static MetaStore getDBAdapter(
      SmartConf conf) throws MetaStoreException {
    URL pathUrl = ClassLoader.getSystemResource("");
    String path = pathUrl.getPath();
    characterTakeUpBytes = conf.getInt(
        SmartConfKeys.SMART_METASTORE_CHARACTER_TAKEUP_BYTES_KEY,
        SmartConfKeys.SMART_METASTORE_CHARACTER_TAKEUP_BYTES_DEFAULT);
    String fileName = "druid.xml";
    String expectedCpPath = path + fileName;
    LOG.info("Expected DB connection pool configuration path = "
        + expectedCpPath);
    File cpConfigFile = new File(expectedCpPath);
    if (cpConfigFile.exists()) {
      LOG.info("Using pool configure file: " + expectedCpPath);
      Properties p = new Properties();
      try {
        p.loadFromXML(new FileInputStream(cpConfigFile));
        String url = conf.get(SmartConfKeys.SMART_METASTORE_DB_URL_KEY);
        if (url != null) {
          p.setProperty("url", url);
        }
        String purl = p.getProperty("url");
        if (purl == null || purl.length() == 0) {
          purl = getDefaultSqliteDB(); // For testing
          p.setProperty("url", purl);
          LOG.warn("Database URL not specified, using " + purl);
        }
        if (purl.startsWith(MetaStoreUtils.MYSQL_URL_PREFIX)) {
          String dbName = getMysqlDBName(purl);
          for (String name : DB_NAME_NOT_ALLOWED) {
            if (dbName.equals(name)) {
              throw new MetaStoreException(
                  String.format(
                      "The database %s in mysql is for DB system use, "
                          + "please appoint other database in druid.xml.",
                      name));
            }
          }
        }
        try {
          String pw = conf
              .getPasswordFromHadoop(SmartConfKeys.SMART_METASTORE_PASSWORD);
          // Use equals-based emptiness check: reference comparison with ""
          // would treat any non-interned empty string as non-empty.
          if (pw != null && !pw.isEmpty()) {
            p.setProperty("password", pw);
          }
        } catch (IOException e) {
          LOG.info("Can not get metastore password from hadoop provision credentials,"
              + " use the one configured in druid.xml .");
        }
        for (String key : p.stringPropertyNames()) {
          if (key.equals("password")) {
            LOG.info("\t" + key + " = **********");
          } else {
            LOG.info("\t" + key + " = " + p.getProperty(key));
          }
        }
        return new MetaStore(new DruidPool(p));
      } catch (Exception e) {
        if (e instanceof InvalidPropertiesFormatException) {
          throw new MetaStoreException(
              "Malformat druid.xml, please check the file.", e);
        } else {
          throw new MetaStoreException(e);
        }
      }
    } else {
      LOG.info("DB connection pool config file " + expectedCpPath
          + " NOT found.");
    }
    // Get Default configure from druid-template.xml
    fileName = "druid-template.xml";
    expectedCpPath = path + fileName;
    LOG.info("Expected DB connection pool configuration path = "
        + expectedCpPath);
    cpConfigFile = new File(expectedCpPath);
    LOG.info("Using pool configure file: " + expectedCpPath);
    Properties p = new Properties();
    try {
      p.loadFromXML(new FileInputStream(cpConfigFile));
    } catch (Exception e) {
      throw new MetaStoreException(e);
    }
    String url = conf.get(SmartConfKeys.SMART_METASTORE_DB_URL_KEY);
    if (url != null) {
      p.setProperty("url", url);
    }
    for (String key : p.stringPropertyNames()) {
      LOG.info("\t" + key + " = " + p.getProperty(key));
    }
    return new MetaStore(new DruidPool(p));
  }

  /**
   * Reverse-lookup a map: find the first key mapped to the given value.
   *
   * @param map map to search
   * @param value value to look for (compared with equals)
   * @return the first matching key, or null if none
   */
  public static Integer getKey(Map<Integer, String> map, String value) {
    // entrySet avoids a second lookup per key compared to keySet + get
    for (Map.Entry<Integer, String> entry : map.entrySet()) {
      if (entry.getValue().equals(value)) {
        return entry.getKey();
      }
    }
    return null;
  }

  /**
   * Retrieve table column names.
   *
   * @param conn open JDBC connection (not closed by this method)
   * @param tableName table whose columns are listed
   * @return column names in the order reported by the driver
   * @throws MetaStoreException wrapping any SQL error
   */
  public static List<String> getTableColumns(Connection conn, String tableName)
      throws MetaStoreException {
    List<String> ret = new ArrayList<>();
    // try-with-resources so the result set never leaks
    try (ResultSet res = conn.getMetaData().getColumns(null, null, tableName, null)) {
      while (res.next()) {
        ret.add(res.getString("COLUMN_NAME"));
      }
      return ret;
    } catch (Exception e) {
      throw new MetaStoreException(e);
    }
  }

  /**
   * This default behavior provided here is mainly for convenience: a sqlite
   * database file under the user's home directory, created and initialized
   * on first use.
   *
   * @return the sqlite JDBC url of the default database
   */
  private static String getDefaultSqliteDB() throws MetaStoreException {
    String absFilePath = System.getProperty("user.home")
        + "/smart-test-default.db";
    File file = new File(absFilePath);
    if (file.exists()) {
      return MetaStoreUtils.SQLITE_URL_PREFIX + absFilePath;
    }
    try {
      Connection conn = MetaStoreUtils.createSqliteConnection(absFilePath);
      MetaStoreUtils.initializeDataBase(conn);
      conn.close();
    } catch (Exception e) {
      throw new MetaStoreException(e);
    }
    return MetaStoreUtils.SQLITE_URL_PREFIX + absFilePath;
  }

  /**
   * Drop every user table of the sqlite database behind {@code conn},
   * keeping sqlite's internal sqlite_sequence table.
   *
   * @param conn open JDBC connection (not closed by this method)
   * @throws MetaStoreException wrapping any SQL error
   */
  public static void dropAllTablesSqlite(
      Connection conn) throws MetaStoreException {
    try (Statement s = conn.createStatement()) {
      ResultSet rs = s.executeQuery("SELECT tbl_name FROM sqlite_master;");
      // Collect names first: the result set is implicitly closed when the
      // statement is reused for the DROP statements below.
      List<String> list = new ArrayList<>();
      while (rs.next()) {
        list.add(rs.getString(1));
      }
      for (String tb : list) {
        if (!"sqlite_sequence".equals(tb)) {
          s.execute("DROP TABLE IF EXISTS '" + tb + "';");
        }
      }
    } catch (Exception e) {
      throw new MetaStoreException(e);
    }
  }

  /**
   * Drop every table of the mysql database named in the given url.
   *
   * @param conn open JDBC connection (not closed by this method)
   * @param url mysql JDBC url whose database name selects the schema
   * @throws MetaStoreException wrapping any SQL error
   */
  public static void dropAllTablesMysql(Connection conn,
      String url) throws MetaStoreException {
    try (Statement stat = conn.createStatement()) {
      String dbName = getMysqlDBName(url);
      LOG.info("Drop All tables of Current DBname: " + dbName);
      ResultSet rs = stat.executeQuery("SELECT TABLE_NAME FROM "
          + "INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '" + dbName + "';");
      // Collect names first: the result set is implicitly closed when the
      // statement is reused for the DROP statements below.
      List<String> tbList = new ArrayList<>();
      while (rs.next()) {
        tbList.add(rs.getString(1));
      }
      for (String tb : tbList) {
        LOG.info(tb);
        stat.execute("DROP TABLE IF EXISTS " + tb + ";");
      }
    } catch (Exception e) {
      throw new MetaStoreException(e);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metastore/src/main/java/org/smartdata/metastore/utils/Constants.java | smart-metastore/src/main/java/org/smartdata/metastore/utils/Constants.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metastore.utils;
public class Constants {
  /** Number of milliseconds in one second. */
  public static final long ONE_SECOND_IN_MILLIS = 1000L;
  /** Number of milliseconds in one minute. */
  public static final long ONE_MINUTE_IN_MILLIS = ONE_SECOND_IN_MILLIS * 60;
  /** Number of milliseconds in one hour. */
  public static final long ONE_HOUR_IN_MILLIS = ONE_MINUTE_IN_MILLIS * 60;
  /** Number of milliseconds in one day. */
  public static final long ONE_DAY_IN_MILLIS = ONE_HOUR_IN_MILLIS * 24;
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/test/java/org/smartdata/action/MockActionStatusReporter.java | smart-action/src/test/java/org/smartdata/action/MockActionStatusReporter.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import org.junit.Assert;
import org.smartdata.protocol.message.ActionStatus;
import org.smartdata.protocol.message.StatusMessage;
import org.smartdata.protocol.message.StatusReport;
import org.smartdata.protocol.message.StatusReporter;
public class MockActionStatusReporter implements StatusReporter {
  /**
   * Asserts that every finished action carried by a {@link StatusReport}
   * completed without a throwable. Any other kind of status message is
   * ignored.
   */
  @Override
  public void report(StatusMessage status) {
    if (!(status instanceof StatusReport)) {
      return;
    }
    for (ActionStatus actionStatus : ((StatusReport) status).getActionStatuses()) {
      if (actionStatus.isFinished()) {
        Assert.assertNull(actionStatus.getThrowable());
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/SmartAction.java | smart-action/src/main/java/org/smartdata/action/SmartAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.SmartContext;
import org.smartdata.protocol.message.ActionStatus;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.util.Map;
/**
* Smart action, the base class. All actions should inherit this. All actions
* should be able to run in a cmdlet line or web console. User defined actions
* are also meant to extend this.
*/
public abstract class SmartAction {
  static final Logger LOG = LoggerFactory.getLogger(SmartAction.class);
  private long cmdletId;
  // Whether this is the last action of its cmdlet.
  private boolean lastAction;
  private long actionId;
  private Map<String, String> actionArgs;
  private SmartContext context;
  // Buffers capturing the action's result/log output; subclasses write to
  // them via the PrintStreams and getActionStatus() drains them as UTF-8.
  private ByteArrayOutputStream resultOs;
  private PrintStream psResultOs;
  private ByteArrayOutputStream logOs;
  private PrintStream psLogOs;
  private volatile boolean successful;
  protected String name;
  private long startTime;
  private long finishTime;
  private Throwable throwable;
  private boolean finished;

  public SmartAction() {
    this.successful = false;
    //Todo: extract the print stream out of this class
    this.resultOs = new ByteArrayOutputStream(64 * 1024);
    this.logOs = new ByteArrayOutputStream(64 * 1024);
    try {
      // Encode with UTF-8 so the bytes match the UTF-8 decoding performed
      // in getActionStatus(); the no-charset PrintStream constructor uses
      // the platform default charset and would garble non-ASCII output on
      // platforms where that default is not UTF-8.
      this.psResultOs = new PrintStream(resultOs, false, "UTF-8");
      this.psLogOs = new PrintStream(logOs, false, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      // Cannot happen: UTF-8 support is mandated by the Java platform.
      throw new IllegalStateException("UTF-8 charset not available", e);
    }
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public long getCmdletId() {
    return cmdletId;
  }

  public void setCmdletId(long cmdletId) {
    this.cmdletId = cmdletId;
  }

  public boolean isLastAction() {
    return lastAction;
  }

  public void setLastAction(boolean lastAction) {
    this.lastAction = lastAction;
  }

  public SmartContext getContext() {
    return context;
  }

  public void setContext(SmartContext context) {
    this.context = context;
  }

  /**
   * Used to initialize the action.
   *
   * @param args Action specific arguments
   */
  public void init(Map<String, String> args) {
    this.actionArgs = args;
  }

  /**
   * Get action arguments.
   *
   * @return the argument map this action was initialized with
   */
  public Map<String, String> getArguments() {
    return actionArgs;
  }

  public void setArguments(Map<String, String> args) {
    actionArgs = args;
  }

  public long getActionId() {
    return actionId;
  }

  public void setActionId(long actionId) {
    this.actionId = actionId;
  }

  /**
   * The action's work; implemented by subclasses. Any throwable escaping
   * this method marks the action as failed.
   */
  protected abstract void execute() throws Exception;

  /**
   * Run the action: record the start time, execute, then record the finish
   * time, mark it finished and close the output streams. A throwable from
   * execute() is logged, stored for getActionStatus() and appended to the
   * action log; it is never rethrown.
   */
  public final void run() {
    try {
      setStartTime();
      execute();
      successful = true;
    } catch (Throwable t) {
      LOG.error("SmartAction execute error ", t);
      setThrowable(t);
      appendLog(ExceptionUtils.getFullStackTrace(t));
    } finally {
      setFinishTime();
      finished = true;
      stop();
    }
  }

  private void setStartTime() {
    this.startTime = System.currentTimeMillis();
  }

  private void setThrowable(Throwable t) {
    this.throwable = t;
  }

  private void setFinishTime() {
    this.finishTime = System.currentTimeMillis();
  }

  // The result will be shown in each action's summary page.
  protected void appendResult(String result) {
    psResultOs.println(result);
  }

  // The log will be shown in action's submission section and summary page.
  protected void appendLog(String log) {
    psLogOs.println(log);
  }

  public PrintStream getResultOs() {
    return psResultOs;
  }

  public PrintStream getLogOs() {
    return psLogOs;
  }

  /**
   * Progress in [0, 1]. Default implementation only distinguishes success
   * (1.0) from everything else (0.0); subclasses may report finer progress.
   */
  public float getProgress() {
    if (successful) {
      return 1.0F;
    }
    return 0.0F;
  }

  /**
   * Snapshot of this action's state, including the accumulated result and
   * log buffers decoded as UTF-8.
   */
  public ActionStatus getActionStatus() throws UnsupportedEncodingException {
    return new ActionStatus(
        cmdletId,
        lastAction,
        actionId,
        getProgress(),
        resultOs.toString("UTF-8"),
        logOs.toString("UTF-8"),
        startTime,
        finishTime,
        throwable,
        finished);
  }

  // Flushes and closes both output streams; called once from run().
  private void stop() {
    psLogOs.close();
    psResultOs.close();
  }

  public boolean isSuccessful() {
    return successful;
  }

  public boolean isFinished() {
    return finished;
  }

  @VisibleForTesting
  public boolean getExpectedAfterRun() throws UnsupportedEncodingException {
    ActionStatus actionStatus = getActionStatus();
    return actionStatus.isFinished() && actionStatus.getThrowable() == null;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/SleepAction.java | smart-action/src/main/java/org/smartdata/action/SleepAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import org.smartdata.action.annotation.ActionSignature;
@ActionSignature(
    actionId = "sleep",
    displayName = "sleep",
    usage = SleepAction.TIME_IN_MS + " $timeToSleepInMs"
)
public class SleepAction extends SmartAction {
  public static final String TIME_IN_MS = "-ms";
  // Set once the sleep actually begins; getProgress() reports 0 before that.
  private boolean started = false;
  private long toSleep;
  private long startTm;

  /**
   * Sleeps for the number of milliseconds given by the {@code -ms} option.
   * A value of 0 returns immediately.
   *
   * @throws IllegalArgumentException if the {@code -ms} option is missing
   * @throws NumberFormatException if the option value is not a valid long
   * @throws InterruptedException if the sleeping thread is interrupted
   */
  @Override
  protected void execute() throws Exception {
    if (!getArguments().containsKey(TIME_IN_MS)) {
      throw new IllegalArgumentException("Time to sleep not specified (through option '"
          + TIME_IN_MS + "').");
    }
    // parseLong avoids boxing a Long only to unbox it immediately
    toSleep = Long.parseLong(getArguments().get(TIME_IN_MS));
    if (toSleep == 0) {
      return;
    }
    startTm = System.currentTimeMillis();
    started = true;
    Thread.sleep(toSleep);
  }

  /**
   * Progress as elapsed/total sleep time, clamped to [0, 1].
   */
  @Override
  public float getProgress() {
    // Check success first: a 0 ms sleep finishes without ever setting
    // 'started', and previously reported 0 progress forever.
    if (isSuccessful()) {
      return 1.0f;
    }
    if (!started) {
      return 0;
    }
    // Clamp: the wall clock can pass the requested sleep time before the
    // success flag is observed, which previously reported progress > 1.
    return Math.min(1.0f, (System.currentTimeMillis() - startTm) * 1.0f / toSleep);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/ActionFactory.java | smart-action/src/main/java/org/smartdata/action/ActionFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import java.util.Map;
/**
 * Action factory interface. Either built-in or user defined actions will be
 * provided via an action factory.
 */
public interface ActionFactory {
  /**
   * Get all the smart actions supported and provided by this factory.
   *
   * @return a map from action name (the identifier used when submitting a
   *         cmdlet) to the {@link SmartAction} subclass implementing it
   */
  Map<String, Class<? extends SmartAction>> getSupportedActions();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/SyncAction.java | smart-action/src/main/java/org/smartdata/action/SyncAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import org.smartdata.action.annotation.ActionSignature;
/**
 * Sync action is an abstract action for backup and copy.
 * Users can submit a sync action with detailed src path and
 * dest path, e.g., "sync -src /test/1 -dest hdfs:/remoteIP:port/test/1"
 */
@ActionSignature(
    actionId = "sync",
    displayName = "sync",
    // Note the space after "$src": without it the rendered usage string
    // ran the two options together as "-src $src-dest $dest".
    usage = SyncAction.SRC + " $src " + SyncAction.DEST + " $dest"
)
public class SyncAction extends SmartAction {
  // related to fileDiff.src
  public static final String SRC = "-src";
  // related to remote cluster and fileDiff.src
  public static final String DEST = "-dest";

  /**
   * Intentionally empty: sync is a marker action whose work is scheduled
   * and carried out elsewhere based on recorded file diffs.
   */
  @Override
  protected void execute() throws Exception {
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/AbstractActionFactory.java | smart-action/src/main/java/org/smartdata/action/AbstractActionFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.annotation.ActionSignature;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * A common {@link ActionFactory} base class. It keeps a static registry of
 * action classes keyed by the id declared in each class's
 * {@link ActionSignature}; note the map is shared by all factory subclasses.
 */
public abstract class AbstractActionFactory implements ActionFactory {
  static final Logger LOG = LoggerFactory.getLogger(AbstractActionFactory.class);

  // Shared registry: every addAction() call, from any subclass, lands here.
  private static Map<String, Class<? extends SmartAction>> supportedActions = new HashMap<>();

  static {
    addAction(EchoAction.class);
    addAction(SleepAction.class);
    addAction(SyncAction.class);
    addAction(ExecAction.class);
  }

  /**
   * Registers an action class under the id declared in its
   * {@link ActionSignature}. A class without the annotation, or whose id is
   * already registered, is rejected with an error log entry.
   *
   * @param actionClass the action implementation to register
   */
  protected static void addAction(Class<? extends SmartAction> actionClass) {
    ActionSignature actionSignature = actionClass.getAnnotation(ActionSignature.class);
    if (actionSignature == null) {
      // Fixed message grammar ("does not has" -> "does not have").
      LOG.error("Action {} does not have an ActionSignature.", actionClass.getName());
      return;
    }
    String actionId = actionSignature.actionId();
    if (supportedActions.containsKey(actionId)) {
      LOG.error("There is already an Action registered with id {}.", actionId);
      return;
    }
    supportedActions.put(actionId, actionClass);
  }

  @Override
  public Map<String, Class<? extends SmartAction>> getSupportedActions() {
    // Unmodifiable view: callers must not mutate the shared registry.
    return Collections.unmodifiableMap(supportedActions);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/ActionType.java | smart-action/src/main/java/org/smartdata/action/ActionType.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
/**
 * Internal action categories supported by SSM. Each type carries a stable
 * numeric code usable for persistence and lookup.
 */
public enum ActionType {
  None(0),              // doing nothing
  External(1),          // execute some cmdlet lines specified
  CacheFile(2),         // Move to cache
  UncacheFile(3),       // Move out of cache
  SetStoragePolicy(4),  // Set Policy Action
  MoveFile(5),          // Enforce storage Policy
  ArchiveFile(6),       // Enforce Archive Policy
  ConvertToEC(7),
  ConvertToReplica(8),
  Distcp(9),
  DiskBalance(10),
  BalanceCluster(11);

  private final int value;

  ActionType(int value) {
    this.value = value;
  }

  /** Numeric code of this action type. */
  public int getValue() {
    return value;
  }

  /**
   * Looks up the type whose numeric code equals {@code value}.
   *
   * @return the matching type, or {@code null} if none matches
   */
  public static ActionType fromValue(int value) {
    for (ActionType candidate : ActionType.values()) {
      if (candidate.value == value) {
        return candidate;
      }
    }
    return null;
  }

  /**
   * Looks up the type whose name matches {@code name}, ignoring case.
   *
   * @return the matching type, or {@code null} if none matches
   */
  public static ActionType fromName(String name) {
    for (ActionType candidate : ActionType.values()) {
      if (candidate.name().equalsIgnoreCase(name)) {
        return candidate;
      }
    }
    return null;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/ActionRegistry.java | smart-action/src/main/java/org/smartdata/action/ActionRegistry.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.model.ActionDescriptor;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.ServiceConfigurationError;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Registry of all actions discovered via the {@link ActionFactory} service
 * loader at class-load time. Static singleton.
 */
public class ActionRegistry {
  static final Logger LOG = LoggerFactory.getLogger(ActionRegistry.class);

  private static Map<String, Class<? extends SmartAction>> allActions = new ConcurrentHashMap<>();

  static {
    try {
      ServiceLoader<ActionFactory> actionFactories = ServiceLoader.load(ActionFactory.class);
      for (ActionFactory fact : actionFactories) {
        allActions.putAll(fact.getSupportedActions());
      }
    } catch (ServiceConfigurationError e) {
      // Previously the error object was dropped, hiding the root cause.
      LOG.error("Load actions failed from factory", e);
    }
  }

  /** Returns the ids of all registered actions (unmodifiable view). */
  public static Set<String> registeredActions() {
    return Collections.unmodifiableSet(allActions.keySet());
  }

  /** Returns {@code true} when an action with the given id is registered. */
  public static boolean registeredAction(String name) {
    return allActions.containsKey(name);
  }

  /**
   * Describes every registered action that carries an {@link ActionSignature}.
   *
   * @return descriptors built from the action annotations
   * @throws IOException never thrown by this implementation; declared for
   *         caller compatibility
   */
  public static List<ActionDescriptor> supportedActions() throws IOException {
    List<ActionDescriptor> actionDescriptors = new ArrayList<>();
    for (Class<? extends SmartAction> clazz : allActions.values()) {
      ActionSignature signature = clazz.getAnnotation(ActionSignature.class);
      if (signature != null) {
        actionDescriptors.add(fromSignature(signature));
      }
    }
    return actionDescriptors;
  }

  /**
   * Instantiates a fresh action by its registered id.
   *
   * @param name registered action id
   * @return a new action instance with its name set
   * @throws ActionException when the id is unknown or instantiation fails
   */
  public static SmartAction createAction(String name) throws ActionException {
    if (!registeredAction(name)) {
      throw new ActionException("Unregistered action " + name);
    }
    try {
      // getDeclaredConstructor().newInstance() replaces the deprecated
      // Class.newInstance(), which propagated checked constructor
      // exceptions unchecked.
      SmartAction smartAction = allActions.get(name).getDeclaredConstructor().newInstance();
      smartAction.setName(name);
      return smartAction;
    } catch (Exception e) {
      LOG.error("Create {} action failed", name, e);
      throw new ActionException(e);
    }
  }

  /** Converts a signature annotation into a transport-friendly descriptor. */
  private static ActionDescriptor fromSignature(ActionSignature signature) {
    return new ActionDescriptor(
        signature.actionId(), signature.displayName(), signature.usage(), signature.description());
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/Utils.java | smart-action/src/main/java/org/smartdata/action/Utils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import java.util.Date;
public class Utils {
  /**
   * Returns the current wall-clock time rendered via {@link Date#toString()}
   * (e.g. "Mon Jan 01 00:00:00 UTC 2024").
   *
   * <p>The misspelling in the method name ("Formated") is kept for backward
   * compatibility with existing callers.
   *
   * @return formatted current time
   */
  public static String getFormatedCurrentTime() {
    // new Date() is equivalent to new Date(System.currentTimeMillis()).
    return new Date().toString();
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/ActionException.java | smart-action/src/main/java/org/smartdata/action/ActionException.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
/**
 * Checked exception raised when an action cannot be created (unregistered
 * id, instantiation failure) or reports an error to its caller.
 */
public class ActionException extends Exception {
  public ActionException(String errorMsg) {
    super(errorMsg);
  }
  public ActionException(String errorMsg, Throwable throwable) {
    super(errorMsg, throwable);
  }
  public ActionException(Throwable throwable) {
    super(throwable);
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/ExecAction.java | smart-action/src/main/java/org/smartdata/action/ExecAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.utils.StringUtil;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * An action to execute a general shell command.
 *
 * <p>Arguments: {@code -cmd} is the command line to run; {@code -execdir}
 * optionally sets the working directory; {@code -env} optionally supplies
 * extra environment variables as '|'-separated KEY=VALUE pairs. Any other
 * argument is exported to the child process as an environment variable
 * prefixed with {@code SSMENV}.
 */
// TODO: Add security restrictions
@ActionSignature(
    actionId = "exec",
    displayName = "exec",
    usage = ExecAction.CMD + " $cmdString"
    + " [" + ExecAction.EXECDIR + " $executionDirectory" + "]"
    + " [" + ExecAction.ENV + " $envKVs" + "]"
)
public class ExecAction extends SmartAction {
  public static final String CMD = "-cmd";
  public static final String EXECDIR = "-execdir";
  public static final String ENV = "-env"; // multi-KVs separated with '|'
  public static final String SSM_ENV_PREFIX = "SSMENV";

  // Extra environment variables to pass to the child process.
  private Map<String, String> env = new HashMap<>();
  private String cmdStr = "";
  private String execDir = "";

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    for (Map.Entry<String, String> entry : args.entrySet()) {
      String arg = entry.getKey();
      String value = entry.getValue();
      switch (arg) {
        case CMD:
          cmdStr = value;
          break;
        case EXECDIR:
          execDir = value;
          break;
        case ENV:
          if (value != null && !value.isEmpty()) {
            env.putAll(parseEnvString(value));
          }
          break;
        default:
          // Unknown options are forwarded to the child process as
          // SSMENV_<name> environment variables.
          String key = SSM_ENV_PREFIX + (arg.startsWith("-") ? arg.replaceFirst("-", "_") : arg);
          env.put(key, value);
      }
    }
  }

  /**
   * Parses a '|'-separated list of KEY=VALUE pairs into a map. Keys are
   * trimmed; values keep leading whitespace but lose trailing whitespace.
   * Items with no '=' or an empty key are skipped.
   */
  private Map<String, String> parseEnvString(String envStr) {
    Map<String, String> ret = new HashMap<>();
    for (String item : envStr.split("\\|")) {
      int idx = item.indexOf('=');
      if (idx == -1) {
        continue;
      }
      String key = item.substring(0, idx).trim();
      if (key.isEmpty()) {
        continue;
      }
      String value = item.substring(idx + 1).replaceAll("\\s+$", "");
      ret.put(key, value);
    }
    return ret;
  }

  @Override
  protected void execute() throws Exception {
    List<String> cmdItems = StringUtil.parseCmdletString(cmdStr);
    if (cmdItems.isEmpty()) {
      return;
    }
    ProcessBuilder builder = new ProcessBuilder(cmdItems);
    if (execDir != null && !execDir.isEmpty()) {
      builder.directory(new File(execDir));
    }
    builder.environment().putAll(env);
    // Merge stderr into stdout so both are captured in the action log.
    builder.redirectErrorStream(true);
    Process p = builder.start();
    // try-with-resources: previously the reader (and thus the process
    // stdout stream) leaked if readLine or appendLog threw.
    try (BufferedReader stdout =
        new BufferedReader(new InputStreamReader(p.getInputStream()))) {
      String line;
      while ((line = stdout.readLine()) != null) {
        appendLog(line);
      }
    }
    int eCode = p.waitFor();
    if (eCode != 0) {
      throw new IOException("Exit code = " + eCode);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/EchoAction.java | smart-action/src/main/java/org/smartdata/action/EchoAction.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action;
import org.smartdata.action.annotation.ActionSignature;
/**
 * An action that writes the value of its {@code -msg} argument to the
 * action log; useful for testing and demonstration.
 */
@ActionSignature(
    actionId = "echo",
    displayName = "echo",
    usage = EchoAction.PRINT_MESSAGE + " $message"
)
public class EchoAction extends SmartAction {
  public static final String PRINT_MESSAGE = "-msg";
  @Override
  protected void execute() throws Exception {
    // Logs the raw message (may be null when -msg was not supplied).
    appendLog(getArguments().get(PRINT_MESSAGE));
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-action/src/main/java/org/smartdata/action/annotation/ActionSignature.java | smart-action/src/main/java/org/smartdata/action/annotation/ActionSignature.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.action.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares identity and usage metadata for a SmartAction implementation.
 * Read reflectively at registration and description time.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface ActionSignature {
  /** Unique id used to register and look up the action. */
  String actionId();
  /** Human-readable name of the action. */
  String displayName();
  /** Usage string describing the accepted arguments. */
  String usage() default "";
  /** Optional longer description of what the action does. */
  String description() default "";
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metrics/src/main/java/org/smartdata/metrics/FileAccessEventSource.java | smart-metrics/src/main/java/org/smartdata/metrics/FileAccessEventSource.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metrics;
/**
 * A source of file access events; implementations may collect them through
 * different channels (e.g. RPC from Smart clients).
 */
public interface FileAccessEventSource {
  /**
   * Get a collector that will produce events from this file access event
   * source.
   *
   * @return the event collector backed by this source
   */
  FileAccessEventCollector getCollector();
  /**
   * Insert events generated from the Smart client so that the collector can
   * consume them. Implementations are not required to support this.
   *
   * @param event the event generated by a Smart client
   */
  void insertEventFromSmartClient(FileAccessEvent event);
  /**
   * Close the source, releasing resources if necessary.
   */
  void close();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metrics/src/main/java/org/smartdata/metrics/FileAccessEvent.java | smart-metrics/src/main/java/org/smartdata/metrics/FileAccessEvent.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metrics;
/**
 * One access to a file: the path that was read, the user who performed the
 * access and the time it happened.
 */
public class FileAccessEvent implements DataAccessEvent {
  private final String path;
  private final String user;
  // -1 is used as "time not recorded"; it can be filled in later via
  // setTimeStamp.
  private long timeStamp;

  /**
   * Full constructor; all other constructors delegate here.
   *
   * @param path accessed file path
   * @param timeStamp access time in milliseconds, or -1 when unknown
   * @param user accessing user, or "" when unknown
   */
  public FileAccessEvent(String path, long timeStamp, String user) {
    this.path = path;
    this.timeStamp = timeStamp;
    this.user = user;
  }

  public FileAccessEvent(String path) {
    this(path, -1, "");
  }

  public FileAccessEvent(String path, long timestamp) {
    this(path, timestamp, "");
  }

  public FileAccessEvent(String path, String user) {
    this(path, -1, user);
  }

  /**
   * Get the accessed file path.
   *
   * @return file path
   */
  public String getPath() {
    return path;
  }

  // DFSClient cannot know the file id without an extra RPC to the NameNode;
  // SmartServer resolves the id from its own namespace, so this always
  // returns 0 here.
  public long getFileId() {
    return 0;
  }

  @Override
  public String getAccessedBy() {
    return user;
  }

  @Override
  public long getTimestamp() {
    return timeStamp;
  }

  public void setTimeStamp(long timeStamp) {
    this.timeStamp = timeStamp;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metrics/src/main/java/org/smartdata/metrics/FileAccessEventCollector.java | smart-metrics/src/main/java/org/smartdata/metrics/FileAccessEventCollector.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metrics;
import java.io.IOException;
import java.util.List;
/**
 * An interface for file access event collecting.
 */
public interface FileAccessEventCollector {
  /**
   * Collect file access events that occurred since the last call of this
   * method.
   *
   * @return access events gathered since the previous invocation
   * @throws IOException if the underlying source fails
   */
  List<FileAccessEvent> collect() throws IOException;
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metrics/src/main/java/org/smartdata/metrics/DataAccessEvent.java | smart-metrics/src/main/java/org/smartdata/metrics/DataAccessEvent.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metrics;
/**
 * A data access event. It represents ONE access to a piece of data, such as
 * a file (in a file system) or an object (in an object store).
 */
public interface DataAccessEvent {
  /**
   * Get the name of the user who accessed the data.
   *
   * @return user name
   */
  String getAccessedBy();
  /**
   * Get the access time.
   *
   * @return access time in milliseconds
   */
  long getTimestamp();
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metrics/src/main/java/org/smartdata/metrics/impl/SmartServerAccessEventCollector.java | smart-metrics/src/main/java/org/smartdata/metrics/impl/SmartServerAccessEventCollector.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metrics.impl;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.metrics.FileAccessEventCollector;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
/**
 * Collects access events delivered by user RPC calls to the Smart RPC
 * server, draining the shared queue on each {@link #collect()}.
 */
public class SmartServerAccessEventCollector implements FileAccessEventCollector {
  private final LinkedBlockingQueue<FileAccessEvent> outerQueue;

  public SmartServerAccessEventCollector(LinkedBlockingQueue<FileAccessEvent> queue) {
    this.outerQueue = queue;
  }

  /**
   * Drains and returns all events currently queued.
   *
   * <p>Uses {@code drainTo} instead of the previous toArray-then-clear
   * pattern, which silently dropped events enqueued by producers between
   * the snapshot and the clear.
   *
   * @return the drained events (possibly empty, never null)
   */
  @Override
  public List<FileAccessEvent> collect() throws IOException {
    List<FileAccessEvent> events = new ArrayList<>(outerQueue.size());
    outerQueue.drainTo(events);
    return events;
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metrics/src/main/java/org/smartdata/metrics/impl/MetricsFactory.java | smart-metrics/src/main/java/org/smartdata/metrics/impl/MetricsFactory.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metrics.impl;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.metrics.FileAccessEventSource;
import java.io.IOException;
/**
 * Creates the {@link FileAccessEventSource} implementation named by the
 * configuration key {@code smart.data.file.event.source}, defaulting to
 * {@link SmartServerAccessEventSource}.
 */
public class MetricsFactory {
  private static final String ACCESS_EVENT_SOURCE = "smart.data.file.event.source";
  private static final String DEFAULT_ACCESS_EVENT_SOURCE =
      SmartServerAccessEventSource.class.getName();

  /**
   * Instantiates the configured event source via its no-arg constructor.
   *
   * @param conf configuration to read the class name from
   * @return a new event source instance
   * @throws IOException if the class cannot be found or instantiated
   */
  public static FileAccessEventSource createAccessEventSource(Configuration conf)
      throws IOException {
    String source = conf.get(ACCESS_EVENT_SOURCE, DEFAULT_ACCESS_EVENT_SOURCE);
    try {
      // Class<?> replaces the raw Class; getDeclaredConstructor().newInstance()
      // replaces the deprecated Class.newInstance(). The printStackTrace()
      // before rethrowing is removed — the cause travels in the IOException.
      Class<?> clazz = Class.forName(source);
      return (FileAccessEventSource) clazz.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new IOException("Failed to create file access event source: " + source, e);
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-metrics/src/main/java/org/smartdata/metrics/impl/SmartServerAccessEventSource.java | smart-metrics/src/main/java/org/smartdata/metrics/impl/SmartServerAccessEventSource.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.metrics.impl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.smartdata.metrics.FileAccessEvent;
import org.smartdata.metrics.FileAccessEventCollector;
import org.smartdata.metrics.FileAccessEventSource;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.LinkedBlockingQueue;
/**
 * The default {@link FileAccessEventSource} for SmartServer: events arrive
 * via RPC from Smart clients and are buffered in an in-memory queue until
 * the collector drains them.
 */
public class SmartServerAccessEventSource implements FileAccessEventSource {
  static final Logger LOG = LoggerFactory.getLogger(SmartServerAccessEventSource.class);
  // Interval in milliseconds (1 second). NOTE(review): the previous comment
  // said "5 seconds", which did not match the value.
  private static final long DEFAULT_INTERVAL = 1000;
  private final SmartServerAccessEventCollector collector;
  private LinkedBlockingQueue<FileAccessEvent> eventQueue;
  private Timer timer;
  public SmartServerAccessEventSource() {
    this.timer = new Timer();
    this.eventQueue = new LinkedBlockingQueue<>();
    this.collector = new SmartServerAccessEventCollector(eventQueue);
    // Periodically enqueues a marker event; see ProgressInsertTask below.
    this.timer.schedule(new ProgressInsertTask(eventQueue), DEFAULT_INTERVAL, DEFAULT_INTERVAL);
  }
  @Override
  public FileAccessEventCollector getCollector() {
    return this.collector;
  }
  @Override
  public void insertEventFromSmartClient(FileAccessEvent event) {
    try {
      this.eventQueue.put(event);
      LOG.trace("Access:" + event.getPath());
    } catch (InterruptedException e) {
      LOG.error("Event queue enqueue path={} error", event.getPath(), e);
    }
  }
  @Override
  public void close() {
    // Stops the periodic marker task; any queued events remain in place.
    this.timer.cancel();
  }
  private static class ProgressInsertTask extends TimerTask {
    private final LinkedBlockingQueue<FileAccessEvent> outerQueue;
    public ProgressInsertTask(LinkedBlockingQueue<FileAccessEvent> outerQueue) {
      this.outerQueue = outerQueue;
    }
    @Override
    public void run() {
      try {
        //Todo: do not use HDFSFileAccessEvent
        // Enqueues an empty-path event carrying the current time —
        // presumably a time-progress marker for downstream consumers;
        // NOTE(review): confirm against the event aggregation code.
        this.outerQueue.put(new FileAccessEvent("", System.currentTimeMillis()));
      } catch (InterruptedException e) {
        LOG.error("Outer queue enqueue error", e);
      }
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestConfRestApi.java | smart-integration/src/test/java/org/smartdata/integration/TestConfRestApi.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import io.restassured.RestAssured;
import io.restassured.response.Response;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.integration.rest.RestApiBase;
import java.util.Map;
/**
* Test for ConfRestApi.
*/
public class TestConfRestApi extends IntegrationTestBase {
@Test
public void testConf() {
Response response = RestAssured.get(RestApiBase.CONFROOT);
response.then().body("status", Matchers.equalTo("OK"));
Map confMap = response.jsonPath().getMap("body");
Assert.assertEquals("true", confMap.get(SmartConfKeys.SMART_DFS_ENABLED));
Assert.assertTrue(((String) confMap.get(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY))
.contains("localhost"));
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestClusterRestApi.java | smart-integration/src/test/java/org/smartdata/integration/TestClusterRestApi.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import io.restassured.RestAssured;
import io.restassured.path.json.JsonPath;
import io.restassured.response.Response;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.integration.rest.RestApiBase;
import static org.smartdata.integration.rest.ClusterRestApi.getFileInfo;
import static org.smartdata.integration.rest.CmdletRestApi.submitCmdlet;
import static org.smartdata.integration.rest.CmdletRestApi.waitCmdletComplete;
/**
* Test for ClusterRestApi.
*/
public class TestClusterRestApi extends IntegrationTestBase {
@Test
public void testPrimary() {
Response response = RestAssured.get(RestApiBase.PRIMCLUSTERROOT);
String json = response.asString();
response.then().body("message", Matchers.equalTo("Namenode URL"));
response.then().body("body", Matchers.containsString("localhost"));
}
@Test
public void testGetFileInfo() throws Exception {
long cid = submitCmdlet("write -file /hello -length 1011");
waitCmdletComplete(cid);
Thread.sleep(2000);
JsonPath jsonPath = getFileInfo("/hello");
Assert.assertEquals(jsonPath.getLong("length"), 1011);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestSmallFileRead.java | smart-integration/src/test/java/org/smartdata/integration/TestSmallFileRead.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import com.google.gson.Gson;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.smartdata.hdfs.action.SmallFileCompactAction;
import org.smartdata.hdfs.client.SmartDFSClient;
import org.smartdata.server.MiniSmartClusterHarness;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
public class TestSmallFileRead extends MiniSmartClusterHarness {
private int ret;
private long fileLength;
private void createTestFiles() throws Exception {
Path path = new Path("/test/small_files/");
dfs.mkdirs(path);
for (int i = 0; i < 2; i++) {
String fileName = "/test/small_files/file_" + i;
FSDataOutputStream out = dfs.create(new Path(fileName), (short) 1);
long fileLen = 5 + (int) (Math.random() * 11);
byte[] buf = new byte[20];
Random rb = new Random(2018);
int bytesRemaining = (int) fileLen;
while (bytesRemaining > 0) {
rb.nextBytes(buf);
int bytesToWrite = (bytesRemaining < buf.length) ? bytesRemaining : buf.length;
out.write(buf, 0, bytesToWrite);
bytesRemaining -= bytesToWrite;
}
out.close();
if (i == 0) {
fileLength = fileLen;
ret = buf[0] & 0xff;
}
}
SmallFileCompactAction smallFileCompactAction = new SmallFileCompactAction();
smallFileCompactAction.setDfsClient(dfsClient);
smallFileCompactAction.setContext(smartContext);
Map<String , String> args = new HashMap<>();
List<String> smallFileList = new ArrayList<>();
smallFileList.add("/test/small_files/file_0");
smallFileList.add("/test/small_files/file_1");
args.put(SmallFileCompactAction.FILE_PATH , new Gson().toJson(smallFileList));
args.put(SmallFileCompactAction.CONTAINER_FILE,
"/test/small_files/container_file_5");
smallFileCompactAction.init(args);
smallFileCompactAction.run();
}
@Before
@Override
public void init() throws Exception {
super.init();
createTestFiles();
}
@Test
public void testRead() throws Exception {
waitTillSSMExitSafeMode();
SmartDFSClient smartDFSClient = new SmartDFSClient(smartContext.getConf());
DFSInputStream is = smartDFSClient.open("/test/small_files/file_0");
Assert.assertEquals(1, is.getAllBlocks().size());
Assert.assertEquals(fileLength, is.getFileLength());
Assert.assertEquals(0, is.getPos());
int byteRead = is.read();
Assert.assertEquals(ret, byteRead);
byte[] bytes = new byte[50];
Assert.assertEquals(fileLength - 1, is.read(bytes));
is.close();
is = smartDFSClient.open("/test/small_files/file_0");
ByteBuffer buffer = ByteBuffer.allocate(50);
Assert.assertEquals(fileLength, is.read(buffer));
is.close();
is = smartDFSClient.open("/test/small_files/file_0");
Assert.assertEquals(fileLength - 2, is.read(2, bytes, 1, 50));
is.close();
}
@After
public void tearDown() throws Exception {
dfs.getClient().delete("/test", true);
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestRuleRestApi.java | smart-integration/src/test/java/org/smartdata/integration/TestRuleRestApi.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import io.restassured.RestAssured;
import io.restassured.response.Response;
import org.hamcrest.Matchers;
import org.junit.Test;
import org.smartdata.integration.rest.RestApiBase;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class TestRuleRestApi extends IntegrationTestBase {
@Test
public void test() throws Exception {
String rule = "file : every 1s | path matches \"/home/test\" and age > 2m | archive";
//Response res = RestAssured.post(RULEROOT + "/add/" + rule);
Response res = RestAssured.with().body("ruleText=" + rule).post(RestApiBase.RULEROOT + "/add/");
res.then().body("status", equalTo("CREATED"));
long ruleId = res.jsonPath().getLong("body");
Thread.sleep(1000);
RestAssured.get(RestApiBase.RULEROOT + "/list").then().body("status", equalTo("OK"))
.body("body.size", is(1)).root("body").body("ruleText", contains(rule))
.body("numChecked", contains(0));
RestAssured.post(RestApiBase.RULEROOT + "/" + ruleId + "/start").then()
.body("status", equalTo("OK"));
Thread.sleep(2000);
RestAssured.get(RestApiBase.RULEROOT + "/" + ruleId + "/info").then()
.body("body.numChecked", Matchers.greaterThan(0));
RestAssured.post(RestApiBase.RULEROOT + "/" + ruleId + "/stop").then()
.body("status", equalTo("OK"));
RestAssured.get(RestApiBase.RULEROOT + "/" + ruleId + "/info").then()
.body("body.state", equalTo("DISABLED"))
.body("body.numCmdsGen", is(0));
RestAssured.get(RestApiBase.RULEROOT + "/" + ruleId + "/cmdlets").then()
.body("status", equalTo("OK"))
.body("body.size", is(0));
RestAssured.post(RestApiBase.RULEROOT + "/" + ruleId + "/delete").then()
.body("status", equalTo("OK"));
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestCmdletRestApi.java | smart-integration/src/test/java/org/smartdata/integration/TestCmdletRestApi.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import org.junit.Test;
/**
 * Placeholder for cmdlet REST API integration tests.
 */
public class TestCmdletRestApi extends IntegrationTestBase {
  @Test
  public void test() throws Exception {
    // TODO: add assertions against the cmdlet REST endpoints; as written this
    // only verifies that the integration harness (cluster + server) boots.
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestCaseMoveData.java | smart-integration/src/test/java/org/smartdata/integration/TestCaseMoveData.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import org.junit.Assert;
import org.junit.Test;
import org.smartdata.integration.rest.RuleRestApi;
import static org.smartdata.integration.rest.ActionRestApi.getActionInfo;
import static org.smartdata.integration.rest.CmdletRestApi.getCmdletActionIds;
import static org.smartdata.integration.rest.CmdletRestApi.submitCmdlet;
import static org.smartdata.integration.rest.CmdletRestApi.waitCmdletComplete;
import static org.smartdata.integration.rest.RuleRestApi.startRule;
import static org.smartdata.integration.rest.RuleRestApi.waitRuleTriggered;
public class TestCaseMoveData extends IntegrationTestBase {
// @Test(timeout = 120000)
// public void testOneSsdHotData() throws Exception {
// String file = "/testOneSsd/testOneSsdFile";
// waitCmdletComplete(submitCmdlet("write -length 104 -file " + file));
// waitCmdletComplete(submitCmdlet("archive -file " + file));
// Assert.assertTrue(checkStorage(file, "ARCHIVE", "SSD"));
//
// String rule = "file : every 5s | path matches \"/testOneSsd/*\" "
// + "and accessCount(10min) > 1 | onessd";
// long ruleId = RuleRestApi.submitRule(rule);
// startRule(ruleId);
//
// waitCmdletComplete(submitCmdlet("read -file " + file));
// waitCmdletComplete(submitCmdlet("read -file " + file));
//
// waitRuleTriggered(ruleId);
//
// while (!checkStorage(file, "SSD", "ARCHIVE")) {
// Thread.sleep(1000);
// }
// }
@Test(timeout = 120000)
public void testArchiveColdData() throws Exception {
String file = "/testArchive/testArchiveFile";
waitCmdletComplete(submitCmdlet("write -length 104 -file " + file));
Assert.assertTrue(checkStorage(file, null, "ARCHIVE"));
String rule = "file : every 5s | path matches \"/testArchive/*\" and age > 10s | archive";
long ruleId = RuleRestApi.submitRule(rule);
startRule(ruleId);
waitRuleTriggered(ruleId);
while (!checkStorage(file, "ARCHIVE", null)) {
Thread.sleep(1000);
}
}
private boolean checkStorage(String file,
String containStorageType, String notContainStorageType) {
long cmdletChkArchive = submitCmdlet("checkstorage -file " + file);
waitCmdletComplete(cmdletChkArchive);
String result = getActionInfo(getCmdletActionIds(cmdletChkArchive).get(0))
.getString("result");
return checkStorageResult(result, containStorageType, notContainStorageType);
}
private boolean checkStorageResult(String result,
String containStorageType, String notContainStorageType) {
if (containStorageType != null) {
if (!containStorageType.startsWith("[")) {
containStorageType = "[" + containStorageType + "]";
}
if (!result.contains(containStorageType)) {
return false;
}
}
if (notContainStorageType != null) {
if (!notContainStorageType.startsWith("[")) {
notContainStorageType = "[" + notContainStorageType + "]";
}
if (result.contains(notContainStorageType)) {
return false;
}
}
return true;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestSystemRestApi.java | smart-integration/src/test/java/org/smartdata/integration/TestSystemRestApi.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import io.restassured.RestAssured;
import io.restassured.response.Response;
import org.hamcrest.Matchers;
import org.junit.Test;
import org.smartdata.integration.rest.RestApiBase;
/**
* Test for SystemRestApi.
*/
/**
 * Test for SystemRestApi.
 */
public class TestSystemRestApi extends IntegrationTestBase {
  @Test
  public void testVersion() throws Exception {
    Response response = RestAssured.get(RestApiBase.SYSTEMROOT + "/version");
    // NOTE(review): the expected version string is hard-coded and must be
    // bumped on every release -- consider deriving it from build metadata.
    response.then().body("body", Matchers.equalTo("1.6.0-SNAPSHOT"));
  }

//  @Test
//  public void testServers() throws IOException, InterruptedException {
//    Response response = RestAssured.get(RestApiBase.SYSTEMROOT + "/servers");
//    response.then().body("body", Matchers.empty());
//    Process worker = null;
//    Process agent = null;
//
//    try {
//      worker = Util.startNewServer();
//      Util.waitSlaveServerAvailable();
//
//      agent = Util.startNewAgent();
//      Util.waitAgentAvailable();
//    } finally {
//      if (worker != null) {
//        try {
//          worker.destroy();
//          Util.waitSlaveServersDown();
//        } catch (Throwable t) {
//          // ignore
//        }
//      }
//
//      if (agent != null) {
//        try {
//          agent.destroy();
//          Util.waitAgentsDown();
//        } catch (Throwable t) {
//          // ignore
//        }
//      }
//    }
//  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestActionRestApi.java | smart-integration/src/test/java/org/smartdata/integration/TestActionRestApi.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import io.restassured.RestAssured;
import io.restassured.response.Response;
import io.restassured.response.ValidatableResponse;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Test;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.smartdata.integration.rest.ActionRestApi.getActionIds;
import static org.smartdata.integration.rest.ActionRestApi.getActionInfoMap;
import static org.smartdata.integration.rest.ActionRestApi.submitAction;
import static org.smartdata.integration.rest.CovUtil.getLong;
import static org.smartdata.integration.rest.RestApiBase.ACTIONROOT;
/**
* Test for ActionRestApi.
*/
public class TestActionRestApi extends IntegrationTestBase {
@Test(timeout = 10000)
public void testActionTypes() {
Response response = RestAssured.get(ACTIONROOT + "/registry/list");
ValidatableResponse validatableResponse = response.then().root("body");
validatableResponse.body(
"find { it.actionName == 'allssd' }.displayName", Matchers.equalTo("allssd"));
validatableResponse.body(
"actionName",
Matchers.hasItems(
"uncache",
"write",
"cache",
"read",
"allssd",
"checkstorage",
"archive",
"onessd",
"echo"));
}
@Test(timeout = 200000)
public void testActionsInSequence() throws Exception {
// write and read
testAction("write", "-file /hello -length 10");
testAction("read", "-file /hello");
Map checkStorage1 = testAction("checkstorage", "-file /hello");
String result1 = (String) checkStorage1.get("result");
Assert.assertEquals(3, countSubstring(result1, "DISK"));
// move to all ssd
testAction("allssd", "-file /hello");
Map checkStorage2 = testAction("checkstorage", "-file /hello");
String result2 = (String) checkStorage2.get("result");
Assert.assertEquals(3, countSubstring(result2, "SSD"));
// move to archive
testAction("archive", "-file /hello");
Map checkStorage3 = testAction("checkstorage", "-file /hello");
String result3 = (String) checkStorage3.get("result");
Assert.assertEquals(3, countSubstring(result3, "ARCHIVE"));
// move to one ssd
testAction("onessd", "-file /hello");
Map checkStorage4 = testAction("checkstorage", "-file /hello");
String result4 = (String) checkStorage4.get("result");
Assert.assertEquals(2, countSubstring(result4, "DISK"));
Assert.assertEquals(1, countSubstring(result4, "SSD"));
// move to cache
testAction("cache", "-file /hello");
}
/*
@Test
public void testDistributedAction() throws Exception {
Process worker = Util.startNewServer();
try {
Process agent = Util.startNewAgent();
try {
Util.waitSlaveServerAvailable();
Util.waitAgentAvailable();
// Three actions would be executed on Master, StandbyServer and Agent
testAction("hello", "-print_message message");
testAction("hello", "-print_message message");
testAction("hello", "-print_message message");
} finally {
agent.destroy();
}
} finally {
worker.destroy();
}
}
*/
private int countSubstring(String parent, String child) {
Pattern storagePattern = Pattern.compile(child);
Matcher matcher = storagePattern.matcher(parent);
int count = 0;
while (matcher.find()) {
count++;
}
return count;
}
// submit an action and wait until it is finished with some basic info check
private Map testAction(String actionType, String args) throws Exception {
// add a write action by submitting cmdlet
Long cid = submitAction(actionType, args);
Long aid;
Map actionInfoMap;
// check action info until the action is finished
while (true) {
Thread.sleep(1000);
System.out.println("Action " + actionType + " is running...");
// get aid from cmdletInfo
aid = getActionIds(cid).get(0);
// get actionInfo
actionInfoMap = getActionInfoMap(aid);
Assert.assertEquals(actionType, actionInfoMap.get("actionName"));
Assert.assertEquals(aid, getLong(actionInfoMap.get("actionId")));
Assert.assertEquals(cid, getLong(actionInfoMap.get("cmdletId")));
Boolean finished = (Boolean) actionInfoMap.get("finished");
if (finished) {
Assert.assertEquals(true, actionInfoMap.get("successful"));
Assert.assertEquals(1.0f, (float) actionInfoMap.get("progress"), 0.000001f);
break;
}
}
// check action list
/* Response actionList = RestAssured.get(ACTIONROOT + "/list/0");
actionList.then().body("status", Matchers.equalTo("OK"));
actionList.jsonPath().getList("body.actionId", Long.class).contains(aid);
// check action type list
actionList = RestAssured.get(ACTIONROOT + "/type/0/" + actionType);
actionList.then().body("status", Matchers.equalTo("OK"));
actionList.jsonPath().getList("body.actionId", Long.class).contains(aid);
System.out.println("Action " + actionType + " is finished.");*/
return actionInfoMap;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/IntegrationSmartServer.java | smart-integration/src/test/java/org/smartdata/integration/IntegrationSmartServer.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import org.smartdata.SmartServiceState;
import org.smartdata.admin.SmartAdmin;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.metastore.TestDBUtil;
import org.smartdata.metastore.utils.MetaStoreUtils;
import org.smartdata.server.SmartServer;
/**
* A SmartServer for integration test.
*/
public class IntegrationSmartServer {
private SmartConf conf;
private SmartServer ssm;
private String dbFile;
private String dbUrl;
public void setUp(SmartConf conf) throws Exception {
this.conf = conf;
// Set db used
String db = conf.get(SmartConfKeys.SMART_METASTORE_DB_URL_KEY);
if (db == null || db.length() == 0) {
dbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
dbUrl = MetaStoreUtils.SQLITE_URL_PREFIX + dbFile;
conf.set(SmartConfKeys.SMART_METASTORE_DB_URL_KEY, dbUrl);
}
conf.setLong(SmartConfKeys.SMART_STATUS_REPORT_PERIOD_KEY, 100);
ssm = SmartServer.launchWith(conf);
waitTillSSMExitSafeMode();
}
private void waitTillSSMExitSafeMode() throws Exception {
SmartAdmin client = new SmartAdmin(conf);
long start = System.currentTimeMillis();
int retry = 5;
while (true) {
try {
SmartServiceState state = client.getServiceState();
if (state != SmartServiceState.SAFEMODE) {
break;
}
int secs = (int) (System.currentTimeMillis() - start) / 1000;
System.out.println("Waited for " + secs + " seconds ...");
Thread.sleep(1000);
} catch (Exception e) {
if (retry <= 0) {
throw e;
}
retry--;
}
}
}
public void cleanUp() {
if (ssm != null) {
ssm.shutdown();
}
}
public SmartServer getSSM() {
return ssm;
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/IntegrationTestBase.java | smart-integration/src/test/java/org/smartdata/integration/IntegrationTestBase.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import io.restassured.RestAssured;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.smartdata.conf.SmartConf;
import org.smartdata.conf.SmartConfKeys;
import org.smartdata.integration.cluster.SmartCluster;
import org.smartdata.integration.cluster.SmartMiniCluster;
/**
* Integration test base.
*/
/**
 * Base class for integration tests: brings up (or attaches to) an HDFS
 * cluster, starts a SmartServer, and points RestAssured at its HTTP port.
 */
public class IntegrationTestBase {
  protected static SmartCluster cluster;
  protected static SmartConf conf;
  protected static IntegrationSmartServer smartServer;
  private static int zeppelinPort;

  @BeforeClass
  public static void setup() throws Exception {
    // Set up an HDFS cluster: a mini cluster unless an external namenode
    // RPC address is configured.
    conf = new SmartConf();
    String nn = conf.get(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY);
    boolean useMiniCluster = nn == null || nn.isEmpty();
    if (useMiniCluster) {
      System.out.println("Setting up an mini cluster for testing");
      cluster = new SmartMiniCluster();
      cluster.setUp();
      conf = cluster.getConf();
    } else {
      System.out.println("Using extern HDFS cluster:" + nn);
    }

    // Start a Smart server, remembering the HTTP port for RestAssured.
    String httpAddr = conf.get(SmartConfKeys.SMART_SERVER_HTTP_ADDRESS_KEY,
        SmartConfKeys.SMART_SERVER_HTTP_ADDRESS_DEFAULT);
    zeppelinPort = Integer.parseInt(httpAddr.split(":")[1]);
    conf.setBoolean(SmartConfKeys.SMART_ENABLE_ZEPPELIN_WEB, false);
    smartServer = new IntegrationSmartServer();
    smartServer.setUp(conf);

    // Initialize RestAssured
    initRestAssured();
  }

  private static void initRestAssured() {
    RestAssured.port = zeppelinPort;
    //RestAssured.registerParser("text/plain", Parser.JSON);
  }

  @AfterClass
  public static void cleanUp() throws Exception {
    if (smartServer != null) {
      smartServer.cleanUp();
    }
    if (cluster != null) {
      cluster.cleanUp();
    }
  }
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Intel-bigdata/SSM | https://github.com/Intel-bigdata/SSM/blob/e0c90f054687a18c4e095547ac5e31b8b313b3ef/smart-integration/src/test/java/org/smartdata/integration/TestCaseCacheFile.java | smart-integration/src/test/java/org/smartdata/integration/TestCaseCacheFile.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.integration;
import org.junit.Test;
/**
 * Placeholder for the cache-rule integration test; the original scenario is
 * kept below, disabled because it failed intermittently.
 */
public class TestCaseCacheFile extends IntegrationTestBase {
  @Test(timeout = 40000)
  public void test() throws Exception {
    //TODO : Random failure, need to fix
    /*String rule = "file : every 1s | accessCount(1min) > 1 | cache";
    long ruleId = RuleRestApi.submitRule(rule);
    startRule(ruleId);
    String file = "/testCache/testCacheFile";
    waitCmdletComplete(submitCmdlet("write -length 1024 -file " + file));
    waitCmdletComplete(submitCmdlet("read -file " + file));
    waitCmdletComplete(submitCmdlet("read -file " + file));
    while (true) {
      if (getCachedFilePaths().contains(file)) {
        break;
      }
      Thread.sleep(1000);
    }*/
  }
}
}
| java | Apache-2.0 | e0c90f054687a18c4e095547ac5e31b8b313b3ef | 2026-01-05T02:41:11.405497Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.